1 // Copyright 2021 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #if V8_TARGET_ARCH_LOONG64
6 
7 #include "src/api/api-arguments.h"
8 #include "src/codegen/code-factory.h"
9 #include "src/codegen/interface-descriptors-inl.h"
10 #include "src/debug/debug.h"
11 #include "src/deoptimizer/deoptimizer.h"
12 #include "src/execution/frame-constants.h"
13 #include "src/execution/frames.h"
14 #include "src/logging/counters.h"
15 // For interpreter_entry_return_pc_offset. TODO(jkummerow): Drop.
16 #include "src/codegen/loong64/constants-loong64.h"
17 #include "src/codegen/macro-assembler-inl.h"
18 #include "src/codegen/register-configuration.h"
19 #include "src/heap/heap-inl.h"
20 #include "src/objects/cell.h"
21 #include "src/objects/foreign.h"
22 #include "src/objects/heap-number.h"
23 #include "src/objects/js-generator.h"
24 #include "src/objects/objects-inl.h"
25 #include "src/objects/smi.h"
26 #include "src/runtime/runtime.h"
27 
28 #if V8_ENABLE_WEBASSEMBLY
29 #include "src/wasm/wasm-linkage.h"
30 #include "src/wasm/wasm-objects.h"
31 #endif  // V8_ENABLE_WEBASSEMBLY
32 
33 namespace v8 {
34 namespace internal {
35 
36 #define __ ACCESS_MASM(masm)
37 
38 void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address) {
39   __ li(kJavaScriptCallExtraArg1Register, ExternalReference::Create(address));
40   __ Jump(BUILTIN_CODE(masm->isolate(), AdaptorWithBuiltinExitFrame),
41           RelocInfo::CODE_TARGET);
42 }
43 
44 static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
45                                            Runtime::FunctionId function_id) {
46   // ----------- S t a t e -------------
47   //  -- a0 : actual argument count
48   //  -- a1 : target function (preserved for callee)
49   //  -- a3 : new target (preserved for callee)
50   // -----------------------------------
51   {
52     FrameScope scope(masm, StackFrame::INTERNAL);
53     // Push a copy of the target function, the new target and the actual
54     // argument count.
55     // Push function as parameter to the runtime call.
56     __ SmiTag(kJavaScriptCallArgCountRegister);
57     __ Push(kJavaScriptCallTargetRegister, kJavaScriptCallNewTargetRegister,
58             kJavaScriptCallArgCountRegister, kJavaScriptCallTargetRegister);
59 
60     __ CallRuntime(function_id, 1);
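    // The runtime call returns the Code object to run in a0;
    // LoadCodeObjectEntry below extracts its entry address into a2.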
61     __ LoadCodeObjectEntry(a2, a0);
62     // Restore target function, new target and actual argument count.
63     __ Pop(kJavaScriptCallTargetRegister, kJavaScriptCallNewTargetRegister,
64            kJavaScriptCallArgCountRegister);
65     __ SmiUntag(kJavaScriptCallArgCountRegister);
66   }
67 
68   static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
69   __ Jump(a2);
70 }
71 
72 namespace {
73 
74 void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
75   // ----------- S t a t e -------------
76   //  -- a0     : number of arguments
77   //  -- a1     : constructor function
78   //  -- a3     : new target
79   //  -- cp     : context
80   //  -- ra     : return address
81   //  -- sp[...]: constructor arguments
82   // -----------------------------------
83 
84   // Enter a construct frame.
85   {
86     FrameScope scope(masm, StackFrame::CONSTRUCT);
87 
88     // Preserve the incoming parameters on the stack.
89     __ SmiTag(a0);
90     __ Push(cp, a0);
91     __ SmiUntag(a0);
92 
93     // Set up pointer to last argument (skip receiver).
94     __ Add_d(
95         t2, fp,
96         Operand(StandardFrameConstants::kCallerSPOffset + kSystemPointerSize));
97     // Copy arguments and receiver to the expression stack.
98     __ PushArray(t2, a0, t3, t0);
99     // The receiver for the builtin/api call.
100     __ PushRoot(RootIndex::kTheHoleValue);
101 
102     // Call the function.
103     // a0: number of arguments (untagged)
104     // a1: constructor function
105     // a3: new target
106     __ InvokeFunctionWithNewTarget(a1, a3, a0, InvokeType::kCall);
107 
108     // Restore context from the frame.
109     __ Ld_d(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
110     // Restore smi-tagged arguments count from the frame.
111     __ Ld_d(t3, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
112     // Leave construct frame.
113   }
114 
115   // Remove caller arguments from the stack and return.
116   __ SmiScale(t3, t3, kPointerSizeLog2);
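  // SmiScale untags the Smi argument count and scales it by kPointerSize in
  // one step, giving the byte size of the pushed arguments.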
117   __ Add_d(sp, sp, t3);
118   __ Add_d(sp, sp, kPointerSize);
119   __ Ret();
120 }
121 
122 }  // namespace
123 
124 // The construct stub for ES5 constructor functions and ES6 class constructors.
125 void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
126   // ----------- S t a t e -------------
127   //  --      a0: number of arguments (untagged)
128   //  --      a1: constructor function
129   //  --      a3: new target
130   //  --      cp: context
131   //  --      ra: return address
132   //  -- sp[...]: constructor arguments
133   // -----------------------------------
134 
135   // Enter a construct frame.
136   FrameScope scope(masm, StackFrame::MANUAL);
137   Label post_instantiation_deopt_entry, not_create_implicit_receiver;
138   __ EnterFrame(StackFrame::CONSTRUCT);
139 
140   // Preserve the incoming parameters on the stack.
141   __ SmiTag(a0);
142   __ Push(cp, a0, a1);
143   __ PushRoot(RootIndex::kUndefinedValue);
144   __ Push(a3);
145 
146   // ----------- S t a t e -------------
147   //  --        sp[0*kPointerSize]: new target
148   //  --        sp[1*kPointerSize]: padding
149   //  -- a1 and sp[2*kPointerSize]: constructor function
150   //  --        sp[3*kPointerSize]: number of arguments (tagged)
151   //  --        sp[4*kPointerSize]: context
152   // -----------------------------------
153 
154   __ Ld_d(t2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
155   __ Ld_wu(t2, FieldMemOperand(t2, SharedFunctionInfo::kFlagsOffset));
156   __ DecodeField<SharedFunctionInfo::FunctionKindBits>(t2);
157   __ JumpIfIsInRange(t2, kDefaultDerivedConstructor, kDerivedConstructor,
158                      &not_create_implicit_receiver);
159 
160   // If not derived class constructor: Allocate the new receiver object.
161   __ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1, t2,
162                       t3);
163   __ Call(BUILTIN_CODE(masm->isolate(), FastNewObject), RelocInfo::CODE_TARGET);
164   __ Branch(&post_instantiation_deopt_entry);
165 
166   // Else: use TheHoleValue as receiver for constructor call
167   __ bind(&not_create_implicit_receiver);
168   __ LoadRoot(a0, RootIndex::kTheHoleValue);
169 
170   // ----------- S t a t e -------------
171   //  --                          a0: receiver
172   //  -- Slot 4 / sp[0*kPointerSize]: new target
173   //  -- Slot 3 / sp[1*kPointerSize]: padding
174   //  -- Slot 2 / sp[2*kPointerSize]: constructor function
175   //  -- Slot 1 / sp[3*kPointerSize]: number of arguments (tagged)
176   //  -- Slot 0 / sp[4*kPointerSize]: context
177   // -----------------------------------
178   // Deoptimizer enters here.
179   masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset(
180       masm->pc_offset());
181   __ bind(&post_instantiation_deopt_entry);
182 
183   // Restore new target.
184   __ Pop(a3);
185 
186   // Push the allocated receiver to the stack.
187   __ Push(a0);
188 
189   // We need two copies because we may have to return the original one
190   // and the calling conventions dictate that the called function pops the
191   // receiver. The second copy is pushed after the arguments; we stash it in
192   // a6 here because a0 will be clobbered below.
193   __ mov(a6, a0);
194 
195   // Set up pointer to last argument.
196   __ Add_d(
197       t2, fp,
198       Operand(StandardFrameConstants::kCallerSPOffset + kSystemPointerSize));
199 
200   // ----------- S t a t e -------------
201   //  --                 r3: new target
202   //  -- sp[0*kPointerSize]: implicit receiver
203   //  -- sp[1*kPointerSize]: implicit receiver
204   //  -- sp[2*kPointerSize]: padding
205   //  -- sp[3*kPointerSize]: constructor function
206   //  -- sp[4*kPointerSize]: number of arguments (tagged)
207   //  -- sp[5*kPointerSize]: context
208   // -----------------------------------
209 
210   // Restore constructor function and argument count.
211   __ Ld_d(a1, MemOperand(fp, ConstructFrameConstants::kConstructorOffset));
212   __ Ld_d(a0, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
213   __ SmiUntag(a0);
214 
215   Label stack_overflow;
216   __ StackOverflowCheck(a0, t0, t1, &stack_overflow);
217 
218   // TODO(victorgomes): When the arguments adaptor is completely removed, we
219   // should get the formal parameter count and copy the arguments in its
220   // correct position (including any undefined), instead of delaying this to
221   // InvokeFunction.
222 
223   // Copy arguments and receiver to the expression stack.
224   __ PushArray(t2, a0, t0, t1);
225   // We need two copies because we may have to return the original one
226   // and the calling conventions dictate that the called function pops the
227   // receiver. The second copy is pushed after the arguments.
228   __ Push(a6);
229 
230   // Call the function.
231   __ InvokeFunctionWithNewTarget(a1, a3, a0, InvokeType::kCall);
232 
233   // ----------- S t a t e -------------
234   //  --                 s0: constructor result
235   //  -- sp[0*kPointerSize]: implicit receiver
236   //  -- sp[1*kPointerSize]: padding
237   //  -- sp[2*kPointerSize]: constructor function
238   //  -- sp[3*kPointerSize]: number of arguments
239   //  -- sp[4*kPointerSize]: context
240   // -----------------------------------
241 
242   // Store offset of return address for deoptimizer.
243   masm->isolate()->heap()->SetConstructStubInvokeDeoptPCOffset(
244       masm->pc_offset());
245 
246   // If the result is an object (in the ECMA sense), we should get rid
247   // of the receiver and use the result; see ECMA-262 section 13.2.2-7
248   // on page 74.
249   Label use_receiver, do_throw, leave_and_return, check_receiver;
250 
251   // If the result is undefined, we fall through to use the implicit receiver.
252   __ JumpIfNotRoot(a0, RootIndex::kUndefinedValue, &check_receiver);
253 
254   // Otherwise we jump to check_receiver below, which does a smi check and
255   // checks whether the return value is a valid receiver.
256 
257   // Throw away the result of the constructor invocation and use the
258   // on-stack receiver as the result.
259   __ bind(&use_receiver);
260   __ Ld_d(a0, MemOperand(sp, 0 * kPointerSize));
261   __ JumpIfRoot(a0, RootIndex::kTheHoleValue, &do_throw);
262 
263   __ bind(&leave_and_return);
264   // Restore smi-tagged arguments count from the frame.
265   __ Ld_d(a1, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
266   // Leave construct frame.
267   __ LeaveFrame(StackFrame::CONSTRUCT);
268 
269   // Remove caller arguments from the stack and return.
270   __ SmiScale(a4, a1, kPointerSizeLog2);
271   __ Add_d(sp, sp, a4);
272   __ Add_d(sp, sp, kPointerSize);
273   __ Ret();
274 
275   __ bind(&check_receiver);
276   __ JumpIfSmi(a0, &use_receiver);
277 
278   // If the type of the result (stored in its map) is less than
279   // FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense.
280   __ GetObjectType(a0, t2, t2);
281   STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
282   __ Branch(&leave_and_return, greater_equal, t2,
283             Operand(FIRST_JS_RECEIVER_TYPE));
284   __ Branch(&use_receiver);
285 
286   __ bind(&do_throw);
287   // Restore the context from the frame.
288   __ Ld_d(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
289   __ CallRuntime(Runtime::kThrowConstructorReturnedNonObject);
290   __ break_(0xCC);
291 
292   __ bind(&stack_overflow);
293   // Restore the context from the frame.
294   __ Ld_d(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
295   __ CallRuntime(Runtime::kThrowStackOverflow);
296   __ break_(0xCC);
297 }
298 
299 void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
300   Generate_JSBuiltinsConstructStubHelper(masm);
301 }
302 
303 static void AssertCodeIsBaseline(MacroAssembler* masm, Register code,
304                                  Register scratch) {
305   DCHECK(!AreAliased(code, scratch));
306   // Verify that the code kind is baseline code via the CodeKind.
307   __ Ld_d(scratch, FieldMemOperand(code, Code::kFlagsOffset));
308   __ DecodeField<Code::KindField>(scratch);
309   __ Assert(eq, AbortReason::kExpectedBaselineData, scratch,
310             Operand(static_cast<int>(CodeKind::BASELINE)));
311 }
312 
313 // TODO(v8:11429): Add a path for "not_compiled" and unify the two uses under
314 // the more general dispatch.
315 static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
316                                                     Register sfi_data,
317                                                     Register scratch1,
318                                                     Label* is_baseline) {
319   Label done;
320 
321   __ GetObjectType(sfi_data, scratch1, scratch1);
322   if (FLAG_debug_code) {
323     Label not_baseline;
324     __ Branch(&not_baseline, ne, scratch1, Operand(CODET_TYPE));
325     AssertCodeIsBaseline(masm, sfi_data, scratch1);
326     __ Branch(is_baseline);
327     __ bind(&not_baseline);
328   } else {
329     __ Branch(is_baseline, eq, scratch1, Operand(CODET_TYPE));
330   }
331   __ Branch(&done, ne, scratch1, Operand(INTERPRETER_DATA_TYPE));
332   __ Ld_d(sfi_data,
333           FieldMemOperand(sfi_data, InterpreterData::kBytecodeArrayOffset));
334 
335   __ bind(&done);
336 }
337 
338 // static
339 void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
340   // ----------- S t a t e -------------
341   //  -- a0 : the value to pass to the generator
342   //  -- a1 : the JSGeneratorObject to resume
343   //  -- ra : return address
344   // -----------------------------------
345   // Store input value into generator object.
346   __ St_d(a0, FieldMemOperand(a1, JSGeneratorObject::kInputOrDebugPosOffset));
347   __ RecordWriteField(a1, JSGeneratorObject::kInputOrDebugPosOffset, a0,
348                       kRAHasNotBeenSaved, SaveFPRegsMode::kIgnore);
349   // Check that a1 is still valid, RecordWrite might have clobbered it.
350   __ AssertGeneratorObject(a1);
351 
352   // Load suspended function and context.
353   __ Ld_d(a4, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
354   __ Ld_d(cp, FieldMemOperand(a4, JSFunction::kContextOffset));
355 
356   // Flood function if we are stepping.
357   Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
358   Label stepping_prepared;
359   ExternalReference debug_hook =
360       ExternalReference::debug_hook_on_function_call_address(masm->isolate());
361   __ li(a5, debug_hook);
362   __ Ld_b(a5, MemOperand(a5, 0));
363   __ Branch(&prepare_step_in_if_stepping, ne, a5, Operand(zero_reg));
364 
365   // Flood function if we need to continue stepping in the suspended generator.
366   ExternalReference debug_suspended_generator =
367       ExternalReference::debug_suspended_generator_address(masm->isolate());
368   __ li(a5, debug_suspended_generator);
369   __ Ld_d(a5, MemOperand(a5, 0));
370   __ Branch(&prepare_step_in_suspended_generator, eq, a1, Operand(a5));
371   __ bind(&stepping_prepared);
372 
373   // Check the stack for overflow. We are not trying to catch interruptions
374   // (i.e. debug break and preemption) here, so check the "real stack limit".
375   Label stack_overflow;
376   __ LoadStackLimit(kScratchReg,
377                     MacroAssembler::StackLimitKind::kRealStackLimit);
378   __ Branch(&stack_overflow, lo, sp, Operand(kScratchReg));
379 
380   // ----------- S t a t e -------------
381   //  -- a1    : the JSGeneratorObject to resume
382   //  -- a4    : generator function
383   //  -- cp    : generator context
384   //  -- ra    : return address
385   // -----------------------------------
386 
387   // Push holes for arguments to generator function. Since the parser forced
388   // context allocation for any variables in generators, the actual argument
389   // values have already been copied into the context and these dummy values
390   // will never be used.
391   __ Ld_d(a3, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
392   __ Ld_hu(
393       a3, FieldMemOperand(a3, SharedFunctionInfo::kFormalParameterCountOffset));
394   __ Ld_d(t1, FieldMemOperand(
395                   a1, JSGeneratorObject::kParametersAndRegistersOffset));
396   {
397     Label done_loop, loop;
398     __ bind(&loop);
399     __ Sub_d(a3, a3, Operand(1));
400     __ Branch(&done_loop, lt, a3, Operand(zero_reg));
401     __ Alsl_d(kScratchReg, a3, t1, kPointerSizeLog2, t7);
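    // kScratchReg = t1 + a3 * kPointerSize (element offset within the
    // parameters-and-registers FixedArray; the header offset is added below).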
402     __ Ld_d(kScratchReg, FieldMemOperand(kScratchReg, FixedArray::kHeaderSize));
403     __ Push(kScratchReg);
404     __ Branch(&loop);
405     __ bind(&done_loop);
406     // Push receiver.
407     __ Ld_d(kScratchReg,
408             FieldMemOperand(a1, JSGeneratorObject::kReceiverOffset));
409     __ Push(kScratchReg);
410   }
411 
412   // Underlying function needs to have bytecode available.
413   if (FLAG_debug_code) {
414     Label is_baseline;
415     __ Ld_d(a3, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
416     __ Ld_d(a3, FieldMemOperand(a3, SharedFunctionInfo::kFunctionDataOffset));
417     GetSharedFunctionInfoBytecodeOrBaseline(masm, a3, t5, &is_baseline);
418     __ GetObjectType(a3, a3, a3);
419     __ Assert(eq, AbortReason::kMissingBytecodeArray, a3,
420               Operand(BYTECODE_ARRAY_TYPE));
421     __ bind(&is_baseline);
422   }
423 
424   // Resume (Ignition/TurboFan) generator object.
425   {
426     __ Ld_d(a0, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
427     __ Ld_hu(a0, FieldMemOperand(
428                      a0, SharedFunctionInfo::kFormalParameterCountOffset));
429     // We abuse new.target both to indicate that this is a resume call and to
430     // pass in the generator object.  In ordinary calls, new.target is always
431     // undefined because generator functions are non-constructable.
432     __ Move(a3, a1);
433     __ Move(a1, a4);
434     static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
435     __ Ld_d(a2, FieldMemOperand(a1, JSFunction::kCodeOffset));
436     __ JumpCodeObject(a2);
437   }
438 
439   __ bind(&prepare_step_in_if_stepping);
440   {
441     FrameScope scope(masm, StackFrame::INTERNAL);
442     __ Push(a1, a4);
443     // Push hole as receiver since we do not use it for stepping.
444     __ PushRoot(RootIndex::kTheHoleValue);
445     __ CallRuntime(Runtime::kDebugOnFunctionCall);
446     __ Pop(a1);
447   }
448   __ Ld_d(a4, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
449   __ Branch(&stepping_prepared);
450 
451   __ bind(&prepare_step_in_suspended_generator);
452   {
453     FrameScope scope(masm, StackFrame::INTERNAL);
454     __ Push(a1);
455     __ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator);
456     __ Pop(a1);
457   }
458   __ Ld_d(a4, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
459   __ Branch(&stepping_prepared);
460 
461   __ bind(&stack_overflow);
462   {
463     FrameScope scope(masm, StackFrame::INTERNAL);
464     __ CallRuntime(Runtime::kThrowStackOverflow);
465     __ break_(0xCC);  // This should be unreachable.
466   }
467 }
468 
469 void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
470   FrameScope scope(masm, StackFrame::INTERNAL);
471   __ Push(a1);
472   __ CallRuntime(Runtime::kThrowConstructedNonConstructable);
473 }
474 
475 // Clobbers scratch1 and scratch2; preserves all other registers.
476 static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc,
477                                         Register scratch1, Register scratch2) {
478   // Check the stack for overflow. We are not trying to catch
479   // interruptions (e.g. debug break and preemption) here, so the "real stack
480   // limit" is checked.
481   Label okay;
482   __ LoadStackLimit(scratch1, MacroAssembler::StackLimitKind::kRealStackLimit);
483   // Make scratch1 the space we have left. The stack might already be
484   // overflowed here, which will cause scratch1 to become negative.
485   __ sub_d(scratch1, sp, scratch1);
486   // Check if the arguments will overflow the stack.
487   __ slli_d(scratch2, argc, kPointerSizeLog2);
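  // scratch2 = argc * kSystemPointerSize, the stack space the arguments need.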
488   __ Branch(&okay, gt, scratch1, Operand(scratch2));  // Signed comparison.
489 
490   // Out of stack space.
491   __ CallRuntime(Runtime::kThrowStackOverflow);
492 
493   __ bind(&okay);
494 }
495 
496 namespace {
497 
498 // Called with the native C calling convention. The corresponding function
499 // signature is either:
500 //
501 //   using JSEntryFunction = GeneratedCode<Address(
502 //       Address root_register_value, Address new_target, Address target,
503 //       Address receiver, intptr_t argc, Address** args)>;
504 // or
505 //   using JSEntryFunction = GeneratedCode<Address(
506 //       Address root_register_value, MicrotaskQueue* microtask_queue)>;
507 void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
508                              Builtin entry_trampoline) {
509   Label invoke, handler_entry, exit;
510 
511   {
512     NoRootArrayScope no_root_array(masm);
513 
514     // Registers:
515     //  either
516     //   a0: root register value
517     //   a1: entry address
518     //   a2: function
519     //   a3: receiver
520     //   a4: argc
521     //   a5: argv
522     //  or
523     //   a0: root register value
524     //   a1: microtask_queue
525 
526     // Save callee saved registers on the stack.
527     __ MultiPush(kCalleeSaved | ra.bit());
528 
529     // Save callee-saved FPU registers.
530     __ MultiPushFPU(kCalleeSavedFPU);
531     // Set up the reserved register for 0.0.
532     __ Move(kDoubleRegZero, 0.0);
533 
534     // Initialize the root register.
535     // C calling convention. The first argument is passed in a0.
536     __ mov(kRootRegister, a0);
537   }
538 
539   // a1: entry address
540   // a2: function
541   // a3: receiver
542   // a4: argc
543   // a5: argv
544 
545   // We build an EntryFrame.
546   __ li(s1, Operand(-1));  // Push a bad frame pointer to fail if it is used.
547   __ li(s2, Operand(StackFrame::TypeToMarker(type)));
548   __ li(s3, Operand(StackFrame::TypeToMarker(type)));
549   ExternalReference c_entry_fp = ExternalReference::Create(
550       IsolateAddressId::kCEntryFPAddress, masm->isolate());
551   __ li(s5, c_entry_fp);
552   __ Ld_d(s4, MemOperand(s5, 0));
553   __ Push(s1, s2, s3, s4);
554 
555   // Clear c_entry_fp, now that we've pushed its previous value to the stack.
556   // If the c_entry_fp is not already zero and we don't clear it, the
557   // SafeStackFrameIterator will assume we are executing C++ and miss the JS
558   // frames on top.
559   __ St_d(zero_reg, MemOperand(s5, 0));
560 
561   // Set up frame pointer for the frame to be pushed.
562   __ addi_d(fp, sp, -EntryFrameConstants::kCallerFPOffset);
563 
564   // Registers:
565   //  either
566   //   a1: entry address
567   //   a2: function
568   //   a3: receiver
569   //   a4: argc
570   //   a5: argv
571   //  or
572   //   a1: microtask_queue
573   //
574   // Stack:
575   // caller fp          |
576   // function slot      | entry frame
577   // context slot       |
578   // bad fp (0xFF...F)  |
579   // callee saved registers + ra
580   // [ O32: 4 args slots]
581   // args
582 
583   // If this is the outermost JS call, set js_entry_sp value.
584   Label non_outermost_js;
585   ExternalReference js_entry_sp = ExternalReference::Create(
586       IsolateAddressId::kJSEntrySPAddress, masm->isolate());
587   __ li(s1, js_entry_sp);
588   __ Ld_d(s2, MemOperand(s1, 0));
589   __ Branch(&non_outermost_js, ne, s2, Operand(zero_reg));
590   __ St_d(fp, MemOperand(s1, 0));
591   __ li(s3, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
592   Label cont;
593   __ b(&cont);
594   __ nop();  // Branch delay slot nop.
595   __ bind(&non_outermost_js);
596   __ li(s3, Operand(StackFrame::INNER_JSENTRY_FRAME));
597   __ bind(&cont);
598   __ Push(s3);
599 
600   // Jump to a faked try block that does the invoke, with a faked catch
601   // block that sets the pending exception.
602   __ jmp(&invoke);
603   __ bind(&handler_entry);
604 
605   // Store the current pc as the handler offset. It's used later to create the
606   // handler table.
607   masm->isolate()->builtins()->SetJSEntryHandlerOffset(handler_entry.pos());
608 
609   // Caught exception: Store result (exception) in the pending exception
610   // field in the JSEnv and return a failure sentinel.  Coming in here the
611   // fp will be invalid because the PushStackHandler below sets it to 0 to
612   // signal the existence of the JSEntry frame.
613   __ li(s1, ExternalReference::Create(
614                 IsolateAddressId::kPendingExceptionAddress, masm->isolate()));
615   __ St_d(a0,
616           MemOperand(s1, 0));  // We come back from 'invoke'. result is in a0.
617   __ LoadRoot(a0, RootIndex::kException);
618   __ b(&exit);  // b exposes branch delay slot.
619   __ nop();     // Branch delay slot nop.
620 
621   // Invoke: Link this frame into the handler chain.
622   __ bind(&invoke);
623   __ PushStackHandler();
624   // If an exception not caught by another handler occurs, this handler
625   // returns control to the code after the jump to &invoke above, which
626   // restores all kCalleeSaved registers (including cp and fp) to their
627   // saved values before returning a failure to C.
628   //
629   // Registers:
630   //  either
631   //   a0: root register value
632   //   a1: entry address
633   //   a2: function
634   //   a3: receiver
635   //   a4: argc
636   //   a5: argv
637   //  or
638   //   a0: root register value
639   //   a1: microtask_queue
640   //
641   // Stack:
642   // handler frame
643   // entry frame
644   // callee saved registers + ra
645   // [ O32: 4 args slots]
646   // args
647   //
648   // Invoke the function by calling through JS entry trampoline builtin and
649   // pop the faked function when we return.
650 
651   Handle<Code> trampoline_code =
652       masm->isolate()->builtins()->code_handle(entry_trampoline);
653   __ Call(trampoline_code, RelocInfo::CODE_TARGET);
654 
655   // Unlink this frame from the handler chain.
656   __ PopStackHandler();
657 
658   __ bind(&exit);  // a0 holds result
659   // Check if the current stack frame is marked as the outermost JS frame.
660   Label non_outermost_js_2;
661   __ Pop(a5);
662   __ Branch(&non_outermost_js_2, ne, a5,
663             Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
664   __ li(a5, js_entry_sp);
665   __ St_d(zero_reg, MemOperand(a5, 0));
666   __ bind(&non_outermost_js_2);
667 
668   // Restore the top frame descriptors from the stack.
669   __ Pop(a5);
670   __ li(a4, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
671                                       masm->isolate()));
672   __ St_d(a5, MemOperand(a4, 0));
673 
674   // Reset the stack to the callee saved registers.
675   __ addi_d(sp, sp, -EntryFrameConstants::kCallerFPOffset);
676 
677   // Restore callee-saved fpu registers.
678   __ MultiPopFPU(kCalleeSavedFPU);
679 
680   // Restore callee saved registers from the stack.
681   __ MultiPop(kCalleeSaved | ra.bit());
682   // Return.
683   __ Jump(ra);
684 }
685 
686 }  // namespace
687 
688 void Builtins::Generate_JSEntry(MacroAssembler* masm) {
689   Generate_JSEntryVariant(masm, StackFrame::ENTRY, Builtin::kJSEntryTrampoline);
690 }
691 
692 void Builtins::Generate_JSConstructEntry(MacroAssembler* masm) {
693   Generate_JSEntryVariant(masm, StackFrame::CONSTRUCT_ENTRY,
694                           Builtin::kJSConstructEntryTrampoline);
695 }
696 
697 void Builtins::Generate_JSRunMicrotasksEntry(MacroAssembler* masm) {
698   Generate_JSEntryVariant(masm, StackFrame::ENTRY,
699                           Builtin::kRunMicrotasksTrampoline);
700 }
701 
702 static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
703                                              bool is_construct) {
704   // ----------- S t a t e -------------
705   //  -- a1: new.target
706   //  -- a2: function
707   //  -- a3: receiver_pointer
708   //  -- a4: argc
709   //  -- a5: argv
710   // -----------------------------------
711 
712   // Enter an internal frame.
713   {
714     FrameScope scope(masm, StackFrame::INTERNAL);
715 
716     // Set up the context (we need to use the caller context from the isolate).
717     ExternalReference context_address = ExternalReference::Create(
718         IsolateAddressId::kContextAddress, masm->isolate());
719     __ li(cp, context_address);
720     __ Ld_d(cp, MemOperand(cp, 0));
721 
722     // Push the function and the receiver onto the stack.
723     __ Push(a2);
724 
725     // Check if we have enough stack space to push all arguments.
726     __ addi_d(a6, a4, 1);
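    // a6 = argc + 1, accounting for the receiver pushed after the arguments.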
727     Generate_CheckStackOverflow(masm, a6, a0, s2);
728 
729     // Copy arguments to the stack in a loop.
730     // a4: argc
731     // a5: argv, i.e. points to first arg
732     Label loop, entry;
733     __ Alsl_d(s1, a4, a5, kPointerSizeLog2, t7);
734     __ b(&entry);
735     // s1 points past last arg.
736     __ bind(&loop);
737     __ addi_d(s1, s1, -kPointerSize);
738     __ Ld_d(s2, MemOperand(s1, 0));  // Read next parameter.
739     __ Ld_d(s2, MemOperand(s2, 0));  // Dereference handle.
740     __ Push(s2);                     // Push parameter.
741     __ bind(&entry);
742     __ Branch(&loop, ne, a5, Operand(s1));
743 
744     // Push the receiver.
745     __ Push(a3);
746 
747     // a0: argc
748     // a1: function
749     // a3: new.target
750     __ mov(a3, a1);
751     __ mov(a1, a2);
752     __ mov(a0, a4);
753 
754     // Initialize all JavaScript callee-saved registers, since they will be seen
755     // by the garbage collector as part of handlers.
756     __ LoadRoot(a4, RootIndex::kUndefinedValue);
757     __ mov(a5, a4);
758     __ mov(s1, a4);
759     __ mov(s2, a4);
760     __ mov(s3, a4);
761     __ mov(s4, a4);
762     __ mov(s5, a4);
763     // s6 holds the root address. Do not clobber.
764     // s7 is cp. Do not init.
765 
766     // Invoke the code.
767     Handle<Code> builtin = is_construct
768                                ? BUILTIN_CODE(masm->isolate(), Construct)
769                                : masm->isolate()->builtins()->Call();
770     __ Call(builtin, RelocInfo::CODE_TARGET);
771 
772     // Leave internal frame.
773   }
774   __ Jump(ra);
775 }
776 
777 void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
778   Generate_JSEntryTrampolineHelper(masm, false);
779 }
780 
781 void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
782   Generate_JSEntryTrampolineHelper(masm, true);
783 }
784 
785 void Builtins::Generate_RunMicrotasksTrampoline(MacroAssembler* masm) {
786   // a1: microtask_queue
787   __ mov(RunMicrotasksDescriptor::MicrotaskQueueRegister(), a1);
788   __ Jump(BUILTIN_CODE(masm->isolate(), RunMicrotasks), RelocInfo::CODE_TARGET);
789 }
790 
791 static void ReplaceClosureCodeWithOptimizedCode(MacroAssembler* masm,
792                                                 Register optimized_code,
793                                                 Register closure) {
794   DCHECK(!AreAliased(optimized_code, closure));
795   // Store code entry in the closure.
796   __ St_d(optimized_code, FieldMemOperand(closure, JSFunction::kCodeOffset));
797   __ RecordWriteField(closure, JSFunction::kCodeOffset, optimized_code,
798                       kRAHasNotBeenSaved, SaveFPRegsMode::kIgnore,
799                       RememberedSetAction::kOmit, SmiCheck::kOmit);
800 }
801 
802 static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
803                                   Register scratch2) {
804   Register params_size = scratch1;
805 
806   // Get the size of the formal parameters + receiver (in bytes).
807   __ Ld_d(params_size,
808           MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
809   __ Ld_w(params_size,
810           FieldMemOperand(params_size, BytecodeArray::kParameterSizeOffset));
811 
812   Register actual_params_size = scratch2;
813   // Compute the size of the actual parameters + receiver (in bytes).
814   __ Ld_d(actual_params_size,
815           MemOperand(fp, StandardFrameConstants::kArgCOffset));
816   __ slli_d(actual_params_size, actual_params_size, kPointerSizeLog2);
817   __ Add_d(actual_params_size, actual_params_size, Operand(kSystemPointerSize));
818 
819   // If actual is bigger than formal, then we should use it to free up the stack
820   // arguments.
821   __ slt(t2, params_size, actual_params_size);
822   __ Movn(params_size, actual_params_size, t2);
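  // params_size now holds max(params_size, actual_params_size).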
823 
824   // Leave the frame (also dropping the register file).
825   __ LeaveFrame(StackFrame::INTERPRETED);
826 
827   // Drop receiver + arguments.
828   __ Add_d(sp, sp, params_size);
829 }
830 
831 // Tail-call |function_id| if |actual_marker| == |expected_marker|
832 static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
833                                           Register actual_marker,
834                                           OptimizationMarker expected_marker,
835                                           Runtime::FunctionId function_id) {
836   Label no_match;
837   __ Branch(&no_match, ne, actual_marker, Operand(expected_marker));
838   GenerateTailCallToReturnedCode(masm, function_id);
839   __ bind(&no_match);
840 }
841 
842 static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
843                                       Register optimized_code_entry) {
844   // ----------- S t a t e -------------
845   //  -- a0 : actual argument count
846   //  -- a3 : new target (preserved for callee if needed, and caller)
847   //  -- a1 : target function (preserved for callee if needed, and caller)
848   // -----------------------------------
849   DCHECK(!AreAliased(optimized_code_entry, a1, a3));
850 
851   Register closure = a1;
852   Label heal_optimized_code_slot;
853 
854   // If the optimized code is cleared, go to runtime to update the optimization
855   // marker field.
856   __ LoadWeakValue(optimized_code_entry, optimized_code_entry,
857                    &heal_optimized_code_slot);
858 
859   // Check if the optimized code is marked for deopt. If it is, call the
860   // runtime to clear it.
861   __ Ld_d(a6, FieldMemOperand(optimized_code_entry,
862                               Code::kCodeDataContainerOffset));
863   __ Ld_w(a6, FieldMemOperand(a6, CodeDataContainer::kKindSpecificFlagsOffset));
864   __ And(a6, a6, Operand(1 << Code::kMarkedForDeoptimizationBit));
865   __ Branch(&heal_optimized_code_slot, ne, a6, Operand(zero_reg));
866 
867   // Optimized code is good, get it into the closure and link the closure into
868   // the optimized functions list, then tail call the optimized code.
869   // The feedback vector is no longer used, so re-use it as a scratch
870   // register.
871   ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure);
872 
873   static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
874   __ LoadCodeObjectEntry(a2, optimized_code_entry);
875   __ Jump(a2);
876 
877   // Optimized code slot contains deoptimized code or code is cleared and
878   // optimized code marker isn't updated. Evict the code, update the marker
879   // and re-enter the closure's code.
880   __ bind(&heal_optimized_code_slot);
881   GenerateTailCallToReturnedCode(masm, Runtime::kHealOptimizedCodeSlot);
882 }
883 
884 static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
885                               Register optimization_marker) {
886   // ----------- S t a t e -------------
887   //  -- a0 : actual argument count
888   //  -- a3 : new target (preserved for callee if needed, and caller)
889   //  -- a1 : target function (preserved for callee if needed, and caller)
890   //  -- feedback vector (preserved for caller if needed)
891   //  -- optimization_marker : a Smi containing a non-zero optimization marker.
892   // -----------------------------------
893   DCHECK(!AreAliased(feedback_vector, a1, a3, optimization_marker));
894 
895   // TODO(v8:8394): The logging of first execution will break if
896   // feedback vectors are not allocated. We need to find a different way of
897   // logging these events if required.
898   TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
899                                 OptimizationMarker::kLogFirstExecution,
900                                 Runtime::kFunctionFirstExecution);
901   TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
902                                 OptimizationMarker::kCompileOptimized,
903                                 Runtime::kCompileOptimized_NotConcurrent);
904   TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
905                                 OptimizationMarker::kCompileOptimizedConcurrent,
906                                 Runtime::kCompileOptimized_Concurrent);
907 
908   // Marker should be one of LogFirstExecution / CompileOptimized /
909   // CompileOptimizedConcurrent. InOptimizationQueue and None shouldn't reach
910   // here.
911   if (FLAG_debug_code) {
912     __ stop();
913   }
914 }
915 
916 // Advance the current bytecode offset. This simulates what all bytecode
917 // handlers do upon completion of the underlying operation. Will bail out to a
918 // label if the bytecode (without prefix) is a return bytecode. Will not advance
919 // the bytecode offset if the current bytecode is a JumpLoop, instead just
920 // re-executing the JumpLoop to jump to the correct bytecode.
921 static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
922                                           Register bytecode_array,
923                                           Register bytecode_offset,
924                                           Register bytecode, Register scratch1,
925                                           Register scratch2, Register scratch3,
926                                           Label* if_return) {
927   Register bytecode_size_table = scratch1;
928 
929   // The bytecode offset value will be increased by one in wide and extra wide
930   // cases. In the case of a wide or extra wide JumpLoop bytecode, we will
931   // restore the original bytecode offset. To simplify the code, we keep a
932   // backup of it in |original_bytecode_offset|.
933   Register original_bytecode_offset = scratch3;
934   DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode,
935                      bytecode_size_table, original_bytecode_offset));
936   __ Move(original_bytecode_offset, bytecode_offset);
937   __ li(bytecode_size_table, ExternalReference::bytecode_size_table_address());
938 
939   // Check if the bytecode is a Wide or ExtraWide prefix bytecode.
940   Label process_bytecode, extra_wide;
941   STATIC_ASSERT(0 == static_cast<int>(interpreter::Bytecode::kWide));
942   STATIC_ASSERT(1 == static_cast<int>(interpreter::Bytecode::kExtraWide));
943   STATIC_ASSERT(2 == static_cast<int>(interpreter::Bytecode::kDebugBreakWide));
944   STATIC_ASSERT(3 ==
945                 static_cast<int>(interpreter::Bytecode::kDebugBreakExtraWide));
946   __ Branch(&process_bytecode, hi, bytecode, Operand(3));
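  // The Wide prefixes (kWide, kDebugBreakWide) have even values; the ExtraWide
  // prefixes (kExtraWide, kDebugBreakExtraWide) are odd, so test the low bit.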
947   __ And(scratch2, bytecode, Operand(1));
948   __ Branch(&extra_wide, ne, scratch2, Operand(zero_reg));
949 
950   // Load the next bytecode and update table to the wide scaled table.
951   __ Add_d(bytecode_offset, bytecode_offset, Operand(1));
952   __ Add_d(scratch2, bytecode_array, bytecode_offset);
953   __ Ld_bu(bytecode, MemOperand(scratch2, 0));
954   __ Add_d(bytecode_size_table, bytecode_size_table,
955            Operand(kByteSize * interpreter::Bytecodes::kBytecodeCount));
956   __ jmp(&process_bytecode);
957 
958   __ bind(&extra_wide);
959   // Load the next bytecode and update table to the extra wide scaled table.
960   __ Add_d(bytecode_offset, bytecode_offset, Operand(1));
961   __ Add_d(scratch2, bytecode_array, bytecode_offset);
962   __ Ld_bu(bytecode, MemOperand(scratch2, 0));
963   __ Add_d(bytecode_size_table, bytecode_size_table,
964            Operand(2 * kByteSize * interpreter::Bytecodes::kBytecodeCount));
965 
966   __ bind(&process_bytecode);
967 
968 // Bailout to the return label if this is a return bytecode.
969 #define JUMP_IF_EQUAL(NAME)          \
970   __ Branch(if_return, eq, bytecode, \
971             Operand(static_cast<int>(interpreter::Bytecode::k##NAME)));
972   RETURN_BYTECODE_LIST(JUMP_IF_EQUAL)
973 #undef JUMP_IF_EQUAL
974 
975   // If this is a JumpLoop, re-execute it to perform the jump to the beginning
976   // of the loop.
977   Label end, not_jump_loop;
978   __ Branch(&not_jump_loop, ne, bytecode,
979             Operand(static_cast<int>(interpreter::Bytecode::kJumpLoop)));
980   // We need to restore the original bytecode_offset since we might have
981   // increased it to skip the wide / extra-wide prefix bytecode.
982   __ Move(bytecode_offset, original_bytecode_offset);
983   __ jmp(&end);
984 
985   __ bind(&not_jump_loop);
986   // Otherwise, load the size of the current bytecode and advance the offset.
987   __ Add_d(scratch2, bytecode_size_table, bytecode);
988   __ Ld_b(scratch2, MemOperand(scratch2, 0));
989   __ Add_d(bytecode_offset, bytecode_offset, scratch2);
990 
991   __ bind(&end);
992 }
993 
994 // Read off the optimization state in the feedback vector and check if there
995 // is optimized code or an optimization marker that needs to be processed.
996 static void LoadOptimizationStateAndJumpIfNeedsProcessing(
997     MacroAssembler* masm, Register optimization_state, Register feedback_vector,
998     Label* has_optimized_code_or_marker) {
999   ASM_CODE_COMMENT(masm);
1000   Register scratch = t2;
1001   // TODO(liuyu): Remove CHECK
1002   CHECK_NE(t2, optimization_state);
1003   CHECK_NE(t2, feedback_vector);
1004   __ Ld_w(optimization_state,
1005           FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
1006   __ And(
1007       scratch, optimization_state,
1008       Operand(FeedbackVector::kHasOptimizedCodeOrCompileOptimizedMarkerMask));
1009   __ Branch(has_optimized_code_or_marker, ne, scratch, Operand(zero_reg));
1010 }
1011 
1012 static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
1013     MacroAssembler* masm, Register optimization_state,
1014     Register feedback_vector) {
1015   ASM_CODE_COMMENT(masm);
1016   Label maybe_has_optimized_code;
1017   // Check if optimized code marker is available
1018   {
1019     UseScratchRegisterScope temps(masm);
1020     Register scratch = temps.Acquire();
1021     __ And(
1022         scratch, optimization_state,
1023         Operand(FeedbackVector::kHasCompileOptimizedOrLogFirstExecutionMarker));
1024     __ Branch(&maybe_has_optimized_code, eq, scratch, Operand(zero_reg));
1025   }
1026 
1027   Register optimization_marker = optimization_state;
1028   __ DecodeField<FeedbackVector::OptimizationMarkerBits>(optimization_marker);
1029   MaybeOptimizeCode(masm, feedback_vector, optimization_marker);
1030 
1031   __ bind(&maybe_has_optimized_code);
1032   Register optimized_code_entry = optimization_state;
1033   __ Ld_d(optimized_code_entry,
1034           FieldMemOperand(feedback_vector,
1035                           FeedbackVector::kMaybeOptimizedCodeOffset));
1036 
1037   TailCallOptimizedCodeSlot(masm, optimized_code_entry);
1038 }
1039 
1040 // static
1041 void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
1042   UseScratchRegisterScope temps(masm);
1043   temps.Include(s1.bit() | s2.bit());
1044   temps.Exclude(t7.bit());
1045   auto descriptor =
1046       Builtins::CallInterfaceDescriptorFor(Builtin::kBaselineOutOfLinePrologue);
1047   Register closure = descriptor.GetRegisterParameter(
1048       BaselineOutOfLinePrologueDescriptor::kClosure);
1049   // Load the feedback vector from the closure.
1050   Register feedback_vector = temps.Acquire();
1051   __ Ld_d(feedback_vector,
1052           FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
1053   __ Ld_d(feedback_vector,
1054           FieldMemOperand(feedback_vector, Cell::kValueOffset));
1055   if (FLAG_debug_code) {
1056     UseScratchRegisterScope temps(masm);
1057     Register scratch = temps.Acquire();
1058     __ GetObjectType(feedback_vector, scratch, scratch);
1059     __ Assert(eq, AbortReason::kExpectedFeedbackVector, scratch,
1060               Operand(FEEDBACK_VECTOR_TYPE));
1061   }
1062   // Check for an optimization marker.
1063   Label has_optimized_code_or_marker;
1064   Register optimization_state = no_reg;
1065   {
1066     UseScratchRegisterScope temps(masm);
1067     optimization_state = temps.Acquire();
1068     // optimization_state is only used in |has_optimized_code_or_marker|;
1069     // outside of that block the register can be reused.
1070     LoadOptimizationStateAndJumpIfNeedsProcessing(
1071         masm, optimization_state, feedback_vector,
1072         &has_optimized_code_or_marker);
1073   }
1074   // Increment invocation count for the function.
1075   {
1076     UseScratchRegisterScope temps(masm);
1077     Register invocation_count = temps.Acquire();
1078     __ Ld_w(invocation_count,
1079             FieldMemOperand(feedback_vector,
1080                             FeedbackVector::kInvocationCountOffset));
1081     __ Add_w(invocation_count, invocation_count, Operand(1));
1082     __ St_w(invocation_count,
1083             FieldMemOperand(feedback_vector,
1084                             FeedbackVector::kInvocationCountOffset));
1085   }
1086 
1087   FrameScope frame_scope(masm, StackFrame::MANUAL);
1088   {
1089     ASM_CODE_COMMENT_STRING(masm, "Frame Setup");
1090     // Normally the first thing we'd do here is Push(ra, fp), but we already
1091     // entered the frame in BaselineCompiler::Prologue, as we had to use the
1092     // value ra before the call to this BaselineOutOfLinePrologue builtin.
1093     Register callee_context = descriptor.GetRegisterParameter(
1094         BaselineOutOfLinePrologueDescriptor::kCalleeContext);
1095     Register callee_js_function = descriptor.GetRegisterParameter(
1096         BaselineOutOfLinePrologueDescriptor::kClosure);
1097     __ Push(callee_context, callee_js_function);
1098     DCHECK_EQ(callee_js_function, kJavaScriptCallTargetRegister);
1099     DCHECK_EQ(callee_js_function, kJSFunctionRegister);
1100 
1101     Register argc = descriptor.GetRegisterParameter(
1102         BaselineOutOfLinePrologueDescriptor::kJavaScriptCallArgCount);
1103     // We'll use the bytecode array for both code age/OSR resetting and for
1104     // pushing it onto the frame, so load it into a register.
1105     Register bytecodeArray = descriptor.GetRegisterParameter(
1106         BaselineOutOfLinePrologueDescriptor::kInterpreterBytecodeArray);
1107 
1108     // Reset code age and the OSR arming. The OSR field and BytecodeAgeOffset
1109     // are 8-bit fields next to each other, so we can optimize by writing a
1110     // single 16-bit store. These static asserts guard that assumption.
1111     STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
1112                   BytecodeArray::kOsrLoopNestingLevelOffset + kCharSize);
1113     STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
1114     __ St_h(zero_reg,
1115             FieldMemOperand(bytecodeArray,
1116                             BytecodeArray::kOsrLoopNestingLevelOffset));
1117 
1118     __ Push(argc, bytecodeArray);
1119 
1120     // Baseline code frames store the feedback vector where interpreter would
1121     // store the bytecode offset.
1122     if (FLAG_debug_code) {
1123       UseScratchRegisterScope temps(masm);
1124       Register invocation_count = temps.Acquire();
1125       __ GetObjectType(feedback_vector, invocation_count, invocation_count);
1126       __ Assert(eq, AbortReason::kExpectedFeedbackVector, invocation_count,
1127                 Operand(FEEDBACK_VECTOR_TYPE));
1128     }
1129     // Our stack is currently aligned. We have to push something along with
1130     // the feedback vector to keep it that way -- we may as well start
1131     // initialising the register frame.
1132     // TODO(v8:11429,leszeks): Consider guaranteeing that this call leaves
1133     // `undefined` in the accumulator register, to skip the load in the baseline
1134     // code.
1135     __ Push(feedback_vector);
1136   }
1137 
1138   Label call_stack_guard;
1139   Register frame_size = descriptor.GetRegisterParameter(
1140       BaselineOutOfLinePrologueDescriptor::kStackFrameSize);
1141   {
1142     ASM_CODE_COMMENT_STRING(masm, "Stack/interrupt check");
1143     // Stack check. This folds the checks for both the interrupt stack limit
1144     // check and the real stack limit into one by just checking for the
1145     // interrupt limit. The interrupt limit is either equal to the real stack
1146     // limit or tighter. By ensuring we have space until that limit after
1147     // building the frame we can quickly precheck both at once.
1148     UseScratchRegisterScope temps(masm);
1149     Register sp_minus_frame_size = temps.Acquire();
1150     __ Sub_d(sp_minus_frame_size, sp, frame_size);
1151     Register interrupt_limit = temps.Acquire();
1152     __ LoadStackLimit(interrupt_limit,
1153                       MacroAssembler::StackLimitKind::kInterruptStackLimit);
1154     __ Branch(&call_stack_guard, Uless, sp_minus_frame_size,
1155               Operand(interrupt_limit));
1156   }
1157 
1158   // Do "fast" return to the caller pc in ra.
1159   // TODO(v8:11429): Document this frame setup better.
1160   __ Ret();
1161 
1162   __ bind(&has_optimized_code_or_marker);
1163   {
1164     ASM_CODE_COMMENT_STRING(masm, "Optimized marker check");
1165     UseScratchRegisterScope temps(masm);
1166     temps.Exclude(optimization_state);
1167     // Ensure the optimization_state is not allocated again.
1168     // Drop the frame created by the baseline call.
1169     __ Pop(ra, fp);
1170     MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(masm, optimization_state,
1171                                                  feedback_vector);
1172     __ Trap();
1173   }
1174 
1175   __ bind(&call_stack_guard);
1176   {
1177     ASM_CODE_COMMENT_STRING(masm, "Stack/interrupt call");
1178     FrameScope frame_scope(masm, StackFrame::INTERNAL);
1179     // Save incoming new target or generator
1180     __ Push(kJavaScriptCallNewTargetRegister);
1181     __ SmiTag(frame_size);
1182     __ Push(frame_size);
1183     __ CallRuntime(Runtime::kStackGuardWithGap);
1184     __ Pop(kJavaScriptCallNewTargetRegister);
1185   }
1186   __ Ret();
1187   temps.Exclude(s1.bit() | s2.bit());
1188 }
1189 
1190 // Generate code for entering a JS function with the interpreter.
1191 // On entry to the function the receiver and arguments have been pushed on the
1192 // stack left to right.
1193 //
1194 // The live registers are:
1195 //   o a0 : actual argument count (not including the receiver)
1196 //   o a1: the JS function object being called.
1197 //   o a3: the incoming new target or generator object
1198 //   o cp: our context
1199 //   o fp: the caller's frame pointer
1200 //   o sp: stack pointer
1201 //   o ra: return address
1202 //
1203 // The function builds an interpreter frame.  See InterpreterFrameConstants in
1204 // frame-constants.h for its layout.
1205 void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
1206   Register closure = a1;
1207   Register feedback_vector = a2;
1208 
1209   // Get the bytecode array from the function object and load it into
1210   // kInterpreterBytecodeArrayRegister.
1211   __ Ld_d(kScratchReg,
1212           FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
1213   __ Ld_d(
1214       kInterpreterBytecodeArrayRegister,
1215       FieldMemOperand(kScratchReg, SharedFunctionInfo::kFunctionDataOffset));
1216   Label is_baseline;
1217   GetSharedFunctionInfoBytecodeOrBaseline(
1218       masm, kInterpreterBytecodeArrayRegister, kScratchReg, &is_baseline);
1219 
1220   // The bytecode array could have been flushed from the shared function info,
1221   // if so, call into CompileLazy.
1222   Label compile_lazy;
1223   __ GetObjectType(kInterpreterBytecodeArrayRegister, kScratchReg, kScratchReg);
1224   __ Branch(&compile_lazy, ne, kScratchReg, Operand(BYTECODE_ARRAY_TYPE));
1225 
1226   // Load the feedback vector from the closure.
1227   __ Ld_d(feedback_vector,
1228           FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
1229   __ Ld_d(feedback_vector,
1230           FieldMemOperand(feedback_vector, Cell::kValueOffset));
1231 
1232   Label push_stack_frame;
1233   // Check if feedback vector is valid. If valid, check for optimized code
1234   // and update invocation count. Otherwise, set up the stack frame.
1235   __ Ld_d(a4, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
1236   __ Ld_hu(a4, FieldMemOperand(a4, Map::kInstanceTypeOffset));
1237   __ Branch(&push_stack_frame, ne, a4, Operand(FEEDBACK_VECTOR_TYPE));
1238 
1239   // Read off the optimization state in the feedback vector, and if there
1240   // is optimized code or an optimization marker, call that instead.
1241   Register optimization_state = a4;
1242   __ Ld_w(optimization_state,
1243           FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
1244 
1245   // Check if the optimized code slot is not empty or has an optimization marker.
1246   Label has_optimized_code_or_marker;
1247 
1248   __ andi(t0, optimization_state,
1249           FeedbackVector::kHasOptimizedCodeOrCompileOptimizedMarkerMask);
1250   __ Branch(&has_optimized_code_or_marker, ne, t0, Operand(zero_reg));
1251 
1252   Label not_optimized;
1253   __ bind(&not_optimized);
1254 
1255   // Increment invocation count for the function.
1256   __ Ld_w(a4, FieldMemOperand(feedback_vector,
1257                               FeedbackVector::kInvocationCountOffset));
1258   __ Add_w(a4, a4, Operand(1));
1259   __ St_w(a4, FieldMemOperand(feedback_vector,
1260                               FeedbackVector::kInvocationCountOffset));
1261 
1262   // Open a frame scope to indicate that there is a frame on the stack.  The
1263   // MANUAL indicates that the scope shouldn't actually generate code to set up
1264   // the frame (that is done below).
1265   __ bind(&push_stack_frame);
1266   FrameScope frame_scope(masm, StackFrame::MANUAL);
1267   __ PushStandardFrame(closure);
1268 
1269   // Reset code age and the OSR arming. The OSR field and BytecodeAgeOffset are
1270   // 8-bit fields next to each other, so we could just optimize by writing a
1271   // 16-bit. These static asserts guard our assumption is valid.
1272   STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
1273                 BytecodeArray::kOsrLoopNestingLevelOffset + kCharSize);
1274   STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
1275   __ St_h(zero_reg, FieldMemOperand(kInterpreterBytecodeArrayRegister,
1276                                     BytecodeArray::kOsrLoopNestingLevelOffset));
1277 
1278   // Load initial bytecode offset.
1279   __ li(kInterpreterBytecodeOffsetRegister,
1280         Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
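  // kHeaderSize - kHeapObjectTag is the offset of the first bytecode relative
  // to the tagged BytecodeArray pointer, so the dispatch code below can read
  // bytecodes at kInterpreterBytecodeArrayRegister + this offset.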
1281 
1282   // Push bytecode array and Smi tagged bytecode array offset.
1283   __ SmiTag(a4, kInterpreterBytecodeOffsetRegister);
1284   __ Push(kInterpreterBytecodeArrayRegister, a4);
1285 
1286   // Allocate the local and temporary register file on the stack.
1287   Label stack_overflow;
1288   {
1289     // Load frame size (word) from the BytecodeArray object.
1290     __ Ld_w(a4, FieldMemOperand(kInterpreterBytecodeArrayRegister,
1291                                 BytecodeArray::kFrameSizeOffset));
1292 
1293     // Do a stack check to ensure we don't go over the limit.
1294     __ Sub_d(a5, sp, Operand(a4));
1295     __ LoadStackLimit(a2, MacroAssembler::StackLimitKind::kRealStackLimit);
1296     __ Branch(&stack_overflow, lo, a5, Operand(a2));
1297 
1298     // If ok, push undefined as the initial value for all register file entries.
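    // Roughly (a sketch of the loop below, not the emitted code):
    //   for (int n = frame_size_in_bytes; n > 0; n -= kPointerSize)
    //     Push(undefined);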
1299     Label loop_header;
1300     Label loop_check;
1301     __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
1302     __ Branch(&loop_check);
1303     __ bind(&loop_header);
1304     // TODO(rmcilroy): Consider doing more than one push per loop iteration.
1305     __ Push(kInterpreterAccumulatorRegister);
1306     // Continue loop if not done.
1307     __ bind(&loop_check);
1308     __ Sub_d(a4, a4, Operand(kPointerSize));
1309     __ Branch(&loop_header, ge, a4, Operand(zero_reg));
1310   }
1311 
1312   // If the bytecode array has a valid incoming new target or generator object
1313   // register, initialize it with the incoming value, which was passed in a3.
1314   Label no_incoming_new_target_or_generator_register;
1315   __ Ld_w(a5, FieldMemOperand(
1316                   kInterpreterBytecodeArrayRegister,
1317                   BytecodeArray::kIncomingNewTargetOrGeneratorRegisterOffset));
1318   __ Branch(&no_incoming_new_target_or_generator_register, eq, a5,
1319             Operand(zero_reg));
1320   __ Alsl_d(a5, a5, fp, kPointerSizeLog2, t7);
1321   __ St_d(a3, MemOperand(a5, 0));
1322   __ bind(&no_incoming_new_target_or_generator_register);
1323 
1324   // Perform interrupt stack check.
1325   // TODO(solanes): Merge with the real stack limit check above.
1326   Label stack_check_interrupt, after_stack_check_interrupt;
1327   __ LoadStackLimit(a5, MacroAssembler::StackLimitKind::kInterruptStackLimit);
1328   __ Branch(&stack_check_interrupt, lo, sp, Operand(a5));
1329   __ bind(&after_stack_check_interrupt);
1330 
1331   // The accumulator is already loaded with undefined.
1332 
1333   // Load the dispatch table into a register and dispatch to the bytecode
1334   // handler at the current bytecode offset.
1335   Label do_dispatch;
1336   __ bind(&do_dispatch);
1337   __ li(kInterpreterDispatchTableRegister,
1338         ExternalReference::interpreter_dispatch_table_address(masm->isolate()));
1339   __ Add_d(t5, kInterpreterBytecodeArrayRegister,
1340            kInterpreterBytecodeOffsetRegister);
1341   __ Ld_bu(a7, MemOperand(t5, 0));
1342   __ Alsl_d(kScratchReg, a7, kInterpreterDispatchTableRegister,
1343             kPointerSizeLog2, t7);
1344   __ Ld_d(kJavaScriptCallCodeStartRegister, MemOperand(kScratchReg, 0));
1345   __ Call(kJavaScriptCallCodeStartRegister);
1346   masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
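  // The pc offset recorded here is used by Generate_InterpreterEnterBytecode
  // to compute the return address when re-entering this trampoline, so a
  // "return" from a bytecode handler lands right after the Call above.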
1347 
1348   // Any returns to the entry trampoline are either due to the return bytecode
1349   // or the interpreter tail calling a builtin and then a dispatch.
1350 
1351   // Get bytecode array and bytecode offset from the stack frame.
1352   __ Ld_d(kInterpreterBytecodeArrayRegister,
1353           MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
1354   __ Ld_d(kInterpreterBytecodeOffsetRegister,
1355           MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
1356   __ SmiUntag(kInterpreterBytecodeOffsetRegister);
1357 
1358   // Either return, or advance to the next bytecode and dispatch.
1359   Label do_return;
1360   __ Add_d(a1, kInterpreterBytecodeArrayRegister,
1361            kInterpreterBytecodeOffsetRegister);
1362   __ Ld_bu(a1, MemOperand(a1, 0));
1363   AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
1364                                 kInterpreterBytecodeOffsetRegister, a1, a2, a3,
1365                                 a4, &do_return);
1366   __ jmp(&do_dispatch);
1367 
1368   __ bind(&do_return);
1369   // The return value is in a0.
1370   LeaveInterpreterFrame(masm, t0, t1);
1371   __ Jump(ra);
1372 
1373   __ bind(&stack_check_interrupt);
1374   // Modify the bytecode offset in the stack to be kFunctionEntryBytecodeOffset
1375   // for the call to the StackGuard.
1376   __ li(kInterpreterBytecodeOffsetRegister,
1377         Operand(Smi::FromInt(BytecodeArray::kHeaderSize - kHeapObjectTag +
1378                              kFunctionEntryBytecodeOffset)));
1379   __ St_d(kInterpreterBytecodeOffsetRegister,
1380           MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
1381   __ CallRuntime(Runtime::kStackGuard);
1382 
1383   // After the call, restore the bytecode array, bytecode offset and accumulator
1384   // registers again. Also, restore the bytecode offset in the stack to its
1385   // previous value.
1386   __ Ld_d(kInterpreterBytecodeArrayRegister,
1387           MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
1388   __ li(kInterpreterBytecodeOffsetRegister,
1389         Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
1390   __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
1391 
1392   __ SmiTag(a5, kInterpreterBytecodeOffsetRegister);
1393   __ St_d(a5, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
1394 
1395   __ jmp(&after_stack_check_interrupt);
1396 
1397   __ bind(&has_optimized_code_or_marker);
1398   MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(masm, optimization_state,
1399                                                feedback_vector);
1400 
1401   __ bind(&is_baseline);
1402   {
1403     // Load the feedback vector from the closure.
1404     __ Ld_d(feedback_vector,
1405             FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
1406     __ Ld_d(feedback_vector,
1407             FieldMemOperand(feedback_vector, Cell::kValueOffset));
1408 
1409     Label install_baseline_code;
1410     // Check if feedback vector is valid. If not, call prepare for baseline to
1411     // allocate it.
1412     __ Ld_d(t0, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
1413     __ Ld_hu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
1414     __ Branch(&install_baseline_code, ne, t0, Operand(FEEDBACK_VECTOR_TYPE));
1415 
1416     // Check for an optimization marker.
1417     LoadOptimizationStateAndJumpIfNeedsProcessing(
1418         masm, optimization_state, feedback_vector,
1419         &has_optimized_code_or_marker);
1420 
1421     // Load the baseline code into the closure.
1422     __ Move(a2, kInterpreterBytecodeArrayRegister);
1423     static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
1424     ReplaceClosureCodeWithOptimizedCode(masm, a2, closure);
1425     __ JumpCodeObject(a2);
1426 
1427     __ bind(&install_baseline_code);
1428     GenerateTailCallToReturnedCode(masm, Runtime::kInstallBaselineCode);
1429   }
1430 
1431   __ bind(&compile_lazy);
1432   GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
1433   // Unreachable code.
1434   __ break_(0xCC);
1435 
1436   __ bind(&stack_overflow);
1437   __ CallRuntime(Runtime::kThrowStackOverflow);
1438   // Unreachable code.
1439   __ break_(0xCC);
1440 }
1441 
1442 static void GenerateInterpreterPushArgs(MacroAssembler* masm, Register num_args,
1443                                         Register start_address,
1444                                         Register scratch, Register scratch2) {
1445   // Find the address of the last argument.
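  // I.e. (a sketch): last_arg = start_address - (num_args - 1) * kPointerSize;
  // the interpreter stores consecutive arguments at decreasing addresses.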
1446   __ Sub_d(scratch, num_args, Operand(1));
1447   __ slli_d(scratch, scratch, kPointerSizeLog2);
1448   __ Sub_d(start_address, start_address, scratch);
1449 
1450   // Push the arguments.
1451   __ PushArray(start_address, num_args, scratch, scratch2,
1452                TurboAssembler::PushArrayOrder::kReverse);
1453 }
1454 
1455 // static
1456 void Builtins::Generate_InterpreterPushArgsThenCallImpl(
1457     MacroAssembler* masm, ConvertReceiverMode receiver_mode,
1458     InterpreterPushArgsMode mode) {
1459   DCHECK(mode != InterpreterPushArgsMode::kArrayFunction);
1460   // ----------- S t a t e -------------
1461   //  -- a0 : the number of arguments (not including the receiver)
1462   //  -- a2 : the address of the first argument to be pushed. Subsequent
1463   //          arguments should be consecutive above this, in the same order as
1464   //          they are to be pushed onto the stack.
1465   //  -- a1 : the target to call (can be any Object).
1466   // -----------------------------------
1467   Label stack_overflow;
1468   if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
1469     // The spread argument should not be pushed.
1470     __ Sub_d(a0, a0, Operand(1));
1471   }
1472 
1473   __ Add_d(a3, a0, Operand(1));  // Add one for receiver.
1474 
1475   __ StackOverflowCheck(a3, a4, t0, &stack_overflow);
1476 
1477   if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
1478     // Don't copy receiver.
1479     __ mov(a3, a0);
1480   }
1481 
1482   // This function modifies a2, t0 and a4.
1483   GenerateInterpreterPushArgs(masm, a3, a2, a4, t0);
1484 
1485   if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
1486     __ PushRoot(RootIndex::kUndefinedValue);
1487   }
1488 
1489   if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
1490     // Pass the spread in the register a2.
1491     // a2 already points to the penultimate argument, the spread
1492     // is below that.
1493     __ Ld_d(a2, MemOperand(a2, -kSystemPointerSize));
1494   }
1495 
1496   // Call the target.
1497   if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
1498     __ Jump(BUILTIN_CODE(masm->isolate(), CallWithSpread),
1499             RelocInfo::CODE_TARGET);
1500   } else {
1501     __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny),
1502             RelocInfo::CODE_TARGET);
1503   }
1504 
1505   __ bind(&stack_overflow);
1506   {
1507     __ TailCallRuntime(Runtime::kThrowStackOverflow);
1508     // Unreachable code.
1509     __ break_(0xCC);
1510   }
1511 }
1512 
1513 // static
1514 void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
1515     MacroAssembler* masm, InterpreterPushArgsMode mode) {
1516   // ----------- S t a t e -------------
1517   // -- a0 : argument count (not including receiver)
1518   // -- a3 : new target
1519   // -- a1 : constructor to call
1520   // -- a2 : allocation site feedback if available, undefined otherwise.
1521   // -- a4 : address of the first argument
1522   // -----------------------------------
1523   Label stack_overflow;
1524   __ addi_d(a6, a0, 1);
1525   __ StackOverflowCheck(a6, a5, t0, &stack_overflow);
1526 
1527   if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
1528     // The spread argument should not be pushed.
1529     __ Sub_d(a0, a0, Operand(1));
1530   }
1531 
1532   // Push the arguments. This function modifies t0, a4 and a5.
1533   GenerateInterpreterPushArgs(masm, a0, a4, a5, t0);
1534 
1535   // Push a slot for the receiver.
1536   __ Push(zero_reg);
1537 
1538   if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
1539     // Pass the spread in the register a2.
1540     // a4 already points to the penultimate argument, the spread
1541     // lies in the next interpreter register.
1542     __ Ld_d(a2, MemOperand(a4, -kSystemPointerSize));
1543   } else {
1544     __ AssertUndefinedOrAllocationSite(a2, t0);
1545   }
1546 
1547   if (mode == InterpreterPushArgsMode::kArrayFunction) {
1548     __ AssertFunction(a1);
1549 
1550     // Tail call to the function-specific construct stub (still in the caller
1551     // context at this point).
1552     __ Jump(BUILTIN_CODE(masm->isolate(), ArrayConstructorImpl),
1553             RelocInfo::CODE_TARGET);
1554   } else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
1555     // Call the constructor with a0, a1, and a3 unmodified.
1556     __ Jump(BUILTIN_CODE(masm->isolate(), ConstructWithSpread),
1557             RelocInfo::CODE_TARGET);
1558   } else {
1559     DCHECK_EQ(InterpreterPushArgsMode::kOther, mode);
1560     // Call the constructor with a0, a1, and a3 unmodified.
1561     __ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET);
1562   }
1563 
1564   __ bind(&stack_overflow);
1565   {
1566     __ TailCallRuntime(Runtime::kThrowStackOverflow);
1567     // Unreachable code.
1568     __ break_(0xCC);
1569   }
1570 }
1571 
1572 static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
1573   // Set the return address to the correct point in the interpreter entry
1574   // trampoline.
1575   Label builtin_trampoline, trampoline_loaded;
1576   Smi interpreter_entry_return_pc_offset(
1577       masm->isolate()->heap()->interpreter_entry_return_pc_offset());
1578   DCHECK_NE(interpreter_entry_return_pc_offset, Smi::zero());
1579 
1580   // If the SFI function_data is an InterpreterData, the function will have a
1581   // custom copy of the interpreter entry trampoline for profiling. If so,
1582   // get the custom trampoline, otherwise grab the entry address of the global
1583   // trampoline.
1584   __ Ld_d(t0, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
1585   __ Ld_d(t0, FieldMemOperand(t0, JSFunction::kSharedFunctionInfoOffset));
1586   __ Ld_d(t0, FieldMemOperand(t0, SharedFunctionInfo::kFunctionDataOffset));
1587   __ GetObjectType(t0, kInterpreterDispatchTableRegister,
1588                    kInterpreterDispatchTableRegister);
1589   __ Branch(&builtin_trampoline, ne, kInterpreterDispatchTableRegister,
1590             Operand(INTERPRETER_DATA_TYPE));
1591 
1592   __ Ld_d(t0,
1593           FieldMemOperand(t0, InterpreterData::kInterpreterTrampolineOffset));
1594   __ Add_d(t0, t0, Operand(Code::kHeaderSize - kHeapObjectTag));
1595   __ Branch(&trampoline_loaded);
1596 
1597   __ bind(&builtin_trampoline);
1598   __ li(t0, ExternalReference::
1599                 address_of_interpreter_entry_trampoline_instruction_start(
1600                     masm->isolate()));
1601   __ Ld_d(t0, MemOperand(t0, 0));
1602 
1603   __ bind(&trampoline_loaded);
1604   __ Add_d(ra, t0, Operand(interpreter_entry_return_pc_offset.value()));
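  // ra now points into the interpreter entry trampoline, just past the Call
  // that dispatches bytecode handlers, so when the handler jumped to at the
  // end of this builtin returns, execution resumes in the trampoline's
  // advance-and-dispatch code.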
1605 
1606   // Initialize the dispatch table register.
1607   __ li(kInterpreterDispatchTableRegister,
1608         ExternalReference::interpreter_dispatch_table_address(masm->isolate()));
1609 
1610   // Get the bytecode array pointer from the frame.
1611   __ Ld_d(kInterpreterBytecodeArrayRegister,
1612           MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
1613 
1614   if (FLAG_debug_code) {
1615     // Check function data field is actually a BytecodeArray object.
1616     __ SmiTst(kInterpreterBytecodeArrayRegister, kScratchReg);
1617     __ Assert(ne,
1618               AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry,
1619               kScratchReg, Operand(zero_reg));
1620     __ GetObjectType(kInterpreterBytecodeArrayRegister, a1, a1);
1621     __ Assert(eq,
1622               AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry,
1623               a1, Operand(BYTECODE_ARRAY_TYPE));
1624   }
1625 
1626   // Get the target bytecode offset from the frame.
1627   __ SmiUntag(kInterpreterBytecodeOffsetRegister,
1628               MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
1629 
1630   if (FLAG_debug_code) {
1631     Label okay;
1632     __ Branch(&okay, ge, kInterpreterBytecodeOffsetRegister,
1633               Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
1634     // Unreachable code.
1635     __ break_(0xCC);
1636     __ bind(&okay);
1637   }
1638 
1639   // Dispatch to the target bytecode.
1640   __ Add_d(a1, kInterpreterBytecodeArrayRegister,
1641            kInterpreterBytecodeOffsetRegister);
1642   __ Ld_bu(a7, MemOperand(a1, 0));
1643   __ Alsl_d(a1, a7, kInterpreterDispatchTableRegister, kPointerSizeLog2, t7);
1644   __ Ld_d(kJavaScriptCallCodeStartRegister, MemOperand(a1, 0));
1645   __ Jump(kJavaScriptCallCodeStartRegister);
1646 }
1647 
1648 void Builtins::Generate_InterpreterEnterAtNextBytecode(MacroAssembler* masm) {
1649   // Advance the current bytecode offset stored within the given interpreter
1650   // stack frame. This simulates what all bytecode handlers do upon completion
1651   // of the underlying operation.
1652   __ Ld_d(kInterpreterBytecodeArrayRegister,
1653           MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
1654   __ Ld_d(kInterpreterBytecodeOffsetRegister,
1655           MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
1656   __ SmiUntag(kInterpreterBytecodeOffsetRegister);
1657 
1658   Label enter_bytecode, function_entry_bytecode;
1659   __ Branch(&function_entry_bytecode, eq, kInterpreterBytecodeOffsetRegister,
1660             Operand(BytecodeArray::kHeaderSize - kHeapObjectTag +
1661                     kFunctionEntryBytecodeOffset));
1662 
1663   // Load the current bytecode.
1664   __ Add_d(a1, kInterpreterBytecodeArrayRegister,
1665            kInterpreterBytecodeOffsetRegister);
1666   __ Ld_bu(a1, MemOperand(a1, 0));
1667 
1668   // Advance to the next bytecode.
1669   Label if_return;
1670   AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
1671                                 kInterpreterBytecodeOffsetRegister, a1, a2, a3,
1672                                 a4, &if_return);
1673 
1674   __ bind(&enter_bytecode);
1675   // Convert new bytecode offset to a Smi and save in the stackframe.
1676   __ SmiTag(a2, kInterpreterBytecodeOffsetRegister);
1677   __ St_d(a2, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
1678 
1679   Generate_InterpreterEnterBytecode(masm);
1680 
1681   __ bind(&function_entry_bytecode);
1682   // If the code deoptimizes during the implicit function entry stack interrupt
1683   // check, it will have a bailout ID of kFunctionEntryBytecodeOffset, which is
1684   // not a valid bytecode offset. Detect this case and advance to the first
1685   // actual bytecode.
1686   __ li(kInterpreterBytecodeOffsetRegister,
1687         Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
1688   __ Branch(&enter_bytecode);
1689 
1690   // We should never take the if_return path.
1691   __ bind(&if_return);
1692   __ Abort(AbortReason::kInvalidBytecodeAdvance);
1693 }
1694 
1695 void Builtins::Generate_InterpreterEnterAtBytecode(MacroAssembler* masm) {
1696   Generate_InterpreterEnterBytecode(masm);
1697 }
1698 
1699 namespace {
1700 void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
1701                                       bool java_script_builtin,
1702                                       bool with_result) {
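  // A brief sketch of what this helper assumes (see the deoptimizer and
  // BuiltinContinuationFrameConstants for the authoritative layout): the
  // continuation frame holds all allocatable general registers, the
  // Smi-tagged builtin index and the fixed frame slots. The code below pops
  // the registers back, optionally stores the runtime result (a0) into the
  // frame, restores fp and ra, and tail-calls the builtin by its index.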
1703   const RegisterConfiguration* config(RegisterConfiguration::Default());
1704   int allocatable_register_count = config->num_allocatable_general_registers();
1705   UseScratchRegisterScope temps(masm);
1706   Register scratch = temps.Acquire();
1707   if (with_result) {
1708     if (java_script_builtin) {
1709       __ mov(scratch, a0);
1710     } else {
1711       // Overwrite the hole inserted by the deoptimizer with the return value
1712       // from the LAZY deopt point.
1713       __ St_d(
1714           a0,
1715           MemOperand(
1716               sp, config->num_allocatable_general_registers() * kPointerSize +
1717                       BuiltinContinuationFrameConstants::kFixedFrameSize));
1718     }
1719   }
1720   for (int i = allocatable_register_count - 1; i >= 0; --i) {
1721     int code = config->GetAllocatableGeneralCode(i);
1722     __ Pop(Register::from_code(code));
1723     if (java_script_builtin && code == kJavaScriptCallArgCountRegister.code()) {
1724       __ SmiUntag(Register::from_code(code));
1725     }
1726   }
1727 
1728   if (with_result && java_script_builtin) {
1729     // Overwrite the hole inserted by the deoptimizer with the return value from
1730     // the LAZY deopt point. t0 contains the arguments count, the return value
1731     // from LAZY is always the last argument.
1732     __ Add_d(a0, a0,
1733              Operand(BuiltinContinuationFrameConstants::kFixedSlotCount));
1734     __ Alsl_d(t0, a0, sp, kSystemPointerSizeLog2, t7);
1735     __ St_d(scratch, MemOperand(t0, 0));
1736     // Recover arguments count.
1737     __ Sub_d(a0, a0,
1738              Operand(BuiltinContinuationFrameConstants::kFixedSlotCount));
1739   }
1740 
1741   __ Ld_d(
1742       fp,
1743       MemOperand(sp, BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
1744   // Load builtin index (stored as a Smi) and use it to get the builtin start
1745   // address from the builtins table.
1746   __ Pop(t0);
1747   __ Add_d(sp, sp,
1748            Operand(BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
1749   __ Pop(ra);
1750   __ LoadEntryFromBuiltinIndex(t0);
1751   __ Jump(t0);
1752 }
1753 }  // namespace
1754 
1755 void Builtins::Generate_ContinueToCodeStubBuiltin(MacroAssembler* masm) {
1756   Generate_ContinueToBuiltinHelper(masm, false, false);
1757 }
1758 
1759 void Builtins::Generate_ContinueToCodeStubBuiltinWithResult(
1760     MacroAssembler* masm) {
1761   Generate_ContinueToBuiltinHelper(masm, false, true);
1762 }
1763 
1764 void Builtins::Generate_ContinueToJavaScriptBuiltin(MacroAssembler* masm) {
1765   Generate_ContinueToBuiltinHelper(masm, true, false);
1766 }
1767 
1768 void Builtins::Generate_ContinueToJavaScriptBuiltinWithResult(
1769     MacroAssembler* masm) {
1770   Generate_ContinueToBuiltinHelper(masm, true, true);
1771 }
1772 
1773 void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
1774   {
1775     FrameScope scope(masm, StackFrame::INTERNAL);
1776     __ CallRuntime(Runtime::kNotifyDeoptimized);
1777   }
1778 
1779   DCHECK_EQ(kInterpreterAccumulatorRegister.code(), a0.code());
1780   __ Ld_d(a0, MemOperand(sp, 0 * kPointerSize));
1781   __ Add_d(sp, sp, Operand(1 * kPointerSize));  // Remove state.
1782   __ Ret();
1783 }
1784 
1785 namespace {
1786 
1787 void Generate_OSREntry(MacroAssembler* masm, Register entry_address,
1788                        Operand offset = Operand(zero_reg)) {
1789   __ Add_d(ra, entry_address, offset);
1790   // And "return" to the OSR entry point of the function.
1791   __ Ret();
1792 }
1793 
1794 void OnStackReplacement(MacroAssembler* masm, bool is_interpreter) {
1795   {
1796     FrameScope scope(masm, StackFrame::INTERNAL);
1797     __ CallRuntime(Runtime::kCompileForOnStackReplacement);
1798   }
1799 
1800   // If the code object is null, just return to the caller.
1801   __ Ret(eq, a0, Operand(Smi::zero()));
1802 
1803   if (is_interpreter) {
1804     // Drop the handler frame that is sitting on top of the actual
1805     // JavaScript frame. This is the case when OSR is triggered from bytecode.
1806     __ LeaveFrame(StackFrame::STUB);
1807   }
1808 
1809   // Load deoptimization data from the code object.
1810   // <deopt_data> = <code>[#deoptimization_data_offset]
1811   __ Ld_d(a1, MemOperand(a0, Code::kDeoptimizationDataOrInterpreterDataOffset -
1812                                  kHeapObjectTag));
1813 
1814   // Load the OSR entrypoint offset from the deoptimization data.
1815   // <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
1816   __ SmiUntag(a1, MemOperand(a1, FixedArray::OffsetOfElementAt(
1817                                      DeoptimizationData::kOsrPcOffsetIndex) -
1818                                      kHeapObjectTag));
1819 
1820   // Compute the target address = code_obj + header_size + osr_offset
1821   // <entry_addr> = <code_obj> + #header_size + <osr_offset>
1822   __ Add_d(a0, a0, a1);
1823   Generate_OSREntry(masm, a0, Operand(Code::kHeaderSize - kHeapObjectTag));
1824 }
1825 }  // namespace
1826 
1827 void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
1828   return OnStackReplacement(masm, true);
1829 }
1830 
1831 void Builtins::Generate_BaselineOnStackReplacement(MacroAssembler* masm) {
1832   __ Ld_d(kContextRegister,
1833           MemOperand(fp, StandardFrameConstants::kContextOffset));
1834   return OnStackReplacement(masm, false);
1835 }
1836 
1837 // static
1838 void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
1839   // ----------- S t a t e -------------
1840   //  -- a0    : argc
1841   //  -- sp[0] : receiver
1842   //  -- sp[4] : thisArg
1843   //  -- sp[8] : argArray
1844   // -----------------------------------
1845 
1846   Register argc = a0;
1847   Register arg_array = a2;
1848   Register receiver = a1;
1849   Register this_arg = a5;
1850   Register undefined_value = a3;
1851   Register scratch = a4;
1852 
1853   __ LoadRoot(undefined_value, RootIndex::kUndefinedValue);
1854 
1855   // 1. Load receiver into a1, argArray into a2 (if present), remove all
1856   // arguments from the stack (including the receiver), and push thisArg (if
1857   // present) instead.
1858   {
1859     // Claim (2 - argc) dummy arguments from the stack, to put the stack in a
1860     // consistent state for a simple pop operation.
1861 
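    // In effect (a sketch of the sequence below, not the emitted code):
    //   receiver  = sp[0];
    //   this_arg  = (argc >= 1) ? sp[1] : undefined;
    //   arg_array = (argc >= 2) ? sp[2] : undefined;
    //   sp += argc * kSystemPointerSize;   // drop the arguments
    //   sp[0] = this_arg;                  // overwrite the receiver slot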
1862     __ mov(scratch, argc);
1863     __ Ld_d(this_arg, MemOperand(sp, kPointerSize));
1864     __ Ld_d(arg_array, MemOperand(sp, 2 * kPointerSize));
1865     __ Movz(arg_array, undefined_value, scratch);  // if argc == 0
1866     __ Movz(this_arg, undefined_value, scratch);   // if argc == 0
1867     __ Sub_d(scratch, scratch, Operand(1));
1868     __ Movz(arg_array, undefined_value, scratch);  // if argc == 1
1869     __ Ld_d(receiver, MemOperand(sp, 0));
1870     __ Alsl_d(sp, argc, sp, kSystemPointerSizeLog2, t7);
1871     __ St_d(this_arg, MemOperand(sp, 0));
1872   }
1873 
1874   // ----------- S t a t e -------------
1875   //  -- a2    : argArray
1876   //  -- a1    : receiver
1877   //  -- a3    : undefined root value
1878   //  -- sp[0] : thisArg
1879   // -----------------------------------
1880 
1881   // 2. We don't need to check explicitly for callable receiver here,
1882   // since that's the first thing the Call/CallWithArrayLike builtins
1883   // will do.
1884 
1885   // 3. Tail call with no arguments if argArray is null or undefined.
1886   Label no_arguments;
1887   __ JumpIfRoot(arg_array, RootIndex::kNullValue, &no_arguments);
1888   __ Branch(&no_arguments, eq, arg_array, Operand(undefined_value));
1889 
1890   // 4a. Apply the receiver to the given argArray.
1891   __ Jump(BUILTIN_CODE(masm->isolate(), CallWithArrayLike),
1892           RelocInfo::CODE_TARGET);
1893 
1894   // 4b. The argArray is either null or undefined, so we tail call without any
1895   // arguments to the receiver.
1896   __ bind(&no_arguments);
1897   {
1898     __ mov(a0, zero_reg);
1899     DCHECK(receiver == a1);
1900     __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
1901   }
1902 }
1903 
1904 // static
1905 void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
1906   // 1. Get the callable to call (passed as receiver) from the stack.
1907   { __ Pop(a1); }
1908 
1909   // 2. Make sure we have at least one argument.
1910   // a0: actual number of arguments
1911   {
1912     Label done;
1913     __ Branch(&done, ne, a0, Operand(zero_reg));
1914     __ PushRoot(RootIndex::kUndefinedValue);
1915     __ Add_d(a0, a0, Operand(1));
1916     __ bind(&done);
1917   }
1918 
1919   // 3. Adjust the actual number of arguments.
1920   __ addi_d(a0, a0, -1);
1921 
1922   // 4. Call the callable.
1923   __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
1924 }
1925 
1926 void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
1927   // ----------- S t a t e -------------
1928   //  -- a0     : argc
1929   //  -- sp[0]  : receiver
1930   //  -- sp[8]  : target         (if argc >= 1)
1931   //  -- sp[16] : thisArgument   (if argc >= 2)
1932   //  -- sp[24] : argumentsList  (if argc == 3)
1933   // -----------------------------------
1934 
1935   Register argc = a0;
1936   Register arguments_list = a2;
1937   Register target = a1;
1938   Register this_argument = a5;
1939   Register undefined_value = a3;
1940   Register scratch = a4;
1941 
1942   __ LoadRoot(undefined_value, RootIndex::kUndefinedValue);
1943 
1944   // 1. Load target into a1 (if present), argumentsList into a2 (if present),
1945   // remove all arguments from the stack (including the receiver), and push
1946   // thisArgument (if present) instead.
1947   {
1948     // Claim (3 - argc) dummy arguments from the stack, to put the stack in a
1949     // consistent state for a simple pop operation.
1950 
1951     __ mov(scratch, argc);
1952     __ Ld_d(target, MemOperand(sp, kPointerSize));
1953     __ Ld_d(this_argument, MemOperand(sp, 2 * kPointerSize));
1954     __ Ld_d(arguments_list, MemOperand(sp, 3 * kPointerSize));
1955     __ Movz(arguments_list, undefined_value, scratch);  // if argc == 0
1956     __ Movz(this_argument, undefined_value, scratch);   // if argc == 0
1957     __ Movz(target, undefined_value, scratch);          // if argc == 0
1958     __ Sub_d(scratch, scratch, Operand(1));
1959     __ Movz(arguments_list, undefined_value, scratch);  // if argc == 1
1960     __ Movz(this_argument, undefined_value, scratch);   // if argc == 1
1961     __ Sub_d(scratch, scratch, Operand(1));
1962     __ Movz(arguments_list, undefined_value, scratch);  // if argc == 2
1963 
1964     __ Alsl_d(sp, argc, sp, kSystemPointerSizeLog2, t7);
1965     __ St_d(this_argument, MemOperand(sp, 0));  // Overwrite receiver
1966   }
1967 
1968   // ----------- S t a t e -------------
1969   //  -- a2    : argumentsList
1970   //  -- a1    : target
1971   //  -- a3    : undefined root value
1972   //  -- sp[0] : thisArgument
1973   // -----------------------------------
1974 
1975   // 2. We don't need to check explicitly for callable target here,
1976   // since that's the first thing the Call/CallWithArrayLike builtins
1977   // will do.
1978 
1979   // 3. Apply the target to the given argumentsList.
1980   __ Jump(BUILTIN_CODE(masm->isolate(), CallWithArrayLike),
1981           RelocInfo::CODE_TARGET);
1982 }
1983 
1984 void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
1985   // ----------- S t a t e -------------
1986   //  -- a0     : argc
1987   //  -- sp[0]   : receiver
1988   //  -- sp[8]   : target
1989   //  -- sp[16]  : argumentsList
1990   //  -- sp[24]  : new.target (optional)
1991   // -----------------------------------
1992 
1993   Register argc = a0;
1994   Register arguments_list = a2;
1995   Register target = a1;
1996   Register new_target = a3;
1997   Register undefined_value = a4;
1998   Register scratch = a5;
1999 
2000   __ LoadRoot(undefined_value, RootIndex::kUndefinedValue);
2001 
2002   // 1. Load target into a1 (if present), argumentsList into a2 (if present),
2003   // new.target into a3 (if present, otherwise use target), remove all
2004   // arguments from the stack (including the receiver), and push thisArgument
2005   // (if present) instead.
2006   {
2007     // Claim (3 - argc) dummy arguments from the stack, to put the stack in a
2008     // consistent state for a simple pop operation.
2009 
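    // In effect (a sketch of the sequence below, not the emitted code):
    //   target         = (argc >= 1) ? sp[1] : undefined;
    //   arguments_list = (argc >= 2) ? sp[2] : undefined;
    //   new_target     = (argc >= 3) ? sp[3] : target;
    //   sp += argc * kSystemPointerSize;   // drop the arguments
    //   sp[0] = undefined;                 // overwrite the receiver slot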
2010     __ mov(scratch, argc);
2011     __ Ld_d(target, MemOperand(sp, kPointerSize));
2012     __ Ld_d(arguments_list, MemOperand(sp, 2 * kPointerSize));
2013     __ Ld_d(new_target, MemOperand(sp, 3 * kPointerSize));
2014     __ Movz(arguments_list, undefined_value, scratch);  // if argc == 0
2015     __ Movz(new_target, undefined_value, scratch);      // if argc == 0
2016     __ Movz(target, undefined_value, scratch);          // if argc == 0
2017     __ Sub_d(scratch, scratch, Operand(1));
2018     __ Movz(arguments_list, undefined_value, scratch);  // if argc == 1
2019     __ Movz(new_target, target, scratch);               // if argc == 1
2020     __ Sub_d(scratch, scratch, Operand(1));
2021     __ Movz(new_target, target, scratch);  // if argc == 2
2022 
2023     __ Alsl_d(sp, argc, sp, kSystemPointerSizeLog2, t7);
2024     __ St_d(undefined_value, MemOperand(sp, 0));  // Overwrite receiver
2025   }
2026 
2027   // ----------- S t a t e -------------
2028   //  -- a2    : argumentsList
2029   //  -- a1    : target
2030   //  -- a3    : new.target
2031   //  -- sp[0] : receiver (undefined)
2032   // -----------------------------------
2033 
2034   // 2. We don't need to check explicitly for constructor target here,
2035   // since that's the first thing the Construct/ConstructWithArrayLike
2036   // builtins will do.
2037 
2038   // 3. We don't need to check explicitly for constructor new.target here,
2039   // since that's the second thing the Construct/ConstructWithArrayLike
2040   // builtins will do.
2041 
2042   // 4. Construct the target with the given new.target and argumentsList.
2043   __ Jump(BUILTIN_CODE(masm->isolate(), ConstructWithArrayLike),
2044           RelocInfo::CODE_TARGET);
2045 }
2046 
2047 // static
2048 void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
2049                                                Handle<Code> code) {
2050   // ----------- S t a t e -------------
2051   //  -- a1 : target
2052   //  -- a0 : number of parameters on the stack (not including the receiver)
2053   //  -- a2 : arguments list (a FixedArray)
2054   //  -- a4 : len (number of elements to push from args)
2055   //  -- a3 : new.target (for [[Construct]])
2056   // -----------------------------------
2057   if (FLAG_debug_code) {
2058     // Allow a2 to be a FixedArray, or a FixedDoubleArray if a4 == 0.
2059     Label ok, fail;
2060     __ AssertNotSmi(a2);
2061     __ GetObjectType(a2, t8, t8);
2062     __ Branch(&ok, eq, t8, Operand(FIXED_ARRAY_TYPE));
2063     __ Branch(&fail, ne, t8, Operand(FIXED_DOUBLE_ARRAY_TYPE));
2064     __ Branch(&ok, eq, a4, Operand(zero_reg));
2065     // Fall through.
2066     __ bind(&fail);
2067     __ Abort(AbortReason::kOperandIsNotAFixedArray);
2068 
2069     __ bind(&ok);
2070   }
2071 
2072   Register args = a2;
2073   Register len = a4;
2074 
2075   // Check for stack overflow.
2076   Label stack_overflow;
2077   __ StackOverflowCheck(len, kScratchReg, a5, &stack_overflow);
2078 
2079   // Move the arguments already in the stack,
2080   // including the receiver and the return address.
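  // Sketch: lower sp by len slots, then copy the existing argc + 1 stack
  // slots (receiver plus arguments) down into the newly reserved space so
  // that the list elements can be appended after them.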
2081   {
2082     Label copy;
2083     Register src = a6, dest = a7;
2084     __ mov(src, sp);
2085     __ slli_d(t0, a4, kSystemPointerSizeLog2);
2086     __ Sub_d(sp, sp, Operand(t0));
2087     // Update stack pointer.
2088     __ mov(dest, sp);
2089     __ Add_d(t0, a0, Operand(zero_reg));
2090 
2091     __ bind(&copy);
2092     __ Ld_d(t1, MemOperand(src, 0));
2093     __ St_d(t1, MemOperand(dest, 0));
2094     __ Sub_d(t0, t0, Operand(1));
2095     __ Add_d(src, src, Operand(kSystemPointerSize));
2096     __ Add_d(dest, dest, Operand(kSystemPointerSize));
2097     __ Branch(&copy, ge, t0, Operand(zero_reg));
2098   }
2099 
2100   // Push arguments onto the stack (thisArgument is already on the stack).
2101   {
2102     Label done, push, loop;
2103     Register src = a6;
2104     Register scratch = len;
2105 
2106     __ addi_d(src, args, FixedArray::kHeaderSize - kHeapObjectTag);
2107     __ Add_d(a0, a0, len);  // The 'len' argument for Call() or Construct().
2108     __ Branch(&done, eq, len, Operand(zero_reg));
2109     __ slli_d(scratch, len, kPointerSizeLog2);
2110     __ Sub_d(scratch, sp, Operand(scratch));
2111     __ LoadRoot(t1, RootIndex::kTheHoleValue);
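    // Holes in the arguments list are pushed as undefined instead.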
2112     __ bind(&loop);
2113     __ Ld_d(a5, MemOperand(src, 0));
2114     __ addi_d(src, src, kPointerSize);
2115     __ Branch(&push, ne, a5, Operand(t1));
2116     __ LoadRoot(a5, RootIndex::kUndefinedValue);
2117     __ bind(&push);
2118     __ St_d(a5, MemOperand(a7, 0));
2119     __ Add_d(a7, a7, Operand(kSystemPointerSize));
2120     __ Add_d(scratch, scratch, Operand(kSystemPointerSize));
2121     __ Branch(&loop, ne, scratch, Operand(sp));
2122     __ bind(&done);
2123   }
2124 
2125   // Tail-call to the actual Call or Construct builtin.
2126   __ Jump(code, RelocInfo::CODE_TARGET);
2127 
2128   __ bind(&stack_overflow);
2129   __ TailCallRuntime(Runtime::kThrowStackOverflow);
2130 }
2131 
2132 // static
2133 void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
2134                                                       CallOrConstructMode mode,
2135                                                       Handle<Code> code) {
2136   // ----------- S t a t e -------------
2137   //  -- a0 : the number of arguments (not including the receiver)
2138   //  -- a3 : the new.target (for [[Construct]] calls)
2139   //  -- a1 : the target to call (can be any Object)
2140   //  -- a2 : start index (to support rest parameters)
2141   // -----------------------------------
2142 
2143   // Check if new.target has a [[Construct]] internal method.
2144   if (mode == CallOrConstructMode::kConstruct) {
2145     Label new_target_constructor, new_target_not_constructor;
2146     __ JumpIfSmi(a3, &new_target_not_constructor);
2147     __ Ld_d(t1, FieldMemOperand(a3, HeapObject::kMapOffset));
2148     __ Ld_bu(t1, FieldMemOperand(t1, Map::kBitFieldOffset));
2149     __ And(t1, t1, Operand(Map::Bits1::IsConstructorBit::kMask));
2150     __ Branch(&new_target_constructor, ne, t1, Operand(zero_reg));
2151     __ bind(&new_target_not_constructor);
2152     {
2153       FrameScope scope(masm, StackFrame::MANUAL);
2154       __ EnterFrame(StackFrame::INTERNAL);
2155       __ Push(a3);
2156       __ CallRuntime(Runtime::kThrowNotConstructor);
2157     }
2158     __ bind(&new_target_constructor);
2159   }
2160 
2161   Label stack_done, stack_overflow;
2162   __ Ld_d(a7, MemOperand(fp, StandardFrameConstants::kArgCOffset));
2163   __ Sub_w(a7, a7, a2);
2164   __ Branch(&stack_done, le, a7, Operand(zero_reg));
2165   {
2166     // Check for stack overflow.
2167     __ StackOverflowCheck(a7, a4, a5, &stack_overflow);
2168 
2169     // Forward the arguments from the caller frame.
2170 
2171     // Point to the first argument to copy (skipping the receiver).
2172     __ Add_d(a6, fp,
2173              Operand(CommonFrameConstants::kFixedFrameSizeAboveFp +
2174                      kSystemPointerSize));
2175     __ Alsl_d(a6, a2, a6, kSystemPointerSizeLog2, t7);
2176 
2177     // Move the arguments already in the stack,
2178     // including the receiver and the return address.
2179     {
2180       Label copy;
2181       Register src = t0, dest = a2;
2182       __ mov(src, sp);
2183       // Update stack pointer.
2184       __ slli_d(t1, a7, kSystemPointerSizeLog2);
2185       __ Sub_d(sp, sp, Operand(t1));
2186       __ mov(dest, sp);
2187       __ Add_d(t2, a0, Operand(zero_reg));
2188 
2189       __ bind(&copy);
2190       __ Ld_d(t1, MemOperand(src, 0));
2191       __ St_d(t1, MemOperand(dest, 0));
2192       __ Sub_d(t2, t2, Operand(1));
2193       __ Add_d(src, src, Operand(kSystemPointerSize));
2194       __ Add_d(dest, dest, Operand(kSystemPointerSize));
2195       __ Branch(&copy, ge, t2, Operand(zero_reg));
2196     }
2197 
2198     // Copy arguments from the caller frame.
2199     // TODO(victorgomes): Consider using forward order as potentially more cache
2200     // friendly.
2201     {
2202       Label loop;
2203       __ Add_d(a0, a0, a7);
2204       __ bind(&loop);
2205       {
2206         __ Sub_w(a7, a7, Operand(1));
2207         __ Alsl_d(t0, a7, a6, kPointerSizeLog2, t7);
2208         __ Ld_d(kScratchReg, MemOperand(t0, 0));
2209         __ Alsl_d(t0, a7, a2, kPointerSizeLog2, t7);
2210         __ St_d(kScratchReg, MemOperand(t0, 0));
2211         __ Branch(&loop, ne, a7, Operand(zero_reg));
2212       }
2213     }
2214   }
2215   __ Branch(&stack_done);
2216   __ bind(&stack_overflow);
2217   __ TailCallRuntime(Runtime::kThrowStackOverflow);
2218   __ bind(&stack_done);
2219 
2220   // Tail-call to the {code} handler.
2221   __ Jump(code, RelocInfo::CODE_TARGET);
2222 }
2223 
2224 // static
2225 void Builtins::Generate_CallFunction(MacroAssembler* masm,
2226                                      ConvertReceiverMode mode) {
2227   // ----------- S t a t e -------------
2228   //  -- a0 : the number of arguments (not including the receiver)
2229   //  -- a1 : the function to call (checked to be a JSFunction)
2230   // -----------------------------------
2231   __ AssertFunction(a1);
2232 
2233   // See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
2234   // Check that function is not a "classConstructor".
2235   Label class_constructor;
2236   __ Ld_d(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
2237   __ Ld_wu(a3, FieldMemOperand(a2, SharedFunctionInfo::kFlagsOffset));
2238   __ And(kScratchReg, a3,
2239          Operand(SharedFunctionInfo::IsClassConstructorBit::kMask));
2240   __ Branch(&class_constructor, ne, kScratchReg, Operand(zero_reg));
2241 
2242   // Enter the context of the function; ToObject has to run in the function
2243   // context, and we also need to take the global proxy from the function
2244   // context in case of conversion.
2245   __ Ld_d(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
2246   // We need to convert the receiver for non-native sloppy mode functions.
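  // Roughly: a null or undefined receiver becomes the global proxy, any other
  // primitive receiver is boxed via ToObject, and JSReceivers pass through
  // unchanged.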
2247   Label done_convert;
2248   __ Ld_wu(a3, FieldMemOperand(a2, SharedFunctionInfo::kFlagsOffset));
2249   __ And(kScratchReg, a3,
2250          Operand(SharedFunctionInfo::IsNativeBit::kMask |
2251                  SharedFunctionInfo::IsStrictBit::kMask));
2252   __ Branch(&done_convert, ne, kScratchReg, Operand(zero_reg));
2253   {
2254     // ----------- S t a t e -------------
2255     //  -- a0 : the number of arguments (not including the receiver)
2256     //  -- a1 : the function to call (checked to be a JSFunction)
2257     //  -- a2 : the shared function info.
2258     //  -- cp : the function context.
2259     // -----------------------------------
2260 
2261     if (mode == ConvertReceiverMode::kNullOrUndefined) {
2262       // Patch receiver to global proxy.
2263       __ LoadGlobalProxy(a3);
2264     } else {
2265       Label convert_to_object, convert_receiver;
2266       __ LoadReceiver(a3, a0);
2267       __ JumpIfSmi(a3, &convert_to_object);
2268       STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
2269       __ GetObjectType(a3, a4, a4);
2270       __ Branch(&done_convert, hs, a4, Operand(FIRST_JS_RECEIVER_TYPE));
2271       if (mode != ConvertReceiverMode::kNotNullOrUndefined) {
2272         Label convert_global_proxy;
2273         __ JumpIfRoot(a3, RootIndex::kUndefinedValue, &convert_global_proxy);
2274         __ JumpIfNotRoot(a3, RootIndex::kNullValue, &convert_to_object);
2275         __ bind(&convert_global_proxy);
2276         {
2277           // Patch receiver to global proxy.
2278           __ LoadGlobalProxy(a3);
2279         }
2280         __ Branch(&convert_receiver);
2281       }
2282       __ bind(&convert_to_object);
2283       {
2284         // Convert receiver using ToObject.
2285         // TODO(bmeurer): Inline the allocation here to avoid building the frame
2286         // in the fast case? (fall back to AllocateInNewSpace?)
2287         FrameScope scope(masm, StackFrame::INTERNAL);
2288         __ SmiTag(a0);
2289         __ Push(a0, a1);
2290         __ mov(a0, a3);
2291         __ Push(cp);
2292         __ Call(BUILTIN_CODE(masm->isolate(), ToObject),
2293                 RelocInfo::CODE_TARGET);
2294         __ Pop(cp);
2295         __ mov(a3, a0);
2296         __ Pop(a0, a1);
2297         __ SmiUntag(a0);
2298       }
2299       __ Ld_d(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
2300       __ bind(&convert_receiver);
2301     }
2302     __ StoreReceiver(a3, a0, kScratchReg);
2303   }
2304   __ bind(&done_convert);
2305 
2306   // ----------- S t a t e -------------
2307   //  -- a0 : the number of arguments (not including the receiver)
2308   //  -- a1 : the function to call (checked to be a JSFunction)
2309   //  -- a2 : the shared function info.
2310   //  -- cp : the function context.
2311   // -----------------------------------
2312 
2313   __ Ld_hu(
2314       a2, FieldMemOperand(a2, SharedFunctionInfo::kFormalParameterCountOffset));
2315   __ InvokeFunctionCode(a1, no_reg, a2, a0, InvokeType::kJump);
2316 
2317   // The function is a "classConstructor", need to raise an exception.
2318   __ bind(&class_constructor);
2319   {
2320     FrameScope frame(masm, StackFrame::INTERNAL);
2321     __ Push(a1);
2322     __ CallRuntime(Runtime::kThrowConstructorNonCallableError);
2323   }
2324 }
2325 
2326 // static
2327 void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
2328   // ----------- S t a t e -------------
2329   //  -- a0 : the number of arguments (not including the receiver)
2330   //  -- a1 : the function to call (checked to be a JSBoundFunction)
2331   // -----------------------------------
2332   __ AssertBoundFunction(a1);
2333 
2334   // Patch the receiver to [[BoundThis]].
2335   {
2336     __ Ld_d(t0, FieldMemOperand(a1, JSBoundFunction::kBoundThisOffset));
2337     __ StoreReceiver(t0, a0, kScratchReg);
2338   }
2339 
2340   // Load [[BoundArguments]] into a2 and length of that into a4.
2341   __ Ld_d(a2, FieldMemOperand(a1, JSBoundFunction::kBoundArgumentsOffset));
2342   __ SmiUntag(a4, FieldMemOperand(a2, FixedArray::kLengthOffset));
2343 
2344   // ----------- S t a t e -------------
2345   //  -- a0 : the number of arguments (not including the receiver)
2346   //  -- a1 : the function to call (checked to be a JSBoundFunction)
2347   //  -- a2 : the [[BoundArguments]] (implemented as FixedArray)
2348   //  -- a4 : the number of [[BoundArguments]]
2349   // -----------------------------------
2350 
2351   // Reserve stack space for the [[BoundArguments]].
2352   {
2353     Label done;
2354     __ slli_d(a5, a4, kPointerSizeLog2);
2355     __ Sub_d(t0, sp, Operand(a5));
2356     // Check the stack for overflow. We are not trying to catch interruptions
2357     // (i.e. debug break and preemption) here, so check the "real stack limit".
2358     __ LoadStackLimit(kScratchReg,
2359                       MacroAssembler::StackLimitKind::kRealStackLimit);
2360     __ Branch(&done, hs, t0, Operand(kScratchReg));
2361     {
2362       FrameScope scope(masm, StackFrame::MANUAL);
2363       __ EnterFrame(StackFrame::INTERNAL);
2364       __ CallRuntime(Runtime::kThrowStackOverflow);
2365     }
2366     __ bind(&done);
2367   }
2368 
2369   // Pop receiver.
2370   __ Pop(t0);
2371 
2372   // Push [[BoundArguments]].
2373   {
2374     Label loop, done_loop;
2375     __ SmiUntag(a4, FieldMemOperand(a2, FixedArray::kLengthOffset));
2376     __ Add_d(a0, a0, Operand(a4));
2377     __ Add_d(a2, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
2378     __ bind(&loop);
2379     __ Sub_d(a4, a4, Operand(1));
2380     __ Branch(&done_loop, lt, a4, Operand(zero_reg));
2381     __ Alsl_d(a5, a4, a2, kPointerSizeLog2, t7);
2382     __ Ld_d(kScratchReg, MemOperand(a5, 0));
2383     __ Push(kScratchReg);
2384     __ Branch(&loop);
2385     __ bind(&done_loop);
2386   }
2387 
2388   // Push receiver.
2389   __ Push(t0);
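  // The stack now matches the regular Call layout (a sketch):
  //   sp[0]                  : receiver ([[BoundThis]])
  //   sp[1] .. sp[num_bound] : [[BoundArguments]] in order
  //   sp[num_bound + 1] ..   : the original call arguments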
2390 
2391   // Call the [[BoundTargetFunction]] via the Call builtin.
2392   __ Ld_d(a1, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset));
2393   __ Jump(BUILTIN_CODE(masm->isolate(), Call_ReceiverIsAny),
2394           RelocInfo::CODE_TARGET);
2395 }
2396 
2397 // static
2398 void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
2399   // ----------- S t a t e -------------
2400   //  -- a0 : the number of arguments (not including the receiver)
2401   //  -- a1 : the target to call (can be any Object).
2402   // -----------------------------------
2403 
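  // Dispatch summary (a sketch of the checks below): JSFunction-like targets
  // go to CallFunction, bound functions to CallBoundFunction, proxies to
  // CallProxy; any other callable object is re-dispatched through the
  // CALL_AS_FUNCTION_DELEGATE, and non-callables throw.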
2404   Label non_callable, non_smi;
2405   __ JumpIfSmi(a1, &non_callable);
2406   __ bind(&non_smi);
2407   __ LoadMap(t1, a1);
2408   __ GetInstanceTypeRange(t1, t2, FIRST_JS_FUNCTION_TYPE, t8);
2409   __ Jump(masm->isolate()->builtins()->CallFunction(mode),
2410           RelocInfo::CODE_TARGET, ls, t8,
2411           Operand(LAST_JS_FUNCTION_TYPE - FIRST_JS_FUNCTION_TYPE));
2412   __ Jump(BUILTIN_CODE(masm->isolate(), CallBoundFunction),
2413           RelocInfo::CODE_TARGET, eq, t2, Operand(JS_BOUND_FUNCTION_TYPE));
2414 
2415   // Check if target has a [[Call]] internal method.
2416   __ Ld_bu(t1, FieldMemOperand(t1, Map::kBitFieldOffset));
2417   __ And(t1, t1, Operand(Map::Bits1::IsCallableBit::kMask));
2418   __ Branch(&non_callable, eq, t1, Operand(zero_reg));
2419 
2420   __ Jump(BUILTIN_CODE(masm->isolate(), CallProxy), RelocInfo::CODE_TARGET, eq,
2421           t2, Operand(JS_PROXY_TYPE));
2422 
2423   // 2. Call to something else, which might have a [[Call]] internal method (if
2424   // not we raise an exception).
2425   // Overwrite the original receiver with the (original) target.
2426   __ StoreReceiver(a1, a0, kScratchReg);
2427   // Let the "call_as_function_delegate" take care of the rest.
2428   __ LoadNativeContextSlot(a1, Context::CALL_AS_FUNCTION_DELEGATE_INDEX);
2429   __ Jump(masm->isolate()->builtins()->CallFunction(
2430               ConvertReceiverMode::kNotNullOrUndefined),
2431           RelocInfo::CODE_TARGET);
2432 
2433   // 3. Call to something that is not callable.
2434   __ bind(&non_callable);
2435   {
2436     FrameScope scope(masm, StackFrame::INTERNAL);
2437     __ Push(a1);
2438     __ CallRuntime(Runtime::kThrowCalledNonCallable);
2439   }
2440 }
2441 
2442 void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
2443   // ----------- S t a t e -------------
2444   //  -- a0 : the number of arguments (not including the receiver)
2445   //  -- a1 : the constructor to call (checked to be a JSFunction)
2446   //  -- a3 : the new target (checked to be a constructor)
2447   // -----------------------------------
2448   __ AssertConstructor(a1);
2449   __ AssertFunction(a1);
2450 
2451   // The calling convention for function-specific ConstructStubs requires
2452   // a2 to contain either an AllocationSite or undefined.
2453   __ LoadRoot(a2, RootIndex::kUndefinedValue);
2454 
2455   Label call_generic_stub;
2456 
2457   // Jump to JSBuiltinsConstructStub or JSConstructStubGeneric.
2458   __ Ld_d(a4, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
2459   __ Ld_wu(a4, FieldMemOperand(a4, SharedFunctionInfo::kFlagsOffset));
2460   __ And(a4, a4, Operand(SharedFunctionInfo::ConstructAsBuiltinBit::kMask));
2461   __ Branch(&call_generic_stub, eq, a4, Operand(zero_reg));
2462 
2463   __ Jump(BUILTIN_CODE(masm->isolate(), JSBuiltinsConstructStub),
2464           RelocInfo::CODE_TARGET);
2465 
2466   __ bind(&call_generic_stub);
2467   __ Jump(BUILTIN_CODE(masm->isolate(), JSConstructStubGeneric),
2468           RelocInfo::CODE_TARGET);
2469 }
2470 
2471 // static
2472 void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
2473   // ----------- S t a t e -------------
2474   //  -- a0 : the number of arguments (not including the receiver)
2475   //  -- a1 : the function to call (checked to be a JSBoundFunction)
2476   //  -- a3 : the new target (checked to be a constructor)
2477   // -----------------------------------
2478   __ AssertConstructor(a1);
2479   __ AssertBoundFunction(a1);
2480 
2481   // Load [[BoundArguments]] into a2 and length of that into a4.
2482   __ Ld_d(a2, FieldMemOperand(a1, JSBoundFunction::kBoundArgumentsOffset));
2483   __ SmiUntag(a4, FieldMemOperand(a2, FixedArray::kLengthOffset));
2484 
2485   // ----------- S t a t e -------------
2486   //  -- a0 : the number of arguments (not including the receiver)
2487   //  -- a1 : the function to call (checked to be a JSBoundFunction)
2488   //  -- a2 : the [[BoundArguments]] (implemented as FixedArray)
2489   //  -- a3 : the new target (checked to be a constructor)
2490   //  -- a4 : the number of [[BoundArguments]]
2491   // -----------------------------------
2492 
2493   // Reserve stack space for the [[BoundArguments]].
2494   {
2495     Label done;
2496     __ slli_d(a5, a4, kPointerSizeLog2);
2497     __ Sub_d(t0, sp, Operand(a5));
2498     // Check the stack for overflow. We are not trying to catch interruptions
2499     // (i.e. debug break and preemption) here, so check the "real stack limit".
2500     __ LoadStackLimit(kScratchReg,
2501                       MacroAssembler::StackLimitKind::kRealStackLimit);
2502     __ Branch(&done, hs, t0, Operand(kScratchReg));
2503     {
2504       FrameScope scope(masm, StackFrame::MANUAL);
2505       __ EnterFrame(StackFrame::INTERNAL);
2506       __ CallRuntime(Runtime::kThrowStackOverflow);
2507     }
2508     __ bind(&done);
2509   }
2510 
2511   // Pop receiver.
2512   __ Pop(t0);
2513 
2514   // Push [[BoundArguments]].
2515   {
2516     Label loop, done_loop;
2517     __ SmiUntag(a4, FieldMemOperand(a2, FixedArray::kLengthOffset));
2518     __ Add_d(a0, a0, Operand(a4));
2519     __ Add_d(a2, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
2520     __ bind(&loop);
2521     __ Sub_d(a4, a4, Operand(1));
2522     __ Branch(&done_loop, lt, a4, Operand(zero_reg));
2523     __ Alsl_d(a5, a4, a2, kPointerSizeLog2, t7);
2524     __ Ld_d(kScratchReg, MemOperand(a5, 0));
2525     __ Push(kScratchReg);
2526     __ Branch(&loop);
2527     __ bind(&done_loop);
2528   }
2529 
2530   // Push receiver.
2531   __ Push(t0);
2532 
2533   // Patch new.target to [[BoundTargetFunction]] if new.target equals target.
2534   {
2535     Label skip_load;
2536     __ Branch(&skip_load, ne, a1, Operand(a3));
2537     __ Ld_d(a3,
2538             FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset));
2539     __ bind(&skip_load);
2540   }
2541 
2542   // Construct the [[BoundTargetFunction]] via the Construct builtin.
2543   __ Ld_d(a1, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset));
2544   __ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET);
2545 }
2546 
2547 // static
2548 void Builtins::Generate_Construct(MacroAssembler* masm) {
2549   // ----------- S t a t e -------------
2550   //  -- a0 : the number of arguments (not including the receiver)
2551   //  -- a1 : the constructor to call (can be any Object)
2552   //  -- a3 : the new target (either the same as the constructor or
2553   //          the JSFunction on which new was invoked initially)
2554   // -----------------------------------
2555 
2556   // Check if target is a Smi.
2557   Label non_constructor, non_proxy;
2558   __ JumpIfSmi(a1, &non_constructor);
2559 
2560   // Check if target has a [[Construct]] internal method.
2561   __ Ld_d(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
2562   __ Ld_bu(t3, FieldMemOperand(t1, Map::kBitFieldOffset));
2563   __ And(t3, t3, Operand(Map::Bits1::IsConstructorBit::kMask));
2564   __ Branch(&non_constructor, eq, t3, Operand(zero_reg));
2565 
2566   // Dispatch based on instance type.
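  // GetInstanceTypeRange loads the instance type into t2 and leaves
  // (instance type - FIRST_JS_FUNCTION_TYPE) in t8, so a single unsigned (ls)
  // comparison below covers the whole range of JSFunction subtypes.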
2567   __ GetInstanceTypeRange(t1, t2, FIRST_JS_FUNCTION_TYPE, t8);
2568   __ Jump(BUILTIN_CODE(masm->isolate(), ConstructFunction),
2569           RelocInfo::CODE_TARGET, ls, t8,
2570           Operand(LAST_JS_FUNCTION_TYPE - FIRST_JS_FUNCTION_TYPE));
2571 
2572   // Only dispatch to bound functions after checking whether they are
2573   // constructors.
2574   __ Jump(BUILTIN_CODE(masm->isolate(), ConstructBoundFunction),
2575           RelocInfo::CODE_TARGET, eq, t2, Operand(JS_BOUND_FUNCTION_TYPE));
2576 
2577   // Only dispatch to proxies after checking whether they are constructors.
2578   __ Branch(&non_proxy, ne, t2, Operand(JS_PROXY_TYPE));
2579   __ Jump(BUILTIN_CODE(masm->isolate(), ConstructProxy),
2580           RelocInfo::CODE_TARGET);
2581 
2582   // Called Construct on an exotic Object with a [[Construct]] internal method.
2583   __ bind(&non_proxy);
2584   {
2585     // Overwrite the original receiver with the (original) target.
2586     __ StoreReceiver(a1, a0, kScratchReg);
2587     // Let the "call_as_constructor_delegate" take care of the rest.
2588     __ LoadNativeContextSlot(a1, Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX);
2589     __ Jump(masm->isolate()->builtins()->CallFunction(),
2590             RelocInfo::CODE_TARGET);
2591   }
2592 
2593   // Called Construct on an Object that doesn't have a [[Construct]] internal
2594   // method.
2595   __ bind(&non_constructor);
2596   __ Jump(BUILTIN_CODE(masm->isolate(), ConstructedNonConstructable),
2597           RelocInfo::CODE_TARGET);
2598 }
2599 
2600 #if V8_ENABLE_WEBASSEMBLY
2601 void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
2602   // The function index was put in t0 by the jump table trampoline.
2603   // Convert the function index to a Smi for the runtime call.
2604   __ SmiTag(kWasmCompileLazyFuncIndexRegister);
2605   {
2606     HardAbortScope hard_abort(masm);  // Avoid calls to Abort.
2607     FrameScope scope(masm, StackFrame::WASM_COMPILE_LAZY);
2608 
2609     // Save all parameter registers (see wasm-linkage.h). They might be
2610     // overwritten in the runtime call below. We don't have any callee-saved
2611     // registers in wasm, so no need to store anything else.
2612     RegList gp_regs = 0;
2613     for (Register gp_param_reg : wasm::kGpParamRegisters) {
2614       gp_regs |= gp_param_reg.bit();
2615     }
2616 
2617     RegList fp_regs = 0;
2618     for (DoubleRegister fp_param_reg : wasm::kFpParamRegisters) {
2619       fp_regs |= fp_param_reg.bit();
2620     }
2621 
2622     CHECK_EQ(NumRegs(gp_regs), arraysize(wasm::kGpParamRegisters));
2623     CHECK_EQ(NumRegs(fp_regs), arraysize(wasm::kFpParamRegisters));
2624     CHECK_EQ(WasmCompileLazyFrameConstants::kNumberOfSavedGpParamRegs,
2625              NumRegs(gp_regs));
2626     CHECK_EQ(WasmCompileLazyFrameConstants::kNumberOfSavedFpParamRegs,
2627              NumRegs(fp_regs));
2628 
2629     __ MultiPush(gp_regs);
2630     __ MultiPushFPU(fp_regs);
2631 
2632     // kFixedFrameSizeFromFp is hard-coded to include space for SIMD
2633     // registers, so we still need to allocate extra (unused) space on the stack
2634     // as if they were saved.
2635     __ Sub_d(sp, sp, base::bits::CountPopulation(fp_regs) * kDoubleSize);
2636 
2637     // Pass the instance and the function index as explicit arguments to the
2638     // runtime function.
2639     __ Push(kWasmInstanceRegister, kWasmCompileLazyFuncIndexRegister);
2640     // Initialize the JavaScript context with 0. CEntry will use it to
2641     // set the current context on the isolate.
2642     __ Move(kContextRegister, Smi::zero());
2643     __ CallRuntime(Runtime::kWasmCompileLazy, 2);
2644     __ mov(t8, a0);
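    // The runtime call returns the entrypoint of the lazily compiled function
    // in a0; stash it in t8, which is not among the saved parameter registers
    // restored below.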
2645 
2646     __ Add_d(sp, sp, base::bits::CountPopulation(fp_regs) * kDoubleSize);
2647     // Restore registers.
2648     __ MultiPopFPU(fp_regs);
2649     __ MultiPop(gp_regs);
2650   }
2651   // Finally, jump to the entrypoint.
2652   __ Jump(t8);
2653 }
2654 
2655 void Builtins::Generate_WasmDebugBreak(MacroAssembler* masm) {
2656   HardAbortScope hard_abort(masm);  // Avoid calls to Abort.
2657   {
2658     FrameScope scope(masm, StackFrame::WASM_DEBUG_BREAK);
2659 
2660     // Save all parameter registers. They might hold live values, we restore
2661     // them after the runtime call.
2662     __ MultiPush(WasmDebugBreakFrameConstants::kPushedGpRegs);
2663     __ MultiPushFPU(WasmDebugBreakFrameConstants::kPushedFpRegs);
2664 
2665     // Initialize the JavaScript context with 0. CEntry will use it to
2666     // set the current context on the isolate.
2667     __ Move(cp, Smi::zero());
2668     __ CallRuntime(Runtime::kWasmDebugBreak, 0);
2669 
2670     // Restore registers.
2671     __ MultiPopFPU(WasmDebugBreakFrameConstants::kPushedFpRegs);
2672     __ MultiPop(WasmDebugBreakFrameConstants::kPushedGpRegs);
2673   }
2674   __ Ret();
2675 }
2676 
2677 void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
2678   __ Trap();
2679 }
2680 
2681 void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) {
2682   // Only needed on x64.
2683   __ Trap();
2684 }
2685 
2686 #endif  // V8_ENABLE_WEBASSEMBLY
2687 
2688 void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
2689                                SaveFPRegsMode save_doubles, ArgvMode argv_mode,
2690                                bool builtin_exit_frame) {
2691   // Called from JavaScript; parameters are on stack as if calling JS function
2692   // a0: number of arguments including receiver
2693   // a1: pointer to builtin function
2694   // fp: frame pointer    (restored after C call)
2695   // sp: stack pointer    (restored as callee's sp after C call)
2696   // cp: current context  (C callee-saved)
2697   //
2698   // If argv_mode == ArgvMode::kRegister:
2699   // a2: pointer to the first argument
2700 
2701   if (argv_mode == ArgvMode::kRegister) {
2702     // Move argv into the correct register.
2703     __ mov(s1, a2);
2704   } else {
2705     // Compute the argv pointer in a callee-saved register.
2706     __ Alsl_d(s1, a0, sp, kPointerSizeLog2, t7);
2707     __ Sub_d(s1, s1, kPointerSize);
2708   }
2709 
2710   // Enter the exit frame that transitions from JavaScript to C++.
2711   FrameScope scope(masm, StackFrame::MANUAL);
2712   __ EnterExitFrame(
2713       save_doubles == SaveFPRegsMode::kSave, 0,
2714       builtin_exit_frame ? StackFrame::BUILTIN_EXIT : StackFrame::EXIT);
2715 
2716   // s0: number of arguments  including receiver (C callee-saved)
2717   // s1: pointer to first argument (C callee-saved)
2718   // s2: pointer to builtin function (C callee-saved)
2719 
2720   // Prepare arguments for C routine.
2721   // a0 = argc
2722   __ mov(s0, a0);
2723   __ mov(s2, a1);
2724 
2725   // We are calling compiled C/C++ code. The three arguments (argc, argv and
2726   // isolate) are set up in a0..a2 below.
2727 
2728   __ AssertStackIsAligned();
2729 
2730   // a0 = argc, a1 = argv, a2 = isolate
2731   __ li(a2, ExternalReference::isolate_address(masm->isolate()));
2732   __ mov(a1, s1);
2733 
2734   __ StoreReturnAddressAndCall(s2);
2735 
2736   // Result returned in a0 or a1:a0 - do not destroy these registers!
2737 
2738   // Check result for exception sentinel.
2739   Label exception_returned;
2740   __ LoadRoot(a4, RootIndex::kException);
2741   __ Branch(&exception_returned, eq, a4, Operand(a0));
2742 
2743   // Check that there is no pending exception, otherwise we
2744   // should have returned the exception sentinel.
2745   if (FLAG_debug_code) {
2746     Label okay;
2747     ExternalReference pending_exception_address = ExternalReference::Create(
2748         IsolateAddressId::kPendingExceptionAddress, masm->isolate());
2749     __ li(a2, pending_exception_address);
2750     __ Ld_d(a2, MemOperand(a2, 0));
2751     __ LoadRoot(a4, RootIndex::kTheHoleValue);
2752     // Cannot use Check() here, as it attempts to generate a call into the runtime.
2753     __ Branch(&okay, eq, a4, Operand(a2));
2754     __ stop();
2755     __ bind(&okay);
2756   }
2757 
2758   // Exit C frame and return.
2759   // a0:a1: result
2760   // sp: stack pointer
2761   // fp: frame pointer
2762   Register argc = argv_mode == ArgvMode::kRegister
2763                       // We don't want to pop arguments so set argc to no_reg.
2764                       ? no_reg
2765                       // s0: still holds argc (callee-saved).
2766                       : s0;
2767   __ LeaveExitFrame(save_doubles == SaveFPRegsMode::kSave, argc, EMIT_RETURN);
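  // With EMIT_RETURN, LeaveExitFrame emits the return itself, so execution
  // only continues past this point on the exception path below.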
2768 
2769   // Handling of exception.
2770   __ bind(&exception_returned);
2771 
2772   ExternalReference pending_handler_context_address = ExternalReference::Create(
2773       IsolateAddressId::kPendingHandlerContextAddress, masm->isolate());
2774   ExternalReference pending_handler_entrypoint_address =
2775       ExternalReference::Create(
2776           IsolateAddressId::kPendingHandlerEntrypointAddress, masm->isolate());
2777   ExternalReference pending_handler_fp_address = ExternalReference::Create(
2778       IsolateAddressId::kPendingHandlerFPAddress, masm->isolate());
2779   ExternalReference pending_handler_sp_address = ExternalReference::Create(
2780       IsolateAddressId::kPendingHandlerSPAddress, masm->isolate());
2781 
2782   // Ask the runtime for help to determine the handler. This will set a0 to
2783   // hold the current pending exception, so don't clobber it.
2784   ExternalReference find_handler =
2785       ExternalReference::Create(Runtime::kUnwindAndFindExceptionHandler);
2786   {
2787     FrameScope scope(masm, StackFrame::MANUAL);
2788     __ PrepareCallCFunction(3, 0, a0);
2789     __ mov(a0, zero_reg);
2790     __ mov(a1, zero_reg);
2791     __ li(a2, ExternalReference::isolate_address(masm->isolate()));
2792     __ CallCFunction(find_handler, 3);
2793   }
2794 
2795   // Retrieve the handler context, SP and FP.
2796   __ li(cp, pending_handler_context_address);
2797   __ Ld_d(cp, MemOperand(cp, 0));
2798   __ li(sp, pending_handler_sp_address);
2799   __ Ld_d(sp, MemOperand(sp, 0));
2800   __ li(fp, pending_handler_fp_address);
2801   __ Ld_d(fp, MemOperand(fp, 0));
2802 
2803   // If the handler is a JS frame, restore the context to the frame. Note that
2804   // the context will be set to (cp == 0) for non-JS frames.
2805   Label zero;
2806   __ Branch(&zero, eq, cp, Operand(zero_reg));
2807   __ St_d(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
2808   __ bind(&zero);
2809 
2810   // Clear c_entry_fp, like we do in `LeaveExitFrame`.
2811   {
2812     UseScratchRegisterScope temps(masm);
2813     Register scratch = temps.Acquire();
2814     __ li(scratch, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
2815                                              masm->isolate()));
2816     __ St_d(zero_reg, MemOperand(scratch, 0));
2817   }
2818 
2819   // Compute the handler entry address and jump to it.
2820   __ li(t7, pending_handler_entrypoint_address);
2821   __ Ld_d(t7, MemOperand(t7, 0));
2822   __ Jump(t7);
2823 }
2824 
2825 void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
2826   Label done;
2827   Register result_reg = t0;
2828 
2829   Register scratch = GetRegisterThatIsNotOneOf(result_reg);
2830   Register scratch2 = GetRegisterThatIsNotOneOf(result_reg, scratch);
2831   Register scratch3 = GetRegisterThatIsNotOneOf(result_reg, scratch, scratch2);
2832   DoubleRegister double_scratch = kScratchDoubleReg;
2833 
2834   // Account for saved regs.
2835   const int kArgumentOffset = 4 * kPointerSize;
2836 
2837   __ Push(result_reg);
2838   __ Push(scratch, scratch2, scratch3);
2839 
2840   // Load double input.
2841   __ Fld_d(double_scratch, MemOperand(sp, kArgumentOffset));
2842 
2843   // Try a conversion to a signed integer.
2844   __ ftintrz_w_d(double_scratch, double_scratch);
2845   // Move the converted value into the result register.
2846   __ movfr2gr_s(scratch3, double_scratch);
2847 
2848   // Retrieve the FCSR to check for conversion exceptions.
2849   __ movfcsr2gr(scratch);
2850 
2851   // Check for overflow and NaNs.
2852   __ And(scratch, scratch,
2853          kFCSRExceptionCauseMask ^ kFCSRDivideByZeroCauseMask);
2854   // If we had no exceptions then set result_reg and we are done.
2855   Label error;
2856   __ Branch(&error, ne, scratch, Operand(zero_reg));
2857   __ Move(result_reg, scratch3);
2858   __ Branch(&done);
2859   __ bind(&error);
2860 
2861   // Load the double value and perform a manual truncation.
2862   Register input_high = scratch2;
2863   Register input_low = scratch3;
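  // The truncation below reassembles the low 32 bits of the integer value
  // directly from the IEEE-754 bit pattern: the implicit 1 and the mantissa
  // are shifted into place and the two halves are OR-ed together. For example,
  // a double with biased exponent 0x41E (i.e. 2^31) and a zero mantissa ends
  // up as 0x80000000 before the sign is applied.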
2864 
2865   __ Ld_w(input_low,
2866           MemOperand(sp, kArgumentOffset + Register::kMantissaOffset));
2867   __ Ld_w(input_high,
2868           MemOperand(sp, kArgumentOffset + Register::kExponentOffset));
2869 
2870   Label normal_exponent;
2871   // Extract the biased exponent into result_reg.
2872   __ bstrpick_w(result_reg, input_high,
2873                 HeapNumber::kExponentShift + HeapNumber::kExponentBits - 1,
2874                 HeapNumber::kExponentShift);
2875 
2876   // Check for Infinity and NaNs, which should return 0.
2877   __ Sub_w(scratch, result_reg, HeapNumber::kExponentMask);
2878   __ Movz(result_reg, zero_reg, scratch);
2879   __ Branch(&done, eq, scratch, Operand(zero_reg));
2880 
2881   // Express exponent as delta to (number of mantissa bits + 31).
2882   __ Sub_w(result_reg, result_reg,
2883            Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 31));
2884 
2885   // If the delta is strictly positive, all bits would be shifted away,
2886   // which means that we can return 0.
2887   __ Branch(&normal_exponent, le, result_reg, Operand(zero_reg));
2888   __ mov(result_reg, zero_reg);
2889   __ Branch(&done);
2890 
2891   __ bind(&normal_exponent);
2892   const int kShiftBase = HeapNumber::kNonMantissaBitsInTopWord - 1;
2893   // Calculate shift.
2894   __ Add_w(scratch, result_reg,
2895            Operand(kShiftBase + HeapNumber::kMantissaBits));
2896 
2897   // Save the sign.
2898   Register sign = result_reg;
2899   result_reg = no_reg;
2900   __ And(sign, input_high, Operand(HeapNumber::kSignMask));
2901 
2902   // On ARM shifts > 31 bits are valid and will result in zero. On LOONG64 we
2903   // need to check for this specific case.
2904   Label high_shift_needed, high_shift_done;
2905   __ Branch(&high_shift_needed, lt, scratch, Operand(32));
2906   __ mov(input_high, zero_reg);
2907   __ Branch(&high_shift_done);
2908   __ bind(&high_shift_needed);
2909 
2910   // Set the implicit 1 before the mantissa part in input_high.
2911   __ Or(input_high, input_high,
2912         Operand(1 << HeapNumber::kMantissaBitsInTopWord));
2913   // Shift the mantissa bits to the correct position.
2914   // We don't need to clear non-mantissa bits as they will be shifted away.
2915   // If they weren't, it would mean that the answer is in the 32-bit range.
2916   __ sll_w(input_high, input_high, scratch);
2917 
2918   __ bind(&high_shift_done);
2919 
2920   // Replace the shifted bits with bits from the lower mantissa word.
2921   Label pos_shift, shift_done;
2922   __ li(kScratchReg, 32);
2923   __ sub_w(scratch, kScratchReg, scratch);
2924   __ Branch(&pos_shift, ge, scratch, Operand(zero_reg));
2925 
2926   // Negate scratch.
2927   __ Sub_w(scratch, zero_reg, scratch);
2928   __ sll_w(input_low, input_low, scratch);
2929   __ Branch(&shift_done);
2930 
2931   __ bind(&pos_shift);
2932   __ srl_w(input_low, input_low, scratch);
2933 
2934   __ bind(&shift_done);
2935   __ Or(input_high, input_high, Operand(input_low));
2936   // Restore sign if necessary.
2937   __ mov(scratch, sign);
2938   result_reg = sign;
2939   sign = no_reg;
2940   __ Sub_w(result_reg, zero_reg, input_high);
2941   __ Movz(result_reg, input_high, scratch);
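  // Movz keeps the negated value computed above only when the saved sign bit
  // (in scratch) is non-zero; otherwise the positive input_high is selected.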
2942 
2943   __ bind(&done);
2944 
2945   __ St_d(result_reg, MemOperand(sp, kArgumentOffset));
2946   __ Pop(scratch, scratch2, scratch3);
2947   __ Pop(result_reg);
2948   __ Ret();
2949 }
2950 
2951 namespace {
2952 
2953 int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
2954   int64_t offset = (ref0.address() - ref1.address());
2955   DCHECK(static_cast<int>(offset) == offset);
2956   return static_cast<int>(offset);
2957 }
2958 
2959 // Calls an API function. Allocates a HandleScope, extracts the returned value
2960 // from the handle and propagates exceptions. Restores the context. stack_space
2961 // is the space to be unwound on exit (it includes the JS call arguments space
2962 // and the additional space allocated for the fast call).
2963 void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address,
2964                               ExternalReference thunk_ref, int stack_space,
2965                               MemOperand* stack_space_operand,
2966                               MemOperand return_value_operand) {
2967   Isolate* isolate = masm->isolate();
2968   ExternalReference next_address =
2969       ExternalReference::handle_scope_next_address(isolate);
2970   const int kNextOffset = 0;
2971   const int kLimitOffset = AddressOffset(
2972       ExternalReference::handle_scope_limit_address(isolate), next_address);
2973   const int kLevelOffset = AddressOffset(
2974       ExternalReference::handle_scope_level_address(isolate), next_address);
2975 
2976   DCHECK(function_address == a1 || function_address == a2);
2977 
2978   Label profiler_enabled, end_profiler_check;
2979   __ li(t7, ExternalReference::is_profiling_address(isolate));
2980   __ Ld_b(t7, MemOperand(t7, 0));
2981   __ Branch(&profiler_enabled, ne, t7, Operand(zero_reg));
2982   __ li(t7, ExternalReference::address_of_runtime_stats_flag());
2983   __ Ld_w(t7, MemOperand(t7, 0));
2984   __ Branch(&profiler_enabled, ne, t7, Operand(zero_reg));
2985   {
2986     // Call the api function directly.
2987     __ mov(t7, function_address);
2988     __ Branch(&end_profiler_check);
2989   }
2990 
2991   __ bind(&profiler_enabled);
2992   {
2993     // Additional parameter is the address of the actual callback.
2994     __ li(t7, thunk_ref);
2995   }
2996   __ bind(&end_profiler_check);
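  // t7 now holds the address to call: either the API callback itself or, when
  // profiling or runtime stats are enabled, the thunk, which receives the real
  // callback address as an additional parameter (still in function_address).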
2997 
2998   // Allocate HandleScope in callee-save registers.
2999   __ li(s5, next_address);
3000   __ Ld_d(s0, MemOperand(s5, kNextOffset));
3001   __ Ld_d(s1, MemOperand(s5, kLimitOffset));
3002   __ Ld_w(s2, MemOperand(s5, kLevelOffset));
3003   __ Add_w(s2, s2, Operand(1));
3004   __ St_w(s2, MemOperand(s5, kLevelOffset));
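  // s0, s1 and s2 now cache the HandleScope next, limit and level fields so
  // the scope can be restored (and extensions deleted) after the call.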
3005 
3006   __ StoreReturnAddressAndCall(t7);
3007 
3008   Label promote_scheduled_exception;
3009   Label delete_allocated_handles;
3010   Label leave_exit_frame;
3011   Label return_value_loaded;
3012 
3013   // Load value from ReturnValue.
3014   __ Ld_d(a0, return_value_operand);
3015   __ bind(&return_value_loaded);
3016 
3017   // No more valid handles (the result handle was the last one). Restore
3018   // previous handle scope.
3019   __ St_d(s0, MemOperand(s5, kNextOffset));
3020   if (FLAG_debug_code) {
3021     __ Ld_w(a1, MemOperand(s5, kLevelOffset));
3022     __ Check(eq, AbortReason::kUnexpectedLevelAfterReturnFromApiCall, a1,
3023              Operand(s2));
3024   }
3025   __ Sub_w(s2, s2, Operand(1));
3026   __ St_w(s2, MemOperand(s5, kLevelOffset));
3027   __ Ld_d(kScratchReg, MemOperand(s5, kLimitOffset));
3028   __ Branch(&delete_allocated_handles, ne, s1, Operand(kScratchReg));
3029 
3030   // Leave the API exit frame.
3031   __ bind(&leave_exit_frame);
3032 
3033   if (stack_space_operand == nullptr) {
3034     DCHECK_NE(stack_space, 0);
3035     __ li(s0, Operand(stack_space));
3036   } else {
3037     DCHECK_EQ(stack_space, 0);
3038     __ Ld_d(s0, *stack_space_operand);
3039   }
3040 
3041   static constexpr bool kDontSaveDoubles = false;
3042   static constexpr bool kRegisterContainsSlotCount = false;
3043   __ LeaveExitFrame(kDontSaveDoubles, s0, NO_EMIT_RETURN,
3044                     kRegisterContainsSlotCount);
3045 
3046   // Check if the function scheduled an exception.
3047   __ LoadRoot(a4, RootIndex::kTheHoleValue);
3048   __ li(kScratchReg, ExternalReference::scheduled_exception_address(isolate));
3049   __ Ld_d(a5, MemOperand(kScratchReg, 0));
3050   __ Branch(&promote_scheduled_exception, ne, a4, Operand(a5));
3051 
3052   __ Ret();
3053 
3054   // Re-throw by promoting a scheduled exception.
3055   __ bind(&promote_scheduled_exception);
3056   __ TailCallRuntime(Runtime::kPromoteScheduledException);
3057 
3058   // HandleScope limit has changed. Delete allocated extensions.
3059   __ bind(&delete_allocated_handles);
3060   __ St_d(s1, MemOperand(s5, kLimitOffset));
3061   __ mov(s0, a0);
3062   __ PrepareCallCFunction(1, s1);
3063   __ li(a0, ExternalReference::isolate_address(isolate));
3064   __ CallCFunction(ExternalReference::delete_handle_scope_extensions(), 1);
3065   __ mov(a0, s0);
3066   __ jmp(&leave_exit_frame);
3067 }
3068 
3069 }  // namespace
3070 
3071 void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
3072   // ----------- S t a t e -------------
3073   //  -- cp                  : context
3074   //  -- a1                  : api function address
3075   //  -- a2                  : arguments count (not including the receiver)
3076   //  -- a3                  : call data
3077   //  -- a0                  : holder
3078   //  -- sp[0]               : receiver
3079   //  -- sp[8]               : first argument
3080   //  -- ...
3081   //  -- sp[(argc) * 8]      : last argument
3082   // -----------------------------------
3083 
3084   Register api_function_address = a1;
3085   Register argc = a2;
3086   Register call_data = a3;
3087   Register holder = a0;
3088   Register scratch = t0;
3089   Register base = t1;  // For addressing MemOperands on the stack.
3090 
3091   DCHECK(!AreAliased(api_function_address, argc, call_data, holder, scratch,
3092                      base));
3093 
3094   using FCA = FunctionCallbackArguments;
3095 
3096   STATIC_ASSERT(FCA::kArgsLength == 6);
3097   STATIC_ASSERT(FCA::kNewTargetIndex == 5);
3098   STATIC_ASSERT(FCA::kDataIndex == 4);
3099   STATIC_ASSERT(FCA::kReturnValueOffset == 3);
3100   STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
3101   STATIC_ASSERT(FCA::kIsolateIndex == 1);
3102   STATIC_ASSERT(FCA::kHolderIndex == 0);
3103 
3104   // Set up FunctionCallbackInfo's implicit_args on the stack as follows:
3105   //
3106   // Target state:
3107   //   sp[0 * kPointerSize]: kHolder
3108   //   sp[1 * kPointerSize]: kIsolate
3109   //   sp[2 * kPointerSize]: undefined (kReturnValueDefaultValue)
3110   //   sp[3 * kPointerSize]: undefined (kReturnValue)
3111   //   sp[4 * kPointerSize]: kData
3112   //   sp[5 * kPointerSize]: undefined (kNewTarget)
3113 
3114   // Set up the base register for addressing through MemOperands. It will point
3115   // at the receiver (located at sp + argc * kPointerSize).
3116   __ Alsl_d(base, argc, sp, kPointerSizeLog2, t7);
3117 
3118   // Reserve space on the stack.
3119   __ Sub_d(sp, sp, Operand(FCA::kArgsLength * kPointerSize));
3120 
3121   // kHolder.
3122   __ St_d(holder, MemOperand(sp, 0 * kPointerSize));
3123 
3124   // kIsolate.
3125   __ li(scratch, ExternalReference::isolate_address(masm->isolate()));
3126   __ St_d(scratch, MemOperand(sp, 1 * kPointerSize));
3127 
3128   // kReturnValueDefaultValue and kReturnValue.
3129   __ LoadRoot(scratch, RootIndex::kUndefinedValue);
3130   __ St_d(scratch, MemOperand(sp, 2 * kPointerSize));
3131   __ St_d(scratch, MemOperand(sp, 3 * kPointerSize));
3132 
3133   // kData.
3134   __ St_d(call_data, MemOperand(sp, 4 * kPointerSize));
3135 
3136   // kNewTarget.
3137   __ St_d(scratch, MemOperand(sp, 5 * kPointerSize));
3138 
3139   // Keep a pointer to kHolder (= implicit_args) in a scratch register.
3140   // We use it below to set up the FunctionCallbackInfo object.
3141   __ mov(scratch, sp);
3142 
3143   // Allocate the v8::FunctionCallbackInfo structure in the arguments' space,
3144   // since it's not controlled by GC.
3145   static constexpr int kApiStackSpace = 4;
3146   static constexpr bool kDontSaveDoubles = false;
3147   FrameScope frame_scope(masm, StackFrame::MANUAL);
3148   __ EnterExitFrame(kDontSaveDoubles, kApiStackSpace);
3149 
3150   // EnterExitFrame may align the sp.
3151 
3152   // FunctionCallbackInfo::implicit_args_ (points at kHolder as set up above).
3153   // Arguments are after the return address (pushed by EnterExitFrame()).
3154   __ St_d(scratch, MemOperand(sp, 1 * kPointerSize));
3155 
3156   // FunctionCallbackInfo::values_ (points at the first varargs argument passed
3157   // on the stack).
3158   __ Add_d(scratch, scratch,
3159            Operand((FCA::kArgsLength + 1) * kSystemPointerSize));
3160 
3161   __ St_d(scratch, MemOperand(sp, 2 * kPointerSize));
3162 
3163   // FunctionCallbackInfo::length_.
3164   // Stored as an int field; 32-bit integers within a struct on the stack are
3165   // always left-justified by the LP64 ABI.
3166   __ St_w(argc, MemOperand(sp, 3 * kPointerSize));
3167 
3168   // We also store the number of stack slots to drop after returning from the
3169   // API function here.
3170   // Note: Unlike on other architectures, this is the number of slots to drop,
3171   // not the number of bytes.
3172   __ Add_d(scratch, argc, Operand(FCA::kArgsLength + 1 /* receiver */));
3173   __ St_d(scratch, MemOperand(sp, 4 * kPointerSize));
3174 
3175   // v8::InvocationCallback's argument.
3176   DCHECK(!AreAliased(api_function_address, scratch, a0));
3177   __ Add_d(a0, sp, Operand(1 * kPointerSize));
3178 
3179   ExternalReference thunk_ref = ExternalReference::invoke_function_callback();
3180 
3181   // There are two stack slots above the arguments we constructed on the stack.
3182   // TODO(jgruber): Document what these arguments are.
3183   static constexpr int kStackSlotsAboveFCA = 2;
3184   MemOperand return_value_operand(
3185       fp, (kStackSlotsAboveFCA + FCA::kReturnValueOffset) * kPointerSize);
3186 
3187   static constexpr int kUseStackSpaceOperand = 0;
3188   MemOperand stack_space_operand(sp, 4 * kPointerSize);
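  // stack_space_operand aliases the slot count stored at sp + 4 * kPointerSize
  // above, so CallApiFunctionAndReturn drops exactly the arguments pushed for
  // this call when it leaves the exit frame.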
3189 
3190   AllowExternalCallThatCantCauseGC scope(masm);
3191   CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
3192                            kUseStackSpaceOperand, &stack_space_operand,
3193                            return_value_operand);
3194 }
3195 
3196 void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
3197   // Build v8::PropertyCallbackInfo::args_ array on the stack and push property
3198   // name below the exit frame to make GC aware of them.
3199   STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
3200   STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
3201   STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
3202   STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
3203   STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
3204   STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
3205   STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
3206   STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);
3207 
3208   Register receiver = ApiGetterDescriptor::ReceiverRegister();
3209   Register holder = ApiGetterDescriptor::HolderRegister();
3210   Register callback = ApiGetterDescriptor::CallbackRegister();
3211   Register scratch = a4;
3212   DCHECK(!AreAliased(receiver, holder, callback, scratch));
3213 
3214   Register api_function_address = a2;
3215 
3216   // Here and below +1 is for name() pushed after the args_ array.
3217   using PCA = PropertyCallbackArguments;
3218   __ Sub_d(sp, sp, (PCA::kArgsLength + 1) * kPointerSize);
3219   __ St_d(receiver, MemOperand(sp, (PCA::kThisIndex + 1) * kPointerSize));
3220   __ Ld_d(scratch, FieldMemOperand(callback, AccessorInfo::kDataOffset));
3221   __ St_d(scratch, MemOperand(sp, (PCA::kDataIndex + 1) * kPointerSize));
3222   __ LoadRoot(scratch, RootIndex::kUndefinedValue);
3223   __ St_d(scratch,
3224           MemOperand(sp, (PCA::kReturnValueOffset + 1) * kPointerSize));
3225   __ St_d(scratch, MemOperand(sp, (PCA::kReturnValueDefaultValueIndex + 1) *
3226                                       kPointerSize));
3227   __ li(scratch, ExternalReference::isolate_address(masm->isolate()));
3228   __ St_d(scratch, MemOperand(sp, (PCA::kIsolateIndex + 1) * kPointerSize));
3229   __ St_d(holder, MemOperand(sp, (PCA::kHolderIndex + 1) * kPointerSize));
3230   // should_throw_on_error -> false
3231   DCHECK_EQ(0, Smi::zero().ptr());
3232   __ St_d(zero_reg,
3233           MemOperand(sp, (PCA::kShouldThrowOnErrorIndex + 1) * kPointerSize));
3234   __ Ld_d(scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset));
3235   __ St_d(scratch, MemOperand(sp, 0 * kPointerSize));
3236 
3237   // v8::PropertyCallbackInfo::args_ array and name handle.
3238   const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
3239 
3240   // Load address of v8::PropertyAccessorInfo::args_ array and name handle.
3241   __ mov(a0, sp);                               // a0 = Handle<Name>
3242   __ Add_d(a1, a0, Operand(1 * kPointerSize));  // a1 = v8::PCI::args_
3243 
3244   const int kApiStackSpace = 1;
3245   FrameScope frame_scope(masm, StackFrame::MANUAL);
3246   __ EnterExitFrame(false, kApiStackSpace);
3247 
3248   // Create a v8::PropertyCallbackInfo object on the stack and initialize
3249   // its args_ field.
3250   __ St_d(a1, MemOperand(sp, 1 * kPointerSize));
3251   __ Add_d(a1, sp, Operand(1 * kPointerSize));
3252   // a1 = v8::PropertyCallbackInfo&
3253 
3254   ExternalReference thunk_ref =
3255       ExternalReference::invoke_accessor_getter_callback();
3256 
3257   __ Ld_d(scratch, FieldMemOperand(callback, AccessorInfo::kJsGetterOffset));
3258   __ Ld_d(api_function_address,
3259           FieldMemOperand(scratch, Foreign::kForeignAddressOffset));
3260 
3261   // +3 is to skip prolog, return address and name handle.
3262   MemOperand return_value_operand(
3263       fp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
3264   MemOperand* const kUseStackSpaceConstant = nullptr;
3265   CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
3266                            kStackUnwindSpace, kUseStackSpaceConstant,
3267                            return_value_operand);
3268 }
3269 
3270 void Builtins::Generate_DirectCEntry(MacroAssembler* masm) {
3271   // The sole purpose of DirectCEntry is for movable callers (e.g. any general
3272   // purpose Code object) to be able to call into C functions that may trigger
3273   // GC and thus move the caller.
3274   //
3275   // DirectCEntry places the return address on the stack (updated by the GC),
3276   // making the call GC safe. The irregexp backend relies on this.
3277 
3278   __ St_d(ra, MemOperand(sp, 0));  // Store the return address.
3279   __ Call(t7);                     // Call the C++ function.
3280   __ Ld_d(ra, MemOperand(sp, 0));  // Return to calling code.
3281 
3282   // TODO(LOONG_dev): LOONG64 Check this assert.
3283   if (FLAG_debug_code && FLAG_enable_slow_asserts) {
3284     // In case of an error the return address may point to a memory area
3285     // filled with kZapValue by the GC. Dereference the address and check for
3286     // this.
3287     __ Ld_d(a4, MemOperand(ra, 0));
3288     __ Assert(ne, AbortReason::kReceivedInvalidReturnAddress, a4,
3289               Operand(reinterpret_cast<uint64_t>(kZapValue)));
3290   }
3291 
3292   __ Jump(ra);
3293 }
3294 
3295 namespace {
3296 
3297 // This code tries to be close to ia32 code so that any changes can be
3298 // easily ported.
3299 void Generate_DeoptimizationEntry(MacroAssembler* masm,
3300                                   DeoptimizeKind deopt_kind) {
3301   Isolate* isolate = masm->isolate();
3302 
3303   // Unlike on ARM we don't save all the registers, just the useful ones.
3304   // For the rest, there are gaps on the stack, so the offsets remain the same.
3305   const int kNumberOfRegisters = Register::kNumRegisters;
3306 
3307   RegList restored_regs = kJSCallerSaved | kCalleeSaved;
3308   RegList saved_regs = restored_regs | sp.bit() | ra.bit();
3309 
3310   const int kDoubleRegsSize = kDoubleSize * DoubleRegister::kNumRegisters;
3311 
3312   // Save all double FPU registers before messing with them.
3313   __ Sub_d(sp, sp, Operand(kDoubleRegsSize));
3314   const RegisterConfiguration* config = RegisterConfiguration::Default();
3315   for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
3316     int code = config->GetAllocatableDoubleCode(i);
3317     const DoubleRegister fpu_reg = DoubleRegister::from_code(code);
3318     int offset = code * kDoubleSize;
3319     __ Fst_d(fpu_reg, MemOperand(sp, offset));
3320   }
3321 
3322   // Push saved_regs (needed to populate FrameDescription::registers_).
3323   // Leave gaps for other registers.
3324   __ Sub_d(sp, sp, kNumberOfRegisters * kPointerSize);
3325   for (int16_t i = kNumberOfRegisters - 1; i >= 0; i--) {
3326     if ((saved_regs & (1 << i)) != 0) {
3327       __ St_d(ToRegister(i), MemOperand(sp, kPointerSize * i));
3328     }
3329   }
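  // The stack now contains a kNumberOfRegisters-sized block with gaps for the
  // registers that were not pushed; the offsets therefore match
  // FrameDescription::registers_ when the block is copied below.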
3330 
3331   __ li(a2,
3332         ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate));
3333   __ St_d(fp, MemOperand(a2, 0));
3334 
3335   const int kSavedRegistersAreaSize =
3336       (kNumberOfRegisters * kPointerSize) + kDoubleRegsSize;
3337 
3338   __ li(a2, Operand(Deoptimizer::kFixedExitSizeMarker));
3339   // Get the address of the location in the code object (a3) (return
3340   // address for lazy deoptimization) and compute the fp-to-sp delta in
3341   // register a4.
3342   __ mov(a3, ra);
3343   __ Add_d(a4, sp, Operand(kSavedRegistersAreaSize));
3344 
3345   __ sub_d(a4, fp, a4);
3346 
3347   // Allocate a new deoptimizer object.
3348   __ PrepareCallCFunction(6, a5);
3349   // Pass six arguments, according to the LP64 ABI.
3350   __ mov(a0, zero_reg);
3351   Label context_check;
3352   __ Ld_d(a1, MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
3353   __ JumpIfSmi(a1, &context_check);
3354   __ Ld_d(a0, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
3355   __ bind(&context_check);
3356   __ li(a1, Operand(static_cast<int>(deopt_kind)));
3357   // a2: bailout id already loaded.
3358   // a3: code address or 0 already loaded.
3359   // a4: already has fp-to-sp delta.
3360   __ li(a5, ExternalReference::isolate_address(isolate));
3361 
3362   // Call Deoptimizer::New().
3363   {
3364     AllowExternalCallThatCantCauseGC scope(masm);
3365     __ CallCFunction(ExternalReference::new_deoptimizer_function(), 6);
3366   }
3367 
3368   // Preserve "deoptimizer" object in register a0 and get the input
3369   // frame descriptor pointer to a1 (deoptimizer->input_);
3370   // Move deopt-obj to a0 for call to Deoptimizer::ComputeOutputFrames() below.
3371   __ Ld_d(a1, MemOperand(a0, Deoptimizer::input_offset()));
3372 
3373   // Copy core registers into FrameDescription::registers_[kNumRegisters].
3374   DCHECK_EQ(Register::kNumRegisters, kNumberOfRegisters);
3375   for (int i = 0; i < kNumberOfRegisters; i++) {
3376     int offset = (i * kPointerSize) + FrameDescription::registers_offset();
3377     if ((saved_regs & (1 << i)) != 0) {
3378       __ Ld_d(a2, MemOperand(sp, i * kPointerSize));
3379       __ St_d(a2, MemOperand(a1, offset));
3380     } else if (FLAG_debug_code) {
3381       __ li(a2, Operand(kDebugZapValue));
3382       __ St_d(a2, MemOperand(a1, offset));
3383     }
3384   }
3385 
3386   int double_regs_offset = FrameDescription::double_registers_offset();
3387   // Copy FPU registers to
3388   // double_registers_[DoubleRegister::kNumAllocatableRegisters]
3389   for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
3390     int code = config->GetAllocatableDoubleCode(i);
3391     int dst_offset = code * kDoubleSize + double_regs_offset;
3392     int src_offset = code * kDoubleSize + kNumberOfRegisters * kPointerSize;
3393     __ Fld_d(f0, MemOperand(sp, src_offset));
3394     __ Fst_d(f0, MemOperand(a1, dst_offset));
3395   }
3396 
3397   // Remove the saved registers from the stack.
3398   __ Add_d(sp, sp, Operand(kSavedRegistersAreaSize));
3399 
3400   // Compute a pointer to the unwinding limit in register a2; that is
3401   // the first stack slot not part of the input frame.
3402   __ Ld_d(a2, MemOperand(a1, FrameDescription::frame_size_offset()));
3403   __ add_d(a2, a2, sp);
3404 
3405   // Unwind the stack down to - but not including - the unwinding
3406   // limit and copy the contents of the activation frame to the input
3407   // frame description.
3408   __ Add_d(a3, a1, Operand(FrameDescription::frame_content_offset()));
3409   Label pop_loop;
3410   Label pop_loop_header;
3411   __ Branch(&pop_loop_header);
3412   __ bind(&pop_loop);
3413   __ Pop(a4);
3414   __ St_d(a4, MemOperand(a3, 0));
3415   __ addi_d(a3, a3, sizeof(uint64_t));
3416   __ bind(&pop_loop_header);
3417   __ BranchShort(&pop_loop, ne, a2, Operand(sp));
3418   // Compute the output frame in the deoptimizer.
3419   __ Push(a0);  // Preserve deoptimizer object across call.
3420   // a0: deoptimizer object; a1: scratch.
3421   __ PrepareCallCFunction(1, a1);
3422   // Call Deoptimizer::ComputeOutputFrames().
3423   {
3424     AllowExternalCallThatCantCauseGC scope(masm);
3425     __ CallCFunction(ExternalReference::compute_output_frames_function(), 1);
3426   }
3427   __ Pop(a0);  // Restore deoptimizer object (class Deoptimizer).
3428 
3429   __ Ld_d(sp, MemOperand(a0, Deoptimizer::caller_frame_top_offset()));
3430 
3431   // Replace the current (input) frame with the output frames.
3432   Label outer_push_loop, inner_push_loop, outer_loop_header, inner_loop_header;
3433   // Outer loop state: a4 = current "FrameDescription** output_",
3434   // a1 = one past the last FrameDescription**.
3435   __ Ld_w(a1, MemOperand(a0, Deoptimizer::output_count_offset()));
3436   __ Ld_d(a4, MemOperand(a0, Deoptimizer::output_offset()));  // a4 is output_.
3437   __ Alsl_d(a1, a1, a4, kPointerSizeLog2);
3438   __ Branch(&outer_loop_header);
3439   __ bind(&outer_push_loop);
3440   // Inner loop state: a2 = current FrameDescription*, a3 = loop index.
3441   __ Ld_d(a2, MemOperand(a4, 0));  // output_[ix]
3442   __ Ld_d(a3, MemOperand(a2, FrameDescription::frame_size_offset()));
3443   __ Branch(&inner_loop_header);
3444   __ bind(&inner_push_loop);
3445   __ Sub_d(a3, a3, Operand(sizeof(uint64_t)));
3446   __ Add_d(a6, a2, Operand(a3));
3447   __ Ld_d(a7, MemOperand(a6, FrameDescription::frame_content_offset()));
3448   __ Push(a7);
3449   __ bind(&inner_loop_header);
3450   __ BranchShort(&inner_push_loop, ne, a3, Operand(zero_reg));
3451 
3452   __ Add_d(a4, a4, Operand(kPointerSize));
3453   __ bind(&outer_loop_header);
3454   __ BranchShort(&outer_push_loop, lt, a4, Operand(a1));
3455 
3456   __ Ld_d(a1, MemOperand(a0, Deoptimizer::input_offset()));
3457   for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
3458     int code = config->GetAllocatableDoubleCode(i);
3459     const DoubleRegister fpu_reg = DoubleRegister::from_code(code);
3460     int src_offset = code * kDoubleSize + double_regs_offset;
3461     __ Fld_d(fpu_reg, MemOperand(a1, src_offset));
3462   }
3463 
3464   // Push pc and continuation from the last output frame.
3465   __ Ld_d(a6, MemOperand(a2, FrameDescription::pc_offset()));
3466   __ Push(a6);
3467   __ Ld_d(a6, MemOperand(a2, FrameDescription::continuation_offset()));
3468   __ Push(a6);
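  // Below, the continuation is popped into t7 and the pc into ra, so the
  // continuation builtin can return to the pc recorded in the topmost output
  // frame.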
3469 
3470   // Technically restoring 't7' should work unless zero_reg is also restored,
3471   // but it's safer to check for this.
3472   DCHECK(!(t7.bit() & restored_regs));
3473   // Restore the registers from the last output frame.
3474   __ mov(t7, a2);
3475   for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
3476     int offset = (i * kPointerSize) + FrameDescription::registers_offset();
3477     if ((restored_regs & (1 << i)) != 0) {
3478       __ Ld_d(ToRegister(i), MemOperand(t7, offset));
3479     }
3480   }
3481 
3482   __ Pop(t7);  // Get continuation, leave pc on stack.
3483   __ Pop(ra);
3484   __ Jump(t7);
3485   __ stop();
3486 }
3487 
3488 }  // namespace
3489 
3490 void Builtins::Generate_DeoptimizationEntry_Eager(MacroAssembler* masm) {
3491   Generate_DeoptimizationEntry(masm, DeoptimizeKind::kEager);
3492 }
3493 
3494 void Builtins::Generate_DeoptimizationEntry_Soft(MacroAssembler* masm) {
3495   Generate_DeoptimizationEntry(masm, DeoptimizeKind::kSoft);
3496 }
3497 
3498 void Builtins::Generate_DeoptimizationEntry_Bailout(MacroAssembler* masm) {
3499   Generate_DeoptimizationEntry(masm, DeoptimizeKind::kBailout);
3500 }
3501 
3502 void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
3503   Generate_DeoptimizationEntry(masm, DeoptimizeKind::kLazy);
3504 }
3505 
3506 namespace {
3507 
3508 // Restarts execution either at the current or next (in execution order)
3509 // bytecode. If there is baseline code on the shared function info, converts an
3510 // interpreter frame into a baseline frame and continues execution in baseline
3511 // code. Otherwise execution continues with bytecode.
3512 void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
3513                                          bool next_bytecode,
3514                                          bool is_osr = false) {
3515   Label start;
3516   __ bind(&start);
3517 
3518   // Get function from the frame.
3519   Register closure = a1;
3520   __ Ld_d(closure, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
3521 
3522   // Get the Code object from the shared function info.
3523   Register code_obj = s1;
3524   __ Ld_d(code_obj,
3525           FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
3526   __ Ld_d(code_obj,
3527           FieldMemOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset));
3528 
3529   // Check if we have baseline code. For OSR entry it is safe to assume we
3530   // always have baseline code.
3531   if (!is_osr) {
3532     Label start_with_baseline;
3533     __ GetObjectType(code_obj, t2, t2);
3534     __ Branch(&start_with_baseline, eq, t2, Operand(CODET_TYPE));
3535 
3536     // Start with bytecode as there is no baseline code.
3537     Builtin builtin_id = next_bytecode
3538                              ? Builtin::kInterpreterEnterAtNextBytecode
3539                              : Builtin::kInterpreterEnterAtBytecode;
3540     __ Jump(masm->isolate()->builtins()->code_handle(builtin_id),
3541             RelocInfo::CODE_TARGET);
3542 
3543     // Start with baseline code.
3544     __ bind(&start_with_baseline);
3545   } else if (FLAG_debug_code) {
3546     __ GetObjectType(code_obj, t2, t2);
3547     __ Assert(eq, AbortReason::kExpectedBaselineData, t2, Operand(CODET_TYPE));
3548   }
3549 
3550   if (FLAG_debug_code) {
3551     AssertCodeIsBaseline(masm, code_obj, t2);
3552   }
3553 
3554   // Load the feedback vector from the closure.
3555   Register feedback_vector = a2;
3556   __ Ld_d(feedback_vector,
3557           FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
3558   __ Ld_d(feedback_vector,
3559           FieldMemOperand(feedback_vector, Cell::kValueOffset));
3560 
3561   Label install_baseline_code;
3562   // Check if feedback vector is valid. If not, call prepare for baseline to
3563   // allocate it.
3564   __ GetObjectType(feedback_vector, t2, t2);
3565   __ Branch(&install_baseline_code, ne, t2, Operand(FEEDBACK_VECTOR_TYPE));
3566 
3567   // Save BytecodeOffset from the stack frame.
3568   __ SmiUntag(kInterpreterBytecodeOffsetRegister,
3569               MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
3570   // Replace BytecodeOffset with the feedback vector.
3571   __ St_d(feedback_vector,
3572           MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
3573   feedback_vector = no_reg;
3574 
3575   // Compute baseline pc for bytecode offset.
3576   ExternalReference get_baseline_pc_extref;
3577   if (next_bytecode || is_osr) {
3578     get_baseline_pc_extref =
3579         ExternalReference::baseline_pc_for_next_executed_bytecode();
3580   } else {
3581     get_baseline_pc_extref =
3582         ExternalReference::baseline_pc_for_bytecode_offset();
3583   }
3584 
3585   Register get_baseline_pc = a3;
3586   __ li(get_baseline_pc, get_baseline_pc_extref);
3587 
3588   // If the code deoptimizes during the implicit function entry stack interrupt
3589   // check, it will have a bailout ID of kFunctionEntryBytecodeOffset, which is
3590   // not a valid bytecode offset.
3591   // TODO(pthier): Investigate if it is feasible to handle this special case
3592   // in TurboFan instead of here.
3593   Label valid_bytecode_offset, function_entry_bytecode;
3594   if (!is_osr) {
3595     __ Branch(&function_entry_bytecode, eq, kInterpreterBytecodeOffsetRegister,
3596               Operand(BytecodeArray::kHeaderSize - kHeapObjectTag +
3597                       kFunctionEntryBytecodeOffset));
3598   }
3599 
3600   __ Sub_d(kInterpreterBytecodeOffsetRegister,
3601            kInterpreterBytecodeOffsetRegister,
3602            (BytecodeArray::kHeaderSize - kHeapObjectTag));
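  // The offset stored in the frame includes the BytecodeArray header;
  // subtracting it above yields the plain bytecode offset expected by the
  // baseline-pc helper called below.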
3603 
3604   __ bind(&valid_bytecode_offset);
3605   // Get bytecode array from the stack frame.
3606   __ Ld_d(kInterpreterBytecodeArrayRegister,
3607           MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
3608   // Save the accumulator register, since it's clobbered by the below call.
3609   __ Push(kInterpreterAccumulatorRegister);
3610   {
3611     Register arg_reg_1 = a0;
3612     Register arg_reg_2 = a1;
3613     Register arg_reg_3 = a2;
3614     __ Move(arg_reg_1, code_obj);
3615     __ Move(arg_reg_2, kInterpreterBytecodeOffsetRegister);
3616     __ Move(arg_reg_3, kInterpreterBytecodeArrayRegister);
3617     FrameScope scope(masm, StackFrame::INTERNAL);
3618     __ CallCFunction(get_baseline_pc, 3, 0);
3619   }
3620   __ Add_d(code_obj, code_obj, kReturnRegister0);
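  // kReturnRegister0 holds the pc offset computed by the helper; it is added
  // to the Code object here, and the Code header size is applied below (or by
  // Generate_OSREntry) before jumping.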
3621   __ Pop(kInterpreterAccumulatorRegister);
3622 
3623   if (is_osr) {
3624     // Reset the OSR loop nesting depth to disarm back edges.
3625     // TODO(pthier): Separate baseline Sparkplug from TF arming and don't disarm
3626     // Sparkplug here.
3627     // TODO(liuyu): Remove Ld as arm64 after register reallocation.
3628     __ Ld_d(kInterpreterBytecodeArrayRegister,
3629             MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
3630     __ St_h(zero_reg,
3631             FieldMemOperand(kInterpreterBytecodeArrayRegister,
3632                             BytecodeArray::kOsrLoopNestingLevelOffset));
3633     Generate_OSREntry(masm, code_obj,
3634                       Operand(Code::kHeaderSize - kHeapObjectTag));
3635   } else {
3636     __ Add_d(code_obj, code_obj, Code::kHeaderSize - kHeapObjectTag);
3637     __ Jump(code_obj);
3638   }
3639   __ Trap();  // Unreachable.
3640 
3641   if (!is_osr) {
3642     __ bind(&function_entry_bytecode);
3643     // If the bytecode offset is kFunctionEntryBytecodeOffset, get the start
3644     // address of the first bytecode.
3645     __ mov(kInterpreterBytecodeOffsetRegister, zero_reg);
3646     if (next_bytecode) {
3647       __ li(get_baseline_pc,
3648             ExternalReference::baseline_pc_for_bytecode_offset());
3649     }
3650     __ Branch(&valid_bytecode_offset);
3651   }
3652 
3653   __ bind(&install_baseline_code);
3654   {
3655     FrameScope scope(masm, StackFrame::INTERNAL);
3656     __ Push(kInterpreterAccumulatorRegister);
3657     __ Push(closure);
3658     __ CallRuntime(Runtime::kInstallBaselineCode, 1);
3659     __ Pop(kInterpreterAccumulatorRegister);
3660   }
3661   // Retry from the start after installing baseline code.
3662   __ Branch(&start);
3663 }
3664 
3665 }  // namespace
3666 
3667 void Builtins::Generate_BaselineOrInterpreterEnterAtBytecode(
3668     MacroAssembler* masm) {
3669   Generate_BaselineOrInterpreterEntry(masm, false);
3670 }
3671 
3672 void Builtins::Generate_BaselineOrInterpreterEnterAtNextBytecode(
3673     MacroAssembler* masm) {
3674   Generate_BaselineOrInterpreterEntry(masm, true);
3675 }
3676 
3677 void Builtins::Generate_InterpreterOnStackReplacement_ToBaseline(
3678     MacroAssembler* masm) {
3679   Generate_BaselineOrInterpreterEntry(masm, false, true);
3680 }
3681 
3682 void Builtins::Generate_DynamicCheckMapsTrampoline(MacroAssembler* masm) {
3683   Generate_DynamicCheckMapsTrampoline<DynamicCheckMapsDescriptor>(
3684       masm, BUILTIN_CODE(masm->isolate(), DynamicCheckMaps));
3685 }
3686 
3687 void Builtins::Generate_DynamicCheckMapsWithFeedbackVectorTrampoline(
3688     MacroAssembler* masm) {
3689   Generate_DynamicCheckMapsTrampoline<
3690       DynamicCheckMapsWithFeedbackVectorDescriptor>(
3691       masm, BUILTIN_CODE(masm->isolate(), DynamicCheckMapsWithFeedbackVector));
3692 }
3693 
3694 template <class Descriptor>
3695 void Builtins::Generate_DynamicCheckMapsTrampoline(
3696     MacroAssembler* masm, Handle<Code> builtin_target) {
3697   FrameScope scope(masm, StackFrame::MANUAL);
3698   __ EnterFrame(StackFrame::INTERNAL);
3699 
3700   // Only save the registers that the DynamicCheckMaps builtin can clobber.
3701   Descriptor descriptor;
3702   RegList registers = descriptor.allocatable_registers();
3703   // If FLAG_debug_code is enabled, CSA checks will call a C function, so we
3704   // need to save all CallerSaved registers too.
3705   if (FLAG_debug_code) registers |= kJSCallerSaved;
3706   __ MaybeSaveRegisters(registers);
3707 
3708   // Load the immediate arguments from the deopt exit to pass to the builtin.
3709   Register slot_arg = descriptor.GetRegisterParameter(Descriptor::kSlot);
3710   Register handler_arg = descriptor.GetRegisterParameter(Descriptor::kHandler);
3711   __ Ld_d(handler_arg, MemOperand(fp, CommonFrameConstants::kCallerPCOffset));
3712   __ Ld_d(
3713       slot_arg,
3714       MemOperand(handler_arg, Deoptimizer::kEagerWithResumeImmedArgs1PcOffset));
3715   __ Ld_d(
3716       handler_arg,
3717       MemOperand(handler_arg, Deoptimizer::kEagerWithResumeImmedArgs2PcOffset));
3718   __ Call(builtin_target, RelocInfo::CODE_TARGET);
3719 
3720   Label deopt, bailout;
3721   __ Branch(&deopt, ne, a0,
3722             Operand(static_cast<int64_t>(DynamicCheckMapsStatus::kSuccess)));
3723 
3724   __ MaybeRestoreRegisters(registers);
3725   __ LeaveFrame(StackFrame::INTERNAL);
3726   __ Ret();
3727 
3728   __ bind(&deopt);
3729   __ Branch(&bailout, eq, a0,
3730             Operand(static_cast<int64_t>(DynamicCheckMapsStatus::kBailout)));
3731 
3732   if (FLAG_debug_code) {
3733     __ Assert(eq, AbortReason::kUnexpectedDynamicCheckMapsStatus, a0,
3734               Operand(static_cast<int64_t>(DynamicCheckMapsStatus::kDeopt)));
3735   }
3736   __ MaybeRestoreRegisters(registers);
3737   __ LeaveFrame(StackFrame::INTERNAL);
3738   Handle<Code> deopt_eager = masm->isolate()->builtins()->code_handle(
3739       Deoptimizer::GetDeoptimizationEntry(DeoptimizeKind::kEager));
3740   __ Jump(deopt_eager, RelocInfo::CODE_TARGET);
3741 
3742   __ bind(&bailout);
3743   __ MaybeRestoreRegisters(registers);
3744   __ LeaveFrame(StackFrame::INTERNAL);
3745   Handle<Code> deopt_bailout = masm->isolate()->builtins()->code_handle(
3746       Deoptimizer::GetDeoptimizationEntry(DeoptimizeKind::kBailout));
3747   __ Jump(deopt_bailout, RelocInfo::CODE_TARGET);
3748 }
3749 
3750 #undef __
3751 
3752 }  // namespace internal
3753 }  // namespace v8
3754 
3755 #endif  // V8_TARGET_ARCH_LOONG64
3756