1 // Copyright 2014 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #if V8_TARGET_ARCH_S390
6 
7 #include "src/assembler-inl.h"
8 #include "src/code-factory.h"
9 #include "src/code-stubs.h"
10 #include "src/debug/debug.h"
11 #include "src/deoptimizer.h"
12 #include "src/frame-constants.h"
13 #include "src/frames.h"
14 #include "src/runtime/runtime.h"
15 
16 namespace v8 {
17 namespace internal {
18 
19 #define __ ACCESS_MASM(masm)
20 
Generate_Adaptor(MacroAssembler * masm,Address address,ExitFrameType exit_frame_type)21 void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address,
22                                 ExitFrameType exit_frame_type) {
23   __ Move(r7, ExternalReference::Create(address));
24   if (exit_frame_type == BUILTIN_EXIT) {
25     __ Jump(BUILTIN_CODE(masm->isolate(), AdaptorWithBuiltinExitFrame),
26             RelocInfo::CODE_TARGET);
27   } else {
28     DCHECK(exit_frame_type == EXIT);
29     __ Jump(BUILTIN_CODE(masm->isolate(), AdaptorWithExitFrame),
30             RelocInfo::CODE_TARGET);
31   }
32 }
33 
34 namespace {
35 
AdaptorWithExitFrameType(MacroAssembler * masm,Builtins::ExitFrameType exit_frame_type)36 void AdaptorWithExitFrameType(MacroAssembler* masm,
37                               Builtins::ExitFrameType exit_frame_type) {
38   // ----------- S t a t e -------------
39   //  -- r2                 : number of arguments excluding receiver
40   //  -- r3                 : target
41   //  -- r5                 : new.target
42   //  -- r7                 : entry point
43   //  -- sp[0]              : last argument
44   //  -- ...
45   //  -- sp[4 * (argc - 1)] : first argument
46   //  -- sp[4 * argc]       : receiver
47   // -----------------------------------
48   __ AssertFunction(r3);
49 
50   // Make sure we operate in the context of the called function (for example
51   // ConstructStubs implemented in C++ will be run in the context of the caller
52   // instead of the callee, due to the way that [[Construct]] is defined for
53   // ordinary functions).
54   __ LoadP(cp, FieldMemOperand(r3, JSFunction::kContextOffset));
55 
56   // CEntry expects r2 to contain the number of arguments including the
57   // receiver and the extra arguments.
58   __ AddP(r2, r2,
59           Operand(BuiltinExitFrameConstants::kNumExtraArgsWithReceiver));
60 
61   // Insert extra arguments.
62   __ PushRoot(Heap::kTheHoleValueRootIndex);  // Padding.
63   __ SmiTag(r2);
64   __ Push(r2, r3, r5);
65   __ SmiUntag(r2);
66 
67   // Jump to the C entry runtime stub directly here instead of using
68   // JumpToExternalReference. We have already loaded entry point to r7
69   // in Generate_adaptor.
70   __ LoadRR(r3, r7);
71   Handle<Code> code =
72       CodeFactory::CEntry(masm->isolate(), 1, kDontSaveFPRegs, kArgvOnStack,
73                           exit_frame_type == Builtins::BUILTIN_EXIT);
74   __ Jump(code, RelocInfo::CODE_TARGET);
75 }
76 }  // namespace
77 
Generate_AdaptorWithExitFrame(MacroAssembler * masm)78 void Builtins::Generate_AdaptorWithExitFrame(MacroAssembler* masm) {
79   AdaptorWithExitFrameType(masm, EXIT);
80 }
81 
Generate_AdaptorWithBuiltinExitFrame(MacroAssembler * masm)82 void Builtins::Generate_AdaptorWithBuiltinExitFrame(MacroAssembler* masm) {
83   AdaptorWithExitFrameType(masm, BUILTIN_EXIT);
84 }
85 
86 // Load the built-in Array function from the current context.
GenerateLoadArrayFunction(MacroAssembler * masm,Register result)87 static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
88   // Load the Array function from the current native context.
89   __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, result);
90 }
91 
Generate_InternalArrayConstructor(MacroAssembler * masm)92 void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
93   // ----------- S t a t e -------------
94   //  -- r2     : number of arguments
95   //  -- lr     : return address
96   //  -- sp[...]: constructor arguments
97   // -----------------------------------
98   Label generic_array_code, one_or_more_arguments, two_or_more_arguments;
99 
100   if (FLAG_debug_code) {
101     // Initial map for the builtin InternalArray functions should be maps.
102     __ LoadP(r4, FieldMemOperand(r3, JSFunction::kPrototypeOrInitialMapOffset));
103     __ TestIfSmi(r4);
104     __ Assert(ne, AbortReason::kUnexpectedInitialMapForInternalArrayFunction,
105               cr0);
106     __ CompareObjectType(r4, r5, r6, MAP_TYPE);
107     __ Assert(eq, AbortReason::kUnexpectedInitialMapForInternalArrayFunction);
108   }
109 
110   // Run the native code for the InternalArray function called as a normal
111   // function.
112   // tail call a stub
113   __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
114   InternalArrayConstructorStub stub(masm->isolate());
115   __ TailCallStub(&stub);
116 }
117 
Generate_ArrayConstructor(MacroAssembler * masm)118 void Builtins::Generate_ArrayConstructor(MacroAssembler* masm) {
119   // ----------- S t a t e -------------
120   //  -- r2     : number of arguments
121   //  -- r3     : array function
122   //  -- lr     : return address
123   //  -- sp[...]: constructor arguments
124   // -----------------------------------
125   Label generic_array_code, one_or_more_arguments, two_or_more_arguments;
126 
127   // Get the Array function.
128   GenerateLoadArrayFunction(masm, r3);
129 
130   if (FLAG_debug_code) {
131     // Initial map for the builtin Array functions should be maps.
132     __ LoadP(r4, FieldMemOperand(r3, JSFunction::kPrototypeOrInitialMapOffset));
133     __ TestIfSmi(r4);
134     __ Assert(ne, AbortReason::kUnexpectedInitialMapForArrayFunction, cr0);
135     __ CompareObjectType(r4, r6, r7, MAP_TYPE);
136     __ Assert(eq, AbortReason::kUnexpectedInitialMapForArrayFunction);
137   }
138 
139   // r4 is the AllocationSite - here undefined.
140   __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
141   // If r5 (new target) is undefined, then this is the 'Call' case, so move
142   // r3 (the constructor) to r5.
143   Label call;
144   __ CmpP(r5, r4);
145   __ bne(&call);
146   __ LoadRR(r5, r3);
147 
148   // Run the native code for the Array function called as a normal function.
149   __ bind(&call);
150   ArrayConstructorStub stub(masm->isolate());
151   __ TailCallStub(&stub);
152 }
153 
GenerateTailCallToReturnedCode(MacroAssembler * masm,Runtime::FunctionId function_id)154 static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
155                                            Runtime::FunctionId function_id) {
156   // ----------- S t a t e -------------
157   //  -- r2 : argument count (preserved for callee)
158   //  -- r3 : target function (preserved for callee)
159   //  -- r5 : new target (preserved for callee)
160   // -----------------------------------
161   {
162     FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
163     // Push the number of arguments to the callee.
164     // Push a copy of the target function and the new target.
165     // Push function as parameter to the runtime call.
166     __ SmiTag(r2);
167     __ Push(r2, r3, r5, r3);
168 
169     __ CallRuntime(function_id, 1);
170     __ LoadRR(r4, r2);
171 
172     // Restore target function and new target.
173     __ Pop(r2, r3, r5);
174     __ SmiUntag(r2);
175   }
176   static_assert(kJavaScriptCallCodeStartRegister == r4, "ABI mismatch");
177   __ AddP(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
178   __ JumpToJSEntry(r4);
179 }
180 
181 namespace {
182 
Generate_JSBuiltinsConstructStubHelper(MacroAssembler * masm)183 void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
184   Label post_instantiation_deopt_entry;
185   // ----------- S t a t e -------------
186   //  -- r2     : number of arguments
187   //  -- r3     : constructor function
188   //  -- r5     : new target
189   //  -- cp     : context
190   //  -- lr     : return address
191   //  -- sp[...]: constructor arguments
192   // -----------------------------------
193 
194   // Enter a construct frame.
195   {
196     FrameAndConstantPoolScope scope(masm, StackFrame::CONSTRUCT);
197 
198     // Preserve the incoming parameters on the stack.
199     __ SmiTag(r2);
200     __ Push(cp, r2);
201     __ SmiUntag(r2);
202     // The receiver for the builtin/api call.
203     __ PushRoot(Heap::kTheHoleValueRootIndex);
204     // Set up pointer to last argument.
205     __ la(r6, MemOperand(fp, StandardFrameConstants::kCallerSPOffset));
206 
207     // Copy arguments and receiver to the expression stack.
208     // r2: number of arguments
209     // r3: constructor function
210     // r4: address of last argument (caller sp)
211     // r5: new target
212     // cr0: condition indicating whether r2 is zero
213     // sp[0]: receiver
214     // sp[1]: receiver
215     // sp[2]: number of arguments (smi-tagged)
216     Label loop, no_args;
217     __ beq(&no_args);
218     __ ShiftLeftP(ip, r2, Operand(kPointerSizeLog2));
219     __ SubP(sp, sp, ip);
220     __ LoadRR(r1, r2);
221     __ bind(&loop);
222     __ lay(ip, MemOperand(ip, -kPointerSize));
223     __ LoadP(r0, MemOperand(ip, r6));
224     __ StoreP(r0, MemOperand(ip, sp));
225     __ BranchOnCount(r1, &loop);
226     __ bind(&no_args);
227 
228     // Call the function.
229     // r2: number of arguments
230     // r3: constructor function
231     // r5: new target
232 
233     ParameterCount actual(r2);
234     __ InvokeFunction(r3, r5, actual, CALL_FUNCTION);
235 
236     // Restore context from the frame.
237     __ LoadP(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
238     // Restore smi-tagged arguments count from the frame.
239     __ LoadP(r3, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
240 
241     // Leave construct frame.
242   }
243   // Remove caller arguments from the stack and return.
244   STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
245 
246   __ SmiToPtrArrayOffset(r3, r3);
247   __ AddP(sp, sp, r3);
248   __ AddP(sp, sp, Operand(kPointerSize));
249   __ Ret();
250 }
251 
252 }  // namespace
253 
254 // The construct stub for ES5 constructor functions and ES6 class constructors.
Generate_JSConstructStubGeneric(MacroAssembler * masm)255 void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
256   // ----------- S t a t e -------------
257   //  --      r2: number of arguments (untagged)
258   //  --      r3: constructor function
259   //  --      r5: new target
260   //  --      cp: context
261   //  --      lr: return address
262   //  -- sp[...]: constructor arguments
263   // -----------------------------------
264 
265   // Enter a construct frame.
266   {
267     FrameAndConstantPoolScope scope(masm, StackFrame::CONSTRUCT);
268     Label post_instantiation_deopt_entry, not_create_implicit_receiver;
269 
270     // Preserve the incoming parameters on the stack.
271     __ SmiTag(r2);
272     __ Push(cp, r2, r3);
273     __ PushRoot(Heap::kUndefinedValueRootIndex);
274     __ Push(r5);
275 
276     // ----------- S t a t e -------------
277     //  --        sp[0*kPointerSize]: new target
278     //  --        sp[1*kPointerSize]: padding
279     //  -- r3 and sp[2*kPointerSize]: constructor function
280     //  --        sp[3*kPointerSize]: number of arguments (tagged)
281     //  --        sp[4*kPointerSize]: context
282     // -----------------------------------
283 
284     __ LoadP(r6, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
285     __ LoadlW(r6, FieldMemOperand(r6, SharedFunctionInfo::kFlagsOffset));
286     __ TestBitMask(r6, SharedFunctionInfo::IsDerivedConstructorBit::kMask, r0);
287     __ bne(&not_create_implicit_receiver);
288 
289     // If not derived class constructor: Allocate the new receiver object.
290     __ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1,
291                         r6, r7);
292     __ Call(BUILTIN_CODE(masm->isolate(), FastNewObject),
293             RelocInfo::CODE_TARGET);
294     __ b(&post_instantiation_deopt_entry);
295 
296     // Else: use TheHoleValue as receiver for constructor call
297     __ bind(&not_create_implicit_receiver);
298     __ LoadRoot(r2, Heap::kTheHoleValueRootIndex);
299 
300     // ----------- S t a t e -------------
301     //  --                          r2: receiver
302     //  -- Slot 4 / sp[0*kPointerSize]: new target
303     //  -- Slot 3 / sp[1*kPointerSize]: padding
304     //  -- Slot 2 / sp[2*kPointerSize]: constructor function
305     //  -- Slot 1 / sp[3*kPointerSize]: number of arguments (tagged)
306     //  -- Slot 0 / sp[4*kPointerSize]: context
307     // -----------------------------------
308     // Deoptimizer enters here.
309     masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset(
310         masm->pc_offset());
311     __ bind(&post_instantiation_deopt_entry);
312 
313     // Restore new target.
314     __ Pop(r5);
315     // Push the allocated receiver to the stack. We need two copies
316     // because we may have to return the original one and the calling
317     // conventions dictate that the called function pops the receiver.
318     __ Push(r2, r2);
319 
320     // ----------- S t a t e -------------
321     //  --                 r5: new target
322     //  -- sp[0*kPointerSize]: implicit receiver
323     //  -- sp[1*kPointerSize]: implicit receiver
324     //  -- sp[2*kPointerSize]: padding
325     //  -- sp[3*kPointerSize]: constructor function
326     //  -- sp[4*kPointerSize]: number of arguments (tagged)
327     //  -- sp[5*kPointerSize]: context
328     // -----------------------------------
329 
330     // Restore constructor function and argument count.
331     __ LoadP(r3, MemOperand(fp, ConstructFrameConstants::kConstructorOffset));
332     __ LoadP(r2, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
333     __ SmiUntag(r2);
334 
335     // Set up pointer to last argument.
336     __ la(r6, MemOperand(fp, StandardFrameConstants::kCallerSPOffset));
337 
338     // Copy arguments and receiver to the expression stack.
339     Label loop, no_args;
340     // ----------- S t a t e -------------
341     //  --                        r2: number of arguments (untagged)
342     //  --                        r5: new target
343     //  --                        r6: pointer to last argument
344     //  --                        cr0: condition indicating whether r2 is zero
345     //  --        sp[0*kPointerSize]: implicit receiver
346     //  --        sp[1*kPointerSize]: implicit receiver
347     //  --        sp[2*kPointerSize]: padding
348     //  -- r3 and sp[3*kPointerSize]: constructor function
349     //  --        sp[4*kPointerSize]: number of arguments (tagged)
350     //  --        sp[5*kPointerSize]: context
351     // -----------------------------------
352 
353     __ beq(&no_args);
354     __ ShiftLeftP(ip, r2, Operand(kPointerSizeLog2));
355     __ SubP(sp, sp, ip);
356     __ LoadRR(r1, r2);
357     __ bind(&loop);
358     __ lay(ip, MemOperand(ip, -kPointerSize));
359     __ LoadP(r0, MemOperand(ip, r6));
360     __ StoreP(r0, MemOperand(ip, sp));
361     __ BranchOnCount(r1, &loop);
362     __ bind(&no_args);
363 
364     // Call the function.
365     ParameterCount actual(r2);
366     __ InvokeFunction(r3, r5, actual, CALL_FUNCTION);
367 
368     // ----------- S t a t e -------------
369     //  --                 r0: constructor result
370     //  -- sp[0*kPointerSize]: implicit receiver
371     //  -- sp[1*kPointerSize]: padding
372     //  -- sp[2*kPointerSize]: constructor function
373     //  -- sp[3*kPointerSize]: number of arguments
374     //  -- sp[4*kPointerSize]: context
375     // -----------------------------------
376 
377     // Store offset of return address for deoptimizer.
378     masm->isolate()->heap()->SetConstructStubInvokeDeoptPCOffset(
379         masm->pc_offset());
380 
381     // Restore the context from the frame.
382     __ LoadP(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
383 
384     // If the result is an object (in the ECMA sense), we should get rid
385     // of the receiver and use the result; see ECMA-262 section 13.2.2-7
386     // on page 74.
387     Label use_receiver, do_throw, leave_frame;
388 
389     // If the result is undefined, we jump out to using the implicit receiver.
390     __ JumpIfRoot(r2, Heap::kUndefinedValueRootIndex, &use_receiver);
391 
392     // Otherwise we do a smi check and fall through to check if the return value
393     // is a valid receiver.
394 
395     // If the result is a smi, it is *not* an object in the ECMA sense.
396     __ JumpIfSmi(r2, &use_receiver);
397 
398     // If the type of the result (stored in its map) is less than
399     // FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense.
400     STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
401     __ CompareObjectType(r2, r6, r6, FIRST_JS_RECEIVER_TYPE);
402     __ bge(&leave_frame);
403     __ b(&use_receiver);
404 
405     __ bind(&do_throw);
406     __ CallRuntime(Runtime::kThrowConstructorReturnedNonObject);
407 
408     // Throw away the result of the constructor invocation and use the
409     // on-stack receiver as the result.
410     __ bind(&use_receiver);
411     __ LoadP(r2, MemOperand(sp));
412     __ JumpIfRoot(r2, Heap::kTheHoleValueRootIndex, &do_throw);
413 
414     __ bind(&leave_frame);
415     // Restore smi-tagged arguments count from the frame.
416     __ LoadP(r3, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
417     // Leave construct frame.
418   }
419 
420   // Remove caller arguments from the stack and return.
421   STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
422 
423   __ SmiToPtrArrayOffset(r3, r3);
424   __ AddP(sp, sp, r3);
425   __ AddP(sp, sp, Operand(kPointerSize));
426   __ Ret();
427 }
428 
Generate_JSBuiltinsConstructStub(MacroAssembler * masm)429 void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
430   Generate_JSBuiltinsConstructStubHelper(masm);
431 }
432 
GetSharedFunctionInfoBytecode(MacroAssembler * masm,Register sfi_data,Register scratch1)433 static void GetSharedFunctionInfoBytecode(MacroAssembler* masm,
434                                           Register sfi_data,
435                                           Register scratch1) {
436   Label done;
437 
438   __ CompareObjectType(sfi_data, scratch1, scratch1, INTERPRETER_DATA_TYPE);
439   __ bne(&done, Label::kNear);
440   __ LoadP(sfi_data,
441            FieldMemOperand(sfi_data, InterpreterData::kBytecodeArrayOffset));
442   __ bind(&done);
443 }
444 
445 // static
Generate_ResumeGeneratorTrampoline(MacroAssembler * masm)446 void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
447   // ----------- S t a t e -------------
448   //  -- r2 : the value to pass to the generator
449   //  -- r3 : the JSGeneratorObject to resume
450   //  -- lr : return address
451   // -----------------------------------
452   __ AssertGeneratorObject(r3);
453 
454   // Store input value into generator object.
455   __ StoreP(r2, FieldMemOperand(r3, JSGeneratorObject::kInputOrDebugPosOffset),
456             r0);
457   __ RecordWriteField(r3, JSGeneratorObject::kInputOrDebugPosOffset, r2, r5,
458                       kLRHasNotBeenSaved, kDontSaveFPRegs);
459 
460   // Load suspended function and context.
461   __ LoadP(r6, FieldMemOperand(r3, JSGeneratorObject::kFunctionOffset));
462   __ LoadP(cp, FieldMemOperand(r6, JSFunction::kContextOffset));
463 
464   // Flood function if we are stepping.
465   Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
466   Label stepping_prepared;
467   ExternalReference debug_hook =
468       ExternalReference::debug_hook_on_function_call_address(masm->isolate());
469   __ Move(ip, debug_hook);
470   __ LoadB(ip, MemOperand(ip));
471   __ CmpSmiLiteral(ip, Smi::kZero, r0);
472   __ bne(&prepare_step_in_if_stepping);
473 
474   // Flood function if we need to continue stepping in the suspended generator.
475 
476   ExternalReference debug_suspended_generator =
477       ExternalReference::debug_suspended_generator_address(masm->isolate());
478 
479   __ Move(ip, debug_suspended_generator);
480   __ LoadP(ip, MemOperand(ip));
481   __ CmpP(ip, r3);
482   __ beq(&prepare_step_in_suspended_generator);
483   __ bind(&stepping_prepared);
484 
485   // Check the stack for overflow. We are not trying to catch interruptions
486   // (i.e. debug break and preemption) here, so check the "real stack limit".
487   Label stack_overflow;
488   __ CompareRoot(sp, Heap::kRealStackLimitRootIndex);
489   __ blt(&stack_overflow);
490 
491   // Push receiver.
492   __ LoadP(ip, FieldMemOperand(r3, JSGeneratorObject::kReceiverOffset));
493   __ Push(ip);
494 
495   // ----------- S t a t e -------------
496   //  -- r3    : the JSGeneratorObject to resume
497   //  -- r6    : generator function
498   //  -- cp    : generator context
499   //  -- lr    : return address
500   //  -- sp[0] : generator receiver
501   // -----------------------------------
502 
503   // Push holes for arguments to generator function. Since the parser forced
504   // context allocation for any variables in generators, the actual argument
505   // values have already been copied into the context and these dummy values
506   // will never be used.
507   __ LoadP(r5, FieldMemOperand(r6, JSFunction::kSharedFunctionInfoOffset));
508   __ LoadW(
509       r2, FieldMemOperand(r5, SharedFunctionInfo::kFormalParameterCountOffset));
510   {
511     Label loop, done_loop;
512     __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
513 #if V8_TARGET_ARCH_S390X
514     __ CmpP(r2, Operand::Zero());
515     __ beq(&done_loop);
516 #else
517     __ LoadAndTestP(r2, r2);
518     __ beq(&done_loop);
519 #endif
520     __ LoadRR(r1, r2);
521     __ bind(&loop);
522     __ push(ip);
523     __ BranchOnCount(r1, &loop);
524     __ bind(&done_loop);
525   }
526 
527   // Underlying function needs to have bytecode available.
528   if (FLAG_debug_code) {
529     __ LoadP(r5, FieldMemOperand(r6, JSFunction::kSharedFunctionInfoOffset));
530     __ LoadP(r5, FieldMemOperand(r5, SharedFunctionInfo::kFunctionDataOffset));
531     GetSharedFunctionInfoBytecode(masm, r5, r1);
532     __ CompareObjectType(r5, r5, r5, BYTECODE_ARRAY_TYPE);
533     __ Assert(eq, AbortReason::kMissingBytecodeArray);
534   }
535 
536   // Resume (Ignition/TurboFan) generator object.
537   {
538     // We abuse new.target both to indicate that this is a resume call and to
539     // pass in the generator object.  In ordinary calls, new.target is always
540     // undefined because generator functions are non-constructable.
541     __ LoadRR(r5, r3);
542     __ LoadRR(r3, r6);
543     static_assert(kJavaScriptCallCodeStartRegister == r4, "ABI mismatch");
544     __ LoadP(r4, FieldMemOperand(r3, JSFunction::kCodeOffset));
545     __ AddP(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
546     __ JumpToJSEntry(r4);
547   }
548 
549   __ bind(&prepare_step_in_if_stepping);
550   {
551     FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
552     __ Push(r3, r6);
553     // Push hole as receiver since we do not use it for stepping.
554     __ PushRoot(Heap::kTheHoleValueRootIndex);
555     __ CallRuntime(Runtime::kDebugOnFunctionCall);
556     __ Pop(r3);
557     __ LoadP(r6, FieldMemOperand(r3, JSGeneratorObject::kFunctionOffset));
558   }
559   __ b(&stepping_prepared);
560 
561   __ bind(&prepare_step_in_suspended_generator);
562   {
563     FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
564     __ Push(r3);
565     __ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator);
566     __ Pop(r3);
567     __ LoadP(r6, FieldMemOperand(r3, JSGeneratorObject::kFunctionOffset));
568   }
569   __ b(&stepping_prepared);
570 
571   __ bind(&stack_overflow);
572   {
573     FrameScope scope(masm, StackFrame::INTERNAL);
574     __ CallRuntime(Runtime::kThrowStackOverflow);
575     __ bkpt(0);  // This should be unreachable.
576   }
577 }
578 
Generate_ConstructedNonConstructable(MacroAssembler * masm)579 void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
580   FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
581   __ push(r3);
582   __ CallRuntime(Runtime::kThrowConstructedNonConstructable);
583 }
584 
585 // Clobbers r4; preserves all other registers.
Generate_CheckStackOverflow(MacroAssembler * masm,Register argc)586 static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc) {
587   // Check the stack for overflow. We are not trying to catch
588   // interruptions (e.g. debug break and preemption) here, so the "real stack
589   // limit" is checked.
590   Label okay;
591   __ LoadRoot(r4, Heap::kRealStackLimitRootIndex);
592   // Make r4 the space we have left. The stack might already be overflowed
593   // here which will cause r4 to become negative.
594   __ SubP(r4, sp, r4);
595   // Check if the arguments will overflow the stack.
596   __ ShiftLeftP(r0, argc, Operand(kPointerSizeLog2));
597   __ CmpP(r4, r0);
598   __ bgt(&okay);  // Signed comparison.
599 
600   // Out of stack space.
601   __ CallRuntime(Runtime::kThrowStackOverflow);
602 
603   __ bind(&okay);
604 }
605 
Generate_JSEntryTrampolineHelper(MacroAssembler * masm,bool is_construct)606 static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
607                                              bool is_construct) {
608   // Called from Generate_JS_Entry
609   // r2: new.target
610   // r3: function
611   // r4: receiver
612   // r5: argc
613   // r6: argv
614   // r0,r7-r9, cp may be clobbered
615   ProfileEntryHookStub::MaybeCallEntryHook(masm);
616 
617   // Enter an internal frame.
618   {
619     // FrameScope ends up calling MacroAssembler::EnterFrame here
620     FrameScope scope(masm, StackFrame::INTERNAL);
621 
622     // Setup the context (we need to use the caller context from the isolate).
623     ExternalReference context_address = ExternalReference::Create(
624         IsolateAddressId::kContextAddress, masm->isolate());
625     __ Move(cp, context_address);
626     __ LoadP(cp, MemOperand(cp));
627 
628     // Push the function and the receiver onto the stack.
629     __ Push(r3, r4);
630 
631     // Check if we have enough stack space to push all arguments.
632     // Clobbers r4.
633     Generate_CheckStackOverflow(masm, r5);
634 
635     // Copy arguments to the stack in a loop from argv to sp.
636     // The arguments are actually placed in reverse order on sp
637     // compared to argv (i.e. arg1 is highest memory in sp).
638     // r3: function
639     // r5: argc
640     // r6: argv, i.e. points to first arg
641     // r7: scratch reg to hold scaled argc
642     // r8: scratch reg to hold arg handle
643     // r9: scratch reg to hold index into argv
644     Label argLoop, argExit;
645     intptr_t zero = 0;
646     __ ShiftLeftP(r7, r5, Operand(kPointerSizeLog2));
647     __ SubRR(sp, r7);                // Buy the stack frame to fit args
648     __ LoadImmP(r9, Operand(zero));  // Initialize argv index
649     __ bind(&argLoop);
650     __ CmpPH(r7, Operand(zero));
651     __ beq(&argExit, Label::kNear);
652     __ lay(r7, MemOperand(r7, -kPointerSize));
653     __ LoadP(r8, MemOperand(r9, r6));         // read next parameter
654     __ la(r9, MemOperand(r9, kPointerSize));  // r9++;
655     __ LoadP(r0, MemOperand(r8));             // dereference handle
656     __ StoreP(r0, MemOperand(r7, sp));        // push parameter
657     __ b(&argLoop);
658     __ bind(&argExit);
659 
660     // Setup new.target and argc.
661     __ LoadRR(r6, r2);
662     __ LoadRR(r2, r5);
663     __ LoadRR(r5, r6);
664 
665     // Initialize all JavaScript callee-saved registers, since they will be seen
666     // by the garbage collector as part of handlers.
667     __ LoadRoot(r6, Heap::kUndefinedValueRootIndex);
668     __ LoadRR(r7, r6);
669     __ LoadRR(r8, r6);
670     __ LoadRR(r9, r6);
671 
672     // Invoke the code.
673     Handle<Code> builtin = is_construct
674                                ? BUILTIN_CODE(masm->isolate(), Construct)
675                                : masm->isolate()->builtins()->Call();
676     __ Call(builtin, RelocInfo::CODE_TARGET);
677 
678     // Exit the JS frame and remove the parameters (except function), and
679     // return.
680   }
681   __ b(r14);
682 
683   // r2: result
684 }
685 
Generate_JSEntryTrampoline(MacroAssembler * masm)686 void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
687   Generate_JSEntryTrampolineHelper(masm, false);
688 }
689 
Generate_JSConstructEntryTrampoline(MacroAssembler * masm)690 void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
691   Generate_JSEntryTrampolineHelper(masm, true);
692 }
693 
ReplaceClosureCodeWithOptimizedCode(MacroAssembler * masm,Register optimized_code,Register closure,Register scratch1,Register scratch2,Register scratch3)694 static void ReplaceClosureCodeWithOptimizedCode(
695     MacroAssembler* masm, Register optimized_code, Register closure,
696     Register scratch1, Register scratch2, Register scratch3) {
697   // Store code entry in the closure.
698   __ StoreP(optimized_code, FieldMemOperand(closure, JSFunction::kCodeOffset),
699             r0);
700   __ LoadRR(scratch1,
701             optimized_code);  // Write barrier clobbers scratch1 below.
702   __ RecordWriteField(closure, JSFunction::kCodeOffset, scratch1, scratch2,
703                       kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
704                       OMIT_SMI_CHECK);
705 }
706 
LeaveInterpreterFrame(MacroAssembler * masm,Register scratch)707 static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
708   Register args_count = scratch;
709 
710   // Get the arguments + receiver count.
711   __ LoadP(args_count,
712            MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
713   __ LoadlW(args_count,
714             FieldMemOperand(args_count, BytecodeArray::kParameterSizeOffset));
715 
716   // Leave the frame (also dropping the register file).
717   __ LeaveFrame(StackFrame::INTERPRETED);
718 
719   __ AddP(sp, sp, args_count);
720 }
721 
722 // Tail-call |function_id| if |smi_entry| == |marker|
TailCallRuntimeIfMarkerEquals(MacroAssembler * masm,Register smi_entry,OptimizationMarker marker,Runtime::FunctionId function_id)723 static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
724                                           Register smi_entry,
725                                           OptimizationMarker marker,
726                                           Runtime::FunctionId function_id) {
727   Label no_match;
728   __ CmpSmiLiteral(smi_entry, Smi::FromEnum(marker), r0);
729   __ bne(&no_match);
730   GenerateTailCallToReturnedCode(masm, function_id);
731   __ bind(&no_match);
732 }
733 
MaybeTailCallOptimizedCodeSlot(MacroAssembler * masm,Register feedback_vector,Register scratch1,Register scratch2,Register scratch3)734 static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
735                                            Register feedback_vector,
736                                            Register scratch1, Register scratch2,
737                                            Register scratch3) {
738   // ----------- S t a t e -------------
739   //  -- r0 : argument count (preserved for callee if needed, and caller)
740   //  -- r3 : new target (preserved for callee if needed, and caller)
741   //  -- r1 : target function (preserved for callee if needed, and caller)
742   //  -- feedback vector (preserved for caller if needed)
743   // -----------------------------------
744   DCHECK(
745       !AreAliased(feedback_vector, r2, r3, r5, scratch1, scratch2, scratch3));
746 
747   Label optimized_code_slot_is_weak_ref, fallthrough;
748 
749   Register closure = r3;
750   Register optimized_code_entry = scratch1;
751 
752   __ LoadP(
753       optimized_code_entry,
754       FieldMemOperand(feedback_vector, FeedbackVector::kOptimizedCodeOffset));
755 
756   // Check if the code entry is a Smi. If yes, we interpret it as an
757   // optimisation marker. Otherwise, interpret it as a weak reference to a code
758   // object.
759   __ JumpIfNotSmi(optimized_code_entry, &optimized_code_slot_is_weak_ref);
760 
761   {
762     // Optimized code slot is a Smi optimization marker.
763 
764     // Fall through if no optimization trigger.
765     __ CmpSmiLiteral(optimized_code_entry,
766                      Smi::FromEnum(OptimizationMarker::kNone), r0);
767     __ beq(&fallthrough);
768 
769     TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
770                                   OptimizationMarker::kLogFirstExecution,
771                                   Runtime::kFunctionFirstExecution);
772     TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
773                                   OptimizationMarker::kCompileOptimized,
774                                   Runtime::kCompileOptimized_NotConcurrent);
775     TailCallRuntimeIfMarkerEquals(
776         masm, optimized_code_entry,
777         OptimizationMarker::kCompileOptimizedConcurrent,
778         Runtime::kCompileOptimized_Concurrent);
779 
780     {
781       // Otherwise, the marker is InOptimizationQueue, so fall through hoping
782       // that an interrupt will eventually update the slot with optimized code.
783       if (FLAG_debug_code) {
784         __ CmpSmiLiteral(
785             optimized_code_entry,
786             Smi::FromEnum(OptimizationMarker::kInOptimizationQueue), r0);
787         __ Assert(eq, AbortReason::kExpectedOptimizationSentinel);
788       }
789       __ b(&fallthrough, Label::kNear);
790     }
791   }
792 
793   {
794     // Optimized code slot is a weak reference.
795     __ bind(&optimized_code_slot_is_weak_ref);
796 
797     __ LoadWeakValue(optimized_code_entry, optimized_code_entry, &fallthrough);
798 
799     // Check if the optimized code is marked for deopt. If it is, call the
800     // runtime to clear it.
801     Label found_deoptimized_code;
802     __ LoadP(scratch2, FieldMemOperand(optimized_code_entry,
803                                        Code::kCodeDataContainerOffset));
804     __ LoadW(
805         scratch2,
806         FieldMemOperand(scratch2, CodeDataContainer::kKindSpecificFlagsOffset));
807     __ TestBit(scratch2, Code::kMarkedForDeoptimizationBit, r0);
808     __ bne(&found_deoptimized_code);
809 
810     // Optimized code is good, get it into the closure and link the closure into
811     // the optimized functions list, then tail call the optimized code.
812     // The feedback vector is no longer used, so re-use it as a scratch
813     // register.
814     ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure,
815                                         scratch2, scratch3, feedback_vector);
816     static_assert(kJavaScriptCallCodeStartRegister == r4, "ABI mismatch");
817     __ AddP(r4, optimized_code_entry,
818             Operand(Code::kHeaderSize - kHeapObjectTag));
819     __ Jump(r4);
820 
821     // Optimized code slot contains deoptimized code, evict it and re-enter the
822     // closure's code.
823     __ bind(&found_deoptimized_code);
824     GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
825   }
826 
827   // Fall-through if the optimized code cell is clear and there is no
828   // optimization marker.
829   __ bind(&fallthrough);
830 }
831 
832 // Advance the current bytecode offset. This simulates what all bytecode
833 // handlers do upon completion of the underlying operation. Will bail out to a
834 // label if the bytecode (without prefix) is a return bytecode.
AdvanceBytecodeOffsetOrReturn(MacroAssembler * masm,Register bytecode_array,Register bytecode_offset,Register bytecode,Register scratch1,Label * if_return)835 static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
836                                           Register bytecode_array,
837                                           Register bytecode_offset,
838                                           Register bytecode, Register scratch1,
839                                           Label* if_return) {
840   Register bytecode_size_table = scratch1;
841   Register scratch2 = bytecode;
842   DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode_size_table,
843                      bytecode));
844   __ Move(bytecode_size_table,
845           ExternalReference::bytecode_size_table_address());
846 
847   // Check if the bytecode is a Wide or ExtraWide prefix bytecode.
848   Label process_bytecode, extra_wide;
849   STATIC_ASSERT(0 == static_cast<int>(interpreter::Bytecode::kWide));
850   STATIC_ASSERT(1 == static_cast<int>(interpreter::Bytecode::kExtraWide));
851   STATIC_ASSERT(2 == static_cast<int>(interpreter::Bytecode::kDebugBreakWide));
852   STATIC_ASSERT(3 ==
853                 static_cast<int>(interpreter::Bytecode::kDebugBreakExtraWide));
854   __ CmpP(bytecode, Operand(0x3));
855   __ bgt(&process_bytecode);
856   __ tmll(bytecode, Operand(0x1));
857   __ bne(&extra_wide);
858 
859   // Load the next bytecode and update table to the wide scaled table.
860   __ AddP(bytecode_offset, bytecode_offset, Operand(1));
861   __ LoadlB(bytecode, MemOperand(bytecode_array, bytecode_offset));
862   __ AddP(bytecode_size_table, bytecode_size_table,
863           Operand(kIntSize * interpreter::Bytecodes::kBytecodeCount));
864   __ b(&process_bytecode);
865 
866   __ bind(&extra_wide);
867   // Load the next bytecode and update table to the extra wide scaled table.
868   __ AddP(bytecode_offset, bytecode_offset, Operand(1));
869   __ LoadlB(bytecode, MemOperand(bytecode_array, bytecode_offset));
870   __ AddP(bytecode_size_table, bytecode_size_table,
871           Operand(2 * kIntSize * interpreter::Bytecodes::kBytecodeCount));
872 
873   // Load the size of the current bytecode.
874   __ bind(&process_bytecode);
875 
876 // Bailout to the return label if this is a return bytecode.
877 #define JUMP_IF_EQUAL(NAME)                                           \
878   __ CmpP(bytecode,                                                   \
879           Operand(static_cast<int>(interpreter::Bytecode::k##NAME))); \
880   __ beq(if_return);
881   RETURN_BYTECODE_LIST(JUMP_IF_EQUAL)
882 #undef JUMP_IF_EQUAL
883 
884   // Otherwise, load the size of the current bytecode and advance the offset.
885   __ ShiftLeftP(scratch2, bytecode, Operand(2));
886   __ LoadlW(scratch2, MemOperand(bytecode_size_table, scratch2));
887   __ AddP(bytecode_offset, bytecode_offset, scratch2);
888 }
889 
890 // Generate code for entering a JS function with the interpreter.
891 // On entry to the function the receiver and arguments have been pushed on the
892 // stack left to right.  The actual argument count matches the formal parameter
893 // count expected by the function.
894 //
895 // The live registers are:
896 //   o r3: the JS function object being called.
897 //   o r5: the incoming new target or generator object
898 //   o cp: our context
899 //   o pp: the caller's constant pool pointer (if enabled)
900 //   o fp: the caller's frame pointer
901 //   o sp: stack pointer
902 //   o lr: return address
903 //
904 // The function builds an interpreter frame.  See InterpreterFrameConstants in
905 // frames.h for its layout.
Generate_InterpreterEntryTrampoline(MacroAssembler * masm)906 void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
907   ProfileEntryHookStub::MaybeCallEntryHook(masm);
908 
909   Register closure = r3;
910   Register feedback_vector = r4;
911 
912   // Load the feedback vector from the closure.
913   __ LoadP(feedback_vector,
914            FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
915   __ LoadP(feedback_vector,
916            FieldMemOperand(feedback_vector, Cell::kValueOffset));
917   // Read off the optimized code slot in the feedback vector, and if there
918   // is optimized code or an optimization marker, call that instead.
919   MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, r6, r8, r7);
920 
921   // Open a frame scope to indicate that there is a frame on the stack.  The
922   // MANUAL indicates that the scope shouldn't actually generate code to set up
923   // the frame (that is done below).
924   FrameScope frame_scope(masm, StackFrame::MANUAL);
925   __ PushStandardFrame(closure);
926 
927   // Get the bytecode array from the function object (or from the DebugInfo if
928   // it is present) and load it into kInterpreterBytecodeArrayRegister.
929   Label maybe_load_debug_bytecode_array, bytecode_array_loaded;
930   __ LoadP(r2, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
931   // Load original bytecode array or the debug copy.
932   __ LoadP(kInterpreterBytecodeArrayRegister,
933            FieldMemOperand(r2, SharedFunctionInfo::kFunctionDataOffset));
934   GetSharedFunctionInfoBytecode(masm, kInterpreterBytecodeArrayRegister, r6);
935   __ LoadP(r6, FieldMemOperand(r2, SharedFunctionInfo::kDebugInfoOffset));
936   __ TestIfSmi(r6);
937   __ bne(&maybe_load_debug_bytecode_array);
938   __ bind(&bytecode_array_loaded);
939 
940   // Increment invocation count for the function.
941   __ LoadW(r1, FieldMemOperand(feedback_vector,
942                                FeedbackVector::kInvocationCountOffset));
943   __ AddP(r1, r1, Operand(1));
944   __ StoreW(r1, FieldMemOperand(feedback_vector,
945                                 FeedbackVector::kInvocationCountOffset));
946 
947   // Check function data field is actually a BytecodeArray object.
948   if (FLAG_debug_code) {
949     __ TestIfSmi(kInterpreterBytecodeArrayRegister);
950     __ Assert(
951         ne, AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
952     __ CompareObjectType(kInterpreterBytecodeArrayRegister, r2, no_reg,
953                          BYTECODE_ARRAY_TYPE);
954     __ Assert(
955         eq, AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
956   }
957 
958   // Reset code age.
959   __ mov(r1, Operand(BytecodeArray::kNoAgeBytecodeAge));
960   __ StoreByte(r1, FieldMemOperand(kInterpreterBytecodeArrayRegister,
961                                    BytecodeArray::kBytecodeAgeOffset),
962                r0);
963 
964   // Load the initial bytecode offset.
965   __ mov(kInterpreterBytecodeOffsetRegister,
966          Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
967 
968   // Push bytecode array and Smi tagged bytecode array offset.
969   __ SmiTag(r4, kInterpreterBytecodeOffsetRegister);
970   __ Push(kInterpreterBytecodeArrayRegister, r4);
971 
972   // Allocate the local and temporary register file on the stack.
973   {
974     // Load frame size (word) from the BytecodeArray object.
975     __ LoadlW(r4, FieldMemOperand(kInterpreterBytecodeArrayRegister,
976                                   BytecodeArray::kFrameSizeOffset));
977 
978     // Do a stack check to ensure we don't go over the limit.
979     Label ok;
980     __ SubP(r8, sp, r4);
981     __ LoadRoot(r0, Heap::kRealStackLimitRootIndex);
982     __ CmpLogicalP(r8, r0);
983     __ bge(&ok);
984     __ CallRuntime(Runtime::kThrowStackOverflow);
985     __ bind(&ok);
986 
987     // If ok, push undefined as the initial value for all register file entries.
988     // TODO(rmcilroy): Consider doing more than one push per loop iteration.
989     Label loop, no_args;
990     __ LoadRoot(r8, Heap::kUndefinedValueRootIndex);
991     __ ShiftRightP(r4, r4, Operand(kPointerSizeLog2));
992     __ LoadAndTestP(r4, r4);
993     __ beq(&no_args);
994     __ LoadRR(r1, r4);
995     __ bind(&loop);
996     __ push(r8);
997     __ SubP(r1, Operand(1));
998     __ bne(&loop);
999     __ bind(&no_args);
1000   }
1001 
1002   // If the bytecode array has a valid incoming new target or generator object
1003   // register, initialize it with incoming value which was passed in r6.
1004   Label no_incoming_new_target_or_generator_register;
1005   __ LoadW(r8, FieldMemOperand(
1006                    kInterpreterBytecodeArrayRegister,
1007                    BytecodeArray::kIncomingNewTargetOrGeneratorRegisterOffset));
1008   __ CmpP(r8, Operand::Zero());
1009   __ beq(&no_incoming_new_target_or_generator_register);
1010   __ ShiftLeftP(r8, r8, Operand(kPointerSizeLog2));
1011   __ StoreP(r5, MemOperand(fp, r8));
1012   __ bind(&no_incoming_new_target_or_generator_register);
1013 
1014   // Load accumulator with undefined.
1015   __ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex);
1016   // Load the dispatch table into a register and dispatch to the bytecode
1017   // handler at the current bytecode offset.
1018   Label do_dispatch;
1019   __ bind(&do_dispatch);
1020   __ mov(kInterpreterDispatchTableRegister,
1021          Operand(ExternalReference::interpreter_dispatch_table_address(
1022              masm->isolate())));
1023 
1024   __ LoadlB(r5, MemOperand(kInterpreterBytecodeArrayRegister,
1025                            kInterpreterBytecodeOffsetRegister));
1026   __ ShiftLeftP(r5, r5, Operand(kPointerSizeLog2));
1027   __ LoadP(kJavaScriptCallCodeStartRegister,
1028            MemOperand(kInterpreterDispatchTableRegister, r5));
1029   __ Call(kJavaScriptCallCodeStartRegister);
1030 
1031   masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
1032 
1033   // Any returns to the entry trampoline are either due to the return bytecode
1034   // or the interpreter tail calling a builtin and then a dispatch.
1035 
1036   // Get bytecode array and bytecode offset from the stack frame.
1037   __ LoadP(kInterpreterBytecodeArrayRegister,
1038            MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
1039   __ LoadP(kInterpreterBytecodeOffsetRegister,
1040            MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
1041   __ SmiUntag(kInterpreterBytecodeOffsetRegister);
1042 
1043   // Either return, or advance to the next bytecode and dispatch.
1044   Label do_return;
1045   __ LoadlB(r3, MemOperand(kInterpreterBytecodeArrayRegister,
1046                            kInterpreterBytecodeOffsetRegister));
1047   AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
1048                                 kInterpreterBytecodeOffsetRegister, r3, r4,
1049                                 &do_return);
1050   __ b(&do_dispatch);
1051 
1052   __ bind(&do_return);
1053   // The return value is in r2.
1054   LeaveInterpreterFrame(masm, r4);
1055   __ Ret();
1056 
1057   // Load debug copy of the bytecode array if it exists.
1058   // kInterpreterBytecodeArrayRegister is already loaded with
1059   // SharedFunctionInfo::kFunctionDataOffset.
1060   __ bind(&maybe_load_debug_bytecode_array);
1061   __ LoadP(ip, FieldMemOperand(r6, DebugInfo::kDebugBytecodeArrayOffset));
1062   __ JumpIfRoot(ip, Heap::kUndefinedValueRootIndex, &bytecode_array_loaded);
1063 
1064   __ LoadRR(kInterpreterBytecodeArrayRegister, ip);
1065   __ LoadP(ip, FieldMemOperand(r6, DebugInfo::kFlagsOffset));
1066   __ SmiUntag(ip);
1067   __ AndP(ip, ip, Operand(DebugInfo::kDebugExecutionMode));
1068 
1069   ExternalReference debug_execution_mode =
1070       ExternalReference::debug_execution_mode_address(masm->isolate());
1071   __ mov(r6, Operand(debug_execution_mode));
1072   __ LoadW(r6, MemOperand(r6));
1073   STATIC_ASSERT(static_cast<int>(DebugInfo::kDebugExecutionMode) ==
1074                 static_cast<int>(DebugInfo::kSideEffects));
1075   __ CmpP(r6, ip);
1076   __ beq(&bytecode_array_loaded);
1077 
1078   __ Push(closure, feedback_vector, kInterpreterBytecodeArrayRegister, closure);
1079   __ CallRuntime(Runtime::kDebugApplyInstrumentation);
1080   __ Pop(closure, feedback_vector, kInterpreterBytecodeArrayRegister);
1081   __ b(&bytecode_array_loaded);
1082 }
1083 
Generate_StackOverflowCheck(MacroAssembler * masm,Register num_args,Register scratch,Label * stack_overflow)1084 static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
1085                                         Register scratch,
1086                                         Label* stack_overflow) {
1087   // Check the stack for overflow. We are not trying to catch
1088   // interruptions (e.g. debug break and preemption) here, so the "real stack
1089   // limit" is checked.
1090   __ LoadRoot(scratch, Heap::kRealStackLimitRootIndex);
1091   // Make scratch the space we have left. The stack might already be overflowed
1092   // here which will cause scratch to become negative.
1093   __ SubP(scratch, sp, scratch);
1094   // Check if the arguments will overflow the stack.
1095   __ ShiftLeftP(r0, num_args, Operand(kPointerSizeLog2));
1096   __ CmpP(scratch, r0);
1097   __ ble(stack_overflow);  // Signed comparison.
1098 }
1099 
Generate_InterpreterPushArgs(MacroAssembler * masm,Register num_args,Register index,Register count,Register scratch)1100 static void Generate_InterpreterPushArgs(MacroAssembler* masm,
1101                                          Register num_args, Register index,
1102                                          Register count, Register scratch) {
1103   Label loop, skip;
1104   __ CmpP(count, Operand::Zero());
1105   __ beq(&skip);
1106   __ AddP(index, index, Operand(kPointerSize));  // Bias up for LoadPU
1107   __ LoadRR(r0, count);
1108   __ bind(&loop);
1109   __ LoadP(scratch, MemOperand(index, -kPointerSize));
1110   __ lay(index, MemOperand(index, -kPointerSize));
1111   __ push(scratch);
1112   __ SubP(r0, Operand(1));
1113   __ bne(&loop);
1114   __ bind(&skip);
1115 }
1116 
1117 // static
Generate_InterpreterPushArgsThenCallImpl(MacroAssembler * masm,ConvertReceiverMode receiver_mode,InterpreterPushArgsMode mode)1118 void Builtins::Generate_InterpreterPushArgsThenCallImpl(
1119     MacroAssembler* masm, ConvertReceiverMode receiver_mode,
1120     InterpreterPushArgsMode mode) {
1121   DCHECK(mode != InterpreterPushArgsMode::kArrayFunction);
1122   // ----------- S t a t e -------------
1123   //  -- r2 : the number of arguments (not including the receiver)
1124   //  -- r4 : the address of the first argument to be pushed. Subsequent
1125   //          arguments should be consecutive above this, in the same order as
1126   //          they are to be pushed onto the stack.
1127   //  -- r3 : the target to call (can be any Object).
1128   // -----------------------------------
1129   Label stack_overflow;
1130 
1131   // Calculate number of arguments (AddP one for receiver).
1132   __ AddP(r5, r2, Operand(1));
1133   Generate_StackOverflowCheck(masm, r5, ip, &stack_overflow);
1134 
1135   // Push "undefined" as the receiver arg if we need to.
1136   if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
1137     __ PushRoot(Heap::kUndefinedValueRootIndex);
1138     __ LoadRR(r5, r2);  // Argument count is correct.
1139   }
1140 
1141   // Push the arguments.
1142   Generate_InterpreterPushArgs(masm, r5, r4, r5, r6);
1143   if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
1144     __ Pop(r4);                   // Pass the spread in a register
1145     __ SubP(r2, r2, Operand(1));  // Subtract one for spread
1146   }
1147 
1148   // Call the target.
1149   if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
1150     __ Jump(BUILTIN_CODE(masm->isolate(), CallWithSpread),
1151             RelocInfo::CODE_TARGET);
1152   } else {
1153     __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny),
1154             RelocInfo::CODE_TARGET);
1155   }
1156 
1157   __ bind(&stack_overflow);
1158   {
1159     __ TailCallRuntime(Runtime::kThrowStackOverflow);
1160     // Unreachable Code.
1161     __ bkpt(0);
1162   }
1163 }
1164 
1165 // static
Generate_InterpreterPushArgsThenConstructImpl(MacroAssembler * masm,InterpreterPushArgsMode mode)1166 void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
1167     MacroAssembler* masm, InterpreterPushArgsMode mode) {
1168   // ----------- S t a t e -------------
1169   // -- r2 : argument count (not including receiver)
1170   // -- r5 : new target
1171   // -- r3 : constructor to call
1172   // -- r4 : allocation site feedback if available, undefined otherwise.
1173   // -- r6 : address of the first argument
1174   // -----------------------------------
1175   Label stack_overflow;
1176 
1177   // Push a slot for the receiver to be constructed.
1178   __ LoadImmP(r0, Operand::Zero());
1179   __ push(r0);
1180 
1181   // Push the arguments (skip if none).
1182   Label skip;
1183   __ CmpP(r2, Operand::Zero());
1184   __ beq(&skip);
1185   Generate_StackOverflowCheck(masm, r2, ip, &stack_overflow);
1186   Generate_InterpreterPushArgs(masm, r2, r6, r2, r7);
1187   __ bind(&skip);
1188 
1189   if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
1190     __ Pop(r4);                   // Pass the spread in a register
1191     __ SubP(r2, r2, Operand(1));  // Subtract one for spread
1192   } else {
1193     __ AssertUndefinedOrAllocationSite(r4, r7);
1194   }
1195   if (mode == InterpreterPushArgsMode::kArrayFunction) {
1196     __ AssertFunction(r3);
1197 
1198     // Tail call to the array construct stub (still in the caller
1199     // context at this point).
1200     ArrayConstructorStub array_constructor_stub(masm->isolate());
1201     __ Jump(array_constructor_stub.GetCode(), RelocInfo::CODE_TARGET);
1202   } else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
1203     // Call the constructor with r2, r3, and r5 unmodified.
1204     __ Jump(BUILTIN_CODE(masm->isolate(), ConstructWithSpread),
1205             RelocInfo::CODE_TARGET);
1206   } else {
1207     DCHECK_EQ(InterpreterPushArgsMode::kOther, mode);
1208     // Call the constructor with r2, r3, and r5 unmodified.
1209     __ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET);
1210   }
1211 
1212   __ bind(&stack_overflow);
1213   {
1214     __ TailCallRuntime(Runtime::kThrowStackOverflow);
1215     // Unreachable Code.
1216     __ bkpt(0);
1217   }
1218 }
1219 
Generate_InterpreterEnterBytecode(MacroAssembler * masm)1220 static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
1221   // Set the return address to the correct point in the interpreter entry
1222   // trampoline.
1223   Label builtin_trampoline, trampoline_loaded;
1224   Smi* interpreter_entry_return_pc_offset(
1225       masm->isolate()->heap()->interpreter_entry_return_pc_offset());
1226   DCHECK_NE(interpreter_entry_return_pc_offset, Smi::kZero);
1227 
1228   // If the SFI function_data is an InterpreterData, get the trampoline stored
1229   // in it, otherwise get the trampoline from the builtins list.
1230   __ LoadP(r4, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
1231   __ LoadP(r4, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
1232   __ LoadP(r4, FieldMemOperand(r4, SharedFunctionInfo::kFunctionDataOffset));
1233   __ CompareObjectType(r4, kInterpreterDispatchTableRegister,
1234                        kInterpreterDispatchTableRegister,
1235                        INTERPRETER_DATA_TYPE);
1236   __ bne(&builtin_trampoline);
1237 
1238   __ LoadP(r4,
1239            FieldMemOperand(r4, InterpreterData::kInterpreterTrampolineOffset));
1240   __ b(&trampoline_loaded);
1241 
1242   __ bind(&builtin_trampoline);
1243   __ Move(r4, BUILTIN_CODE(masm->isolate(), InterpreterEntryTrampoline));
1244 
1245   __ bind(&trampoline_loaded);
1246   __ AddP(r14, r4, Operand(interpreter_entry_return_pc_offset->value() +
1247                            Code::kHeaderSize - kHeapObjectTag));
1248 
1249   // Initialize the dispatch table register.
1250   __ Move(
1251       kInterpreterDispatchTableRegister,
1252       ExternalReference::interpreter_dispatch_table_address(masm->isolate()));
1253 
1254   // Get the bytecode array pointer from the frame.
1255   __ LoadP(kInterpreterBytecodeArrayRegister,
1256            MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
1257 
1258   if (FLAG_debug_code) {
1259     // Check function data field is actually a BytecodeArray object.
1260     __ TestIfSmi(kInterpreterBytecodeArrayRegister);
1261     __ Assert(
1262         ne, AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
1263     __ CompareObjectType(kInterpreterBytecodeArrayRegister, r3, no_reg,
1264                          BYTECODE_ARRAY_TYPE);
1265     __ Assert(
1266         eq, AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
1267   }
1268 
1269   // Get the target bytecode offset from the frame.
1270   __ LoadP(kInterpreterBytecodeOffsetRegister,
1271            MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
1272   __ SmiUntag(kInterpreterBytecodeOffsetRegister);
1273 
1274   // Dispatch to the target bytecode.
1275   __ LoadlB(ip, MemOperand(kInterpreterBytecodeArrayRegister,
1276                            kInterpreterBytecodeOffsetRegister));
1277   __ ShiftLeftP(ip, ip, Operand(kPointerSizeLog2));
1278   __ LoadP(kJavaScriptCallCodeStartRegister,
1279            MemOperand(kInterpreterDispatchTableRegister, ip));
1280   __ Jump(kJavaScriptCallCodeStartRegister);
1281 }
1282 
Generate_InterpreterEnterBytecodeAdvance(MacroAssembler * masm)1283 void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
1284   // Get bytecode array and bytecode offset from the stack frame.
1285   __ LoadP(kInterpreterBytecodeArrayRegister,
1286            MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
1287   __ LoadP(kInterpreterBytecodeOffsetRegister,
1288            MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
1289   __ SmiUntag(kInterpreterBytecodeOffsetRegister);
1290 
1291   // Load the current bytecode.
1292   __ LoadlB(r3, MemOperand(kInterpreterBytecodeArrayRegister,
1293                            kInterpreterBytecodeOffsetRegister));
1294 
1295   // Advance to the next bytecode.
1296   Label if_return;
1297   AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
1298                                 kInterpreterBytecodeOffsetRegister, r3, r4,
1299                                 &if_return);
1300 
1301   // Convert new bytecode offset to a Smi and save in the stackframe.
1302   __ SmiTag(r4, kInterpreterBytecodeOffsetRegister);
1303   __ StoreP(r4,
1304             MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
1305 
1306   Generate_InterpreterEnterBytecode(masm);
1307 
1308   // We should never take the if_return path.
1309   __ bind(&if_return);
1310   __ Abort(AbortReason::kInvalidBytecodeAdvance);
1311 }
1312 
Generate_InterpreterEnterBytecodeDispatch(MacroAssembler * masm)1313 void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
1314   Generate_InterpreterEnterBytecode(masm);
1315 }
1316 
Generate_CompileLazyDeoptimizedCode(MacroAssembler * masm)1317 void Builtins::Generate_CompileLazyDeoptimizedCode(MacroAssembler* masm) {
1318   // Set the code slot inside the JSFunction to CompileLazy.
1319   __ Move(r4, BUILTIN_CODE(masm->isolate(), CompileLazy));
1320   __ StoreP(r4, FieldMemOperand(r3, JSFunction::kCodeOffset));
1321   __ RecordWriteField(r3, JSFunction::kCodeOffset, r4, r6, kLRHasNotBeenSaved,
1322                       kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
1323   // Jump to compile lazy.
1324   Generate_CompileLazy(masm);
1325 }
1326 
GetSharedFunctionInfoCode(MacroAssembler * masm,Register sfi_data,Register scratch1)1327 static void GetSharedFunctionInfoCode(MacroAssembler* masm, Register sfi_data,
1328                                       Register scratch1) {
1329   // Figure out the SFI's code object.
1330   Label done;
1331   Label check_is_bytecode_array;
1332   Label check_is_exported_function_data;
1333   Label check_is_fixed_array;
1334   Label check_is_pre_parsed_scope_data;
1335   Label check_is_function_template_info;
1336   Label check_is_interpreter_data;
1337 
1338   Register data_type = scratch1;
1339 
1340   // IsSmi: Is builtin
1341   __ JumpIfNotSmi(sfi_data, &check_is_bytecode_array);
1342   __ Move(scratch1, ExternalReference::builtins_address(masm->isolate()));
1343   __ SmiUntag(sfi_data, kPointerSizeLog2);
1344   __ LoadP(sfi_data, MemOperand(scratch1, sfi_data));
1345   __ b(&done);
1346 
1347   // Get map for subsequent checks.
1348   __ bind(&check_is_bytecode_array);
1349   __ LoadP(data_type, FieldMemOperand(sfi_data, HeapObject::kMapOffset));
1350   __ LoadHalfWordP(data_type,
1351                    FieldMemOperand(data_type, Map::kInstanceTypeOffset));
1352 
1353   // IsBytecodeArray: Interpret bytecode
1354   __ CmpP(data_type, Operand(BYTECODE_ARRAY_TYPE));
1355   __ bne(&check_is_exported_function_data);
1356   __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), InterpreterEntryTrampoline));
1357   __ b(&done);
1358 
1359   // IsWasmExportedFunctionData: Use the wrapper code
1360   __ bind(&check_is_exported_function_data);
1361   __ CmpP(data_type, Operand(WASM_EXPORTED_FUNCTION_DATA_TYPE));
1362   __ bne(&check_is_fixed_array);
1363   __ LoadP(
1364       sfi_data,
1365       FieldMemOperand(sfi_data, WasmExportedFunctionData::kWrapperCodeOffset));
1366   __ b(&done);
1367 
1368   // IsFixedArray: Instantiate using AsmWasmData
1369   __ bind(&check_is_fixed_array);
1370   __ CmpP(data_type, Operand(FIXED_ARRAY_TYPE));
1371   __ bne(&check_is_pre_parsed_scope_data);
1372   __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), InstantiateAsmJs));
1373   __ b(&done);
1374 
1375   // IsPreParsedScopeData: Compile lazy
1376   __ bind(&check_is_pre_parsed_scope_data);
1377   __ CmpP(data_type, Operand(TUPLE2_TYPE));
1378   __ bne(&check_is_function_template_info);
1379   __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), CompileLazy));
1380   __ b(&done);
1381 
1382   // IsFunctionTemplateInfo: API call
1383   __ bind(&check_is_function_template_info);
1384   __ CmpP(data_type, Operand(FUNCTION_TEMPLATE_INFO_TYPE));
1385   __ bne(&check_is_interpreter_data);
1386   __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), HandleApiCall));
1387   __ b(&done);
1388 
1389   // IsInterpreterData: Interpret bytecode
1390   __ bind(&check_is_interpreter_data);
1391   if (FLAG_debug_code) {
1392     __ CmpP(data_type, Operand(INTERPRETER_DATA_TYPE));
1393     __ Assert(eq, AbortReason::kInvalidSharedFunctionInfoData);
1394   }
1395   __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), HandleApiCall));
1396   __ LoadP(
1397       sfi_data,
1398       FieldMemOperand(sfi_data, InterpreterData::kInterpreterTrampolineOffset));
1399 
1400   __ bind(&done);
1401 }
1402 
Generate_CompileLazy(MacroAssembler * masm)1403 void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
1404   // ----------- S t a t e -------------
1405   //  -- r2 : argument count (preserved for callee)
1406   //  -- r5 : new target (preserved for callee)
1407   //  -- r3 : target function (preserved for callee)
1408   // -----------------------------------
1409   // First lookup code, maybe we don't need to compile!
1410   Label gotta_call_runtime;
1411 
1412   Register closure = r3;
1413   Register feedback_vector = r4;
1414 
1415   // Do we have a valid feedback vector?
1416   __ LoadP(feedback_vector,
1417            FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
1418   __ LoadP(feedback_vector,
1419            FieldMemOperand(feedback_vector, Cell::kValueOffset));
1420   __ JumpIfRoot(feedback_vector, Heap::kUndefinedValueRootIndex,
1421                 &gotta_call_runtime);
1422 
1423   // Is there an optimization marker or optimized code in the feedback vector?
1424   MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, r6, r8, r7);
1425 
1426   // We found no optimized code. Infer the code object needed for the SFI.
1427   Register entry = r6;
1428   __ LoadP(entry,
1429            FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
1430   __ LoadP(entry,
1431            FieldMemOperand(entry, SharedFunctionInfo::kFunctionDataOffset));
1432   GetSharedFunctionInfoCode(masm, entry, r7);
1433 
1434   // If code entry points to anything other than CompileLazy, install that.
1435   __ Move(r7, masm->CodeObject());
1436   __ CmpP(entry, r7);
1437   __ beq(&gotta_call_runtime);
1438 
1439   // Install the SFI's code entry.
1440   __ StoreP(entry, FieldMemOperand(closure, JSFunction::kCodeOffset), r0);
1441   __ LoadRR(r8, entry);  // Write barrier clobbers r8 below.
1442   __ RecordWriteField(closure, JSFunction::kCodeOffset, r8, r7,
1443                       kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
1444                       OMIT_SMI_CHECK);
1445   __ AddP(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
1446   __ JumpToJSEntry(entry);
1447 
1448   __ bind(&gotta_call_runtime);
1449   GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
1450 }
1451 
1452 // Lazy deserialization design doc: http://goo.gl/dxkYDZ.
Generate_DeserializeLazy(MacroAssembler * masm)1453 void Builtins::Generate_DeserializeLazy(MacroAssembler* masm) {
1454   // ----------- S t a t e -------------
1455   //  -- r2 : argument count (preserved for callee)
1456   //  -- r5 : new target (preserved for callee)
1457   //  -- r3 : target function (preserved for callee)
1458   // -----------------------------------
1459 
1460   Label deserialize_in_runtime;
1461 
1462   Register target = r3;  // Must be preserved
1463   Register scratch0 = r4;
1464   Register scratch1 = r6;
1465 
1466   CHECK(scratch0 != r2 && scratch0 != r5 && scratch0 != r3);
1467   CHECK(scratch1 != r2 && scratch1 != r5 && scratch1 != r3);
1468   CHECK(scratch0 != scratch1);
1469 
1470   // Load the builtin id for lazy deserialization from SharedFunctionInfo.
1471 
1472   __ AssertFunction(target);
1473   __ LoadP(scratch0,
1474            FieldMemOperand(target, JSFunction::kSharedFunctionInfoOffset));
1475 
1476   __ LoadP(scratch1,
1477            FieldMemOperand(scratch0, SharedFunctionInfo::kFunctionDataOffset));
1478   __ AssertSmi(scratch1);
1479 
1480   // The builtin may already have been deserialized. If that is the case, it is
1481   // stored in the builtins table, and we can copy to correct code object to
1482   // both the shared function info and function without calling into runtime.
1483   //
1484   // Otherwise, we need to call into runtime to deserialize.
1485 
1486   {
1487     // Load the code object at builtins_table[builtin_id] into scratch1.
1488 
1489     __ SmiUntag(scratch1);
1490     __ Move(scratch0, ExternalReference::builtins_address(masm->isolate()));
1491     __ ShiftLeftP(scratch1, scratch1, Operand(kPointerSizeLog2));
1492     __ LoadP(scratch1, MemOperand(scratch0, scratch1));
1493 
1494     // Check if the loaded code object has already been deserialized. This is
1495     // the case iff it does not equal DeserializeLazy.
1496 
1497     __ Move(scratch0, masm->CodeObject());
1498     __ CmpP(scratch1, scratch0);
1499     __ beq(&deserialize_in_runtime);
1500   }
1501   {
1502     // If we've reached this spot, the target builtin has been deserialized and
1503     // we simply need to copy it over to the target function.
1504 
1505     Register target_builtin = scratch1;
1506 
1507     __ StoreP(target_builtin, FieldMemOperand(target, JSFunction::kCodeOffset));
1508     __ LoadRR(r8, target_builtin);  // Write barrier clobbers r9 below.
1509     __ RecordWriteField(target, JSFunction::kCodeOffset, r8, r7,
1510                         kLRHasNotBeenSaved, kDontSaveFPRegs,
1511                         OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
1512 
1513     // All copying is done. Jump to the deserialized code object.
1514 
1515     __ AddP(target_builtin, target_builtin,
1516             Operand(Code::kHeaderSize - kHeapObjectTag));
1517     __ LoadRR(ip, target_builtin);
1518     __ Jump(ip);
1519   }
1520 
1521   __ bind(&deserialize_in_runtime);
1522   GenerateTailCallToReturnedCode(masm, Runtime::kDeserializeLazy);
1523 }
1524 
Generate_InstantiateAsmJs(MacroAssembler * masm)1525 void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
1526   // ----------- S t a t e -------------
1527   //  -- r2 : argument count (preserved for callee)
1528   //  -- r3 : new target (preserved for callee)
1529   //  -- r5 : target function (preserved for callee)
1530   // -----------------------------------
1531   Label failed;
1532   {
1533     FrameScope scope(masm, StackFrame::INTERNAL);
1534     // Preserve argument count for later compare.
1535     __ Move(r6, r2);
1536     // Push a copy of the target function and the new target.
1537     __ SmiTag(r2);
1538     // Push another copy as a parameter to the runtime call.
1539     __ Push(r2, r3, r5, r3);
1540 
1541     // Copy arguments from caller (stdlib, foreign, heap).
1542     Label args_done;
1543     for (int j = 0; j < 4; ++j) {
1544       Label over;
1545       if (j < 3) {
1546         __ CmpP(r6, Operand(j));
1547         __ b(ne, &over);
1548       }
1549       for (int i = j - 1; i >= 0; --i) {
1550         __ LoadP(r6, MemOperand(fp, StandardFrameConstants::kCallerSPOffset +
1551                                         i * kPointerSize));
1552         __ push(r6);
1553       }
1554       for (int i = 0; i < 3 - j; ++i) {
1555         __ PushRoot(Heap::kUndefinedValueRootIndex);
1556       }
1557       if (j < 3) {
1558         __ jmp(&args_done);
1559         __ bind(&over);
1560       }
1561     }
1562     __ bind(&args_done);
1563 
1564     // Call runtime, on success unwind frame, and parent frame.
1565     __ CallRuntime(Runtime::kInstantiateAsmJs, 4);
1566     // A smi 0 is returned on failure, an object on success.
1567     __ JumpIfSmi(r2, &failed);
1568 
1569     __ Drop(2);
1570     __ pop(r6);
1571     __ SmiUntag(r6);
1572     scope.GenerateLeaveFrame();
1573 
1574     __ AddP(r6, r6, Operand(1));
1575     __ Drop(r6);
1576     __ Ret();
1577 
1578     __ bind(&failed);
1579     // Restore target function and new target.
1580     __ Pop(r2, r3, r5);
1581     __ SmiUntag(r2);
1582   }
1583   // On failure, tail call back to regular js by re-calling the function
1584   // which has be reset to the compile lazy builtin.
1585   static_assert(kJavaScriptCallCodeStartRegister == r4, "ABI mismatch");
1586   __ LoadP(r4, FieldMemOperand(r3, JSFunction::kCodeOffset));
1587   __ AddP(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
1588   __ JumpToJSEntry(r4);
1589 }
1590 
1591 namespace {
Generate_ContinueToBuiltinHelper(MacroAssembler * masm,bool java_script_builtin,bool with_result)1592 void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
1593                                       bool java_script_builtin,
1594                                       bool with_result) {
1595   const RegisterConfiguration* config(RegisterConfiguration::Default());
1596   int allocatable_register_count = config->num_allocatable_general_registers();
1597   if (with_result) {
1598     // Overwrite the hole inserted by the deoptimizer with the return value from
1599     // the LAZY deopt point.
1600     __ StoreP(
1601         r2, MemOperand(
1602                 sp, config->num_allocatable_general_registers() * kPointerSize +
1603                         BuiltinContinuationFrameConstants::kFixedFrameSize));
1604   }
1605   for (int i = allocatable_register_count - 1; i >= 0; --i) {
1606     int code = config->GetAllocatableGeneralCode(i);
1607     __ Pop(Register::from_code(code));
1608     if (java_script_builtin && code == kJavaScriptCallArgCountRegister.code()) {
1609       __ SmiUntag(Register::from_code(code));
1610     }
1611   }
1612   __ LoadP(
1613       fp,
1614       MemOperand(sp, BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
1615   __ Pop(ip);
1616   __ AddP(sp, sp,
1617           Operand(BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
1618   __ Pop(r0);
1619   __ LoadRR(r14, r0);
1620   __ AddP(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
1621   __ Jump(ip);
1622 }
1623 }  // namespace
1624 
Generate_ContinueToCodeStubBuiltin(MacroAssembler * masm)1625 void Builtins::Generate_ContinueToCodeStubBuiltin(MacroAssembler* masm) {
1626   Generate_ContinueToBuiltinHelper(masm, false, false);
1627 }
1628 
Generate_ContinueToCodeStubBuiltinWithResult(MacroAssembler * masm)1629 void Builtins::Generate_ContinueToCodeStubBuiltinWithResult(
1630     MacroAssembler* masm) {
1631   Generate_ContinueToBuiltinHelper(masm, false, true);
1632 }
1633 
Generate_ContinueToJavaScriptBuiltin(MacroAssembler * masm)1634 void Builtins::Generate_ContinueToJavaScriptBuiltin(MacroAssembler* masm) {
1635   Generate_ContinueToBuiltinHelper(masm, true, false);
1636 }
1637 
Generate_ContinueToJavaScriptBuiltinWithResult(MacroAssembler * masm)1638 void Builtins::Generate_ContinueToJavaScriptBuiltinWithResult(
1639     MacroAssembler* masm) {
1640   Generate_ContinueToBuiltinHelper(masm, true, true);
1641 }
1642 
Generate_NotifyDeoptimized(MacroAssembler * masm)1643 void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
1644   {
1645     FrameScope scope(masm, StackFrame::INTERNAL);
1646     __ CallRuntime(Runtime::kNotifyDeoptimized);
1647   }
1648 
1649   DCHECK_EQ(kInterpreterAccumulatorRegister.code(), r2.code());
1650   __ pop(r2);
1651   __ Ret();
1652 }
1653 
Generate_OnStackReplacementHelper(MacroAssembler * masm,bool has_handler_frame)1654 static void Generate_OnStackReplacementHelper(MacroAssembler* masm,
1655                                               bool has_handler_frame) {
1656   // Lookup the function in the JavaScript frame.
1657   if (has_handler_frame) {
1658     __ LoadP(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
1659     __ LoadP(r2, MemOperand(r2, JavaScriptFrameConstants::kFunctionOffset));
1660   } else {
1661     __ LoadP(r2, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
1662   }
1663 
1664   {
1665     FrameScope scope(masm, StackFrame::INTERNAL);
1666     // Pass function as argument.
1667     __ push(r2);
1668     __ CallRuntime(Runtime::kCompileForOnStackReplacement);
1669   }
1670 
1671   // If the code object is null, just return to the caller.
1672   Label skip;
1673   __ CmpSmiLiteral(r2, Smi::kZero, r0);
1674   __ bne(&skip);
1675   __ Ret();
1676 
1677   __ bind(&skip);
1678 
1679   // Drop any potential handler frame that is be sitting on top of the actual
1680   // JavaScript frame. This is the case then OSR is triggered from bytecode.
1681   if (has_handler_frame) {
1682     __ LeaveFrame(StackFrame::STUB);
1683   }
1684 
1685   // Load deoptimization data from the code object.
1686   // <deopt_data> = <code>[#deoptimization_data_offset]
1687   __ LoadP(r3, FieldMemOperand(r2, Code::kDeoptimizationDataOffset));
1688 
1689   // Load the OSR entrypoint offset from the deoptimization data.
1690   // <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
1691   __ LoadP(r3, FieldMemOperand(r3, FixedArray::OffsetOfElementAt(
1692                                        DeoptimizationData::kOsrPcOffsetIndex)));
1693   __ SmiUntag(r3);
1694 
1695   // Compute the target address = code_obj + header_size + osr_offset
1696   // <entry_addr> = <code_obj> + #header_size + <osr_offset>
1697   __ AddP(r2, r3);
1698   __ AddP(r0, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
1699   __ LoadRR(r14, r0);
1700 
1701   // And "return" to the OSR entry point of the function.
1702   __ Ret();
1703 }
1704 
Generate_OnStackReplacement(MacroAssembler * masm)1705 void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
1706   Generate_OnStackReplacementHelper(masm, false);
1707 }
1708 
Generate_InterpreterOnStackReplacement(MacroAssembler * masm)1709 void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
1710   Generate_OnStackReplacementHelper(masm, true);
1711 }
1712 
1713 // static
Generate_FunctionPrototypeApply(MacroAssembler * masm)1714 void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
1715   // ----------- S t a t e -------------
1716   //  -- r2    : argc
1717   //  -- sp[0] : argArray
1718   //  -- sp[4] : thisArg
1719   //  -- sp[8] : receiver
1720   // -----------------------------------
1721 
1722   // 1. Load receiver into r3, argArray into r4 (if present), remove all
1723   // arguments from the stack (including the receiver), and push thisArg (if
1724   // present) instead.
1725   {
1726     Label skip;
1727     Register arg_size = r7;
1728     Register new_sp = r5;
1729     Register scratch = r6;
1730     __ ShiftLeftP(arg_size, r2, Operand(kPointerSizeLog2));
1731     __ AddP(new_sp, sp, arg_size);
1732     __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
1733     __ LoadRR(r4, scratch);
1734     __ LoadP(r3, MemOperand(new_sp, 0));  // receiver
1735     __ CmpP(arg_size, Operand(kPointerSize));
1736     __ blt(&skip);
1737     __ LoadP(scratch, MemOperand(new_sp, 1 * -kPointerSize));  // thisArg
1738     __ beq(&skip);
1739     __ LoadP(r4, MemOperand(new_sp, 2 * -kPointerSize));  // argArray
1740     __ bind(&skip);
1741     __ LoadRR(sp, new_sp);
1742     __ StoreP(scratch, MemOperand(sp, 0));
1743   }
1744 
1745   // ----------- S t a t e -------------
1746   //  -- r4    : argArray
1747   //  -- r3    : receiver
1748   //  -- sp[0] : thisArg
1749   // -----------------------------------
1750 
1751   // 2. We don't need to check explicitly for callable receiver here,
1752   // since that's the first thing the Call/CallWithArrayLike builtins
1753   // will do.
1754 
1755   // 3. Tail call with no arguments if argArray is null or undefined.
1756   Label no_arguments;
1757   __ JumpIfRoot(r4, Heap::kNullValueRootIndex, &no_arguments);
1758   __ JumpIfRoot(r4, Heap::kUndefinedValueRootIndex, &no_arguments);
1759 
1760   // 4a. Apply the receiver to the given argArray.
1761   __ Jump(BUILTIN_CODE(masm->isolate(), CallWithArrayLike),
1762           RelocInfo::CODE_TARGET);
1763 
1764   // 4b. The argArray is either null or undefined, so we tail call without any
1765   // arguments to the receiver.
1766   __ bind(&no_arguments);
1767   {
1768     __ LoadImmP(r2, Operand::Zero());
1769     __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
1770   }
1771 }
1772 
1773 // static
Generate_FunctionPrototypeCall(MacroAssembler * masm)1774 void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
1775   // 1. Make sure we have at least one argument.
1776   // r2: actual number of arguments
1777   {
1778     Label done;
1779     __ CmpP(r2, Operand::Zero());
1780     __ bne(&done, Label::kNear);
1781     __ PushRoot(Heap::kUndefinedValueRootIndex);
1782     __ AddP(r2, Operand(1));
1783     __ bind(&done);
1784   }
1785 
1786   // r2: actual number of arguments
1787   // 2. Get the callable to call (passed as receiver) from the stack.
1788   __ ShiftLeftP(r4, r2, Operand(kPointerSizeLog2));
1789   __ LoadP(r3, MemOperand(sp, r4));
1790 
1791   // 3. Shift arguments and return address one slot down on the stack
1792   //    (overwriting the original receiver).  Adjust argument count to make
1793   //    the original first argument the new receiver.
1794   // r2: actual number of arguments
1795   // r3: callable
1796   {
1797     Label loop;
1798     // Calculate the copy start address (destination). Copy end address is sp.
1799     __ AddP(r4, sp, r4);
1800 
1801     __ bind(&loop);
1802     __ LoadP(ip, MemOperand(r4, -kPointerSize));
1803     __ StoreP(ip, MemOperand(r4));
1804     __ SubP(r4, Operand(kPointerSize));
1805     __ CmpP(r4, sp);
1806     __ bne(&loop);
1807     // Adjust the actual number of arguments and remove the top element
1808     // (which is a copy of the last argument).
1809     __ SubP(r2, Operand(1));
1810     __ pop();
1811   }
1812 
1813   // 4. Call the callable.
1814   __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
1815 }
1816 
Generate_ReflectApply(MacroAssembler * masm)1817 void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
1818   // ----------- S t a t e -------------
1819   //  -- r2     : argc
1820   //  -- sp[0]  : argumentsList
1821   //  -- sp[4]  : thisArgument
1822   //  -- sp[8]  : target
1823   //  -- sp[12] : receiver
1824   // -----------------------------------
1825 
1826   // 1. Load target into r3 (if present), argumentsList into r4 (if present),
1827   // remove all arguments from the stack (including the receiver), and push
1828   // thisArgument (if present) instead.
1829   {
1830     Label skip;
1831     Register arg_size = r7;
1832     Register new_sp = r5;
1833     Register scratch = r6;
1834     __ ShiftLeftP(arg_size, r2, Operand(kPointerSizeLog2));
1835     __ AddP(new_sp, sp, arg_size);
1836     __ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
1837     __ LoadRR(scratch, r3);
1838     __ LoadRR(r4, r3);
1839     __ CmpP(arg_size, Operand(kPointerSize));
1840     __ blt(&skip);
1841     __ LoadP(r3, MemOperand(new_sp, 1 * -kPointerSize));  // target
1842     __ beq(&skip);
1843     __ LoadP(scratch, MemOperand(new_sp, 2 * -kPointerSize));  // thisArgument
1844     __ CmpP(arg_size, Operand(2 * kPointerSize));
1845     __ beq(&skip);
1846     __ LoadP(r4, MemOperand(new_sp, 3 * -kPointerSize));  // argumentsList
1847     __ bind(&skip);
1848     __ LoadRR(sp, new_sp);
1849     __ StoreP(scratch, MemOperand(sp, 0));
1850   }
1851 
1852   // ----------- S t a t e -------------
1853   //  -- r4    : argumentsList
1854   //  -- r3    : target
1855   //  -- sp[0] : thisArgument
1856   // -----------------------------------
1857 
1858   // 2. We don't need to check explicitly for callable target here,
1859   // since that's the first thing the Call/CallWithArrayLike builtins
1860   // will do.
1861 
1862   // 3 Apply the target to the given argumentsList.
1863   __ Jump(BUILTIN_CODE(masm->isolate(), CallWithArrayLike),
1864           RelocInfo::CODE_TARGET);
1865 }
1866 
Generate_ReflectConstruct(MacroAssembler * masm)1867 void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
1868   // ----------- S t a t e -------------
1869   //  -- r2     : argc
1870   //  -- sp[0]  : new.target (optional)
1871   //  -- sp[4]  : argumentsList
1872   //  -- sp[8]  : target
1873   //  -- sp[12] : receiver
1874   // -----------------------------------
1875 
1876   // 1. Load target into r3 (if present), argumentsList into r4 (if present),
1877   // new.target into r5 (if present, otherwise use target), remove all
1878   // arguments from the stack (including the receiver), and push thisArgument
1879   // (if present) instead.
1880   {
1881     Label skip;
1882     Register arg_size = r7;
1883     Register new_sp = r6;
1884     __ ShiftLeftP(arg_size, r2, Operand(kPointerSizeLog2));
1885     __ AddP(new_sp, sp, arg_size);
1886     __ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
1887     __ LoadRR(r4, r3);
1888     __ LoadRR(r5, r3);
1889     __ StoreP(r3, MemOperand(new_sp, 0));  // receiver (undefined)
1890     __ CmpP(arg_size, Operand(kPointerSize));
1891     __ blt(&skip);
1892     __ LoadP(r3, MemOperand(new_sp, 1 * -kPointerSize));  // target
1893     __ LoadRR(r5, r3);  // new.target defaults to target
1894     __ beq(&skip);
1895     __ LoadP(r4, MemOperand(new_sp, 2 * -kPointerSize));  // argumentsList
1896     __ CmpP(arg_size, Operand(2 * kPointerSize));
1897     __ beq(&skip);
1898     __ LoadP(r5, MemOperand(new_sp, 3 * -kPointerSize));  // new.target
1899     __ bind(&skip);
1900     __ LoadRR(sp, new_sp);
1901   }
1902 
1903   // ----------- S t a t e -------------
1904   //  -- r4    : argumentsList
1905   //  -- r5    : new.target
1906   //  -- r3    : target
1907   //  -- sp[0] : receiver (undefined)
1908   // -----------------------------------
1909 
1910   // 2. We don't need to check explicitly for constructor target here,
1911   // since that's the first thing the Construct/ConstructWithArrayLike
1912   // builtins will do.
1913 
1914   // 3. We don't need to check explicitly for constructor new.target here,
1915   // since that's the second thing the Construct/ConstructWithArrayLike
1916   // builtins will do.
1917 
1918   // 4. Construct the target with the given new.target and argumentsList.
1919   __ Jump(BUILTIN_CODE(masm->isolate(), ConstructWithArrayLike),
1920           RelocInfo::CODE_TARGET);
1921 }
1922 
EnterArgumentsAdaptorFrame(MacroAssembler * masm)1923 static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
1924   __ SmiTag(r2);
1925   __ Load(r6, Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
1926   // Stack updated as such:
1927   //    old SP --->
1928   //                 R14 Return Addr
1929   //                 Old FP                     <--- New FP
1930   //                 Argument Adapter SMI
1931   //                 Function
1932   //                 ArgC as SMI
1933   //                 Padding                    <--- New SP
1934   __ lay(sp, MemOperand(sp, -5 * kPointerSize));
1935 
1936   // Cleanse the top nibble of 31-bit pointers.
1937   __ CleanseP(r14);
1938   __ StoreP(r14, MemOperand(sp, 4 * kPointerSize));
1939   __ StoreP(fp, MemOperand(sp, 3 * kPointerSize));
1940   __ StoreP(r6, MemOperand(sp, 2 * kPointerSize));
1941   __ StoreP(r3, MemOperand(sp, 1 * kPointerSize));
1942   __ StoreP(r2, MemOperand(sp, 0 * kPointerSize));
1943   __ Push(Smi::kZero);  // Padding.
1944   __ la(fp,
1945         MemOperand(sp, ArgumentsAdaptorFrameConstants::kFixedFrameSizeFromFp));
1946 }
1947 
LeaveArgumentsAdaptorFrame(MacroAssembler * masm)1948 static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
1949   // ----------- S t a t e -------------
1950   //  -- r2 : result being passed through
1951   // -----------------------------------
1952   // Get the number of arguments passed (as a smi), tear down the frame and
1953   // then tear down the parameters.
1954   __ LoadP(r3, MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
1955   int stack_adjustment = kPointerSize;  // adjust for receiver
1956   __ LeaveFrame(StackFrame::ARGUMENTS_ADAPTOR, stack_adjustment);
1957   __ SmiToPtrArrayOffset(r3, r3);
1958   __ lay(sp, MemOperand(sp, r3));
1959 }
1960 
1961 // static
Generate_CallOrConstructVarargs(MacroAssembler * masm,Handle<Code> code)1962 void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
1963                                                Handle<Code> code) {
1964   // ----------- S t a t e -------------
1965   //  -- r3 : target
1966   //  -- r2 : number of parameters on the stack (not including the receiver)
1967   //  -- r4 : arguments list (a FixedArray)
1968   //  -- r6 : len (number of elements to push from args)
1969   //  -- r5 : new.target (for [[Construct]])
1970   // -----------------------------------
1971 
1972   __ AssertFixedArray(r4);
1973   // Check for stack overflow.
1974   {
1975     // Check the stack for overflow. We are not trying to catch interruptions
1976     // (i.e. debug break and preemption) here, so check the "real stack limit".
1977     Label done;
1978     __ LoadRoot(ip, Heap::kRealStackLimitRootIndex);
1979     // Make ip the space we have left. The stack might already be overflowed
1980     // here which will cause ip to become negative.
1981     __ SubP(ip, sp, ip);
1982     // Check if the arguments will overflow the stack.
1983     __ ShiftLeftP(r0, r6, Operand(kPointerSizeLog2));
1984     __ CmpP(ip, r0);  // Signed comparison.
1985     __ bgt(&done);
1986     __ TailCallRuntime(Runtime::kThrowStackOverflow);
1987     __ bind(&done);
1988   }
1989 
1990   // Push arguments onto the stack (thisArgument is already on the stack).
1991   {
1992     Label loop, no_args, skip;
1993     __ CmpP(r6, Operand::Zero());
1994     __ beq(&no_args);
1995     __ AddP(r4, r4,
1996             Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
1997     __ LoadRR(r1, r6);
1998     __ bind(&loop);
1999     __ LoadP(ip, MemOperand(r4, kPointerSize));
2000     __ la(r4, MemOperand(r4, kPointerSize));
2001     __ CompareRoot(ip, Heap::kTheHoleValueRootIndex);
2002     __ bne(&skip, Label::kNear);
2003     __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
2004     __ bind(&skip);
2005     __ push(ip);
2006     __ BranchOnCount(r1, &loop);
2007     __ bind(&no_args);
2008     __ AddP(r2, r2, r6);
2009   }
2010 
2011   // Tail-call to the actual Call or Construct builtin.
2012   __ Jump(code, RelocInfo::CODE_TARGET);
2013 }
2014 
2015 // static
Generate_CallOrConstructForwardVarargs(MacroAssembler * masm,CallOrConstructMode mode,Handle<Code> code)2016 void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
2017                                                       CallOrConstructMode mode,
2018                                                       Handle<Code> code) {
2019   // ----------- S t a t e -------------
2020   //  -- r2 : the number of arguments (not including the receiver)
2021   //  -- r5 : the new.target (for [[Construct]] calls)
2022   //  -- r3 : the target to call (can be any Object)
2023   //  -- r4 : start index (to support rest parameters)
2024   // -----------------------------------
2025 
2026   Register scratch = r8;
2027 
2028   if (mode == CallOrConstructMode::kConstruct) {
2029     Label new_target_constructor, new_target_not_constructor;
2030     __ JumpIfSmi(r5, &new_target_not_constructor);
2031     __ LoadP(scratch, FieldMemOperand(r5, HeapObject::kMapOffset));
2032     __ LoadlB(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
2033     __ tmll(scratch, Operand(Map::IsConstructorBit::kShift));
2034     __ bne(&new_target_constructor);
2035     __ bind(&new_target_not_constructor);
2036     {
2037       FrameScope scope(masm, StackFrame::MANUAL);
2038       __ EnterFrame(StackFrame::INTERNAL);
2039       __ Push(r5);
2040       __ CallRuntime(Runtime::kThrowNotConstructor);
2041     }
2042     __ bind(&new_target_constructor);
2043   }
2044 
2045   // Check if we have an arguments adaptor frame below the function frame.
2046   Label arguments_adaptor, arguments_done;
2047   __ LoadP(r6, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
2048   __ LoadP(ip, MemOperand(r6, CommonFrameConstants::kContextOrFrameTypeOffset));
2049   __ CmpP(ip, Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
2050   __ beq(&arguments_adaptor);
2051   {
2052     __ LoadP(r7, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
2053     __ LoadP(r7, FieldMemOperand(r7, JSFunction::kSharedFunctionInfoOffset));
2054     __ LoadW(r7, FieldMemOperand(
2055                      r7, SharedFunctionInfo::kFormalParameterCountOffset));
2056     __ LoadRR(r6, fp);
2057   }
2058   __ b(&arguments_done);
2059   __ bind(&arguments_adaptor);
2060   {
2061     // Load the length from the ArgumentsAdaptorFrame.
2062     __ LoadP(r7, MemOperand(r6, ArgumentsAdaptorFrameConstants::kLengthOffset));
2063     __ SmiUntag(r7);
2064   }
2065   __ bind(&arguments_done);
2066 
2067   Label stack_done, stack_overflow;
2068   __ SubP(r7, r7, r4);
2069   __ CmpP(r7, Operand::Zero());
2070   __ ble(&stack_done);
2071   {
2072     // Check for stack overflow.
2073     Generate_StackOverflowCheck(masm, r7, r4, &stack_overflow);
2074 
2075     // Forward the arguments from the caller frame.
2076     {
2077       Label loop;
2078       __ AddP(r6, r6, Operand(kPointerSize));
2079       __ AddP(r2, r2, r7);
2080       __ bind(&loop);
2081       {
2082         __ ShiftLeftP(ip, r7, Operand(kPointerSizeLog2));
2083         __ LoadP(ip, MemOperand(r6, ip));
2084         __ push(ip);
2085         __ SubP(r7, r7, Operand(1));
2086         __ CmpP(r7, Operand::Zero());
2087         __ bne(&loop);
2088       }
2089     }
2090   }
2091   __ b(&stack_done);
2092   __ bind(&stack_overflow);
2093   __ TailCallRuntime(Runtime::kThrowStackOverflow);
2094   __ bind(&stack_done);
2095 
2096   // Tail-call to the {code} handler.
2097   __ Jump(code, RelocInfo::CODE_TARGET);
2098 }
2099 
2100 // static
Generate_CallFunction(MacroAssembler * masm,ConvertReceiverMode mode)2101 void Builtins::Generate_CallFunction(MacroAssembler* masm,
2102                                      ConvertReceiverMode mode) {
2103   // ----------- S t a t e -------------
2104   //  -- r2 : the number of arguments (not including the receiver)
2105   //  -- r3 : the function to call (checked to be a JSFunction)
2106   // -----------------------------------
2107   __ AssertFunction(r3);
2108 
2109   // See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
2110   // Check that the function is not a "classConstructor".
2111   Label class_constructor;
2112   __ LoadP(r4, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
2113   __ LoadlW(r5, FieldMemOperand(r4, SharedFunctionInfo::kFlagsOffset));
2114   __ TestBitMask(r5, SharedFunctionInfo::IsClassConstructorBit::kMask, r0);
2115   __ bne(&class_constructor);
2116 
2117   // Enter the context of the function; ToObject has to run in the function
2118   // context, and we also need to take the global proxy from the function
2119   // context in case of conversion.
2120   __ LoadP(cp, FieldMemOperand(r3, JSFunction::kContextOffset));
2121   // We need to convert the receiver for non-native sloppy mode functions.
2122   Label done_convert;
2123   __ AndP(r0, r5,
2124           Operand(SharedFunctionInfo::IsStrictBit::kMask |
2125                   SharedFunctionInfo::IsNativeBit::kMask));
2126   __ bne(&done_convert);
2127   {
2128     // ----------- S t a t e -------------
2129     //  -- r2 : the number of arguments (not including the receiver)
2130     //  -- r3 : the function to call (checked to be a JSFunction)
2131     //  -- r4 : the shared function info.
2132     //  -- cp : the function context.
2133     // -----------------------------------
2134 
2135     if (mode == ConvertReceiverMode::kNullOrUndefined) {
2136       // Patch receiver to global proxy.
2137       __ LoadGlobalProxy(r5);
2138     } else {
2139       Label convert_to_object, convert_receiver;
2140       __ ShiftLeftP(r5, r2, Operand(kPointerSizeLog2));
2141       __ LoadP(r5, MemOperand(sp, r5));
2142       __ JumpIfSmi(r5, &convert_to_object);
2143       STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
2144       __ CompareObjectType(r5, r6, r6, FIRST_JS_RECEIVER_TYPE);
2145       __ bge(&done_convert);
2146       if (mode != ConvertReceiverMode::kNotNullOrUndefined) {
2147         Label convert_global_proxy;
2148         __ JumpIfRoot(r5, Heap::kUndefinedValueRootIndex,
2149                       &convert_global_proxy);
2150         __ JumpIfNotRoot(r5, Heap::kNullValueRootIndex, &convert_to_object);
2151         __ bind(&convert_global_proxy);
2152         {
2153           // Patch receiver to global proxy.
2154           __ LoadGlobalProxy(r5);
2155         }
2156         __ b(&convert_receiver);
2157       }
2158       __ bind(&convert_to_object);
2159       {
2160         // Convert receiver using ToObject.
2161         // TODO(bmeurer): Inline the allocation here to avoid building the frame
2162         // in the fast case? (fall back to AllocateInNewSpace?)
2163         FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
2164         __ SmiTag(r2);
2165         __ Push(r2, r3);
2166         __ LoadRR(r2, r5);
2167         __ Push(cp);
2168         __ Call(BUILTIN_CODE(masm->isolate(), ToObject),
2169                 RelocInfo::CODE_TARGET);
2170         __ Pop(cp);
2171         __ LoadRR(r5, r2);
2172         __ Pop(r2, r3);
2173         __ SmiUntag(r2);
2174       }
2175       __ LoadP(r4, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
2176       __ bind(&convert_receiver);
2177     }
2178     __ ShiftLeftP(r6, r2, Operand(kPointerSizeLog2));
2179     __ StoreP(r5, MemOperand(sp, r6));
2180   }
2181   __ bind(&done_convert);
2182 
2183   // ----------- S t a t e -------------
2184   //  -- r2 : the number of arguments (not including the receiver)
2185   //  -- r3 : the function to call (checked to be a JSFunction)
2186   //  -- r4 : the shared function info.
2187   //  -- cp : the function context.
2188   // -----------------------------------
2189 
2190   __ LoadW(
2191       r4, FieldMemOperand(r4, SharedFunctionInfo::kFormalParameterCountOffset));
2192   ParameterCount actual(r2);
2193   ParameterCount expected(r4);
2194   __ InvokeFunctionCode(r3, no_reg, expected, actual, JUMP_FUNCTION);
2195 
2196   // The function is a "classConstructor", need to raise an exception.
2197   __ bind(&class_constructor);
2198   {
2199     FrameAndConstantPoolScope frame(masm, StackFrame::INTERNAL);
2200     __ push(r3);
2201     __ CallRuntime(Runtime::kThrowConstructorNonCallableError);
2202   }
2203 }
2204 
2205 namespace {
2206 
Generate_PushBoundArguments(MacroAssembler * masm)2207 void Generate_PushBoundArguments(MacroAssembler* masm) {
2208   // ----------- S t a t e -------------
2209   //  -- r2 : the number of arguments (not including the receiver)
2210   //  -- r3 : target (checked to be a JSBoundFunction)
2211   //  -- r5 : new.target (only in case of [[Construct]])
2212   // -----------------------------------
2213 
2214   // Load [[BoundArguments]] into r4 and length of that into r6.
2215   Label no_bound_arguments;
2216   __ LoadP(r4, FieldMemOperand(r3, JSBoundFunction::kBoundArgumentsOffset));
2217   __ LoadP(r6, FieldMemOperand(r4, FixedArray::kLengthOffset));
2218   __ SmiUntag(r6);
2219   __ LoadAndTestP(r6, r6);
2220   __ beq(&no_bound_arguments);
2221   {
2222     // ----------- S t a t e -------------
2223     //  -- r2 : the number of arguments (not including the receiver)
2224     //  -- r3 : target (checked to be a JSBoundFunction)
2225     //  -- r4 : the [[BoundArguments]] (implemented as FixedArray)
2226     //  -- r5 : new.target (only in case of [[Construct]])
2227     //  -- r6 : the number of [[BoundArguments]]
2228     // -----------------------------------
2229 
2230     // Reserve stack space for the [[BoundArguments]].
2231     {
2232       Label done;
2233       __ LoadRR(r8, sp);  // preserve previous stack pointer
2234       __ ShiftLeftP(r9, r6, Operand(kPointerSizeLog2));
2235       __ SubP(sp, sp, r9);
2236       // Check the stack for overflow. We are not trying to catch interruptions
2237       // (i.e. debug break and preemption) here, so check the "real stack
2238       // limit".
2239       __ CompareRoot(sp, Heap::kRealStackLimitRootIndex);
2240       __ bgt(&done);  // Signed comparison.
2241       // Restore the stack pointer.
2242       __ LoadRR(sp, r8);
2243       {
2244         FrameScope scope(masm, StackFrame::MANUAL);
2245         __ EnterFrame(StackFrame::INTERNAL);
2246         __ CallRuntime(Runtime::kThrowStackOverflow);
2247       }
2248       __ bind(&done);
2249     }
2250 
2251     // Relocate arguments down the stack.
2252     //  -- r2 : the number of arguments (not including the receiver)
2253     //  -- r8 : the previous stack pointer
2254     //  -- r9: the size of the [[BoundArguments]]
2255     {
2256       Label skip, loop;
2257       __ LoadImmP(r7, Operand::Zero());
2258       __ CmpP(r2, Operand::Zero());
2259       __ beq(&skip);
2260       __ LoadRR(r1, r2);
2261       __ bind(&loop);
2262       __ LoadP(r0, MemOperand(r8, r7));
2263       __ StoreP(r0, MemOperand(sp, r7));
2264       __ AddP(r7, r7, Operand(kPointerSize));
2265       __ BranchOnCount(r1, &loop);
2266       __ bind(&skip);
2267     }
2268 
2269     // Copy [[BoundArguments]] to the stack (below the arguments).
2270     {
2271       Label loop;
2272       __ AddP(r4, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
2273       __ AddP(r4, r4, r9);
2274       __ LoadRR(r1, r6);
2275       __ bind(&loop);
2276       __ LoadP(r0, MemOperand(r4, -kPointerSize));
2277       __ lay(r4, MemOperand(r4, -kPointerSize));
2278       __ StoreP(r0, MemOperand(sp, r7));
2279       __ AddP(r7, r7, Operand(kPointerSize));
2280       __ BranchOnCount(r1, &loop);
2281       __ AddP(r2, r2, r6);
2282     }
2283   }
2284   __ bind(&no_bound_arguments);
2285 }
2286 
2287 }  // namespace
2288 
2289 // static
Generate_CallBoundFunctionImpl(MacroAssembler * masm)2290 void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
2291   // ----------- S t a t e -------------
2292   //  -- r2 : the number of arguments (not including the receiver)
2293   //  -- r3 : the function to call (checked to be a JSBoundFunction)
2294   // -----------------------------------
2295   __ AssertBoundFunction(r3);
2296 
2297   // Patch the receiver to [[BoundThis]].
2298   __ LoadP(ip, FieldMemOperand(r3, JSBoundFunction::kBoundThisOffset));
2299   __ ShiftLeftP(r1, r2, Operand(kPointerSizeLog2));
2300   __ StoreP(ip, MemOperand(sp, r1));
2301 
2302   // Push the [[BoundArguments]] onto the stack.
2303   Generate_PushBoundArguments(masm);
2304 
2305   // Call the [[BoundTargetFunction]] via the Call builtin.
2306   __ LoadP(r3,
2307            FieldMemOperand(r3, JSBoundFunction::kBoundTargetFunctionOffset));
2308   __ Jump(BUILTIN_CODE(masm->isolate(), Call_ReceiverIsAny),
2309           RelocInfo::CODE_TARGET);
2310 }
2311 
2312 // static
Generate_Call(MacroAssembler * masm,ConvertReceiverMode mode)2313 void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
2314   // ----------- S t a t e -------------
2315   //  -- r2 : the number of arguments (not including the receiver)
2316   //  -- r3 : the target to call (can be any Object).
2317   // -----------------------------------
2318 
2319   Label non_callable, non_function, non_smi;
2320   __ JumpIfSmi(r3, &non_callable);
2321   __ bind(&non_smi);
2322   __ CompareObjectType(r3, r6, r7, JS_FUNCTION_TYPE);
2323   __ Jump(masm->isolate()->builtins()->CallFunction(mode),
2324           RelocInfo::CODE_TARGET, eq);
2325   __ CmpP(r7, Operand(JS_BOUND_FUNCTION_TYPE));
2326   __ Jump(BUILTIN_CODE(masm->isolate(), CallBoundFunction),
2327           RelocInfo::CODE_TARGET, eq);
2328 
2329   // Check if target has a [[Call]] internal method.
2330   __ LoadlB(r6, FieldMemOperand(r6, Map::kBitFieldOffset));
2331   __ TestBit(r6, Map::IsCallableBit::kShift);
2332   __ beq(&non_callable);
2333 
2334   // Check if target is a proxy and call CallProxy external builtin
2335   __ CmpP(r7, Operand(JS_PROXY_TYPE));
2336   __ bne(&non_function);
2337   __ Jump(BUILTIN_CODE(masm->isolate(), CallProxy), RelocInfo::CODE_TARGET);
2338 
2339   // 2. Call to something else, which might have a [[Call]] internal method (if
2340   // not we raise an exception).
2341   __ bind(&non_function);
2342   // Overwrite the original receiver the (original) target.
2343   __ ShiftLeftP(r7, r2, Operand(kPointerSizeLog2));
2344   __ StoreP(r3, MemOperand(sp, r7));
2345   // Let the "call_as_function_delegate" take care of the rest.
2346   __ LoadNativeContextSlot(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, r3);
2347   __ Jump(masm->isolate()->builtins()->CallFunction(
2348               ConvertReceiverMode::kNotNullOrUndefined),
2349           RelocInfo::CODE_TARGET);
2350 
2351   // 3. Call to something that is not callable.
2352   __ bind(&non_callable);
2353   {
2354     FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
2355     __ Push(r3);
2356     __ CallRuntime(Runtime::kThrowCalledNonCallable);
2357   }
2358 }
2359 
2360 // static
Generate_ConstructFunction(MacroAssembler * masm)2361 void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
2362   // ----------- S t a t e -------------
2363   //  -- r2 : the number of arguments (not including the receiver)
2364   //  -- r3 : the constructor to call (checked to be a JSFunction)
2365   //  -- r5 : the new target (checked to be a constructor)
2366   // -----------------------------------
2367   __ AssertConstructor(r3, r1);
2368   __ AssertFunction(r3);
2369 
2370   // Calling convention for function specific ConstructStubs require
2371   // r4 to contain either an AllocationSite or undefined.
2372   __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
2373 
2374   Label call_generic_stub;
2375 
2376   // Jump to JSBuiltinsConstructStub or JSConstructStubGeneric.
2377   __ LoadP(r6, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
2378   __ LoadlW(r6, FieldMemOperand(r6, SharedFunctionInfo::kFlagsOffset));
2379   __ AndP(r6, Operand(SharedFunctionInfo::ConstructAsBuiltinBit::kMask));
2380   __ beq(&call_generic_stub);
2381 
2382   __ Jump(BUILTIN_CODE(masm->isolate(), JSBuiltinsConstructStub),
2383           RelocInfo::CODE_TARGET);
2384 
2385   __ bind(&call_generic_stub);
2386   __ Jump(BUILTIN_CODE(masm->isolate(), JSConstructStubGeneric),
2387           RelocInfo::CODE_TARGET);
2388 }
2389 
2390 // static
Generate_ConstructBoundFunction(MacroAssembler * masm)2391 void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
2392   // ----------- S t a t e -------------
2393   //  -- r2 : the number of arguments (not including the receiver)
2394   //  -- r3 : the function to call (checked to be a JSBoundFunction)
2395   //  -- r5 : the new target (checked to be a constructor)
2396   // -----------------------------------
2397   __ AssertConstructor(r3, r1);
2398   __ AssertBoundFunction(r3);
2399 
2400   // Push the [[BoundArguments]] onto the stack.
2401   Generate_PushBoundArguments(masm);
2402 
2403   // Patch new.target to [[BoundTargetFunction]] if new.target equals target.
2404   Label skip;
2405   __ CmpP(r3, r5);
2406   __ bne(&skip);
2407   __ LoadP(r5,
2408            FieldMemOperand(r3, JSBoundFunction::kBoundTargetFunctionOffset));
2409   __ bind(&skip);
2410 
2411   // Construct the [[BoundTargetFunction]] via the Construct builtin.
2412   __ LoadP(r3,
2413            FieldMemOperand(r3, JSBoundFunction::kBoundTargetFunctionOffset));
2414   __ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET);
2415 }
2416 
2417 // static
Generate_Construct(MacroAssembler * masm)2418 void Builtins::Generate_Construct(MacroAssembler* masm) {
2419   // ----------- S t a t e -------------
2420   //  -- r2 : the number of arguments (not including the receiver)
2421   //  -- r3 : the constructor to call (can be any Object)
2422   //  -- r5 : the new target (either the same as the constructor or
2423   //          the JSFunction on which new was invoked initially)
2424   // -----------------------------------
2425 
2426   // Check if target is a Smi.
2427   Label non_constructor, non_proxy;
2428   __ JumpIfSmi(r3, &non_constructor);
2429 
2430   // Check if target has a [[Construct]] internal method.
2431   __ LoadP(r6, FieldMemOperand(r3, HeapObject::kMapOffset));
2432   __ LoadlB(r4, FieldMemOperand(r6, Map::kBitFieldOffset));
2433   __ TestBit(r4, Map::IsConstructorBit::kShift);
2434   __ beq(&non_constructor);
2435 
2436   // Dispatch based on instance type.
2437   __ CompareInstanceType(r6, r7, JS_FUNCTION_TYPE);
2438   __ Jump(BUILTIN_CODE(masm->isolate(), ConstructFunction),
2439           RelocInfo::CODE_TARGET, eq);
2440 
2441   // Only dispatch to bound functions after checking whether they are
2442   // constructors.
2443   __ CmpP(r7, Operand(JS_BOUND_FUNCTION_TYPE));
2444   __ Jump(BUILTIN_CODE(masm->isolate(), ConstructBoundFunction),
2445           RelocInfo::CODE_TARGET, eq);
2446 
2447   // Only dispatch to proxies after checking whether they are constructors.
2448   __ CmpP(r7, Operand(JS_PROXY_TYPE));
2449   __ bne(&non_proxy);
2450   __ Jump(BUILTIN_CODE(masm->isolate(), ConstructProxy),
2451           RelocInfo::CODE_TARGET);
2452 
2453   // Called Construct on an exotic Object with a [[Construct]] internal method.
2454   __ bind(&non_proxy);
2455   {
2456     // Overwrite the original receiver with the (original) target.
2457     __ ShiftLeftP(r7, r2, Operand(kPointerSizeLog2));
2458     __ StoreP(r3, MemOperand(sp, r7));
2459     // Let the "call_as_constructor_delegate" take care of the rest.
2460     __ LoadNativeContextSlot(Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, r3);
2461     __ Jump(masm->isolate()->builtins()->CallFunction(),
2462             RelocInfo::CODE_TARGET);
2463   }
2464 
2465   // Called Construct on an Object that doesn't have a [[Construct]] internal
2466   // method.
2467   __ bind(&non_constructor);
2468   __ Jump(BUILTIN_CODE(masm->isolate(), ConstructedNonConstructable),
2469           RelocInfo::CODE_TARGET);
2470 }
2471 
2472 // static
Generate_AllocateInNewSpace(MacroAssembler * masm)2473 void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
2474   // ----------- S t a t e -------------
2475   //  -- r3 : requested object size (untagged)
2476   //  -- lr : return address
2477   // -----------------------------------
2478   __ SmiTag(r3);
2479   __ Push(r3);
2480   __ LoadSmiLiteral(cp, Smi::kZero);
2481   __ TailCallRuntime(Runtime::kAllocateInNewSpace);
2482 }
2483 
2484 // static
Generate_AllocateInOldSpace(MacroAssembler * masm)2485 void Builtins::Generate_AllocateInOldSpace(MacroAssembler* masm) {
2486   // ----------- S t a t e -------------
2487   //  -- r3 : requested object size (untagged)
2488   //  -- lr : return address
2489   // -----------------------------------
2490   __ SmiTag(r3);
2491   __ LoadSmiLiteral(r4, Smi::FromInt(AllocateTargetSpace::encode(OLD_SPACE)));
2492   __ Push(r3, r4);
2493   __ LoadSmiLiteral(cp, Smi::kZero);
2494   __ TailCallRuntime(Runtime::kAllocateInTargetSpace);
2495 }
2496 
2497 // static
Generate_Abort(MacroAssembler * masm)2498 void Builtins::Generate_Abort(MacroAssembler* masm) {
2499   // ----------- S t a t e -------------
2500   //  -- r3 : message_id as Smi
2501   //  -- lr : return address
2502   // -----------------------------------
2503   __ push(r3);
2504   __ LoadSmiLiteral(cp, Smi::kZero);
2505   __ TailCallRuntime(Runtime::kAbort);
2506 }
2507 
Generate_ArgumentsAdaptorTrampoline(MacroAssembler * masm)2508 void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
2509   // ----------- S t a t e -------------
2510   //  -- r2 : actual number of arguments
2511   //  -- r3 : function (passed through to callee)
2512   //  -- r4 : expected number of arguments
2513   //  -- r5 : new target (passed through to callee)
2514   // -----------------------------------
2515 
2516   Label invoke, dont_adapt_arguments, stack_overflow;
2517 
2518   Label enough, too_few;
2519   __ CmpP(r2, r4);
2520   __ blt(&too_few);
2521   __ CmpP(r4, Operand(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
2522   __ beq(&dont_adapt_arguments);
2523 
2524   {  // Enough parameters: actual >= expected
2525     __ bind(&enough);
2526     EnterArgumentsAdaptorFrame(masm);
2527     Generate_StackOverflowCheck(masm, r4, r7, &stack_overflow);
2528 
2529     // Calculate copy start address into r2 and copy end address into r6.
2530     // r2: actual number of arguments as a smi
2531     // r3: function
2532     // r4: expected number of arguments
2533     // r5: new target (passed through to callee)
2534     __ SmiToPtrArrayOffset(r2, r2);
2535     __ AddP(r2, fp);
2536     // adjust for return address and receiver
2537     __ AddP(r2, r2, Operand(2 * kPointerSize));
2538     __ ShiftLeftP(r6, r4, Operand(kPointerSizeLog2));
2539     __ SubP(r6, r2, r6);
2540 
2541     // Copy the arguments (including the receiver) to the new stack frame.
2542     // r2: copy start address
2543     // r3: function
2544     // r4: expected number of arguments
2545     // r5: new target (passed through to callee)
2546     // r6: copy end address
2547 
2548     Label copy;
2549     __ bind(&copy);
2550     __ LoadP(r0, MemOperand(r2, 0));
2551     __ push(r0);
2552     __ CmpP(r2, r6);  // Compare before moving to next argument.
2553     __ lay(r2, MemOperand(r2, -kPointerSize));
2554     __ bne(&copy);
2555 
2556     __ b(&invoke);
2557   }
2558 
2559   {  // Too few parameters: Actual < expected
2560     __ bind(&too_few);
2561 
2562     EnterArgumentsAdaptorFrame(masm);
2563     Generate_StackOverflowCheck(masm, r4, r7, &stack_overflow);
2564 
2565     // Calculate copy start address into r0 and copy end address is fp.
2566     // r2: actual number of arguments as a smi
2567     // r3: function
2568     // r4: expected number of arguments
2569     // r5: new target (passed through to callee)
2570     __ SmiToPtrArrayOffset(r2, r2);
2571     __ lay(r2, MemOperand(r2, fp));
2572 
2573     // Copy the arguments (including the receiver) to the new stack frame.
2574     // r2: copy start address
2575     // r3: function
2576     // r4: expected number of arguments
2577     // r5: new target (passed through to callee)
2578     Label copy;
2579     __ bind(&copy);
2580     // Adjust load for return address and receiver.
2581     __ LoadP(r0, MemOperand(r2, 2 * kPointerSize));
2582     __ push(r0);
2583     __ CmpP(r2, fp);  // Compare before moving to next argument.
2584     __ lay(r2, MemOperand(r2, -kPointerSize));
2585     __ bne(&copy);
2586 
2587     // Fill the remaining expected arguments with undefined.
2588     // r3: function
2589     // r4: expected number of argumentus
2590     __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
2591     __ ShiftLeftP(r6, r4, Operand(kPointerSizeLog2));
2592     __ SubP(r6, fp, r6);
2593     // Adjust for frame.
2594     __ SubP(r6, r6,
2595             Operand(ArgumentsAdaptorFrameConstants::kFixedFrameSizeFromFp +
2596                     kPointerSize));
2597 
2598     Label fill;
2599     __ bind(&fill);
2600     __ push(r0);
2601     __ CmpP(sp, r6);
2602     __ bne(&fill);
2603   }
2604 
2605   // Call the entry point.
2606   __ bind(&invoke);
2607   __ LoadRR(r2, r4);
2608   // r2 : expected number of arguments
2609   // r3 : function (passed through to callee)
2610   // r5 : new target (passed through to callee)
2611   static_assert(kJavaScriptCallCodeStartRegister == r4, "ABI mismatch");
2612   __ LoadP(r4, FieldMemOperand(r3, JSFunction::kCodeOffset));
2613   __ AddP(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
2614   __ CallJSEntry(r4);
2615 
2616   // Store offset of return address for deoptimizer.
2617   masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
2618 
2619   // Exit frame and return.
2620   LeaveArgumentsAdaptorFrame(masm);
2621   __ Ret();
2622 
2623   // -------------------------------------------
2624   // Dont adapt arguments.
2625   // -------------------------------------------
2626   __ bind(&dont_adapt_arguments);
2627   static_assert(kJavaScriptCallCodeStartRegister == r4, "ABI mismatch");
2628   __ LoadP(r4, FieldMemOperand(r3, JSFunction::kCodeOffset));
2629   __ AddP(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
2630   __ JumpToJSEntry(r4);
2631 
2632   __ bind(&stack_overflow);
2633   {
2634     FrameScope frame(masm, StackFrame::MANUAL);
2635     __ CallRuntime(Runtime::kThrowStackOverflow);
2636     __ bkpt(0);
2637   }
2638 }
2639 
Generate_WasmCompileLazy(MacroAssembler * masm)2640 void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
2641   {
2642     FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
2643 
2644     // Save all parameter registers (see wasm-linkage.cc). They might be
2645     // overwritten in the runtime call below. We don't have any callee-saved
2646     // registers in wasm, so no need to store anything else.
2647     constexpr RegList gp_regs = Register::ListOf<r2, r3, r4, r5, r6>();
2648 #if V8_TARGET_ARCH_S390X
2649     constexpr RegList fp_regs = DoubleRegister::ListOf<d0, d2, d4, d6>();
2650 #else
2651     constexpr RegList fp_regs = DoubleRegister::ListOf<d0, d2>();
2652 #endif
2653     __ MultiPush(gp_regs);
2654     __ MultiPushDoubles(fp_regs);
2655 
2656     // Pass the WASM instance as an explicit argument to WasmCompileLazy.
2657     __ Push(kWasmInstanceRegister);
2658     // Initialize the JavaScript context with 0. CEntry will use it to
2659     // set the current context on the isolate.
2660     __ LoadSmiLiteral(cp, Smi::kZero);
2661     __ CallRuntime(Runtime::kWasmCompileLazy);
2662     // The entrypoint address is the first return value.
2663     __ LoadRR(ip, r2);
2664     // The WASM instance is the second return value.
2665     __ LoadRR(kWasmInstanceRegister, kReturnRegister1);
2666 
2667     // Restore registers.
2668     __ MultiPopDoubles(fp_regs);
2669     __ MultiPop(gp_regs);
2670   }
2671   // Finally, jump to the entrypoint.
2672   __ Jump(ip);
2673 }
2674 
Generate_CEntry(MacroAssembler * masm,int result_size,SaveFPRegsMode save_doubles,ArgvMode argv_mode,bool builtin_exit_frame)2675 void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
2676                                SaveFPRegsMode save_doubles, ArgvMode argv_mode,
2677                                bool builtin_exit_frame) {
2678   // Called from JavaScript; parameters are on stack as if calling JS function.
2679   // r2: number of arguments including receiver
2680   // r3: pointer to builtin function
2681   // fp: frame pointer  (restored after C call)
2682   // sp: stack pointer  (restored as callee's sp after C call)
2683   // cp: current context  (C callee-saved)
2684   //
2685   // If argv_mode == kArgvInRegister:
2686   // r4: pointer to the first argument
2687   ProfileEntryHookStub::MaybeCallEntryHook(masm);
2688 
2689   __ LoadRR(r7, r3);
2690 
2691   if (argv_mode == kArgvInRegister) {
2692     // Move argv into the correct register.
2693     __ LoadRR(r3, r4);
2694   } else {
2695     // Compute the argv pointer.
2696     __ ShiftLeftP(r3, r2, Operand(kPointerSizeLog2));
2697     __ lay(r3, MemOperand(r3, sp, -kPointerSize));
2698   }
2699 
2700   // Enter the exit frame that transitions from JavaScript to C++.
2701   FrameScope scope(masm, StackFrame::MANUAL);
2702 
2703   // Need at least one extra slot for return address location.
2704   int arg_stack_space = 1;
2705 
2706   // Pass buffer for return value on stack if necessary
2707   bool needs_return_buffer =
2708       result_size == 2 && !ABI_RETURNS_OBJECTPAIR_IN_REGS;
2709   if (needs_return_buffer) {
2710     arg_stack_space += result_size;
2711   }
2712 
2713 #if V8_TARGET_ARCH_S390X
2714   // 64-bit linux pass Argument object by reference not value
2715   arg_stack_space += 2;
2716 #endif
2717 
2718   __ EnterExitFrame(
2719       save_doubles, arg_stack_space,
2720       builtin_exit_frame ? StackFrame::BUILTIN_EXIT : StackFrame::EXIT);
2721 
2722   // Store a copy of argc, argv in callee-saved registers for later.
2723   __ LoadRR(r6, r2);
2724   __ LoadRR(r8, r3);
2725   // r2, r6: number of arguments including receiver  (C callee-saved)
2726   // r3, r8: pointer to the first argument
2727   // r7: pointer to builtin function  (C callee-saved)
2728 
2729   // Result returned in registers or stack, depending on result size and ABI.
2730 
2731   Register isolate_reg = r4;
2732   if (needs_return_buffer) {
2733     // The return value is 16-byte non-scalar value.
2734     // Use frame storage reserved by calling function to pass return
2735     // buffer as implicit first argument in R2.  Shfit original parameters
2736     // by one register each.
2737     __ LoadRR(r4, r3);
2738     __ LoadRR(r3, r2);
2739     __ la(r2, MemOperand(sp, (kStackFrameExtraParamSlot + 1) * kPointerSize));
2740     isolate_reg = r5;
2741   }
2742   // Call C built-in.
2743   __ Move(isolate_reg, ExternalReference::isolate_address(masm->isolate()));
2744 
2745   Register target = r7;
2746 
2747   // To let the GC traverse the return address of the exit frames, we need to
2748   // know where the return address is. The CEntryStub is unmovable, so
2749   // we can store the address on the stack to be able to find it again and
2750   // we never have to restore it, because it will not change.
2751   {
2752     Label return_label;
2753     __ larl(r14, &return_label);  // Generate the return addr of call later.
2754     __ StoreP(r14, MemOperand(sp, kStackFrameRASlot * kPointerSize));
2755 
2756     // zLinux ABI requires caller's frame to have sufficient space for callee
2757     // preserved regsiter save area.
2758     // __ lay(sp, MemOperand(sp, -kCalleeRegisterSaveAreaSize));
2759     __ b(target);
2760     __ bind(&return_label);
2761     // __ la(sp, MemOperand(sp, +kCalleeRegisterSaveAreaSize));
2762   }
2763 
2764   // If return value is on the stack, pop it to registers.
2765   if (needs_return_buffer) {
2766     __ LoadP(r3, MemOperand(r2, kPointerSize));
2767     __ LoadP(r2, MemOperand(r2));
2768   }
2769 
2770   // Check result for exception sentinel.
2771   Label exception_returned;
2772   __ CompareRoot(r2, Heap::kExceptionRootIndex);
2773   __ beq(&exception_returned, Label::kNear);
2774 
2775   // Check that there is no pending exception, otherwise we
2776   // should have returned the exception sentinel.
2777   if (FLAG_debug_code) {
2778     Label okay;
2779     ExternalReference pending_exception_address = ExternalReference::Create(
2780         IsolateAddressId::kPendingExceptionAddress, masm->isolate());
2781     __ Move(r1, pending_exception_address);
2782     __ LoadP(r1, MemOperand(r1));
2783     __ CompareRoot(r1, Heap::kTheHoleValueRootIndex);
2784     // Cannot use check here as it attempts to generate call into runtime.
2785     __ beq(&okay, Label::kNear);
2786     __ stop("Unexpected pending exception");
2787     __ bind(&okay);
2788   }
2789 
2790   // Exit C frame and return.
2791   // r2:r3: result
2792   // sp: stack pointer
2793   // fp: frame pointer
2794   Register argc = argv_mode == kArgvInRegister
2795                       // We don't want to pop arguments so set argc to no_reg.
2796                       ? no_reg
2797                       // r6: still holds argc (callee-saved).
2798                       : r6;
2799   __ LeaveExitFrame(save_doubles, argc);
2800   __ b(r14);
2801 
2802   // Handling of exception.
2803   __ bind(&exception_returned);
2804 
2805   ExternalReference pending_handler_context_address = ExternalReference::Create(
2806       IsolateAddressId::kPendingHandlerContextAddress, masm->isolate());
2807   ExternalReference pending_handler_entrypoint_address =
2808       ExternalReference::Create(
2809           IsolateAddressId::kPendingHandlerEntrypointAddress, masm->isolate());
2810   ExternalReference pending_handler_fp_address = ExternalReference::Create(
2811       IsolateAddressId::kPendingHandlerFPAddress, masm->isolate());
2812   ExternalReference pending_handler_sp_address = ExternalReference::Create(
2813       IsolateAddressId::kPendingHandlerSPAddress, masm->isolate());
2814 
2815   // Ask the runtime for help to determine the handler. This will set r3 to
2816   // contain the current pending exception, don't clobber it.
2817   ExternalReference find_handler =
2818       ExternalReference::Create(Runtime::kUnwindAndFindExceptionHandler);
2819   {
2820     FrameScope scope(masm, StackFrame::MANUAL);
2821     __ PrepareCallCFunction(3, 0, r2);
2822     __ LoadImmP(r2, Operand::Zero());
2823     __ LoadImmP(r3, Operand::Zero());
2824     __ Move(r4, ExternalReference::isolate_address(masm->isolate()));
2825     __ CallCFunction(find_handler, 3);
2826   }
2827 
2828   // Retrieve the handler context, SP and FP.
2829   __ Move(cp, pending_handler_context_address);
2830   __ LoadP(cp, MemOperand(cp));
2831   __ Move(sp, pending_handler_sp_address);
2832   __ LoadP(sp, MemOperand(sp));
2833   __ Move(fp, pending_handler_fp_address);
2834   __ LoadP(fp, MemOperand(fp));
2835 
2836   // If the handler is a JS frame, restore the context to the frame. Note that
2837   // the context will be set to (cp == 0) for non-JS frames.
2838   Label skip;
2839   __ CmpP(cp, Operand::Zero());
2840   __ beq(&skip, Label::kNear);
2841   __ StoreP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
2842   __ bind(&skip);
2843 
2844   // Reset the masking register.
2845   if (FLAG_branch_load_poisoning) {
2846     __ ResetSpeculationPoisonRegister();
2847   }
2848 
2849   // Compute the handler entry address and jump to it.
2850   __ Move(r3, pending_handler_entrypoint_address);
2851   __ LoadP(r3, MemOperand(r3));
2852   __ Jump(r3);
2853 }
2854 
Generate_DoubleToI(MacroAssembler * masm)2855 void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
2856   Label out_of_range, only_low, negate, done, fastpath_done;
2857   Register result_reg = r2;
2858 
2859   // Immediate values for this stub fit in instructions, so it's safe to use ip.
2860   Register scratch = GetRegisterThatIsNotOneOf(result_reg);
2861   Register scratch_low = GetRegisterThatIsNotOneOf(result_reg, scratch);
2862   Register scratch_high =
2863       GetRegisterThatIsNotOneOf(result_reg, scratch, scratch_low);
2864   DoubleRegister double_scratch = kScratchDoubleReg;
2865 
2866   __ Push(result_reg, scratch);
2867   // Account for saved regs.
2868   int argument_offset = 2 * kPointerSize;
2869 
2870   // Load double input.
2871   __ LoadDouble(double_scratch, MemOperand(sp, argument_offset));
2872 
2873   // Do fast-path convert from double to int.
2874   __ ConvertDoubleToInt64(result_reg, double_scratch);
2875 
2876   // Test for overflow
2877   __ TestIfInt32(result_reg);
2878   __ beq(&fastpath_done, Label::kNear);
2879 
2880   __ Push(scratch_high, scratch_low);
2881   // Account for saved regs.
2882   argument_offset += 2 * kPointerSize;
2883 
2884   __ LoadlW(scratch_high,
2885             MemOperand(sp, argument_offset + Register::kExponentOffset));
2886   __ LoadlW(scratch_low,
2887             MemOperand(sp, argument_offset + Register::kMantissaOffset));
2888 
2889   __ ExtractBitMask(scratch, scratch_high, HeapNumber::kExponentMask);
2890   // Load scratch with exponent - 1. This is faster than loading
2891   // with exponent because Bias + 1 = 1024 which is a *S390* immediate value.
2892   STATIC_ASSERT(HeapNumber::kExponentBias + 1 == 1024);
2893   __ SubP(scratch, Operand(HeapNumber::kExponentBias + 1));
2894   // If exponent is greater than or equal to 84, the 32 less significant
2895   // bits are 0s (2^84 = 1, 52 significant bits, 32 uncoded bits),
2896   // the result is 0.
2897   // Compare exponent with 84 (compare exponent - 1 with 83).
2898   __ CmpP(scratch, Operand(83));
2899   __ bge(&out_of_range, Label::kNear);
2900 
2901   // If we reach this code, 31 <= exponent <= 83.
2902   // So, we don't have to handle cases where 0 <= exponent <= 20 for
2903   // which we would need to shift right the high part of the mantissa.
2904   // Scratch contains exponent - 1.
2905   // Load scratch with 52 - exponent (load with 51 - (exponent - 1)).
2906   __ Load(r0, Operand(51));
2907   __ SubP(scratch, r0, scratch);
2908   __ CmpP(scratch, Operand::Zero());
2909   __ ble(&only_low, Label::kNear);
2910   // 21 <= exponent <= 51, shift scratch_low and scratch_high
2911   // to generate the result.
2912   __ ShiftRight(scratch_low, scratch_low, scratch);
2913   // Scratch contains: 52 - exponent.
2914   // We needs: exponent - 20.
2915   // So we use: 32 - scratch = 32 - 52 + exponent = exponent - 20.
2916   __ Load(r0, Operand(32));
2917   __ SubP(scratch, r0, scratch);
2918   __ ExtractBitMask(result_reg, scratch_high, HeapNumber::kMantissaMask);
2919   // Set the implicit 1 before the mantissa part in scratch_high.
2920   STATIC_ASSERT(HeapNumber::kMantissaBitsInTopWord >= 16);
2921   __ Load(r0, Operand(1 << ((HeapNumber::kMantissaBitsInTopWord)-16)));
2922   __ ShiftLeftP(r0, r0, Operand(16));
2923   __ OrP(result_reg, result_reg, r0);
2924   __ ShiftLeft(r0, result_reg, scratch);
2925   __ OrP(result_reg, scratch_low, r0);
2926   __ b(&negate, Label::kNear);
2927 
2928   __ bind(&out_of_range);
2929   __ mov(result_reg, Operand::Zero());
2930   __ b(&done, Label::kNear);
2931 
2932   __ bind(&only_low);
2933   // 52 <= exponent <= 83, shift only scratch_low.
2934   // On entry, scratch contains: 52 - exponent.
2935   __ LoadComplementRR(scratch, scratch);
2936   __ ShiftLeft(result_reg, scratch_low, scratch);
2937 
2938   __ bind(&negate);
2939   // If input was positive, scratch_high ASR 31 equals 0 and
2940   // scratch_high LSR 31 equals zero.
2941   // New result = (result eor 0) + 0 = result.
2942   // If the input was negative, we have to negate the result.
2943   // Input_high ASR 31 equals 0xFFFFFFFF and scratch_high LSR 31 equals 1.
2944   // New result = (result eor 0xFFFFFFFF) + 1 = 0 - result.
2945   __ ShiftRightArith(r0, scratch_high, Operand(31));
2946 #if V8_TARGET_ARCH_S390X
2947   __ lgfr(r0, r0);
2948   __ ShiftRightP(r0, r0, Operand(32));
2949 #endif
2950   __ XorP(result_reg, r0);
2951   __ ShiftRight(r0, scratch_high, Operand(31));
2952   __ AddP(result_reg, r0);
2953 
2954   __ bind(&done);
2955   __ Pop(scratch_high, scratch_low);
2956   argument_offset -= 2 * kPointerSize;
2957 
2958   __ bind(&fastpath_done);
2959   __ StoreP(result_reg, MemOperand(sp, argument_offset));
2960   __ Pop(result_reg, scratch);
2961 
2962   __ Ret();
2963 }
2964 
Generate_MathPowInternal(MacroAssembler * masm)2965 void Builtins::Generate_MathPowInternal(MacroAssembler* masm) {
2966   const Register exponent = MathPowTaggedDescriptor::exponent();
2967   DCHECK(exponent == r4);
2968   const DoubleRegister double_base = d1;
2969   const DoubleRegister double_exponent = d2;
2970   const DoubleRegister double_result = d3;
2971   const DoubleRegister double_scratch = d0;
2972   const Register scratch = r1;
2973   const Register scratch2 = r9;
2974 
2975   Label call_runtime, done, int_exponent;
2976 
2977   // Detect integer exponents stored as double.
2978   __ TryDoubleToInt32Exact(scratch, double_exponent, scratch2, double_scratch);
2979   __ beq(&int_exponent, Label::kNear);
2980 
2981   __ push(r14);
2982   {
2983     AllowExternalCallThatCantCauseGC scope(masm);
2984     __ PrepareCallCFunction(0, 2, scratch);
2985     __ MovToFloatParameters(double_base, double_exponent);
2986     __ CallCFunction(ExternalReference::power_double_double_function(), 0, 2);
2987   }
2988   __ pop(r14);
2989   __ MovFromFloatResult(double_result);
2990   __ b(&done);
2991 
2992   // Calculate power with integer exponent.
2993   __ bind(&int_exponent);
2994 
2995   // Get two copies of exponent in the registers scratch and exponent.
2996   // Exponent has previously been stored into scratch as untagged integer.
2997   __ LoadRR(exponent, scratch);
2998 
2999   __ ldr(double_scratch, double_base);  // Back up base.
3000   __ LoadImmP(scratch2, Operand(1));
3001   __ ConvertIntToDouble(double_result, scratch2);
3002 
3003   // Get absolute value of exponent.
3004   Label positive_exponent;
3005   __ CmpP(scratch, Operand::Zero());
3006   __ bge(&positive_exponent, Label::kNear);
3007   __ LoadComplementRR(scratch, scratch);
3008   __ bind(&positive_exponent);
3009 
3010   Label while_true, no_carry, loop_end;
3011   __ bind(&while_true);
3012   __ mov(scratch2, Operand(1));
3013   __ AndP(scratch2, scratch);
3014   __ beq(&no_carry, Label::kNear);
3015   __ mdbr(double_result, double_scratch);
3016   __ bind(&no_carry);
3017   __ ShiftRightP(scratch, scratch, Operand(1));
3018   __ LoadAndTestP(scratch, scratch);
3019   __ beq(&loop_end, Label::kNear);
3020   __ mdbr(double_scratch, double_scratch);
3021   __ b(&while_true);
3022   __ bind(&loop_end);
3023 
3024   __ CmpP(exponent, Operand::Zero());
3025   __ bge(&done);
3026 
3027   // get 1/double_result:
3028   __ ldr(double_scratch, double_result);
3029   __ LoadImmP(scratch2, Operand(1));
3030   __ ConvertIntToDouble(double_result, scratch2);
3031   __ ddbr(double_result, double_scratch);
3032 
3033   // Test whether result is zero.  Bail out to check for subnormal result.
3034   // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
3035   __ lzdr(kDoubleRegZero);
3036   __ cdbr(double_result, kDoubleRegZero);
3037   __ bne(&done, Label::kNear);
3038   // double_exponent may not containe the exponent value if the input was a
3039   // smi.  We set it with exponent value before bailing out.
3040   __ ConvertIntToDouble(double_exponent, exponent);
3041 
3042   // Returning or bailing out.
3043   __ push(r14);
3044   {
3045     AllowExternalCallThatCantCauseGC scope(masm);
3046     __ PrepareCallCFunction(0, 2, scratch);
3047     __ MovToFloatParameters(double_base, double_exponent);
3048     __ CallCFunction(ExternalReference::power_double_double_function(), 0, 2);
3049   }
3050   __ pop(r14);
3051   __ MovFromFloatResult(double_result);
3052 
3053   __ bind(&done);
3054   __ Ret();
3055 }
3056 
3057 #undef __
3058 
3059 }  // namespace internal
3060 }  // namespace v8
3061 
3062 #endif  // V8_TARGET_ARCH_S390
3063