1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #if V8_TARGET_ARCH_ARM
6
7 #include "src/assembler-inl.h"
8 #include "src/code-factory.h"
9 #include "src/code-stubs.h"
10 #include "src/counters.h"
11 #include "src/debug/debug.h"
12 #include "src/deoptimizer.h"
13 #include "src/frame-constants.h"
14 #include "src/frames.h"
15 #include "src/objects-inl.h"
16 #include "src/runtime/runtime.h"
17
18 namespace v8 {
19 namespace internal {
20
21 #define __ ACCESS_MASM(masm)
22
// Loads the C++ builtin's entry address into r5 and tail-calls the adaptor
// builtin that sets up the requested exit-frame type before entering C++.
void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address,
                                ExitFrameType exit_frame_type) {
#if defined(__thumb__)
  // Thumb mode builtin: the low bit of the entry address must be set so the
  // processor stays in Thumb state when branching to it.
  DCHECK_EQ(1, reinterpret_cast<uintptr_t>(
                   ExternalReference::Create(address).address()) &
                   1);
#endif
  // r5 carries the C++ entry point to AdaptorWithExitFrameType (below).
  __ Move(r5, ExternalReference::Create(address));
  if (exit_frame_type == BUILTIN_EXIT) {
    __ Jump(BUILTIN_CODE(masm->isolate(), AdaptorWithBuiltinExitFrame),
            RelocInfo::CODE_TARGET);
  } else {
    DCHECK(exit_frame_type == EXIT);
    __ Jump(BUILTIN_CODE(masm->isolate(), AdaptorWithExitFrame),
            RelocInfo::CODE_TARGET);
  }
}
41
42 namespace {
43
// Common tail for the two adaptor builtins: pushes the extra arguments the
// exit frame expects (padding, argc, target, new.target) and jumps to the
// CEntry stub, which will call the C++ entry point already loaded into r5.
void AdaptorWithExitFrameType(MacroAssembler* masm,
                              Builtins::ExitFrameType exit_frame_type) {
  // ----------- S t a t e -------------
  //  -- r0                 : number of arguments excluding receiver
  //  -- r1                 : target
  //  -- r3                 : new.target
  //  -- r5                 : entry point
  //  -- sp[0]              : last argument
  //  -- ...
  //  -- sp[4 * (argc - 1)] : first argument
  //  -- sp[4 * argc]       : receiver
  // -----------------------------------
  __ AssertFunction(r1);

  // Make sure we operate in the context of the called function (for example
  // ConstructStubs implemented in C++ will be run in the context of the caller
  // instead of the callee, due to the way that [[Construct]] is defined for
  // ordinary functions).
  __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));

  // CEntry expects r0 to contain the number of arguments including the
  // receiver and the extra arguments.
  __ add(r0, r0, Operand(BuiltinExitFrameConstants::kNumExtraArgsWithReceiver));

  // Insert extra arguments. r0 is smi-tagged only while it lives on the
  // stack; the register itself keeps the untagged count for CEntry.
  __ PushRoot(Heap::kTheHoleValueRootIndex);  // Padding.
  __ SmiTag(r0);
  __ Push(r0, r1, r3);
  __ SmiUntag(r0);

  // Jump to the C entry runtime stub directly here instead of using
  // JumpToExternalReference. We have already loaded entry point to r5
  // in Generate_adaptor.
  __ mov(r1, r5);
  Handle<Code> code =
      CodeFactory::CEntry(masm->isolate(), 1, kDontSaveFPRegs, kArgvOnStack,
                          exit_frame_type == Builtins::BUILTIN_EXIT);
  __ Jump(code, RelocInfo::CODE_TARGET);
}
83 } // namespace
84
// Adaptor variant that runs the C++ builtin under a plain EXIT frame.
void Builtins::Generate_AdaptorWithExitFrame(MacroAssembler* masm) {
  AdaptorWithExitFrameType(masm, EXIT);
}
88
// Adaptor variant that runs the C++ builtin under a BUILTIN_EXIT frame.
void Builtins::Generate_AdaptorWithBuiltinExitFrame(MacroAssembler* masm) {
  AdaptorWithExitFrameType(masm, BUILTIN_EXIT);
}
92
Generate_InternalArrayConstructor(MacroAssembler * masm)93 void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
94 // ----------- S t a t e -------------
95 // -- r0 : number of arguments
96 // -- lr : return address
97 // -- sp[...]: constructor arguments
98 // -----------------------------------
99 Label generic_array_code, one_or_more_arguments, two_or_more_arguments;
100
101 if (FLAG_debug_code) {
102 // Initial map for the builtin InternalArray functions should be maps.
103 __ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
104 __ SmiTst(r2);
105 __ Assert(ne, AbortReason::kUnexpectedInitialMapForInternalArrayFunction);
106 __ CompareObjectType(r2, r3, r4, MAP_TYPE);
107 __ Assert(eq, AbortReason::kUnexpectedInitialMapForInternalArrayFunction);
108 }
109
110 // Run the native code for the InternalArray function called as a normal
111 // function.
112 // tail call a stub
113 __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
114 InternalArrayConstructorStub stub(masm->isolate());
115 __ TailCallStub(&stub);
116 }
117
Generate_ArrayConstructor(MacroAssembler * masm)118 void Builtins::Generate_ArrayConstructor(MacroAssembler* masm) {
119 // ----------- S t a t e -------------
120 // -- r0 : number of arguments
121 // -- r1 : array function
122 // -- lr : return address
123 // -- sp[...]: constructor arguments
124 // -----------------------------------
125 Label generic_array_code, one_or_more_arguments, two_or_more_arguments;
126
127 if (FLAG_debug_code) {
128 // Initial map for the builtin Array functions should be maps.
129 __ ldr(r7, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
130 __ SmiTst(r7);
131 __ Assert(ne, AbortReason::kUnexpectedInitialMapForArrayFunction);
132 __ CompareObjectType(r7, r8, r9, MAP_TYPE);
133 __ Assert(eq, AbortReason::kUnexpectedInitialMapForArrayFunction);
134 }
135
136 // r2 is the AllocationSite - here undefined.
137 __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
138 // If r3 (new target) is undefined, then this is the 'Call' case, so move
139 // r1 (the constructor) to r3.
140 __ cmp(r3, r2);
141 __ mov(r3, r1, LeaveCC, eq);
142
143 // Run the native code for the Array function called as a normal function.
144 // tail call a stub
145 ArrayConstructorStub stub(masm->isolate());
146 __ TailCallStub(&stub);
147 }
148
// Calls |function_id| in the runtime with the target function as argument,
// then tail-calls the Code object the runtime returned. r0/r1/r3 are saved
// across the runtime call so the callee sees the original JS calling state.
static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
                                           Runtime::FunctionId function_id) {
  // ----------- S t a t e -------------
  //  -- r0 : argument count (preserved for callee)
  //  -- r1 : target function (preserved for callee)
  //  -- r3 : new target (preserved for callee)
  // -----------------------------------
  {
    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
    // Push the number of arguments to the callee.
    __ SmiTag(r0);
    __ push(r0);
    // Push a copy of the target function and the new target.
    __ push(r1);
    __ push(r3);
    // Push function as parameter to the runtime call.
    __ Push(r1);

    __ CallRuntime(function_id, 1);
    // The runtime result (a Code object) goes into r2, the code-start
    // register expected by the JS calling convention.
    __ mov(r2, r0);

    // Restore target function and new target (reverse of the pushes above).
    __ pop(r3);
    __ pop(r1);
    __ pop(r0);
    __ SmiUntag(r0, r0);
  }
  static_assert(kJavaScriptCallCodeStartRegister == r2, "ABI mismatch");
  // Skip the Code header to reach the first instruction.
  __ add(r2, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
  __ Jump(r2);
}
180
181 namespace {
182
// Construct stub used for builtin constructors: sets up a CONSTRUCT frame,
// pushes the-hole as receiver, copies the arguments onto the expression
// stack, invokes the function, and drops the caller's arguments on return.
void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- r0     : number of arguments
  //  -- r1     : constructor function
  //  -- r3     : new target
  //  -- cp     : context
  //  -- lr     : return address
  //  -- sp[...]: constructor arguments
  // -----------------------------------

  Register scratch = r2;

  // Enter a construct frame.
  {
    FrameAndConstantPoolScope scope(masm, StackFrame::CONSTRUCT);

    // Preserve the incoming parameters on the stack (argc is smi-tagged only
    // while stored in the frame).
    __ SmiTag(r0);
    __ Push(cp, r0);
    __ SmiUntag(r0);

    // The receiver for the builtin/api call.
    __ PushRoot(Heap::kTheHoleValueRootIndex);

    // Set up pointer to last argument.
    __ add(r4, fp, Operand(StandardFrameConstants::kCallerSPOffset));

    // Copy arguments and receiver to the expression stack.
    Label loop, entry;
    __ mov(r5, r0);
    // ----------- S t a t e -------------
    //  --                 r0: number of arguments (untagged)
    //  --                 r1: constructor function
    //  --                 r3: new target
    //  --                 r4: pointer to last argument
    //  --                 r5: counter
    //  -- sp[0*kPointerSize]: the hole (receiver)
    //  -- sp[1*kPointerSize]: number of arguments (tagged)
    //  -- sp[2*kPointerSize]: context
    // -----------------------------------
    // Count down from argc-1 to 0 so arguments are pushed in reverse order.
    __ b(&entry);
    __ bind(&loop);
    __ ldr(scratch, MemOperand(r4, r5, LSL, kPointerSizeLog2));
    __ push(scratch);
    __ bind(&entry);
    __ sub(r5, r5, Operand(1), SetCC);
    __ b(ge, &loop);

    // Call the function.
    // r0: number of arguments (untagged)
    // r1: constructor function
    // r3: new target
    ParameterCount actual(r0);
    __ InvokeFunction(r1, r3, actual, CALL_FUNCTION);

    // Restore context from the frame.
    __ ldr(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
    // Restore smi-tagged arguments count from the frame.
    __ ldr(scratch, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
    // Leave construct frame.
  }

  // Remove caller arguments from the stack and return. The shift converts
  // the smi-tagged count directly into a byte offset; the extra kPointerSize
  // drops the receiver.
  STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
  __ add(sp, sp, Operand(scratch, LSL, kPointerSizeLog2 - kSmiTagSize));
  __ add(sp, sp, Operand(kPointerSize));
  __ Jump(lr);
}
251
252 } // namespace
253
// The construct stub for ES5 constructor functions and ES6 class constructors.
// Allocates the implicit receiver (unless the constructor is a derived class
// constructor), invokes the constructor, and implements the ECMA-262 rules
// for choosing between the constructor's return value and the receiver.
// Two PC offsets inside the frame scope are registered with the deoptimizer,
// so the statement order here is load-bearing.
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- r0: number of arguments (untagged)
  //  -- r1: constructor function
  //  -- r3: new target
  //  -- cp: context
  //  -- lr: return address
  //  -- sp[...]: constructor arguments
  // -----------------------------------

  // Enter a construct frame.
  {
    FrameAndConstantPoolScope scope(masm, StackFrame::CONSTRUCT);
    Label post_instantiation_deopt_entry, not_create_implicit_receiver;

    // Preserve the incoming parameters on the stack (r4 holds the-hole as
    // frame padding).
    __ LoadRoot(r4, Heap::kTheHoleValueRootIndex);
    __ SmiTag(r0);
    __ Push(cp, r0, r1, r4, r3);

    // ----------- S t a t e -------------
    //  --        sp[0*kPointerSize]: new target
    //  --        sp[1*kPointerSize]: padding
    //  -- r1 and sp[2*kPointerSize]: constructor function
    //  --        sp[3*kPointerSize]: number of arguments (tagged)
    //  --        sp[4*kPointerSize]: context
    // -----------------------------------

    // Derived class constructors must not allocate a receiver; check the
    // flag on the SharedFunctionInfo.
    __ ldr(r4, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
    __ ldr(r4, FieldMemOperand(r4, SharedFunctionInfo::kFlagsOffset));
    __ tst(r4, Operand(SharedFunctionInfo::IsDerivedConstructorBit::kMask));
    __ b(ne, &not_create_implicit_receiver);

    // If not derived class constructor: Allocate the new receiver object.
    __ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1,
                        r4, r5);
    __ Call(BUILTIN_CODE(masm->isolate(), FastNewObject),
            RelocInfo::CODE_TARGET);
    __ b(&post_instantiation_deopt_entry);

    // Else: use TheHoleValue as receiver for constructor call
    __ bind(&not_create_implicit_receiver);
    __ LoadRoot(r0, Heap::kTheHoleValueRootIndex);

    // ----------- S t a t e -------------
    //  --                          r0: receiver
    //  -- Slot 3 / sp[0*kPointerSize]: new target
    //  -- Slot 2 / sp[1*kPointerSize]: constructor function
    //  -- Slot 1 / sp[2*kPointerSize]: number of arguments (tagged)
    //  -- Slot 0 / sp[3*kPointerSize]: context
    // -----------------------------------
    // Deoptimizer enters here.
    masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset(
        masm->pc_offset());
    __ bind(&post_instantiation_deopt_entry);

    // Restore new target.
    __ Pop(r3);
    // Push the allocated receiver to the stack. We need two copies
    // because we may have to return the original one and the calling
    // conventions dictate that the called function pops the receiver.
    __ Push(r0, r0);

    // ----------- S t a t e -------------
    //  --                 r3: new target
    //  -- sp[0*kPointerSize]: implicit receiver
    //  -- sp[1*kPointerSize]: implicit receiver
    //  -- sp[2*kPointerSize]: padding
    //  -- sp[3*kPointerSize]: constructor function
    //  -- sp[4*kPointerSize]: number of arguments (tagged)
    //  -- sp[5*kPointerSize]: context
    // -----------------------------------

    // Restore constructor function and argument count.
    __ ldr(r1, MemOperand(fp, ConstructFrameConstants::kConstructorOffset));
    __ ldr(r0, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
    __ SmiUntag(r0);

    // Set up pointer to last argument.
    __ add(r4, fp, Operand(StandardFrameConstants::kCallerSPOffset));

    // Copy arguments and receiver to the expression stack.
    Label loop, entry;
    __ mov(r5, r0);
    // ----------- S t a t e -------------
    //  --                        r0: number of arguments (untagged)
    //  --                        r3: new target
    //  --                        r4: pointer to last argument
    //  --                        r5: counter
    //  --        sp[0*kPointerSize]: implicit receiver
    //  --        sp[1*kPointerSize]: implicit receiver
    //  --        sp[2*kPointerSize]: padding
    //  -- r1 and sp[3*kPointerSize]: constructor function
    //  --        sp[4*kPointerSize]: number of arguments (tagged)
    //  --        sp[5*kPointerSize]: context
    // -----------------------------------
    // Count down from argc-1 to 0 so arguments are pushed in reverse order.
    __ b(&entry);

    __ bind(&loop);
    __ ldr(r6, MemOperand(r4, r5, LSL, kPointerSizeLog2));
    __ push(r6);
    __ bind(&entry);
    __ sub(r5, r5, Operand(1), SetCC);
    __ b(ge, &loop);

    // Call the function.
    ParameterCount actual(r0);
    __ InvokeFunction(r1, r3, actual, CALL_FUNCTION);

    // ----------- S t a t e -------------
    //  --                 r0: constructor result
    //  -- sp[0*kPointerSize]: implicit receiver
    //  -- sp[1*kPointerSize]: padding
    //  -- sp[2*kPointerSize]: constructor function
    //  -- sp[3*kPointerSize]: number of arguments
    //  -- sp[4*kPointerSize]: context
    // -----------------------------------

    // Store offset of return address for deoptimizer.
    masm->isolate()->heap()->SetConstructStubInvokeDeoptPCOffset(
        masm->pc_offset());

    // Restore the context from the frame.
    __ ldr(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));

    // If the result is an object (in the ECMA sense), we should get rid
    // of the receiver and use the result; see ECMA-262 section 13.2.2-7
    // on page 74.
    Label use_receiver, do_throw, leave_frame;

    // If the result is undefined, we jump out to using the implicit receiver.
    __ JumpIfRoot(r0, Heap::kUndefinedValueRootIndex, &use_receiver);

    // Otherwise we do a smi check and fall through to check if the return value
    // is a valid receiver.

    // If the result is a smi, it is *not* an object in the ECMA sense.
    __ JumpIfSmi(r0, &use_receiver);

    // If the type of the result (stored in its map) is less than
    // FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense.
    STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
    __ CompareObjectType(r0, r4, r5, FIRST_JS_RECEIVER_TYPE);
    // A JSReceiver result replaces the implicit receiver; anything else
    // falls back to the receiver saved on the stack.
    __ b(ge, &leave_frame);
    __ b(&use_receiver);

    // Reached from use_receiver when the saved receiver is the-hole, i.e. a
    // derived constructor returned a non-object. CallRuntime does not return.
    __ bind(&do_throw);
    __ CallRuntime(Runtime::kThrowConstructorReturnedNonObject);

    // Throw away the result of the constructor invocation and use the
    // on-stack receiver as the result.
    __ bind(&use_receiver);
    __ ldr(r0, MemOperand(sp, 0 * kPointerSize));
    __ JumpIfRoot(r0, Heap::kTheHoleValueRootIndex, &do_throw);

    __ bind(&leave_frame);
    // Restore smi-tagged arguments count from the frame.
    __ ldr(r1, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
    // Leave construct frame.
  }
  // Remove caller arguments from the stack and return. The shift converts
  // the smi-tagged count into a byte offset; the extra kPointerSize drops
  // the receiver.
  STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
  __ add(sp, sp, Operand(r1, LSL, kPointerSizeLog2 - kSmiTagSize));
  __ add(sp, sp, Operand(kPointerSize));
  __ Jump(lr);
}
421
// Public entry point for the builtins construct stub; delegates to the
// shared helper above.
void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
  Generate_JSBuiltinsConstructStubHelper(masm);
}
425
// If |sfi_data| holds an InterpreterData object, replaces it in place with
// the BytecodeArray stored inside; any other value is left untouched.
// Clobbers |scratch1| (used for the type check).
static void GetSharedFunctionInfoBytecode(MacroAssembler* masm,
                                          Register sfi_data,
                                          Register scratch1) {
  Label done;

  __ CompareObjectType(sfi_data, scratch1, scratch1, INTERPRETER_DATA_TYPE);
  __ b(ne, &done);
  __ ldr(sfi_data,
         FieldMemOperand(sfi_data, InterpreterData::kBytecodeArrayOffset));

  __ bind(&done);
}
438
439 // static
// Resumes a suspended JSGeneratorObject: stores the input value, handles
// debugger stepping, pushes the receiver and dummy argument slots, and then
// enters the generator function's code with the generator object passed in
// the new.target register (r3) to signal a resume.
void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- r0 : the value to pass to the generator
  //  -- r1 : the JSGeneratorObject to resume
  //  -- lr : return address
  // -----------------------------------
  __ AssertGeneratorObject(r1);

  // Store input value into generator object.
  __ str(r0, FieldMemOperand(r1, JSGeneratorObject::kInputOrDebugPosOffset));
  __ RecordWriteField(r1, JSGeneratorObject::kInputOrDebugPosOffset, r0, r3,
                      kLRHasNotBeenSaved, kDontSaveFPRegs);

  // Load suspended function and context.
  __ ldr(r4, FieldMemOperand(r1, JSGeneratorObject::kFunctionOffset));
  __ ldr(cp, FieldMemOperand(r4, JSFunction::kContextOffset));

  Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
  Label stepping_prepared;
  Register scratch = r5;

  // Flood function if we are stepping: a non-zero debug-hook flag means the
  // debugger wants to observe this call.
  ExternalReference debug_hook =
      ExternalReference::debug_hook_on_function_call_address(masm->isolate());
  __ Move(scratch, debug_hook);
  __ ldrsb(scratch, MemOperand(scratch));
  __ cmp(scratch, Operand(0));
  __ b(ne, &prepare_step_in_if_stepping);

  // Flood function if we need to continue stepping in the suspended
  // generator (i.e. this generator is the one being stepped into).
  ExternalReference debug_suspended_generator =
      ExternalReference::debug_suspended_generator_address(masm->isolate());
  __ Move(scratch, debug_suspended_generator);
  __ ldr(scratch, MemOperand(scratch));
  __ cmp(scratch, Operand(r1));
  __ b(eq, &prepare_step_in_suspended_generator);
  __ bind(&stepping_prepared);

  // Check the stack for overflow. We are not trying to catch interruptions
  // (i.e. debug break and preemption) here, so check the "real stack limit".
  Label stack_overflow;
  __ CompareRoot(sp, Heap::kRealStackLimitRootIndex);
  __ b(lo, &stack_overflow);

  // Push receiver.
  __ ldr(scratch, FieldMemOperand(r1, JSGeneratorObject::kReceiverOffset));
  __ Push(scratch);

  // ----------- S t a t e -------------
  //  -- r1    : the JSGeneratorObject to resume
  //  -- r4    : generator function
  //  -- cp    : generator context
  //  -- lr    : return address
  //  -- sp[0] : generator receiver
  // -----------------------------------

  // Push holes for arguments to generator function. Since the parser forced
  // context allocation for any variables in generators, the actual argument
  // values have already been copied into the context and these dummy values
  // will never be used.
  __ ldr(r3, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
  __ ldr(r3,
         FieldMemOperand(r3, SharedFunctionInfo::kFormalParameterCountOffset));
  {
    Label done_loop, loop;
    __ bind(&loop);
    __ sub(r3, r3, Operand(1), SetCC);
    __ b(mi, &done_loop);
    __ PushRoot(Heap::kTheHoleValueRootIndex);
    __ b(&loop);
    __ bind(&done_loop);
  }

  // Underlying function needs to have bytecode available.
  if (FLAG_debug_code) {
    __ ldr(r3, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
    __ ldr(r3, FieldMemOperand(r3, SharedFunctionInfo::kFunctionDataOffset));
    GetSharedFunctionInfoBytecode(masm, r3, r0);
    __ CompareObjectType(r3, r3, r3, BYTECODE_ARRAY_TYPE);
    __ Assert(eq, AbortReason::kMissingBytecodeArray);
  }

  // Resume (Ignition/TurboFan) generator object.
  {
    __ ldr(r0, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
    __ ldr(r0, FieldMemOperand(
                   r0, SharedFunctionInfo::kFormalParameterCountOffset));
    // We abuse new.target both to indicate that this is a resume call and to
    // pass in the generator object. In ordinary calls, new.target is always
    // undefined because generator functions are non-constructable.
    __ Move(r3, r1);
    __ Move(r1, r4);
    static_assert(kJavaScriptCallCodeStartRegister == r2, "ABI mismatch");
    __ ldr(r2, FieldMemOperand(r1, JSFunction::kCodeOffset));
    __ add(r2, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
    __ Jump(r2);
  }

  // Slow path: notify the debugger of the call, then retry.
  __ bind(&prepare_step_in_if_stepping);
  {
    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
    __ Push(r1, r4);
    // Push hole as receiver since we do not use it for stepping.
    __ PushRoot(Heap::kTheHoleValueRootIndex);
    __ CallRuntime(Runtime::kDebugOnFunctionCall);
    __ Pop(r1);
    // r4 may have been clobbered by the runtime call; reload it.
    __ ldr(r4, FieldMemOperand(r1, JSGeneratorObject::kFunctionOffset));
  }
  __ b(&stepping_prepared);

  // Slow path: let the debugger prepare step-in for this generator.
  __ bind(&prepare_step_in_suspended_generator);
  {
    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
    __ Push(r1);
    __ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator);
    __ Pop(r1);
    __ ldr(r4, FieldMemOperand(r1, JSGeneratorObject::kFunctionOffset));
  }
  __ b(&stepping_prepared);

  __ bind(&stack_overflow);
  {
    FrameScope scope(masm, StackFrame::INTERNAL);
    __ CallRuntime(Runtime::kThrowStackOverflow);
    __ bkpt(0);  // This should be unreachable.
  }
}
568
// Throws a TypeError when a non-constructable target (in r1) is used with
// 'new'. The runtime call never returns.
void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
  FrameScope scope(masm, StackFrame::INTERNAL);
  __ push(r1);
  __ CallRuntime(Runtime::kThrowConstructedNonConstructable);
}
574
// Branches to |stack_overflow| if pushing |num_args| pointer-sized values
// would run past the real stack limit. Clobbers |scratch|.
static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
                                        Register scratch,
                                        Label* stack_overflow) {
  // Check the stack for overflow. We are not trying to catch
  // interruptions (e.g. debug break and preemption) here, so the "real stack
  // limit" is checked.
  __ LoadRoot(scratch, Heap::kRealStackLimitRootIndex);
  // Make scratch the space we have left. The stack might already be overflowed
  // here which will cause scratch to become negative.
  __ sub(scratch, sp, scratch);
  // Check if the arguments will overflow the stack.
  __ cmp(scratch, Operand(num_args, LSL, kPointerSizeLog2));
  __ b(le, stack_overflow);  // Signed comparison.
}
589
// Shared body for the JS entry trampolines: establishes an internal frame,
// loads the caller context from the isolate, copies the C-side argv (an
// array of handles) onto the stack, clears the JS callee-saved registers,
// and invokes either the Call or the Construct builtin.
static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
                                             bool is_construct) {
  // Called from Generate_JS_Entry
  // r0: new.target
  // r1: function
  // r2: receiver
  // r3: argc
  // r4: argv
  // r5-r6, r8 and cp may be clobbered
  ProfileEntryHookStub::MaybeCallEntryHook(masm);

  // Enter an internal frame.
  {
    FrameScope scope(masm, StackFrame::INTERNAL);

    // Setup the context (we need to use the caller context from the isolate).
    ExternalReference context_address = ExternalReference::Create(
        IsolateAddressId::kContextAddress, masm->isolate());
    __ Move(cp, context_address);
    __ ldr(cp, MemOperand(cp));

    // Push the function and the receiver onto the stack.
    __ Push(r1, r2);

    // Check if we have enough stack space to push all arguments.
    // Clobbers r2.
    Label enough_stack_space, stack_overflow;
    Generate_StackOverflowCheck(masm, r3, r2, &stack_overflow);
    __ b(&enough_stack_space);
    __ bind(&stack_overflow);
    __ CallRuntime(Runtime::kThrowStackOverflow);
    // Unreachable code.
    __ bkpt(0);

    __ bind(&enough_stack_space);

    // Remember new.target (r0 is about to be reused as a scratch register).
    __ mov(r5, r0);

    // Copy arguments to the stack in a loop.
    // r1: function
    // r3: argc
    // r4: argv, i.e. points to first arg
    Label loop, entry;
    __ add(r2, r4, Operand(r3, LSL, kPointerSizeLog2));
    // r2 points past last arg.
    __ b(&entry);
    __ bind(&loop);
    __ ldr(r0, MemOperand(r4, kPointerSize, PostIndex));  // read next parameter
    __ ldr(r0, MemOperand(r0));                           // dereference handle
    __ push(r0);                                          // push parameter
    __ bind(&entry);
    __ cmp(r4, r2);
    __ b(ne, &loop);

    // Setup new.target and argc for the JS calling convention.
    __ mov(r0, Operand(r3));
    __ mov(r3, Operand(r5));

    // Initialize all JavaScript callee-saved registers, since they will be seen
    // by the garbage collector as part of handlers.
    __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
    __ mov(r5, Operand(r4));
    __ mov(r6, Operand(r4));
    __ mov(r8, Operand(r4));
    if (kR9Available == 1) {
      __ mov(r9, Operand(r4));
    }

    // Invoke the code.
    Handle<Code> builtin = is_construct
                               ? BUILTIN_CODE(masm->isolate(), Construct)
                               : masm->isolate()->builtins()->Call();
    __ Call(builtin, RelocInfo::CODE_TARGET);

    // Exit the JS frame and remove the parameters (except function), and
    // return.
    // Respect ABI stack constraint.
  }
  __ Jump(lr);

  // r0: result
}
673
// Entry trampoline for ordinary (non-construct) calls from C++ into JS.
void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
  Generate_JSEntryTrampolineHelper(masm, false);
}
677
// Entry trampoline for construct calls ('new') from C++ into JS.
void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
  Generate_JSEntryTrampolineHelper(masm, true);
}
681
// Installs |optimized_code| as the code of |closure| and records the write
// for the GC write barrier. Clobbers scratch1 and scratch2; scratch3 is
// accepted for interface compatibility but not used on this architecture.
static void ReplaceClosureCodeWithOptimizedCode(
    MacroAssembler* masm, Register optimized_code, Register closure,
    Register scratch1, Register scratch2, Register scratch3) {
  // Store code entry in the closure.
  __ str(optimized_code, FieldMemOperand(closure, JSFunction::kCodeOffset));
  __ mov(scratch1, optimized_code);  // Write barrier clobbers scratch1 below.
  __ RecordWriteField(closure, JSFunction::kCodeOffset, scratch1, scratch2,
                      kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}
692
// Tears down the current interpreted frame and pops the caller's arguments
// (including the receiver) off the stack. |scratch| is clobbered.
static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
  Register args_count = scratch;

  // Get the arguments + receiver count from the BytecodeArray stored in the
  // frame (kParameterSizeOffset is a byte count, not an element count).
  __ ldr(args_count,
         MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
  __ ldr(args_count,
         FieldMemOperand(args_count, BytecodeArray::kParameterSizeOffset));

  // Leave the frame (also dropping the register file).
  __ LeaveFrame(StackFrame::INTERPRETED);

  // Drop receiver + arguments.
  __ add(sp, sp, args_count, LeaveCC);
}
708
709 // Tail-call |function_id| if |smi_entry| == |marker|
// Tail-calls |function_id| in the runtime if the smi in |smi_entry| equals
// |marker|; otherwise falls through without clobbering the calling state.
static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
                                          Register smi_entry,
                                          OptimizationMarker marker,
                                          Runtime::FunctionId function_id) {
  Label no_match;
  __ cmp(smi_entry, Operand(Smi::FromEnum(marker)));
  __ b(ne, &no_match);
  GenerateTailCallToReturnedCode(masm, function_id);
  __ bind(&no_match);
}
720
// Inspects the feedback vector's optimized-code slot. If it holds an
// optimization marker (a Smi), dispatches to the matching runtime function;
// if it holds a weak reference to live, non-deoptimized optimized code,
// installs that code on the closure and tail-calls it. Falls through when
// there is nothing to do.
static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
                                           Register feedback_vector,
                                           Register scratch1, Register scratch2,
                                           Register scratch3) {
  // ----------- S t a t e -------------
  //  -- r0 : argument count (preserved for callee if needed, and caller)
  //  -- r3 : new target (preserved for callee if needed, and caller)
  //  -- r1 : target function (preserved for callee if needed, and caller)
  //  -- feedback vector (preserved for caller if needed)
  // -----------------------------------
  DCHECK(
      !AreAliased(feedback_vector, r0, r1, r3, scratch1, scratch2, scratch3));

  Label optimized_code_slot_is_weak_ref, fallthrough;

  Register closure = r1;
  Register optimized_code_entry = scratch1;

  __ ldr(
      optimized_code_entry,
      FieldMemOperand(feedback_vector, FeedbackVector::kOptimizedCodeOffset));

  // Check if the code entry is a Smi. If yes, we interpret it as an
  // optimisation marker. Otherwise, interpret it as a weak reference to a code
  // object.
  __ JumpIfNotSmi(optimized_code_entry, &optimized_code_slot_is_weak_ref);

  {
    // Optimized code slot is a Smi optimization marker.

    // Fall through if no optimization trigger.
    __ cmp(optimized_code_entry,
           Operand(Smi::FromEnum(OptimizationMarker::kNone)));
    __ b(eq, &fallthrough);

    // Each helper tail-calls the runtime only when the marker matches;
    // otherwise execution continues with the next check.
    TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
                                  OptimizationMarker::kLogFirstExecution,
                                  Runtime::kFunctionFirstExecution);
    TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
                                  OptimizationMarker::kCompileOptimized,
                                  Runtime::kCompileOptimized_NotConcurrent);
    TailCallRuntimeIfMarkerEquals(
        masm, optimized_code_entry,
        OptimizationMarker::kCompileOptimizedConcurrent,
        Runtime::kCompileOptimized_Concurrent);

    {
      // Otherwise, the marker is InOptimizationQueue, so fall through hoping
      // that an interrupt will eventually update the slot with optimized code.
      if (FLAG_debug_code) {
        __ cmp(
            optimized_code_entry,
            Operand(Smi::FromEnum(OptimizationMarker::kInOptimizationQueue)));
        __ Assert(eq, AbortReason::kExpectedOptimizationSentinel);
      }
      __ jmp(&fallthrough);
    }
  }

  {
    // Optimized code slot is a weak reference.
    __ bind(&optimized_code_slot_is_weak_ref);

    // A cleared weak reference means the code was collected; fall through.
    __ LoadWeakValue(optimized_code_entry, optimized_code_entry, &fallthrough);

    // Check if the optimized code is marked for deopt. If it is, call the
    // runtime to clear it.
    Label found_deoptimized_code;
    __ ldr(scratch2, FieldMemOperand(optimized_code_entry,
                                     Code::kCodeDataContainerOffset));
    __ ldr(
        scratch2,
        FieldMemOperand(scratch2, CodeDataContainer::kKindSpecificFlagsOffset));
    __ tst(scratch2, Operand(1 << Code::kMarkedForDeoptimizationBit));
    __ b(ne, &found_deoptimized_code);

    // Optimized code is good, get it into the closure and link the closure into
    // the optimized functions list, then tail call the optimized code.
    // The feedback vector is no longer used, so re-use it as a scratch
    // register.
    ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure,
                                        scratch2, scratch3, feedback_vector);
    static_assert(kJavaScriptCallCodeStartRegister == r2, "ABI mismatch");
    __ add(r2, optimized_code_entry,
           Operand(Code::kHeaderSize - kHeapObjectTag));
    __ Jump(r2);

    // Optimized code slot contains deoptimized code, evict it and re-enter the
    // closure's code.
    __ bind(&found_deoptimized_code);
    GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
  }

  // Fall-through if the optimized code cell is clear and there is no
  // optimization marker.
  __ bind(&fallthrough);
}
818
// Advance the current bytecode offset. This simulates what all bytecode
// handlers do upon completion of the underlying operation. Will bail out to a
// label if the bytecode (without prefix) is a return bytecode.
//
// On exit |bytecode_offset| points at the next unprefixed bytecode and
// |bytecode| holds its value; |scratch1| is clobbered.
static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
                                          Register bytecode_array,
                                          Register bytecode_offset,
                                          Register bytecode, Register scratch1,
                                          Label* if_return) {
  Register bytecode_size_table = scratch1;
  DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode_size_table,
                     bytecode));

  // Table of per-bytecode sizes, indexed by bytecode value. Three consecutive
  // tables exist (single / wide / extra-wide operand scaling); the adds below
  // select the appropriate one.
  __ Move(bytecode_size_table,
          ExternalReference::bytecode_size_table_address());

  // Check if the bytecode is a Wide or ExtraWide prefix bytecode.
  Label process_bytecode, extra_wide;
  STATIC_ASSERT(0 == static_cast<int>(interpreter::Bytecode::kWide));
  STATIC_ASSERT(1 == static_cast<int>(interpreter::Bytecode::kExtraWide));
  STATIC_ASSERT(2 == static_cast<int>(interpreter::Bytecode::kDebugBreakWide));
  STATIC_ASSERT(3 ==
                static_cast<int>(interpreter::Bytecode::kDebugBreakExtraWide));
  // Per the asserts above, prefix bytecodes are values 0-3 and the odd ones
  // are the extra-wide variants.
  __ cmp(bytecode, Operand(0x3));
  __ b(hi, &process_bytecode);
  __ tst(bytecode, Operand(0x1));
  __ b(ne, &extra_wide);

  // Load the next bytecode and update table to the wide scaled table.
  __ add(bytecode_offset, bytecode_offset, Operand(1));
  __ ldrb(bytecode, MemOperand(bytecode_array, bytecode_offset));
  __ add(bytecode_size_table, bytecode_size_table,
         Operand(kIntSize * interpreter::Bytecodes::kBytecodeCount));
  __ jmp(&process_bytecode);

  __ bind(&extra_wide);
  // Load the next bytecode and update table to the extra wide scaled table.
  __ add(bytecode_offset, bytecode_offset, Operand(1));
  __ ldrb(bytecode, MemOperand(bytecode_array, bytecode_offset));
  __ add(bytecode_size_table, bytecode_size_table,
         Operand(2 * kIntSize * interpreter::Bytecodes::kBytecodeCount));

  __ bind(&process_bytecode);

  // Bailout to the return label if this is a return bytecode.
#define JUMP_IF_EQUAL(NAME)                                                   \
  __ cmp(bytecode, Operand(static_cast<int>(interpreter::Bytecode::k##NAME))); \
  __ b(if_return, eq);
  RETURN_BYTECODE_LIST(JUMP_IF_EQUAL)
#undef JUMP_IF_EQUAL

  // Otherwise, load the size of the current bytecode and advance the offset.
  // Each table entry is an int (hence the LSL 2 scaling).
  __ ldr(scratch1, MemOperand(bytecode_size_table, bytecode, LSL, 2));
  __ add(bytecode_offset, bytecode_offset, scratch1);
}
873
// Generate code for entering a JS function with the interpreter.
// On entry to the function the receiver and arguments have been pushed on the
// stack left to right.  The actual argument count matches the formal parameter
// count expected by the function.
//
// The live registers are:
//   o r1: the JS function object being called.
//   o r3: the incoming new target or generator object
//   o cp: our context
//   o fp: the caller's frame pointer
//   o sp: stack pointer
//   o lr: return address
//
// The function builds an interpreter frame.  See InterpreterFrameConstants in
// frames.h for its layout.
void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
  ProfileEntryHookStub::MaybeCallEntryHook(masm);

  Register closure = r1;
  Register feedback_vector = r2;

  // Load the feedback vector from the closure.
  __ ldr(feedback_vector,
         FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
  __ ldr(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
  // Read off the optimized code slot in the feedback vector, and if there
  // is optimized code or an optimization marker, call that instead.
  MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, r4, r6, r5);

  // Open a frame scope to indicate that there is a frame on the stack.  The
  // MANUAL indicates that the scope shouldn't actually generate code to set up
  // the frame (that is done below).
  FrameScope frame_scope(masm, StackFrame::MANUAL);
  __ PushStandardFrame(closure);

  // Get the bytecode array from the function object (or from the DebugInfo if
  // it is present) and load it into kInterpreterBytecodeArrayRegister.
  Label maybe_load_debug_bytecode_array, bytecode_array_loaded;
  __ ldr(r0, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
  __ ldr(kInterpreterBytecodeArrayRegister,
         FieldMemOperand(r0, SharedFunctionInfo::kFunctionDataOffset));
  GetSharedFunctionInfoBytecode(masm, kInterpreterBytecodeArrayRegister, r4);
  // A non-Smi DebugInfo (SmiTst sets ne) means debug handling is needed.
  __ ldr(r4, FieldMemOperand(r0, SharedFunctionInfo::kDebugInfoOffset));
  __ SmiTst(r4);
  __ b(ne, &maybe_load_debug_bytecode_array);
  __ bind(&bytecode_array_loaded);

  // Increment invocation count for the function.
  __ ldr(r9, FieldMemOperand(feedback_vector,
                             FeedbackVector::kInvocationCountOffset));
  __ add(r9, r9, Operand(1));
  __ str(r9, FieldMemOperand(feedback_vector,
                             FeedbackVector::kInvocationCountOffset));

  // Check function data field is actually a BytecodeArray object.
  if (FLAG_debug_code) {
    __ SmiTst(kInterpreterBytecodeArrayRegister);
    __ Assert(
        ne, AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
    __ CompareObjectType(kInterpreterBytecodeArrayRegister, r0, no_reg,
                         BYTECODE_ARRAY_TYPE);
    __ Assert(
        eq, AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
  }

  // Reset code age.
  __ mov(r9, Operand(BytecodeArray::kNoAgeBytecodeAge));
  __ strb(r9, FieldMemOperand(kInterpreterBytecodeArrayRegister,
                              BytecodeArray::kBytecodeAgeOffset));

  // Load the initial bytecode offset.
  __ mov(kInterpreterBytecodeOffsetRegister,
         Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));

  // Push bytecode array and Smi tagged bytecode array offset.
  __ SmiTag(r0, kInterpreterBytecodeOffsetRegister);
  __ Push(kInterpreterBytecodeArrayRegister, r0);

  // Allocate the local and temporary register file on the stack.
  {
    // Load frame size from the BytecodeArray object.
    __ ldr(r4, FieldMemOperand(kInterpreterBytecodeArrayRegister,
                               BytecodeArray::kFrameSizeOffset));

    // Do a stack check to ensure we don't go over the limit.
    Label ok;
    __ sub(r9, sp, Operand(r4));
    __ LoadRoot(r2, Heap::kRealStackLimitRootIndex);
    __ cmp(r9, Operand(r2));
    __ b(hs, &ok);
    __ CallRuntime(Runtime::kThrowStackOverflow);
    __ bind(&ok);

    // If ok, push undefined as the initial value for all register file entries.
    Label loop_header;
    Label loop_check;
    __ LoadRoot(r9, Heap::kUndefinedValueRootIndex);
    __ b(&loop_check, al);
    __ bind(&loop_header);
    // TODO(rmcilroy): Consider doing more than one push per loop iteration.
    __ push(r9);
    // Continue loop if not done.
    __ sub(r4, r4, Operand(kPointerSize), SetCC);
    __ bind(&loop_check);
    __ b(&loop_header, ge);
  }

  // If the bytecode array has a valid incoming new target or generator object
  // register, initialize it with incoming value which was passed in r3.
  __ ldr(r9, FieldMemOperand(
                 kInterpreterBytecodeArrayRegister,
                 BytecodeArray::kIncomingNewTargetOrGeneratorRegisterOffset));
  __ cmp(r9, Operand::Zero());
  // Conditional store: only done when the register index is non-zero.
  __ str(r3, MemOperand(fp, r9, LSL, kPointerSizeLog2), ne);

  // Load accumulator with undefined.
  __ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex);

  // Load the dispatch table into a register and dispatch to the bytecode
  // handler at the current bytecode offset.
  Label do_dispatch;
  __ bind(&do_dispatch);
  __ mov(kInterpreterDispatchTableRegister,
         Operand(ExternalReference::interpreter_dispatch_table_address(
             masm->isolate())));
  __ ldrb(r4, MemOperand(kInterpreterBytecodeArrayRegister,
                         kInterpreterBytecodeOffsetRegister));
  __ ldr(
      kJavaScriptCallCodeStartRegister,
      MemOperand(kInterpreterDispatchTableRegister, r4, LSL, kPointerSizeLog2));
  __ Call(kJavaScriptCallCodeStartRegister);
  // Record the return PC so re-entries into this trampoline are recognized.
  masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());

  // Any returns to the entry trampoline are either due to the return bytecode
  // or the interpreter tail calling a builtin and then a dispatch.

  // Get bytecode array and bytecode offset from the stack frame.
  __ ldr(kInterpreterBytecodeArrayRegister,
         MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
  __ ldr(kInterpreterBytecodeOffsetRegister,
         MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
  __ SmiUntag(kInterpreterBytecodeOffsetRegister);

  // Either return, or advance to the next bytecode and dispatch.
  Label do_return;
  __ ldrb(r1, MemOperand(kInterpreterBytecodeArrayRegister,
                         kInterpreterBytecodeOffsetRegister));
  AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
                                kInterpreterBytecodeOffsetRegister, r1, r2,
                                &do_return);
  __ jmp(&do_dispatch);

  __ bind(&do_return);
  // The return value is in r0.
  LeaveInterpreterFrame(masm, r2);
  __ Jump(lr);

  // Load debug copy of the bytecode array if it exists.
  // kInterpreterBytecodeArrayRegister is already loaded with
  // SharedFunctionInfo::kFunctionDataOffset.
  __ bind(&maybe_load_debug_bytecode_array);
  // NOTE(review): the 'ne' condition relies on the flags from the SmiTst
  // above still being live at this branch target — confirm nothing on the
  // path here clobbers the condition flags.
  __ ldr(r9, FieldMemOperand(r4, DebugInfo::kDebugBytecodeArrayOffset), ne);
  __ JumpIfRoot(r9, Heap::kUndefinedValueRootIndex, &bytecode_array_loaded);

  __ mov(kInterpreterBytecodeArrayRegister, r9);
  __ ldr(r9, FieldMemOperand(r4, DebugInfo::kFlagsOffset));
  __ SmiUntag(r9);
  __ And(r9, r9, Operand(DebugInfo::kDebugExecutionMode));

  // If the isolate's current debug execution mode already matches the
  // DebugInfo's, no (re-)instrumentation is needed.
  ExternalReference debug_execution_mode =
      ExternalReference::debug_execution_mode_address(masm->isolate());
  __ mov(r4, Operand(debug_execution_mode));
  __ ldrsb(r4, MemOperand(r4));
  STATIC_ASSERT(static_cast<int>(DebugInfo::kDebugExecutionMode) ==
                static_cast<int>(DebugInfo::kSideEffects));
  __ cmp(r4, r9);
  __ b(eq, &bytecode_array_loaded);

  // Call the runtime to apply instrumentation, preserving the registers the
  // call would otherwise clobber.
  __ push(closure);
  __ push(feedback_vector);
  __ push(kInterpreterBytecodeArrayRegister);
  __ push(closure);
  __ CallRuntime(Runtime::kDebugApplyInstrumentation);
  __ pop(kInterpreterBytecodeArrayRegister);
  __ pop(feedback_vector);
  __ pop(closure);
  __ b(&bytecode_array_loaded);
}
1062
// Pushes |num_args| arguments onto the stack, reading them downward in memory
// starting at |index|.  |index|, |limit| and |scratch| are clobbered.
static void Generate_InterpreterPushArgs(MacroAssembler* masm,
                                         Register num_args, Register index,
                                         Register limit, Register scratch) {
  // Find the address of the last argument:
  // limit = index - num_args * kPointerSize.
  __ mov(limit, num_args);
  __ mov(limit, Operand(limit, LSL, kPointerSizeLog2));
  __ sub(limit, index, limit);

  // Copy arguments: load each (post-decrementing index) and push it, until
  // index reaches limit.
  Label loop_header, loop_check;
  __ b(al, &loop_check);
  __ bind(&loop_header);
  __ ldr(scratch, MemOperand(index, -kPointerSize, PostIndex));
  __ push(scratch);
  __ bind(&loop_check);
  __ cmp(index, limit);
  __ b(gt, &loop_header);
}
1080
// static
void Builtins::Generate_InterpreterPushArgsThenCallImpl(
    MacroAssembler* masm, ConvertReceiverMode receiver_mode,
    InterpreterPushArgsMode mode) {
  DCHECK(mode != InterpreterPushArgsMode::kArrayFunction);
  // ----------- S t a t e -------------
  //  -- r0 : the number of arguments (not including the receiver)
  //  -- r2 : the address of the first argument to be pushed. Subsequent
  //          arguments should be consecutive above this, in the same order as
  //          they are to be pushed onto the stack.
  //  -- r1 : the target to call (can be any Object).
  // -----------------------------------
  Label stack_overflow;

  __ add(r3, r0, Operand(1));  // Add one for receiver.

  Generate_StackOverflowCheck(masm, r3, r4, &stack_overflow);

  // Push "undefined" as the receiver arg if we need to.
  if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
    __ PushRoot(Heap::kUndefinedValueRootIndex);
    __ mov(r3, r0);  // Argument count is correct.
  }

  // Push the arguments. r2, r4, r5 will be modified.
  Generate_InterpreterPushArgs(masm, r3, r2, r4, r5);

  // For a spread call, the spread argument was pushed last; pop it back into
  // a register and exclude it from the argument count.
  if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
    __ Pop(r2);                  // Pass the spread in a register
    __ sub(r0, r0, Operand(1));  // Subtract one for spread
  }

  // Call the target.
  if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
    __ Jump(BUILTIN_CODE(masm->isolate(), CallWithSpread),
            RelocInfo::CODE_TARGET);
  } else {
    __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny),
            RelocInfo::CODE_TARGET);
  }

  __ bind(&stack_overflow);
  {
    __ TailCallRuntime(Runtime::kThrowStackOverflow);
    // Unreachable code.
    __ bkpt(0);
  }
}
1129
// static
void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
    MacroAssembler* masm, InterpreterPushArgsMode mode) {
  // ----------- S t a t e -------------
  //  -- r0 : argument count (not including receiver)
  //  -- r3 : new target
  //  -- r1 : constructor to call
  //  -- r2 : allocation site feedback if available, undefined otherwise.
  //  -- r4 : address of the first argument
  // -----------------------------------
  Label stack_overflow;

  // Push a slot for the receiver to be constructed.
  __ mov(r5, Operand::Zero());
  __ push(r5);

  Generate_StackOverflowCheck(masm, r0, r5, &stack_overflow);

  // Push the arguments. r5, r4, r6 will be modified.
  Generate_InterpreterPushArgs(masm, r0, r4, r5, r6);

  // For a spread construct, the spread argument was pushed last; pop it back
  // into a register and exclude it from the argument count.
  if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
    __ Pop(r2);                  // Pass the spread in a register
    __ sub(r0, r0, Operand(1));  // Subtract one for spread
  } else {
    __ AssertUndefinedOrAllocationSite(r2, r5);
  }

  if (mode == InterpreterPushArgsMode::kArrayFunction) {
    __ AssertFunction(r1);

    // Tail call to the array construct stub (still in the caller
    // context at this point).
    ArrayConstructorStub array_constructor_stub(masm->isolate());
    __ Jump(array_constructor_stub.GetCode(), RelocInfo::CODE_TARGET);
  } else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
    // Call the constructor with r0, r1, and r3 unmodified.
    __ Jump(BUILTIN_CODE(masm->isolate(), ConstructWithSpread),
            RelocInfo::CODE_TARGET);
  } else {
    DCHECK_EQ(InterpreterPushArgsMode::kOther, mode);
    // Call the constructor with r0, r1, and r3 unmodified.
    __ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET);
  }

  __ bind(&stack_overflow);
  {
    __ TailCallRuntime(Runtime::kThrowStackOverflow);
    // Unreachable code.
    __ bkpt(0);
  }
}
1182
// Sets up lr, the dispatch table, the bytecode array and the bytecode offset
// from the current interpreter frame, then jumps to the handler for the
// bytecode at that offset.
static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
  // Set the return address to the correct point in the interpreter entry
  // trampoline.
  Label builtin_trampoline, trampoline_loaded;
  Smi* interpreter_entry_return_pc_offset(
      masm->isolate()->heap()->interpreter_entry_return_pc_offset());
  DCHECK_NE(interpreter_entry_return_pc_offset, Smi::kZero);

  // If the SFI function_data is an InterpreterData, get the trampoline stored
  // in it, otherwise get the trampoline from the builtins list.
  __ ldr(r2, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
  __ ldr(r2, FieldMemOperand(r2, JSFunction::kSharedFunctionInfoOffset));
  __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kFunctionDataOffset));
  __ CompareObjectType(r2, kInterpreterDispatchTableRegister,
                       kInterpreterDispatchTableRegister,
                       INTERPRETER_DATA_TYPE);
  __ b(ne, &builtin_trampoline);

  __ ldr(r2,
         FieldMemOperand(r2, InterpreterData::kInterpreterTrampolineOffset));
  __ b(&trampoline_loaded);

  __ bind(&builtin_trampoline);
  __ Move(r2, BUILTIN_CODE(masm->isolate(), InterpreterEntryTrampoline));

  __ bind(&trampoline_loaded);
  // lr = trampoline code start + recorded return-PC offset, so a return from
  // the bytecode handler lands at the trampoline's dispatch-return point.
  __ add(lr, r2, Operand(interpreter_entry_return_pc_offset->value() +
                         Code::kHeaderSize - kHeapObjectTag));

  // Initialize the dispatch table register.
  __ Move(
      kInterpreterDispatchTableRegister,
      ExternalReference::interpreter_dispatch_table_address(masm->isolate()));

  // Get the bytecode array pointer from the frame.
  __ ldr(kInterpreterBytecodeArrayRegister,
         MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));

  if (FLAG_debug_code) {
    // Check function data field is actually a BytecodeArray object.
    __ SmiTst(kInterpreterBytecodeArrayRegister);
    __ Assert(
        ne, AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
    __ CompareObjectType(kInterpreterBytecodeArrayRegister, r1, no_reg,
                         BYTECODE_ARRAY_TYPE);
    __ Assert(
        eq, AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
  }

  // Get the target bytecode offset from the frame.
  __ ldr(kInterpreterBytecodeOffsetRegister,
         MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
  __ SmiUntag(kInterpreterBytecodeOffsetRegister);

  // Dispatch to the target bytecode.
  UseScratchRegisterScope temps(masm);
  Register scratch = temps.Acquire();
  __ ldrb(scratch, MemOperand(kInterpreterBytecodeArrayRegister,
                              kInterpreterBytecodeOffsetRegister));
  __ ldr(kJavaScriptCallCodeStartRegister,
         MemOperand(kInterpreterDispatchTableRegister, scratch, LSL,
                    kPointerSizeLog2));
  __ Jump(kJavaScriptCallCodeStartRegister);
}
1247
// Advances the saved bytecode offset in the interpreter frame past the current
// bytecode, then re-enters dispatch at the new offset.
void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
  // Get bytecode array and bytecode offset from the stack frame.
  __ ldr(kInterpreterBytecodeArrayRegister,
         MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
  __ ldr(kInterpreterBytecodeOffsetRegister,
         MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
  __ SmiUntag(kInterpreterBytecodeOffsetRegister);

  // Load the current bytecode.
  __ ldrb(r1, MemOperand(kInterpreterBytecodeArrayRegister,
                         kInterpreterBytecodeOffsetRegister));

  // Advance to the next bytecode.
  Label if_return;
  AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
                                kInterpreterBytecodeOffsetRegister, r1, r2,
                                &if_return);

  // Convert new bytecode offset to a Smi and save in the stackframe.
  __ SmiTag(r2, kInterpreterBytecodeOffsetRegister);
  __ str(r2, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));

  Generate_InterpreterEnterBytecode(masm);

  // We should never take the if_return path: return bytecodes leave through
  // the trampoline, not through an advance.
  __ bind(&if_return);
  __ Abort(AbortReason::kInvalidBytecodeAdvance);
}
1276
// Re-enters bytecode dispatch at the offset currently saved in the frame,
// without advancing it.
void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
  Generate_InterpreterEnterBytecode(masm);
}
1280
// Resets the closure's code slot to the CompileLazy builtin (with write
// barrier) and then falls through to lazy compilation.
void Builtins::Generate_CompileLazyDeoptimizedCode(MacroAssembler* masm) {
  // Set the code slot inside the JSFunction to CompileLazy.
  __ Move(r2, BUILTIN_CODE(masm->isolate(), CompileLazy));
  __ str(r2, FieldMemOperand(r1, JSFunction::kCodeOffset));
  __ RecordWriteField(r1, JSFunction::kCodeOffset, r2, r4, kLRHasNotBeenSaved,
                      kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
  // Jump to compile lazy.
  Generate_CompileLazy(masm);
}
1290
// Given a SharedFunctionInfo's function_data in |sfi_data|, replaces it
// in-place with the code object that should be executed for that kind of
// data.  |scratch1| is clobbered.
static void GetSharedFunctionInfoCode(MacroAssembler* masm, Register sfi_data,
                                      Register scratch1) {
  // Figure out the SFI's code object.
  Label done;
  Label check_is_bytecode_array;
  Label check_is_exported_function_data;
  Label check_is_fixed_array;
  Label check_is_pre_parsed_scope_data;
  Label check_is_function_template_info;
  Label check_is_interpreter_data;

  Register data_type = scratch1;

  // IsSmi: Is builtin — the Smi is an index into the builtins table.
  __ JumpIfNotSmi(sfi_data, &check_is_bytecode_array);
  __ Move(scratch1, ExternalReference::builtins_address(masm->isolate()));
  __ ldr(sfi_data, MemOperand::PointerAddressFromSmiKey(scratch1, sfi_data));
  __ b(&done);

  // Get map for subsequent checks.
  __ bind(&check_is_bytecode_array);
  __ ldr(data_type, FieldMemOperand(sfi_data, HeapObject::kMapOffset));
  __ ldrh(data_type, FieldMemOperand(data_type, Map::kInstanceTypeOffset));

  // IsBytecodeArray: Interpret bytecode
  __ cmp(data_type, Operand(BYTECODE_ARRAY_TYPE));
  __ b(ne, &check_is_exported_function_data);
  __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), InterpreterEntryTrampoline));
  __ b(&done);

  // IsWasmExportedFunctionData: Use the wrapper code
  __ bind(&check_is_exported_function_data);
  __ cmp(data_type, Operand(WASM_EXPORTED_FUNCTION_DATA_TYPE));
  __ b(ne, &check_is_fixed_array);
  __ ldr(sfi_data, FieldMemOperand(
                       sfi_data, WasmExportedFunctionData::kWrapperCodeOffset));
  __ b(&done);

  // IsFixedArray: Instantiate using AsmWasmData
  __ bind(&check_is_fixed_array);
  __ cmp(data_type, Operand(FIXED_ARRAY_TYPE));
  __ b(ne, &check_is_pre_parsed_scope_data);
  __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), InstantiateAsmJs));
  __ b(&done);

  // IsPreParsedScopeData: Compile lazy
  __ bind(&check_is_pre_parsed_scope_data);
  __ cmp(data_type, Operand(TUPLE2_TYPE));
  __ b(ne, &check_is_function_template_info);
  __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), CompileLazy));
  __ b(&done);

  // IsFunctionTemplateInfo: API call
  __ bind(&check_is_function_template_info);
  __ cmp(data_type, Operand(FUNCTION_TEMPLATE_INFO_TYPE));
  __ b(ne, &check_is_interpreter_data);
  __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), HandleApiCall));
  __ b(&done);

  // IsInterpreterData: Interpret bytecode (via the stored trampoline).
  // This is the last case, so it is only asserted, not branched on.
  __ bind(&check_is_interpreter_data);
  if (FLAG_debug_code) {
    __ cmp(data_type, Operand(INTERPRETER_DATA_TYPE));
    __ Assert(eq, AbortReason::kInvalidSharedFunctionInfoData);
  }
  __ ldr(
      sfi_data,
      FieldMemOperand(sfi_data, InterpreterData::kInterpreterTrampolineOffset));

  __ bind(&done);
}
1362
// Tries to avoid a full compile: tail-calls optimized code if the feedback
// vector has any, otherwise installs and runs the code object inferred from
// the SharedFunctionInfo, falling back to the runtime only when that code is
// CompileLazy itself.
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- r0 : argument count (preserved for callee)
  //  -- r3 : new target (preserved for callee)
  //  -- r1 : target function (preserved for callee)
  // -----------------------------------
  // First lookup code, maybe we don't need to compile!
  Label gotta_call_runtime;

  Register closure = r1;
  Register feedback_vector = r2;

  // Do we have a valid feedback vector?
  __ ldr(feedback_vector,
         FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
  __ ldr(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
  __ JumpIfRoot(feedback_vector, Heap::kUndefinedValueRootIndex,
                &gotta_call_runtime);

  // Is there an optimization marker or optimized code in the feedback vector?
  MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, r4, r6, r5);

  // We found no optimized code. Infer the code object needed for the SFI.
  Register entry = r4;
  __ ldr(entry,
         FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
  __ ldr(entry,
         FieldMemOperand(entry, SharedFunctionInfo::kFunctionDataOffset));
  GetSharedFunctionInfoCode(masm, entry, r5);

  // If code entry points to anything other than CompileLazy, install that.
  // (masm->CodeObject() is this builtin's own code object.)
  __ Move(r5, masm->CodeObject());
  __ cmp(entry, r5);
  __ b(eq, &gotta_call_runtime);

  // Install the SFI's code entry.
  __ str(entry, FieldMemOperand(closure, JSFunction::kCodeOffset));
  __ mov(r9, entry);  // Write barrier clobbers r9 below.
  __ RecordWriteField(closure, JSFunction::kCodeOffset, r9, r5,
                      kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  // Jump past the Code header to the first instruction.
  __ add(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
  __ Jump(entry);

  __ bind(&gotta_call_runtime);
  GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
}
1410
// Lazy deserialization design doc: http://goo.gl/dxkYDZ.
//
// If the target builtin is already deserialized, installs it on the function
// and jumps to it; otherwise calls into the runtime to deserialize.
void Builtins::Generate_DeserializeLazy(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- r0 : argument count (preserved for callee)
  //  -- r3 : new target (preserved for callee)
  //  -- r1 : target function (preserved for callee)
  // -----------------------------------

  Label deserialize_in_runtime;

  Register target = r1;  // Must be preserved
  Register scratch0 = r2;
  Register scratch1 = r4;

  CHECK(scratch0 != r0 && scratch0 != r3 && scratch0 != r1);
  CHECK(scratch1 != r0 && scratch1 != r3 && scratch1 != r1);
  CHECK(scratch0 != scratch1);

  // Load the builtin id for lazy deserialization from SharedFunctionInfo.

  __ AssertFunction(target);
  __ ldr(scratch0,
         FieldMemOperand(target, JSFunction::kSharedFunctionInfoOffset));

  // For a lazily-deserialized builtin the function_data is a Smi builtin id.
  __ ldr(scratch1,
         FieldMemOperand(scratch0, SharedFunctionInfo::kFunctionDataOffset));
  __ AssertSmi(scratch1);

  // The builtin may already have been deserialized. If that is the case, it is
  // stored in the builtins table, and we can copy to correct code object to
  // both the shared function info and function without calling into runtime.
  //
  // Otherwise, we need to call into runtime to deserialize.

  {
    // Load the code object at builtins_table[builtin_id] into scratch1.

    __ SmiUntag(scratch1);
    __ Move(scratch0, ExternalReference::builtins_address(masm->isolate()));
    __ ldr(scratch1, MemOperand(scratch0, scratch1, LSL, kPointerSizeLog2));

    // Check if the loaded code object has already been deserialized. This is
    // the case iff it does not equal DeserializeLazy (this builtin itself).

    __ Move(scratch0, masm->CodeObject());
    __ cmp(scratch1, scratch0);
    __ b(eq, &deserialize_in_runtime);
  }

  {
    // If we've reached this spot, the target builtin has been deserialized and
    // we simply need to copy it over to the target function.

    Register target_builtin = scratch1;

    __ str(target_builtin, FieldMemOperand(target, JSFunction::kCodeOffset));
    __ mov(r9, target_builtin);  // Write barrier clobbers r9 below.
    __ RecordWriteField(target, JSFunction::kCodeOffset, r9, r5,
                        kLRHasNotBeenSaved, kDontSaveFPRegs,
                        OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);

    // All copying is done. Jump to the deserialized code object.

    __ add(target_builtin, target_builtin,
           Operand(Code::kHeaderSize - kHeapObjectTag));
    __ Jump(target_builtin);
  }

  __ bind(&deserialize_in_runtime);
  GenerateTailCallToReturnedCode(masm, Runtime::kDeserializeLazy);
}
1482
// Calls Runtime::kInstantiateAsmJs with the function and up to three caller
// arguments (stdlib, foreign, heap), padding missing ones with undefined.
// On success returns the instantiated module object; on failure falls back
// to calling the function's (reset) code, i.e. normal JS compilation.
void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- r0 : argument count (preserved for callee)
  //  -- r1 : new target (preserved for callee)
  //  -- r3 : target function (preserved for callee)
  // -----------------------------------
  Label failed;
  {
    FrameScope scope(masm, StackFrame::INTERNAL);
    // Preserve argument count for later compare.
    __ Move(r4, r0);
    // Push the number of arguments to the callee.
    __ SmiTag(r0);
    __ push(r0);
    // Push a copy of the target function and the new target.
    __ push(r1);
    __ push(r3);

    // The function.
    __ push(r1);
    // Copy arguments from caller (stdlib, foreign, heap).
    // For each possible argument count j in 0..3, push the j caller-provided
    // arguments and pad the remaining 3 - j slots with undefined.
    Label args_done;
    for (int j = 0; j < 4; ++j) {
      Label over;
      if (j < 3) {
        __ cmp(r4, Operand(j));
        __ b(ne, &over);
      }
      for (int i = j - 1; i >= 0; --i) {
        __ ldr(r4, MemOperand(fp, StandardFrameConstants::kCallerSPOffset +
                                      i * kPointerSize));
        __ push(r4);
      }
      for (int i = 0; i < 3 - j; ++i) {
        __ PushRoot(Heap::kUndefinedValueRootIndex);
      }
      if (j < 3) {
        __ jmp(&args_done);
        __ bind(&over);
      }
    }
    __ bind(&args_done);

    // Call runtime, on success unwind frame, and parent frame.
    __ CallRuntime(Runtime::kInstantiateAsmJs, 4);
    // A smi 0 is returned on failure, an object on success.
    __ JumpIfSmi(r0, &failed);

    // Drop the saved new target / function, restore the argument count.
    __ Drop(2);
    __ pop(r4);
    __ SmiUntag(r4);
    scope.GenerateLeaveFrame();

    // Drop the caller's arguments (plus receiver) and return.
    __ add(r4, r4, Operand(1));
    __ Drop(r4);
    __ Ret();

    __ bind(&failed);
    // Restore target function and new target.
    __ pop(r3);
    __ pop(r1);
    __ pop(r0);
    __ SmiUntag(r0);
  }
  // On failure, tail call back to regular js by re-calling the function
  // which has been reset to the compile lazy builtin.
  static_assert(kJavaScriptCallCodeStartRegister == r2, "ABI mismatch");
  __ ldr(r2, FieldMemOperand(r1, JSFunction::kCodeOffset));
  __ add(r2, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
  __ Jump(r2);
}
1554
namespace {
// Continues execution in a builtin from a deoptimizer-built continuation
// frame: restores all allocatable registers (and, if |with_result|, patches
// the deoptimizer's hole with the return value in r0), restores fp/lr, tears
// down the continuation frame and jumps into the builtin's code.
void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
                                      bool java_script_builtin,
                                      bool with_result) {
  const RegisterConfiguration* config(RegisterConfiguration::Default());
  int allocatable_register_count = config->num_allocatable_general_registers();
  if (with_result) {
    // Overwrite the hole inserted by the deoptimizer with the return value
    // from the LAZY deopt point.
    __ str(r0,
           MemOperand(
               sp, config->num_allocatable_general_registers() * kPointerSize +
                       BuiltinContinuationFrameConstants::kFixedFrameSize));
  }
  // Restore the allocatable registers in reverse allocation order (they were
  // pushed by the deoptimizer in allocation order).
  for (int i = allocatable_register_count - 1; i >= 0; --i) {
    int code = config->GetAllocatableGeneralCode(i);
    __ Pop(Register::from_code(code));
    // JS builtins expect an untagged argument count.
    if (java_script_builtin && code == kJavaScriptCallArgCountRegister.code()) {
      __ SmiUntag(Register::from_code(code));
    }
  }
  __ ldr(fp, MemOperand(
                 sp, BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));

  UseScratchRegisterScope temps(masm);
  Register scratch = temps.Acquire();
  // Pop the builtin's code object, drop the rest of the continuation frame,
  // restore lr, then jump past the Code header into the builtin.
  __ Pop(scratch);
  __ add(sp, sp,
         Operand(BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
  __ Pop(lr);
  __ add(pc, scratch, Operand(Code::kHeaderSize - kHeapObjectTag));
}
}  // namespace
1588
// Continuation into a code-stub builtin, no pending return value.
void Builtins::Generate_ContinueToCodeStubBuiltin(MacroAssembler* masm) {
  Generate_ContinueToBuiltinHelper(masm, false, false);
}
1592
// Continuation into a code-stub builtin, forwarding the result in r0.
void Builtins::Generate_ContinueToCodeStubBuiltinWithResult(
    MacroAssembler* masm) {
  Generate_ContinueToBuiltinHelper(masm, false, true);
}
1597
// Continuation into a JavaScript builtin, no pending return value.
void Builtins::Generate_ContinueToJavaScriptBuiltin(MacroAssembler* masm) {
  Generate_ContinueToBuiltinHelper(masm, true, false);
}
1601
// Continuation into a JavaScript builtin, forwarding the result in r0.
void Builtins::Generate_ContinueToJavaScriptBuiltinWithResult(
    MacroAssembler* masm) {
  Generate_ContinueToBuiltinHelper(masm, true, true);
}
1606
// Notifies the runtime that lazily deoptimized code was entered, then pops
// the return value from the stack into the interpreter accumulator.
void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
  {
    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
    __ CallRuntime(Runtime::kNotifyDeoptimized);
  }

  // The return value sits on top of the stack; pop it straight into the
  // interpreter accumulator register (which must be r0 on this port).
  DCHECK_EQ(kInterpreterAccumulatorRegister.code(), r0.code());
  __ pop(r0);
  __ Ret();
}
1617
// Requests on-stack replacement (OSR) code for the function in the current
// JavaScript frame and, if the runtime produced some, "returns" directly into
// its OSR entry point. If {has_handler_frame} is true, a stub frame sits on
// top of the JavaScript frame and is skipped (for the lookup) and dropped
// (before entering the OSR code).
static void Generate_OnStackReplacementHelper(MacroAssembler* masm,
                                              bool has_handler_frame) {
  // Lookup the function in the JavaScript frame.
  if (has_handler_frame) {
    // Skip over the handler frame to reach the JavaScript frame below it.
    __ ldr(r0, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
    __ ldr(r0, MemOperand(r0, JavaScriptFrameConstants::kFunctionOffset));
  } else {
    __ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
  }

  {
    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
    // Pass function as argument.
    __ push(r0);
    __ CallRuntime(Runtime::kCompileForOnStackReplacement);
  }

  // If the runtime returned no code object (Smi zero), just return to the
  // caller and keep running the unoptimized code.
  Label skip;
  __ cmp(r0, Operand(Smi::kZero));
  __ b(ne, &skip);
  __ Ret();

  __ bind(&skip);

  // Drop any potential handler frame that might be sitting on top of the
  // actual JavaScript frame. This is the case when OSR is triggered from
  // bytecode.
  if (has_handler_frame) {
    __ LeaveFrame(StackFrame::STUB);
  }

  // Load deoptimization data from the code object.
  // <deopt_data> = <code>[#deoptimization_data_offset]
  __ ldr(r1, FieldMemOperand(r0, Code::kDeoptimizationDataOffset));

  {
    ConstantPoolUnavailableScope constant_pool_unavailable(masm);
    __ add(r0, r0, Operand(Code::kHeaderSize - kHeapObjectTag));  // Code start

    // Load the OSR entrypoint offset from the deoptimization data.
    // <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
    __ ldr(r1, FieldMemOperand(r1, FixedArray::OffsetOfElementAt(
                                       DeoptimizationData::kOsrPcOffsetIndex)));

    // Compute the target address = code start + osr_offset (a smi).
    __ add(lr, r0, Operand::SmiUntag(r1));

    // And "return" to the OSR entry point of the function.
    __ Ret();
  }
}
1669
// OSR entry when triggered from optimized/full code: no handler frame on top.
void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
  Generate_OnStackReplacementHelper(masm, false);
}
1673
// OSR entry when triggered from bytecode: a handler (stub) frame sits on top
// of the JavaScript frame and must be skipped/dropped by the helper.
void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
  Generate_OnStackReplacementHelper(masm, true);
}
1677
1678 // static
// static
// Implements Function.prototype.apply: calls the receiver with the elements
// of argArray as arguments (or with no arguments if argArray is absent,
// null or undefined).
void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- r0    : argc
  //  -- sp[0] : argArray
  //  -- sp[4] : thisArg
  //  -- sp[8] : receiver
  // -----------------------------------

  // 1. Load receiver into r1, argArray into r2 (if present), remove all
  // arguments from the stack (including the receiver), and push thisArg (if
  // present) instead.
  {
    // Defaults (undefined) are used when fewer than two arguments were passed;
    // the conditional (ge) loads below only execute while r4 >= 0, i.e. while
    // actual arguments remain.
    __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
    __ mov(r2, r5);
    __ ldr(r1, MemOperand(sp, r0, LSL, kPointerSizeLog2));  // receiver
    __ sub(r4, r0, Operand(1), SetCC);
    __ ldr(r5, MemOperand(sp, r4, LSL, kPointerSizeLog2), ge);  // thisArg
    __ sub(r4, r4, Operand(1), SetCC, ge);
    __ ldr(r2, MemOperand(sp, r4, LSL, kPointerSizeLog2), ge);  // argArray
    // Drop all arguments, then overwrite the receiver slot with thisArg.
    __ add(sp, sp, Operand(r0, LSL, kPointerSizeLog2));
    __ str(r5, MemOperand(sp, 0));
  }

  // ----------- S t a t e -------------
  //  -- r2    : argArray
  //  -- r1    : receiver
  //  -- sp[0] : thisArg
  // -----------------------------------

  // 2. We don't need to check explicitly for callable receiver here,
  // since that's the first thing the Call/CallWithArrayLike builtins
  // will do.

  // 3. Tail call with no arguments if argArray is null or undefined.
  Label no_arguments;
  __ JumpIfRoot(r2, Heap::kNullValueRootIndex, &no_arguments);
  __ JumpIfRoot(r2, Heap::kUndefinedValueRootIndex, &no_arguments);

  // 4a. Apply the receiver to the given argArray.
  __ Jump(BUILTIN_CODE(masm->isolate(), CallWithArrayLike),
          RelocInfo::CODE_TARGET);

  // 4b. The argArray is either null or undefined, so we tail call without any
  // arguments to the receiver.
  __ bind(&no_arguments);
  {
    __ mov(r0, Operand(0));
    __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
  }
}
1729
1730 // static
// static
// Implements Function.prototype.call: the receiver becomes the callable and
// the first argument (if any) becomes the new receiver.
void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
  // 1. Make sure we have at least one argument.
  // r0: actual number of arguments
  {
    // Push undefined as receiver placeholder if no arguments were passed, so
    // the shifting loop below always has something to consume.
    Label done;
    __ cmp(r0, Operand::Zero());
    __ b(ne, &done);
    __ PushRoot(Heap::kUndefinedValueRootIndex);
    __ add(r0, r0, Operand(1));
    __ bind(&done);
  }

  // 2. Get the callable to call (passed as receiver) from the stack.
  // r0: actual number of arguments
  __ ldr(r1, MemOperand(sp, r0, LSL, kPointerSizeLog2));

  // 3. Shift arguments and return address one slot down on the stack
  //    (overwriting the original receiver).  Adjust argument count to make
  //    the original first argument the new receiver.
  // r0: actual number of arguments
  // r1: callable
  {
    Register scratch = r3;
    Label loop;
    // Calculate the copy start address (destination). Copy end address is sp.
    __ add(r2, sp, Operand(r0, LSL, kPointerSizeLog2));

    // Walk downwards from the old receiver slot to sp, copying each slot one
    // position up (towards higher addresses).
    __ bind(&loop);
    __ ldr(scratch, MemOperand(r2, -kPointerSize));
    __ str(scratch, MemOperand(r2));
    __ sub(r2, r2, Operand(kPointerSize));
    __ cmp(r2, sp);
    __ b(ne, &loop);
    // Adjust the actual number of arguments and remove the top element
    // (which is a copy of the last argument).
    __ sub(r0, r0, Operand(1));
    __ pop();
  }

  // 4. Call the callable.
  __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
}
1773
// Implements Reflect.apply(target, thisArgument, argumentsList).
void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- r0     : argc
  //  -- sp[0]  : argumentsList
  //  -- sp[4]  : thisArgument
  //  -- sp[8]  : target
  //  -- sp[12] : receiver
  // -----------------------------------

  // 1. Load target into r1 (if present), argumentsList into r2 (if present),
  // remove all arguments from the stack (including the receiver), and push
  // thisArgument (if present) instead.
  {
    // All three values default to undefined; the conditional (ge) loads only
    // execute while r4 >= 0, i.e. while actual arguments remain.
    __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
    __ mov(r5, r1);
    __ mov(r2, r1);
    __ sub(r4, r0, Operand(1), SetCC);
    __ ldr(r1, MemOperand(sp, r4, LSL, kPointerSizeLog2), ge);  // target
    __ sub(r4, r4, Operand(1), SetCC, ge);
    __ ldr(r5, MemOperand(sp, r4, LSL, kPointerSizeLog2), ge);  // thisArgument
    __ sub(r4, r4, Operand(1), SetCC, ge);
    __ ldr(r2, MemOperand(sp, r4, LSL, kPointerSizeLog2), ge);  // argumentsList
    // Drop all arguments, then overwrite the receiver slot with thisArgument.
    __ add(sp, sp, Operand(r0, LSL, kPointerSizeLog2));
    __ str(r5, MemOperand(sp, 0));
  }

  // ----------- S t a t e -------------
  //  -- r2    : argumentsList
  //  -- r1    : target
  //  -- sp[0] : thisArgument
  // -----------------------------------

  // 2. We don't need to check explicitly for callable target here,
  // since that's the first thing the Call/CallWithArrayLike builtins
  // will do.

  // 3. Apply the target to the given argumentsList.
  __ Jump(BUILTIN_CODE(masm->isolate(), CallWithArrayLike),
          RelocInfo::CODE_TARGET);
}
1814
// Implements Reflect.construct(target, argumentsList[, newTarget]).
void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- r0     : argc
  //  -- sp[0]  : new.target (optional)
  //  -- sp[4]  : argumentsList
  //  -- sp[8]  : target
  //  -- sp[12] : receiver
  // -----------------------------------

  // 1. Load target into r1 (if present), argumentsList into r2 (if present),
  // new.target into r3 (if present, otherwise use target), remove all
  // arguments from the stack (including the receiver), and push thisArgument
  // (if present) instead.
  {
    // target and argumentsList default to undefined; new.target defaults to
    // target. The conditional (ge) loads only execute while r4 >= 0, i.e.
    // while actual arguments remain.
    __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
    __ mov(r2, r1);
    __ str(r2, MemOperand(sp, r0, LSL, kPointerSizeLog2));  // receiver
    __ sub(r4, r0, Operand(1), SetCC);
    __ ldr(r1, MemOperand(sp, r4, LSL, kPointerSizeLog2), ge);  // target
    __ mov(r3, r1);  // new.target defaults to target
    __ sub(r4, r4, Operand(1), SetCC, ge);
    __ ldr(r2, MemOperand(sp, r4, LSL, kPointerSizeLog2), ge);  // argumentsList
    __ sub(r4, r4, Operand(1), SetCC, ge);
    __ ldr(r3, MemOperand(sp, r4, LSL, kPointerSizeLog2), ge);  // new.target
    // Drop all arguments; the receiver slot was already set to undefined.
    __ add(sp, sp, Operand(r0, LSL, kPointerSizeLog2));
  }

  // ----------- S t a t e -------------
  //  -- r2    : argumentsList
  //  -- r3    : new.target
  //  -- r1    : target
  //  -- sp[0] : receiver (undefined)
  // -----------------------------------

  // 2. We don't need to check explicitly for constructor target here,
  // since that's the first thing the Construct/ConstructWithArrayLike
  // builtins will do.

  // 3. We don't need to check explicitly for constructor new.target here,
  // since that's the second thing the Construct/ConstructWithArrayLike
  // builtins will do.

  // 4. Construct the target with the given new.target and argumentsList.
  __ Jump(BUILTIN_CODE(masm->isolate(), ConstructWithArrayLike),
          RelocInfo::CODE_TARGET);
}
1861
// Builds an arguments adaptor frame: saves lr/fp, pushes the frame-type
// marker, the function (r1) and the smi-tagged argument count (r0) with a
// single store-multiple, adds a padding slot, and points fp at the fixed
// part of the new frame.
static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
  __ SmiTag(r0);
  __ mov(r4, Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
  __ stm(db_w, sp, r0.bit() | r1.bit() | r4.bit() |
                       fp.bit() | lr.bit());
  __ Push(Smi::kZero);  // Padding.
  __ add(fp, sp,
         Operand(ArgumentsAdaptorFrameConstants::kFixedFrameSizeFromFp));
}
1871
// Tears down an arguments adaptor frame and drops the actual arguments
// (including the receiver) that were pushed below it.
static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- r0 : result being passed through
  // -----------------------------------
  // Get the number of arguments passed (as a smi), tear down the frame and
  // then tear down the parameters.
  __ ldr(r1, MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));

  __ LeaveFrame(StackFrame::ARGUMENTS_ADAPTOR);
  __ add(sp, sp, Operand::PointerOffsetFromSmiKey(r1));
  __ add(sp, sp, Operand(kPointerSize));  // adjust for receiver
}
1884
1885 // static
// static
// Pushes the elements of a FixedArray arguments list onto the stack (holes
// are replaced by undefined) and tail-calls the given Call or Construct
// builtin.
void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
                                               Handle<Code> code) {
  // ----------- S t a t e -------------
  //  -- r1 : target
  //  -- r0 : number of parameters on the stack (not including the receiver)
  //  -- r2 : arguments list (a FixedArray)
  //  -- r4 : len (number of elements to push from args)
  //  -- r3 : new.target (for [[Construct]])
  // -----------------------------------
  __ AssertFixedArray(r2);

  Register scratch = r8;

  // Check for stack overflow.
  {
    // Check the stack for overflow. We are not trying to catch interruptions
    // (i.e. debug break and preemption) here, so check the "real stack limit".
    Label done;
    __ LoadRoot(scratch, Heap::kRealStackLimitRootIndex);
    // The stack might already be overflowed here which will cause 'scratch' to
    // become negative.
    __ sub(scratch, sp, scratch);
    // Check if the arguments will overflow the stack.
    __ cmp(scratch, Operand(r4, LSL, kPointerSizeLog2));
    __ b(gt, &done);  // Signed comparison.
    __ TailCallRuntime(Runtime::kThrowStackOverflow);
    __ bind(&done);
  }

  // Push arguments onto the stack (thisArgument is already on the stack).
  {
    // r6 is the running element index; r5 holds the hole sentinel so holes
    // can be converted to undefined before pushing.
    __ mov(r6, Operand(0));
    __ LoadRoot(r5, Heap::kTheHoleValueRootIndex);
    Label done, loop;
    __ bind(&loop);
    __ cmp(r6, r4);
    __ b(eq, &done);
    __ add(scratch, r2, Operand(r6, LSL, kPointerSizeLog2));
    __ ldr(scratch, FieldMemOperand(scratch, FixedArray::kHeaderSize));
    __ cmp(scratch, r5);
    // Replace a hole with undefined (conditional load on eq).
    __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex, eq);
    __ Push(scratch);
    __ add(r6, r6, Operand(1));
    __ b(&loop);
    __ bind(&done);
    // Account for the newly pushed arguments in the argument count.
    __ add(r0, r0, r6);
  }

  // Tail-call to the actual Call or Construct builtin.
  __ Jump(code, RelocInfo::CODE_TARGET);
}
1937
1938 // static
// static
// Forwards (a suffix of) the caller frame's arguments — to support rest
// parameters and spread of `arguments` — by copying them onto the stack and
// tail-calling the given Call or Construct builtin.
void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
                                                      CallOrConstructMode mode,
                                                      Handle<Code> code) {
  // ----------- S t a t e -------------
  //  -- r0 : the number of arguments (not including the receiver)
  //  -- r3 : the new.target (for [[Construct]] calls)
  //  -- r1 : the target to call (can be any Object)
  //  -- r2 : start index (to support rest parameters)
  // -----------------------------------

  Register scratch = r6;

  // Check if new.target has a [[Construct]] internal method.
  if (mode == CallOrConstructMode::kConstruct) {
    Label new_target_constructor, new_target_not_constructor;
    __ JumpIfSmi(r3, &new_target_not_constructor);
    __ ldr(scratch, FieldMemOperand(r3, HeapObject::kMapOffset));
    __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
    __ tst(scratch, Operand(Map::IsConstructorBit::kMask));
    __ b(ne, &new_target_constructor);
    __ bind(&new_target_not_constructor);
    {
      // Throw; kThrowNotConstructor does not return.
      FrameScope scope(masm, StackFrame::MANUAL);
      __ EnterFrame(StackFrame::INTERNAL);
      __ Push(r3);
      __ CallRuntime(Runtime::kThrowNotConstructor);
    }
    __ bind(&new_target_constructor);
  }

  // Check if we have an arguments adaptor frame below the function frame.
  // Afterwards r4 holds the frame to copy from and r5 the caller's argument
  // count.
  Label arguments_adaptor, arguments_done;
  __ ldr(r4, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
  __ ldr(scratch,
         MemOperand(r4, CommonFrameConstants::kContextOrFrameTypeOffset));
  __ cmp(scratch,
         Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
  __ b(eq, &arguments_adaptor);
  {
    // No adaptor frame: use the formal parameter count from the function's
    // SharedFunctionInfo and copy from the current frame.
    __ ldr(r5, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
    __ ldr(r5, FieldMemOperand(r5, JSFunction::kSharedFunctionInfoOffset));
    __ ldr(r5, FieldMemOperand(
                   r5, SharedFunctionInfo::kFormalParameterCountOffset));
    __ mov(r4, fp);
  }
  __ b(&arguments_done);
  __ bind(&arguments_adaptor);
  {
    // Load the length from the ArgumentsAdaptorFrame.
    __ ldr(r5, MemOperand(r4, ArgumentsAdaptorFrameConstants::kLengthOffset));
    __ SmiUntag(r5);
  }
  __ bind(&arguments_done);

  Label stack_done, stack_overflow;
  // r5 = number of arguments to forward (length - start index); nothing to do
  // if that is <= 0.
  __ sub(r5, r5, r2, SetCC);
  __ b(le, &stack_done);
  {
    // Check for stack overflow.
    Generate_StackOverflowCheck(masm, r5, r2, &stack_overflow);

    // Forward the arguments from the caller frame.
    {
      Label loop;
      __ add(r4, r4, Operand(kPointerSize));
      __ add(r0, r0, r5);
      __ bind(&loop);
      {
        // Copy from highest index downwards; SetCC drives the loop condition.
        __ ldr(scratch, MemOperand(r4, r5, LSL, kPointerSizeLog2));
        __ push(scratch);
        __ sub(r5, r5, Operand(1), SetCC);
        __ b(ne, &loop);
      }
    }
  }
  __ b(&stack_done);
  __ bind(&stack_overflow);
  __ TailCallRuntime(Runtime::kThrowStackOverflow);
  __ bind(&stack_done);

  // Tail-call to the {code} handler.
  __ Jump(code, RelocInfo::CODE_TARGET);
}
2022
2023 // static
// static
// Implements [[Call]] for a JSFunction: rejects class constructors, converts
// the receiver where required by the function's language mode, and invokes
// the function through InvokeFunctionCode.
void Builtins::Generate_CallFunction(MacroAssembler* masm,
                                     ConvertReceiverMode mode) {
  // ----------- S t a t e -------------
  //  -- r0 : the number of arguments (not including the receiver)
  //  -- r1 : the function to call (checked to be a JSFunction)
  // -----------------------------------
  __ AssertFunction(r1);

  // See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
  // Check that the function is not a "classConstructor".
  Label class_constructor;
  __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
  __ ldr(r3, FieldMemOperand(r2, SharedFunctionInfo::kFlagsOffset));
  __ tst(r3, Operand(SharedFunctionInfo::IsClassConstructorBit::kMask));
  __ b(ne, &class_constructor);

  // Enter the context of the function; ToObject has to run in the function
  // context, and we also need to take the global proxy from the function
  // context in case of conversion.
  __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
  // We need to convert the receiver for non-native sloppy mode functions.
  Label done_convert;
  __ ldr(r3, FieldMemOperand(r2, SharedFunctionInfo::kFlagsOffset));
  __ tst(r3, Operand(SharedFunctionInfo::IsNativeBit::kMask |
                     SharedFunctionInfo::IsStrictBit::kMask));
  __ b(ne, &done_convert);
  {
    // ----------- S t a t e -------------
    //  -- r0 : the number of arguments (not including the receiver)
    //  -- r1 : the function to call (checked to be a JSFunction)
    //  -- r2 : the shared function info.
    //  -- cp : the function context.
    // -----------------------------------

    if (mode == ConvertReceiverMode::kNullOrUndefined) {
      // Patch receiver to global proxy.
      __ LoadGlobalProxy(r3);
    } else {
      Label convert_to_object, convert_receiver;
      // Load the receiver from the stack.
      __ ldr(r3, MemOperand(sp, r0, LSL, kPointerSizeLog2));
      __ JumpIfSmi(r3, &convert_to_object);
      STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
      // JSReceivers need no conversion.
      __ CompareObjectType(r3, r4, r4, FIRST_JS_RECEIVER_TYPE);
      __ b(hs, &done_convert);
      if (mode != ConvertReceiverMode::kNotNullOrUndefined) {
        Label convert_global_proxy;
        __ JumpIfRoot(r3, Heap::kUndefinedValueRootIndex,
                      &convert_global_proxy);
        __ JumpIfNotRoot(r3, Heap::kNullValueRootIndex, &convert_to_object);
        __ bind(&convert_global_proxy);
        {
          // Patch receiver to global proxy.
          __ LoadGlobalProxy(r3);
        }
        __ b(&convert_receiver);
      }
      __ bind(&convert_to_object);
      {
        // Convert receiver using ToObject.
        // TODO(bmeurer): Inline the allocation here to avoid building the frame
        // in the fast case? (fall back to AllocateInNewSpace?)
        // Save argc (smi-tagged), function and context across the call.
        FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
        __ SmiTag(r0);
        __ Push(r0, r1);
        __ mov(r0, r3);
        __ Push(cp);
        __ Call(BUILTIN_CODE(masm->isolate(), ToObject),
                RelocInfo::CODE_TARGET);
        __ Pop(cp);
        __ mov(r3, r0);
        __ Pop(r0, r1);
        __ SmiUntag(r0);
      }
      // r2 was clobbered as a scratch; reload the shared function info.
      __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
      __ bind(&convert_receiver);
    }
    // Store the converted receiver back into its stack slot.
    __ str(r3, MemOperand(sp, r0, LSL, kPointerSizeLog2));
  }
  __ bind(&done_convert);

  // ----------- S t a t e -------------
  //  -- r0 : the number of arguments (not including the receiver)
  //  -- r1 : the function to call (checked to be a JSFunction)
  //  -- r2 : the shared function info.
  //  -- cp : the function context.
  // -----------------------------------

  __ ldr(r2,
         FieldMemOperand(r2, SharedFunctionInfo::kFormalParameterCountOffset));
  ParameterCount actual(r0);
  ParameterCount expected(r2);
  __ InvokeFunctionCode(r1, no_reg, expected, actual, JUMP_FUNCTION);

  // The function is a "classConstructor", need to raise an exception.
  __ bind(&class_constructor);
  {
    FrameScope frame(masm, StackFrame::INTERNAL);
    __ push(r1);
    __ CallRuntime(Runtime::kThrowConstructorNonCallableError);
  }
}
2125
2126 namespace {
2127
// Pushes the [[BoundArguments]] of the JSBoundFunction in r1 onto the stack,
// below the already-pushed actual arguments, and adjusts the argument count
// in r0 accordingly. No-op if there are no bound arguments.
void Generate_PushBoundArguments(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- r0 : the number of arguments (not including the receiver)
  //  -- r1 : target (checked to be a JSBoundFunction)
  //  -- r3 : new.target (only in case of [[Construct]])
  // -----------------------------------

  // Load [[BoundArguments]] into r2 and length of that into r4.
  Label no_bound_arguments;
  __ ldr(r2, FieldMemOperand(r1, JSBoundFunction::kBoundArgumentsOffset));
  __ ldr(r4, FieldMemOperand(r2, FixedArray::kLengthOffset));
  __ SmiUntag(r4);
  __ cmp(r4, Operand(0));
  __ b(eq, &no_bound_arguments);
  {
    // ----------- S t a t e -------------
    //  -- r0 : the number of arguments (not including the receiver)
    //  -- r1 : target (checked to be a JSBoundFunction)
    //  -- r2 : the [[BoundArguments]] (implemented as FixedArray)
    //  -- r3 : new.target (only in case of [[Construct]])
    //  -- r4 : the number of [[BoundArguments]]
    // -----------------------------------

    // Reserve stack space for the [[BoundArguments]].
    {
      Label done;
      __ sub(sp, sp, Operand(r4, LSL, kPointerSizeLog2));
      // Check the stack for overflow. We are not trying to catch interruptions
      // (i.e. debug break and preemption) here, so check the "real stack
      // limit".
      __ CompareRoot(sp, Heap::kRealStackLimitRootIndex);
      __ b(gt, &done);  // Signed comparison.
      // Restore the stack pointer.
      __ add(sp, sp, Operand(r4, LSL, kPointerSizeLog2));
      {
        // Throw; kThrowStackOverflow does not return.
        FrameScope scope(masm, StackFrame::MANUAL);
        __ EnterFrame(StackFrame::INTERNAL);
        __ CallRuntime(Runtime::kThrowStackOverflow);
      }
      __ bind(&done);
    }

    Register scratch = r6;

    // Relocate arguments down the stack.
    {
      // Move the r0+1 existing slots (arguments + receiver) down into the
      // space just reserved; r5 is the destination index, r4 the source index.
      Label loop, done_loop;
      __ mov(r5, Operand(0));
      __ bind(&loop);
      __ cmp(r5, r0);
      __ b(gt, &done_loop);
      __ ldr(scratch, MemOperand(sp, r4, LSL, kPointerSizeLog2));
      __ str(scratch, MemOperand(sp, r5, LSL, kPointerSizeLog2));
      __ add(r4, r4, Operand(1));
      __ add(r5, r5, Operand(1));
      __ b(&loop);
      __ bind(&done_loop);
    }

    // Copy [[BoundArguments]] to the stack (below the arguments).
    {
      // Iterate the FixedArray backwards, storing each element into the gap
      // left above the relocated arguments and bumping the argument count.
      Label loop;
      __ ldr(r4, FieldMemOperand(r2, FixedArray::kLengthOffset));
      __ SmiUntag(r4);
      __ add(r2, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
      __ bind(&loop);
      __ sub(r4, r4, Operand(1), SetCC);
      __ ldr(scratch, MemOperand(r2, r4, LSL, kPointerSizeLog2));
      __ str(scratch, MemOperand(sp, r0, LSL, kPointerSizeLog2));
      __ add(r0, r0, Operand(1));
      __ b(gt, &loop);
    }
  }
  __ bind(&no_bound_arguments);
}
2203
2204 } // namespace
2205
2206 // static
// static
// Implements [[Call]] for a JSBoundFunction: installs [[BoundThis]] as the
// receiver, pushes the [[BoundArguments]], then tail-calls the
// [[BoundTargetFunction]] via the generic Call builtin.
void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- r0 : the number of arguments (not including the receiver)
  //  -- r1 : the function to call (checked to be a JSBoundFunction)
  // -----------------------------------
  __ AssertBoundFunction(r1);

  // Patch the receiver to [[BoundThis]].
  __ ldr(r3, FieldMemOperand(r1, JSBoundFunction::kBoundThisOffset));
  __ str(r3, MemOperand(sp, r0, LSL, kPointerSizeLog2));

  // Push the [[BoundArguments]] onto the stack.
  Generate_PushBoundArguments(masm);

  // Call the [[BoundTargetFunction]] via the Call builtin.
  __ ldr(r1, FieldMemOperand(r1, JSBoundFunction::kBoundTargetFunctionOffset));
  __ Jump(BUILTIN_CODE(masm->isolate(), Call_ReceiverIsAny),
          RelocInfo::CODE_TARGET);
}
2226
2227 // static
// static
// Generic [[Call]] dispatch: routes to CallFunction, CallBoundFunction or
// CallProxy by instance type, falls back to the call-as-function delegate for
// other callables, and throws for non-callables.
void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
  // ----------- S t a t e -------------
  //  -- r0 : the number of arguments (not including the receiver)
  //  -- r1 : the target to call (can be any Object).
  // -----------------------------------

  Label non_callable, non_function, non_smi;
  __ JumpIfSmi(r1, &non_callable);
  __ bind(&non_smi);
  // After this, r4 holds the map and r5 the instance type of the target.
  __ CompareObjectType(r1, r4, r5, JS_FUNCTION_TYPE);
  __ Jump(masm->isolate()->builtins()->CallFunction(mode),
          RelocInfo::CODE_TARGET, eq);
  __ cmp(r5, Operand(JS_BOUND_FUNCTION_TYPE));
  __ Jump(BUILTIN_CODE(masm->isolate(), CallBoundFunction),
          RelocInfo::CODE_TARGET, eq);

  // Check if target has a [[Call]] internal method.
  __ ldrb(r4, FieldMemOperand(r4, Map::kBitFieldOffset));
  __ tst(r4, Operand(Map::IsCallableBit::kMask));
  __ b(eq, &non_callable);

  // Check if target is a proxy and call CallProxy external builtin
  __ cmp(r5, Operand(JS_PROXY_TYPE));
  __ b(ne, &non_function);
  __ Jump(BUILTIN_CODE(masm->isolate(), CallProxy), RelocInfo::CODE_TARGET);

  // 2. Call to something else, which might have a [[Call]] internal method (if
  // not we raise an exception).
  __ bind(&non_function);
  // Overwrite the original receiver with the (original) target.
  __ str(r1, MemOperand(sp, r0, LSL, kPointerSizeLog2));
  // Let the "call_as_function_delegate" take care of the rest.
  __ LoadNativeContextSlot(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, r1);
  __ Jump(masm->isolate()->builtins()->CallFunction(
              ConvertReceiverMode::kNotNullOrUndefined),
          RelocInfo::CODE_TARGET);

  // 3. Call to something that is not callable.
  __ bind(&non_callable);
  {
    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
    __ Push(r1);
    __ CallRuntime(Runtime::kThrowCalledNonCallable);
  }
}
2273
2274 // static
// static
// Implements [[Construct]] for a JSFunction: dispatches to the builtins
// construct stub or the generic construct stub depending on the function's
// SharedFunctionInfo flags.
void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- r0 : the number of arguments (not including the receiver)
  //  -- r1 : the constructor to call (checked to be a JSFunction)
  //  -- r3 : the new target (checked to be a constructor)
  // -----------------------------------
  __ AssertConstructor(r1);
  __ AssertFunction(r1);

  // Calling convention for function specific ConstructStubs require
  // r2 to contain either an AllocationSite or undefined.
  __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);

  Label call_generic_stub;

  // Jump to JSBuiltinsConstructStub or JSConstructStubGeneric.
  __ ldr(r4, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
  __ ldr(r4, FieldMemOperand(r4, SharedFunctionInfo::kFlagsOffset));
  __ tst(r4, Operand(SharedFunctionInfo::ConstructAsBuiltinBit::kMask));
  __ b(eq, &call_generic_stub);

  __ Jump(BUILTIN_CODE(masm->isolate(), JSBuiltinsConstructStub),
          RelocInfo::CODE_TARGET);

  __ bind(&call_generic_stub);
  __ Jump(BUILTIN_CODE(masm->isolate(), JSConstructStubGeneric),
          RelocInfo::CODE_TARGET);
}
2303
2304 // static
// static
// Implements [[Construct]] for a JSBoundFunction: pushes [[BoundArguments]],
// redirects new.target to the bound target if it referred to the bound
// function itself, then tail-calls the generic Construct builtin.
void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- r0 : the number of arguments (not including the receiver)
  //  -- r1 : the function to call (checked to be a JSBoundFunction)
  //  -- r3 : the new target (checked to be a constructor)
  // -----------------------------------
  __ AssertConstructor(r1);
  __ AssertBoundFunction(r1);

  // Push the [[BoundArguments]] onto the stack.
  Generate_PushBoundArguments(masm);

  // Patch new.target to [[BoundTargetFunction]] if new.target equals target.
  // The load is conditional (eq) on the preceding comparison.
  __ cmp(r1, r3);
  __ ldr(r3, FieldMemOperand(r1, JSBoundFunction::kBoundTargetFunctionOffset),
         eq);

  // Construct the [[BoundTargetFunction]] via the Construct builtin.
  __ ldr(r1, FieldMemOperand(r1, JSBoundFunction::kBoundTargetFunctionOffset));
  __ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET);
}
2326
2327 // static
// static
// Generic [[Construct]] dispatch: routes to ConstructFunction,
// ConstructBoundFunction or ConstructProxy by instance type, falls back to
// the call-as-constructor delegate for other constructors, and throws for
// non-constructables.
void Builtins::Generate_Construct(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- r0 : the number of arguments (not including the receiver)
  //  -- r1 : the constructor to call (can be any Object)
  //  -- r3 : the new target (either the same as the constructor or
  //          the JSFunction on which new was invoked initially)
  // -----------------------------------

  // Check if target is a Smi.
  Label non_constructor, non_proxy;
  __ JumpIfSmi(r1, &non_constructor);

  // Check if target has a [[Construct]] internal method.
  __ ldr(r4, FieldMemOperand(r1, HeapObject::kMapOffset));
  __ ldrb(r2, FieldMemOperand(r4, Map::kBitFieldOffset));
  __ tst(r2, Operand(Map::IsConstructorBit::kMask));
  __ b(eq, &non_constructor);

  // Dispatch based on instance type (left in r5).
  __ CompareInstanceType(r4, r5, JS_FUNCTION_TYPE);
  __ Jump(BUILTIN_CODE(masm->isolate(), ConstructFunction),
          RelocInfo::CODE_TARGET, eq);

  // Only dispatch to bound functions after checking whether they are
  // constructors.
  __ cmp(r5, Operand(JS_BOUND_FUNCTION_TYPE));
  __ Jump(BUILTIN_CODE(masm->isolate(), ConstructBoundFunction),
          RelocInfo::CODE_TARGET, eq);

  // Only dispatch to proxies after checking whether they are constructors.
  __ cmp(r5, Operand(JS_PROXY_TYPE));
  __ b(ne, &non_proxy);
  __ Jump(BUILTIN_CODE(masm->isolate(), ConstructProxy),
          RelocInfo::CODE_TARGET);

  // Called Construct on an exotic Object with a [[Construct]] internal method.
  __ bind(&non_proxy);
  {
    // Overwrite the original receiver with the (original) target.
    __ str(r1, MemOperand(sp, r0, LSL, kPointerSizeLog2));
    // Let the "call_as_constructor_delegate" take care of the rest.
    __ LoadNativeContextSlot(Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, r1);
    __ Jump(masm->isolate()->builtins()->CallFunction(),
            RelocInfo::CODE_TARGET);
  }

  // Called Construct on an Object that doesn't have a [[Construct]] internal
  // method.
  __ bind(&non_constructor);
  __ Jump(BUILTIN_CODE(masm->isolate(), ConstructedNonConstructable),
          RelocInfo::CODE_TARGET);
}
2380
2381 // static
// static
// Tail-calls the runtime to allocate an object of the requested size in new
// space. The size is smi-tagged and passed on the stack; cp is cleared since
// this builtin has no JavaScript context.
void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- r1 : requested object size (untagged)
  //  -- lr : return address
  // -----------------------------------
  __ SmiTag(r1);
  __ Push(r1);
  __ Move(cp, Smi::kZero);
  __ TailCallRuntime(Runtime::kAllocateInNewSpace);
}
2392
2393 // static
// static
// Tail-calls the runtime to allocate an object of the requested size in old
// space. Passes the smi-tagged size plus the encoded target space; cp is
// cleared since this builtin has no JavaScript context.
void Builtins::Generate_AllocateInOldSpace(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- r1 : requested object size (untagged)
  //  -- lr : return address
  // -----------------------------------
  __ SmiTag(r1);
  __ Move(r2, Smi::FromInt(AllocateTargetSpace::encode(OLD_SPACE)));
  __ Push(r1, r2);
  __ Move(cp, Smi::kZero);
  __ TailCallRuntime(Runtime::kAllocateInTargetSpace);
}
2405
2406 // static
// static
// Tail-calls Runtime::kAbort with the given message id. cp is cleared since
// this builtin has no JavaScript context; the runtime call does not return.
void Builtins::Generate_Abort(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- r1 : message_id as Smi
  //  -- lr : return address
  // -----------------------------------
  __ Push(r1);
  __ Move(cp, Smi::kZero);
  __ TailCallRuntime(Runtime::kAbort);
}
2416
Generate_ArgumentsAdaptorTrampoline(MacroAssembler * masm)2417 void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
2418 // ----------- S t a t e -------------
2419 // -- r0 : actual number of arguments
2420 // -- r1 : function (passed through to callee)
2421 // -- r2 : expected number of arguments
2422 // -- r3 : new target (passed through to callee)
2423 // -----------------------------------
2424
2425 Label invoke, dont_adapt_arguments, stack_overflow;
2426
2427 Label enough, too_few;
2428 __ cmp(r0, r2);
2429 __ b(lt, &too_few);
2430 __ cmp(r2, Operand(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
2431 __ b(eq, &dont_adapt_arguments);
2432
2433 Register scratch = r5;
2434
2435 { // Enough parameters: actual >= expected
2436 __ bind(&enough);
2437 EnterArgumentsAdaptorFrame(masm);
2438 Generate_StackOverflowCheck(masm, r2, scratch, &stack_overflow);
2439
2440 // Calculate copy start address into r0 and copy end address into r4.
2441 // r0: actual number of arguments as a smi
2442 // r1: function
2443 // r2: expected number of arguments
2444 // r3: new target (passed through to callee)
2445 __ add(r0, fp, Operand::PointerOffsetFromSmiKey(r0));
2446 // adjust for return address and receiver
2447 __ add(r0, r0, Operand(2 * kPointerSize));
2448 __ sub(r4, r0, Operand(r2, LSL, kPointerSizeLog2));
2449
2450 // Copy the arguments (including the receiver) to the new stack frame.
2451 // r0: copy start address
2452 // r1: function
2453 // r2: expected number of arguments
2454 // r3: new target (passed through to callee)
2455 // r4: copy end address
2456
2457 Label copy;
2458 __ bind(©);
2459 __ ldr(scratch, MemOperand(r0, 0));
2460 __ push(scratch);
2461 __ cmp(r0, r4); // Compare before moving to next argument.
2462 __ sub(r0, r0, Operand(kPointerSize));
2463 __ b(ne, ©);
2464
2465 __ b(&invoke);
2466 }
2467
2468 { // Too few parameters: Actual < expected
2469 __ bind(&too_few);
2470 EnterArgumentsAdaptorFrame(masm);
2471 Generate_StackOverflowCheck(masm, r2, scratch, &stack_overflow);
2472
2473 // Calculate copy start address into r0 and copy end address is fp.
2474 // r0: actual number of arguments as a smi
2475 // r1: function
2476 // r2: expected number of arguments
2477 // r3: new target (passed through to callee)
2478 __ add(r0, fp, Operand::PointerOffsetFromSmiKey(r0));
2479
2480 // Copy the arguments (including the receiver) to the new stack frame.
2481 // r0: copy start address
2482 // r1: function
2483 // r2: expected number of arguments
2484 // r3: new target (passed through to callee)
2485 Label copy;
2486 __ bind(©);
2487
2488 // Adjust load for return address and receiver.
2489 __ ldr(scratch, MemOperand(r0, 2 * kPointerSize));
2490 __ push(scratch);
2491
2492 __ cmp(r0, fp); // Compare before moving to next argument.
2493 __ sub(r0, r0, Operand(kPointerSize));
2494 __ b(ne, ©);
2495
2496 // Fill the remaining expected arguments with undefined.
2497 // r1: function
2498 // r2: expected number of arguments
2499 // r3: new target (passed through to callee)
2500 __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
2501 __ sub(r4, fp, Operand(r2, LSL, kPointerSizeLog2));
2502 // Adjust for frame.
2503 __ sub(r4, r4,
2504 Operand(ArgumentsAdaptorFrameConstants::kFixedFrameSizeFromFp +
2505 kPointerSize));
2506
2507 Label fill;
2508 __ bind(&fill);
2509 __ push(scratch);
2510 __ cmp(sp, r4);
2511 __ b(ne, &fill);
2512 }
2513
2514 // Call the entry point.
2515 __ bind(&invoke);
2516 __ mov(r0, r2);
2517 // r0 : expected number of arguments
2518 // r1 : function (passed through to callee)
2519 // r3 : new target (passed through to callee)
2520 static_assert(kJavaScriptCallCodeStartRegister == r2, "ABI mismatch");
2521 __ ldr(r2, FieldMemOperand(r1, JSFunction::kCodeOffset));
2522 __ add(r2, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
2523 __ Call(r2);
2524
2525 // Store offset of return address for deoptimizer.
2526 masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
2527
2528 // Exit frame and return.
2529 LeaveArgumentsAdaptorFrame(masm);
2530 __ Jump(lr);
2531
2532 // -------------------------------------------
2533 // Dont adapt arguments.
2534 // -------------------------------------------
2535 __ bind(&dont_adapt_arguments);
2536 static_assert(kJavaScriptCallCodeStartRegister == r2, "ABI mismatch");
2537 __ ldr(r2, FieldMemOperand(r1, JSFunction::kCodeOffset));
2538 __ add(r2, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
2539 __ Jump(r2);
2540
2541 __ bind(&stack_overflow);
2542 {
2543 FrameScope frame(masm, StackFrame::MANUAL);
2544 __ CallRuntime(Runtime::kThrowStackOverflow);
2545 __ bkpt(0);
2546 }
2547 }
2548
// Trampoline used for lazy compilation of a WASM function: saves the wasm
// call's parameter registers, asks the runtime to compile the function, then
// restores the registers and jumps to the freshly compiled entrypoint.
void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
  {
    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);

    // Save all parameter registers (see wasm-linkage.cc). They might be
    // overwritten in the runtime call below. We don't have any callee-saved
    // registers in wasm, so no need to store anything else.
    constexpr RegList gp_regs = Register::ListOf<r0, r1, r2>();
    constexpr DwVfpRegister lowest_fp_reg = d0;
    constexpr DwVfpRegister highest_fp_reg = d7;

    // Push gp regs then the d0..d7 range in one block each.
    __ stm(db_w, sp, gp_regs);
    __ vstm(db_w, sp, lowest_fp_reg, highest_fp_reg);

    // Pass the WASM instance as an explicit argument to WasmCompileLazy.
    __ push(kWasmInstanceRegister);
    // Initialize the JavaScript context with 0. CEntry will use it to
    // set the current context on the isolate.
    __ Move(cp, Smi::kZero);
    __ CallRuntime(Runtime::kWasmCompileLazy);
    // The entrypoint address is the first return value.
    __ mov(r8, kReturnRegister0);
    // The WASM instance is the second return value.
    __ mov(kWasmInstanceRegister, kReturnRegister1);

    // Restore registers in reverse order of the saves above.
    __ vldm(ia_w, sp, lowest_fp_reg, highest_fp_reg);
    __ ldm(ia_w, sp, gp_regs);
  }
  // Finally, jump to the entrypoint.
  __ Jump(r8);
}
2581
// The C-entry stub: transitions from JavaScript to a C++ builtin, calls it,
// and either returns the result to the JS caller or dispatches to the
// pending exception handler on failure.
void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
                               SaveFPRegsMode save_doubles, ArgvMode argv_mode,
                               bool builtin_exit_frame) {
  // Called from JavaScript; parameters are on stack as if calling JS function.
  // r0: number of arguments including receiver
  // r1: pointer to builtin function
  // fp: frame pointer  (restored after C call)
  // sp: stack pointer  (restored as callee's sp after C call)
  // cp: current context  (C callee-saved)
  //
  // If argv_mode == kArgvInRegister:
  // r2: pointer to the first argument
  ProfileEntryHookStub::MaybeCallEntryHook(masm);

  // Stash the builtin function pointer in callee-saved r5; r1 is needed for
  // argv below.
  __ mov(r5, Operand(r1));

  if (argv_mode == kArgvInRegister) {
    // Move argv into the correct register.
    __ mov(r1, Operand(r2));
  } else {
    // Compute the argv pointer in a callee-saved register.
    __ add(r1, sp, Operand(r0, LSL, kPointerSizeLog2));
    __ sub(r1, r1, Operand(kPointerSize));
  }

  // Enter the exit frame that transitions from JavaScript to C++.
  FrameScope scope(masm, StackFrame::MANUAL);
  __ EnterExitFrame(
      save_doubles == kSaveFPRegs, 0,
      builtin_exit_frame ? StackFrame::BUILTIN_EXIT : StackFrame::EXIT);

  // Store a copy of argc in callee-saved registers for later.
  __ mov(r4, Operand(r0));

  // r0, r4: number of arguments including receiver  (C callee-saved)
  // r1: pointer to the first argument (C callee-saved)
  // r5: pointer to builtin function  (C callee-saved)

#if V8_HOST_ARCH_ARM
  int frame_alignment = MacroAssembler::ActivationFrameAlignment();
  int frame_alignment_mask = frame_alignment - 1;
  if (FLAG_debug_code) {
    if (frame_alignment > kPointerSize) {
      Label alignment_as_expected;
      DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
      __ tst(sp, Operand(frame_alignment_mask));
      __ b(eq, &alignment_as_expected);
      // Don't use Check here, as it will call Runtime_Abort re-entering here.
      __ stop("Unexpected alignment");
      __ bind(&alignment_as_expected);
    }
  }
#endif

  // Call C built-in.
  // r0 = argc, r1 = argv, r2 = isolate
  __ Move(r2, ExternalReference::isolate_address(masm->isolate()));

  // To let the GC traverse the return address of the exit frames, we need to
  // know where the return address is. CEntry is unmovable, so
  // we can store the address on the stack to be able to find it again and
  // we never have to restore it, because it will not change.
  // Compute the return address in lr to return to after the jump below. Pc is
  // already at '+ 8' from the current instruction but return is after three
  // instructions so add another 4 to pc to get the return address.
  {
    // Prevent literal pool emission before return address.
    Assembler::BlockConstPoolScope block_const_pool(masm);
    __ add(lr, pc, Operand(4));
    __ str(lr, MemOperand(sp));
    __ Call(r5);
  }

  // Result returned in r0 or r1:r0 - do not destroy these registers!

  // Check result for exception sentinel.
  Label exception_returned;
  __ CompareRoot(r0, Heap::kExceptionRootIndex);
  __ b(eq, &exception_returned);

  // Check that there is no pending exception, otherwise we
  // should have returned the exception sentinel.
  if (FLAG_debug_code) {
    Label okay;
    ExternalReference pending_exception_address = ExternalReference::Create(
        IsolateAddressId::kPendingExceptionAddress, masm->isolate());
    __ Move(r3, pending_exception_address);
    __ ldr(r3, MemOperand(r3));
    __ CompareRoot(r3, Heap::kTheHoleValueRootIndex);
    // Cannot use check here as it attempts to generate call into runtime.
    __ b(eq, &okay);
    __ stop("Unexpected pending exception");
    __ bind(&okay);
  }

  // Exit C frame and return.
  // r0:r1: result
  // sp: stack pointer
  // fp: frame pointer
  Register argc = argv_mode == kArgvInRegister
                      // We don't want to pop arguments so set argc to no_reg.
                      ? no_reg
                      // Callee-saved register r4 still holds argc.
                      : r4;
  __ LeaveExitFrame(save_doubles == kSaveFPRegs, argc);
  __ mov(pc, lr);

  // Handling of exception.
  __ bind(&exception_returned);

  ExternalReference pending_handler_context_address = ExternalReference::Create(
      IsolateAddressId::kPendingHandlerContextAddress, masm->isolate());
  ExternalReference pending_handler_entrypoint_address =
      ExternalReference::Create(
          IsolateAddressId::kPendingHandlerEntrypointAddress, masm->isolate());
  ExternalReference pending_handler_fp_address = ExternalReference::Create(
      IsolateAddressId::kPendingHandlerFPAddress, masm->isolate());
  ExternalReference pending_handler_sp_address = ExternalReference::Create(
      IsolateAddressId::kPendingHandlerSPAddress, masm->isolate());

  // Ask the runtime for help to determine the handler. This will set r0 to
  // contain the current pending exception, don't clobber it.
  ExternalReference find_handler =
      ExternalReference::Create(Runtime::kUnwindAndFindExceptionHandler);
  {
    FrameScope scope(masm, StackFrame::MANUAL);
    __ PrepareCallCFunction(3, 0);
    __ mov(r0, Operand(0));
    __ mov(r1, Operand(0));
    __ Move(r2, ExternalReference::isolate_address(masm->isolate()));
    __ CallCFunction(find_handler, 3);
  }

  // Retrieve the handler context, SP and FP.
  __ Move(cp, pending_handler_context_address);
  __ ldr(cp, MemOperand(cp));
  __ Move(sp, pending_handler_sp_address);
  __ ldr(sp, MemOperand(sp));
  __ Move(fp, pending_handler_fp_address);
  __ ldr(fp, MemOperand(fp));

  // If the handler is a JS frame, restore the context to the frame. Note that
  // the context will be set to (cp == 0) for non-JS frames.
  __ cmp(cp, Operand(0));
  // Conditional store: only write the context slot when cp is non-zero.
  __ str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);

  // Reset the masking register. This is done independent of the underlying
  // feature flag {FLAG_branch_load_poisoning} to make the snapshot work with
  // both configurations. It is safe to always do this, because the underlying
  // register is caller-saved and can be arbitrarily clobbered.
  __ ResetSpeculationPoisonRegister();

  // Compute the handler entry address and jump to it.
  ConstantPoolUnavailableScope constant_pool_unavailable(masm);
  __ Move(r1, pending_handler_entrypoint_address);
  __ ldr(r1, MemOperand(r1));
  __ Jump(r1);
}
2740
// Truncates the double on the stack (at sp + 3 * kPointerSize, past the three
// saved registers) to a 32-bit integer, writing the result back to the same
// stack slot. Handles the saturating cases that the inline FPU truncation
// cannot, by decomposing the IEEE 754 bit pattern manually.
void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
  Label negate, done;

  UseScratchRegisterScope temps(masm);
  Register result_reg = r7;
  Register double_low = GetRegisterThatIsNotOneOf(result_reg);
  Register double_high = GetRegisterThatIsNotOneOf(result_reg, double_low);
  LowDwVfpRegister double_scratch = temps.AcquireLowD();

  // Save the old values from these temporary registers on the stack.
  __ Push(result_reg, double_high, double_low);

  // Account for saved regs.
  const int kArgumentOffset = 3 * kPointerSize;

  MemOperand input_operand(sp, kArgumentOffset);
  // Input and output share the same stack slot.
  MemOperand result_operand = input_operand;

  // Load double input.
  __ vldr(double_scratch, input_operand);
  __ vmov(double_low, double_high, double_scratch);
  // Try to convert with a FPU convert instruction. This handles all
  // non-saturating cases.
  __ TryInlineTruncateDoubleToI(result_reg, double_scratch, &done);

  Register scratch = temps.Acquire();
  // Extract the biased exponent field from the high word.
  __ Ubfx(scratch, double_high, HeapNumber::kExponentShift,
          HeapNumber::kExponentBits);
  // Load scratch with exponent - 1. This is faster than loading
  // with exponent because Bias + 1 = 1024 which is an *ARM* immediate value.
  STATIC_ASSERT(HeapNumber::kExponentBias + 1 == 1024);
  __ sub(scratch, scratch, Operand(HeapNumber::kExponentBias + 1));
  // If exponent is greater than or equal to 84, the 32 less significant
  // bits are 0s (2^84 = 1, 52 significant bits, 32 uncoded bits),
  // the result is 0.
  // Compare exponent with 84 (compare exponent - 1 with 83). If the exponent is
  // greater than this, the conversion is out of range, so return zero.
  __ cmp(scratch, Operand(83));
  __ mov(result_reg, Operand::Zero(), LeaveCC, ge);
  __ b(ge, &done);

  // If we reach this code, 30 <= exponent <= 83.
  // `TryInlineTruncateDoubleToI` above will have truncated any double with an
  // exponent lower than 30.
  if (masm->emit_debug_code()) {
    // Scratch is exponent - 1.
    __ cmp(scratch, Operand(30 - 1));
    __ Check(ge, AbortReason::kUnexpectedValue);
  }

  // We don't have to handle cases where 0 <= exponent <= 20 for which we would
  // need to shift right the high part of the mantissa.
  // Scratch contains exponent - 1.
  // Load scratch with 52 - exponent (load with 51 - (exponent - 1)).
  __ rsb(scratch, scratch, Operand(51), SetCC);

  // 52 <= exponent <= 83, shift only double_low.
  // On entry, scratch contains: 52 - exponent.
  __ rsb(scratch, scratch, Operand::Zero(), LeaveCC, ls);
  __ mov(result_reg, Operand(double_low, LSL, scratch), LeaveCC, ls);
  __ b(ls, &negate);

  // 21 <= exponent <= 51, shift double_low and double_high
  // to generate the result.
  __ mov(double_low, Operand(double_low, LSR, scratch));
  // Scratch contains: 52 - exponent.
  // We needs: exponent - 20.
  // So we use: 32 - scratch = 32 - 52 + exponent = exponent - 20.
  __ rsb(scratch, scratch, Operand(32));
  __ Ubfx(result_reg, double_high, 0, HeapNumber::kMantissaBitsInTopWord);
  // Set the implicit 1 before the mantissa part in double_high.
  __ orr(result_reg, result_reg,
         Operand(1 << HeapNumber::kMantissaBitsInTopWord));
  __ orr(result_reg, double_low, Operand(result_reg, LSL, scratch));

  __ bind(&negate);
  // If input was positive, double_high ASR 31 equals 0 and
  // double_high LSR 31 equals zero.
  // New result = (result eor 0) + 0 = result.
  // If the input was negative, we have to negate the result.
  // Input_high ASR 31 equals 0xFFFFFFFF and double_high LSR 31 equals 1.
  // New result = (result eor 0xFFFFFFFF) + 1 = 0 - result.
  __ eor(result_reg, result_reg, Operand(double_high, ASR, 31));
  __ add(result_reg, result_reg, Operand(double_high, LSR, 31));

  __ bind(&done);
  __ str(result_reg, result_operand);

  // Restore registers corrupted in this routine and return.
  __ Pop(result_reg, double_high, double_low);
  __ Ret();
}
2833
// Computes base^exponent for Math.pow. Integer exponents are handled inline
// with square-and-multiply; everything else (and subnormal results of the
// reciprocal trick) falls back to the C power_double_double function.
void Builtins::Generate_MathPowInternal(MacroAssembler* masm) {
  const Register exponent = MathPowTaggedDescriptor::exponent();
  DCHECK(exponent == r2);
  const LowDwVfpRegister double_base = d0;
  const LowDwVfpRegister double_exponent = d1;
  const LowDwVfpRegister double_result = d2;
  const LowDwVfpRegister double_scratch = d3;
  const SwVfpRegister single_scratch = s6;
  const Register scratch = r9;
  const Register scratch2 = r4;

  Label call_runtime, done, int_exponent;

  // Detect integer exponents stored as double.
  __ TryDoubleToInt32Exact(scratch, double_exponent, double_scratch);
  __ b(eq, &int_exponent);

  // Non-integer exponent: defer to the C library implementation.
  __ push(lr);
  {
    AllowExternalCallThatCantCauseGC scope(masm);
    __ PrepareCallCFunction(0, 2);
    __ MovToFloatParameters(double_base, double_exponent);
    __ CallCFunction(ExternalReference::power_double_double_function(), 0, 2);
  }
  __ pop(lr);
  __ MovFromFloatResult(double_result);
  __ b(&done);

  // Calculate power with integer exponent.
  __ bind(&int_exponent);

  // Get two copies of exponent in the registers scratch and exponent.
  // Exponent has previously been stored into scratch as untagged integer.
  __ mov(exponent, scratch);

  __ vmov(double_scratch, double_base);  // Back up base.
  __ vmov(double_result, Double(1.0), scratch2);

  // Get absolute value of exponent.
  __ cmp(scratch, Operand::Zero());
  __ rsb(scratch, scratch, Operand::Zero(), LeaveCC, mi);

  // Square-and-multiply loop over the bits of |exponent|.
  Label while_true;
  __ bind(&while_true);
  __ mov(scratch, Operand(scratch, LSR, 1), SetCC);
  // If the shifted-out bit was set (carry), multiply the result by the
  // current base power; keep squaring while bits remain (ne).
  __ vmul(double_result, double_result, double_scratch, cs);
  __ vmul(double_scratch, double_scratch, double_scratch, ne);
  __ b(ne, &while_true);

  // For a negative exponent, the result is 1 / base^|exponent|.
  __ cmp(exponent, Operand::Zero());
  __ b(ge, &done);
  __ vmov(double_scratch, Double(1.0), scratch);
  __ vdiv(double_result, double_scratch, double_result);
  // Test whether result is zero. Bail out to check for subnormal result.
  // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
  __ VFPCompareAndSetFlags(double_result, 0.0);
  __ b(ne, &done);
  // double_exponent may not contain the exponent value if the input was a
  // smi. We set it with exponent value before bailing out.
  __ vmov(single_scratch, exponent);
  __ vcvt_f64_s32(double_exponent, single_scratch);

  // Returning or bailing out.
  __ push(lr);
  {
    AllowExternalCallThatCantCauseGC scope(masm);
    __ PrepareCallCFunction(0, 2);
    __ MovToFloatParameters(double_base, double_exponent);
    __ CallCFunction(ExternalReference::power_double_double_function(), 0, 2);
  }
  __ pop(lr);
  __ MovFromFloatResult(double_result);

  __ bind(&done);
  __ Ret();
}
2910
2911 #undef __
2912
2913 } // namespace internal
2914 } // namespace v8
2915
2916 #endif // V8_TARGET_ARCH_ARM
2917