// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#if V8_TARGET_ARCH_ARM

#include "src/api/api-arguments.h"
#include "src/codegen/code-factory.h"
#include "src/codegen/interface-descriptors-inl.h"
// For interpreter_entry_return_pc_offset. TODO(jkummerow): Drop.
#include "src/codegen/macro-assembler-inl.h"
#include "src/codegen/register-configuration.h"
#include "src/debug/debug.h"
#include "src/deoptimizer/deoptimizer.h"
#include "src/execution/frame-constants.h"
#include "src/execution/frames.h"
#include "src/heap/heap-inl.h"
#include "src/logging/counters.h"
#include "src/objects/cell.h"
#include "src/objects/foreign.h"
#include "src/objects/heap-number.h"
#include "src/objects/js-generator.h"
#include "src/objects/objects-inl.h"
#include "src/objects/smi.h"
#include "src/runtime/runtime.h"

#if V8_ENABLE_WEBASSEMBLY
#include "src/wasm/wasm-linkage.h"
#include "src/wasm/wasm-objects.h"
#endif  // V8_ENABLE_WEBASSEMBLY

namespace v8 {
namespace internal {

#define __ ACCESS_MASM(masm)

void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address) {
#if defined(__thumb__)
  // Thumb mode builtin.
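  // (Bit 0 of an ARM branch target selects the instruction set, so a Thumb
  // entry point must have its low bit set; the DCHECK below verifies this.)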
  DCHECK_EQ(1, reinterpret_cast<uintptr_t>(
                   ExternalReference::Create(address).address()) &
                   1);
#endif
  __ Move(kJavaScriptCallExtraArg1Register, ExternalReference::Create(address));
  __ Jump(BUILTIN_CODE(masm->isolate(), AdaptorWithBuiltinExitFrame),
          RelocInfo::CODE_TARGET);
}

static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
                                           Runtime::FunctionId function_id) {
  // ----------- S t a t e -------------
  //  -- r0 : actual argument count
  //  -- r1 : target function (preserved for callee)
  //  -- r3 : new target (preserved for callee)
  // -----------------------------------
  {
    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
    // Push a copy of the target function, the new target and the actual
    // argument count.
    // Push function as parameter to the runtime call.
    __ SmiTag(kJavaScriptCallArgCountRegister);
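    // (On this 32-bit target, SmiTag is a left shift by one -- e.g. 5 becomes
    // 0xA -- so the count survives on the stack as a properly tagged value.)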
    __ Push(kJavaScriptCallTargetRegister, kJavaScriptCallNewTargetRegister,
            kJavaScriptCallArgCountRegister, kJavaScriptCallTargetRegister);

    __ CallRuntime(function_id, 1);
    __ mov(r2, r0);

    // Restore target function, new target and actual argument count.
    __ Pop(kJavaScriptCallTargetRegister, kJavaScriptCallNewTargetRegister,
           kJavaScriptCallArgCountRegister);
    __ SmiUntag(kJavaScriptCallArgCountRegister);
  }
  static_assert(kJavaScriptCallCodeStartRegister == r2, "ABI mismatch");
  __ JumpCodeObject(r2);
}

namespace {

enum class ArgumentsElementType {
  kRaw,    // Push arguments as they are.
  kHandle  // Dereference arguments before pushing.
};

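// A rough C sketch of the loop generated below (illustrative only; the
// receiver slot is skipped when kJSArgcIncludesReceiver is set):
//
//   int counter = argc - (kJSArgcIncludesReceiver ? kJSArgcReceiverSlots : 0);
//   while (--counter >= 0) {
//     Address value = array[counter];
//     if (element_type == ArgumentsElementType::kHandle) value = *value;
//     Push(value);  // Arguments are pushed last-to-first.
//   }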
void Generate_PushArguments(MacroAssembler* masm, Register array, Register argc,
                            Register scratch,
                            ArgumentsElementType element_type) {
  DCHECK(!AreAliased(array, argc, scratch));
  UseScratchRegisterScope temps(masm);
  Register counter = scratch;
  Register value = temps.Acquire();
  Label loop, entry;
  if (kJSArgcIncludesReceiver) {
    __ sub(counter, argc, Operand(kJSArgcReceiverSlots));
  } else {
    __ mov(counter, argc);
  }
  __ b(&entry);
  __ bind(&loop);
  __ ldr(value, MemOperand(array, counter, LSL, kSystemPointerSizeLog2));
  if (element_type == ArgumentsElementType::kHandle) {
    __ ldr(value, MemOperand(value));
  }
  __ push(value);
  __ bind(&entry);
  __ sub(counter, counter, Operand(1), SetCC);
  __ b(ge, &loop);
}

void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- r0     : number of arguments
  //  -- r1     : constructor function
  //  -- r3     : new target
  //  -- cp     : context
  //  -- lr     : return address
  //  -- sp[...]: constructor arguments
  // -----------------------------------

  Register scratch = r2;

  Label stack_overflow;

  __ StackOverflowCheck(r0, scratch, &stack_overflow);

  // Enter a construct frame.
  {
    FrameAndConstantPoolScope scope(masm, StackFrame::CONSTRUCT);

    // Preserve the incoming parameters on the stack.
    __ SmiTag(r0);
    __ Push(cp, r0);
    __ SmiUntag(r0);

    // TODO(victorgomes): When the arguments adaptor is completely removed, we
    // should get the formal parameter count and copy the arguments in their
    // correct positions (including any undefined), instead of delaying this to
    // InvokeFunction.

    // Set up pointer to first argument (skip receiver).
    __ add(
        r4, fp,
        Operand(StandardFrameConstants::kCallerSPOffset + kSystemPointerSize));
    // Copy arguments and receiver to the expression stack.
    // r4: Pointer to start of arguments.
    // r0: Number of arguments.
    Generate_PushArguments(masm, r4, r0, r5, ArgumentsElementType::kRaw);
    // The receiver for the builtin/api call.
    __ PushRoot(RootIndex::kTheHoleValue);

    // Call the function.
    // r0: number of arguments (untagged)
    // r1: constructor function
    // r3: new target
    __ InvokeFunctionWithNewTarget(r1, r3, r0, InvokeType::kCall);

    // Restore context from the frame.
    __ ldr(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
    // Restore smi-tagged arguments count from the frame.
    __ ldr(scratch, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
    // Leave construct frame.
  }

  // Remove caller arguments from the stack and return.
  __ DropArguments(scratch, TurboAssembler::kCountIsSmi,
                   kJSArgcIncludesReceiver
                       ? TurboAssembler::kCountIncludesReceiver
                       : TurboAssembler::kCountExcludesReceiver);
  __ Jump(lr);

  __ bind(&stack_overflow);
  {
    FrameScope scope(masm, StackFrame::INTERNAL);
    __ CallRuntime(Runtime::kThrowStackOverflow);
    __ bkpt(0);  // Unreachable code.
  }
}

}  // namespace

// The construct stub for ES5 constructor functions and ES6 class constructors.
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  --      r0: number of arguments (untagged)
  //  --      r1: constructor function
  //  --      r3: new target
  //  --      cp: context
  //  --      lr: return address
  //  -- sp[...]: constructor arguments
  // -----------------------------------

  FrameScope scope(masm, StackFrame::MANUAL);
  // Enter a construct frame.
  Label post_instantiation_deopt_entry, not_create_implicit_receiver;
  __ EnterFrame(StackFrame::CONSTRUCT);

  // Preserve the incoming parameters on the stack.
  __ LoadRoot(r4, RootIndex::kTheHoleValue);
  __ SmiTag(r0);
  __ Push(cp, r0, r1, r4, r3);

  // ----------- S t a t e -------------
  //  --        sp[0*kPointerSize]: new target
  //  --        sp[1*kPointerSize]: padding
  //  -- r1 and sp[2*kPointerSize]: constructor function
  //  --        sp[3*kPointerSize]: number of arguments (tagged)
  //  --        sp[4*kPointerSize]: context
  // -----------------------------------

  __ ldr(r4, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
  __ ldr(r4, FieldMemOperand(r4, SharedFunctionInfo::kFlagsOffset));
  __ DecodeField<SharedFunctionInfo::FunctionKindBits>(r4);
  __ JumpIfIsInRange(r4, kDefaultDerivedConstructor, kDerivedConstructor,
                     &not_create_implicit_receiver);

  // If not derived class constructor: Allocate the new receiver object.
  __ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1, r4,
                      r5);
  __ Call(BUILTIN_CODE(masm->isolate(), FastNewObject), RelocInfo::CODE_TARGET);
  __ b(&post_instantiation_deopt_entry);

  // Else: use TheHoleValue as receiver for constructor call
  __ bind(&not_create_implicit_receiver);
  __ LoadRoot(r0, RootIndex::kTheHoleValue);

  // ----------- S t a t e -------------
  //  --                          r0: receiver
  //  -- Slot 3 / sp[0*kPointerSize]: new target
  //  -- Slot 2 / sp[1*kPointerSize]: constructor function
  //  -- Slot 1 / sp[2*kPointerSize]: number of arguments (tagged)
  //  -- Slot 0 / sp[3*kPointerSize]: context
  // -----------------------------------
  // Deoptimizer enters here.
  masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset(
      masm->pc_offset());
  __ bind(&post_instantiation_deopt_entry);

  // Restore new target.
  __ Pop(r3);

  // Push the allocated receiver to the stack.
  __ Push(r0);
  // We need two copies because we may have to return the original one
  // and the calling conventions dictate that the called function pops the
  // receiver. The second copy is pushed after the arguments; we keep it in
  // r6, since r0 is needed to hold the number of arguments before invoking
  // the function.
  __ mov(r6, r0);

  // Set up pointer to first argument (skip receiver).
  __ add(r4, fp,
         Operand(StandardFrameConstants::kCallerSPOffset + kSystemPointerSize));

  // Restore constructor function and argument count.
  __ ldr(r1, MemOperand(fp, ConstructFrameConstants::kConstructorOffset));
  __ ldr(r0, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
  __ SmiUntag(r0);

  Label stack_overflow;
  __ StackOverflowCheck(r0, r5, &stack_overflow);

  // TODO(victorgomes): When the arguments adaptor is completely removed, we
  // should get the formal parameter count and copy the arguments in their
  // correct positions (including any undefined), instead of delaying this to
  // InvokeFunction.

  // Copy arguments to the expression stack.
  // r4: Pointer to start of arguments.
  // r0: Number of arguments.
  Generate_PushArguments(masm, r4, r0, r5, ArgumentsElementType::kRaw);

  // Push implicit receiver.
  __ Push(r6);

  // Call the function.
  __ InvokeFunctionWithNewTarget(r1, r3, r0, InvokeType::kCall);

  // ----------- S t a t e -------------
  //  --                 r0: constructor result
  //  -- sp[0*kPointerSize]: implicit receiver
  //  -- sp[1*kPointerSize]: padding
  //  -- sp[2*kPointerSize]: constructor function
  //  -- sp[3*kPointerSize]: number of arguments
  //  -- sp[4*kPointerSize]: context
  // -----------------------------------

  // Store offset of return address for deoptimizer.
  masm->isolate()->heap()->SetConstructStubInvokeDeoptPCOffset(
      masm->pc_offset());

  // If the result is an object (in the ECMA sense), we should get rid
  // of the receiver and use the result; see ECMA-262 section 13.2.2-7
  // on page 74.
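  // In pseudocode, the checks below amount to (illustrative sketch):
  //   if (result != undefined && !result.IsSmi() && result.IsJSReceiver())
  //     return result;                          // leave_and_return
  //   result = receiver_on_stack;               // use_receiver
  //   if (result == the_hole) throw TypeError;  // do_throw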
  Label use_receiver, do_throw, leave_and_return, check_receiver;

  // If the result is undefined, we jump out to using the implicit receiver.
  __ JumpIfNotRoot(r0, RootIndex::kUndefinedValue, &check_receiver);

  // Otherwise we do a smi check and fall through to check if the return value
  // is a valid receiver.

  // Throw away the result of the constructor invocation and use the
  // on-stack receiver as the result.
  __ bind(&use_receiver);
  __ ldr(r0, MemOperand(sp, 0 * kPointerSize));
  __ JumpIfRoot(r0, RootIndex::kTheHoleValue, &do_throw);

  __ bind(&leave_and_return);
  // Restore smi-tagged arguments count from the frame.
  __ ldr(r1, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
  // Leave construct frame.
  __ LeaveFrame(StackFrame::CONSTRUCT);

  // Remove caller arguments from the stack and return.
  __ DropArguments(r1, TurboAssembler::kCountIsSmi,
                   kJSArgcIncludesReceiver
                       ? TurboAssembler::kCountIncludesReceiver
                       : TurboAssembler::kCountExcludesReceiver);
  __ Jump(lr);

  __ bind(&check_receiver);
  // If the result is a smi, it is *not* an object in the ECMA sense.
  __ JumpIfSmi(r0, &use_receiver);

  // If the type of the result (stored in its map) is less than
  // FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense.
  STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
  __ CompareObjectType(r0, r4, r5, FIRST_JS_RECEIVER_TYPE);
  __ b(ge, &leave_and_return);
  __ b(&use_receiver);

  __ bind(&do_throw);
  // Restore the context from the frame.
  __ ldr(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
  __ CallRuntime(Runtime::kThrowConstructorReturnedNonObject);
  __ bkpt(0);

  __ bind(&stack_overflow);
  // Restore the context from the frame.
  __ ldr(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
  __ CallRuntime(Runtime::kThrowStackOverflow);
  // Unreachable code.
  __ bkpt(0);
}

void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
  Generate_JSBuiltinsConstructStubHelper(masm);
}

static void AssertCodeIsBaseline(MacroAssembler* masm, Register code,
                                 Register scratch) {
  DCHECK(!AreAliased(code, scratch));
  // Verify that the code kind is baseline code via the CodeKind.
  __ ldr(scratch, FieldMemOperand(code, Code::kFlagsOffset));
  __ DecodeField<Code::KindField>(scratch);
  __ cmp(scratch, Operand(static_cast<int>(CodeKind::BASELINE)));
  __ Assert(eq, AbortReason::kExpectedBaselineData);
}

static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
                                                    Register sfi_data,
                                                    Register scratch1,
                                                    Label* is_baseline) {
  ASM_CODE_COMMENT(masm);
  Label done;
  __ CompareObjectType(sfi_data, scratch1, scratch1, CODET_TYPE);
  if (FLAG_debug_code) {
    Label not_baseline;
    __ b(ne, &not_baseline);
    AssertCodeIsBaseline(masm, sfi_data, scratch1);
    __ b(eq, is_baseline);
    __ bind(&not_baseline);
  } else {
    __ b(eq, is_baseline);
  }
  __ cmp(scratch1, Operand(INTERPRETER_DATA_TYPE));
  __ b(ne, &done);
  __ ldr(sfi_data,
         FieldMemOperand(sfi_data, InterpreterData::kBytecodeArrayOffset));

  __ bind(&done);
}

// static
void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- r0 : the value to pass to the generator
  //  -- r1 : the JSGeneratorObject to resume
  //  -- lr : return address
  // -----------------------------------
  // Store input value into generator object.
  __ str(r0, FieldMemOperand(r1, JSGeneratorObject::kInputOrDebugPosOffset));
  __ RecordWriteField(r1, JSGeneratorObject::kInputOrDebugPosOffset, r0,
                      kLRHasNotBeenSaved, SaveFPRegsMode::kIgnore);
  // Check that r1 is still valid, RecordWrite might have clobbered it.
  __ AssertGeneratorObject(r1);

  // Load suspended function and context.
  __ ldr(r4, FieldMemOperand(r1, JSGeneratorObject::kFunctionOffset));
  __ ldr(cp, FieldMemOperand(r4, JSFunction::kContextOffset));

  Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
  Label stepping_prepared;
  Register scratch = r5;

  // Flood function if we are stepping.
  ExternalReference debug_hook =
      ExternalReference::debug_hook_on_function_call_address(masm->isolate());
  __ Move(scratch, debug_hook);
  __ ldrsb(scratch, MemOperand(scratch));
  __ cmp(scratch, Operand(0));
  __ b(ne, &prepare_step_in_if_stepping);

  // Flood function if we need to continue stepping in the suspended
  // generator.
  ExternalReference debug_suspended_generator =
      ExternalReference::debug_suspended_generator_address(masm->isolate());
  __ Move(scratch, debug_suspended_generator);
  __ ldr(scratch, MemOperand(scratch));
  __ cmp(scratch, Operand(r1));
  __ b(eq, &prepare_step_in_suspended_generator);
  __ bind(&stepping_prepared);

  // Check the stack for overflow. We are not trying to catch interruptions
  // (i.e. debug break and preemption) here, so check the "real stack limit".
  Label stack_overflow;
  __ LoadStackLimit(scratch, StackLimitKind::kRealStackLimit);
  __ cmp(sp, scratch);
  __ b(lo, &stack_overflow);

  // ----------- S t a t e -------------
  //  -- r1    : the JSGeneratorObject to resume
  //  -- r4    : generator function
  //  -- cp    : generator context
  //  -- lr    : return address
  //  -- sp[0] : generator receiver
  // -----------------------------------

  // Copy the function arguments from the generator object's register file.
  __ ldr(r3, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
  __ ldrh(r3,
          FieldMemOperand(r3, SharedFunctionInfo::kFormalParameterCountOffset));
  if (kJSArgcIncludesReceiver) {
    __ sub(r3, r3, Operand(kJSArgcReceiverSlots));
  }
  __ ldr(r2,
         FieldMemOperand(r1, JSGeneratorObject::kParametersAndRegistersOffset));
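  // The loop below pushes the saved parameters last-to-first, roughly:
  //   for (int i = param_count - 1; i >= 0; --i)
  //     Push(parameters_and_registers[i]);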
  {
    Label done_loop, loop;
    __ bind(&loop);
    __ sub(r3, r3, Operand(1), SetCC);
    __ b(lt, &done_loop);
    __ add(scratch, r2, Operand(r3, LSL, kTaggedSizeLog2));
    __ ldr(scratch, FieldMemOperand(scratch, FixedArray::kHeaderSize));
    __ Push(scratch);
    __ b(&loop);
    __ bind(&done_loop);

    // Push receiver.
    __ ldr(scratch, FieldMemOperand(r1, JSGeneratorObject::kReceiverOffset));
    __ Push(scratch);
  }

  // Underlying function needs to have bytecode available.
  if (FLAG_debug_code) {
    Label is_baseline;
    __ ldr(r3, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
    __ ldr(r3, FieldMemOperand(r3, SharedFunctionInfo::kFunctionDataOffset));
    GetSharedFunctionInfoBytecodeOrBaseline(masm, r3, r0, &is_baseline);
    __ CompareObjectType(r3, r3, r3, BYTECODE_ARRAY_TYPE);
    __ Assert(eq, AbortReason::kMissingBytecodeArray);
    __ bind(&is_baseline);
  }

  // Resume (Ignition/TurboFan) generator object.
  {
    __ ldr(r0, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
    __ ldrh(r0, FieldMemOperand(
                    r0, SharedFunctionInfo::kFormalParameterCountOffset));
    // We abuse new.target both to indicate that this is a resume call and to
    // pass in the generator object. In ordinary calls, new.target is always
    // undefined because generator functions are non-constructable.
    __ Move(r3, r1);
    __ Move(r1, r4);
    static_assert(kJavaScriptCallCodeStartRegister == r2, "ABI mismatch");
    __ ldr(r2, FieldMemOperand(r1, JSFunction::kCodeOffset));
    __ JumpCodeObject(r2);
  }

  __ bind(&prepare_step_in_if_stepping);
  {
    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
    __ Push(r1, r4);
    // Push hole as receiver since we do not use it for stepping.
    __ PushRoot(RootIndex::kTheHoleValue);
    __ CallRuntime(Runtime::kDebugOnFunctionCall);
    __ Pop(r1);
    __ ldr(r4, FieldMemOperand(r1, JSGeneratorObject::kFunctionOffset));
  }
  __ b(&stepping_prepared);

  __ bind(&prepare_step_in_suspended_generator);
  {
    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
    __ Push(r1);
    __ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator);
    __ Pop(r1);
    __ ldr(r4, FieldMemOperand(r1, JSGeneratorObject::kFunctionOffset));
  }
  __ b(&stepping_prepared);

  __ bind(&stack_overflow);
  {
    FrameScope scope(masm, StackFrame::INTERNAL);
    __ CallRuntime(Runtime::kThrowStackOverflow);
    __ bkpt(0);  // This should be unreachable.
  }
}

void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
  FrameScope scope(masm, StackFrame::INTERNAL);
  __ push(r1);
  __ CallRuntime(Runtime::kThrowConstructedNonConstructable);
}

namespace {

// Total size of the stack space pushed by JSEntryVariant.
// JSEntryTrampoline uses this to access the on-stack arguments passed to
// JSEntryVariant.
constexpr int kPushedStackSpace = kNumCalleeSaved * kPointerSize -
                                  kPointerSize /* FP */ +
                                  kNumDoubleCalleeSaved * kDoubleSize +
                                  5 * kPointerSize /* r5, r6, r7, fp, lr */ +
                                  EntryFrameConstants::kCallerFPOffset;
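
// (For orientation, assuming the usual AAPCS callee-saved set of r4-r11 and
// d8-d15, the first three terms come to 7 * 4 + 8 * 8 + 5 * 4 = 112 bytes;
// the authoritative values live in frame-constants-arm.h.)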

// Assert that the EntryFrameConstants are in sync with the builtin.
static_assert(kPushedStackSpace == EntryFrameConstants::kDirectCallerSPOffset +
                                       3 * kPointerSize /* r5, r6, r7 */ +
                                       EntryFrameConstants::kCallerFPOffset,
              "Pushed stack space and frame constants do not match. See "
              "frame-constants-arm.h");

// Called with the native C calling convention. The corresponding function
// signature is either:
//
//   using JSEntryFunction = GeneratedCode<Address(
//       Address root_register_value, Address new_target, Address target,
//       Address receiver, intptr_t argc, Address** argv)>;
// or
//   using JSEntryFunction = GeneratedCode<Address(
//       Address root_register_value, MicrotaskQueue* microtask_queue)>;
void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
                             Builtin entry_trampoline) {
  // The register state is either:
  //   r0:                            root_register_value
  //   r1:                            code entry
  //   r2:                            function
  //   r3:                            receiver
  //   [sp + 0 * kSystemPointerSize]: argc
  //   [sp + 1 * kSystemPointerSize]: argv
  // or
  //   r0: root_register_value
  //   r1: microtask_queue
  // Preserve all but r0 and pass them to entry_trampoline.
  Label invoke, handler_entry, exit;
  const RegList kCalleeSavedWithoutFp = kCalleeSaved & ~fp.bit();

  // Update |pushed_stack_space| when we manipulate the stack.
  int pushed_stack_space = EntryFrameConstants::kCallerFPOffset;
  {
    NoRootArrayScope no_root_array(masm);

    // Called from C, so do not pop argc and args on exit (preserve sp)
    // No need to save register-passed args
    // Save callee-saved registers (incl. cp), but without fp
    __ stm(db_w, sp, kCalleeSavedWithoutFp);
    pushed_stack_space +=
        kNumCalleeSaved * kPointerSize - kPointerSize /* FP */;

    // Save callee-saved vfp registers.
    __ vstm(db_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg);
    pushed_stack_space += kNumDoubleCalleeSaved * kDoubleSize;

    // Set up the reserved register for 0.0.
    __ vmov(kDoubleRegZero, base::Double(0.0));

    // Initialize the root register.
    // C calling convention. The first argument is passed in r0.
    __ mov(kRootRegister, r0);
  }

  // Push a frame with special values setup to mark it as an entry frame.
  // r0: root_register_value
  __ mov(r7, Operand(StackFrame::TypeToMarker(type)));
  __ mov(r6, Operand(StackFrame::TypeToMarker(type)));
  __ Move(r4, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
                                        masm->isolate()));
  __ ldr(r5, MemOperand(r4));

  __ stm(db_w, sp, r5.bit() | r6.bit() | r7.bit() | fp.bit() | lr.bit());
  pushed_stack_space += 5 * kPointerSize /* r5, r6, r7, fp, lr */;

  // Clear c_entry_fp, now we've pushed its previous value to the stack.
  // If the c_entry_fp is not already zero and we don't clear it, the
  // SafeStackFrameIterator will assume we are executing C++ and miss the JS
  // frames on top.
  __ mov(r5, Operand::Zero());
  __ str(r5, MemOperand(r4));

  Register scratch = r6;

  // Set up frame pointer for the frame to be pushed.
  __ add(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));

  // If this is the outermost JS call, set js_entry_sp value.
  Label non_outermost_js;
  ExternalReference js_entry_sp = ExternalReference::Create(
      IsolateAddressId::kJSEntrySPAddress, masm->isolate());
  __ Move(r5, js_entry_sp);
  __ ldr(scratch, MemOperand(r5));
  __ cmp(scratch, Operand::Zero());
  __ b(ne, &non_outermost_js);
  __ str(fp, MemOperand(r5));
  __ mov(scratch, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
  Label cont;
  __ b(&cont);
  __ bind(&non_outermost_js);
  __ mov(scratch, Operand(StackFrame::INNER_JSENTRY_FRAME));
  __ bind(&cont);
  __ push(scratch);

  // Jump to a faked try block that does the invoke, with a faked catch
  // block that sets the pending exception.
  __ jmp(&invoke);

  // Block literal pool emission whilst taking the position of the handler
  // entry. This avoids making the assumption that literal pools are always
  // emitted after an instruction is emitted, rather than before.
  {
    Assembler::BlockConstPoolScope block_const_pool(masm);
    __ bind(&handler_entry);

    // Store the current pc as the handler offset. It's used later to create
    // the handler table.
    masm->isolate()->builtins()->SetJSEntryHandlerOffset(handler_entry.pos());

    // Caught exception: Store result (exception) in the pending exception
    // field in the JSEnv and return a failure sentinel. Coming in here the
    // fp will be invalid because the PushStackHandler below sets it to 0 to
    // signal the existence of the JSEntry frame.
    __ Move(scratch,
            ExternalReference::Create(
                IsolateAddressId::kPendingExceptionAddress, masm->isolate()));
  }
  __ str(r0, MemOperand(scratch));
  __ LoadRoot(r0, RootIndex::kException);
  __ b(&exit);

  // Invoke: Link this frame into the handler chain.
  __ bind(&invoke);
  // Must preserve r0-r4, r5-r6 are available.
  __ PushStackHandler();
  // If an exception not caught by another handler occurs, this handler
  // returns control to the code after the bl(&invoke) above, which
  // restores all kCalleeSaved registers (including cp and fp) to their
  // saved values before returning a failure to C.
  //
  // Invoke the function by calling through JS entry trampoline builtin and
  // pop the faked function when we return.
  Handle<Code> trampoline_code =
      masm->isolate()->builtins()->code_handle(entry_trampoline);
  DCHECK_EQ(kPushedStackSpace, pushed_stack_space);
  USE(pushed_stack_space);
  __ Call(trampoline_code, RelocInfo::CODE_TARGET);

  // Unlink this frame from the handler chain.
  __ PopStackHandler();

  __ bind(&exit);  // r0 holds result
  // Check if the current stack frame is marked as the outermost JS frame.
  Label non_outermost_js_2;
  __ pop(r5);
  __ cmp(r5, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
  __ b(ne, &non_outermost_js_2);
  __ mov(r6, Operand::Zero());
  __ Move(r5, js_entry_sp);
  __ str(r6, MemOperand(r5));
  __ bind(&non_outermost_js_2);

  // Restore the top frame descriptors from the stack.
  __ pop(r3);
  __ Move(scratch, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
                                             masm->isolate()));
  __ str(r3, MemOperand(scratch));

  // Reset the stack to the callee saved registers.
  __ add(sp, sp,
         Operand(-EntryFrameConstants::kCallerFPOffset -
                 kSystemPointerSize /* already popped one */));

  __ ldm(ia_w, sp, fp.bit() | lr.bit());

  // Restore callee-saved vfp registers.
  __ vldm(ia_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg);

  __ ldm(ia_w, sp, kCalleeSavedWithoutFp);

  __ mov(pc, lr);

  // Emit constant pool.
  __ CheckConstPool(true, false);
}

}  // namespace

void Builtins::Generate_JSEntry(MacroAssembler* masm) {
  Generate_JSEntryVariant(masm, StackFrame::ENTRY, Builtin::kJSEntryTrampoline);
}

void Builtins::Generate_JSConstructEntry(MacroAssembler* masm) {
  Generate_JSEntryVariant(masm, StackFrame::CONSTRUCT_ENTRY,
                          Builtin::kJSConstructEntryTrampoline);
}

void Builtins::Generate_JSRunMicrotasksEntry(MacroAssembler* masm) {
  Generate_JSEntryVariant(masm, StackFrame::ENTRY,
                          Builtin::kRunMicrotasksTrampoline);
}

static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
                                             bool is_construct) {
  // Called from Generate_JS_Entry
  // r0: root_register_value
  // r1: new.target
  // r2: function
  // r3: receiver
  // [fp + kPushedStackSpace + 0 * kSystemPointerSize]: argc
  // [fp + kPushedStackSpace + 1 * kSystemPointerSize]: argv
  // r5-r6, r8 and cp may be clobbered

  __ ldr(r0,
         MemOperand(fp, kPushedStackSpace + EntryFrameConstants::kArgcOffset));
  __ ldr(r4,
         MemOperand(fp, kPushedStackSpace + EntryFrameConstants::kArgvOffset));

  // r1: new.target
  // r2: function
  // r3: receiver
  // r0: argc
  // r4: argv

  // Enter an internal frame.
  {
    FrameScope scope(masm, StackFrame::INTERNAL);

    // Set up the context (we need to use the caller context from the isolate).
    ExternalReference context_address = ExternalReference::Create(
        IsolateAddressId::kContextAddress, masm->isolate());
    __ Move(cp, context_address);
    __ ldr(cp, MemOperand(cp));

    // Push the function.
    __ Push(r2);

    // Check if we have enough stack space to push all arguments + receiver.
    // Clobbers r5.
    Label enough_stack_space, stack_overflow;
    if (kJSArgcIncludesReceiver) {
      __ mov(r6, r0);
    } else {
      __ add(r6, r0, Operand(1));  // Add one for receiver.
    }
    __ StackOverflowCheck(r6, r5, &stack_overflow);
    __ b(&enough_stack_space);
    __ bind(&stack_overflow);
    __ CallRuntime(Runtime::kThrowStackOverflow);
    // Unreachable code.
    __ bkpt(0);

    __ bind(&enough_stack_space);

    // Copy arguments to the stack.
    // r1: new.target
    // r2: function
    // r3: receiver
    // r0: argc
    // r4: argv, i.e. points to first arg
    Generate_PushArguments(masm, r4, r0, r5, ArgumentsElementType::kHandle);

    // Push the receiver.
    __ Push(r3);

    // Set up new.target and function.
    __ mov(r3, r1);
    __ mov(r1, r2);
    // r0: argc
    // r1: function
    // r3: new.target

    // Initialize all JavaScript callee-saved registers, since they will be
    // seen by the garbage collector as part of handlers.
    __ LoadRoot(r4, RootIndex::kUndefinedValue);
    __ mov(r2, r4);
    __ mov(r5, r4);
    __ mov(r6, r4);
    __ mov(r8, r4);
    if (kR9Available == 1) {
      __ mov(r9, r4);
    }

    // Invoke the code.
    Handle<Code> builtin = is_construct
                               ? BUILTIN_CODE(masm->isolate(), Construct)
                               : masm->isolate()->builtins()->Call();
    __ Call(builtin, RelocInfo::CODE_TARGET);

    // Exit the JS frame and remove the parameters (except function), and
    // return.
    // Respect ABI stack constraint.
  }
  __ Jump(lr);

  // r0: result
}

void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
  Generate_JSEntryTrampolineHelper(masm, false);
}

void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
  Generate_JSEntryTrampolineHelper(masm, true);
}

void Builtins::Generate_RunMicrotasksTrampoline(MacroAssembler* masm) {
  // This expects two C++ function parameters passed by Invoke() in
  // execution.cc.
  //   r0: root_register_value
  //   r1: microtask_queue
  __ mov(RunMicrotasksDescriptor::MicrotaskQueueRegister(), r1);
  __ Jump(BUILTIN_CODE(masm->isolate(), RunMicrotasks), RelocInfo::CODE_TARGET);
}

static void ReplaceClosureCodeWithOptimizedCode(MacroAssembler* masm,
                                                Register optimized_code,
                                                Register closure) {
  ASM_CODE_COMMENT(masm);
  DCHECK(!AreAliased(optimized_code, closure));
  // Store code entry in the closure.
  __ str(optimized_code, FieldMemOperand(closure, JSFunction::kCodeOffset));
  __ RecordWriteField(closure, JSFunction::kCodeOffset, optimized_code,
                      kLRHasNotBeenSaved, SaveFPRegsMode::kIgnore,
                      RememberedSetAction::kOmit, SmiCheck::kOmit);
}

static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
                                  Register scratch2) {
  ASM_CODE_COMMENT(masm);
  Register params_size = scratch1;
  // Get the size of the formal parameters + receiver (in bytes).
  __ ldr(params_size,
         MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
  __ ldr(params_size,
         FieldMemOperand(params_size, BytecodeArray::kParameterSizeOffset));

  Register actual_params_size = scratch2;
  // Compute the size of the actual parameters + receiver (in bytes).
  __ ldr(actual_params_size,
         MemOperand(fp, StandardFrameConstants::kArgCOffset));
  __ lsl(actual_params_size, actual_params_size, Operand(kPointerSizeLog2));
  if (!kJSArgcIncludesReceiver) {
    __ add(actual_params_size, actual_params_size, Operand(kSystemPointerSize));
  }

  // If the actual count is bigger than the formal count, use it when freeing
  // up the stack arguments.
  __ cmp(params_size, actual_params_size);
  __ mov(params_size, actual_params_size, LeaveCC, lt);
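  // The conditional mov implements
  //   params_size = max(params_size, actual_params_size)
  // without a branch: the move only executes when the compare set "lt".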

  // Leave the frame (also dropping the register file).
  __ LeaveFrame(StackFrame::INTERPRETED);

  // Drop receiver + arguments.
  __ DropArguments(params_size, TurboAssembler::kCountIsBytes,
                   TurboAssembler::kCountIncludesReceiver);
}

// Tail-call |function_id| if |actual_marker| == |expected_marker|
static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
                                          Register actual_marker,
                                          OptimizationMarker expected_marker,
                                          Runtime::FunctionId function_id) {
  ASM_CODE_COMMENT(masm);
  Label no_match;
  __ cmp_raw_immediate(actual_marker, expected_marker);
  __ b(ne, &no_match);
  GenerateTailCallToReturnedCode(masm, function_id);
  __ bind(&no_match);
}

static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
                                      Register optimized_code_entry,
                                      Register scratch) {
  // ----------- S t a t e -------------
  //  -- r0 : actual argument count
  //  -- r3 : new target (preserved for callee if needed, and caller)
  //  -- r1 : target function (preserved for callee if needed, and caller)
  // -----------------------------------
  DCHECK(!AreAliased(r1, r3, optimized_code_entry, scratch));

  Register closure = r1;
  Label heal_optimized_code_slot;

  // If the optimized code is cleared, go to runtime to update the optimization
  // marker field.
  __ LoadWeakValue(optimized_code_entry, optimized_code_entry,
                   &heal_optimized_code_slot);

  // Check if the optimized code is marked for deopt. If it is, call the
  // runtime to clear it.
  __ ldr(scratch,
         FieldMemOperand(optimized_code_entry, Code::kCodeDataContainerOffset));
  __ ldr(scratch,
         FieldMemOperand(scratch, CodeDataContainer::kKindSpecificFlagsOffset));
  __ tst(scratch, Operand(1 << Code::kMarkedForDeoptimizationBit));
  __ b(ne, &heal_optimized_code_slot);

  // Optimized code is good, get it into the closure and link the closure
  // into the optimized functions list, then tail call the optimized code.
  ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure);
  static_assert(kJavaScriptCallCodeStartRegister == r2, "ABI mismatch");
  __ LoadCodeObjectEntry(r2, optimized_code_entry);
  __ Jump(r2);

  // Optimized code slot contains deoptimized code or code is cleared and
  // optimized code marker isn't updated. Evict the code, update the marker
  // and re-enter the closure's code.
  __ bind(&heal_optimized_code_slot);
  GenerateTailCallToReturnedCode(masm, Runtime::kHealOptimizedCodeSlot);
}

static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
                              Register optimization_marker) {
  // ----------- S t a t e -------------
  //  -- r0 : actual argument count
  //  -- r3 : new target (preserved for callee if needed, and caller)
  //  -- r1 : target function (preserved for callee if needed, and caller)
  //  -- feedback vector (preserved for caller if needed)
  //  -- optimization_marker : an int32 containing a non-zero optimization
  //     marker.
  // -----------------------------------
  DCHECK(!AreAliased(feedback_vector, r1, r3, optimization_marker));

  // TODO(v8:8394): The logging of first execution will break if
  // feedback vectors are not allocated. We need to find a different way of
  // logging these events if required.
  TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
                                OptimizationMarker::kLogFirstExecution,
                                Runtime::kFunctionFirstExecution);
  TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
                                OptimizationMarker::kCompileOptimized,
                                Runtime::kCompileOptimized_NotConcurrent);
  TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
                                OptimizationMarker::kCompileOptimizedConcurrent,
                                Runtime::kCompileOptimized_Concurrent);

  // Marker should be one of LogFirstExecution / CompileOptimized /
  // CompileOptimizedConcurrent. InOptimizationQueue and None shouldn't reach
  // here.
  if (FLAG_debug_code) {
    __ stop();
  }
}

// Advance the current bytecode offset. This simulates what all bytecode
// handlers do upon completion of the underlying operation. Will bail out to a
// label if the bytecode (without prefix) is a return bytecode. Will not advance
// the bytecode offset if the current bytecode is a JumpLoop, instead just
// re-executing the JumpLoop to jump to the correct bytecode.
static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
                                          Register bytecode_array,
                                          Register bytecode_offset,
                                          Register bytecode, Register scratch1,
                                          Register scratch2, Label* if_return) {
  ASM_CODE_COMMENT(masm);
  Register bytecode_size_table = scratch1;

  // The bytecode offset value will be increased by one in wide and extra wide
  // cases. In the case of a wide or extra-wide JumpLoop bytecode, we need to
  // restore the original bytecode offset, so we keep a backup of it.
  Register original_bytecode_offset = scratch2;
  DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode_size_table,
                     bytecode, original_bytecode_offset));

  __ Move(bytecode_size_table,
          ExternalReference::bytecode_size_table_address());
  __ Move(original_bytecode_offset, bytecode_offset);

  // Check if the bytecode is a Wide or ExtraWide prefix bytecode.
  Label process_bytecode;
  STATIC_ASSERT(0 == static_cast<int>(interpreter::Bytecode::kWide));
  STATIC_ASSERT(1 == static_cast<int>(interpreter::Bytecode::kExtraWide));
  STATIC_ASSERT(2 == static_cast<int>(interpreter::Bytecode::kDebugBreakWide));
  STATIC_ASSERT(3 ==
                static_cast<int>(interpreter::Bytecode::kDebugBreakExtraWide));
  __ cmp(bytecode, Operand(0x3));
  __ b(hi, &process_bytecode);
  __ tst(bytecode, Operand(0x1));
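  // The tst result (bit 0 is set for the *ExtraWide prefixes) is consumed by
  // the conditional add further down; the instructions in between leave the
  // flags untouched.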
  // Load the next bytecode.
  __ add(bytecode_offset, bytecode_offset, Operand(1));
  __ ldrb(bytecode, MemOperand(bytecode_array, bytecode_offset));

  // Update table to the wide scaled table.
  __ add(bytecode_size_table, bytecode_size_table,
         Operand(kByteSize * interpreter::Bytecodes::kBytecodeCount));
  // Conditionally update table to the extra wide scaled table. We are taking
  // advantage of the fact that the extra wide follows the wide one.
  __ add(bytecode_size_table, bytecode_size_table,
         Operand(kByteSize * interpreter::Bytecodes::kBytecodeCount), LeaveCC,
         ne);

  __ bind(&process_bytecode);

  // Bailout to the return label if this is a return bytecode.

  // Create cmp, cmpne, ..., cmpne to check for a return bytecode.
  Condition flag = al;
#define JUMP_IF_EQUAL(NAME)                                                   \
  __ cmp(bytecode, Operand(static_cast<int>(interpreter::Bytecode::k##NAME)), \
         flag);                                                               \
  flag = ne;
  RETURN_BYTECODE_LIST(JUMP_IF_EQUAL)
#undef JUMP_IF_EQUAL
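
  // Each cmp after the first is predicated on "ne", so it is skipped once a
  // previous compare has set the Z flag; "eq" therefore reaches the branch
  // below iff the bytecode matched some entry of RETURN_BYTECODE_LIST.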
  __ b(if_return, eq);

  // If this is a JumpLoop, re-execute it to perform the jump to the beginning
  // of the loop.
  Label end, not_jump_loop;
  __ cmp(bytecode, Operand(static_cast<int>(interpreter::Bytecode::kJumpLoop)));
  __ b(ne, &not_jump_loop);
  // We need to restore the original bytecode_offset since we might have
  // increased it to skip the wide / extra-wide prefix bytecode.
  __ Move(bytecode_offset, original_bytecode_offset);
  __ b(&end);

  __ bind(&not_jump_loop);
  // Otherwise, load the size of the current bytecode and advance the offset.
  __ ldrb(scratch1, MemOperand(bytecode_size_table, bytecode));
  __ add(bytecode_offset, bytecode_offset, scratch1);

  __ bind(&end);
}

// Read off the optimization state in the feedback vector and check if there
// is optimized code or an optimization marker that needs to be processed.
static void LoadOptimizationStateAndJumpIfNeedsProcessing(
    MacroAssembler* masm, Register optimization_state, Register feedback_vector,
    Label* has_optimized_code_or_marker) {
  ASM_CODE_COMMENT(masm);
  DCHECK(!AreAliased(optimization_state, feedback_vector));
  __ ldr(optimization_state,
         FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
  __ tst(
      optimization_state,
      Operand(FeedbackVector::kHasOptimizedCodeOrCompileOptimizedMarkerMask));
  __ b(ne, has_optimized_code_or_marker);
}

static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
    MacroAssembler* masm, Register optimization_state,
    Register feedback_vector) {
  ASM_CODE_COMMENT(masm);
  DCHECK(!AreAliased(optimization_state, feedback_vector));
  Label maybe_has_optimized_code;
  // Check if optimized code is available.
  __ tst(
      optimization_state,
      Operand(FeedbackVector::kHasCompileOptimizedOrLogFirstExecutionMarker));
  __ b(eq, &maybe_has_optimized_code);

  Register optimization_marker = optimization_state;
  __ DecodeField<FeedbackVector::OptimizationMarkerBits>(optimization_marker);
  MaybeOptimizeCode(masm, feedback_vector, optimization_marker);

  __ bind(&maybe_has_optimized_code);
  Register optimized_code_entry = optimization_state;
  __ ldr(optimization_marker,
         FieldMemOperand(feedback_vector,
                         FeedbackVector::kMaybeOptimizedCodeOffset));
  TailCallOptimizedCodeSlot(masm, optimized_code_entry, r6);
}

// static
void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
  UseScratchRegisterScope temps(masm);
  // Need a few extra registers.
  temps.Include(r8, r9);

  auto descriptor =
      Builtins::CallInterfaceDescriptorFor(Builtin::kBaselineOutOfLinePrologue);
  Register closure = descriptor.GetRegisterParameter(
      BaselineOutOfLinePrologueDescriptor::kClosure);
  // Load the feedback vector from the closure.
  Register feedback_vector = temps.Acquire();
  __ ldr(feedback_vector,
         FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
  __ ldr(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
  if (FLAG_debug_code) {
    UseScratchRegisterScope temps(masm);
    Register scratch = temps.Acquire();
    __ CompareObjectType(feedback_vector, scratch, scratch,
                         FEEDBACK_VECTOR_TYPE);
    __ Assert(eq, AbortReason::kExpectedFeedbackVector);
  }

  // Check for an optimization marker.
  Label has_optimized_code_or_marker;
  Register optimization_state = no_reg;
  {
    UseScratchRegisterScope temps(masm);
    // optimization_state will be used only in |has_optimized_code_or_marker|
    // and outside it can be reused.
    optimization_state = temps.Acquire();
    LoadOptimizationStateAndJumpIfNeedsProcessing(
        masm, optimization_state, feedback_vector,
        &has_optimized_code_or_marker);
  }

  // Increment invocation count for the function.
  {
    UseScratchRegisterScope temps(masm);
    Register invocation_count = temps.Acquire();
    __ ldr(invocation_count,
           FieldMemOperand(feedback_vector,
                           FeedbackVector::kInvocationCountOffset));
    __ add(invocation_count, invocation_count, Operand(1));
    __ str(invocation_count,
           FieldMemOperand(feedback_vector,
                           FeedbackVector::kInvocationCountOffset));
  }

  FrameScope frame_scope(masm, StackFrame::MANUAL);
  {
    ASM_CODE_COMMENT_STRING(masm, "Frame Setup");
    // Normally the first thing we'd do here is Push(lr, fp), but we already
    // entered the frame in BaselineCompiler::Prologue, as we had to use the
    // value of lr before the call to this BaselineOutOfLinePrologue builtin.

    Register callee_context = descriptor.GetRegisterParameter(
        BaselineOutOfLinePrologueDescriptor::kCalleeContext);
    Register callee_js_function = descriptor.GetRegisterParameter(
        BaselineOutOfLinePrologueDescriptor::kClosure);
    __ Push(callee_context, callee_js_function);
    DCHECK_EQ(callee_js_function, kJavaScriptCallTargetRegister);
    DCHECK_EQ(callee_js_function, kJSFunctionRegister);

    Register argc = descriptor.GetRegisterParameter(
        BaselineOutOfLinePrologueDescriptor::kJavaScriptCallArgCount);
    // We'll use the bytecode both for resetting code age/OSR state and for
    // pushing onto the frame, so load it into a register.
    Register bytecodeArray = descriptor.GetRegisterParameter(
        BaselineOutOfLinePrologueDescriptor::kInterpreterBytecodeArray);

    // Reset code age and the OSR arming. The OSR field and BytecodeAgeOffset
    // are 8-bit fields next to each other, so we can clear both with a single
    // 16-bit write. These static asserts guard that assumption.
    STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
                  BytecodeArray::kOsrLoopNestingLevelOffset + kCharSize);
    STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
    {
      UseScratchRegisterScope temps(masm);
      Register scratch = temps.Acquire();
      __ mov(scratch, Operand(0));
      __ strh(scratch,
              FieldMemOperand(bytecodeArray,
                              BytecodeArray::kOsrLoopNestingLevelOffset));
    }

    __ Push(argc, bytecodeArray);

    // Baseline code frames store the feedback vector where interpreter would
    // store the bytecode offset.
    if (FLAG_debug_code) {
      UseScratchRegisterScope temps(masm);
      Register scratch = temps.Acquire();
      __ CompareObjectType(feedback_vector, scratch, scratch,
                           FEEDBACK_VECTOR_TYPE);
      __ Assert(eq, AbortReason::kExpectedFeedbackVector);
    }
    __ Push(feedback_vector);
  }

  Label call_stack_guard;
  Register frame_size = descriptor.GetRegisterParameter(
      BaselineOutOfLinePrologueDescriptor::kStackFrameSize);
  {
    ASM_CODE_COMMENT_STRING(masm, "Stack/interrupt check");
    // Stack check. This folds the checks for both the interrupt stack limit
    // check and the real stack limit into one by just checking for the
    // interrupt limit. The interrupt limit is either equal to the real stack
    // limit or tighter. By ensuring we have space until that limit after
    // building the frame we can quickly precheck both at once.
    UseScratchRegisterScope temps(masm);

    Register sp_minus_frame_size = temps.Acquire();
    __ sub(sp_minus_frame_size, sp, frame_size);
    Register interrupt_limit = temps.Acquire();
    __ LoadStackLimit(interrupt_limit, StackLimitKind::kInterruptStackLimit);
    __ cmp(sp_minus_frame_size, interrupt_limit);
    __ b(&call_stack_guard, lo);
  }

  // Do "fast" return to the caller pc in lr.
  __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
  __ Ret();

  __ bind(&has_optimized_code_or_marker);
  {
    ASM_CODE_COMMENT_STRING(masm, "Optimized marker check");
    UseScratchRegisterScope temps(masm);
    // Ensure the optimization_state is not allocated again.
    temps.Exclude(optimization_state);

    // Drop the frame created by the baseline call.
    __ ldm(ia_w, sp, fp.bit() | lr.bit());
    MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(masm, optimization_state,
                                                 feedback_vector);
    __ Trap();
  }

  __ bind(&call_stack_guard);
  {
    ASM_CODE_COMMENT_STRING(masm, "Stack/interrupt call");
    FrameScope frame_scope(masm, StackFrame::INTERNAL);
    // Save incoming new target or generator.
    __ Push(kJavaScriptCallNewTargetRegister);
    __ SmiTag(frame_size);
    __ Push(frame_size);
    __ CallRuntime(Runtime::kStackGuardWithGap);
    __ Pop(kJavaScriptCallNewTargetRegister);
  }

  __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
  __ Ret();
}

// Generate code for entering a JS function with the interpreter.
// On entry to the function the receiver and arguments have been pushed on the
// stack left to right.
//
// The live registers are:
//   o r0: actual argument count
//   o r1: the JS function object being called.
//   o r3: the incoming new target or generator object
//   o cp: our context
//   o fp: the caller's frame pointer
//   o sp: stack pointer
//   o lr: return address
//
// The function builds an interpreter frame. See InterpreterFrameConstants in
// frame-constants.h for its layout.
void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
  Register closure = r1;
  Register feedback_vector = r2;

  // Get the bytecode array from the function object and load it into
  // kInterpreterBytecodeArrayRegister.
  __ ldr(r4, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
  __ ldr(kInterpreterBytecodeArrayRegister,
         FieldMemOperand(r4, SharedFunctionInfo::kFunctionDataOffset));

  Label is_baseline;
  GetSharedFunctionInfoBytecodeOrBaseline(
      masm, kInterpreterBytecodeArrayRegister, r8, &is_baseline);

  // The bytecode array could have been flushed from the shared function info,
  // if so, call into CompileLazy.
  Label compile_lazy;
  __ CompareObjectType(kInterpreterBytecodeArrayRegister, r4, no_reg,
                       BYTECODE_ARRAY_TYPE);
  __ b(ne, &compile_lazy);

  // Load the feedback vector from the closure.
  __ ldr(feedback_vector,
         FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
  __ ldr(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));

  Label push_stack_frame;
  // Check if feedback vector is valid. If valid, check for optimized code
  // and update invocation count. Otherwise, set up the stack frame.
  __ ldr(r4, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
  __ ldrh(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
  __ cmp(r4, Operand(FEEDBACK_VECTOR_TYPE));
  __ b(ne, &push_stack_frame);

  Register optimization_state = r4;
  Label has_optimized_code_or_marker;
  LoadOptimizationStateAndJumpIfNeedsProcessing(
      masm, optimization_state, feedback_vector, &has_optimized_code_or_marker);

  Label not_optimized;
  __ bind(&not_optimized);

  // Increment invocation count for the function.
  __ ldr(r9, FieldMemOperand(feedback_vector,
                             FeedbackVector::kInvocationCountOffset));
  __ add(r9, r9, Operand(1));
  __ str(r9, FieldMemOperand(feedback_vector,
                             FeedbackVector::kInvocationCountOffset));

  // Open a frame scope to indicate that there is a frame on the stack. The
  // MANUAL indicates that the scope shouldn't actually generate code to set up
  // the frame (that is done below).
  __ bind(&push_stack_frame);
  FrameScope frame_scope(masm, StackFrame::MANUAL);
  __ PushStandardFrame(closure);

  // Reset code age and the OSR arming. The OSR field and BytecodeAgeOffset are
  // 8-bit fields next to each other, so we can clear both with a single 16-bit
  // write. These static asserts guard that assumption.
  STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
                BytecodeArray::kOsrLoopNestingLevelOffset + kCharSize);
  STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
  __ mov(r9, Operand(0));
  __ strh(r9, FieldMemOperand(kInterpreterBytecodeArrayRegister,
                              BytecodeArray::kOsrLoopNestingLevelOffset));

  // Load the initial bytecode offset.
  __ mov(kInterpreterBytecodeOffsetRegister,
         Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));

  // Push bytecode array and Smi tagged bytecode array offset.
  __ SmiTag(r4, kInterpreterBytecodeOffsetRegister);
  __ Push(kInterpreterBytecodeArrayRegister, r4);

  // Allocate the local and temporary register file on the stack.
  Label stack_overflow;
  {
    // Load frame size from the BytecodeArray object.
    __ ldr(r4, FieldMemOperand(kInterpreterBytecodeArrayRegister,
                               BytecodeArray::kFrameSizeOffset));

    // Do a stack check to ensure we don't go over the limit.
    __ sub(r9, sp, Operand(r4));
    __ LoadStackLimit(r2, StackLimitKind::kRealStackLimit);
    __ cmp(r9, Operand(r2));
    __ b(lo, &stack_overflow);

    // If ok, push undefined as the initial value for all register file entries.
    Label loop_header;
    Label loop_check;
    __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
    __ b(&loop_check, al);
    __ bind(&loop_header);
    // TODO(rmcilroy): Consider doing more than one push per loop iteration.
    __ push(kInterpreterAccumulatorRegister);
    // Continue loop if not done.
    __ bind(&loop_check);
    __ sub(r4, r4, Operand(kPointerSize), SetCC);
    __ b(&loop_header, ge);
  }

  // If the bytecode array has a valid incoming new target or generator object
  // register, initialize it with the incoming value which was passed in r3.
  __ ldr(r9, FieldMemOperand(
                 kInterpreterBytecodeArrayRegister,
                 BytecodeArray::kIncomingNewTargetOrGeneratorRegisterOffset));
  __ cmp(r9, Operand::Zero());
  __ str(r3, MemOperand(fp, r9, LSL, kPointerSizeLog2), ne);

  // Perform interrupt stack check.
  // TODO(solanes): Merge with the real stack limit check above.
  Label stack_check_interrupt, after_stack_check_interrupt;
  __ LoadStackLimit(r4, StackLimitKind::kInterruptStackLimit);
  __ cmp(sp, r4);
  __ b(lo, &stack_check_interrupt);
  __ bind(&after_stack_check_interrupt);

  // The accumulator is already loaded with undefined.

  // Load the dispatch table into a register and dispatch to the bytecode
  // handler at the current bytecode offset.
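  // Roughly (illustrative sketch):
  //   handler = dispatch_table[bytecode_array[bytecode_offset]];
  //   call handler;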
  Label do_dispatch;
  __ bind(&do_dispatch);
  __ Move(
      kInterpreterDispatchTableRegister,
      ExternalReference::interpreter_dispatch_table_address(masm->isolate()));
  __ ldrb(r4, MemOperand(kInterpreterBytecodeArrayRegister,
                         kInterpreterBytecodeOffsetRegister));
  __ ldr(
      kJavaScriptCallCodeStartRegister,
      MemOperand(kInterpreterDispatchTableRegister, r4, LSL, kPointerSizeLog2));
  __ Call(kJavaScriptCallCodeStartRegister);
  masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());

  // Any returns to the entry trampoline are either due to the return bytecode
  // or the interpreter tail calling a builtin and then a dispatch.

  // Get bytecode array and bytecode offset from the stack frame.
  __ ldr(kInterpreterBytecodeArrayRegister,
         MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
  __ ldr(kInterpreterBytecodeOffsetRegister,
         MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
  __ SmiUntag(kInterpreterBytecodeOffsetRegister);

  // Either return, or advance to the next bytecode and dispatch.
  Label do_return;
  __ ldrb(r1, MemOperand(kInterpreterBytecodeArrayRegister,
                         kInterpreterBytecodeOffsetRegister));
  AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
                                kInterpreterBytecodeOffsetRegister, r1, r2, r3,
                                &do_return);
  __ jmp(&do_dispatch);

  __ bind(&do_return);
  // The return value is in r0.
  LeaveInterpreterFrame(masm, r2, r4);
  __ Jump(lr);

  __ bind(&stack_check_interrupt);
  // Modify the bytecode offset in the stack to be kFunctionEntryBytecodeOffset
  // for the call to the StackGuard.
  __ mov(kInterpreterBytecodeOffsetRegister,
         Operand(Smi::FromInt(BytecodeArray::kHeaderSize - kHeapObjectTag +
                              kFunctionEntryBytecodeOffset)));
  __ str(kInterpreterBytecodeOffsetRegister,
         MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
  __ CallRuntime(Runtime::kStackGuard);

  // After the call, restore the bytecode array, bytecode offset and accumulator
  // registers again. Also, restore the bytecode offset in the stack to its
  // previous value.
  __ ldr(kInterpreterBytecodeArrayRegister,
         MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
  __ mov(kInterpreterBytecodeOffsetRegister,
         Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
  __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);

  __ SmiTag(r4, kInterpreterBytecodeOffsetRegister);
  __ str(r4, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));

  __ jmp(&after_stack_check_interrupt);

  __ bind(&has_optimized_code_or_marker);
  MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(masm, optimization_state,
                                               feedback_vector);

  __ bind(&is_baseline);
  {
    // Load the feedback vector from the closure.
    __ ldr(feedback_vector,
           FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
    __ ldr(feedback_vector,
           FieldMemOperand(feedback_vector, Cell::kValueOffset));

    Label install_baseline_code;
    // Check if feedback vector is valid. If not, call prepare for baseline to
    // allocate it.
    __ ldr(r8, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
    __ ldrh(r8, FieldMemOperand(r8, Map::kInstanceTypeOffset));
    __ cmp(r8, Operand(FEEDBACK_VECTOR_TYPE));
    __ b(ne, &install_baseline_code);

    // Check for an optimization marker.
    LoadOptimizationStateAndJumpIfNeedsProcessing(
        masm, optimization_state, feedback_vector,
        &has_optimized_code_or_marker);

    // Load the baseline code into the closure.
    __ mov(r2, kInterpreterBytecodeArrayRegister);
    static_assert(kJavaScriptCallCodeStartRegister == r2, "ABI mismatch");
    ReplaceClosureCodeWithOptimizedCode(masm, r2, closure);
    __ JumpCodeObject(r2);

    __ bind(&install_baseline_code);
    GenerateTailCallToReturnedCode(masm, Runtime::kInstallBaselineCode);
  }

  __ bind(&compile_lazy);
  GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);

  __ bind(&stack_overflow);
  __ CallRuntime(Runtime::kThrowStackOverflow);
  __ bkpt(0);  // Should not return.
}

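// Pushes |num_args| interpreter register values onto the stack. The
// interpreter passes the address of the first (highest-addressed) argument,
// so the lowest address is start_address - (num_args - 1) slots; e.g. with
// num_args == 3 and 4-byte slots the arguments live at start_address,
// start_address - 4 and start_address - 8.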
static void GenerateInterpreterPushArgs(MacroAssembler* masm, Register num_args,
                                        Register start_address,
                                        Register scratch) {
  ASM_CODE_COMMENT(masm);
  // Find the argument with lowest address.
  __ sub(scratch, num_args, Operand(1));
  __ mov(scratch, Operand(scratch, LSL, kSystemPointerSizeLog2));
  __ sub(start_address, start_address, scratch);
  // Push the arguments.
  __ PushArray(start_address, num_args, scratch,
               TurboAssembler::PushArrayOrder::kReverse);
}

// static
void Builtins::Generate_InterpreterPushArgsThenCallImpl(
    MacroAssembler* masm, ConvertReceiverMode receiver_mode,
    InterpreterPushArgsMode mode) {
  DCHECK(mode != InterpreterPushArgsMode::kArrayFunction);
  // ----------- S t a t e -------------
  // -- r0 : the number of arguments
  // -- r2 : the address of the first argument to be pushed. Subsequent
  //         arguments should be consecutive above this, in the same order as
  //         they are to be pushed onto the stack.
  // -- r1 : the target to call (can be any Object).
  // -----------------------------------
  Label stack_overflow;

  if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
    // The spread argument should not be pushed.
    __ sub(r0, r0, Operand(1));
  }

  const bool skip_receiver =
      receiver_mode == ConvertReceiverMode::kNullOrUndefined;
  if (kJSArgcIncludesReceiver && skip_receiver) {
    __ sub(r3, r0, Operand(kJSArgcReceiverSlots));
  } else if (!kJSArgcIncludesReceiver && !skip_receiver) {
    __ add(r3, r0, Operand(1));
  } else {
    __ mov(r3, r0);
  }
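  // r3 now holds the number of slots to push here, excluding the receiver
  // only when it is implicitly undefined; e.g. with kJSArgcIncludesReceiver
  // and a kNullOrUndefined receiver, an argc of 3 (receiver plus two args)
  // pushes just the two explicit arguments, and the undefined receiver is
  // pushed separately below.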

  __ StackOverflowCheck(r3, r4, &stack_overflow);

  // Push the arguments. r2 and r4 will be modified.
  GenerateInterpreterPushArgs(masm, r3, r2, r4);

  // Push "undefined" as the receiver arg if we need to.
  if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
    __ PushRoot(RootIndex::kUndefinedValue);
  }

  if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
    // Pass the spread in the register r2.
    // r2 already points to the penultimate argument, the spread
    // lies in the next interpreter register.
    __ sub(r2, r2, Operand(kSystemPointerSize));
    __ ldr(r2, MemOperand(r2));
  }

  // Call the target.
  if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
    __ Jump(BUILTIN_CODE(masm->isolate(), CallWithSpread),
            RelocInfo::CODE_TARGET);
  } else {
    __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny),
            RelocInfo::CODE_TARGET);
  }

  __ bind(&stack_overflow);
  {
    __ TailCallRuntime(Runtime::kThrowStackOverflow);
    // Unreachable code.
    __ bkpt(0);
  }
}

// static
void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
    MacroAssembler* masm, InterpreterPushArgsMode mode) {
  // ----------- S t a t e -------------
  // -- r0 : argument count
  // -- r3 : new target
  // -- r1 : constructor to call
  // -- r2 : allocation site feedback if available, undefined otherwise.
  // -- r4 : address of the first argument
  // -----------------------------------
  Label stack_overflow;

  __ StackOverflowCheck(r0, r6, &stack_overflow);

  if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
    // The spread argument should not be pushed.
    __ sub(r0, r0, Operand(1));
  }

  Register argc_without_receiver = r0;
  if (kJSArgcIncludesReceiver) {
    argc_without_receiver = r6;
    __ sub(argc_without_receiver, r0, Operand(kJSArgcReceiverSlots));
  }
  // Push the arguments. r4 and r5 will be modified.
  GenerateInterpreterPushArgs(masm, argc_without_receiver, r4, r5);

  // Push a slot for the receiver to be constructed.
  __ mov(r5, Operand::Zero());
  __ push(r5);

  if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
    // Pass the spread in the register r2.
    // r4 already points to the penultimate argument, the spread
    // lies in the next interpreter register.
    __ sub(r4, r4, Operand(kSystemPointerSize));
    __ ldr(r2, MemOperand(r4));
  } else {
    __ AssertUndefinedOrAllocationSite(r2, r5);
  }

  if (mode == InterpreterPushArgsMode::kArrayFunction) {
    __ AssertFunction(r1);

    // Tail call to the array construct stub (still in the caller
    // context at this point).
    Handle<Code> code = BUILTIN_CODE(masm->isolate(), ArrayConstructorImpl);
    __ Jump(code, RelocInfo::CODE_TARGET);
  } else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
    // Call the constructor with r0, r1, and r3 unmodified.
    __ Jump(BUILTIN_CODE(masm->isolate(), ConstructWithSpread),
            RelocInfo::CODE_TARGET);
  } else {
    DCHECK_EQ(InterpreterPushArgsMode::kOther, mode);
    // Call the constructor with r0, r1, and r3 unmodified.
    __ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET);
  }

  __ bind(&stack_overflow);
  {
    __ TailCallRuntime(Runtime::kThrowStackOverflow);
    // Unreachable code.
    __ bkpt(0);
  }
}

static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
  // Set the return address to the correct point in the interpreter entry
  // trampoline.
  Label builtin_trampoline, trampoline_loaded;
  Smi interpreter_entry_return_pc_offset(
      masm->isolate()->heap()->interpreter_entry_return_pc_offset());
  DCHECK_NE(interpreter_entry_return_pc_offset, Smi::zero());

  // If the SFI function_data is an InterpreterData, the function will have a
  // custom copy of the interpreter entry trampoline for profiling. If so,
  // get the custom trampoline, otherwise grab the entry address of the global
  // trampoline.
  __ ldr(r2, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
  __ ldr(r2, FieldMemOperand(r2, JSFunction::kSharedFunctionInfoOffset));
  __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kFunctionDataOffset));
  __ CompareObjectType(r2, kInterpreterDispatchTableRegister,
                       kInterpreterDispatchTableRegister,
                       INTERPRETER_DATA_TYPE);
  __ b(ne, &builtin_trampoline);

  __ ldr(r2,
         FieldMemOperand(r2, InterpreterData::kInterpreterTrampolineOffset));
  __ add(r2, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
  __ b(&trampoline_loaded);

  __ bind(&builtin_trampoline);
  __ Move(r2, ExternalReference::
                  address_of_interpreter_entry_trampoline_instruction_start(
                      masm->isolate()));
  __ ldr(r2, MemOperand(r2));

  __ bind(&trampoline_loaded);
  __ add(lr, r2, Operand(interpreter_entry_return_pc_offset.value()));

  // Initialize the dispatch table register.
  __ Move(
      kInterpreterDispatchTableRegister,
      ExternalReference::interpreter_dispatch_table_address(masm->isolate()));

  // Get the bytecode array pointer from the frame.
  __ ldr(kInterpreterBytecodeArrayRegister,
         MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));

  if (FLAG_debug_code) {
    // Check function data field is actually a BytecodeArray object.
    __ SmiTst(kInterpreterBytecodeArrayRegister);
    __ Assert(
        ne, AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
    __ CompareObjectType(kInterpreterBytecodeArrayRegister, r1, no_reg,
                         BYTECODE_ARRAY_TYPE);
    __ Assert(
        eq, AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
  }

  // Get the target bytecode offset from the frame.
  __ ldr(kInterpreterBytecodeOffsetRegister,
         MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
  __ SmiUntag(kInterpreterBytecodeOffsetRegister);

  if (FLAG_debug_code) {
    Label okay;
    __ cmp(kInterpreterBytecodeOffsetRegister,
           Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
    __ b(ge, &okay);
    __ bkpt(0);
    __ bind(&okay);
  }

  // Dispatch to the target bytecode.
  UseScratchRegisterScope temps(masm);
  Register scratch = temps.Acquire();
  __ ldrb(scratch, MemOperand(kInterpreterBytecodeArrayRegister,
                              kInterpreterBytecodeOffsetRegister));
  __ ldr(kJavaScriptCallCodeStartRegister,
         MemOperand(kInterpreterDispatchTableRegister, scratch, LSL,
                    kPointerSizeLog2));
  __ Jump(kJavaScriptCallCodeStartRegister);
}

void Builtins::Generate_InterpreterEnterAtNextBytecode(MacroAssembler* masm) {
  // Get bytecode array and bytecode offset from the stack frame.
  __ ldr(kInterpreterBytecodeArrayRegister,
         MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
  __ ldr(kInterpreterBytecodeOffsetRegister,
         MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
  __ SmiUntag(kInterpreterBytecodeOffsetRegister);

  Label enter_bytecode, function_entry_bytecode;
  __ cmp(kInterpreterBytecodeOffsetRegister,
         Operand(BytecodeArray::kHeaderSize - kHeapObjectTag +
                 kFunctionEntryBytecodeOffset));
  __ b(eq, &function_entry_bytecode);

  // Load the current bytecode.
  __ ldrb(r1, MemOperand(kInterpreterBytecodeArrayRegister,
                         kInterpreterBytecodeOffsetRegister));

  // Advance to the next bytecode.
  Label if_return;
  AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
                                kInterpreterBytecodeOffsetRegister, r1, r2, r3,
                                &if_return);

  __ bind(&enter_bytecode);
  // Convert new bytecode offset to a Smi and save in the stackframe.
  __ SmiTag(r2, kInterpreterBytecodeOffsetRegister);
  __ str(r2, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));

  Generate_InterpreterEnterBytecode(masm);

  __ bind(&function_entry_bytecode);
  // If the code deoptimizes during the implicit function entry stack interrupt
  // check, it will have a bailout ID of kFunctionEntryBytecodeOffset, which is
  // not a valid bytecode offset. Detect this case and advance to the first
  // actual bytecode.
  __ mov(kInterpreterBytecodeOffsetRegister,
         Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
  __ b(&enter_bytecode);

  // We should never take the if_return path.
  __ bind(&if_return);
  __ Abort(AbortReason::kInvalidBytecodeAdvance);
}

void Builtins::Generate_InterpreterEnterAtBytecode(MacroAssembler* masm) {
  Generate_InterpreterEnterBytecode(masm);
}

namespace {
void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
                                      bool java_script_builtin,
                                      bool with_result) {
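  // On entry the stack holds, roughly, the continuation frame built by the
  // deoptimizer: the saved allocatable registers (with a hole where the
  // result goes), the builtin index as a Smi, and the fixed frame slots.
  // This helper unwinds that frame and tail-calls the continuation builtin.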
  const RegisterConfiguration* config(RegisterConfiguration::Default());
  int allocatable_register_count = config->num_allocatable_general_registers();
  UseScratchRegisterScope temps(masm);
  Register scratch = temps.Acquire();  // Temp register is not allocatable.
  if (with_result) {
    if (java_script_builtin) {
      __ mov(scratch, r0);
    } else {
      // Overwrite the hole inserted by the deoptimizer with the return value
      // from the LAZY deopt point.
      __ str(
          r0,
          MemOperand(
              sp, config->num_allocatable_general_registers() * kPointerSize +
                      BuiltinContinuationFrameConstants::kFixedFrameSize));
    }
  }
  for (int i = allocatable_register_count - 1; i >= 0; --i) {
    int code = config->GetAllocatableGeneralCode(i);
    __ Pop(Register::from_code(code));
    if (java_script_builtin && code == kJavaScriptCallArgCountRegister.code()) {
      __ SmiUntag(Register::from_code(code));
    }
  }
  if (java_script_builtin && with_result) {
    // Overwrite the hole inserted by the deoptimizer with the return value
    // from the LAZY deopt point. r0 contains the arguments count, the return
    // value from LAZY is always the last argument.
    constexpr int return_value_offset =
        BuiltinContinuationFrameConstants::kFixedSlotCount -
        kJSArgcReceiverSlots;
    __ add(r0, r0, Operand(return_value_offset));
    __ str(scratch, MemOperand(sp, r0, LSL, kPointerSizeLog2));
    // Recover arguments count.
    __ sub(r0, r0, Operand(return_value_offset));
  }
  __ ldr(fp, MemOperand(
                 sp, BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
  // Load builtin index (stored as a Smi) and use it to get the builtin start
  // address from the builtins table.
  Register builtin = scratch;
  __ Pop(builtin);
  __ add(sp, sp,
         Operand(BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
  __ Pop(lr);
  __ LoadEntryFromBuiltinIndex(builtin);
  __ bx(builtin);
}
}  // namespace

void Builtins::Generate_ContinueToCodeStubBuiltin(MacroAssembler* masm) {
  Generate_ContinueToBuiltinHelper(masm, false, false);
}

void Builtins::Generate_ContinueToCodeStubBuiltinWithResult(
    MacroAssembler* masm) {
  Generate_ContinueToBuiltinHelper(masm, false, true);
}

void Builtins::Generate_ContinueToJavaScriptBuiltin(MacroAssembler* masm) {
  Generate_ContinueToBuiltinHelper(masm, true, false);
}

void Builtins::Generate_ContinueToJavaScriptBuiltinWithResult(
    MacroAssembler* masm) {
  Generate_ContinueToBuiltinHelper(masm, true, true);
}

void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
  {
    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
    __ CallRuntime(Runtime::kNotifyDeoptimized);
  }

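  // The deoptimizer leaves the (possibly updated) accumulator value on top of
  // the stack; pop it straight into the accumulator register before returning.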
  DCHECK_EQ(kInterpreterAccumulatorRegister.code(), r0.code());
  __ pop(r0);
  __ Ret();
}

namespace {

void Generate_OSREntry(MacroAssembler* masm, Register entry_address,
                       Operand offset = Operand::Zero()) {
  // Compute the target address = entry_address + offset
  if (offset.IsImmediate() && offset.immediate() == 0) {
    __ mov(lr, entry_address);
  } else {
    __ add(lr, entry_address, offset);
  }

  // "return" to the OSR entry point of the function.
  __ Ret();
}

void OnStackReplacement(MacroAssembler* masm, bool is_interpreter) {
  ASM_CODE_COMMENT(masm);
  {
    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
    __ CallRuntime(Runtime::kCompileForOnStackReplacement);
  }

  // If the code object is null, just return to the caller.
  Label skip;
  __ cmp(r0, Operand(Smi::zero()));
  __ b(ne, &skip);
  __ Ret();

  __ bind(&skip);

  if (is_interpreter) {
    // Drop the handler frame that is sitting on top of the actual
    // JavaScript frame. This is the case when OSR is triggered from bytecode.
    __ LeaveFrame(StackFrame::STUB);
  }

  // Load deoptimization data from the code object.
  // <deopt_data> = <code>[#deoptimization_data_offset]
  __ ldr(r1,
         FieldMemOperand(r0, Code::kDeoptimizationDataOrInterpreterDataOffset));

  {
    ConstantPoolUnavailableScope constant_pool_unavailable(masm);
    __ add(r0, r0, Operand(Code::kHeaderSize - kHeapObjectTag));  // Code start

    // Load the OSR entrypoint offset from the deoptimization data.
    // <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
    __ ldr(r1, FieldMemOperand(r1, FixedArray::OffsetOfElementAt(
                                       DeoptimizationData::kOsrPcOffsetIndex)));

    Generate_OSREntry(masm, r0, Operand::SmiUntag(r1));
  }
}
}  // namespace

void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
  return OnStackReplacement(masm, true);
}

void Builtins::Generate_BaselineOnStackReplacement(MacroAssembler* masm) {
  __ ldr(kContextRegister,
         MemOperand(fp, BaselineFrameConstants::kContextOffset));
  return OnStackReplacement(masm, false);
}

// static
void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  // -- r0 : argc
  // -- sp[0] : receiver
  // -- sp[4] : thisArg
  // -- sp[8] : argArray
  // -----------------------------------

  // 1. Load receiver into r1, argArray into r2 (if present), remove all
  // arguments from the stack (including the receiver), and push thisArg (if
  // present) instead.
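  // Note the conditional (ge) loads below: each ldr only executes when argc
  // proves the optional argument was actually passed, so the undefined
  // defaults survive otherwise. E.g. for f.apply(thisArg) with no argArray,
  // only the thisArg load fires and r2 stays undefined.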
  {
    __ LoadRoot(r5, RootIndex::kUndefinedValue);
    __ mov(r2, r5);
    __ ldr(r1, MemOperand(sp, 0));  // receiver
    __ cmp(r0, Operand(JSParameterCount(1)));
    __ ldr(r5, MemOperand(sp, kSystemPointerSize), ge);  // thisArg
    __ cmp(r0, Operand(JSParameterCount(2)), ge);
    __ ldr(r2, MemOperand(sp, 2 * kSystemPointerSize), ge);  // argArray
    __ DropArgumentsAndPushNewReceiver(
        r0, r5, TurboAssembler::kCountIsInteger,
        kJSArgcIncludesReceiver ? TurboAssembler::kCountIncludesReceiver
                                : TurboAssembler::kCountExcludesReceiver);
  }

  // ----------- S t a t e -------------
  // -- r2 : argArray
  // -- r1 : receiver
  // -- sp[0] : thisArg
  // -----------------------------------

  // 2. We don't need to check explicitly for callable receiver here,
  // since that's the first thing the Call/CallWithArrayLike builtins
  // will do.

  // 3. Tail call with no arguments if argArray is null or undefined.
  Label no_arguments;
  __ JumpIfRoot(r2, RootIndex::kNullValue, &no_arguments);
  __ JumpIfRoot(r2, RootIndex::kUndefinedValue, &no_arguments);

  // 4a. Apply the receiver to the given argArray.
  __ Jump(BUILTIN_CODE(masm->isolate(), CallWithArrayLike),
          RelocInfo::CODE_TARGET);

  // 4b. The argArray is either null or undefined, so we tail call without any
  // arguments to the receiver.
  __ bind(&no_arguments);
  {
    __ mov(r0, Operand(JSParameterCount(0)));
    __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
  }
}

// static
void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
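  // For a call like f.call(t, x) the stack holds [f, t, x] with f as the
  // receiver; popping f below makes it the call target, t becomes the new
  // receiver and x the only argument, which is why argc shrinks by one.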
  // 1. Get the callable to call (passed as receiver) from the stack.
  __ Pop(r1);

  // 2. Make sure we have at least one argument.
  // r0: actual number of arguments
  {
    Label done;
    __ cmp(r0, Operand(JSParameterCount(0)));
    __ b(ne, &done);
    __ PushRoot(RootIndex::kUndefinedValue);
    __ add(r0, r0, Operand(1));
    __ bind(&done);
  }

  // 3. Adjust the actual number of arguments.
  __ sub(r0, r0, Operand(1));

  // 4. Call the callable.
  __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
}

void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  // -- r0 : argc
  // -- sp[0] : receiver
  // -- sp[4] : target (if argc >= 1)
  // -- sp[8] : thisArgument (if argc >= 2)
  // -- sp[12] : argumentsList (if argc == 3)
  // -----------------------------------

  // 1. Load target into r1 (if present), argumentsList into r2 (if present),
  // remove all arguments from the stack (including the receiver), and push
  // thisArgument (if present) instead.
  {
    __ LoadRoot(r1, RootIndex::kUndefinedValue);
    __ mov(r5, r1);
    __ mov(r2, r1);
    __ cmp(r0, Operand(JSParameterCount(1)));
    __ ldr(r1, MemOperand(sp, kSystemPointerSize), ge);  // target
    __ cmp(r0, Operand(JSParameterCount(2)), ge);
    __ ldr(r5, MemOperand(sp, 2 * kSystemPointerSize), ge);  // thisArgument
    __ cmp(r0, Operand(JSParameterCount(3)), ge);
    __ ldr(r2, MemOperand(sp, 3 * kSystemPointerSize), ge);  // argumentsList
    __ DropArgumentsAndPushNewReceiver(
        r0, r5, TurboAssembler::kCountIsInteger,
        kJSArgcIncludesReceiver ? TurboAssembler::kCountIncludesReceiver
                                : TurboAssembler::kCountExcludesReceiver);
  }

  // ----------- S t a t e -------------
  // -- r2 : argumentsList
  // -- r1 : target
  // -- sp[0] : thisArgument
  // -----------------------------------

  // 2. We don't need to check explicitly for callable target here,
  // since that's the first thing the Call/CallWithArrayLike builtins
  // will do.

  // 3. Apply the target to the given argumentsList.
  __ Jump(BUILTIN_CODE(masm->isolate(), CallWithArrayLike),
          RelocInfo::CODE_TARGET);
}

void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  // -- r0 : argc
  // -- sp[0] : receiver
  // -- sp[4] : target
  // -- sp[8] : argumentsList
  // -- sp[12] : new.target (optional)
  // -----------------------------------

  // 1. Load target into r1 (if present), argumentsList into r2 (if present),
  // new.target into r3 (if present, otherwise use target), remove all
  // arguments from the stack (including the receiver), and push undefined as
  // the new receiver instead.
  {
    __ LoadRoot(r1, RootIndex::kUndefinedValue);
    __ mov(r2, r1);
    __ mov(r4, r1);
    __ cmp(r0, Operand(JSParameterCount(1)));
    __ ldr(r1, MemOperand(sp, kSystemPointerSize), ge);  // target
    __ mov(r3, r1);  // new.target defaults to target
    __ cmp(r0, Operand(JSParameterCount(2)), ge);
    __ ldr(r2, MemOperand(sp, 2 * kSystemPointerSize), ge);  // argumentsList
    __ cmp(r0, Operand(JSParameterCount(3)), ge);
    __ ldr(r3, MemOperand(sp, 3 * kSystemPointerSize), ge);  // new.target
    __ DropArgumentsAndPushNewReceiver(
        r0, r4, TurboAssembler::kCountIsInteger,
        kJSArgcIncludesReceiver ? TurboAssembler::kCountIncludesReceiver
                                : TurboAssembler::kCountExcludesReceiver);
  }

  // ----------- S t a t e -------------
  // -- r2 : argumentsList
  // -- r3 : new.target
  // -- r1 : target
  // -- sp[0] : receiver (undefined)
  // -----------------------------------

  // 2. We don't need to check explicitly for constructor target here,
  // since that's the first thing the Construct/ConstructWithArrayLike
  // builtins will do.

  // 3. We don't need to check explicitly for constructor new.target here,
  // since that's the second thing the Construct/ConstructWithArrayLike
  // builtins will do.

  // 4. Construct the target with the given new.target and argumentsList.
  __ Jump(BUILTIN_CODE(masm->isolate(), ConstructWithArrayLike),
          RelocInfo::CODE_TARGET);
}

namespace {

// Allocate new stack space for |count| arguments and shift all existing
// arguments already on the stack. |pointer_to_new_space_out| points to the
// first free slot on the stack to copy additional arguments to and
// |argc_in_out| is updated to include |count|.
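// For example, with count == 2 and three arguments already on the stack, the
// three existing slots are copied two slots down, |pointer_to_new_space_out|
// then points at the two freed slots above them, and |argc_in_out| becomes 5.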
void Generate_AllocateSpaceAndShiftExistingArguments(
    MacroAssembler* masm, Register count, Register argc_in_out,
    Register pointer_to_new_space_out, Register scratch1, Register scratch2) {
  DCHECK(!AreAliased(count, argc_in_out, pointer_to_new_space_out, scratch1,
                     scratch2));
  UseScratchRegisterScope temps(masm);
  Register old_sp = scratch1;
  Register new_space = scratch2;
  __ mov(old_sp, sp);
  __ lsl(new_space, count, Operand(kSystemPointerSizeLog2));
  __ AllocateStackSpace(new_space);

  Register end = scratch2;
  Register value = temps.Acquire();
  Register dest = pointer_to_new_space_out;
  __ mov(dest, sp);
  __ add(end, old_sp, Operand(argc_in_out, LSL, kSystemPointerSizeLog2));
  Label loop, done;
  __ bind(&loop);
  __ cmp(old_sp, end);
  if (kJSArgcIncludesReceiver) {
    __ b(ge, &done);
  } else {
    __ b(gt, &done);
  }
  __ ldr(value, MemOperand(old_sp, kSystemPointerSize, PostIndex));
  __ str(value, MemOperand(dest, kSystemPointerSize, PostIndex));
  __ b(&loop);
  __ bind(&done);

  // Update total number of arguments.
  __ add(argc_in_out, argc_in_out, count);
}

}  // namespace

// static
// TODO(v8:11615): Observe Code::kMaxArguments in CallOrConstructVarargs
void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
                                               Handle<Code> code) {
  // ----------- S t a t e -------------
  // -- r1 : target
  // -- r0 : number of parameters on the stack
  // -- r2 : arguments list (a FixedArray)
  // -- r4 : len (number of elements to push from args)
  // -- r3 : new.target (for [[Construct]])
  // -----------------------------------
  Register scratch = r8;

  if (FLAG_debug_code) {
    // Allow r2 to be a FixedArray, or a FixedDoubleArray if r4 == 0.
    Label ok, fail;
    __ AssertNotSmi(r2);
    __ ldr(scratch, FieldMemOperand(r2, HeapObject::kMapOffset));
    __ ldrh(r6, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
    __ cmp(r6, Operand(FIXED_ARRAY_TYPE));
    __ b(eq, &ok);
    __ cmp(r6, Operand(FIXED_DOUBLE_ARRAY_TYPE));
    __ b(ne, &fail);
    __ cmp(r4, Operand(0));
    __ b(eq, &ok);
    // Fall through.
    __ bind(&fail);
    __ Abort(AbortReason::kOperandIsNotAFixedArray);

    __ bind(&ok);
  }

  Label stack_overflow;
  __ StackOverflowCheck(r4, scratch, &stack_overflow);

  // Move the arguments already in the stack,
  // including the receiver and the return address.
  // r4: Number of arguments to make room for.
  // r0: Number of arguments already on the stack.
  // r9: Points to first free slot on the stack after arguments were shifted.
  Generate_AllocateSpaceAndShiftExistingArguments(masm, r4, r0, r9, r5, r6);

  // Copy arguments onto the stack (thisArgument is already on the stack).
  {
    __ mov(r6, Operand(0));
    __ LoadRoot(r5, RootIndex::kTheHoleValue);
    Label done, loop;
    __ bind(&loop);
    __ cmp(r6, r4);
    __ b(eq, &done);
    __ add(scratch, r2, Operand(r6, LSL, kTaggedSizeLog2));
    __ ldr(scratch, FieldMemOperand(scratch, FixedArray::kHeaderSize));
    __ cmp(scratch, r5);
    // Turn the hole into undefined as we go.
    __ LoadRoot(scratch, RootIndex::kUndefinedValue, eq);
    __ str(scratch, MemOperand(r9, kSystemPointerSize, PostIndex));
    __ add(r6, r6, Operand(1));
    __ b(&loop);
    __ bind(&done);
  }

  // Tail-call to the actual Call or Construct builtin.
  __ Jump(code, RelocInfo::CODE_TARGET);

  __ bind(&stack_overflow);
  __ TailCallRuntime(Runtime::kThrowStackOverflow);
}

// static
void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
                                                      CallOrConstructMode mode,
                                                      Handle<Code> code) {
  // ----------- S t a t e -------------
  // -- r0 : the number of arguments
  // -- r3 : the new.target (for [[Construct]] calls)
  // -- r1 : the target to call (can be any Object)
  // -- r2 : start index (to support rest parameters)
  // -----------------------------------

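  // This builtin forwards the caller's own stack arguments starting at the
  // start index; roughly, for function f(a, ...rest) { g(...rest); } invoked
  // as f(1, 2, 3), the start index is 1 and the arguments 2 and 3 are copied
  // from the caller frame onto this frame before tail-calling g.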
  Register scratch = r6;

  // Check if new.target has a [[Construct]] internal method.
  if (mode == CallOrConstructMode::kConstruct) {
    Label new_target_constructor, new_target_not_constructor;
    __ JumpIfSmi(r3, &new_target_not_constructor);
    __ ldr(scratch, FieldMemOperand(r3, HeapObject::kMapOffset));
    __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
    __ tst(scratch, Operand(Map::Bits1::IsConstructorBit::kMask));
    __ b(ne, &new_target_constructor);
    __ bind(&new_target_not_constructor);
    {
      FrameScope scope(masm, StackFrame::MANUAL);
      __ EnterFrame(StackFrame::INTERNAL);
      __ Push(r3);
      __ CallRuntime(Runtime::kThrowNotConstructor);
    }
    __ bind(&new_target_constructor);
  }

  Label stack_done, stack_overflow;
  __ ldr(r5, MemOperand(fp, StandardFrameConstants::kArgCOffset));
  if (kJSArgcIncludesReceiver) {
    __ sub(r5, r5, Operand(kJSArgcReceiverSlots));
  }
  __ sub(r5, r5, r2, SetCC);
  __ b(le, &stack_done);
  {
    // ----------- S t a t e -------------
    // -- r0 : the number of arguments already in the stack
    // -- r1 : the target to call (can be any Object)
    // -- r2 : start index (to support rest parameters)
    // -- r3 : the new.target (for [[Construct]] calls)
    // -- fp : points to the caller stack frame
    // -- r5 : number of arguments to copy, i.e. arguments count - start index
    // -----------------------------------

    // Check for stack overflow.
    __ StackOverflowCheck(r5, scratch, &stack_overflow);

    // Forward the arguments from the caller frame.
    // Point to the first argument to copy (skipping the receiver).
    __ add(r4, fp,
           Operand(CommonFrameConstants::kFixedFrameSizeAboveFp +
                   kSystemPointerSize));
    __ add(r4, r4, Operand(r2, LSL, kSystemPointerSizeLog2));

    // Move the arguments already in the stack,
    // including the receiver and the return address.
    // r5: Number of arguments to make room for.
    // r0: Number of arguments already on the stack.
    // r2: Points to first free slot on the stack after arguments were shifted.
    Generate_AllocateSpaceAndShiftExistingArguments(masm, r5, r0, r2, scratch,
                                                    r8);

    // Copy arguments from the caller frame.
    // TODO(victorgomes): Consider using forward order as potentially more cache
    // friendly.
    {
      Label loop;
      __ bind(&loop);
      {
        __ sub(r5, r5, Operand(1), SetCC);
        __ ldr(scratch, MemOperand(r4, r5, LSL, kSystemPointerSizeLog2));
        __ str(scratch, MemOperand(r2, r5, LSL, kSystemPointerSizeLog2));
        __ b(ne, &loop);
      }
    }
  }
  __ b(&stack_done);
  __ bind(&stack_overflow);
  __ TailCallRuntime(Runtime::kThrowStackOverflow);
  __ bind(&stack_done);

  // Tail-call to the {code} handler.
  __ Jump(code, RelocInfo::CODE_TARGET);
}

// static
void Builtins::Generate_CallFunction(MacroAssembler* masm,
                                     ConvertReceiverMode mode) {
  // ----------- S t a t e -------------
  // -- r0 : the number of arguments
  // -- r1 : the function to call (checked to be a JSFunction)
  // -----------------------------------
  __ AssertFunction(r1);

  __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));

  // Enter the context of the function; ToObject has to run in the function
  // context, and we also need to take the global proxy from the function
  // context in case of conversion.
  __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
  // We need to convert the receiver for non-native sloppy mode functions.
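  // In sloppy mode a primitive receiver is wrapped via ToObject and a
  // null/undefined receiver is replaced by the global proxy; e.g. calling a
  // sloppy function with this == "str" passes a String wrapper object, while
  // a strict or native function sees the value unchanged.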
  Label done_convert;
  __ ldr(r3, FieldMemOperand(r2, SharedFunctionInfo::kFlagsOffset));
  __ tst(r3, Operand(SharedFunctionInfo::IsNativeBit::kMask |
                     SharedFunctionInfo::IsStrictBit::kMask));
  __ b(ne, &done_convert);
  {
    // ----------- S t a t e -------------
    // -- r0 : the number of arguments
    // -- r1 : the function to call (checked to be a JSFunction)
    // -- r2 : the shared function info.
    // -- cp : the function context.
    // -----------------------------------

    if (mode == ConvertReceiverMode::kNullOrUndefined) {
      // Patch receiver to global proxy.
      __ LoadGlobalProxy(r3);
    } else {
      Label convert_to_object, convert_receiver;
      __ ldr(r3, __ ReceiverOperand(r0));
      __ JumpIfSmi(r3, &convert_to_object);
      STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
      __ CompareObjectType(r3, r4, r4, FIRST_JS_RECEIVER_TYPE);
      __ b(hs, &done_convert);
      if (mode != ConvertReceiverMode::kNotNullOrUndefined) {
        Label convert_global_proxy;
        __ JumpIfRoot(r3, RootIndex::kUndefinedValue, &convert_global_proxy);
        __ JumpIfNotRoot(r3, RootIndex::kNullValue, &convert_to_object);
        __ bind(&convert_global_proxy);
        {
          // Patch receiver to global proxy.
          __ LoadGlobalProxy(r3);
        }
        __ b(&convert_receiver);
      }
      __ bind(&convert_to_object);
      {
        // Convert receiver using ToObject.
        // TODO(bmeurer): Inline the allocation here to avoid building the frame
        // in the fast case? (fall back to AllocateInNewSpace?)
        FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
        __ SmiTag(r0);
        __ Push(r0, r1);
        __ mov(r0, r3);
        __ Push(cp);
        __ Call(BUILTIN_CODE(masm->isolate(), ToObject),
                RelocInfo::CODE_TARGET);
        __ Pop(cp);
        __ mov(r3, r0);
        __ Pop(r0, r1);
        __ SmiUntag(r0);
      }
      __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
      __ bind(&convert_receiver);
    }
    __ str(r3, __ ReceiverOperand(r0));
  }
  __ bind(&done_convert);

  // ----------- S t a t e -------------
  // -- r0 : the number of arguments
  // -- r1 : the function to call (checked to be a JSFunction)
  // -- r2 : the shared function info.
  // -- cp : the function context.
  // -----------------------------------

  __ ldrh(r2,
          FieldMemOperand(r2, SharedFunctionInfo::kFormalParameterCountOffset));
  __ InvokeFunctionCode(r1, no_reg, r2, r0, InvokeType::kJump);
}

namespace {

void Generate_PushBoundArguments(MacroAssembler* masm) {
  ASM_CODE_COMMENT(masm);
  // ----------- S t a t e -------------
  // -- r0 : the number of arguments
  // -- r1 : target (checked to be a JSBoundFunction)
  // -- r3 : new.target (only in case of [[Construct]])
  // -----------------------------------

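  // Bound arguments are inserted between the receiver and the call-site
  // arguments, so for f.bind(t, a, b) invoked as bound(x, y) the effective
  // call is f(a, b, x, y); the receiver t itself is patched in by the caller
  // of this helper.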
  // Load [[BoundArguments]] into r2 and length of that into r4.
  Label no_bound_arguments;
  __ ldr(r2, FieldMemOperand(r1, JSBoundFunction::kBoundArgumentsOffset));
  __ ldr(r4, FieldMemOperand(r2, FixedArray::kLengthOffset));
  __ SmiUntag(r4);
  __ cmp(r4, Operand(0));
  __ b(eq, &no_bound_arguments);
  {
    // ----------- S t a t e -------------
    // -- r0 : the number of arguments
    // -- r1 : target (checked to be a JSBoundFunction)
    // -- r2 : the [[BoundArguments]] (implemented as FixedArray)
    // -- r3 : new.target (only in case of [[Construct]])
    // -- r4 : the number of [[BoundArguments]]
    // -----------------------------------

    Register scratch = r6;

    {
      // Check the stack for overflow. We are not trying to catch interruptions
      // (i.e. debug break and preemption) here, so check the "real stack
      // limit".
      Label done;
      __ mov(scratch, Operand(r4, LSL, kSystemPointerSizeLog2));
      {
        UseScratchRegisterScope temps(masm);
        Register remaining_stack_size = temps.Acquire();
        DCHECK(!AreAliased(r0, r1, r2, r3, r4, scratch, remaining_stack_size));

        // Compute the space we have left. The stack might already be overflowed
        // here which will cause remaining_stack_size to become negative.
        __ LoadStackLimit(remaining_stack_size,
                          StackLimitKind::kRealStackLimit);
        __ sub(remaining_stack_size, sp, remaining_stack_size);

        // Check if the arguments will overflow the stack.
        __ cmp(remaining_stack_size, scratch);
      }
      __ b(gt, &done);
      {
        FrameScope scope(masm, StackFrame::MANUAL);
        __ EnterFrame(StackFrame::INTERNAL);
        __ CallRuntime(Runtime::kThrowStackOverflow);
      }
      __ bind(&done);
    }

    // Pop receiver.
    __ Pop(r5);

    // Push [[BoundArguments]].
    {
      Label loop;
      __ add(r0, r0, r4);  // Adjust effective number of arguments.
      __ add(r2, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
      __ bind(&loop);
      __ sub(r4, r4, Operand(1), SetCC);
      __ ldr(scratch, MemOperand(r2, r4, LSL, kTaggedSizeLog2));
      __ Push(scratch);
      __ b(gt, &loop);
    }

    // Push receiver.
    __ Push(r5);
  }
  __ bind(&no_bound_arguments);
}

}  // namespace

// static
void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  // -- r0 : the number of arguments
  // -- r1 : the function to call (checked to be a JSBoundFunction)
  // -----------------------------------
  __ AssertBoundFunction(r1);

  // Patch the receiver to [[BoundThis]].
  __ ldr(r3, FieldMemOperand(r1, JSBoundFunction::kBoundThisOffset));
  __ str(r3, __ ReceiverOperand(r0));

  // Push the [[BoundArguments]] onto the stack.
  Generate_PushBoundArguments(masm);

  // Call the [[BoundTargetFunction]] via the Call builtin.
  __ ldr(r1, FieldMemOperand(r1, JSBoundFunction::kBoundTargetFunctionOffset));
  __ Jump(BUILTIN_CODE(masm->isolate(), Call_ReceiverIsAny),
          RelocInfo::CODE_TARGET);
}

// static
void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
  // ----------- S t a t e -------------
  // -- r0 : the number of arguments
  // -- r1 : the target to call (can be any Object).
  // -----------------------------------
  Register argc = r0;
  Register target = r1;
  Register map = r4;
  Register instance_type = r5;
  DCHECK(!AreAliased(argc, target, map, instance_type));

  Label non_callable, class_constructor;
  __ JumpIfSmi(target, &non_callable);
  __ LoadMap(map, target);
  __ CompareInstanceTypeRange(map, instance_type,
                              FIRST_CALLABLE_JS_FUNCTION_TYPE,
                              LAST_CALLABLE_JS_FUNCTION_TYPE);
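  // CompareInstanceTypeRange leaves the flags set so that ls (unsigned
  // lower-or-same) holds exactly when the instance type falls inside the
  // callable-JSFunction range, making the next jump a range check.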
  __ Jump(masm->isolate()->builtins()->CallFunction(mode),
          RelocInfo::CODE_TARGET, ls);
  __ cmp(instance_type, Operand(JS_BOUND_FUNCTION_TYPE));
  __ Jump(BUILTIN_CODE(masm->isolate(), CallBoundFunction),
          RelocInfo::CODE_TARGET, eq);

  // Check if target has a [[Call]] internal method.
  {
    Register flags = r4;
    __ ldrb(flags, FieldMemOperand(map, Map::kBitFieldOffset));
    map = no_reg;
    __ tst(flags, Operand(Map::Bits1::IsCallableBit::kMask));
    __ b(eq, &non_callable);
  }

  // Check if target is a proxy and call the CallProxy builtin if so.
  __ cmp(instance_type, Operand(JS_PROXY_TYPE));
  __ Jump(BUILTIN_CODE(masm->isolate(), CallProxy), RelocInfo::CODE_TARGET, eq);

  // ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
  // Check that the function is not a "classConstructor".
  __ cmp(instance_type, Operand(JS_CLASS_CONSTRUCTOR_TYPE));
  __ b(eq, &class_constructor);

  // 2. Call to something else, which might have a [[Call]] internal method (if
  // not we raise an exception).
  // Overwrite the original receiver with the (original) target.
  __ str(target, __ ReceiverOperand(argc));
  // Let the "call_as_function_delegate" take care of the rest.
  __ LoadNativeContextSlot(target, Context::CALL_AS_FUNCTION_DELEGATE_INDEX);
  __ Jump(masm->isolate()->builtins()->CallFunction(
              ConvertReceiverMode::kNotNullOrUndefined),
          RelocInfo::CODE_TARGET);

  // 3. Call to something that is not callable.
  __ bind(&non_callable);
  {
    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
    __ Push(target);
    __ CallRuntime(Runtime::kThrowCalledNonCallable);
    __ Trap();  // Unreachable.
  }

  // 4. The function is a "classConstructor", need to raise an exception.
  __ bind(&class_constructor);
  {
    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
    __ Push(target);
    __ CallRuntime(Runtime::kThrowConstructorNonCallableError);
    __ Trap();  // Unreachable.
  }
}

// static
void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  // -- r0 : the number of arguments
  // -- r1 : the constructor to call (checked to be a JSFunction)
  // -- r3 : the new target (checked to be a constructor)
  // -----------------------------------
  __ AssertConstructor(r1);
  __ AssertFunction(r1);

  // The calling convention for function-specific ConstructStubs requires
  // r2 to contain either an AllocationSite or undefined.
  __ LoadRoot(r2, RootIndex::kUndefinedValue);

  Label call_generic_stub;

  // Jump to JSBuiltinsConstructStub or JSConstructStubGeneric.
  __ ldr(r4, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
  __ ldr(r4, FieldMemOperand(r4, SharedFunctionInfo::kFlagsOffset));
  __ tst(r4, Operand(SharedFunctionInfo::ConstructAsBuiltinBit::kMask));
  __ b(eq, &call_generic_stub);

  __ Jump(BUILTIN_CODE(masm->isolate(), JSBuiltinsConstructStub),
          RelocInfo::CODE_TARGET);

  __ bind(&call_generic_stub);
  __ Jump(BUILTIN_CODE(masm->isolate(), JSConstructStubGeneric),
          RelocInfo::CODE_TARGET);
}

// static
void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  // -- r0 : the number of arguments
  // -- r1 : the function to call (checked to be a JSBoundFunction)
  // -- r3 : the new target (checked to be a constructor)
  // -----------------------------------
  __ AssertConstructor(r1);
  __ AssertBoundFunction(r1);

  // Push the [[BoundArguments]] onto the stack.
  Generate_PushBoundArguments(masm);

  // Patch new.target to [[BoundTargetFunction]] if new.target equals target.
  __ cmp(r1, r3);
  __ ldr(r3, FieldMemOperand(r1, JSBoundFunction::kBoundTargetFunctionOffset),
         eq);

  // Construct the [[BoundTargetFunction]] via the Construct builtin.
  __ ldr(r1, FieldMemOperand(r1, JSBoundFunction::kBoundTargetFunctionOffset));
  __ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET);
}

// static
void Builtins::Generate_Construct(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  // -- r0 : the number of arguments
  // -- r1 : the constructor to call (can be any Object)
  // -- r3 : the new target (either the same as the constructor or
  //         the JSFunction on which new was invoked initially)
  // -----------------------------------
  Register argc = r0;
  Register target = r1;
  Register map = r4;
  Register instance_type = r5;
  DCHECK(!AreAliased(argc, target, map, instance_type));

  // Check if target is a Smi.
  Label non_constructor, non_proxy;
  __ JumpIfSmi(target, &non_constructor);

  // Check if target has a [[Construct]] internal method.
  __ ldr(map, FieldMemOperand(target, HeapObject::kMapOffset));
  {
    Register flags = r2;
    DCHECK(!AreAliased(argc, target, map, instance_type, flags));
    __ ldrb(flags, FieldMemOperand(map, Map::kBitFieldOffset));
    __ tst(flags, Operand(Map::Bits1::IsConstructorBit::kMask));
    __ b(eq, &non_constructor);
  }

  // Dispatch based on instance type.
  __ CompareInstanceTypeRange(map, instance_type, FIRST_JS_FUNCTION_TYPE,
                              LAST_JS_FUNCTION_TYPE);
  __ Jump(BUILTIN_CODE(masm->isolate(), ConstructFunction),
          RelocInfo::CODE_TARGET, ls);

  // Only dispatch to bound functions after checking whether they are
  // constructors.
  __ cmp(instance_type, Operand(JS_BOUND_FUNCTION_TYPE));
  __ Jump(BUILTIN_CODE(masm->isolate(), ConstructBoundFunction),
          RelocInfo::CODE_TARGET, eq);

  // Only dispatch to proxies after checking whether they are constructors.
  __ cmp(instance_type, Operand(JS_PROXY_TYPE));
  __ b(ne, &non_proxy);
  __ Jump(BUILTIN_CODE(masm->isolate(), ConstructProxy),
          RelocInfo::CODE_TARGET);

  // Called Construct on an exotic Object with a [[Construct]] internal method.
  __ bind(&non_proxy);
  {
    // Overwrite the original receiver with the (original) target.
    __ str(target, __ ReceiverOperand(argc));
    // Let the "call_as_constructor_delegate" take care of the rest.
    __ LoadNativeContextSlot(target,
                             Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX);
    __ Jump(masm->isolate()->builtins()->CallFunction(),
            RelocInfo::CODE_TARGET);
  }

  // Called Construct on an Object that doesn't have a [[Construct]] internal
  // method.
  __ bind(&non_constructor);
  __ Jump(BUILTIN_CODE(masm->isolate(), ConstructedNonConstructable),
          RelocInfo::CODE_TARGET);
}

#if V8_ENABLE_WEBASSEMBLY
void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
  // The function index was put in a register by the jump table trampoline.
  // Convert to Smi for the runtime call.
  __ SmiTag(kWasmCompileLazyFuncIndexRegister,
            kWasmCompileLazyFuncIndexRegister);
  {
    HardAbortScope hard_abort(masm);  // Avoid calls to Abort.
    FrameAndConstantPoolScope scope(masm, StackFrame::WASM_COMPILE_LAZY);

    // Save all parameter registers (see wasm-linkage.h). They might be
    // overwritten in the runtime call below. We don't have any callee-saved
    // registers in wasm, so no need to store anything else.
    RegList gp_regs = 0;
    for (Register gp_param_reg : wasm::kGpParamRegisters) {
      gp_regs |= gp_param_reg.bit();
    }
    DwVfpRegister lowest_fp_reg = std::begin(wasm::kFpParamRegisters)[0];
    DwVfpRegister highest_fp_reg = std::end(wasm::kFpParamRegisters)[-1];
    for (DwVfpRegister fp_param_reg : wasm::kFpParamRegisters) {
      CHECK(fp_param_reg.code() >= lowest_fp_reg.code() &&
            fp_param_reg.code() <= highest_fp_reg.code());
    }

    CHECK_EQ(NumRegs(gp_regs), arraysize(wasm::kGpParamRegisters));
    CHECK_EQ(highest_fp_reg.code() - lowest_fp_reg.code() + 1,
             arraysize(wasm::kFpParamRegisters));
    CHECK_EQ(NumRegs(gp_regs),
             WasmCompileLazyFrameConstants::kNumberOfSavedGpParamRegs);
    CHECK_EQ(highest_fp_reg.code() - lowest_fp_reg.code() + 1,
             WasmCompileLazyFrameConstants::kNumberOfSavedFpParamRegs);

    __ stm(db_w, sp, gp_regs);
    __ vstm(db_w, sp, lowest_fp_reg, highest_fp_reg);

    // Pass instance and function index as explicit arguments to the runtime
    // function.
    __ push(kWasmInstanceRegister);
    __ push(kWasmCompileLazyFuncIndexRegister);
    // Initialize the JavaScript context with 0. CEntry will use it to
    // set the current context on the isolate.
    __ Move(cp, Smi::zero());
    __ CallRuntime(Runtime::kWasmCompileLazy, 2);
    // The entrypoint address is the return value.
    __ mov(r8, kReturnRegister0);

    // Restore registers.
    __ vldm(ia_w, sp, lowest_fp_reg, highest_fp_reg);
    __ ldm(ia_w, sp, gp_regs);
  }
  // Finally, jump to the entrypoint.
  __ Jump(r8);
}

void Builtins::Generate_WasmDebugBreak(MacroAssembler* masm) {
  HardAbortScope hard_abort(masm);  // Avoid calls to Abort.
  {
    FrameAndConstantPoolScope scope(masm, StackFrame::WASM_DEBUG_BREAK);

    STATIC_ASSERT(DwVfpRegister::kNumRegisters == 32);
    constexpr uint32_t last =
        31 - base::bits::CountLeadingZeros32(
                 WasmDebugBreakFrameConstants::kPushedFpRegs);
    constexpr uint32_t first = base::bits::CountTrailingZeros32(
        WasmDebugBreakFrameConstants::kPushedFpRegs);
    static_assert(
        base::bits::CountPopulation(
            WasmDebugBreakFrameConstants::kPushedFpRegs) == last - first + 1,
        "All registers in the range from first to last have to be set");

    // Save all parameter registers. They might hold live values; we restore
    // them after the runtime call.
    constexpr DwVfpRegister lowest_fp_reg = DwVfpRegister::from_code(first);
    constexpr DwVfpRegister highest_fp_reg = DwVfpRegister::from_code(last);

    // Store gp parameter registers.
    __ stm(db_w, sp, WasmDebugBreakFrameConstants::kPushedGpRegs);
    // Store fp parameter registers.
    __ vstm(db_w, sp, lowest_fp_reg, highest_fp_reg);

    // Initialize the JavaScript context with 0. CEntry will use it to
    // set the current context on the isolate.
    __ Move(cp, Smi::zero());
    __ CallRuntime(Runtime::kWasmDebugBreak, 0);

    // Restore registers.
    __ vldm(ia_w, sp, lowest_fp_reg, highest_fp_reg);
    __ ldm(ia_w, sp, WasmDebugBreakFrameConstants::kPushedGpRegs);
  }
  __ Ret();
}

void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
  // TODO(v8:10701): Implement for this platform.
  __ Trap();
}

void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) {
  // Only needed on x64.
  __ Trap();
}
#endif  // V8_ENABLE_WEBASSEMBLY
2755
Generate_CEntry(MacroAssembler * masm,int result_size,SaveFPRegsMode save_doubles,ArgvMode argv_mode,bool builtin_exit_frame)2756 void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
2757 SaveFPRegsMode save_doubles, ArgvMode argv_mode,
2758 bool builtin_exit_frame) {
2759 // Called from JavaScript; parameters are on stack as if calling JS function.
2760 // r0: number of arguments including receiver
2761 // r1: pointer to builtin function
2762 // fp: frame pointer (restored after C call)
2763 // sp: stack pointer (restored as callee's sp after C call)
2764 // cp: current context (C callee-saved)
2765 //
2766 // If argv_mode == ArgvMode::kRegister:
2767 // r2: pointer to the first argument
2768
2769 __ mov(r5, Operand(r1));
2770
2771 if (argv_mode == ArgvMode::kRegister) {
2772 // Move argv into the correct register.
2773 __ mov(r1, Operand(r2));
2774 } else {
2775 // Compute the argv pointer in a callee-saved register.
2776 __ add(r1, sp, Operand(r0, LSL, kPointerSizeLog2));
2777 __ sub(r1, r1, Operand(kPointerSize));
2778 }
2779
2780 // Enter the exit frame that transitions from JavaScript to C++.
2781 FrameScope scope(masm, StackFrame::MANUAL);
2782 __ EnterExitFrame(
2783 save_doubles == SaveFPRegsMode::kSave, 0,
2784 builtin_exit_frame ? StackFrame::BUILTIN_EXIT : StackFrame::EXIT);
2785
2786 // Store a copy of argc in callee-saved registers for later.
2787 __ mov(r4, Operand(r0));
2788
2789 // r0, r4: number of arguments including receiver (C callee-saved)
2790 // r1: pointer to the first argument (C callee-saved)
2791 // r5: pointer to builtin function (C callee-saved)
2792
2793 #if V8_HOST_ARCH_ARM
2794 int frame_alignment = MacroAssembler::ActivationFrameAlignment();
2795 int frame_alignment_mask = frame_alignment - 1;
2796 if (FLAG_debug_code) {
2797 if (frame_alignment > kPointerSize) {
2798 Label alignment_as_expected;
2799 DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
2800 __ tst(sp, Operand(frame_alignment_mask));
2801 __ b(eq, &alignment_as_expected);
2802 // Don't use Check here, as it will call Runtime_Abort re-entering here.
2803 __ stop();
2804 __ bind(&alignment_as_expected);
2805 }
2806 }
2807 #endif
2808
2809 // Call C built-in.
2810 // r0 = argc, r1 = argv, r2 = isolate
2811 __ Move(r2, ExternalReference::isolate_address(masm->isolate()));
2812 __ StoreReturnAddressAndCall(r5);

  // Result returned in r0 or r1:r0 - do not destroy these registers!

  // Check result for exception sentinel.
  Label exception_returned;
  __ CompareRoot(r0, RootIndex::kException);
  __ b(eq, &exception_returned);

  // Check that there is no pending exception; otherwise we should have
  // returned the exception sentinel.
  if (FLAG_debug_code) {
    Label okay;
    ExternalReference pending_exception_address = ExternalReference::Create(
        IsolateAddressId::kPendingExceptionAddress, masm->isolate());
    __ Move(r3, pending_exception_address);
    __ ldr(r3, MemOperand(r3));
    __ CompareRoot(r3, RootIndex::kTheHoleValue);
    // Cannot use Check here, as it attempts to generate a call into the
    // runtime.
    __ b(eq, &okay);
    __ stop();
    __ bind(&okay);
  }

  // Exit C frame and return.
  // r0:r1: result
  // sp: stack pointer
  // fp: frame pointer
  Register argc = argv_mode == ArgvMode::kRegister
                      // We don't want to pop arguments so set argc to no_reg.
                      ? no_reg
                      // Callee-saved register r4 still holds argc.
                      : r4;
  __ LeaveExitFrame(save_doubles == SaveFPRegsMode::kSave, argc);
  __ mov(pc, lr);

  // Handling of exception.
  __ bind(&exception_returned);

  ExternalReference pending_handler_context_address = ExternalReference::Create(
      IsolateAddressId::kPendingHandlerContextAddress, masm->isolate());
  ExternalReference pending_handler_entrypoint_address =
      ExternalReference::Create(
          IsolateAddressId::kPendingHandlerEntrypointAddress, masm->isolate());
  ExternalReference pending_handler_fp_address = ExternalReference::Create(
      IsolateAddressId::kPendingHandlerFPAddress, masm->isolate());
  ExternalReference pending_handler_sp_address = ExternalReference::Create(
      IsolateAddressId::kPendingHandlerSPAddress, masm->isolate());

  // Ask the runtime for help to determine the handler. This will set r0 to
  // contain the current pending exception; don't clobber it.
  ExternalReference find_handler =
      ExternalReference::Create(Runtime::kUnwindAndFindExceptionHandler);
  {
    FrameScope scope(masm, StackFrame::MANUAL);
    __ PrepareCallCFunction(3, 0);
    __ mov(r0, Operand(0));
    __ mov(r1, Operand(0));
    __ Move(r2, ExternalReference::isolate_address(masm->isolate()));
    __ CallCFunction(find_handler, 3);
  }

  // Retrieve the handler context, SP and FP.
  __ Move(cp, pending_handler_context_address);
  __ ldr(cp, MemOperand(cp));
  __ Move(sp, pending_handler_sp_address);
  __ ldr(sp, MemOperand(sp));
  __ Move(fp, pending_handler_fp_address);
  __ ldr(fp, MemOperand(fp));

  // If the handler is a JS frame, restore the context to the frame. Note that
  // the context will be set to (cp == 0) for non-JS frames.
  __ cmp(cp, Operand(0));
  __ str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);

  // Clear c_entry_fp, like we do in `LeaveExitFrame`.
  {
    UseScratchRegisterScope temps(masm);
    Register scratch = temps.Acquire();
    __ Move(scratch, ExternalReference::Create(
                         IsolateAddressId::kCEntryFPAddress, masm->isolate()));
    __ mov(r1, Operand::Zero());
    __ str(r1, MemOperand(scratch));
  }

  // Compute the handler entry address and jump to it.
  ConstantPoolUnavailableScope constant_pool_unavailable(masm);
  __ Move(r1, pending_handler_entrypoint_address);
  __ ldr(r1, MemOperand(r1));
  __ Jump(r1);
}
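
// For orientation, the exception path above does, in C-like pseudocode
// (illustrative sketch only; the pending_handler_* slots live on the
// isolate):
//
//   UnwindAndFindExceptionHandler(nullptr, nullptr, isolate);
//   cp = *pending_handler_context;  // 0 for non-JS handler frames.
//   sp = *pending_handler_sp;
//   fp = *pending_handler_fp;
//   if (cp != 0) frame_at(fp)->context = cp;
//   *c_entry_fp = 0;                // As LeaveExitFrame would do.
//   jump(*pending_handler_entrypoint);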

void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
  Label negate, done;

  HardAbortScope hard_abort(masm);  // Avoid calls to Abort.
  UseScratchRegisterScope temps(masm);
  Register result_reg = r7;
  Register double_low = GetRegisterThatIsNotOneOf(result_reg);
  Register double_high = GetRegisterThatIsNotOneOf(result_reg, double_low);
  LowDwVfpRegister double_scratch = temps.AcquireLowD();

  // Save the old values from these temporary registers on the stack.
  __ Push(result_reg, double_high, double_low);

  // Account for saved regs.
  const int kArgumentOffset = 3 * kPointerSize;

  MemOperand input_operand(sp, kArgumentOffset);
  MemOperand result_operand = input_operand;

  // Load double input.
  __ vldr(double_scratch, input_operand);
  __ vmov(double_low, double_high, double_scratch);
  // Try to convert with a FPU convert instruction. This handles all
  // non-saturating cases.
  __ TryInlineTruncateDoubleToI(result_reg, double_scratch, &done);

  Register scratch = temps.Acquire();
  __ Ubfx(scratch, double_high, HeapNumber::kExponentShift,
          HeapNumber::kExponentBits);
  // Load scratch with exponent - 1. This is faster than loading the exponent
  // itself because Bias + 1 = 1024, which is an ARM immediate value.
  STATIC_ASSERT(HeapNumber::kExponentBias + 1 == 1024);
  __ sub(scratch, scratch, Operand(HeapNumber::kExponentBias + 1));
  // If the exponent is greater than or equal to 84, the 32 least significant
  // bits of the integer value are all zero (the 52 mantissa bits are followed
  // by at least 32 uncoded zero bits), so the int32 result is 0.
  // Compare the exponent with 84 (compare exponent - 1 with 83). If it is at
  // least 84, the conversion is out of range, so return zero.
  __ cmp(scratch, Operand(83));
  __ mov(result_reg, Operand::Zero(), LeaveCC, ge);
  __ b(ge, &done);
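
  // Worked example (illustrative): for input 2^85 the unbiased exponent is
  // 85, so the low 32 bits of the integer value are all zero and the result
  // is 0. For input 2^31 + 1 the exponent is 31; the FPU truncation above
  // saturates, and the code below instead reconstructs the low 32 bits
  // (0x80000001) from the mantissa.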

  // If we reach this code, 30 <= exponent <= 83.
  // `TryInlineTruncateDoubleToI` above will have truncated any double with an
  // exponent lower than 30.
  if (FLAG_debug_code) {
    // Scratch is exponent - 1.
    __ cmp(scratch, Operand(30 - 1));
    __ Check(ge, AbortReason::kUnexpectedValue);
  }

  // We don't have to handle cases where 0 <= exponent <= 20, for which we
  // would need to shift right the high part of the mantissa.
  // Scratch contains exponent - 1.
  // Load scratch with 52 - exponent (load with 51 - (exponent - 1)).
  __ rsb(scratch, scratch, Operand(51), SetCC);

  // 52 <= exponent <= 83, shift only double_low.
  // On entry, scratch contains: 52 - exponent.
  __ rsb(scratch, scratch, Operand::Zero(), LeaveCC, ls);
  __ mov(result_reg, Operand(double_low, LSL, scratch), LeaveCC, ls);
  __ b(ls, &negate);

  // 21 <= exponent <= 51, shift double_low and double_high
  // to generate the result.
  __ mov(double_low, Operand(double_low, LSR, scratch));
  // Scratch contains: 52 - exponent.
  // We need: exponent - 20.
  // So we use: 32 - scratch = 32 - 52 + exponent = exponent - 20.
  __ rsb(scratch, scratch, Operand(32));
  __ Ubfx(result_reg, double_high, 0, HeapNumber::kMantissaBitsInTopWord);
  // Set the implicit 1 before the mantissa part in double_high.
  __ orr(result_reg, result_reg,
         Operand(1 << HeapNumber::kMantissaBitsInTopWord));
  __ orr(result_reg, double_low, Operand(result_reg, LSL, scratch));

  __ bind(&negate);
  // If the input was positive, double_high ASR 31 equals 0 and
  // double_high LSR 31 equals 0:
  //   new result = (result eor 0) + 0 = result.
  // If the input was negative, double_high ASR 31 equals 0xFFFFFFFF and
  // double_high LSR 31 equals 1:
  //   new result = (result eor 0xFFFFFFFF) + 1 = 0 - result.
  __ eor(result_reg, result_reg, Operand(double_high, ASR, 31));
  __ add(result_reg, result_reg, Operand(double_high, LSR, 31));

  __ bind(&done);
  __ str(result_reg, result_operand);

  // Restore registers corrupted in this routine and return.
  __ Pop(result_reg, double_high, double_low);
  __ Ret();
}
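
// For reference, a minimal C-level sketch of the same modular truncation
// (illustrative only, not part of the build; TruncateDoubleToInt32 is a
// hypothetical name; assumes IEEE-754 doubles, <stdint.h>/<string.h>, and
// 32-bit unsigned arithmetic):
//
//   int32_t TruncateDoubleToInt32(double input) {
//     uint64_t bits;
//     memcpy(&bits, &input, sizeof(bits));
//     uint32_t lo = (uint32_t)bits;          // double_low
//     uint32_t hi = (uint32_t)(bits >> 32);  // double_high
//     int exponent = (int)((hi >> 20) & 0x7FF) - 1023;
//     if (exponent <= 30) return (int32_t)input;  // In int32 range; the
//                                                 // inline truncation works.
//     uint32_t result;
//     if (exponent > 83) {
//       result = 0;                      // Low 32 bits are all zero.
//     } else if (exponent >= 52) {
//       result = lo << (exponent - 52);  // Shift only double_low.
//     } else {                           // 31 <= exponent <= 51.
//       uint32_t mantissa_hi = (hi & 0xFFFFF) | (1u << 20);
//       result = (lo >> (52 - exponent)) | (mantissa_hi << (exponent - 20));
//     }
//     if (hi >> 31) result = 0u - result;  // Negate for negative inputs.
//     return (int32_t)result;
//   }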

namespace {

int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
  return ref0.address() - ref1.address();
}

// Calls an API function. Allocates a HandleScope, extracts the returned value
// from the handle, and propagates exceptions. Restores the context.
// stack_space is the space to be unwound on exit (it includes the JS call
// arguments' space and the additional space allocated for the fast call).
void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address,
                              ExternalReference thunk_ref, int stack_space,
                              MemOperand* stack_space_operand,
                              MemOperand return_value_operand) {
  ASM_CODE_COMMENT(masm);
  Isolate* isolate = masm->isolate();
  ExternalReference next_address =
      ExternalReference::handle_scope_next_address(isolate);
  const int kNextOffset = 0;
  const int kLimitOffset = AddressOffset(
      ExternalReference::handle_scope_limit_address(isolate), next_address);
  const int kLevelOffset = AddressOffset(
      ExternalReference::handle_scope_level_address(isolate), next_address);

  DCHECK(function_address == r1 || function_address == r2);

  Label profiler_enabled, end_profiler_check;
  __ Move(r9, ExternalReference::is_profiling_address(isolate));
  __ ldrb(r9, MemOperand(r9, 0));
  __ cmp(r9, Operand(0));
  __ b(ne, &profiler_enabled);
  __ Move(r9, ExternalReference::address_of_runtime_stats_flag());
  __ ldr(r9, MemOperand(r9, 0));
  __ cmp(r9, Operand(0));
  __ b(ne, &profiler_enabled);
  {
    // Call the api function directly.
    __ Move(r3, function_address);
    __ b(&end_profiler_check);
  }
  __ bind(&profiler_enabled);
  {
    // Additional parameter is the address of the actual callback.
    __ Move(r3, thunk_ref);
  }
  __ bind(&end_profiler_check);

  // Allocate HandleScope in callee-save registers.
  __ Move(r9, next_address);
  __ ldr(r4, MemOperand(r9, kNextOffset));
  __ ldr(r5, MemOperand(r9, kLimitOffset));
  __ ldr(r6, MemOperand(r9, kLevelOffset));
  __ add(r6, r6, Operand(1));
  __ str(r6, MemOperand(r9, kLevelOffset));

  __ StoreReturnAddressAndCall(r3);

  Label promote_scheduled_exception;
  Label delete_allocated_handles;
  Label leave_exit_frame;
  Label return_value_loaded;

  // Load value from ReturnValue.
  __ ldr(r0, return_value_operand);
  __ bind(&return_value_loaded);
  // No more valid handles (the result handle was the last one). Restore
  // previous handle scope.
  __ str(r4, MemOperand(r9, kNextOffset));
  if (FLAG_debug_code) {
    __ ldr(r1, MemOperand(r9, kLevelOffset));
    __ cmp(r1, r6);
    __ Check(eq, AbortReason::kUnexpectedLevelAfterReturnFromApiCall);
  }
  __ sub(r6, r6, Operand(1));
  __ str(r6, MemOperand(r9, kLevelOffset));
  __ ldr(r6, MemOperand(r9, kLimitOffset));
  __ cmp(r5, r6);
  __ b(ne, &delete_allocated_handles);

  // Leave the API exit frame.
  __ bind(&leave_exit_frame);
  // LeaveExitFrame expects unwind space to be in a register.
  if (stack_space_operand == nullptr) {
    DCHECK_NE(stack_space, 0);
    __ mov(r4, Operand(stack_space));
  } else {
    DCHECK_EQ(stack_space, 0);
    __ ldr(r4, *stack_space_operand);
  }
  __ LeaveExitFrame(false, r4, stack_space_operand != nullptr);

  // Check if the function scheduled an exception.
  __ LoadRoot(r4, RootIndex::kTheHoleValue);
  __ Move(r6, ExternalReference::scheduled_exception_address(isolate));
  __ ldr(r5, MemOperand(r6));
  __ cmp(r4, r5);
  __ b(ne, &promote_scheduled_exception);

  __ mov(pc, lr);

  // Re-throw by promoting a scheduled exception.
  __ bind(&promote_scheduled_exception);
  __ TailCallRuntime(Runtime::kPromoteScheduledException);

  // HandleScope limit has changed. Delete allocated extensions.
  __ bind(&delete_allocated_handles);
  __ str(r5, MemOperand(r9, kLimitOffset));
  __ mov(r4, r0);
  __ PrepareCallCFunction(1);
  __ Move(r0, ExternalReference::isolate_address(isolate));
  __ CallCFunction(ExternalReference::delete_handle_scope_extensions(), 1);
  __ mov(r0, r4);
  __ jmp(&leave_exit_frame);
}
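
// The HandleScope bookkeeping above is, in C-like pseudocode (illustrative
// sketch; the struct and field names here are assumptions for exposition,
// not the real V8 declarations):
//
//   struct HandleScopeData { Address* next; Address* limit; int level; };
//
//   prev_next = data->next;    // r4
//   prev_limit = data->limit;  // r5
//   data->level++;             // r6
//   ... call the API function ...
//   result = *return_value_slot;
//   data->next = prev_next;
//   data->level--;
//   if (data->limit != prev_limit) DeleteExtensions(isolate);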

}  // namespace

void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- cp                  : context
  //  -- r1                  : api function address
  //  -- r2                  : arguments count (not including the receiver)
  //  -- r3                  : call data
  //  -- r0                  : holder
  //  -- sp[0]               : receiver
  //  -- sp[4]               : first argument
  //  -- ...
  //  -- sp[(argc) * 4]      : last argument
  // -----------------------------------

  Register api_function_address = r1;
  Register argc = r2;
  Register call_data = r3;
  Register holder = r0;
  Register scratch = r4;

  DCHECK(!AreAliased(api_function_address, argc, call_data, holder, scratch));

  using FCA = FunctionCallbackArguments;

  STATIC_ASSERT(FCA::kArgsLength == 6);
  STATIC_ASSERT(FCA::kNewTargetIndex == 5);
  STATIC_ASSERT(FCA::kDataIndex == 4);
  STATIC_ASSERT(FCA::kReturnValueOffset == 3);
  STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
  STATIC_ASSERT(FCA::kIsolateIndex == 1);
  STATIC_ASSERT(FCA::kHolderIndex == 0);

  // Set up FunctionCallbackInfo's implicit_args on the stack as follows:
  //
  // Target state:
  //   sp[0 * kPointerSize]: kHolder
  //   sp[1 * kPointerSize]: kIsolate
  //   sp[2 * kPointerSize]: undefined (kReturnValueDefaultValue)
  //   sp[3 * kPointerSize]: undefined (kReturnValue)
  //   sp[4 * kPointerSize]: kData
  //   sp[5 * kPointerSize]: undefined (kNewTarget)

  // Reserve space on the stack.
  __ AllocateStackSpace(FCA::kArgsLength * kPointerSize);

  // kHolder.
  __ str(holder, MemOperand(sp, 0 * kPointerSize));

  // kIsolate.
  __ Move(scratch, ExternalReference::isolate_address(masm->isolate()));
  __ str(scratch, MemOperand(sp, 1 * kPointerSize));

  // kReturnValueDefaultValue and kReturnValue.
  __ LoadRoot(scratch, RootIndex::kUndefinedValue);
  __ str(scratch, MemOperand(sp, 2 * kPointerSize));
  __ str(scratch, MemOperand(sp, 3 * kPointerSize));

  // kData.
  __ str(call_data, MemOperand(sp, 4 * kPointerSize));

  // kNewTarget.
  __ str(scratch, MemOperand(sp, 5 * kPointerSize));

  // Keep a pointer to kHolder (= implicit_args) in a scratch register.
  // We use it below to set up the FunctionCallbackInfo object.
  __ mov(scratch, sp);

  // Allocate the v8::Arguments structure in the arguments' space, since
  // it's not controlled by GC.
  static constexpr int kApiStackSpace = 4;
  static constexpr bool kDontSaveDoubles = false;
  FrameScope frame_scope(masm, StackFrame::MANUAL);
  __ EnterExitFrame(kDontSaveDoubles, kApiStackSpace);

  // FunctionCallbackInfo::implicit_args_ (points at kHolder as set up above).
  // Arguments are after the return address (pushed by EnterExitFrame()).
  __ str(scratch, MemOperand(sp, 1 * kPointerSize));

  // FunctionCallbackInfo::values_ (points at the first varargs argument passed
  // on the stack).
  __ add(scratch, scratch, Operand((FCA::kArgsLength + 1) * kPointerSize));
  __ str(scratch, MemOperand(sp, 2 * kPointerSize));

  // FunctionCallbackInfo::length_.
  __ str(argc, MemOperand(sp, 3 * kPointerSize));

  // We also store the number of bytes to drop from the stack after returning
  // from the API function here.
  __ mov(scratch,
         Operand((FCA::kArgsLength + 1 /* receiver */) * kPointerSize));
  __ add(scratch, scratch, Operand(argc, LSL, kPointerSizeLog2));
  __ str(scratch, MemOperand(sp, 4 * kPointerSize));
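
  // Illustrative arithmetic: with FCA::kArgsLength == 6 and 4-byte pointers,
  // an argc of 2 stores (6 + 1) * 4 + 2 * 4 = 36 bytes to drop: the six
  // implicit_args slots, the receiver, and the two JS arguments.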

  // v8::InvocationCallback's argument.
  __ add(r0, sp, Operand(1 * kPointerSize));

  ExternalReference thunk_ref = ExternalReference::invoke_function_callback();

  // There are two stack slots above the arguments we constructed on the stack.
  // TODO(jgruber): Document what these arguments are.
  static constexpr int kStackSlotsAboveFCA = 2;
  MemOperand return_value_operand(
      fp, (kStackSlotsAboveFCA + FCA::kReturnValueOffset) * kPointerSize);

  static constexpr int kUseStackSpaceOperand = 0;
  MemOperand stack_space_operand(sp, 4 * kPointerSize);

  AllowExternalCallThatCantCauseGC scope(masm);
  CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
                           kUseStackSpaceOperand, &stack_space_operand,
                           return_value_operand);
}

void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
  // Build v8::PropertyCallbackInfo::args_ array on the stack and push the
  // property name below the exit frame to make the GC aware of them.
  STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
  STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
  STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
  STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
  STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
  STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);

  Register receiver = ApiGetterDescriptor::ReceiverRegister();
  Register holder = ApiGetterDescriptor::HolderRegister();
  Register callback = ApiGetterDescriptor::CallbackRegister();
  Register scratch = r4;
  DCHECK(!AreAliased(receiver, holder, callback, scratch));

  Register api_function_address = r2;

  __ push(receiver);
  // Push data from AccessorInfo.
  __ ldr(scratch, FieldMemOperand(callback, AccessorInfo::kDataOffset));
  __ push(scratch);
  __ LoadRoot(scratch, RootIndex::kUndefinedValue);
  __ Push(scratch, scratch);
  __ Move(scratch, ExternalReference::isolate_address(masm->isolate()));
  __ Push(scratch, holder);
  __ Push(Smi::zero());  // should_throw_on_error -> false
  __ ldr(scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset));
  __ push(scratch);
  // v8::PropertyCallbackInfo::args_ array and name handle.
  const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;

  // Load address of v8::PropertyAccessorInfo::args_ array and name handle.
  __ mov(r0, sp);                             // r0 = Handle<Name>
  __ add(r1, r0, Operand(1 * kPointerSize));  // r1 = v8::PCI::args_

  const int kApiStackSpace = 1;
  FrameScope frame_scope(masm, StackFrame::MANUAL);
  __ EnterExitFrame(false, kApiStackSpace);

  // Create v8::PropertyCallbackInfo object on the stack and initialize
  // its args_ field.
  __ str(r1, MemOperand(sp, 1 * kPointerSize));
  __ add(r1, sp, Operand(1 * kPointerSize));  // r1 = v8::PropertyCallbackInfo&

  ExternalReference thunk_ref =
      ExternalReference::invoke_accessor_getter_callback();

  __ ldr(scratch, FieldMemOperand(callback, AccessorInfo::kJsGetterOffset));
  __ ldr(api_function_address,
         FieldMemOperand(scratch, Foreign::kForeignAddressOffset));

  // +3 is to skip prolog, return address and name handle.
  MemOperand return_value_operand(
      fp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
  MemOperand* const kUseStackSpaceConstant = nullptr;
  CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
                           kStackUnwindSpace, kUseStackSpaceConstant,
                           return_value_operand);
}

void Builtins::Generate_DirectCEntry(MacroAssembler* masm) {
  // The sole purpose of DirectCEntry is for movable callers (e.g. any general
  // purpose Code object) to be able to call into C functions that may trigger
  // GC and thus move the caller.
  //
  // DirectCEntry places the return address on the stack (updated by the GC),
  // making the call GC safe. The irregexp backend relies on this.

  __ str(lr, MemOperand(sp, 0));  // Store the return address.
  __ blx(ip);                     // Call the C++ function.
  __ ldr(pc, MemOperand(sp, 0));  // Return to calling code.
}

void Builtins::Generate_MemCopyUint8Uint8(MacroAssembler* masm) {
  Register dest = r0;
  Register src = r1;
  Register chars = r2;
  Register temp1 = r3;
  Label less_4;

  {
    UseScratchRegisterScope temps(masm);
    Register temp2 = temps.Acquire();
    Label loop;

    __ bic(temp2, chars, Operand(0x3), SetCC);
    __ b(&less_4, eq);
    __ add(temp2, dest, temp2);

    __ bind(&loop);
    __ ldr(temp1, MemOperand(src, 4, PostIndex));
    __ str(temp1, MemOperand(dest, 4, PostIndex));
    __ cmp(dest, temp2);
    __ b(&loop, ne);
  }

  __ bind(&less_4);
  __ mov(chars, Operand(chars, LSL, 31), SetCC);
  // bit0 => Z (ne), bit1 => C (cs)
  __ ldrh(temp1, MemOperand(src, 2, PostIndex), cs);
  __ strh(temp1, MemOperand(dest, 2, PostIndex), cs);
  __ ldrb(temp1, MemOperand(src), ne);
  __ strb(temp1, MemOperand(dest), ne);
  __ Ret();
}
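
// The predicated tail above copies the remaining 0-3 bytes without branches:
// `mov chars, chars LSL 31` with SetCC moves bit 1 of `chars` into the carry
// flag (cs) and clears Z iff bit 0 was set (ne). A plain C sketch of the
// whole routine (illustrative only; assumes non-overlapping buffers):
//
//   void MemCopyUint8Uint8(uint8_t* dest, const uint8_t* src, size_t chars) {
//     size_t words = chars & ~(size_t)3;
//     for (size_t i = 0; i < words; i += 4) {
//       uint32_t w;
//       memcpy(&w, src + i, sizeof(w));   // ldr
//       memcpy(dest + i, &w, sizeof(w));  // str
//     }
//     src += words;
//     dest += words;
//     if (chars & 2) { memcpy(dest, src, 2); src += 2; dest += 2; }  // cs
//     if (chars & 1) *dest = *src;                                   // ne
//   }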

namespace {

// This code tries to be close to ia32 code so that any changes can be
// easily ported.
void Generate_DeoptimizationEntry(MacroAssembler* masm,
                                  DeoptimizeKind deopt_kind) {
  Isolate* isolate = masm->isolate();

  // Note: This is an overapproximation; we always reserve space for 32 double
  // registers, even though the actual CPU may only support 16. In the latter
  // case, SaveFPRegs and RestoreFPRegs still use 32 stack slots, but only fill
  // 16.
  static constexpr int kDoubleRegsSize =
      kDoubleSize * DwVfpRegister::kNumRegisters;

  // Save all allocatable VFP registers before messing with them.
  {
    UseScratchRegisterScope temps(masm);
    Register scratch = temps.Acquire();
    __ SaveFPRegs(sp, scratch);
  }

  // Save all general purpose registers before messing with them.
  static constexpr int kNumberOfRegisters = Register::kNumRegisters;
  STATIC_ASSERT(kNumberOfRegisters == 16);

  // Everything but pc, lr and ip which will be saved but not restored.
  RegList restored_regs = kJSCallerSaved | kCalleeSaved | ip.bit();

  // Push all 16 registers (needed to populate FrameDescription::registers_).
  // TODO(v8:1588): Note that using pc with stm is deprecated, so we should
  // perhaps handle this a bit differently.
  __ stm(db_w, sp, restored_regs | sp.bit() | lr.bit() | pc.bit());

  {
    UseScratchRegisterScope temps(masm);
    Register scratch = temps.Acquire();
    __ Move(scratch, ExternalReference::Create(
                         IsolateAddressId::kCEntryFPAddress, isolate));
    __ str(fp, MemOperand(scratch));
  }

  static constexpr int kSavedRegistersAreaSize =
      (kNumberOfRegisters * kPointerSize) + kDoubleRegsSize;

  __ mov(r2, Operand(Deoptimizer::kFixedExitSizeMarker));
  // Get the address of the location in the code object (r3) (return
  // address for lazy deoptimization) and compute the fp-to-sp delta in
  // register r4.
  __ mov(r3, lr);
  __ add(r4, sp, Operand(kSavedRegistersAreaSize));
  __ sub(r4, fp, r4);

  // Allocate a new deoptimizer object.
  // Pass four arguments in r0 to r3 and the fifth argument on the stack.
  __ PrepareCallCFunction(6);
  __ mov(r0, Operand(0));
  Label context_check;
  __ ldr(r1, MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
  __ JumpIfSmi(r1, &context_check);
  __ ldr(r0, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
  __ bind(&context_check);
  __ mov(r1, Operand(static_cast<int>(deopt_kind)));
  // r2: bailout id already loaded.
  // r3: code address or 0 already loaded.
  __ str(r4, MemOperand(sp, 0 * kPointerSize));  // Fp-to-sp delta.
  __ Move(r5, ExternalReference::isolate_address(isolate));
  __ str(r5, MemOperand(sp, 1 * kPointerSize));  // Isolate.
  // Call Deoptimizer::New().
  {
    AllowExternalCallThatCantCauseGC scope(masm);
    __ CallCFunction(ExternalReference::new_deoptimizer_function(), 6);
  }

  // Preserve the "deoptimizer" object in register r0 and get the input
  // frame descriptor pointer into r1 (deoptimizer->input_).
  __ ldr(r1, MemOperand(r0, Deoptimizer::input_offset()));

  // Copy core registers into FrameDescription::registers_.
  DCHECK_EQ(Register::kNumRegisters, kNumberOfRegisters);
  for (int i = 0; i < kNumberOfRegisters; i++) {
    int offset = (i * kPointerSize) + FrameDescription::registers_offset();
    __ ldr(r2, MemOperand(sp, i * kPointerSize));
    __ str(r2, MemOperand(r1, offset));
  }

  // Copy double registers to double_registers_.
  static constexpr int kDoubleRegsOffset =
      FrameDescription::double_registers_offset();
  {
    UseScratchRegisterScope temps(masm);
    Register scratch = temps.Acquire();
    Register src_location = r4;
    __ add(src_location, sp, Operand(kNumberOfRegisters * kPointerSize));
    __ RestoreFPRegs(src_location, scratch);

    Register dst_location = r4;
    __ add(dst_location, r1, Operand(kDoubleRegsOffset));
    __ SaveFPRegsToHeap(dst_location, scratch);
  }

  // Mark the stack as not iterable for the CPU profiler, which won't be able
  // to walk the stack without the return address.
  {
    UseScratchRegisterScope temps(masm);
    Register is_iterable = temps.Acquire();
    Register zero = r4;
    __ Move(is_iterable, ExternalReference::stack_is_iterable_address(isolate));
    __ mov(zero, Operand(0));
    __ strb(zero, MemOperand(is_iterable));
  }

  // Remove the saved registers from the stack.
  __ add(sp, sp, Operand(kSavedRegistersAreaSize));

  // Compute a pointer to the unwinding limit in register r2; that is
  // the first stack slot not part of the input frame.
  __ ldr(r2, MemOperand(r1, FrameDescription::frame_size_offset()));
  __ add(r2, r2, sp);

  // Unwind the stack down to - but not including - the unwinding
  // limit and copy the contents of the activation frame to the input
  // frame description.
  __ add(r3, r1, Operand(FrameDescription::frame_content_offset()));
  Label pop_loop;
  Label pop_loop_header;
  __ b(&pop_loop_header);
  __ bind(&pop_loop);
  __ pop(r4);
  __ str(r4, MemOperand(r3, 0));
  __ add(r3, r3, Operand(sizeof(uint32_t)));
  __ bind(&pop_loop_header);
  __ cmp(r2, sp);
  __ b(ne, &pop_loop);

  // Compute the output frame in the deoptimizer.
  __ push(r0);  // Preserve deoptimizer object across call.
  // r0: deoptimizer object; r1: scratch.
  __ PrepareCallCFunction(1);
  // Call Deoptimizer::ComputeOutputFrames().
  {
    AllowExternalCallThatCantCauseGC scope(masm);
    __ CallCFunction(ExternalReference::compute_output_frames_function(), 1);
  }
  __ pop(r0);  // Restore deoptimizer object (class Deoptimizer).

  __ ldr(sp, MemOperand(r0, Deoptimizer::caller_frame_top_offset()));

  // Replace the current (input) frame with the output frames.
  Label outer_push_loop, inner_push_loop, outer_loop_header, inner_loop_header;
  // Outer loop state: r4 = current "FrameDescription** output_",
  // r1 = one past the last FrameDescription**.
  __ ldr(r1, MemOperand(r0, Deoptimizer::output_count_offset()));
  __ ldr(r4, MemOperand(r0, Deoptimizer::output_offset()));  // r4 is output_.
  __ add(r1, r4, Operand(r1, LSL, 2));
  __ jmp(&outer_loop_header);
  __ bind(&outer_push_loop);
  // Inner loop state: r2 = current FrameDescription*, r3 = loop index.
  __ ldr(r2, MemOperand(r4, 0));  // output_[ix]
  __ ldr(r3, MemOperand(r2, FrameDescription::frame_size_offset()));
  __ jmp(&inner_loop_header);
  __ bind(&inner_push_loop);
  __ sub(r3, r3, Operand(sizeof(uint32_t)));
  __ add(r6, r2, Operand(r3));
  __ ldr(r6, MemOperand(r6, FrameDescription::frame_content_offset()));
  __ push(r6);
  __ bind(&inner_loop_header);
  __ cmp(r3, Operand::Zero());
  __ b(ne, &inner_push_loop);  // test for gt?
  __ add(r4, r4, Operand(kPointerSize));
  __ bind(&outer_loop_header);
  __ cmp(r4, r1);
  __ b(lt, &outer_push_loop);

  __ ldr(r1, MemOperand(r0, Deoptimizer::input_offset()));

  // State:
  // r1: Deoptimizer::input_ (FrameDescription*).
  // r2: The last output FrameDescription pointer (FrameDescription*).

  // Restore double registers from the input frame description.
  {
    UseScratchRegisterScope temps(masm);
    Register scratch = temps.Acquire();
    Register src_location = r6;
    __ add(src_location, r1, Operand(kDoubleRegsOffset));
    __ RestoreFPRegsFromHeap(src_location, scratch);
  }

  // Push pc and continuation from the last output frame.
  __ ldr(r6, MemOperand(r2, FrameDescription::pc_offset()));
  __ push(r6);
  __ ldr(r6, MemOperand(r2, FrameDescription::continuation_offset()));
  __ push(r6);

  // Push the registers from the last output frame.
  for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
    int offset = (i * kPointerSize) + FrameDescription::registers_offset();
    __ ldr(r6, MemOperand(r2, offset));
    __ push(r6);
  }

  // Restore the registers from the stack.
  __ ldm(ia_w, sp, restored_regs);  // all but pc registers.

  {
    UseScratchRegisterScope temps(masm);
    Register is_iterable = temps.Acquire();
    Register one = r4;
    __ Move(is_iterable, ExternalReference::stack_is_iterable_address(isolate));
    __ mov(one, Operand(1));
    __ strb(one, MemOperand(is_iterable));
  }

  // Remove sp, lr and pc.
  __ Drop(3);
  {
    UseScratchRegisterScope temps(masm);
    Register scratch = temps.Acquire();
    __ pop(scratch);  // get continuation, leave pc on stack
    __ pop(lr);
    __ Jump(scratch);
  }

  __ stop();
}
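
// In outline, the two stack-copy loops above do (illustrative C-like sketch
// only; `unwind_limit` names the pointer computed in r2):
//
//   // pop_loop: copy the old activation into deoptimizer->input_.
//   for (uint32_t* dst = input->frame_content; sp != unwind_limit;)
//     *dst++ = *sp++;  // via pop
//
//   // outer/inner push loops: materialize every output frame, pushing each
//   // frame's contents from its highest offset down.
//   for (FrameDescription** out = output; out != output + output_count; ++out)
//     for (intptr_t off = (*out)->frame_size; off > 0;) {
//       off -= sizeof(uint32_t);
//       push((*out)->frame_content[off / sizeof(uint32_t)]);
//     }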

}  // namespace

void Builtins::Generate_DeoptimizationEntry_Eager(MacroAssembler* masm) {
  Generate_DeoptimizationEntry(masm, DeoptimizeKind::kEager);
}

void Builtins::Generate_DeoptimizationEntry_Soft(MacroAssembler* masm) {
  Generate_DeoptimizationEntry(masm, DeoptimizeKind::kSoft);
}

void Builtins::Generate_DeoptimizationEntry_Bailout(MacroAssembler* masm) {
  Generate_DeoptimizationEntry(masm, DeoptimizeKind::kBailout);
}

void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
  Generate_DeoptimizationEntry(masm, DeoptimizeKind::kLazy);
}

namespace {

// Restarts execution either at the current or next (in execution order)
// bytecode. If there is baseline code on the shared function info, converts an
// interpreter frame into a baseline frame and continues execution in baseline
// code. Otherwise execution continues with bytecode.
void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
                                         bool next_bytecode,
                                         bool is_osr = false) {
  Label start;
  __ bind(&start);

  // Get function from the frame.
  Register closure = r1;
  __ ldr(closure, MemOperand(fp, StandardFrameConstants::kFunctionOffset));

  // Get the Code object from the shared function info.
  Register code_obj = r4;
  __ ldr(code_obj,
         FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
  __ ldr(code_obj,
         FieldMemOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset));

  // Check if we have baseline code. For OSR entry it is safe to assume we
  // always have baseline code.
  if (!is_osr) {
    Label start_with_baseline;
    __ CompareObjectType(code_obj, r3, r3, CODET_TYPE);
    __ b(eq, &start_with_baseline);

    // Start with bytecode as there is no baseline code.
    Builtin builtin_id = next_bytecode
                             ? Builtin::kInterpreterEnterAtNextBytecode
                             : Builtin::kInterpreterEnterAtBytecode;
    __ Jump(masm->isolate()->builtins()->code_handle(builtin_id),
            RelocInfo::CODE_TARGET);

    // Start with baseline code.
    __ bind(&start_with_baseline);
  } else if (FLAG_debug_code) {
    __ CompareObjectType(code_obj, r3, r3, CODET_TYPE);
    __ Assert(eq, AbortReason::kExpectedBaselineData);
  }

  if (FLAG_debug_code) {
    AssertCodeIsBaseline(masm, code_obj, r3);
  }

  // Load the feedback vector.
  Register feedback_vector = r2;
  __ ldr(feedback_vector,
         FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
  __ ldr(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));

  Label install_baseline_code;
  // Check if feedback vector is valid. If not, call prepare for baseline to
  // allocate it.
  __ CompareObjectType(feedback_vector, r3, r3, FEEDBACK_VECTOR_TYPE);
  __ b(ne, &install_baseline_code);

  // Save BytecodeOffset from the stack frame.
  __ ldr(kInterpreterBytecodeOffsetRegister,
         MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
  __ SmiUntag(kInterpreterBytecodeOffsetRegister);
  // Replace BytecodeOffset with the feedback vector.
  __ str(feedback_vector,
         MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
  feedback_vector = no_reg;

  // Compute baseline pc for bytecode offset.
  ExternalReference get_baseline_pc_extref;
  if (next_bytecode || is_osr) {
    get_baseline_pc_extref =
        ExternalReference::baseline_pc_for_next_executed_bytecode();
  } else {
    get_baseline_pc_extref =
        ExternalReference::baseline_pc_for_bytecode_offset();
  }
  Register get_baseline_pc = r3;
  __ Move(get_baseline_pc, get_baseline_pc_extref);

  // If the code deoptimizes during the implicit function entry stack interrupt
  // check, it will have a bailout ID of kFunctionEntryBytecodeOffset, which is
  // not a valid bytecode offset.
  // TODO(pthier): Investigate if it is feasible to handle this special case
  // in TurboFan instead of here.
  Label valid_bytecode_offset, function_entry_bytecode;
  if (!is_osr) {
    __ cmp(kInterpreterBytecodeOffsetRegister,
           Operand(BytecodeArray::kHeaderSize - kHeapObjectTag +
                   kFunctionEntryBytecodeOffset));
    __ b(eq, &function_entry_bytecode);
  }

  __ sub(kInterpreterBytecodeOffsetRegister, kInterpreterBytecodeOffsetRegister,
         Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));

  __ bind(&valid_bytecode_offset);
  // Get bytecode array from the stack frame.
  __ ldr(kInterpreterBytecodeArrayRegister,
         MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
  // Save the accumulator register, since it's clobbered by the below call.
  __ Push(kInterpreterAccumulatorRegister);
  {
    Register arg_reg_1 = r0;
    Register arg_reg_2 = r1;
    Register arg_reg_3 = r2;
    __ mov(arg_reg_1, code_obj);
    __ mov(arg_reg_2, kInterpreterBytecodeOffsetRegister);
    __ mov(arg_reg_3, kInterpreterBytecodeArrayRegister);
    FrameScope scope(masm, StackFrame::INTERNAL);
    __ PrepareCallCFunction(3, 0);
    __ CallCFunction(get_baseline_pc, 3, 0);
  }
  __ add(code_obj, code_obj, kReturnRegister0);
  __ Pop(kInterpreterAccumulatorRegister);

  if (is_osr) {
    // Reset the OSR loop nesting depth to disarm back edges.
    // TODO(pthier): Separate baseline Sparkplug from TF arming and don't
    // disarm Sparkplug here.
    UseScratchRegisterScope temps(masm);
    Register scratch = temps.Acquire();
    __ mov(scratch, Operand(0));
    __ strh(scratch,
            FieldMemOperand(kInterpreterBytecodeArrayRegister,
                            BytecodeArray::kOsrLoopNestingLevelOffset));
    Generate_OSREntry(masm, code_obj,
                      Operand(Code::kHeaderSize - kHeapObjectTag));
  } else {
    __ add(code_obj, code_obj, Operand(Code::kHeaderSize - kHeapObjectTag));
    __ Jump(code_obj);
  }
  __ Trap();  // Unreachable.

  if (!is_osr) {
    __ bind(&function_entry_bytecode);
    // If the bytecode offset is kFunctionEntryBytecodeOffset, get the start
    // address of the first bytecode.
    __ mov(kInterpreterBytecodeOffsetRegister, Operand(0));
    if (next_bytecode) {
      __ Move(get_baseline_pc,
              ExternalReference::baseline_pc_for_bytecode_offset());
    }
    __ b(&valid_bytecode_offset);
  }

  __ bind(&install_baseline_code);
  {
    FrameScope scope(masm, StackFrame::INTERNAL);
    __ Push(kInterpreterAccumulatorRegister);
    __ Push(closure);
    __ CallRuntime(Runtime::kInstallBaselineCode, 1);
    __ Pop(kInterpreterAccumulatorRegister);
  }
  // Retry from the start after installing baseline code.
  __ b(&start);
}

}  // namespace

void Builtins::Generate_BaselineOrInterpreterEnterAtBytecode(
    MacroAssembler* masm) {
  Generate_BaselineOrInterpreterEntry(masm, false);
}

void Builtins::Generate_BaselineOrInterpreterEnterAtNextBytecode(
    MacroAssembler* masm) {
  Generate_BaselineOrInterpreterEntry(masm, true);
}

void Builtins::Generate_InterpreterOnStackReplacement_ToBaseline(
    MacroAssembler* masm) {
  Generate_BaselineOrInterpreterEntry(masm, false, true);
}

void Builtins::Generate_DynamicCheckMapsTrampoline(MacroAssembler* masm) {
  Generate_DynamicCheckMapsTrampoline<DynamicCheckMapsDescriptor>(
      masm, BUILTIN_CODE(masm->isolate(), DynamicCheckMaps));
}

void Builtins::Generate_DynamicCheckMapsWithFeedbackVectorTrampoline(
    MacroAssembler* masm) {
  Generate_DynamicCheckMapsTrampoline<
      DynamicCheckMapsWithFeedbackVectorDescriptor>(
      masm, BUILTIN_CODE(masm->isolate(), DynamicCheckMapsWithFeedbackVector));
}

template <class Descriptor>
void Builtins::Generate_DynamicCheckMapsTrampoline(
    MacroAssembler* masm, Handle<Code> builtin_target) {
  FrameScope scope(masm, StackFrame::MANUAL);
  __ EnterFrame(StackFrame::INTERNAL);

  // Only save the registers that the DynamicCheckMaps builtin can clobber.
  Descriptor descriptor;
  RegList registers = descriptor.allocatable_registers();
  // If FLAG_debug_code is enabled, CSA checks will call a C function, so we
  // need to save all CallerSaved registers too.
  if (FLAG_debug_code) registers |= kCallerSaved;
  __ MaybeSaveRegisters(registers);

  // Load the immediate arguments from the deopt exit to pass to the builtin.
  Register slot_arg = descriptor.GetRegisterParameter(Descriptor::kSlot);
  Register handler_arg = descriptor.GetRegisterParameter(Descriptor::kHandler);
  __ ldr(handler_arg, MemOperand(fp, CommonFrameConstants::kCallerPCOffset));
  __ ldr(slot_arg, MemOperand(handler_arg,
                              Deoptimizer::kEagerWithResumeImmedArgs1PcOffset));
  __ ldr(
      handler_arg,
      MemOperand(handler_arg, Deoptimizer::kEagerWithResumeImmedArgs2PcOffset));

  __ Call(builtin_target, RelocInfo::CODE_TARGET);

  Label deopt, bailout;
  __ cmp_raw_immediate(r0, static_cast<int>(DynamicCheckMapsStatus::kSuccess));
  __ b(ne, &deopt);

  __ MaybeRestoreRegisters(registers);
  __ LeaveFrame(StackFrame::INTERNAL);
  __ Ret();

  __ bind(&deopt);
  __ cmp_raw_immediate(r0, static_cast<int>(DynamicCheckMapsStatus::kBailout));
  __ b(eq, &bailout);

  if (FLAG_debug_code) {
    __ cmp_raw_immediate(r0, static_cast<int>(DynamicCheckMapsStatus::kDeopt));
    __ Assert(eq, AbortReason::kUnexpectedDynamicCheckMapsStatus);
  }
  __ MaybeRestoreRegisters(registers);
  __ LeaveFrame(StackFrame::INTERNAL);
  Handle<Code> deopt_eager = masm->isolate()->builtins()->code_handle(
      Deoptimizer::GetDeoptimizationEntry(DeoptimizeKind::kEager));
  __ Jump(deopt_eager, RelocInfo::CODE_TARGET);

  __ bind(&bailout);
  __ MaybeRestoreRegisters(registers);
  __ LeaveFrame(StackFrame::INTERNAL);
  Handle<Code> deopt_bailout = masm->isolate()->builtins()->code_handle(
      Deoptimizer::GetDeoptimizationEntry(DeoptimizeKind::kBailout));
  __ Jump(deopt_bailout, RelocInfo::CODE_TARGET);
}
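
// The status dispatch above corresponds to this illustrative C++ sketch (it
// mirrors the branches only; TailCallDeopt is a hypothetical stand-in for the
// tail jumps to the deoptimization entries):
//
//   switch (static_cast<DynamicCheckMapsStatus>(r0)) {
//     case DynamicCheckMapsStatus::kSuccess:
//       return;  // Restore registers, leave the frame, Ret().
//     case DynamicCheckMapsStatus::kBailout:
//       TailCallDeopt(DeoptimizeKind::kBailout);
//       break;
//     case DynamicCheckMapsStatus::kDeopt:
//     default:  // Asserted unreachable when FLAG_debug_code is on.
//       TailCallDeopt(DeoptimizeKind::kEager);
//   }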

#undef __

}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_ARM