1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #if V8_TARGET_ARCH_ARM
6
7 #include "src/api/api-arguments.h"
8 #include "src/codegen/code-factory.h"
9 // For interpreter_entry_return_pc_offset. TODO(jkummerow): Drop.
10 #include "src/codegen/macro-assembler-inl.h"
11 #include "src/codegen/register-configuration.h"
12 #include "src/debug/debug.h"
13 #include "src/deoptimizer/deoptimizer.h"
14 #include "src/execution/frame-constants.h"
15 #include "src/execution/frames.h"
16 #include "src/heap/heap-inl.h"
17 #include "src/logging/counters.h"
18 #include "src/objects/cell.h"
19 #include "src/objects/foreign.h"
20 #include "src/objects/heap-number.h"
21 #include "src/objects/js-generator.h"
22 #include "src/objects/objects-inl.h"
23 #include "src/objects/smi.h"
24 #include "src/runtime/runtime.h"
25 #include "src/wasm/wasm-linkage.h"
26 #include "src/wasm/wasm-objects.h"
27
28 namespace v8 {
29 namespace internal {
30
31 #define __ ACCESS_MASM(masm)
32
void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address) {
34 #if defined(__thumb__)
35 // Thumb mode builtin.
36 DCHECK_EQ(1, reinterpret_cast<uintptr_t>(
37 ExternalReference::Create(address).address()) &
38 1);
39 #endif
40 __ Move(kJavaScriptCallExtraArg1Register, ExternalReference::Create(address));
41 __ Jump(BUILTIN_CODE(masm->isolate(), AdaptorWithBuiltinExitFrame),
42 RelocInfo::CODE_TARGET);
43 }
44
static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
46 Runtime::FunctionId function_id) {
47 // ----------- S t a t e -------------
48 // -- r0 : actual argument count
49 // -- r1 : target function (preserved for callee)
50 // -- r3 : new target (preserved for callee)
51 // -----------------------------------
52 {
53 FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
54 // Push a copy of the target function, the new target and the actual
55 // argument count.
56 // Push function as parameter to the runtime call.
57 __ SmiTag(kJavaScriptCallArgCountRegister);
58 __ Push(kJavaScriptCallTargetRegister, kJavaScriptCallNewTargetRegister,
59 kJavaScriptCallArgCountRegister, kJavaScriptCallTargetRegister);
60
61 __ CallRuntime(function_id, 1);
62 __ mov(r2, r0);
63
64 // Restore target function, new target and actual argument count.
65 __ Pop(kJavaScriptCallTargetRegister, kJavaScriptCallNewTargetRegister,
66 kJavaScriptCallArgCountRegister);
67 __ SmiUntag(kJavaScriptCallArgCountRegister);
68 }
69 static_assert(kJavaScriptCallCodeStartRegister == r2, "ABI mismatch");
70 __ JumpCodeObject(r2);
71 }
72
73 namespace {
74
void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
76 // ----------- S t a t e -------------
77 // -- r0 : number of arguments
78 // -- r1 : constructor function
79 // -- r3 : new target
80 // -- cp : context
81 // -- lr : return address
82 // -- sp[...]: constructor arguments
83 // -----------------------------------
84
85 Register scratch = r2;
86
87 Label stack_overflow;
88
89 __ StackOverflowCheck(r0, scratch, &stack_overflow);
90
91 // Enter a construct frame.
92 {
93 FrameAndConstantPoolScope scope(masm, StackFrame::CONSTRUCT);
94
95 // Preserve the incoming parameters on the stack.
96 __ SmiTag(r0);
97 __ Push(cp, r0);
98 __ SmiUntag(r0);
99
    // TODO(victorgomes): When the arguments adaptor is completely removed, we
    // should get the formal parameter count and copy the arguments into their
    // correct positions (including any undefined), instead of delaying this to
    // InvokeFunction.
104
105 // Set up pointer to last argument (skip receiver).
106 __ add(
107 r4, fp,
108 Operand(StandardFrameConstants::kCallerSPOffset + kSystemPointerSize));
109 // Copy arguments and receiver to the expression stack.
110 __ PushArray(r4, r0, r5);
111 // The receiver for the builtin/api call.
112 __ PushRoot(RootIndex::kTheHoleValue);
113
114 // Call the function.
115 // r0: number of arguments (untagged)
116 // r1: constructor function
117 // r3: new target
118 __ InvokeFunctionWithNewTarget(r1, r3, r0, CALL_FUNCTION);
119
120 // Restore context from the frame.
121 __ ldr(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
122 // Restore smi-tagged arguments count from the frame.
123 __ ldr(scratch, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
124 // Leave construct frame.
125 }
126
127 // Remove caller arguments from the stack and return.
128 STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
129 __ add(sp, sp, Operand(scratch, LSL, kPointerSizeLog2 - kSmiTagSize));
130 __ add(sp, sp, Operand(kPointerSize));
131 __ Jump(lr);
132
133 __ bind(&stack_overflow);
134 {
135 FrameScope scope(masm, StackFrame::INTERNAL);
136 __ CallRuntime(Runtime::kThrowStackOverflow);
137 __ bkpt(0); // Unreachable code.
138 }
139 }
140
141 } // namespace
142
143 // The construct stub for ES5 constructor functions and ES6 class constructors.
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
145 // ----------- S t a t e -------------
146 // -- r0: number of arguments (untagged)
147 // -- r1: constructor function
148 // -- r3: new target
149 // -- cp: context
150 // -- lr: return address
151 // -- sp[...]: constructor arguments
152 // -----------------------------------
153
154 FrameScope scope(masm, StackFrame::MANUAL);
155 // Enter a construct frame.
156 Label post_instantiation_deopt_entry, not_create_implicit_receiver;
157 __ EnterFrame(StackFrame::CONSTRUCT);
158
159 // Preserve the incoming parameters on the stack.
160 __ LoadRoot(r4, RootIndex::kTheHoleValue);
161 __ SmiTag(r0);
162 __ Push(cp, r0, r1, r4, r3);
163
164 // ----------- S t a t e -------------
165 // -- sp[0*kPointerSize]: new target
166 // -- sp[1*kPointerSize]: padding
167 // -- r1 and sp[2*kPointerSize]: constructor function
168 // -- sp[3*kPointerSize]: number of arguments (tagged)
169 // -- sp[4*kPointerSize]: context
170 // -----------------------------------
171
172 __ ldr(r4, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
173 __ ldr(r4, FieldMemOperand(r4, SharedFunctionInfo::kFlagsOffset));
174 __ DecodeField<SharedFunctionInfo::FunctionKindBits>(r4);
175 __ JumpIfIsInRange(r4, kDefaultDerivedConstructor, kDerivedConstructor,
                     &not_create_implicit_receiver);
177
178 // If not derived class constructor: Allocate the new receiver object.
179 __ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1, r4,
180 r5);
181 __ Call(BUILTIN_CODE(masm->isolate(), FastNewObject), RelocInfo::CODE_TARGET);
182 __ b(&post_instantiation_deopt_entry);
183
184 // Else: use TheHoleValue as receiver for constructor call
  __ bind(&not_create_implicit_receiver);
186 __ LoadRoot(r0, RootIndex::kTheHoleValue);
187
188 // ----------- S t a t e -------------
189 // -- r0: receiver
190 // -- Slot 3 / sp[0*kPointerSize]: new target
191 // -- Slot 2 / sp[1*kPointerSize]: constructor function
192 // -- Slot 1 / sp[2*kPointerSize]: number of arguments (tagged)
193 // -- Slot 0 / sp[3*kPointerSize]: context
194 // -----------------------------------
195 // Deoptimizer enters here.
196 masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset(
197 masm->pc_offset());
198 __ bind(&post_instantiation_deopt_entry);
199
200 // Restore new target.
201 __ Pop(r3);
202
203 // Push the allocated receiver to the stack.
204 __ Push(r0);
205 // We need two copies because we may have to return the original one
206 // and the calling conventions dictate that the called function pops the
  // receiver. The second copy is pushed after the arguments; we keep it in r6
  // since r0 needs to hold the number of arguments until the function is
  // invoked.
210 __ mov(r6, r0);
211
212 // Set up pointer to first argument (skip receiver).
213 __ add(r4, fp,
214 Operand(StandardFrameConstants::kCallerSPOffset + kSystemPointerSize));
215
216 // Restore constructor function and argument count.
217 __ ldr(r1, MemOperand(fp, ConstructFrameConstants::kConstructorOffset));
218 __ ldr(r0, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
219 __ SmiUntag(r0);
220
221 Label stack_overflow;
222 __ StackOverflowCheck(r0, r5, &stack_overflow);
223
  // TODO(victorgomes): When the arguments adaptor is completely removed, we
  // should get the formal parameter count and copy the arguments into their
  // correct positions (including any undefined), instead of delaying this to
  // InvokeFunction.
228
229 // Copy arguments to the expression stack.
230 __ PushArray(r4, r0, r5);
231
232 // Push implicit receiver.
233 __ Push(r6);
234
235 // Call the function.
236 __ InvokeFunctionWithNewTarget(r1, r3, r0, CALL_FUNCTION);
237
238 // ----------- S t a t e -------------
239 // -- r0: constructor result
240 // -- sp[0*kPointerSize]: implicit receiver
241 // -- sp[1*kPointerSize]: padding
242 // -- sp[2*kPointerSize]: constructor function
243 // -- sp[3*kPointerSize]: number of arguments
244 // -- sp[4*kPointerSize]: context
245 // -----------------------------------
246
247 // Store offset of return address for deoptimizer.
248 masm->isolate()->heap()->SetConstructStubInvokeDeoptPCOffset(
249 masm->pc_offset());
250
251 // If the result is an object (in the ECMA sense), we should get rid
252 // of the receiver and use the result; see ECMA-262 section 13.2.2-7
253 // on page 74.
254 Label use_receiver, do_throw, leave_and_return, check_receiver;
255
256 // If the result is undefined, we jump out to using the implicit receiver.
257 __ JumpIfNotRoot(r0, RootIndex::kUndefinedValue, &check_receiver);
258
259 // Otherwise we do a smi check and fall through to check if the return value
260 // is a valid receiver.
261
262 // Throw away the result of the constructor invocation and use the
263 // on-stack receiver as the result.
264 __ bind(&use_receiver);
265 __ ldr(r0, MemOperand(sp, 0 * kPointerSize));
266 __ JumpIfRoot(r0, RootIndex::kTheHoleValue, &do_throw);
267
268 __ bind(&leave_and_return);
269 // Restore smi-tagged arguments count from the frame.
270 __ ldr(r1, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
271 // Leave construct frame.
272 __ LeaveFrame(StackFrame::CONSTRUCT);
273
274 // Remove caller arguments from the stack and return.
275 STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
276 __ add(sp, sp, Operand(r1, LSL, kPointerSizeLog2 - kSmiTagSize));
277 __ add(sp, sp, Operand(kPointerSize));
278 __ Jump(lr);
279
280 __ bind(&check_receiver);
281 // If the result is a smi, it is *not* an object in the ECMA sense.
282 __ JumpIfSmi(r0, &use_receiver);
283
284 // If the type of the result (stored in its map) is less than
285 // FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense.
286 STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
287 __ CompareObjectType(r0, r4, r5, FIRST_JS_RECEIVER_TYPE);
288 __ b(ge, &leave_and_return);
289 __ b(&use_receiver);
290
291 __ bind(&do_throw);
292 // Restore the context from the frame.
293 __ ldr(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
294 __ CallRuntime(Runtime::kThrowConstructorReturnedNonObject);
295 __ bkpt(0);
296
297 __ bind(&stack_overflow);
298 // Restore the context from the frame.
299 __ ldr(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
300 __ CallRuntime(Runtime::kThrowStackOverflow);
301 // Unreachable code.
302 __ bkpt(0);
303 }
304
void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
306 Generate_JSBuiltinsConstructStubHelper(masm);
307 }
308
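// If |sfi_data| is an InterpreterData object, replaces it with the
// BytecodeArray stored inside it; otherwise |sfi_data| is left unchanged.
// Clobbers |scratch1|.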
static void GetSharedFunctionInfoBytecode(MacroAssembler* masm,
310 Register sfi_data,
311 Register scratch1) {
312 Label done;
313
314 __ CompareObjectType(sfi_data, scratch1, scratch1, INTERPRETER_DATA_TYPE);
315 __ b(ne, &done);
316 __ ldr(sfi_data,
317 FieldMemOperand(sfi_data, InterpreterData::kBytecodeArrayOffset));
318
319 __ bind(&done);
320 }
321
322 // static
void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
324 // ----------- S t a t e -------------
325 // -- r0 : the value to pass to the generator
326 // -- r1 : the JSGeneratorObject to resume
327 // -- lr : return address
328 // -----------------------------------
329 __ AssertGeneratorObject(r1);
330
331 // Store input value into generator object.
332 __ str(r0, FieldMemOperand(r1, JSGeneratorObject::kInputOrDebugPosOffset));
333 __ RecordWriteField(r1, JSGeneratorObject::kInputOrDebugPosOffset, r0,
334 kLRHasNotBeenSaved, kDontSaveFPRegs);
335
336 // Load suspended function and context.
337 __ ldr(r4, FieldMemOperand(r1, JSGeneratorObject::kFunctionOffset));
338 __ ldr(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
339
340 Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
341 Label stepping_prepared;
342 Register scratch = r5;
343
344 // Flood function if we are stepping.
345 ExternalReference debug_hook =
346 ExternalReference::debug_hook_on_function_call_address(masm->isolate());
347 __ Move(scratch, debug_hook);
348 __ ldrsb(scratch, MemOperand(scratch));
349 __ cmp(scratch, Operand(0));
350 __ b(ne, &prepare_step_in_if_stepping);
351
352 // Flood function if we need to continue stepping in the suspended
353 // generator.
354 ExternalReference debug_suspended_generator =
355 ExternalReference::debug_suspended_generator_address(masm->isolate());
356 __ Move(scratch, debug_suspended_generator);
357 __ ldr(scratch, MemOperand(scratch));
358 __ cmp(scratch, Operand(r1));
359 __ b(eq, &prepare_step_in_suspended_generator);
360 __ bind(&stepping_prepared);
361
362 // Check the stack for overflow. We are not trying to catch interruptions
363 // (i.e. debug break and preemption) here, so check the "real stack limit".
364 Label stack_overflow;
365 __ LoadStackLimit(scratch, StackLimitKind::kRealStackLimit);
366 __ cmp(sp, scratch);
367 __ b(lo, &stack_overflow);
368
369 // ----------- S t a t e -------------
370 // -- r1 : the JSGeneratorObject to resume
371 // -- r4 : generator function
372 // -- cp : generator context
373 // -- lr : return address
374 // -- sp[0] : generator receiver
375 // -----------------------------------
376
377 // Copy the function arguments from the generator object's register file.
378 __ ldr(r3, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
379 __ ldrh(r3,
380 FieldMemOperand(r3, SharedFunctionInfo::kFormalParameterCountOffset));
381 __ ldr(r2,
382 FieldMemOperand(r1, JSGeneratorObject::kParametersAndRegistersOffset));
383 {
384 Label done_loop, loop;
385 __ mov(r6, r3);
386
387 __ bind(&loop);
388 __ sub(r6, r6, Operand(1), SetCC);
389 __ b(lt, &done_loop);
390 __ add(scratch, r2, Operand(r6, LSL, kTaggedSizeLog2));
391 __ ldr(scratch, FieldMemOperand(scratch, FixedArray::kHeaderSize));
392 __ Push(scratch);
393 __ b(&loop);
394
395 __ bind(&done_loop);
396
397 // Push receiver.
398 __ ldr(scratch, FieldMemOperand(r1, JSGeneratorObject::kReceiverOffset));
399 __ Push(scratch);
400 }
401
402 // Underlying function needs to have bytecode available.
403 if (FLAG_debug_code) {
404 __ ldr(r3, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
405 __ ldr(r3, FieldMemOperand(r3, SharedFunctionInfo::kFunctionDataOffset));
406 GetSharedFunctionInfoBytecode(masm, r3, r0);
407 __ CompareObjectType(r3, r3, r3, BYTECODE_ARRAY_TYPE);
408 __ Assert(eq, AbortReason::kMissingBytecodeArray);
409 }
410
411 // Resume (Ignition/TurboFan) generator object.
412 {
413 __ ldr(r0, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
414 __ ldrh(r0, FieldMemOperand(
415 r0, SharedFunctionInfo::kFormalParameterCountOffset));
416 // We abuse new.target both to indicate that this is a resume call and to
417 // pass in the generator object. In ordinary calls, new.target is always
418 // undefined because generator functions are non-constructable.
419 __ Move(r3, r1);
420 __ Move(r1, r4);
421 static_assert(kJavaScriptCallCodeStartRegister == r2, "ABI mismatch");
422 __ ldr(r2, FieldMemOperand(r1, JSFunction::kCodeOffset));
423 __ JumpCodeObject(r2);
424 }
425
426 __ bind(&prepare_step_in_if_stepping);
427 {
428 FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
429 __ Push(r1, r4);
430 // Push hole as receiver since we do not use it for stepping.
431 __ PushRoot(RootIndex::kTheHoleValue);
432 __ CallRuntime(Runtime::kDebugOnFunctionCall);
433 __ Pop(r1);
434 __ ldr(r4, FieldMemOperand(r1, JSGeneratorObject::kFunctionOffset));
435 }
436 __ b(&stepping_prepared);
437
438 __ bind(&prepare_step_in_suspended_generator);
439 {
440 FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
441 __ Push(r1);
442 __ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator);
443 __ Pop(r1);
444 __ ldr(r4, FieldMemOperand(r1, JSGeneratorObject::kFunctionOffset));
445 }
446 __ b(&stepping_prepared);
447
448 __ bind(&stack_overflow);
449 {
450 FrameScope scope(masm, StackFrame::INTERNAL);
451 __ CallRuntime(Runtime::kThrowStackOverflow);
452 __ bkpt(0); // This should be unreachable.
453 }
454 }
455
void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
457 FrameScope scope(masm, StackFrame::INTERNAL);
458 __ push(r1);
459 __ CallRuntime(Runtime::kThrowConstructedNonConstructable);
460 }
461
462 namespace {
463
464 // Total size of the stack space pushed by JSEntryVariant.
// JSEntryTrampoline uses this to access the on-stack arguments passed to
466 // JSEntryVariant.
467 constexpr int kPushedStackSpace = kNumCalleeSaved * kPointerSize +
468 kPointerSize /* LR */ +
469 kNumDoubleCalleeSaved * kDoubleSize +
470 4 * kPointerSize /* r5, r6, r7, scratch */ +
471 EntryFrameConstants::kCallerFPOffset;
472
473 // Assert that the EntryFrameConstants are in sync with the builtin.
474 static_assert(kPushedStackSpace == EntryFrameConstants::kDirectCallerSPOffset +
475 3 * kPointerSize /* r5, r6, r7*/ +
476 EntryFrameConstants::kCallerFPOffset,
477 "Pushed stack space and frame constants do not match. See "
478 "frame-constants-arm.h");
479
480 // Called with the native C calling convention. The corresponding function
481 // signature is either:
482 //
483 // using JSEntryFunction = GeneratedCode<Address(
484 // Address root_register_value, Address new_target, Address target,
485 // Address receiver, intptr_t argc, Address** argv)>;
486 // or
487 // using JSEntryFunction = GeneratedCode<Address(
488 // Address root_register_value, MicrotaskQueue* microtask_queue)>;
void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
490 Builtins::Name entry_trampoline) {
491 // The register state is either:
492 // r0: root_register_value
493 // r1: code entry
494 // r2: function
495 // r3: receiver
496 // [sp + 0 * kSystemPointerSize]: argc
497 // [sp + 1 * kSystemPointerSize]: argv
498 // or
499 // r0: root_register_value
500 // r1: microtask_queue
501 // Preserve all but r0 and pass them to entry_trampoline.
502 Label invoke, handler_entry, exit;
503
504 // Update |pushed_stack_space| when we manipulate the stack.
505 int pushed_stack_space = EntryFrameConstants::kCallerFPOffset;
506 {
507 NoRootArrayScope no_root_array(masm);
508
509 // Called from C, so do not pop argc and args on exit (preserve sp)
510 // No need to save register-passed args
511 // Save callee-saved registers (incl. cp and fp), sp, and lr
512 __ stm(db_w, sp, kCalleeSaved | lr.bit());
513 pushed_stack_space +=
514 kNumCalleeSaved * kPointerSize + kPointerSize /* LR */;
515
516 // Save callee-saved vfp registers.
517 __ vstm(db_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg);
518 pushed_stack_space += kNumDoubleCalleeSaved * kDoubleSize;
519
520 // Set up the reserved register for 0.0.
521 __ vmov(kDoubleRegZero, Double(0.0));
522
523 // Initialize the root register.
524 // C calling convention. The first argument is passed in r0.
525 __ mov(kRootRegister, r0);
526 }
527
528 // Push a frame with special values setup to mark it as an entry frame.
529 // r0: root_register_value
530 __ mov(r7, Operand(StackFrame::TypeToMarker(type)));
531 __ mov(r6, Operand(StackFrame::TypeToMarker(type)));
532 __ Move(r5, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
533 masm->isolate()));
534 __ ldr(r5, MemOperand(r5));
535 {
536 UseScratchRegisterScope temps(masm);
537 Register scratch = temps.Acquire();
538
539 // Push a bad frame pointer to fail if it is used.
540 __ mov(scratch, Operand(-1));
541 __ stm(db_w, sp, r5.bit() | r6.bit() | r7.bit() | scratch.bit());
542 pushed_stack_space += 4 * kPointerSize /* r5, r6, r7, scratch */;
543 }
544
545 Register scratch = r6;
546
547 // Set up frame pointer for the frame to be pushed.
548 __ add(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
549
550 // If this is the outermost JS call, set js_entry_sp value.
551 Label non_outermost_js;
552 ExternalReference js_entry_sp = ExternalReference::Create(
553 IsolateAddressId::kJSEntrySPAddress, masm->isolate());
554 __ Move(r5, js_entry_sp);
555 __ ldr(scratch, MemOperand(r5));
556 __ cmp(scratch, Operand::Zero());
557 __ b(ne, &non_outermost_js);
558 __ str(fp, MemOperand(r5));
559 __ mov(scratch, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
560 Label cont;
561 __ b(&cont);
562 __ bind(&non_outermost_js);
563 __ mov(scratch, Operand(StackFrame::INNER_JSENTRY_FRAME));
564 __ bind(&cont);
565 __ push(scratch);
566
567 // Jump to a faked try block that does the invoke, with a faked catch
568 // block that sets the pending exception.
569 __ jmp(&invoke);
570
571 // Block literal pool emission whilst taking the position of the handler
572 // entry. This avoids making the assumption that literal pools are always
573 // emitted after an instruction is emitted, rather than before.
574 {
575 Assembler::BlockConstPoolScope block_const_pool(masm);
576 __ bind(&handler_entry);
577
578 // Store the current pc as the handler offset. It's used later to create the
579 // handler table.
580 masm->isolate()->builtins()->SetJSEntryHandlerOffset(handler_entry.pos());
581
582 // Caught exception: Store result (exception) in the pending exception
583 // field in the JSEnv and return a failure sentinel. Coming in here the
584 // fp will be invalid because the PushStackHandler below sets it to 0 to
585 // signal the existence of the JSEntry frame.
586 __ Move(scratch,
587 ExternalReference::Create(
588 IsolateAddressId::kPendingExceptionAddress, masm->isolate()));
589 }
590 __ str(r0, MemOperand(scratch));
591 __ LoadRoot(r0, RootIndex::kException);
592 __ b(&exit);
593
594 // Invoke: Link this frame into the handler chain.
595 __ bind(&invoke);
596 // Must preserve r0-r4, r5-r6 are available.
597 __ PushStackHandler();
598 // If an exception not caught by another handler occurs, this handler
599 // returns control to the code after the bl(&invoke) above, which
600 // restores all kCalleeSaved registers (including cp and fp) to their
601 // saved values before returning a failure to C.
602 //
603 // Invoke the function by calling through JS entry trampoline builtin and
604 // pop the faked function when we return.
605 Handle<Code> trampoline_code =
606 masm->isolate()->builtins()->builtin_handle(entry_trampoline);
607 DCHECK_EQ(kPushedStackSpace, pushed_stack_space);
608 __ Call(trampoline_code, RelocInfo::CODE_TARGET);
609
610 // Unlink this frame from the handler chain.
611 __ PopStackHandler();
612
613 __ bind(&exit); // r0 holds result
614 // Check if the current stack frame is marked as the outermost JS frame.
615 Label non_outermost_js_2;
616 __ pop(r5);
617 __ cmp(r5, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
618 __ b(ne, &non_outermost_js_2);
619 __ mov(r6, Operand::Zero());
620 __ Move(r5, js_entry_sp);
621 __ str(r6, MemOperand(r5));
622 __ bind(&non_outermost_js_2);
623
624 // Restore the top frame descriptors from the stack.
625 __ pop(r3);
626 __ Move(scratch, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
627 masm->isolate()));
628 __ str(r3, MemOperand(scratch));
629
630 // Reset the stack to the callee saved registers.
631 __ add(sp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
632
633 // Restore callee-saved registers and return.
634 #ifdef DEBUG
635 if (FLAG_debug_code) {
636 __ mov(lr, Operand(pc));
637 }
638 #endif
639
640 // Restore callee-saved vfp registers.
641 __ vldm(ia_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg);
642
643 __ ldm(ia_w, sp, kCalleeSaved | pc.bit());
644 }
645
646 } // namespace
647
void Builtins::Generate_JSEntry(MacroAssembler* masm) {
649 Generate_JSEntryVariant(masm, StackFrame::ENTRY,
650 Builtins::kJSEntryTrampoline);
651 }
652
void Builtins::Generate_JSConstructEntry(MacroAssembler* masm) {
654 Generate_JSEntryVariant(masm, StackFrame::CONSTRUCT_ENTRY,
655 Builtins::kJSConstructEntryTrampoline);
656 }
657
void Builtins::Generate_JSRunMicrotasksEntry(MacroAssembler* masm) {
659 Generate_JSEntryVariant(masm, StackFrame::ENTRY,
660 Builtins::kRunMicrotasksTrampoline);
661 }
662
static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
664 bool is_construct) {
665 // Called from Generate_JS_Entry
666 // r0: root_register_value
667 // r1: new.target
668 // r2: function
669 // r3: receiver
670 // [fp + kPushedStackSpace + 0 * kSystemPointerSize]: argc
671 // [fp + kPushedStackSpace + 1 * kSystemPointerSize]: argv
672 // r5-r6, r8 and cp may be clobbered
673
674 __ ldr(r0,
675 MemOperand(fp, kPushedStackSpace + EntryFrameConstants::kArgcOffset));
676 __ ldr(r4,
677 MemOperand(fp, kPushedStackSpace + EntryFrameConstants::kArgvOffset));
678
679 // r1: new.target
680 // r2: function
681 // r3: receiver
682 // r0: argc
683 // r4: argv
684
685 // Enter an internal frame.
686 {
687 FrameScope scope(masm, StackFrame::INTERNAL);
688
689 // Setup the context (we need to use the caller context from the isolate).
690 ExternalReference context_address = ExternalReference::Create(
691 IsolateAddressId::kContextAddress, masm->isolate());
692 __ Move(cp, context_address);
693 __ ldr(cp, MemOperand(cp));
694
695 // Push the function.
696 __ Push(r2);
697
698 // Check if we have enough stack space to push all arguments + receiver.
699 // Clobbers r5.
700 Label enough_stack_space, stack_overflow;
701 __ add(r6, r0, Operand(1)); // Add one for receiver.
702 __ StackOverflowCheck(r6, r5, &stack_overflow);
703 __ b(&enough_stack_space);
704 __ bind(&stack_overflow);
705 __ CallRuntime(Runtime::kThrowStackOverflow);
706 // Unreachable code.
707 __ bkpt(0);
708
709 __ bind(&enough_stack_space);
710
711 // Copy arguments to the stack in a loop.
712 // r1: new.target
713 // r2: function
714 // r3: receiver
715 // r0: argc
716 // r4: argv, i.e. points to first arg
717 Label loop, entry;
718 __ add(r6, r4, Operand(r0, LSL, kSystemPointerSizeLog2));
719 // r6 points past last arg.
720 __ b(&entry);
721 __ bind(&loop);
722 __ ldr(r5, MemOperand(r6, -kSystemPointerSize,
723 PreIndex)); // read next parameter
724 __ ldr(r5, MemOperand(r5)); // dereference handle
725 __ push(r5); // push parameter
726 __ bind(&entry);
727 __ cmp(r4, r6);
728 __ b(ne, &loop);
729
730 // Push the receiver.
731 __ Push(r3);
732
733 // Setup new.target and function.
734 __ mov(r3, r1);
735 __ mov(r1, r2);
736 // r0: argc
737 // r1: function
738 // r3: new.target
739
740 // Initialize all JavaScript callee-saved registers, since they will be seen
741 // by the garbage collector as part of handlers.
742 __ LoadRoot(r4, RootIndex::kUndefinedValue);
743 __ mov(r2, r4);
744 __ mov(r5, r4);
745 __ mov(r6, r4);
746 __ mov(r8, r4);
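    // r9 may be reserved by the platform ABI (it is the platform/static base
    // register on some ARM targets), so it is only cleared when kR9Available
    // indicates it is usable by V8.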
747 if (kR9Available == 1) {
748 __ mov(r9, r4);
749 }
750
751 // Invoke the code.
752 Handle<Code> builtin = is_construct
753 ? BUILTIN_CODE(masm->isolate(), Construct)
754 : masm->isolate()->builtins()->Call();
755 __ Call(builtin, RelocInfo::CODE_TARGET);
756
757 // Exit the JS frame and remove the parameters (except function), and
758 // return.
759 // Respect ABI stack constraint.
760 }
761 __ Jump(lr);
762
763 // r0: result
764 }
765
void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
767 Generate_JSEntryTrampolineHelper(masm, false);
768 }
769
void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
771 Generate_JSEntryTrampolineHelper(masm, true);
772 }
773
void Builtins::Generate_RunMicrotasksTrampoline(MacroAssembler* masm) {
775 // This expects two C++ function parameters passed by Invoke() in
776 // execution.cc.
777 // r0: root_register_value
778 // r1: microtask_queue
779
780 __ mov(RunMicrotasksDescriptor::MicrotaskQueueRegister(), r1);
781 __ Jump(BUILTIN_CODE(masm->isolate(), RunMicrotasks), RelocInfo::CODE_TARGET);
782 }
783
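// Stores |optimized_code| in |closure|'s code field and records the write
// with the write barrier. The smi check and remembered-set action are
// omitted since the value is a Code object (never a smi, never in new space).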
static void ReplaceClosureCodeWithOptimizedCode(MacroAssembler* masm,
785 Register optimized_code,
786 Register closure) {
787 // Store code entry in the closure.
788 __ str(optimized_code, FieldMemOperand(closure, JSFunction::kCodeOffset));
789 __ RecordWriteField(closure, JSFunction::kCodeOffset, optimized_code,
790 kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
791 OMIT_SMI_CHECK);
792 }
793
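// Drops the interpreter frame and removes the receiver and the arguments from
// the stack. When V8_NO_ARGUMENTS_ADAPTOR is set, the larger of the formal
// and actual parameter counts is used, since any extra actual arguments also
// live on the stack. Clobbers |scratch1| and |scratch2|.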
static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
795 Register scratch2) {
796 Register params_size = scratch1;
797 // Get the size of the formal parameters + receiver (in bytes).
798 __ ldr(params_size,
799 MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
800 __ ldr(params_size,
801 FieldMemOperand(params_size, BytecodeArray::kParameterSizeOffset));
802
803 #ifdef V8_NO_ARGUMENTS_ADAPTOR
804 Register actual_params_size = scratch2;
805 // Compute the size of the actual parameters + receiver (in bytes).
806 __ ldr(actual_params_size,
807 MemOperand(fp, StandardFrameConstants::kArgCOffset));
808 __ lsl(actual_params_size, actual_params_size, Operand(kPointerSizeLog2));
809 __ add(actual_params_size, actual_params_size, Operand(kSystemPointerSize));
810
811 // If actual is bigger than formal, then we should use it to free up the stack
812 // arguments.
813 __ cmp(params_size, actual_params_size);
814 __ mov(params_size, actual_params_size, LeaveCC, lt);
815 #endif
816
817 // Leave the frame (also dropping the register file).
818 __ LeaveFrame(StackFrame::INTERPRETED);
819
820 // Drop receiver + arguments.
821 __ add(sp, sp, params_size, LeaveCC);
822 }
823
824 // Tail-call |function_id| if |actual_marker| == |expected_marker|
static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
826 Register actual_marker,
827 OptimizationMarker expected_marker,
828 Runtime::FunctionId function_id) {
829 Label no_match;
830 __ cmp_raw_immediate(actual_marker, expected_marker);
831 __ b(ne, &no_match);
832 GenerateTailCallToReturnedCode(masm, function_id);
833 __ bind(&no_match);
834 }
835
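// Installs |optimized_code_entry| in the closure and tail-calls it. If the
// weak reference to the code object has been cleared, or the code is marked
// for deoptimization, the runtime is tail-called instead to heal the
// optimized code slot.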
static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
837 Register optimized_code_entry,
838 Register scratch) {
839 // ----------- S t a t e -------------
840 // -- r0 : actual argument count
841 // -- r3 : new target (preserved for callee if needed, and caller)
842 // -- r1 : target function (preserved for callee if needed, and caller)
843 // -----------------------------------
844 DCHECK(!AreAliased(r1, r3, optimized_code_entry, scratch));
845
846 Register closure = r1;
847 Label heal_optimized_code_slot;
848
849 // If the optimized code is cleared, go to runtime to update the optimization
850 // marker field.
851 __ LoadWeakValue(optimized_code_entry, optimized_code_entry,
852 &heal_optimized_code_slot);
853
854 // Check if the optimized code is marked for deopt. If it is, call the
855 // runtime to clear it.
856 __ ldr(scratch,
857 FieldMemOperand(optimized_code_entry, Code::kCodeDataContainerOffset));
858 __ ldr(scratch,
859 FieldMemOperand(scratch, CodeDataContainer::kKindSpecificFlagsOffset));
860 __ tst(scratch, Operand(1 << Code::kMarkedForDeoptimizationBit));
861 __ b(ne, &heal_optimized_code_slot);
862
863 // Optimized code is good, get it into the closure and link the closure
864 // into the optimized functions list, then tail call the optimized code.
865 ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure);
866 static_assert(kJavaScriptCallCodeStartRegister == r2, "ABI mismatch");
867 __ LoadCodeObjectEntry(r2, optimized_code_entry);
868 __ Jump(r2);
869
870 // Optimized code slot contains deoptimized code or code is cleared and
871 // optimized code marker isn't updated. Evict the code, update the marker
872 // and re-enter the closure's code.
873 __ bind(&heal_optimized_code_slot);
874 GenerateTailCallToReturnedCode(masm, Runtime::kHealOptimizedCodeSlot);
875 }
876
static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
878 Register optimization_marker) {
879 // ----------- S t a t e -------------
880 // -- r0 : actual argument count
881 // -- r3 : new target (preserved for callee if needed, and caller)
882 // -- r1 : target function (preserved for callee if needed, and caller)
883 // -- feedback vector (preserved for caller if needed)
  // -- optimization_marker : an int32 containing a non-zero optimization
885 // marker.
886 // -----------------------------------
887 DCHECK(!AreAliased(feedback_vector, r1, r3, optimization_marker));
888
889 // TODO(v8:8394): The logging of first execution will break if
890 // feedback vectors are not allocated. We need to find a different way of
891 // logging these events if required.
892 TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
893 OptimizationMarker::kLogFirstExecution,
894 Runtime::kFunctionFirstExecution);
895 TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
896 OptimizationMarker::kCompileOptimized,
897 Runtime::kCompileOptimized_NotConcurrent);
898 TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
899 OptimizationMarker::kCompileOptimizedConcurrent,
900 Runtime::kCompileOptimized_Concurrent);
901
902 // Marker should be one of LogFirstExecution / CompileOptimized /
903 // CompileOptimizedConcurrent. InOptimizationQueue and None shouldn't reach
904 // here.
905 if (FLAG_debug_code) {
906 __ stop();
907 }
908 }
909
910 // Advance the current bytecode offset. This simulates what all bytecode
911 // handlers do upon completion of the underlying operation. Will bail out to a
912 // label if the bytecode (without prefix) is a return bytecode. Will not advance
913 // the bytecode offset if the current bytecode is a JumpLoop, instead just
914 // re-executing the JumpLoop to jump to the correct bytecode.
static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
916 Register bytecode_array,
917 Register bytecode_offset,
918 Register bytecode, Register scratch1,
919 Register scratch2, Label* if_return) {
920 Register bytecode_size_table = scratch1;
921
  // The bytecode offset value will be increased by one in wide and extra wide
  // cases. If the bytecode turns out to be a wide or extra wide JumpLoop, we
  // need to restore the original bytecode offset, so we keep a backup of it
  // in |original_bytecode_offset|.
926 Register original_bytecode_offset = scratch2;
927 DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode_size_table,
928 bytecode, original_bytecode_offset));
929
930 __ Move(bytecode_size_table,
931 ExternalReference::bytecode_size_table_address());
932 __ Move(original_bytecode_offset, bytecode_offset);
933
934 // Check if the bytecode is a Wide or ExtraWide prefix bytecode.
935 Label process_bytecode;
936 STATIC_ASSERT(0 == static_cast<int>(interpreter::Bytecode::kWide));
937 STATIC_ASSERT(1 == static_cast<int>(interpreter::Bytecode::kExtraWide));
938 STATIC_ASSERT(2 == static_cast<int>(interpreter::Bytecode::kDebugBreakWide));
939 STATIC_ASSERT(3 ==
940 static_cast<int>(interpreter::Bytecode::kDebugBreakExtraWide));
941 __ cmp(bytecode, Operand(0x3));
942 __ b(hi, &process_bytecode);
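  // Bit 0 of the prefix bytecode distinguishes the ExtraWide prefixes (odd
  // values) from the Wide ones (even values). The flags set by this tst are
  // consumed only by the conditional add below that switches to the
  // extra-wide size table.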
943 __ tst(bytecode, Operand(0x1));
944 // Load the next bytecode.
945 __ add(bytecode_offset, bytecode_offset, Operand(1));
946 __ ldrb(bytecode, MemOperand(bytecode_array, bytecode_offset));
947
948 // Update table to the wide scaled table.
949 __ add(bytecode_size_table, bytecode_size_table,
950 Operand(kIntSize * interpreter::Bytecodes::kBytecodeCount));
951 // Conditionally update table to the extra wide scaled table. We are taking
952 // advantage of the fact that the extra wide follows the wide one.
953 __ add(bytecode_size_table, bytecode_size_table,
954 Operand(kIntSize * interpreter::Bytecodes::kBytecodeCount), LeaveCC,
955 ne);
956
957 __ bind(&process_bytecode);
958
959 // Bailout to the return label if this is a return bytecode.
960
961 // Create cmp, cmpne, ..., cmpne to check for a return bytecode.
962 Condition flag = al;
963 #define JUMP_IF_EQUAL(NAME) \
964 __ cmp(bytecode, Operand(static_cast<int>(interpreter::Bytecode::k##NAME)), \
965 flag); \
966 flag = ne;
967 RETURN_BYTECODE_LIST(JUMP_IF_EQUAL)
968 #undef JUMP_IF_EQUAL
969
970 __ b(if_return, eq);
971
972 // If this is a JumpLoop, re-execute it to perform the jump to the beginning
973 // of the loop.
974 Label end, not_jump_loop;
975 __ cmp(bytecode, Operand(static_cast<int>(interpreter::Bytecode::kJumpLoop)));
  __ b(ne, &not_jump_loop);
977 // We need to restore the original bytecode_offset since we might have
978 // increased it to skip the wide / extra-wide prefix bytecode.
979 __ Move(bytecode_offset, original_bytecode_offset);
980 __ b(&end);
981
  __ bind(&not_jump_loop);
983 // Otherwise, load the size of the current bytecode and advance the offset.
984 __ ldr(scratch1, MemOperand(bytecode_size_table, bytecode, LSL, 2));
985 __ add(bytecode_offset, bytecode_offset, scratch1);
986
987 __ bind(&end);
988 }
989
990 // Generate code for entering a JS function with the interpreter.
991 // On entry to the function the receiver and arguments have been pushed on the
992 // stack left to right.
993 //
994 // The live registers are:
995 // o r0: actual argument count (not including the receiver)
996 // o r1: the JS function object being called.
997 // o r3: the incoming new target or generator object
998 // o cp: our context
999 // o fp: the caller's frame pointer
1000 // o sp: stack pointer
1001 // o lr: return address
1002 //
1003 // The function builds an interpreter frame. See InterpreterFrameConstants in
1004 // frames.h for its layout.
void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
1006 Register closure = r1;
1007 Register feedback_vector = r2;
1008
1009 // Get the bytecode array from the function object and load it into
1010 // kInterpreterBytecodeArrayRegister.
1011 __ ldr(r4, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
1012 __ ldr(kInterpreterBytecodeArrayRegister,
1013 FieldMemOperand(r4, SharedFunctionInfo::kFunctionDataOffset));
1014 GetSharedFunctionInfoBytecode(masm, kInterpreterBytecodeArrayRegister, r8);
1015
1016 // The bytecode array could have been flushed from the shared function info,
1017 // if so, call into CompileLazy.
1018 Label compile_lazy;
1019 __ CompareObjectType(kInterpreterBytecodeArrayRegister, r4, no_reg,
1020 BYTECODE_ARRAY_TYPE);
1021 __ b(ne, &compile_lazy);
1022
1023 // Load the feedback vector from the closure.
1024 __ ldr(feedback_vector,
1025 FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
1026 __ ldr(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
1027
1028 Label push_stack_frame;
1029 // Check if feedback vector is valid. If valid, check for optimized code
1030 // and update invocation count. Otherwise, setup the stack frame.
1031 __ ldr(r4, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
1032 __ ldrh(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
1033 __ cmp(r4, Operand(FEEDBACK_VECTOR_TYPE));
1034 __ b(ne, &push_stack_frame);
1035
1036 Register optimization_state = r4;
1037
1038 // Read off the optimization state in the feedback vector.
1039 __ ldr(optimization_state,
1040 FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
1041
  // Check if the optimized code slot is not empty or has an optimization marker.
1043 Label has_optimized_code_or_marker;
1044 __ tst(
1045 optimization_state,
1046 Operand(FeedbackVector::kHasOptimizedCodeOrCompileOptimizedMarkerMask));
1047 __ b(ne, &has_optimized_code_or_marker);
1048
1049 Label not_optimized;
  __ bind(&not_optimized);
1051
1052 // Increment invocation count for the function.
1053 __ ldr(r9, FieldMemOperand(feedback_vector,
1054 FeedbackVector::kInvocationCountOffset));
1055 __ add(r9, r9, Operand(1));
1056 __ str(r9, FieldMemOperand(feedback_vector,
1057 FeedbackVector::kInvocationCountOffset));
1058
1059 // Open a frame scope to indicate that there is a frame on the stack. The
1060 // MANUAL indicates that the scope shouldn't actually generate code to set up
1061 // the frame (that is done below).
1062 __ bind(&push_stack_frame);
1063 FrameScope frame_scope(masm, StackFrame::MANUAL);
1064 __ PushStandardFrame(closure);
1065
  // Reset code age and the OSR arming. The OSR field and BytecodeAgeOffset are
  // 8-bit fields next to each other, so we could just optimize by writing a
  // 16-bit value. These static asserts guard that this assumption is valid.
1069 STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
1070 BytecodeArray::kOsrNestingLevelOffset + kCharSize);
1071 STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
1072 __ mov(r9, Operand(0));
1073 __ strh(r9, FieldMemOperand(kInterpreterBytecodeArrayRegister,
1074 BytecodeArray::kOsrNestingLevelOffset));
1075
1076 // Load the initial bytecode offset.
1077 __ mov(kInterpreterBytecodeOffsetRegister,
1078 Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
1079
1080 // Push bytecode array and Smi tagged bytecode array offset.
1081 __ SmiTag(r4, kInterpreterBytecodeOffsetRegister);
1082 __ Push(kInterpreterBytecodeArrayRegister, r4);
1083
1084 // Allocate the local and temporary register file on the stack.
1085 Label stack_overflow;
1086 {
1087 // Load frame size from the BytecodeArray object.
1088 __ ldr(r4, FieldMemOperand(kInterpreterBytecodeArrayRegister,
1089 BytecodeArray::kFrameSizeOffset));
1090
1091 // Do a stack check to ensure we don't go over the limit.
1092 __ sub(r9, sp, Operand(r4));
1093 __ LoadStackLimit(r2, StackLimitKind::kRealStackLimit);
1094 __ cmp(r9, Operand(r2));
1095 __ b(lo, &stack_overflow);
1096
1097 // If ok, push undefined as the initial value for all register file entries.
1098 Label loop_header;
1099 Label loop_check;
1100 __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
1101 __ b(&loop_check, al);
1102 __ bind(&loop_header);
1103 // TODO(rmcilroy): Consider doing more than one push per loop iteration.
1104 __ push(kInterpreterAccumulatorRegister);
1105 // Continue loop if not done.
1106 __ bind(&loop_check);
1107 __ sub(r4, r4, Operand(kPointerSize), SetCC);
1108 __ b(&loop_header, ge);
1109 }
1110
1111 // If the bytecode array has a valid incoming new target or generator object
1112 // register, initialize it with incoming value which was passed in r3.
1113 __ ldr(r9, FieldMemOperand(
1114 kInterpreterBytecodeArrayRegister,
1115 BytecodeArray::kIncomingNewTargetOrGeneratorRegisterOffset));
1116 __ cmp(r9, Operand::Zero());
1117 __ str(r3, MemOperand(fp, r9, LSL, kPointerSizeLog2), ne);
1118
1119 // Perform interrupt stack check.
1120 // TODO(solanes): Merge with the real stack limit check above.
1121 Label stack_check_interrupt, after_stack_check_interrupt;
1122 __ LoadStackLimit(r4, StackLimitKind::kInterruptStackLimit);
1123 __ cmp(sp, r4);
1124 __ b(lo, &stack_check_interrupt);
1125 __ bind(&after_stack_check_interrupt);
1126
1127 // The accumulator is already loaded with undefined.
1128
1129 // Load the dispatch table into a register and dispatch to the bytecode
1130 // handler at the current bytecode offset.
1131 Label do_dispatch;
1132 __ bind(&do_dispatch);
1133 __ Move(
1134 kInterpreterDispatchTableRegister,
1135 ExternalReference::interpreter_dispatch_table_address(masm->isolate()));
1136 __ ldrb(r4, MemOperand(kInterpreterBytecodeArrayRegister,
1137 kInterpreterBytecodeOffsetRegister));
1138 __ ldr(
1139 kJavaScriptCallCodeStartRegister,
1140 MemOperand(kInterpreterDispatchTableRegister, r4, LSL, kPointerSizeLog2));
1141 __ Call(kJavaScriptCallCodeStartRegister);
1142 masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
1143
1144 // Any returns to the entry trampoline are either due to the return bytecode
1145 // or the interpreter tail calling a builtin and then a dispatch.
1146
1147 // Get bytecode array and bytecode offset from the stack frame.
1148 __ ldr(kInterpreterBytecodeArrayRegister,
1149 MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
1150 __ ldr(kInterpreterBytecodeOffsetRegister,
1151 MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
1152 __ SmiUntag(kInterpreterBytecodeOffsetRegister);
1153
1154 // Either return, or advance to the next bytecode and dispatch.
1155 Label do_return;
1156 __ ldrb(r1, MemOperand(kInterpreterBytecodeArrayRegister,
1157 kInterpreterBytecodeOffsetRegister));
1158 AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
1159 kInterpreterBytecodeOffsetRegister, r1, r2, r3,
1160 &do_return);
1161 __ jmp(&do_dispatch);
1162
1163 __ bind(&do_return);
1164 // The return value is in r0.
1165 LeaveInterpreterFrame(masm, r2, r4);
1166 __ Jump(lr);
1167
1168 __ bind(&stack_check_interrupt);
1169 // Modify the bytecode offset in the stack to be kFunctionEntryBytecodeOffset
1170 // for the call to the StackGuard.
1171 __ mov(kInterpreterBytecodeOffsetRegister,
1172 Operand(Smi::FromInt(BytecodeArray::kHeaderSize - kHeapObjectTag +
1173 kFunctionEntryBytecodeOffset)));
1174 __ str(kInterpreterBytecodeOffsetRegister,
1175 MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
1176 __ CallRuntime(Runtime::kStackGuard);
1177
1178 // After the call, restore the bytecode array, bytecode offset and accumulator
1179 // registers again. Also, restore the bytecode offset in the stack to its
1180 // previous value.
1181 __ ldr(kInterpreterBytecodeArrayRegister,
1182 MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
1183 __ mov(kInterpreterBytecodeOffsetRegister,
1184 Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
1185 __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
1186
1187 __ SmiTag(r4, kInterpreterBytecodeOffsetRegister);
1188 __ str(r4, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
1189
1190 __ jmp(&after_stack_check_interrupt);
1191
1192 __ bind(&has_optimized_code_or_marker);
1193 Label maybe_has_optimized_code;
1194
1195 // Check if optimized code is available
1196 __ tst(
1197 optimization_state,
1198 Operand(FeedbackVector::kHasCompileOptimizedOrLogFirstExecutionMarker));
1199 __ b(eq, &maybe_has_optimized_code);
1200
1201 Register optimization_marker = optimization_state;
1202 __ DecodeField<FeedbackVector::OptimizationMarkerBits>(optimization_marker);
1203 MaybeOptimizeCode(masm, feedback_vector, optimization_marker);
1204 // Fall through if there's no runnable optimized code.
  __ jmp(&not_optimized);
1206
1207 __ bind(&maybe_has_optimized_code);
1208 Register optimized_code_entry = optimization_state;
1209 __ ldr(optimization_marker,
1210 FieldMemOperand(feedback_vector,
1211 FeedbackVector::kMaybeOptimizedCodeOffset));
1212 TailCallOptimizedCodeSlot(masm, optimized_code_entry, r6);
1213
1214 __ bind(&compile_lazy);
1215 GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
1216
1217 __ bind(&stack_overflow);
1218 __ CallRuntime(Runtime::kThrowStackOverflow);
1219 __ bkpt(0); // Should not return.
1220 }
1221
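// Adjusts |start_address| to point at the argument with the lowest address
// and pushes all |num_args| arguments onto the stack via PushArray in reverse
// order. Clobbers |start_address| and |scratch|.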
static void Generate_InterpreterPushArgs(MacroAssembler* masm,
1223 Register num_args,
1224 Register start_address,
1225 Register scratch) {
1226 // Find the argument with lowest address.
1227 __ sub(scratch, num_args, Operand(1));
1228 __ mov(scratch, Operand(scratch, LSL, kSystemPointerSizeLog2));
1229 __ sub(start_address, start_address, scratch);
1230 // Push the arguments.
1231 __ PushArray(start_address, num_args, scratch,
1232 TurboAssembler::PushArrayOrder::kReverse);
1233 }
1234
1235 // static
void Builtins::Generate_InterpreterPushArgsThenCallImpl(
1237 MacroAssembler* masm, ConvertReceiverMode receiver_mode,
1238 InterpreterPushArgsMode mode) {
1239 DCHECK(mode != InterpreterPushArgsMode::kArrayFunction);
1240 // ----------- S t a t e -------------
1241 // -- r0 : the number of arguments (not including the receiver)
1242 // -- r2 : the address of the first argument to be pushed. Subsequent
1243 // arguments should be consecutive above this, in the same order as
1244 // they are to be pushed onto the stack.
1245 // -- r1 : the target to call (can be any Object).
1246 // -----------------------------------
1247 Label stack_overflow;
1248
1249 if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
1250 // The spread argument should not be pushed.
1251 __ sub(r0, r0, Operand(1));
1252 }
1253
1254 __ add(r3, r0, Operand(1)); // Add one for receiver.
1255
1256 __ StackOverflowCheck(r3, r4, &stack_overflow);
1257
1258 if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
1259 // Don't copy receiver. Argument count is correct.
1260 __ mov(r3, r0);
1261 }
1262
1263 // Push the arguments. r2 and r4 will be modified.
1264 Generate_InterpreterPushArgs(masm, r3, r2, r4);
1265
1266 // Push "undefined" as the receiver arg if we need to.
1267 if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
1268 __ PushRoot(RootIndex::kUndefinedValue);
1269 }
1270
1271 if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
1272 // Pass the spread in the register r2.
1273 // r2 already points to the penultimate argument, the spread
1274 // lies in the next interpreter register.
1275 __ sub(r2, r2, Operand(kSystemPointerSize));
1276 __ ldr(r2, MemOperand(r2));
1277 }
1278
1279 // Call the target.
1280 if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
1281 __ Jump(BUILTIN_CODE(masm->isolate(), CallWithSpread),
1282 RelocInfo::CODE_TARGET);
1283 } else {
1284 __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny),
1285 RelocInfo::CODE_TARGET);
1286 }
1287
1288 __ bind(&stack_overflow);
1289 {
1290 __ TailCallRuntime(Runtime::kThrowStackOverflow);
1291 // Unreachable code.
1292 __ bkpt(0);
1293 }
1294 }
1295
1296 // static
void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
1298 MacroAssembler* masm, InterpreterPushArgsMode mode) {
1299 // ----------- S t a t e -------------
1300 // -- r0 : argument count (not including receiver)
1301 // -- r3 : new target
1302 // -- r1 : constructor to call
1303 // -- r2 : allocation site feedback if available, undefined otherwise.
1304 // -- r4 : address of the first argument
1305 // -----------------------------------
1306 Label stack_overflow;
1307
1308 __ add(r5, r0, Operand(1)); // Add one for receiver.
1309
1310 __ StackOverflowCheck(r5, r6, &stack_overflow);
1311
1312 if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
1313 // The spread argument should not be pushed.
1314 __ sub(r0, r0, Operand(1));
1315 }
1316
1317 // Push the arguments. r4 and r5 will be modified.
1318 Generate_InterpreterPushArgs(masm, r0, r4, r5);
1319
1320 // Push a slot for the receiver to be constructed.
1321 __ mov(r5, Operand::Zero());
1322 __ push(r5);
1323
1324 if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
1325 // Pass the spread in the register r2.
1326 // r4 already points to the penultimate argument, the spread
1327 // lies in the next interpreter register.
1328 __ sub(r4, r4, Operand(kSystemPointerSize));
1329 __ ldr(r2, MemOperand(r4));
1330 } else {
1331 __ AssertUndefinedOrAllocationSite(r2, r5);
1332 }
1333
1334 if (mode == InterpreterPushArgsMode::kArrayFunction) {
1335 __ AssertFunction(r1);
1336
1337 // Tail call to the array construct stub (still in the caller
1338 // context at this point).
1339 Handle<Code> code = BUILTIN_CODE(masm->isolate(), ArrayConstructorImpl);
1340 __ Jump(code, RelocInfo::CODE_TARGET);
1341 } else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
1342 // Call the constructor with r0, r1, and r3 unmodified.
1343 __ Jump(BUILTIN_CODE(masm->isolate(), ConstructWithSpread),
1344 RelocInfo::CODE_TARGET);
1345 } else {
1346 DCHECK_EQ(InterpreterPushArgsMode::kOther, mode);
1347 // Call the constructor with r0, r1, and r3 unmodified.
1348 __ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET);
1349 }
1350
1351 __ bind(&stack_overflow);
1352 {
1353 __ TailCallRuntime(Runtime::kThrowStackOverflow);
1354 // Unreachable code.
1355 __ bkpt(0);
1356 }
1357 }
1358
static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
1360 // Set the return address to the correct point in the interpreter entry
1361 // trampoline.
1362 Label builtin_trampoline, trampoline_loaded;
1363 Smi interpreter_entry_return_pc_offset(
1364 masm->isolate()->heap()->interpreter_entry_return_pc_offset());
1365 DCHECK_NE(interpreter_entry_return_pc_offset, Smi::zero());
1366
1367 // If the SFI function_data is an InterpreterData, the function will have a
1368 // custom copy of the interpreter entry trampoline for profiling. If so,
1369 // get the custom trampoline, otherwise grab the entry address of the global
1370 // trampoline.
1371 __ ldr(r2, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
1372 __ ldr(r2, FieldMemOperand(r2, JSFunction::kSharedFunctionInfoOffset));
1373 __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kFunctionDataOffset));
1374 __ CompareObjectType(r2, kInterpreterDispatchTableRegister,
1375 kInterpreterDispatchTableRegister,
1376 INTERPRETER_DATA_TYPE);
1377 __ b(ne, &builtin_trampoline);
1378
1379 __ ldr(r2,
1380 FieldMemOperand(r2, InterpreterData::kInterpreterTrampolineOffset));
1381 __ add(r2, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
1382 __ b(&trampoline_loaded);
1383
1384 __ bind(&builtin_trampoline);
1385 __ Move(r2, ExternalReference::
1386 address_of_interpreter_entry_trampoline_instruction_start(
1387 masm->isolate()));
1388 __ ldr(r2, MemOperand(r2));
1389
1390 __ bind(&trampoline_loaded);
1391 __ add(lr, r2, Operand(interpreter_entry_return_pc_offset.value()));
1392
1393 // Initialize the dispatch table register.
1394 __ Move(
1395 kInterpreterDispatchTableRegister,
1396 ExternalReference::interpreter_dispatch_table_address(masm->isolate()));
1397
1398 // Get the bytecode array pointer from the frame.
1399 __ ldr(kInterpreterBytecodeArrayRegister,
1400 MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
1401
1402 if (FLAG_debug_code) {
1403 // Check function data field is actually a BytecodeArray object.
1404 __ SmiTst(kInterpreterBytecodeArrayRegister);
1405 __ Assert(
1406 ne, AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
1407 __ CompareObjectType(kInterpreterBytecodeArrayRegister, r1, no_reg,
1408 BYTECODE_ARRAY_TYPE);
1409 __ Assert(
1410 eq, AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
1411 }
1412
1413 // Get the target bytecode offset from the frame.
1414 __ ldr(kInterpreterBytecodeOffsetRegister,
1415 MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
1416 __ SmiUntag(kInterpreterBytecodeOffsetRegister);
1417
1418 if (FLAG_debug_code) {
1419 Label okay;
1420 __ cmp(kInterpreterBytecodeOffsetRegister,
1421 Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
1422 __ b(ge, &okay);
1423 __ bkpt(0);
1424 __ bind(&okay);
1425 }
1426
1427 // Dispatch to the target bytecode.
1428 UseScratchRegisterScope temps(masm);
1429 Register scratch = temps.Acquire();
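  // The current bytecode (a single byte) is used as the index into the
  // dispatch table; each table entry is a pointer-sized handler entry address.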
1430 __ ldrb(scratch, MemOperand(kInterpreterBytecodeArrayRegister,
1431 kInterpreterBytecodeOffsetRegister));
1432 __ ldr(kJavaScriptCallCodeStartRegister,
1433 MemOperand(kInterpreterDispatchTableRegister, scratch, LSL,
1434 kPointerSizeLog2));
1435 __ Jump(kJavaScriptCallCodeStartRegister);
1436 }
1437
1438 void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
1439 // Get bytecode array and bytecode offset from the stack frame.
1440 __ ldr(kInterpreterBytecodeArrayRegister,
1441 MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
1442 __ ldr(kInterpreterBytecodeOffsetRegister,
1443 MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
1444 __ SmiUntag(kInterpreterBytecodeOffsetRegister);
1445
1446 Label enter_bytecode, function_entry_bytecode;
1447 __ cmp(kInterpreterBytecodeOffsetRegister,
1448 Operand(BytecodeArray::kHeaderSize - kHeapObjectTag +
1449 kFunctionEntryBytecodeOffset));
1450 __ b(eq, &function_entry_bytecode);
1451
1452 // Load the current bytecode.
1453 __ ldrb(r1, MemOperand(kInterpreterBytecodeArrayRegister,
1454 kInterpreterBytecodeOffsetRegister));
1455
1456 // Advance to the next bytecode.
1457 Label if_return;
1458 AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
1459 kInterpreterBytecodeOffsetRegister, r1, r2, r3,
1460 &if_return);
1461
1462 __ bind(&enter_bytecode);
1463   // Convert the new bytecode offset to a Smi and save it in the stack frame.
1464 __ SmiTag(r2, kInterpreterBytecodeOffsetRegister);
1465 __ str(r2, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
1466
1467 Generate_InterpreterEnterBytecode(masm);
1468
1469 __ bind(&function_entry_bytecode);
1470 // If the code deoptimizes during the implicit function entry stack interrupt
1471 // check, it will have a bailout ID of kFunctionEntryBytecodeOffset, which is
1472 // not a valid bytecode offset. Detect this case and advance to the first
1473 // actual bytecode.
1474 __ mov(kInterpreterBytecodeOffsetRegister,
1475 Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
1476 __ b(&enter_bytecode);
1477
1478 // We should never take the if_return path.
1479 __ bind(&if_return);
1480 __ Abort(AbortReason::kInvalidBytecodeAdvance);
1481 }
1482
1483 void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
1484 Generate_InterpreterEnterBytecode(masm);
1485 }
1486
1487 namespace {
1488 void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
1489 bool java_script_builtin,
1490 bool with_result) {
1491 const RegisterConfiguration* config(RegisterConfiguration::Default());
1492 int allocatable_register_count = config->num_allocatable_general_registers();
1493 UseScratchRegisterScope temps(masm);
1494 Register scratch = temps.Acquire(); // Temp register is not allocatable.
1495 if (with_result) {
1496 if (java_script_builtin) {
1497 __ mov(scratch, r0);
1498 } else {
1499 // Overwrite the hole inserted by the deoptimizer with the return value
1500 // from the LAZY deopt point.
1501 __ str(
1502 r0,
1503 MemOperand(
1504 sp, config->num_allocatable_general_registers() * kPointerSize +
1505 BuiltinContinuationFrameConstants::kFixedFrameSize));
1506 }
1507 }
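  // Restore the allocatable registers that the deoptimizer pushed, popping
  // them in reverse allocation order. The argument count register was pushed
  // as a Smi and must be untagged for JavaScript builtins.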
1508 for (int i = allocatable_register_count - 1; i >= 0; --i) {
1509 int code = config->GetAllocatableGeneralCode(i);
1510 __ Pop(Register::from_code(code));
1511 if (java_script_builtin && code == kJavaScriptCallArgCountRegister.code()) {
1512 __ SmiUntag(Register::from_code(code));
1513 }
1514 }
1515 if (java_script_builtin && with_result) {
1516     // Overwrite the hole inserted by the deoptimizer with the return value
1517     // from the LAZY deopt point. r0 contains the arguments count; the return
1518     // value from LAZY is always the last argument.
1519 __ add(r0, r0, Operand(BuiltinContinuationFrameConstants::kFixedSlotCount));
1520 __ str(scratch, MemOperand(sp, r0, LSL, kPointerSizeLog2));
1521 // Recover arguments count.
1522 __ sub(r0, r0, Operand(BuiltinContinuationFrameConstants::kFixedSlotCount));
1523 }
1524 __ ldr(fp, MemOperand(
1525 sp, BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
1526 // Load builtin index (stored as a Smi) and use it to get the builtin start
1527 // address from the builtins table.
1528 Register builtin = scratch;
1529 __ Pop(builtin);
1530 __ add(sp, sp,
1531 Operand(BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
1532 __ Pop(lr);
1533 __ LoadEntryFromBuiltinIndex(builtin);
1534 __ bx(builtin);
1535 }
1536 } // namespace
1537
1538 void Builtins::Generate_ContinueToCodeStubBuiltin(MacroAssembler* masm) {
1539 Generate_ContinueToBuiltinHelper(masm, false, false);
1540 }
1541
1542 void Builtins::Generate_ContinueToCodeStubBuiltinWithResult(
1543 MacroAssembler* masm) {
1544 Generate_ContinueToBuiltinHelper(masm, false, true);
1545 }
1546
1547 void Builtins::Generate_ContinueToJavaScriptBuiltin(MacroAssembler* masm) {
1548 Generate_ContinueToBuiltinHelper(masm, true, false);
1549 }
1550
1551 void Builtins::Generate_ContinueToJavaScriptBuiltinWithResult(
1552 MacroAssembler* masm) {
1553 Generate_ContinueToBuiltinHelper(masm, true, true);
1554 }
1555
1556 void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
1557 {
1558 FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
1559 __ CallRuntime(Runtime::kNotifyDeoptimized);
1560 }
1561
1562 DCHECK_EQ(kInterpreterAccumulatorRegister.code(), r0.code());
1563 __ pop(r0);
1564 __ Ret();
1565 }
1566
1567 void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
1568 {
1569 FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
1570 __ CallRuntime(Runtime::kCompileForOnStackReplacement);
1571 }
1572
1573 // If the code object is null, just return to the caller.
1574 Label skip;
1575 __ cmp(r0, Operand(Smi::zero()));
1576 __ b(ne, &skip);
1577 __ Ret();
1578
1579 __ bind(&skip);
1580
1581   // Drop the handler frame that is sitting on top of the actual
1582   // JavaScript frame. This is the case when OSR is triggered from bytecode.
1583 __ LeaveFrame(StackFrame::STUB);
1584
1585 // Load deoptimization data from the code object.
1586 // <deopt_data> = <code>[#deoptimization_data_offset]
1587 __ ldr(r1, FieldMemOperand(r0, Code::kDeoptimizationDataOffset));
1588
1589 {
1590 ConstantPoolUnavailableScope constant_pool_unavailable(masm);
1591 __ add(r0, r0, Operand(Code::kHeaderSize - kHeapObjectTag)); // Code start
1592
1593 // Load the OSR entrypoint offset from the deoptimization data.
1594 // <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
1595 __ ldr(r1, FieldMemOperand(r1, FixedArray::OffsetOfElementAt(
1596 DeoptimizationData::kOsrPcOffsetIndex)));
1597
1598 // Compute the target address = code start + osr_offset
1599 __ add(lr, r0, Operand::SmiUntag(r1));
1600
1601 // And "return" to the OSR entry point of the function.
1602 __ Ret();
1603 }
1604 }
1605
1606 // static
1607 void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
1608 // ----------- S t a t e -------------
1609 // -- r0 : argc
1610 // -- sp[0] : receiver
1611 // -- sp[4] : thisArg
1612 // -- sp[8] : argArray
1613 // -----------------------------------
1614
1615 // 1. Load receiver into r1, argArray into r2 (if present), remove all
1616 // arguments from the stack (including the receiver), and push thisArg (if
1617 // present) instead.
1618 {
1619 __ LoadRoot(r5, RootIndex::kUndefinedValue);
1620 __ mov(r2, r5);
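    // The loads below are predicated (ge) on the argument count checks, so r5
    // (thisArg) and r2 (argArray) keep their undefined defaults when fewer
    // arguments were passed.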
1621 __ ldr(r1, MemOperand(sp, 0)); // receiver
1622 __ cmp(r0, Operand(1));
1623 __ ldr(r5, MemOperand(sp, kSystemPointerSize), ge); // thisArg
1624 __ cmp(r0, Operand(2), ge);
1625 __ ldr(r2, MemOperand(sp, 2 * kSystemPointerSize), ge); // argArray
1626 __ add(sp, sp, Operand(r0, LSL, kSystemPointerSizeLog2));
1627 __ str(r5, MemOperand(sp, 0));
1628 }
1629
1630 // ----------- S t a t e -------------
1631 // -- r2 : argArray
1632 // -- r1 : receiver
1633 // -- sp[0] : thisArg
1634 // -----------------------------------
1635
1636 // 2. We don't need to check explicitly for callable receiver here,
1637 // since that's the first thing the Call/CallWithArrayLike builtins
1638 // will do.
1639
1640 // 3. Tail call with no arguments if argArray is null or undefined.
1641 Label no_arguments;
1642 __ JumpIfRoot(r2, RootIndex::kNullValue, &no_arguments);
1643 __ JumpIfRoot(r2, RootIndex::kUndefinedValue, &no_arguments);
1644
1645 // 4a. Apply the receiver to the given argArray.
1646 __ Jump(BUILTIN_CODE(masm->isolate(), CallWithArrayLike),
1647 RelocInfo::CODE_TARGET);
1648
1649 // 4b. The argArray is either null or undefined, so we tail call without any
1650 // arguments to the receiver.
1651 __ bind(&no_arguments);
1652 {
1653 __ mov(r0, Operand(0));
1654 __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
1655 }
1656 }
1657
1658 // static
1659 void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
1660 // 1. Get the callable to call (passed as receiver) from the stack.
1661 __ Pop(r1);
1662
1663 // 2. Make sure we have at least one argument.
1664 // r0: actual number of arguments
1665 {
1666 Label done;
1667 __ cmp(r0, Operand::Zero());
1668 __ b(ne, &done);
1669 __ PushRoot(RootIndex::kUndefinedValue);
1670 __ add(r0, r0, Operand(1));
1671 __ bind(&done);
1672 }
1673
1674 // 3. Adjust the actual number of arguments.
1675 __ sub(r0, r0, Operand(1));
1676
1677 // 4. Call the callable.
1678 __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
1679 }
1680
1681 void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
1682 // ----------- S t a t e -------------
1683 // -- r0 : argc
1684 // -- sp[0] : receiver
1685 // -- sp[4] : target (if argc >= 1)
1686 // -- sp[8] : thisArgument (if argc >= 2)
1687 // -- sp[12] : argumentsList (if argc == 3)
1688 // -----------------------------------
1689
1690 // 1. Load target into r1 (if present), argumentsList into r2 (if present),
1691 // remove all arguments from the stack (including the receiver), and push
1692 // thisArgument (if present) instead.
1693 {
1694 __ LoadRoot(r1, RootIndex::kUndefinedValue);
1695 __ mov(r5, r1);
1696 __ mov(r2, r1);
1697 __ cmp(r0, Operand(1));
1698 __ ldr(r1, MemOperand(sp, kSystemPointerSize), ge); // target
1699 __ cmp(r0, Operand(2), ge);
1700 __ ldr(r5, MemOperand(sp, 2 * kSystemPointerSize), ge); // thisArgument
1701 __ cmp(r0, Operand(3), ge);
1702 __ ldr(r2, MemOperand(sp, 3 * kSystemPointerSize), ge); // argumentsList
1703 __ add(sp, sp, Operand(r0, LSL, kSystemPointerSizeLog2));
1704 __ str(r5, MemOperand(sp, 0));
1705 }
1706
1707 // ----------- S t a t e -------------
1708 // -- r2 : argumentsList
1709 // -- r1 : target
1710 // -- sp[0] : thisArgument
1711 // -----------------------------------
1712
1713 // 2. We don't need to check explicitly for callable target here,
1714 // since that's the first thing the Call/CallWithArrayLike builtins
1715 // will do.
1716
1717 // 3. Apply the target to the given argumentsList.
1718 __ Jump(BUILTIN_CODE(masm->isolate(), CallWithArrayLike),
1719 RelocInfo::CODE_TARGET);
1720 }
1721
1722 void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
1723 // ----------- S t a t e -------------
1724 // -- r0 : argc
1725 // -- sp[0] : receiver
1726 // -- sp[4] : target
1727 // -- sp[8] : argumentsList
1728 // -- sp[12] : new.target (optional)
1729 // -----------------------------------
1730
1731   // 1. Load target into r1 (if present), argumentsList into r2 (if present),
1732   // new.target into r3 (if present, otherwise use target), remove all
1733   // arguments from the stack (including the receiver), and push undefined as
1734   // the receiver instead.
1735 {
1736 __ LoadRoot(r1, RootIndex::kUndefinedValue);
1737 __ mov(r2, r1);
1738 __ mov(r4, r1);
1739 __ cmp(r0, Operand(1));
1740 __ ldr(r1, MemOperand(sp, kSystemPointerSize), ge); // target
1741 __ mov(r3, r1); // new.target defaults to target
1742 __ cmp(r0, Operand(2), ge);
1743 __ ldr(r2, MemOperand(sp, 2 * kSystemPointerSize), ge); // argumentsList
1744 __ cmp(r0, Operand(3), ge);
1745 __ ldr(r3, MemOperand(sp, 3 * kSystemPointerSize), ge); // new.target
1746 __ add(sp, sp, Operand(r0, LSL, kSystemPointerSizeLog2));
1747     __ str(r4, MemOperand(sp, 0));  // Store undefined as the receiver.
1748 }
1749
1750 // ----------- S t a t e -------------
1751 // -- r2 : argumentsList
1752 // -- r3 : new.target
1753 // -- r1 : target
1754 // -- sp[0] : receiver (undefined)
1755 // -----------------------------------
1756
1757 // 2. We don't need to check explicitly for constructor target here,
1758 // since that's the first thing the Construct/ConstructWithArrayLike
1759 // builtins will do.
1760
1761 // 3. We don't need to check explicitly for constructor new.target here,
1762 // since that's the second thing the Construct/ConstructWithArrayLike
1763 // builtins will do.
1764
1765 // 4. Construct the target with the given new.target and argumentsList.
1766 __ Jump(BUILTIN_CODE(masm->isolate(), ConstructWithArrayLike),
1767 RelocInfo::CODE_TARGET);
1768 }
1769
1770 static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
1771 __ SmiTag(r0);
1772 __ mov(r4, Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
1773 __ stm(db_w, sp, r0.bit() | r1.bit() | r4.bit() |
1774 fp.bit() | lr.bit());
1775 __ Push(Smi::zero()); // Padding.
1776 __ add(fp, sp,
1777 Operand(ArgumentsAdaptorFrameConstants::kFixedFrameSizeFromFp));
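  // fp now points at the saved caller fp; the frame marker, function,
  // Smi-tagged argument count and padding slot sit below it.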
1778 }
1779
1780 static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
1781 // ----------- S t a t e -------------
1782 // -- r0 : result being passed through
1783 // -----------------------------------
1784 // Get the number of arguments passed (as a smi), tear down the frame and
1785 // then tear down the parameters.
1786 __ ldr(r1, MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
1787
1788 __ LeaveFrame(StackFrame::ARGUMENTS_ADAPTOR);
1789 __ add(sp, sp, Operand::PointerOffsetFromSmiKey(r1));
1790 __ add(sp, sp, Operand(kPointerSize)); // adjust for receiver
1791 }
1792
1793 // static
1794 void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
1795 Handle<Code> code) {
1796 // ----------- S t a t e -------------
1797 // -- r1 : target
1798 // -- r0 : number of parameters on the stack (not including the receiver)
1799 // -- r2 : arguments list (a FixedArray)
1800 // -- r4 : len (number of elements to push from args)
1801 // -- r3 : new.target (for [[Construct]])
1802 // -----------------------------------
1803 Register scratch = r8;
1804
1805 if (masm->emit_debug_code()) {
1806 // Allow r2 to be a FixedArray, or a FixedDoubleArray if r4 == 0.
1807 Label ok, fail;
1808 __ AssertNotSmi(r2);
1809 __ ldr(scratch, FieldMemOperand(r2, HeapObject::kMapOffset));
1810 __ ldrh(r6, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
1811 __ cmp(r6, Operand(FIXED_ARRAY_TYPE));
1812 __ b(eq, &ok);
1813 __ cmp(r6, Operand(FIXED_DOUBLE_ARRAY_TYPE));
1814 __ b(ne, &fail);
1815 __ cmp(r4, Operand(0));
1816 __ b(eq, &ok);
1817 // Fall through.
1818 __ bind(&fail);
1819 __ Abort(AbortReason::kOperandIsNotAFixedArray);
1820
1821 __ bind(&ok);
1822 }
1823
1824 Label stack_overflow;
1825 __ StackOverflowCheck(r4, scratch, &stack_overflow);
1826
1827   // Move the arguments already in the stack,
1828   // including the receiver (the return address stays in lr on ARM).
1829 {
1830 Label copy, check;
1831     Register num = r5, src = r6, dest = r9;  // r7 and r10 are cp and root; r8 is scratch.
1832 __ mov(src, sp);
1833 // Update stack pointer.
1834 __ lsl(scratch, r4, Operand(kSystemPointerSizeLog2));
1835 __ AllocateStackSpace(scratch);
1836 __ mov(dest, sp);
1837 __ mov(num, r0);
1838 __ b(&check);
1839     __ bind(&copy);
1840 __ ldr(scratch, MemOperand(src, kSystemPointerSize, PostIndex));
1841 __ str(scratch, MemOperand(dest, kSystemPointerSize, PostIndex));
1842 __ sub(num, num, Operand(1), SetCC);
1843 __ bind(&check);
1844     __ b(ge, &copy);
1845 }
1846
1847 // Copy arguments onto the stack (thisArgument is already on the stack).
1848 {
1849 __ mov(r6, Operand(0));
1850 __ LoadRoot(r5, RootIndex::kTheHoleValue);
1851 Label done, loop;
1852 __ bind(&loop);
1853 __ cmp(r6, r4);
1854 __ b(eq, &done);
1855 __ add(scratch, r2, Operand(r6, LSL, kTaggedSizeLog2));
1856 __ ldr(scratch, FieldMemOperand(scratch, FixedArray::kHeaderSize));
1857 __ cmp(scratch, r5);
1858 // Turn the hole into undefined as we go.
1859 __ LoadRoot(scratch, RootIndex::kUndefinedValue, eq);
1860 __ str(scratch, MemOperand(r9, kSystemPointerSize, PostIndex));
1861 __ add(r6, r6, Operand(1));
1862 __ b(&loop);
1863 __ bind(&done);
1864 __ add(r0, r0, r6);
1865 }
1866
1867 // Tail-call to the actual Call or Construct builtin.
1868 __ Jump(code, RelocInfo::CODE_TARGET);
1869
1870 __ bind(&stack_overflow);
1871 __ TailCallRuntime(Runtime::kThrowStackOverflow);
1872 }
1873
1874 // static
1875 void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
1876 CallOrConstructMode mode,
1877 Handle<Code> code) {
1878 // ----------- S t a t e -------------
1879 // -- r0 : the number of arguments (not including the receiver)
1880 // -- r3 : the new.target (for [[Construct]] calls)
1881 // -- r1 : the target to call (can be any Object)
1882 // -- r2 : start index (to support rest parameters)
1883 // -----------------------------------
1884
1885 Register scratch = r6;
1886
1887 // Check if new.target has a [[Construct]] internal method.
1888 if (mode == CallOrConstructMode::kConstruct) {
1889 Label new_target_constructor, new_target_not_constructor;
1890 __ JumpIfSmi(r3, &new_target_not_constructor);
1891 __ ldr(scratch, FieldMemOperand(r3, HeapObject::kMapOffset));
1892 __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
1893 __ tst(scratch, Operand(Map::Bits1::IsConstructorBit::kMask));
1894 __ b(ne, &new_target_constructor);
1895 __ bind(&new_target_not_constructor);
1896 {
1897 FrameScope scope(masm, StackFrame::MANUAL);
1898 __ EnterFrame(StackFrame::INTERNAL);
1899 __ Push(r3);
1900 __ CallRuntime(Runtime::kThrowNotConstructor);
1901 }
1902 __ bind(&new_target_constructor);
1903 }
1904
1905 #ifdef V8_NO_ARGUMENTS_ADAPTOR
1906 // TODO(victorgomes): Remove this copy when all the arguments adaptor frame
1907 // code is erased.
1908 __ mov(r4, fp);
1909 __ ldr(r5, MemOperand(fp, StandardFrameConstants::kArgCOffset));
1910 #else
1911 // Check if we have an arguments adaptor frame below the function frame.
1912 Label arguments_adaptor, arguments_done;
1913 __ ldr(r4, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
1914 __ ldr(scratch,
1915 MemOperand(r4, CommonFrameConstants::kContextOrFrameTypeOffset));
1916 __ cmp(scratch,
1917 Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
1918 __ b(eq, &arguments_adaptor);
1919 {
1920 __ ldr(r5, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
1921 __ ldr(r5, FieldMemOperand(r5, JSFunction::kSharedFunctionInfoOffset));
1922 __ ldrh(r5, FieldMemOperand(
1923 r5, SharedFunctionInfo::kFormalParameterCountOffset));
1924 __ mov(r4, fp);
1925 }
1926 __ b(&arguments_done);
1927 __ bind(&arguments_adaptor);
1928 {
1929 // Load the length from the ArgumentsAdaptorFrame.
1930 __ ldr(r5, MemOperand(r4, ArgumentsAdaptorFrameConstants::kLengthOffset));
1931 __ SmiUntag(r5);
1932 }
1933 __ bind(&arguments_done);
1934 #endif
1935
1936 Label stack_done, stack_overflow;
1937 __ sub(r5, r5, r2, SetCC);
1938 __ b(le, &stack_done);
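  // r5 now holds the number of arguments to forward (caller argc minus the
  // start index); if it is not positive there is nothing to copy.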
1939 {
1940 // ----------- S t a t e -------------
1941 // -- r0 : the number of arguments already in the stack (not including the
1942 // receiver)
1943 // -- r1 : the target to call (can be any Object)
1944 // -- r2 : start index (to support rest parameters)
1945 // -- r3 : the new.target (for [[Construct]] calls)
1946 // -- r4 : point to the caller stack frame
1947 // -- r5 : number of arguments to copy, i.e. arguments count - start index
1948 // -----------------------------------
1949
1950 // Check for stack overflow.
1951 __ StackOverflowCheck(r5, scratch, &stack_overflow);
1952
1953 // Forward the arguments from the caller frame.
1954 // Point to the first argument to copy (skipping the receiver).
1955 __ add(r4, r4,
1956 Operand(CommonFrameConstants::kFixedFrameSizeAboveFp +
1957 kSystemPointerSize));
1958 __ add(r4, r4, Operand(r2, LSL, kSystemPointerSizeLog2));
1959
1960     // Move the arguments already in the stack,
1961     // including the receiver (the return address stays in lr on ARM).
1962 {
1963 Label copy, check;
1964 Register num = r8, src = r9,
1965 dest = r2; // r7 and r10 are context and root.
1966 __ mov(src, sp);
1967 // Update stack pointer.
1968 __ lsl(scratch, r5, Operand(kSystemPointerSizeLog2));
1969 __ AllocateStackSpace(scratch);
1970 __ mov(dest, sp);
1971 __ mov(num, r0);
1972 __ b(&check);
1973       __ bind(&copy);
1974 __ ldr(scratch, MemOperand(src, kSystemPointerSize, PostIndex));
1975 __ str(scratch, MemOperand(dest, kSystemPointerSize, PostIndex));
1976 __ sub(num, num, Operand(1), SetCC);
1977 __ bind(&check);
1978       __ b(ge, &copy);
1979 }
1980 // Copy arguments from the caller frame.
1981 // TODO(victorgomes): Consider using forward order as potentially more cache
1982 // friendly.
1983 {
1984 Label loop;
1985 __ add(r0, r0, r5);
1986 __ bind(&loop);
1987 {
1988 __ sub(r5, r5, Operand(1), SetCC);
1989 __ ldr(scratch, MemOperand(r4, r5, LSL, kSystemPointerSizeLog2));
1990 __ str(scratch, MemOperand(r2, r5, LSL, kSystemPointerSizeLog2));
1991 __ b(ne, &loop);
1992 }
1993 }
1994 }
1995 __ b(&stack_done);
1996 __ bind(&stack_overflow);
1997 __ TailCallRuntime(Runtime::kThrowStackOverflow);
1998 __ bind(&stack_done);
1999
2000 // Tail-call to the {code} handler.
2001 __ Jump(code, RelocInfo::CODE_TARGET);
2002 }
2003
2004 // static
2005 void Builtins::Generate_CallFunction(MacroAssembler* masm,
2006 ConvertReceiverMode mode) {
2007 // ----------- S t a t e -------------
2008 // -- r0 : the number of arguments (not including the receiver)
2009 // -- r1 : the function to call (checked to be a JSFunction)
2010 // -----------------------------------
2011 __ AssertFunction(r1);
2012
2013 // See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
2014 // Check that the function is not a "classConstructor".
2015 Label class_constructor;
2016 __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
2017 __ ldr(r3, FieldMemOperand(r2, SharedFunctionInfo::kFlagsOffset));
2018 __ tst(r3, Operand(SharedFunctionInfo::IsClassConstructorBit::kMask));
2019 __ b(ne, &class_constructor);
2020
2021 // Enter the context of the function; ToObject has to run in the function
2022 // context, and we also need to take the global proxy from the function
2023 // context in case of conversion.
2024 __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
2025 // We need to convert the receiver for non-native sloppy mode functions.
2026 Label done_convert;
2027 __ ldr(r3, FieldMemOperand(r2, SharedFunctionInfo::kFlagsOffset));
2028 __ tst(r3, Operand(SharedFunctionInfo::IsNativeBit::kMask |
2029 SharedFunctionInfo::IsStrictBit::kMask));
2030 __ b(ne, &done_convert);
2031 {
2032 // ----------- S t a t e -------------
2033 // -- r0 : the number of arguments (not including the receiver)
2034 // -- r1 : the function to call (checked to be a JSFunction)
2035 // -- r2 : the shared function info.
2036 // -- cp : the function context.
2037 // -----------------------------------
2038
2039 if (mode == ConvertReceiverMode::kNullOrUndefined) {
2040 // Patch receiver to global proxy.
2041 __ LoadGlobalProxy(r3);
2042 } else {
2043 Label convert_to_object, convert_receiver;
2044 __ ldr(r3, __ ReceiverOperand(r0));
2045 __ JumpIfSmi(r3, &convert_to_object);
2046 STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
2047 __ CompareObjectType(r3, r4, r4, FIRST_JS_RECEIVER_TYPE);
2048 __ b(hs, &done_convert);
2049 if (mode != ConvertReceiverMode::kNotNullOrUndefined) {
2050 Label convert_global_proxy;
2051 __ JumpIfRoot(r3, RootIndex::kUndefinedValue, &convert_global_proxy);
2052 __ JumpIfNotRoot(r3, RootIndex::kNullValue, &convert_to_object);
2053 __ bind(&convert_global_proxy);
2054 {
2055 // Patch receiver to global proxy.
2056 __ LoadGlobalProxy(r3);
2057 }
2058 __ b(&convert_receiver);
2059 }
2060 __ bind(&convert_to_object);
2061 {
2062 // Convert receiver using ToObject.
2063 // TODO(bmeurer): Inline the allocation here to avoid building the frame
2064 // in the fast case? (fall back to AllocateInNewSpace?)
2065 FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
2066 __ SmiTag(r0);
2067 __ Push(r0, r1);
2068 __ mov(r0, r3);
2069 __ Push(cp);
2070 __ Call(BUILTIN_CODE(masm->isolate(), ToObject),
2071 RelocInfo::CODE_TARGET);
2072 __ Pop(cp);
2073 __ mov(r3, r0);
2074 __ Pop(r0, r1);
2075 __ SmiUntag(r0);
2076 }
2077 __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
2078 __ bind(&convert_receiver);
2079 }
2080 __ str(r3, __ ReceiverOperand(r0));
2081 }
2082 __ bind(&done_convert);
2083
2084 // ----------- S t a t e -------------
2085 // -- r0 : the number of arguments (not including the receiver)
2086 // -- r1 : the function to call (checked to be a JSFunction)
2087 // -- r2 : the shared function info.
2088 // -- cp : the function context.
2089 // -----------------------------------
2090
2091 __ ldrh(r2,
2092 FieldMemOperand(r2, SharedFunctionInfo::kFormalParameterCountOffset));
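  // r2 now holds the formal parameter count. InvokeFunctionCode reconciles it
  // with the actual argument count in r0 and tail-calls the function
  // (JUMP_FUNCTION).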
2093 __ InvokeFunctionCode(r1, no_reg, r2, r0, JUMP_FUNCTION);
2094
2095 // The function is a "classConstructor", need to raise an exception.
2096 __ bind(&class_constructor);
2097 {
2098 FrameScope frame(masm, StackFrame::INTERNAL);
2099 __ push(r1);
2100 __ CallRuntime(Runtime::kThrowConstructorNonCallableError);
2101 }
2102 }
2103
2104 namespace {
2105
2106 void Generate_PushBoundArguments(MacroAssembler* masm) {
2107 // ----------- S t a t e -------------
2108 // -- r0 : the number of arguments (not including the receiver)
2109 // -- r1 : target (checked to be a JSBoundFunction)
2110 // -- r3 : new.target (only in case of [[Construct]])
2111 // -----------------------------------
2112
2113 // Load [[BoundArguments]] into r2 and length of that into r4.
2114 Label no_bound_arguments;
2115 __ ldr(r2, FieldMemOperand(r1, JSBoundFunction::kBoundArgumentsOffset));
2116 __ ldr(r4, FieldMemOperand(r2, FixedArray::kLengthOffset));
2117 __ SmiUntag(r4);
2118 __ cmp(r4, Operand(0));
2119 __ b(eq, &no_bound_arguments);
2120 {
2121 // ----------- S t a t e -------------
2122 // -- r0 : the number of arguments (not including the receiver)
2123 // -- r1 : target (checked to be a JSBoundFunction)
2124 // -- r2 : the [[BoundArguments]] (implemented as FixedArray)
2125 // -- r3 : new.target (only in case of [[Construct]])
2126 // -- r4 : the number of [[BoundArguments]]
2127 // -----------------------------------
2128
2129 Register scratch = r6;
2130
2131 {
2132 // Check the stack for overflow. We are not trying to catch interruptions
2133 // (i.e. debug break and preemption) here, so check the "real stack
2134 // limit".
2135 Label done;
2136 __ mov(scratch, Operand(r4, LSL, kSystemPointerSizeLog2));
2137 {
2138 UseScratchRegisterScope temps(masm);
2139 Register remaining_stack_size = temps.Acquire();
2140 DCHECK(!AreAliased(r0, r1, r2, r3, r4, scratch, remaining_stack_size));
2141
2142         // Compute the remaining stack space. The stack may already have
2143         // overflowed here, which makes remaining_stack_size negative.
2144 __ LoadStackLimit(remaining_stack_size,
2145 StackLimitKind::kRealStackLimit);
2146 __ sub(remaining_stack_size, sp, remaining_stack_size);
2147
2148 // Check if the arguments will overflow the stack.
2149 __ cmp(remaining_stack_size, scratch);
2150 }
2151 __ b(gt, &done);
2152 {
2153 FrameScope scope(masm, StackFrame::MANUAL);
2154 __ EnterFrame(StackFrame::INTERNAL);
2155 __ CallRuntime(Runtime::kThrowStackOverflow);
2156 }
2157 __ bind(&done);
2158 }
2159
2160 // Pop receiver.
2161 __ Pop(r5);
2162
2163 // Push [[BoundArguments]].
2164 {
2165 Label loop;
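      // The bound arguments are pushed in reverse index order, so bound
      // argument 0 ends up immediately above the receiver slot on the stack.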
2166 __ add(r0, r0, r4); // Adjust effective number of arguments.
2167 __ add(r2, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
2168 __ bind(&loop);
2169 __ sub(r4, r4, Operand(1), SetCC);
2170 __ ldr(scratch, MemOperand(r2, r4, LSL, kTaggedSizeLog2));
2171 __ Push(scratch);
2172 __ b(gt, &loop);
2173 }
2174
2175 // Push receiver.
2176 __ Push(r5);
2177 }
2178 __ bind(&no_bound_arguments);
2179 }
2180
2181 } // namespace
2182
2183 // static
2184 void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
2185 // ----------- S t a t e -------------
2186 // -- r0 : the number of arguments (not including the receiver)
2187 // -- r1 : the function to call (checked to be a JSBoundFunction)
2188 // -----------------------------------
2189 __ AssertBoundFunction(r1);
2190
2191 // Patch the receiver to [[BoundThis]].
2192 __ ldr(r3, FieldMemOperand(r1, JSBoundFunction::kBoundThisOffset));
2193 __ str(r3, __ ReceiverOperand(r0));
2194
2195 // Push the [[BoundArguments]] onto the stack.
2196 Generate_PushBoundArguments(masm);
2197
2198 // Call the [[BoundTargetFunction]] via the Call builtin.
2199 __ ldr(r1, FieldMemOperand(r1, JSBoundFunction::kBoundTargetFunctionOffset));
2200 __ Jump(BUILTIN_CODE(masm->isolate(), Call_ReceiverIsAny),
2201 RelocInfo::CODE_TARGET);
2202 }
2203
2204 // static
2205 void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
2206 // ----------- S t a t e -------------
2207 // -- r0 : the number of arguments (not including the receiver)
2208 // -- r1 : the target to call (can be any Object).
2209 // -----------------------------------
2210
2211 Label non_callable, non_smi;
2212 __ JumpIfSmi(r1, &non_callable);
2213 __ bind(&non_smi);
2214 __ CompareObjectType(r1, r4, r5, JS_FUNCTION_TYPE);
2215 __ Jump(masm->isolate()->builtins()->CallFunction(mode),
2216 RelocInfo::CODE_TARGET, eq);
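  // CompareObjectType left the map of the target in r4 and its instance type
  // in r5; the checks below reuse both.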
2217 __ cmp(r5, Operand(JS_BOUND_FUNCTION_TYPE));
2218 __ Jump(BUILTIN_CODE(masm->isolate(), CallBoundFunction),
2219 RelocInfo::CODE_TARGET, eq);
2220
2221 // Check if target has a [[Call]] internal method.
2222 __ ldrb(r4, FieldMemOperand(r4, Map::kBitFieldOffset));
2223 __ tst(r4, Operand(Map::Bits1::IsCallableBit::kMask));
2224 __ b(eq, &non_callable);
2225
2226   // Check if the target is a proxy; if so, call the CallProxy builtin.
2227 __ cmp(r5, Operand(JS_PROXY_TYPE));
2228 __ Jump(BUILTIN_CODE(masm->isolate(), CallProxy), RelocInfo::CODE_TARGET, eq);
2229
2230 // 2. Call to something else, which might have a [[Call]] internal method (if
2231 // not we raise an exception).
2232   // Overwrite the original receiver with the (original) target.
2233 __ str(r1, __ ReceiverOperand(r0));
2234 // Let the "call_as_function_delegate" take care of the rest.
2235 __ LoadNativeContextSlot(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, r1);
2236 __ Jump(masm->isolate()->builtins()->CallFunction(
2237 ConvertReceiverMode::kNotNullOrUndefined),
2238 RelocInfo::CODE_TARGET);
2239
2240 // 3. Call to something that is not callable.
2241 __ bind(&non_callable);
2242 {
2243 FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
2244 __ Push(r1);
2245 __ CallRuntime(Runtime::kThrowCalledNonCallable);
2246 }
2247 }
2248
2249 // static
2250 void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
2251 // ----------- S t a t e -------------
2252 // -- r0 : the number of arguments (not including the receiver)
2253 // -- r1 : the constructor to call (checked to be a JSFunction)
2254 // -- r3 : the new target (checked to be a constructor)
2255 // -----------------------------------
2256 __ AssertConstructor(r1);
2257 __ AssertFunction(r1);
2258
2259   // The calling convention for function-specific ConstructStubs requires
2260   // r2 to contain either an AllocationSite or undefined.
2261 __ LoadRoot(r2, RootIndex::kUndefinedValue);
2262
2263 Label call_generic_stub;
2264
2265 // Jump to JSBuiltinsConstructStub or JSConstructStubGeneric.
2266 __ ldr(r4, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
2267 __ ldr(r4, FieldMemOperand(r4, SharedFunctionInfo::kFlagsOffset));
2268 __ tst(r4, Operand(SharedFunctionInfo::ConstructAsBuiltinBit::kMask));
2269 __ b(eq, &call_generic_stub);
2270
2271 __ Jump(BUILTIN_CODE(masm->isolate(), JSBuiltinsConstructStub),
2272 RelocInfo::CODE_TARGET);
2273
2274 __ bind(&call_generic_stub);
2275 __ Jump(BUILTIN_CODE(masm->isolate(), JSConstructStubGeneric),
2276 RelocInfo::CODE_TARGET);
2277 }
2278
2279 // static
2280 void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
2281 // ----------- S t a t e -------------
2282 // -- r0 : the number of arguments (not including the receiver)
2283 // -- r1 : the function to call (checked to be a JSBoundFunction)
2284 // -- r3 : the new target (checked to be a constructor)
2285 // -----------------------------------
2286 __ AssertConstructor(r1);
2287 __ AssertBoundFunction(r1);
2288
2289 // Push the [[BoundArguments]] onto the stack.
2290 Generate_PushBoundArguments(masm);
2291
2292 // Patch new.target to [[BoundTargetFunction]] if new.target equals target.
2293 __ cmp(r1, r3);
2294 __ ldr(r3, FieldMemOperand(r1, JSBoundFunction::kBoundTargetFunctionOffset),
2295 eq);
2296
2297 // Construct the [[BoundTargetFunction]] via the Construct builtin.
2298 __ ldr(r1, FieldMemOperand(r1, JSBoundFunction::kBoundTargetFunctionOffset));
2299 __ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET);
2300 }
2301
2302 // static
2303 void Builtins::Generate_Construct(MacroAssembler* masm) {
2304 // ----------- S t a t e -------------
2305 // -- r0 : the number of arguments (not including the receiver)
2306 // -- r1 : the constructor to call (can be any Object)
2307 // -- r3 : the new target (either the same as the constructor or
2308 // the JSFunction on which new was invoked initially)
2309 // -----------------------------------
2310
2311 // Check if target is a Smi.
2312 Label non_constructor, non_proxy;
2313 __ JumpIfSmi(r1, &non_constructor);
2314
2315 // Check if target has a [[Construct]] internal method.
2316 __ ldr(r4, FieldMemOperand(r1, HeapObject::kMapOffset));
2317 __ ldrb(r2, FieldMemOperand(r4, Map::kBitFieldOffset));
2318 __ tst(r2, Operand(Map::Bits1::IsConstructorBit::kMask));
2319 __ b(eq, &non_constructor);
2320
2321 // Dispatch based on instance type.
2322 __ CompareInstanceType(r4, r5, JS_FUNCTION_TYPE);
2323 __ Jump(BUILTIN_CODE(masm->isolate(), ConstructFunction),
2324 RelocInfo::CODE_TARGET, eq);
2325
2326 // Only dispatch to bound functions after checking whether they are
2327 // constructors.
2328 __ cmp(r5, Operand(JS_BOUND_FUNCTION_TYPE));
2329 __ Jump(BUILTIN_CODE(masm->isolate(), ConstructBoundFunction),
2330 RelocInfo::CODE_TARGET, eq);
2331
2332 // Only dispatch to proxies after checking whether they are constructors.
2333 __ cmp(r5, Operand(JS_PROXY_TYPE));
2334 __ b(ne, &non_proxy);
2335 __ Jump(BUILTIN_CODE(masm->isolate(), ConstructProxy),
2336 RelocInfo::CODE_TARGET);
2337
2338 // Called Construct on an exotic Object with a [[Construct]] internal method.
2339 __ bind(&non_proxy);
2340 {
2341 // Overwrite the original receiver with the (original) target.
2342 __ str(r1, __ ReceiverOperand(r0));
2343 // Let the "call_as_constructor_delegate" take care of the rest.
2344 __ LoadNativeContextSlot(Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, r1);
2345 __ Jump(masm->isolate()->builtins()->CallFunction(),
2346 RelocInfo::CODE_TARGET);
2347 }
2348
2349 // Called Construct on an Object that doesn't have a [[Construct]] internal
2350 // method.
2351 __ bind(&non_constructor);
2352 __ Jump(BUILTIN_CODE(masm->isolate(), ConstructedNonConstructable),
2353 RelocInfo::CODE_TARGET);
2354 }
2355
2356 void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
2357 // ----------- S t a t e -------------
2358 // -- r0 : actual number of arguments
2359 // -- r1 : function (passed through to callee)
2360 // -- r2 : expected number of arguments
2361 // -- r3 : new target (passed through to callee)
2362 // -----------------------------------
2363
2364 Label dont_adapt_arguments, stack_overflow;
2365 __ cmp(r2, Operand(kDontAdaptArgumentsSentinel));
2366 __ b(eq, &dont_adapt_arguments);
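  // An expected-argument count equal to kDontAdaptArgumentsSentinel means the
  // callee tolerates a mismatch, so no adaptor frame is built.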
2367 __ ldr(r4, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
2368 __ ldr(r4, FieldMemOperand(r4, SharedFunctionInfo::kFlagsOffset));
2369
2370 // -------------------------------------------
2371 // Adapt arguments.
2372 // -------------------------------------------
2373 {
2374 Label under_application, over_application, invoke;
2375 __ cmp(r0, r2);
2376 __ b(lt, &under_application);
2377
2378 // Enough parameters: actual >= expected
2379 __ bind(&over_application);
2380 {
2381 EnterArgumentsAdaptorFrame(masm);
2382 __ StackOverflowCheck(r2, r5, &stack_overflow);
2383
2384 // Calculate copy start address into r0 and copy end address into r4.
2385 // r0: actual number of arguments as a smi
2386 // r1: function
2387 // r2: expected number of arguments
2388 // r3: new target (passed through to callee)
2389 __ add(r0, fp, Operand(r2, LSL, kSystemPointerSizeLog2));
2390 // adjust for return address and receiver
2391 __ add(r0, r0, Operand(2 * kSystemPointerSize));
2392 __ sub(r4, r0, Operand(r2, LSL, kSystemPointerSizeLog2));
2393
2394 // Copy the arguments (including the receiver) to the new stack frame.
2395 // r0: copy start address
2396 // r1: function
2397 // r2: expected number of arguments
2398 // r3: new target (passed through to callee)
2399 // r4: copy end address
2400
2401 Label copy;
2402       __ bind(&copy);
2403 __ ldr(r5, MemOperand(r0, 0));
2404 __ push(r5);
2405 __ cmp(r0, r4); // Compare before moving to next argument.
2406 __ sub(r0, r0, Operand(kSystemPointerSize));
2407       __ b(ne, &copy);
2408
2409 __ b(&invoke);
2410 }
2411
2412 // Too few parameters: Actual < expected
2413 __ bind(&under_application);
2414 {
2415 EnterArgumentsAdaptorFrame(masm);
2416 __ StackOverflowCheck(r2, r5, &stack_overflow);
2417
2418 // Fill the remaining expected arguments with undefined.
2419 // r0: actual number of arguments as a smi
2420 // r1: function
2421 // r2: expected number of arguments
2422 // r3: new target (passed through to callee)
2423 __ LoadRoot(r5, RootIndex::kUndefinedValue);
2424 __ sub(r6, r2, Operand::SmiUntag(r0));
2425 __ sub(r4, fp, Operand(r6, LSL, kPointerSizeLog2));
2426 // Adjust for frame.
2427 __ sub(r4, r4,
2428 Operand(ArgumentsAdaptorFrameConstants::kFixedFrameSizeFromFp +
2429 kPointerSize));
2430
2431 Label fill;
2432 __ bind(&fill);
2433 __ push(r5);
2434 __ cmp(sp, r4);
2435 __ b(ne, &fill);
2436
2437       // Calculate the copy start address into r0; the copy end address is fp.
2438 // r0: actual number of arguments as a smi
2439 // r1: function
2440 // r2: expected number of arguments
2441 // r3: new target (passed through to callee)
2442 __ add(r0, fp, Operand::PointerOffsetFromSmiKey(r0));
2443
2444 // Copy the arguments (including the receiver) to the new stack frame.
2445 // r0: copy start address
2446 // r1: function
2447 // r2: expected number of arguments
2448 // r3: new target (passed through to callee)
2449 Label copy;
2450       __ bind(&copy);
2451
2452 // Adjust load for return address and receiver.
2453 __ ldr(r5, MemOperand(r0, 2 * kPointerSize));
2454 __ push(r5);
2455
2456 __ cmp(r0, fp); // Compare before moving to next argument.
2457 __ sub(r0, r0, Operand(kPointerSize));
2458       __ b(ne, &copy);
2459 }
2460
2461 // Call the entry point.
2462 __ bind(&invoke);
2463 __ mov(r0, r2);
2464 // r0 : expected number of arguments
2465 // r1 : function (passed through to callee)
2466 // r3 : new target (passed through to callee)
2467 static_assert(kJavaScriptCallCodeStartRegister == r2, "ABI mismatch");
2468 __ ldr(r2, FieldMemOperand(r1, JSFunction::kCodeOffset));
2469 __ CallCodeObject(r2);
2470
2471 // Store offset of return address for deoptimizer.
2472 masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(
2473 masm->pc_offset());
2474
2475 // Exit frame and return.
2476 LeaveArgumentsAdaptorFrame(masm);
2477 __ Jump(lr);
2478 }
2479
2480 // -------------------------------------------
2481 // Dont adapt arguments.
2482 // -------------------------------------------
2483 __ bind(&dont_adapt_arguments);
2484 static_assert(kJavaScriptCallCodeStartRegister == r2, "ABI mismatch");
2485 __ ldr(r2, FieldMemOperand(r1, JSFunction::kCodeOffset));
2486 __ JumpCodeObject(r2);
2487
2488 __ bind(&stack_overflow);
2489 {
2490 FrameScope frame(masm, StackFrame::MANUAL);
2491 __ CallRuntime(Runtime::kThrowStackOverflow);
2492 __ bkpt(0);
2493 }
2494 }
2495
2496 void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
2497 // The function index was put in a register by the jump table trampoline.
2498 // Convert to Smi for the runtime call.
2499 __ SmiTag(kWasmCompileLazyFuncIndexRegister,
2500 kWasmCompileLazyFuncIndexRegister);
2501 {
2502 HardAbortScope hard_abort(masm); // Avoid calls to Abort.
2503 FrameAndConstantPoolScope scope(masm, StackFrame::WASM_COMPILE_LAZY);
2504
2505 // Save all parameter registers (see wasm-linkage.cc). They might be
2506 // overwritten in the runtime call below. We don't have any callee-saved
2507 // registers in wasm, so no need to store anything else.
2508 constexpr RegList gp_regs = Register::ListOf(r0, r1, r2, r3);
2509 constexpr DwVfpRegister lowest_fp_reg = d0;
2510 constexpr DwVfpRegister highest_fp_reg = d7;
2511
2512 __ stm(db_w, sp, gp_regs);
2513 __ vstm(db_w, sp, lowest_fp_reg, highest_fp_reg);
2514
2515 // Pass instance and function index as explicit arguments to the runtime
2516 // function.
2517 __ push(kWasmInstanceRegister);
2518 __ push(kWasmCompileLazyFuncIndexRegister);
2519 // Initialize the JavaScript context with 0. CEntry will use it to
2520 // set the current context on the isolate.
2521 __ Move(cp, Smi::zero());
2522 __ CallRuntime(Runtime::kWasmCompileLazy, 2);
2523 // The entrypoint address is the return value.
2524 __ mov(r8, kReturnRegister0);
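    // r8 is not among the saved parameter registers, so it survives the
    // register restore below.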
2525
2526 // Restore registers.
2527 __ vldm(ia_w, sp, lowest_fp_reg, highest_fp_reg);
2528 __ ldm(ia_w, sp, gp_regs);
2529 }
2530 // Finally, jump to the entrypoint.
2531 __ Jump(r8);
2532 }
2533
2534 void Builtins::Generate_WasmDebugBreak(MacroAssembler* masm) {
2535 HardAbortScope hard_abort(masm); // Avoid calls to Abort.
2536 {
2537 FrameAndConstantPoolScope scope(masm, StackFrame::WASM_DEBUG_BREAK);
2538
2539 // Save all parameter registers. They might hold live values, we restore
2540 // them after the runtime call.
2541 constexpr DwVfpRegister lowest_fp_reg = DwVfpRegister::from_code(
2542 WasmDebugBreakFrameConstants::kFirstPushedFpReg);
2543 constexpr DwVfpRegister highest_fp_reg = DwVfpRegister::from_code(
2544 WasmDebugBreakFrameConstants::kLastPushedFpReg);
2545
2546 // Store gp parameter registers.
2547 __ stm(db_w, sp, WasmDebugBreakFrameConstants::kPushedGpRegs);
2548 // Store fp parameter registers.
2549 __ vstm(db_w, sp, lowest_fp_reg, highest_fp_reg);
2550
2551 // Initialize the JavaScript context with 0. CEntry will use it to
2552 // set the current context on the isolate.
2553 __ Move(cp, Smi::zero());
2554 __ CallRuntime(Runtime::kWasmDebugBreak, 0);
2555
2556 // Restore registers.
2557 __ vldm(ia_w, sp, lowest_fp_reg, highest_fp_reg);
2558 __ ldm(ia_w, sp, WasmDebugBreakFrameConstants::kPushedGpRegs);
2559 }
2560 __ Ret();
2561 }
2562
2563 void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
2564 SaveFPRegsMode save_doubles, ArgvMode argv_mode,
2565 bool builtin_exit_frame) {
2566 // Called from JavaScript; parameters are on stack as if calling JS function.
2567 // r0: number of arguments including receiver
2568 // r1: pointer to builtin function
2569 // fp: frame pointer (restored after C call)
2570 // sp: stack pointer (restored as callee's sp after C call)
2571 // cp: current context (C callee-saved)
2572 //
2573 // If argv_mode == kArgvInRegister:
2574 // r2: pointer to the first argument
2575
2576 __ mov(r5, Operand(r1));
2577
2578 if (argv_mode == kArgvInRegister) {
2579 // Move argv into the correct register.
2580 __ mov(r1, Operand(r2));
2581 } else {
2582 // Compute the argv pointer in a callee-saved register.
2583 __ add(r1, sp, Operand(r0, LSL, kPointerSizeLog2));
2584 __ sub(r1, r1, Operand(kPointerSize));
2585 }
2586
2587 // Enter the exit frame that transitions from JavaScript to C++.
2588 FrameScope scope(masm, StackFrame::MANUAL);
2589 __ EnterExitFrame(
2590 save_doubles == kSaveFPRegs, 0,
2591 builtin_exit_frame ? StackFrame::BUILTIN_EXIT : StackFrame::EXIT);
2592
2593 // Store a copy of argc in callee-saved registers for later.
2594 __ mov(r4, Operand(r0));
2595
2596 // r0, r4: number of arguments including receiver (C callee-saved)
2597 // r1: pointer to the first argument (C callee-saved)
2598 // r5: pointer to builtin function (C callee-saved)
2599
2600 #if V8_HOST_ARCH_ARM
2601 int frame_alignment = MacroAssembler::ActivationFrameAlignment();
2602 int frame_alignment_mask = frame_alignment - 1;
2603 if (FLAG_debug_code) {
2604 if (frame_alignment > kPointerSize) {
2605 Label alignment_as_expected;
2606 DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
2607 __ tst(sp, Operand(frame_alignment_mask));
2608 __ b(eq, &alignment_as_expected);
2609       // Don't use Check here, as it will call Runtime_Abort and re-enter this code.
2610 __ stop();
2611 __ bind(&alignment_as_expected);
2612 }
2613 }
2614 #endif
2615
2616 // Call C built-in.
2617 // r0 = argc, r1 = argv, r2 = isolate
2618 __ Move(r2, ExternalReference::isolate_address(masm->isolate()));
2619 __ StoreReturnAddressAndCall(r5);
2620
2621 // Result returned in r0 or r1:r0 - do not destroy these registers!
2622
2623 // Check result for exception sentinel.
2624 Label exception_returned;
2625 __ CompareRoot(r0, RootIndex::kException);
2626 __ b(eq, &exception_returned);
2627
2628 // Check that there is no pending exception, otherwise we
2629 // should have returned the exception sentinel.
2630 if (FLAG_debug_code) {
2631 Label okay;
2632 ExternalReference pending_exception_address = ExternalReference::Create(
2633 IsolateAddressId::kPendingExceptionAddress, masm->isolate());
2634 __ Move(r3, pending_exception_address);
2635 __ ldr(r3, MemOperand(r3));
2636 __ CompareRoot(r3, RootIndex::kTheHoleValue);
2637     // Cannot use Check here, as it attempts to generate a call into the runtime.
2638 __ b(eq, &okay);
2639 __ stop();
2640 __ bind(&okay);
2641 }
2642
2643 // Exit C frame and return.
2644 // r0:r1: result
2645 // sp: stack pointer
2646 // fp: frame pointer
2647 Register argc = argv_mode == kArgvInRegister
2648 // We don't want to pop arguments so set argc to no_reg.
2649 ? no_reg
2650 // Callee-saved register r4 still holds argc.
2651 : r4;
2652 __ LeaveExitFrame(save_doubles == kSaveFPRegs, argc);
2653 __ mov(pc, lr);
2654
2655 // Handling of exception.
2656 __ bind(&exception_returned);
2657
2658 ExternalReference pending_handler_context_address = ExternalReference::Create(
2659 IsolateAddressId::kPendingHandlerContextAddress, masm->isolate());
2660 ExternalReference pending_handler_entrypoint_address =
2661 ExternalReference::Create(
2662 IsolateAddressId::kPendingHandlerEntrypointAddress, masm->isolate());
2663 ExternalReference pending_handler_fp_address = ExternalReference::Create(
2664 IsolateAddressId::kPendingHandlerFPAddress, masm->isolate());
2665 ExternalReference pending_handler_sp_address = ExternalReference::Create(
2666 IsolateAddressId::kPendingHandlerSPAddress, masm->isolate());
2667
2668   // Ask the runtime for help to determine the handler. This will set r0 to
2669   // contain the current pending exception; don't clobber it.
2670 ExternalReference find_handler =
2671 ExternalReference::Create(Runtime::kUnwindAndFindExceptionHandler);
2672 {
2673 FrameScope scope(masm, StackFrame::MANUAL);
2674 __ PrepareCallCFunction(3, 0);
2675 __ mov(r0, Operand(0));
2676 __ mov(r1, Operand(0));
2677 __ Move(r2, ExternalReference::isolate_address(masm->isolate()));
2678 __ CallCFunction(find_handler, 3);
2679 }
2680
2681 // Retrieve the handler context, SP and FP.
2682 __ Move(cp, pending_handler_context_address);
2683 __ ldr(cp, MemOperand(cp));
2684 __ Move(sp, pending_handler_sp_address);
2685 __ ldr(sp, MemOperand(sp));
2686 __ Move(fp, pending_handler_fp_address);
2687 __ ldr(fp, MemOperand(fp));
2688
2689 // If the handler is a JS frame, restore the context to the frame. Note that
2690 // the context will be set to (cp == 0) for non-JS frames.
2691 __ cmp(cp, Operand(0));
2692 __ str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
2693
2694 // Reset the masking register. This is done independent of the underlying
2695 // feature flag {FLAG_untrusted_code_mitigations} to make the snapshot work
2696 // with both configurations. It is safe to always do this, because the
2697 // underlying register is caller-saved and can be arbitrarily clobbered.
2698 __ ResetSpeculationPoisonRegister();
2699
2700 // Compute the handler entry address and jump to it.
2701 ConstantPoolUnavailableScope constant_pool_unavailable(masm);
2702 __ Move(r1, pending_handler_entrypoint_address);
2703 __ ldr(r1, MemOperand(r1));
2704 __ Jump(r1);
2705 }
2706
2707 void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
2708 Label negate, done;
2709
2710 HardAbortScope hard_abort(masm); // Avoid calls to Abort.
2711 UseScratchRegisterScope temps(masm);
2712 Register result_reg = r7;
2713 Register double_low = GetRegisterThatIsNotOneOf(result_reg);
2714 Register double_high = GetRegisterThatIsNotOneOf(result_reg, double_low);
2715 LowDwVfpRegister double_scratch = temps.AcquireLowD();
2716
2717 // Save the old values from these temporary registers on the stack.
2718 __ Push(result_reg, double_high, double_low);
2719
2720 // Account for saved regs.
2721 const int kArgumentOffset = 3 * kPointerSize;
2722
2723 MemOperand input_operand(sp, kArgumentOffset);
2724 MemOperand result_operand = input_operand;
2725
2726 // Load double input.
2727 __ vldr(double_scratch, input_operand);
2728 __ vmov(double_low, double_high, double_scratch);
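  // double_low:double_high now hold the raw 64-bit bit pattern of the input,
  // used by the manual truncation path below if the FPU conversion cannot
  // handle the value.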
2729 // Try to convert with a FPU convert instruction. This handles all
2730 // non-saturating cases.
2731 __ TryInlineTruncateDoubleToI(result_reg, double_scratch, &done);
2732
2733 Register scratch = temps.Acquire();
2734 __ Ubfx(scratch, double_high, HeapNumber::kExponentShift,
2735 HeapNumber::kExponentBits);
2736 // Load scratch with exponent - 1. This is faster than loading
2737 // with exponent because Bias + 1 = 1024 which is an *ARM* immediate value.
2738 STATIC_ASSERT(HeapNumber::kExponentBias + 1 == 1024);
2739 __ sub(scratch, scratch, Operand(HeapNumber::kExponentBias + 1));
2740   // If the exponent is greater than or equal to 84, the 32 least significant
2741   // bits of the integer value are zero (the leading 1 is followed by at most
2742   // 52 mantissa bits, leaving the low 32 bits unencoded), so the result is 0.
2743   // Compare the exponent with 84 (i.e. compare exponent - 1 with 83) and
2744   // return zero when it is at least 84.
2745 __ cmp(scratch, Operand(83));
2746 __ mov(result_reg, Operand::Zero(), LeaveCC, ge);
2747 __ b(ge, &done);
2748
2749 // If we reach this code, 30 <= exponent <= 83.
2750 // `TryInlineTruncateDoubleToI` above will have truncated any double with an
2751 // exponent lower than 30.
2752 if (masm->emit_debug_code()) {
2753 // Scratch is exponent - 1.
2754 __ cmp(scratch, Operand(30 - 1));
2755 __ Check(ge, AbortReason::kUnexpectedValue);
2756 }
2757
2758 // We don't have to handle cases where 0 <= exponent <= 20 for which we would
2759 // need to shift right the high part of the mantissa.
2760 // Scratch contains exponent - 1.
2761 // Load scratch with 52 - exponent (load with 51 - (exponent - 1)).
2762 __ rsb(scratch, scratch, Operand(51), SetCC);
2763
2764 // 52 <= exponent <= 83, shift only double_low.
2765 // On entry, scratch contains: 52 - exponent.
2766 __ rsb(scratch, scratch, Operand::Zero(), LeaveCC, ls);
2767 __ mov(result_reg, Operand(double_low, LSL, scratch), LeaveCC, ls);
2768 __ b(ls, &negate);
2769
2770 // 21 <= exponent <= 51, shift double_low and double_high
2771 // to generate the result.
2772 __ mov(double_low, Operand(double_low, LSR, scratch));
2773 // Scratch contains: 52 - exponent.
2774   // We need: exponent - 20.
2775 // So we use: 32 - scratch = 32 - 52 + exponent = exponent - 20.
2776 __ rsb(scratch, scratch, Operand(32));
2777 __ Ubfx(result_reg, double_high, 0, HeapNumber::kMantissaBitsInTopWord);
2778 // Set the implicit 1 before the mantissa part in double_high.
2779 __ orr(result_reg, result_reg,
2780 Operand(1 << HeapNumber::kMantissaBitsInTopWord));
2781 __ orr(result_reg, double_low, Operand(result_reg, LSL, scratch));
2782
2783 __ bind(&negate);
2784   // If the input was positive, double_high ASR 31 equals 0 and
2785   // double_high LSR 31 equals 0.
2786   // New result = (result eor 0) + 0 = result.
2787   // If the input was negative, we have to negate the result:
2788   // double_high ASR 31 equals 0xFFFFFFFF and double_high LSR 31 equals 1.
2789   // New result = (result eor 0xFFFFFFFF) + 1 = 0 - result.
2790 __ eor(result_reg, result_reg, Operand(double_high, ASR, 31));
2791 __ add(result_reg, result_reg, Operand(double_high, LSR, 31));
2792
2793 __ bind(&done);
2794 __ str(result_reg, result_operand);
2795
2796 // Restore registers corrupted in this routine and return.
2797 __ Pop(result_reg, double_high, double_low);
2798 __ Ret();
2799 }
2800
2801 void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
2802 // TODO(v8:10701): Implement for this platform.
2803 __ Trap();
2804 }
2805
2806 namespace {
2807
2808 int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
2809 return ref0.address() - ref1.address();
2810 }
2811
2812 // Calls an API function. Allocates a HandleScope, extracts the returned value
2813 // from the handle, and propagates exceptions. Restores the context.
2814 // stack_space - the space to be unwound on exit (includes the JS call
2815 // arguments space and the additional space allocated for the fast call).
2816 void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address,
2817 ExternalReference thunk_ref, int stack_space,
2818 MemOperand* stack_space_operand,
2819 MemOperand return_value_operand) {
2820 Isolate* isolate = masm->isolate();
2821 ExternalReference next_address =
2822 ExternalReference::handle_scope_next_address(isolate);
2823 const int kNextOffset = 0;
2824 const int kLimitOffset = AddressOffset(
2825 ExternalReference::handle_scope_limit_address(isolate), next_address);
2826 const int kLevelOffset = AddressOffset(
2827 ExternalReference::handle_scope_level_address(isolate), next_address);
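  // kLimitOffset and kLevelOffset are the byte offsets of the HandleScope limit
  // and level fields relative to next_address, so all three fields can be
  // reached from the single base register (r9) loaded with next_address below.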
2828
2829 DCHECK(function_address == r1 || function_address == r2);
2830
2831 Label profiler_enabled, end_profiler_check;
2832 __ Move(r9, ExternalReference::is_profiling_address(isolate));
2833 __ ldrb(r9, MemOperand(r9, 0));
2834 __ cmp(r9, Operand(0));
2835 __ b(ne, &profiler_enabled);
2836 __ Move(r9, ExternalReference::address_of_runtime_stats_flag());
2837 __ ldr(r9, MemOperand(r9, 0));
2838 __ cmp(r9, Operand(0));
2839 __ b(ne, &profiler_enabled);
2840 {
2841 // Call the api function directly.
2842 __ Move(r3, function_address);
2843 __ b(&end_profiler_check);
2844 }
2845 __ bind(&profiler_enabled);
2846 {
2847 // Additional parameter is the address of the actual callback.
2848 __ Move(r3, thunk_ref);
2849 }
2850 __ bind(&end_profiler_check);
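  // r3 now holds the address that will actually be called: the API function
  // itself, or the profiling thunk, which receives the real callback address
  // via function_address.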
2851
2852 // Allocate HandleScope in callee-save registers.
2853 __ Move(r9, next_address);
2854 __ ldr(r4, MemOperand(r9, kNextOffset));
2855 __ ldr(r5, MemOperand(r9, kLimitOffset));
2856 __ ldr(r6, MemOperand(r9, kLevelOffset));
2857 __ add(r6, r6, Operand(1));
2858 __ str(r6, MemOperand(r9, kLevelOffset));
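  // r4 = previous handle scope next, r5 = previous limit, r6 = incremented
  // level.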
2859
2860 __ StoreReturnAddressAndCall(r3);
2861
2862 Label promote_scheduled_exception;
2863 Label delete_allocated_handles;
2864 Label leave_exit_frame;
2865 Label return_value_loaded;
2866
2867   // Load the value from ReturnValue.
2868 __ ldr(r0, return_value_operand);
2869 __ bind(&return_value_loaded);
2870 // No more valid handles (the result handle was the last one). Restore
2871 // previous handle scope.
2872 __ str(r4, MemOperand(r9, kNextOffset));
2873 if (__ emit_debug_code()) {
2874 __ ldr(r1, MemOperand(r9, kLevelOffset));
2875 __ cmp(r1, r6);
2876 __ Check(eq, AbortReason::kUnexpectedLevelAfterReturnFromApiCall);
2877 }
2878 __ sub(r6, r6, Operand(1));
2879 __ str(r6, MemOperand(r9, kLevelOffset));
2880 __ ldr(r6, MemOperand(r9, kLimitOffset));
2881 __ cmp(r5, r6);
2882 __ b(ne, &delete_allocated_handles);
2883
2884 // Leave the API exit frame.
2885 __ bind(&leave_exit_frame);
2886 // LeaveExitFrame expects unwind space to be in a register.
2887 if (stack_space_operand == nullptr) {
2888 DCHECK_NE(stack_space, 0);
2889 __ mov(r4, Operand(stack_space));
2890 } else {
2891 DCHECK_EQ(stack_space, 0);
2892 __ ldr(r4, *stack_space_operand);
2893 }
2894 __ LeaveExitFrame(false, r4, stack_space_operand != nullptr);
2895
2896 // Check if the function scheduled an exception.
2897 __ LoadRoot(r4, RootIndex::kTheHoleValue);
2898 __ Move(r6, ExternalReference::scheduled_exception_address(isolate));
2899 __ ldr(r5, MemOperand(r6));
2900 __ cmp(r4, r5);
2901 __ b(ne, &promote_scheduled_exception);
2902
2903 __ mov(pc, lr);
2904
2905 // Re-throw by promoting a scheduled exception.
2906 __ bind(&promote_scheduled_exception);
2907 __ TailCallRuntime(Runtime::kPromoteScheduledException);
2908
2909 // HandleScope limit has changed. Delete allocated extensions.
2910 __ bind(&delete_allocated_handles);
2911 __ str(r5, MemOperand(r9, kLimitOffset));
2912 __ mov(r4, r0);
2913 __ PrepareCallCFunction(1);
2914 __ Move(r0, ExternalReference::isolate_address(isolate));
2915 __ CallCFunction(ExternalReference::delete_handle_scope_extensions(), 1);
2916 __ mov(r0, r4);
2917 __ jmp(&leave_exit_frame);
2918 }
2919
2920 } // namespace
2921
2922 void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
2923 // ----------- S t a t e -------------
2924 // -- cp : context
2925 // -- r1 : api function address
2926 // -- r2 : arguments count (not including the receiver)
2927 // -- r3 : call data
2928 // -- r0 : holder
2929 // -- sp[0] : receiver
2930   //  -- sp[4] : first argument
2931   //  -- ...
2932   //  -- sp[(argc) * 4] : last argument
2933 // -----------------------------------
2934
2935 Register api_function_address = r1;
2936 Register argc = r2;
2937 Register call_data = r3;
2938 Register holder = r0;
2939 Register scratch = r4;
2940
2941 DCHECK(!AreAliased(api_function_address, argc, call_data, holder, scratch));
2942
2943 using FCA = FunctionCallbackArguments;
2944
2945 STATIC_ASSERT(FCA::kArgsLength == 6);
2946 STATIC_ASSERT(FCA::kNewTargetIndex == 5);
2947 STATIC_ASSERT(FCA::kDataIndex == 4);
2948 STATIC_ASSERT(FCA::kReturnValueOffset == 3);
2949 STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
2950 STATIC_ASSERT(FCA::kIsolateIndex == 1);
2951 STATIC_ASSERT(FCA::kHolderIndex == 0);
2952
2953 // Set up FunctionCallbackInfo's implicit_args on the stack as follows:
2954 //
2955 // Target state:
2956 // sp[0 * kPointerSize]: kHolder
2957 // sp[1 * kPointerSize]: kIsolate
2958 // sp[2 * kPointerSize]: undefined (kReturnValueDefaultValue)
2959 // sp[3 * kPointerSize]: undefined (kReturnValue)
2960 // sp[4 * kPointerSize]: kData
2961 // sp[5 * kPointerSize]: undefined (kNewTarget)
2962
2963 // Reserve space on the stack.
2964 __ AllocateStackSpace(FCA::kArgsLength * kPointerSize);
2965
2966 // kHolder.
2967 __ str(holder, MemOperand(sp, 0 * kPointerSize));
2968
2969 // kIsolate.
2970 __ Move(scratch, ExternalReference::isolate_address(masm->isolate()));
2971 __ str(scratch, MemOperand(sp, 1 * kPointerSize));
2972
2973 // kReturnValueDefaultValue and kReturnValue.
2974 __ LoadRoot(scratch, RootIndex::kUndefinedValue);
2975 __ str(scratch, MemOperand(sp, 2 * kPointerSize));
2976 __ str(scratch, MemOperand(sp, 3 * kPointerSize));
2977
2978 // kData.
2979 __ str(call_data, MemOperand(sp, 4 * kPointerSize));
2980
2981 // kNewTarget.
2982 __ str(scratch, MemOperand(sp, 5 * kPointerSize));
2983
2984 // Keep a pointer to kHolder (= implicit_args) in a scratch register.
2985 // We use it below to set up the FunctionCallbackInfo object.
2986 __ mov(scratch, sp);
2987
2988 // Allocate the v8::Arguments structure in the arguments' space since
2989 // it's not controlled by GC.
2990 static constexpr int kApiStackSpace = 4;
2991 static constexpr bool kDontSaveDoubles = false;
2992 FrameScope frame_scope(masm, StackFrame::MANUAL);
2993 __ EnterExitFrame(kDontSaveDoubles, kApiStackSpace);
2994
2995 // FunctionCallbackInfo::implicit_args_ (points at kHolder as set up above).
2996 // Arguments are after the return address (pushed by EnterExitFrame()).
2997 __ str(scratch, MemOperand(sp, 1 * kPointerSize));
2998
2999 // FunctionCallbackInfo::values_ (points at the first varargs argument passed
3000 // on the stack).
3001 __ add(scratch, scratch, Operand((FCA::kArgsLength + 1) * kPointerSize));
3002 __ str(scratch, MemOperand(sp, 2 * kPointerSize));
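  // (The FCA::kArgsLength implicit-args slots plus the receiver slot separate
  // implicit_args from the first JS argument, hence the +1 above.)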
3003
3004 // FunctionCallbackInfo::length_.
3005 __ str(argc, MemOperand(sp, 3 * kPointerSize));
3006
3007 // We also store the number of bytes to drop from the stack after returning
3008 // from the API function here.
3009 __ mov(scratch,
3010 Operand((FCA::kArgsLength + 1 /* receiver */) * kPointerSize));
3011 __ add(scratch, scratch, Operand(argc, LSL, kPointerSizeLog2));
3012 __ str(scratch, MemOperand(sp, 4 * kPointerSize));
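  // That is (FCA::kArgsLength + 1 + argc) * kPointerSize bytes in total: the
  // implicit args, the receiver and all JS arguments.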
3013
3014 // v8::InvocationCallback's argument.
3015 __ add(r0, sp, Operand(1 * kPointerSize));
3016
3017 ExternalReference thunk_ref = ExternalReference::invoke_function_callback();
3018
3019 // There are two stack slots above the arguments we constructed on the stack.
3020 // TODO(jgruber): Document what these arguments are.
3021 static constexpr int kStackSlotsAboveFCA = 2;
3022 MemOperand return_value_operand(
3023 fp, (kStackSlotsAboveFCA + FCA::kReturnValueOffset) * kPointerSize);
3024
3025 static constexpr int kUseStackSpaceOperand = 0;
3026 MemOperand stack_space_operand(sp, 4 * kPointerSize);
3027
3028 AllowExternalCallThatCantCauseGC scope(masm);
3029 CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
3030 kUseStackSpaceOperand, &stack_space_operand,
3031 return_value_operand);
3032 }
3033
3034 void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
3035   // Build the v8::PropertyCallbackInfo::args_ array on the stack and push the
3036   // property name below the exit frame to make the GC aware of them.
3037 STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
3038 STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
3039 STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
3040 STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
3041 STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
3042 STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
3043 STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
3044 STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);
3045
3046 Register receiver = ApiGetterDescriptor::ReceiverRegister();
3047 Register holder = ApiGetterDescriptor::HolderRegister();
3048 Register callback = ApiGetterDescriptor::CallbackRegister();
3049 Register scratch = r4;
3050 DCHECK(!AreAliased(receiver, holder, callback, scratch));
3051
3052 Register api_function_address = r2;
3053
3054 __ push(receiver);
3055 // Push data from AccessorInfo.
3056 __ ldr(scratch, FieldMemOperand(callback, AccessorInfo::kDataOffset));
3057 __ push(scratch);
3058 __ LoadRoot(scratch, RootIndex::kUndefinedValue);
3059 __ Push(scratch, scratch);
3060 __ Move(scratch, ExternalReference::isolate_address(masm->isolate()));
3061 __ Push(scratch, holder);
3062 __ Push(Smi::zero()); // should_throw_on_error -> false
3063 __ ldr(scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset));
3064 __ push(scratch);
3065 // v8::PropertyCallbackInfo::args_ array and name handle.
3066 const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
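  // That is PropertyCallbackArguments::kArgsLength (7) words for args_ plus one
  // word for the name handle.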
3067
3068 // Load address of v8::PropertyAccessorInfo::args_ array and name handle.
3069 __ mov(r0, sp); // r0 = Handle<Name>
3070 __ add(r1, r0, Operand(1 * kPointerSize)); // r1 = v8::PCI::args_
3071
3072 const int kApiStackSpace = 1;
3073 FrameScope frame_scope(masm, StackFrame::MANUAL);
3074 __ EnterExitFrame(false, kApiStackSpace);
3075
3076   // Create the v8::PropertyCallbackInfo object on the stack and initialize
3077   // its args_ field.
3078 __ str(r1, MemOperand(sp, 1 * kPointerSize));
3079 __ add(r1, sp, Operand(1 * kPointerSize)); // r1 = v8::PropertyCallbackInfo&
3080
3081 ExternalReference thunk_ref =
3082 ExternalReference::invoke_accessor_getter_callback();
3083
3084 __ ldr(scratch, FieldMemOperand(callback, AccessorInfo::kJsGetterOffset));
3085 __ ldr(api_function_address,
3086 FieldMemOperand(scratch, Foreign::kForeignAddressOffset));
3087
3088 // +3 is to skip prolog, return address and name handle.
3089 MemOperand return_value_operand(
3090 fp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
3091 MemOperand* const kUseStackSpaceConstant = nullptr;
3092 CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
3093 kStackUnwindSpace, kUseStackSpaceConstant,
3094 return_value_operand);
3095 }
3096
3097 void Builtins::Generate_DirectCEntry(MacroAssembler* masm) {
3098 // The sole purpose of DirectCEntry is for movable callers (e.g. any general
3099 // purpose Code object) to be able to call into C functions that may trigger
3100 // GC and thus move the caller.
3101 //
3102 // DirectCEntry places the return address on the stack (updated by the GC),
3103 // making the call GC safe. The irregexp backend relies on this.
3104
3105 __ str(lr, MemOperand(sp, 0)); // Store the return address.
3106 __ blx(ip); // Call the C++ function.
3107 __ ldr(pc, MemOperand(sp, 0)); // Return to calling code.
3108 }
3109
3110 void Builtins::Generate_MemCopyUint8Uint8(MacroAssembler* masm) {
3111 Register dest = r0;
3112 Register src = r1;
3113 Register chars = r2;
3114 Register temp1 = r3;
3115 Label less_4;
3116
3117 {
3118 UseScratchRegisterScope temps(masm);
3119 Register temp2 = temps.Acquire();
3120 Label loop;
3121
3122 __ bic(temp2, chars, Operand(0x3), SetCC);
3123 __ b(&less_4, eq);
3124 __ add(temp2, dest, temp2);
3125
3126 __ bind(&loop);
3127 __ ldr(temp1, MemOperand(src, 4, PostIndex));
3128 __ str(temp1, MemOperand(dest, 4, PostIndex));
3129 __ cmp(dest, temp2);
3130 __ b(&loop, ne);
3131 }
3132
3133 __ bind(&less_4);
3134 __ mov(chars, Operand(chars, LSL, 31), SetCC);
3135 // bit0 => Z (ne), bit1 => C (cs)
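  // That is: copy a trailing halfword when bit 1 of the byte count is set (cs),
  // then a trailing byte when bit 0 is set (ne).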
3136 __ ldrh(temp1, MemOperand(src, 2, PostIndex), cs);
3137 __ strh(temp1, MemOperand(dest, 2, PostIndex), cs);
3138 __ ldrb(temp1, MemOperand(src), ne);
3139 __ strb(temp1, MemOperand(dest), ne);
3140 __ Ret();
3141 }
3142
3143 namespace {
3144
3145 // This code tries to be close to ia32 code so that any changes can be
3146 // easily ported.
3147 void Generate_DeoptimizationEntry(MacroAssembler* masm,
3148 DeoptimizeKind deopt_kind) {
3149 Isolate* isolate = masm->isolate();
3150
3151 // Note: This is an overapproximation; we always reserve space for 32 double
3152 // registers, even though the actual CPU may only support 16. In the latter
3153 // case, SaveFPRegs and RestoreFPRegs still use 32 stack slots, but only fill
3154 // 16.
3155 static constexpr int kDoubleRegsSize =
3156 kDoubleSize * DwVfpRegister::kNumRegisters;
3157
3158 // Save all allocatable VFP registers before messing with them.
3159 {
3160 UseScratchRegisterScope temps(masm);
3161 Register scratch = temps.Acquire();
3162 __ SaveFPRegs(sp, scratch);
3163 }
3164
3165 // Save all general purpose registers before messing with them.
3166 static constexpr int kNumberOfRegisters = Register::kNumRegisters;
3167 STATIC_ASSERT(kNumberOfRegisters == 16);
3168
3169   // Everything but sp, lr and pc, which will be saved but not restored.
3170 RegList restored_regs = kJSCallerSaved | kCalleeSaved | ip.bit();
3171
3172 // Push all 16 registers (needed to populate FrameDescription::registers_).
3173 // TODO(v8:1588): Note that using pc with stm is deprecated, so we should
3174 // perhaps handle this a bit differently.
3175 __ stm(db_w, sp, restored_regs | sp.bit() | lr.bit() | pc.bit());
3176
3177 {
3178 UseScratchRegisterScope temps(masm);
3179 Register scratch = temps.Acquire();
3180 __ Move(scratch, ExternalReference::Create(
3181 IsolateAddressId::kCEntryFPAddress, isolate));
3182 __ str(fp, MemOperand(scratch));
3183 }
3184
3185 static constexpr int kSavedRegistersAreaSize =
3186 (kNumberOfRegisters * kPointerSize) + kDoubleRegsSize;
3187
3188 __ mov(r2, Operand(Deoptimizer::kFixedExitSizeMarker));
3189 // Get the address of the location in the code object (r3) (return
3190 // address for lazy deoptimization) and compute the fp-to-sp delta in
3191 // register r4.
3192 __ mov(r3, lr);
3193 __ add(r4, sp, Operand(kSavedRegistersAreaSize));
3194 __ sub(r4, fp, r4);
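  // r4 = fp - (sp + kSavedRegistersAreaSize): the fp-to-sp delta of the
  // optimized frame, excluding the registers pushed above.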
3195
3196 // Allocate a new deoptimizer object.
3197 // Pass four arguments in r0 to r3 and fifth argument on stack.
3198 __ PrepareCallCFunction(6);
3199 __ mov(r0, Operand(0));
3200 Label context_check;
3201 __ ldr(r1, MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
3202 __ JumpIfSmi(r1, &context_check);
3203 __ ldr(r0, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
3204 __ bind(&context_check);
3205 __ mov(r1, Operand(static_cast<int>(deopt_kind)));
3206 // r2: bailout id already loaded.
3207 // r3: code address or 0 already loaded.
3208 __ str(r4, MemOperand(sp, 0 * kPointerSize)); // Fp-to-sp delta.
3209 __ Move(r5, ExternalReference::isolate_address(isolate));
3210 __ str(r5, MemOperand(sp, 1 * kPointerSize)); // Isolate.
3211 // Call Deoptimizer::New().
3212 {
3213 AllowExternalCallThatCantCauseGC scope(masm);
3214 __ CallCFunction(ExternalReference::new_deoptimizer_function(), 6);
3215 }
3216
3217 // Preserve "deoptimizer" object in register r0 and get the input
3218 // frame descriptor pointer to r1 (deoptimizer->input_);
3219 __ ldr(r1, MemOperand(r0, Deoptimizer::input_offset()));
3220
3221 // Copy core registers into FrameDescription::registers_.
3222 DCHECK_EQ(Register::kNumRegisters, kNumberOfRegisters);
3223 for (int i = 0; i < kNumberOfRegisters; i++) {
3224 int offset = (i * kPointerSize) + FrameDescription::registers_offset();
3225 __ ldr(r2, MemOperand(sp, i * kPointerSize));
3226 __ str(r2, MemOperand(r1, offset));
3227 }
3228
3229 // Copy double registers to double_registers_.
3230 static constexpr int kDoubleRegsOffset =
3231 FrameDescription::double_registers_offset();
3232 {
3233 UseScratchRegisterScope temps(masm);
3234 Register scratch = temps.Acquire();
3235 Register src_location = r4;
3236 __ add(src_location, sp, Operand(kNumberOfRegisters * kPointerSize));
3237 __ RestoreFPRegs(src_location, scratch);
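    // RestoreFPRegs pops the doubles saved on the stack at entry back into
    // d-registers; SaveFPRegsToHeap below then writes them out to the
    // FrameDescription's double_registers_ area.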
3238
3239 Register dst_location = r4;
3240 __ add(dst_location, r1, Operand(kDoubleRegsOffset));
3241 __ SaveFPRegsToHeap(dst_location, scratch);
3242 }
3243
3244   // Mark the stack as not iterable for the CPU profiler, which cannot walk
3245   // the stack without the return address.
3246 {
3247 UseScratchRegisterScope temps(masm);
3248 Register is_iterable = temps.Acquire();
3249 Register zero = r4;
3250 __ Move(is_iterable, ExternalReference::stack_is_iterable_address(isolate));
3251 __ mov(zero, Operand(0));
3252 __ strb(zero, MemOperand(is_iterable));
3253 }
3254
3255 // Remove the saved registers from the stack.
3256 __ add(sp, sp, Operand(kSavedRegistersAreaSize));
3257
3258 // Compute a pointer to the unwinding limit in register r2; that is
3259 // the first stack slot not part of the input frame.
3260 __ ldr(r2, MemOperand(r1, FrameDescription::frame_size_offset()));
3261 __ add(r2, r2, sp);
3262
3263 // Unwind the stack down to - but not including - the unwinding
3264 // limit and copy the contents of the activation frame to the input
3265 // frame description.
3266 __ add(r3, r1, Operand(FrameDescription::frame_content_offset()));
3267 Label pop_loop;
3268 Label pop_loop_header;
3269 __ b(&pop_loop_header);
3270 __ bind(&pop_loop);
3271 __ pop(r4);
3272 __ str(r4, MemOperand(r3, 0));
3273 __ add(r3, r3, Operand(sizeof(uint32_t)));
3274 __ bind(&pop_loop_header);
3275 __ cmp(r2, sp);
3276 __ b(ne, &pop_loop);
3277
3278 // Compute the output frame in the deoptimizer.
3279 __ push(r0); // Preserve deoptimizer object across call.
3280 // r0: deoptimizer object; r1: scratch.
3281 __ PrepareCallCFunction(1);
3282 // Call Deoptimizer::ComputeOutputFrames().
3283 {
3284 AllowExternalCallThatCantCauseGC scope(masm);
3285 __ CallCFunction(ExternalReference::compute_output_frames_function(), 1);
3286 }
3287 __ pop(r0); // Restore deoptimizer object (class Deoptimizer).
3288
3289 __ ldr(sp, MemOperand(r0, Deoptimizer::caller_frame_top_offset()));
3290
3291 // Replace the current (input) frame with the output frames.
3292 Label outer_push_loop, inner_push_loop, outer_loop_header, inner_loop_header;
3293 // Outer loop state: r4 = current "FrameDescription** output_",
3294 // r1 = one past the last FrameDescription**.
3295 __ ldr(r1, MemOperand(r0, Deoptimizer::output_count_offset()));
3296 __ ldr(r4, MemOperand(r0, Deoptimizer::output_offset())); // r4 is output_.
3297 __ add(r1, r4, Operand(r1, LSL, 2));
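  // r1 = output_ + output_count_ * kPointerSize, i.e. one past the last entry
  // (LSL 2 scales the count by the 4-byte pointer size).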
3298 __ jmp(&outer_loop_header);
3299 __ bind(&outer_push_loop);
3300 // Inner loop state: r2 = current FrameDescription*, r3 = loop index.
3301 __ ldr(r2, MemOperand(r4, 0)); // output_[ix]
3302 __ ldr(r3, MemOperand(r2, FrameDescription::frame_size_offset()));
3303 __ jmp(&inner_loop_header);
3304 __ bind(&inner_push_loop);
3305 __ sub(r3, r3, Operand(sizeof(uint32_t)));
3306 __ add(r6, r2, Operand(r3));
3307 __ ldr(r6, MemOperand(r6, FrameDescription::frame_content_offset()));
3308 __ push(r6);
3309 __ bind(&inner_loop_header);
3310 __ cmp(r3, Operand::Zero());
3311 __ b(ne, &inner_push_loop); // test for gt?
3312 __ add(r4, r4, Operand(kPointerSize));
3313 __ bind(&outer_loop_header);
3314 __ cmp(r4, r1);
3315 __ b(lt, &outer_push_loop);
3316
3317 __ ldr(r1, MemOperand(r0, Deoptimizer::input_offset()));
3318
3319 // State:
3320 // r1: Deoptimizer::input_ (FrameDescription*).
3321 // r2: The last output FrameDescription pointer (FrameDescription*).
3322
3323 // Restore double registers from the input frame description.
3324 {
3325 UseScratchRegisterScope temps(masm);
3326 Register scratch = temps.Acquire();
3327 Register src_location = r6;
3328 __ add(src_location, r1, Operand(kDoubleRegsOffset));
3329 __ RestoreFPRegsFromHeap(src_location, scratch);
3330 }
3331
3332 // Push pc and continuation from the last output frame.
3333 __ ldr(r6, MemOperand(r2, FrameDescription::pc_offset()));
3334 __ push(r6);
3335 __ ldr(r6, MemOperand(r2, FrameDescription::continuation_offset()));
3336 __ push(r6);
3337
3338 // Push the registers from the last output frame.
3339 for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
3340 int offset = (i * kPointerSize) + FrameDescription::registers_offset();
3341 __ ldr(r6, MemOperand(r2, offset));
3342 __ push(r6);
3343 }
3344
3345 // Restore the registers from the stack.
3346 __ ldm(ia_w, sp, restored_regs); // all but pc registers.
3347
3348 {
3349 UseScratchRegisterScope temps(masm);
3350 Register is_iterable = temps.Acquire();
3351 Register one = r4;
3352 __ Move(is_iterable, ExternalReference::stack_is_iterable_address(isolate));
3353 __ mov(one, Operand(1));
3354 __ strb(one, MemOperand(is_iterable));
3355 }
3356
3357 // Remove sp, lr and pc.
3358 __ Drop(3);
3359 {
3360 UseScratchRegisterScope temps(masm);
3361 Register scratch = temps.Acquire();
3362 __ pop(scratch); // get continuation, leave pc on stack
3363 __ pop(lr);
3364 __ Jump(scratch);
3365 }
3366
3367 __ stop();
3368 }
3369
3370 } // namespace
3371
3372 void Builtins::Generate_DeoptimizationEntry_Eager(MacroAssembler* masm) {
3373 Generate_DeoptimizationEntry(masm, DeoptimizeKind::kEager);
3374 }
3375
3376 void Builtins::Generate_DeoptimizationEntry_Soft(MacroAssembler* masm) {
3377 Generate_DeoptimizationEntry(masm, DeoptimizeKind::kSoft);
3378 }
3379
3380 void Builtins::Generate_DeoptimizationEntry_Bailout(MacroAssembler* masm) {
3381 Generate_DeoptimizationEntry(masm, DeoptimizeKind::kBailout);
3382 }
3383
3384 void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
3385 Generate_DeoptimizationEntry(masm, DeoptimizeKind::kLazy);
3386 }
3387
3388 #undef __
3389
3390 } // namespace internal
3391 } // namespace v8
3392
3393 #endif // V8_TARGET_ARCH_ARM
3394