// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#if V8_TARGET_ARCH_RISCV64

#include "src/api/api-arguments.h"
#include "src/codegen/code-factory.h"
#include "src/codegen/interface-descriptors-inl.h"
#include "src/debug/debug.h"
#include "src/deoptimizer/deoptimizer.h"
#include "src/execution/frame-constants.h"
#include "src/execution/frames.h"
#include "src/logging/counters.h"
// For interpreter_entry_return_pc_offset. TODO(jkummerow): Drop.
#include "src/codegen/macro-assembler-inl.h"
#include "src/codegen/register-configuration.h"
#include "src/codegen/riscv64/constants-riscv64.h"
#include "src/heap/heap-inl.h"
#include "src/objects/cell.h"
#include "src/objects/foreign.h"
#include "src/objects/heap-number.h"
#include "src/objects/js-generator.h"
#include "src/objects/objects-inl.h"
#include "src/objects/smi.h"
#include "src/runtime/runtime.h"
#include "src/wasm/wasm-linkage.h"
#include "src/wasm/wasm-objects.h"

namespace v8 {
namespace internal {

#define __ ACCESS_MASM(masm)

void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address) {
  ASM_CODE_COMMENT(masm);
  __ li(kJavaScriptCallExtraArg1Register, ExternalReference::Create(address));
  __ Jump(BUILTIN_CODE(masm->isolate(), AdaptorWithBuiltinExitFrame),
          RelocInfo::CODE_TARGET);
}

static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
                                           Runtime::FunctionId function_id) {
  // ----------- S t a t e -------------
  //  -- a0 : actual argument count
  //  -- a1 : target function (preserved for callee)
  //  -- a3 : new target (preserved for callee)
  // -----------------------------------
  {
    FrameScope scope(masm, StackFrame::INTERNAL);
    // Push a copy of the target function, the new target and the actual
    // argument count.
    // Push function as parameter to the runtime call.
    __ SmiTag(kJavaScriptCallArgCountRegister);
    __ Push(kJavaScriptCallTargetRegister, kJavaScriptCallNewTargetRegister,
            kJavaScriptCallArgCountRegister, kJavaScriptCallTargetRegister);
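    // Note: the target function is pushed twice on purpose. The top copy is
    // the argument consumed by the CallRuntime below, while the lower copy
    // (together with the new target and argument count) survives the call
    // and is popped again afterwards.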

    __ CallRuntime(function_id, 1);
    // Use the return value before restoring a0
    __ Add64(a2, a0, Operand(Code::kHeaderSize - kHeapObjectTag));
    // Restore target function, new target and actual argument count.
    __ Pop(kJavaScriptCallTargetRegister, kJavaScriptCallNewTargetRegister,
           kJavaScriptCallArgCountRegister);
    __ SmiUntag(kJavaScriptCallArgCountRegister);
  }

  static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
  __ Jump(a2);
}

namespace {

void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- a0     : number of arguments
  //  -- a1     : constructor function
  //  -- a3     : new target
  //  -- cp     : context
  //  -- ra     : return address
  //  -- sp[...]: constructor arguments
  // -----------------------------------

  // Enter a construct frame.
  {
    FrameScope scope(masm, StackFrame::CONSTRUCT);

    // Preserve the incoming parameters on the stack.
    __ SmiTag(a0);
    __ Push(cp, a0);
    __ SmiUntag(a0);

    // Set up pointer to last argument (skip receiver).
    UseScratchRegisterScope temps(masm);
    temps.Include(t0);
    Register scratch = temps.Acquire();
    __ Add64(
        scratch, fp,
        Operand(StandardFrameConstants::kCallerSPOffset + kSystemPointerSize));
    // Copy arguments and receiver to the expression stack.
    __ PushArray(scratch, a0);
    // The receiver for the builtin/api call.
    __ PushRoot(RootIndex::kTheHoleValue);

    // Call the function.
    // a0: number of arguments (untagged)
    // a1: constructor function
    // a3: new target
    __ InvokeFunctionWithNewTarget(a1, a3, a0, InvokeType::kCall);

    // Restore context from the frame.
    __ Ld(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
    // Restore smi-tagged arguments count from the frame.
    __ Ld(kScratchReg, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
    // Leave construct frame.
  }

  // Remove caller arguments from the stack and return.
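  // (SmiScale turns the smi-tagged argument count into a byte offset; the
  // extra kSystemPointerSize slot dropped below is the receiver.)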
  __ SmiScale(kScratchReg, kScratchReg, kSystemPointerSizeLog2);
  __ Add64(sp, sp, kScratchReg);
  __ Add64(sp, sp, kSystemPointerSize);
  __ Ret();
}

}  // namespace

// The construct stub for ES5 constructor functions and ES6 class constructors.
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- a0: number of arguments (untagged)
  //  -- a1: constructor function
  //  -- a3: new target
  //  -- cp: context
  //  -- ra: return address
  //  -- sp[...]: constructor arguments
  // -----------------------------------
  UseScratchRegisterScope temps(masm);
  temps.Include(t0, t1);
  // Enter a construct frame.
  FrameScope scope(masm, StackFrame::MANUAL);
  Label post_instantiation_deopt_entry, not_create_implicit_receiver;
  __ EnterFrame(StackFrame::CONSTRUCT);

  // Preserve the incoming parameters on the stack.
  __ SmiTag(a0);
  __ Push(cp, a0, a1);
  __ PushRoot(RootIndex::kUndefinedValue);
  __ Push(a3);

  // ----------- S t a t e -------------
  //  -- sp[0*kSystemPointerSize]: new target
  //  -- sp[1*kSystemPointerSize]: padding
  //  -- a1 and sp[2*kSystemPointerSize]: constructor function
  //  -- sp[3*kSystemPointerSize]: number of arguments (tagged)
  //  -- sp[4*kSystemPointerSize]: context
  // -----------------------------------
  {
    UseScratchRegisterScope temps(masm);
    Register func_info = temps.Acquire();
    __ LoadTaggedPointerField(
        func_info, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
    __ Lwu(func_info,
           FieldMemOperand(func_info, SharedFunctionInfo::kFlagsOffset));
    __ DecodeField<SharedFunctionInfo::FunctionKindBits>(func_info);
    __ JumpIfIsInRange(func_info, kDefaultDerivedConstructor,
                       kDerivedConstructor, &not_create_implicit_receiver);
    Register scratch = func_info;
    Register scratch2 = temps.Acquire();
    // If not derived class constructor: Allocate the new receiver object.
    __ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1,
                        scratch, scratch2);
    __ Call(BUILTIN_CODE(masm->isolate(), FastNewObject),
            RelocInfo::CODE_TARGET);
    __ BranchShort(&post_instantiation_deopt_entry);

    // Else: use TheHoleValue as receiver for constructor call
    __ bind(&not_create_implicit_receiver);
    __ LoadRoot(a0, RootIndex::kTheHoleValue);
  }
  // ----------- S t a t e -------------
  //  -- a0: receiver
  //  -- Slot 4 / sp[0*kSystemPointerSize]: new target
  //  -- Slot 3 / sp[1*kSystemPointerSize]: padding
  //  -- Slot 2 / sp[2*kSystemPointerSize]: constructor function
  //  -- Slot 1 / sp[3*kSystemPointerSize]: number of arguments (tagged)
  //  -- Slot 0 / sp[4*kSystemPointerSize]: context
  // -----------------------------------
  // Deoptimizer enters here.
  masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset(
      masm->pc_offset());
  __ bind(&post_instantiation_deopt_entry);

  // Restore new target.
  __ Pop(a3);

  // Push the allocated receiver to the stack.
  __ Push(a0);

  // We need two copies of the receiver because we may have to return the
  // original one, and the calling convention dictates that the called
  // function pops the receiver. The second copy is stashed in a6 (a0 will
  // hold the call's return value) and pushed after the arguments below.
  __ Move(a6, a0);

  // Set up pointer to last argument.
  Register scratch = temps.Acquire();
  __ Add64(
      scratch, fp,
      Operand(StandardFrameConstants::kCallerSPOffset + kSystemPointerSize));

  // ----------- S t a t e -------------
  //  -- a3: new target
  //  -- sp[0*kSystemPointerSize]: implicit receiver
  //  -- sp[1*kSystemPointerSize]: implicit receiver
  //  -- sp[2*kSystemPointerSize]: padding
  //  -- sp[3*kSystemPointerSize]: constructor function
  //  -- sp[4*kSystemPointerSize]: number of arguments (tagged)
  //  -- sp[5*kSystemPointerSize]: context
  // -----------------------------------

  // Restore constructor function and argument count.
  __ Ld(a1, MemOperand(fp, ConstructFrameConstants::kConstructorOffset));
  __ Ld(a0, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
  __ SmiUntag(a0);

  Label stack_overflow;
  {
    UseScratchRegisterScope temps(masm);
    __ StackOverflowCheck(a0, temps.Acquire(), temps.Acquire(),
                          &stack_overflow);
  }
  // TODO(victorgomes): When the arguments adaptor is completely removed, we
  // should get the formal parameter count and copy the arguments in its
  // correct position (including any undefined), instead of delaying this to
  // InvokeFunction.

  // Copy arguments and receiver to the expression stack.
  __ PushArray(scratch, a0);
  // We need two copies because we may have to return the original one
  // and the calling conventions dictate that the called function pops the
  // receiver. The second copy is pushed after the arguments,
  __ Push(a6);

  // Call the function.
  __ InvokeFunctionWithNewTarget(a1, a3, a0, InvokeType::kCall);

  // ----------- S t a t e -------------
  //  -- a0: constructor result
  //  -- sp[0*kSystemPointerSize]: implicit receiver
  //  -- sp[1*kSystemPointerSize]: padding
  //  -- sp[2*kSystemPointerSize]: constructor function
  //  -- sp[3*kSystemPointerSize]: number of arguments
  //  -- sp[4*kSystemPointerSize]: context
  // -----------------------------------

  // Store offset of return address for deoptimizer.
  masm->isolate()->heap()->SetConstructStubInvokeDeoptPCOffset(
      masm->pc_offset());

  // Restore the context from the frame.
  __ Ld(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));

  // If the result is an object (in the ECMA sense), we should get rid
  // of the receiver and use the result; see ECMA-262 section 13.2.2-7
  // on page 74.
  Label use_receiver, do_throw, leave_and_return, check_receiver;

  // If the result is undefined, we jump out to using the implicit receiver.
  __ JumpIfNotRoot(a0, RootIndex::kUndefinedValue, &check_receiver);

  // Otherwise we do a smi check and fall through to check if the return value
  // is a valid receiver.

  // Throw away the result of the constructor invocation and use the
  // on-stack receiver as the result.
  __ bind(&use_receiver);
  __ Ld(a0, MemOperand(sp, 0 * kSystemPointerSize));
  __ JumpIfRoot(a0, RootIndex::kTheHoleValue, &do_throw);

  __ bind(&leave_and_return);
  // Restore smi-tagged arguments count from the frame.
  __ Ld(a1, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
  // Leave construct frame.
  __ LeaveFrame(StackFrame::CONSTRUCT);

  // Remove caller arguments from the stack and return.
  __ SmiScale(a4, a1, kSystemPointerSizeLog2);
  __ Add64(sp, sp, a4);
  __ Add64(sp, sp, kSystemPointerSize);
  __ Ret();

  __ bind(&check_receiver);
  __ JumpIfSmi(a0, &use_receiver);

  // If the type of the result (stored in its map) is less than
  // FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense.
  {
    UseScratchRegisterScope temps(masm);
    Register map = temps.Acquire(), type = temps.Acquire();
    __ GetObjectType(a0, map, type);

    STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
    __ Branch(&leave_and_return, greater_equal, type,
              Operand(FIRST_JS_RECEIVER_TYPE));
    __ Branch(&use_receiver);
  }
  __ bind(&do_throw);
  // Restore the context from the frame.
  __ Ld(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
  __ CallRuntime(Runtime::kThrowConstructorReturnedNonObject);
  __ break_(0xCC);

  __ bind(&stack_overflow);
  // Restore the context from the frame.
  __ Ld(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
  __ CallRuntime(Runtime::kThrowStackOverflow);
  __ break_(0xCC);
}

void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
  Generate_JSBuiltinsConstructStubHelper(masm);
}

static void AssertCodeIsBaseline(MacroAssembler* masm, Register code,
                                 Register scratch) {
  DCHECK(!AreAliased(code, scratch));
  // Verify that the code kind is baseline code via the CodeKind.
  __ Ld(scratch, FieldMemOperand(code, Code::kFlagsOffset));
  __ DecodeField<Code::KindField>(scratch);
  __ Assert(eq, AbortReason::kExpectedBaselineData, scratch,
            Operand(static_cast<int64_t>(CodeKind::BASELINE)));
}
// TODO(v8:11429): Add a path for "not_compiled" and unify the two uses under
// the more general dispatch.
static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
                                                    Register sfi_data,
                                                    Register scratch1,
                                                    Label* is_baseline) {
  ASM_CODE_COMMENT(masm);
  Label done;

  __ GetObjectType(sfi_data, scratch1, scratch1);
  __ Branch(is_baseline, eq, scratch1, Operand(CODET_TYPE));
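  // (A CODET_TYPE value here means the function data slot already holds a
  // Code object, i.e. the function has baseline code.)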

  __ Branch(&done, ne, scratch1, Operand(INTERPRETER_DATA_TYPE),
            Label::Distance::kNear);
  __ LoadTaggedPointerField(
      sfi_data,
      FieldMemOperand(sfi_data, InterpreterData::kBytecodeArrayOffset));

  __ bind(&done);
}

// static
void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- a0 : the value to pass to the generator
  //  -- a1 : the JSGeneratorObject to resume
  //  -- ra : return address
  // -----------------------------------

  // Store input value into generator object.
  __ StoreTaggedField(
      a0, FieldMemOperand(a1, JSGeneratorObject::kInputOrDebugPosOffset));
  __ RecordWriteField(a1, JSGeneratorObject::kInputOrDebugPosOffset, a0,
                      kRAHasNotBeenSaved, SaveFPRegsMode::kIgnore);
  // Check that a1 is still valid, RecordWrite might have clobbered it.
  __ AssertGeneratorObject(a1);

  // Load suspended function and context.
  __ LoadTaggedPointerField(
      a4, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
  __ LoadTaggedPointerField(cp,
                            FieldMemOperand(a4, JSFunction::kContextOffset));

  // Flood function if we are stepping.
  Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
  Label stepping_prepared;
  ExternalReference debug_hook =
      ExternalReference::debug_hook_on_function_call_address(masm->isolate());
  __ li(a5, debug_hook);
  __ Lb(a5, MemOperand(a5));
  __ Branch(&prepare_step_in_if_stepping, ne, a5, Operand(zero_reg));

  // Flood function if we need to continue stepping in the suspended generator.
  ExternalReference debug_suspended_generator =
      ExternalReference::debug_suspended_generator_address(masm->isolate());
  __ li(a5, debug_suspended_generator);
  __ Ld(a5, MemOperand(a5));
  __ Branch(&prepare_step_in_suspended_generator, eq, a1, Operand(a5));
  __ bind(&stepping_prepared);

  // Check the stack for overflow. We are not trying to catch interruptions
  // (i.e. debug break and preemption) here, so check the "real stack limit".
  Label stack_overflow;
  __ LoadStackLimit(kScratchReg,
                    MacroAssembler::StackLimitKind::kRealStackLimit);
  __ Branch(&stack_overflow, Uless, sp, Operand(kScratchReg));

  // ----------- S t a t e -------------
  //  -- a1 : the JSGeneratorObject to resume
  //  -- a4 : generator function
  //  -- cp : generator context
  //  -- ra : return address
  // -----------------------------------

  // Push holes for arguments to generator function. Since the parser forced
  // context allocation for any variables in generators, the actual argument
  // values have already been copied into the context and these dummy values
  // will never be used.
  __ LoadTaggedPointerField(
      a3, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
  __ Lhu(a3,
         FieldMemOperand(a3, SharedFunctionInfo::kFormalParameterCountOffset));
  __ LoadTaggedPointerField(
      t1,
      FieldMemOperand(a1, JSGeneratorObject::kParametersAndRegistersOffset));
  {
    Label done_loop, loop;
    __ bind(&loop);
    __ Sub64(a3, a3, Operand(1));
    __ Branch(&done_loop, lt, a3, Operand(zero_reg), Label::Distance::kNear);
    __ CalcScaledAddress(kScratchReg, t1, a3, kTaggedSizeLog2);
    __ LoadAnyTaggedField(
        kScratchReg, FieldMemOperand(kScratchReg, FixedArray::kHeaderSize));
    __ Push(kScratchReg);
    __ Branch(&loop);
    __ bind(&done_loop);
    // Push receiver.
    __ LoadAnyTaggedField(
        kScratchReg, FieldMemOperand(a1, JSGeneratorObject::kReceiverOffset));
    __ Push(kScratchReg);
  }

  // Underlying function needs to have bytecode available.
  if (FLAG_debug_code) {
    Label is_baseline;
    __ LoadTaggedPointerField(
        a3, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
    __ LoadTaggedPointerField(
        a3, FieldMemOperand(a3, SharedFunctionInfo::kFunctionDataOffset));
    GetSharedFunctionInfoBytecodeOrBaseline(masm, a3, a0, &is_baseline);
    __ GetObjectType(a3, a3, a3);
    __ Assert(eq, AbortReason::kMissingBytecodeArray, a3,
              Operand(BYTECODE_ARRAY_TYPE));
    __ bind(&is_baseline);
  }

  // Resume (Ignition/TurboFan) generator object.
  {
    __ LoadTaggedPointerField(
        a0, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
    __ Lhu(a0, FieldMemOperand(
                   a0, SharedFunctionInfo::kFormalParameterCountOffset));
    // We abuse new.target both to indicate that this is a resume call and to
    // pass in the generator object. In ordinary calls, new.target is always
    // undefined because generator functions are non-constructable.
    __ Move(a3, a1);
    __ Move(a1, a4);
    static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
    __ LoadTaggedPointerField(a2, FieldMemOperand(a1, JSFunction::kCodeOffset));
    __ JumpCodeObject(a2);
  }

  __ bind(&prepare_step_in_if_stepping);
  {
    FrameScope scope(masm, StackFrame::INTERNAL);
    __ Push(a1, a4);
    // Push hole as receiver since we do not use it for stepping.
    __ PushRoot(RootIndex::kTheHoleValue);
    __ CallRuntime(Runtime::kDebugOnFunctionCall);
    __ Pop(a1);
  }
  __ LoadTaggedPointerField(
      a4, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
  __ Branch(&stepping_prepared);

  __ bind(&prepare_step_in_suspended_generator);
  {
    FrameScope scope(masm, StackFrame::INTERNAL);
    __ Push(a1);
    __ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator);
    __ Pop(a1);
  }
  __ LoadTaggedPointerField(
      a4, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
  __ Branch(&stepping_prepared);

  __ bind(&stack_overflow);
  {
    FrameScope scope(masm, StackFrame::INTERNAL);
    __ CallRuntime(Runtime::kThrowStackOverflow);
    __ break_(0xCC);  // This should be unreachable.
  }
}

void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
  FrameScope scope(masm, StackFrame::INTERNAL);
  __ Push(a1);
  __ CallRuntime(Runtime::kThrowConstructedNonConstructable);
}

// Clobbers scratch1 and scratch2; preserves all other registers.
static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc,
                                        Register scratch1, Register scratch2) {
  // Check the stack for overflow. We are not trying to catch
  // interruptions (e.g. debug break and preemption) here, so the "real stack
  // limit" is checked.
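  // The check below computes remaining = sp - real_limit and
  // needed = argc * kSystemPointerSize, then throws unless remaining > needed.
  // The comparison is signed, so an already-overflowed stack (negative
  // remaining) also throws.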
  Label okay;
  __ LoadStackLimit(scratch1, MacroAssembler::StackLimitKind::kRealStackLimit);
  // Make scratch1 the space we have left. The stack might already be
  // overflowed here, which will cause scratch1 to become negative.
  __ Sub64(scratch1, sp, scratch1);
  // Check if the arguments will overflow the stack.
  __ Sll64(scratch2, argc, kSystemPointerSizeLog2);
  __ Branch(&okay, gt, scratch1, Operand(scratch2),
            Label::Distance::kNear);  // Signed comparison.

  // Out of stack space.
  __ CallRuntime(Runtime::kThrowStackOverflow);

  __ bind(&okay);
}

namespace {

// Called with the native C calling convention. The corresponding function
// signature is either:
//
//   using JSEntryFunction = GeneratedCode<Address(
//       Address root_register_value, Address new_target, Address target,
//       Address receiver, intptr_t argc, Address** args)>;
// or
//   using JSEntryFunction = GeneratedCode<Address(
//       Address root_register_value, MicrotaskQueue* microtask_queue)>;
void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
                             Builtin entry_trampoline) {
  Label invoke, handler_entry, exit;

  {
    NoRootArrayScope no_root_array(masm);

    // TODO(plind): unify the ABI description here.
    // Registers:
    //  either
    //   a0: root register value
    //   a1: entry address
    //   a2: function
    //   a3: receiver
    //   a4: argc
    //   a5: argv
    //  or
    //   a0: root register value
    //   a1: microtask_queue

    // Save callee saved registers on the stack.
    __ MultiPush(kCalleeSaved | ra.bit());

    // Save callee-saved FPU registers.
    __ MultiPushFPU(kCalleeSavedFPU);
    // Set up the reserved register for 0.0.
    __ LoadFPRImmediate(kDoubleRegZero, 0.0);

    // Initialize the root register.
    // C calling convention. The first argument is passed in a0.
    __ Move(kRootRegister, a0);

#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
    // Initialize the pointer cage base register.
    __ LoadRootRelative(kPtrComprCageBaseRegister,
                        IsolateData::cage_base_offset());
#endif
  }

  // a1: entry address
  // a2: function
  // a3: receiver
  // a4: argc
  // a5: argv

  // We build an EntryFrame.
  __ li(s1, Operand(-1));  // Push a bad frame pointer to fail if it is used.
  __ li(s2, Operand(StackFrame::TypeToMarker(type)));
  __ li(s3, Operand(StackFrame::TypeToMarker(type)));
  ExternalReference c_entry_fp = ExternalReference::Create(
      IsolateAddressId::kCEntryFPAddress, masm->isolate());
  __ li(s5, c_entry_fp);
  __ Ld(s4, MemOperand(s5));
  __ Push(s1, s2, s3, s4);
  // Clear c_entry_fp, now we've pushed its previous value to the stack.
  // If the c_entry_fp is not already zero and we don't clear it, the
  // SafeStackFrameIterator will assume we are executing C++ and miss the JS
  // frames on top.
  __ Sd(zero_reg, MemOperand(s5));
  // Set up frame pointer for the frame to be pushed.
  __ Add64(fp, sp, -EntryFrameConstants::kCallerFPOffset);
  // Registers:
  //  either
  //   a1: entry address
  //   a2: function
  //   a3: receiver
  //   a4: argc
  //   a5: argv
  //  or
  //   a1: microtask_queue
  //
  // Stack:
  //   caller fp          |
  //   function slot      | entry frame
  //   context slot       |
  //   bad fp (0xFF...F)  |
  //   callee saved registers + ra
  //   [ O32: 4 args slots]
  //   args

  // If this is the outermost JS call, set js_entry_sp value.
  Label non_outermost_js;
  ExternalReference js_entry_sp = ExternalReference::Create(
      IsolateAddressId::kJSEntrySPAddress, masm->isolate());
  __ li(s1, js_entry_sp);
  __ Ld(s2, MemOperand(s1));
  __ Branch(&non_outermost_js, ne, s2, Operand(zero_reg),
            Label::Distance::kNear);
  __ Sd(fp, MemOperand(s1));
  __ li(s3, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
  Label cont;
  __ Branch(&cont);
  __ bind(&non_outermost_js);
  __ li(s3, Operand(StackFrame::INNER_JSENTRY_FRAME));
  __ bind(&cont);
  __ push(s3);

  // Jump to a faked try block that does the invoke, with a faked catch
  // block that sets the pending exception.
  __ BranchShort(&invoke);
  __ bind(&handler_entry);

  // Store the current pc as the handler offset. It's used later to create the
  // handler table.
  masm->isolate()->builtins()->SetJSEntryHandlerOffset(handler_entry.pos());

  // Caught exception: Store result (exception) in the pending exception
  // field in the JSEnv and return a failure sentinel. Coming in here the
  // fp will be invalid because the PushStackHandler below sets it to 0 to
  // signal the existence of the JSEntry frame.
  __ li(s1, ExternalReference::Create(
                IsolateAddressId::kPendingExceptionAddress, masm->isolate()));
  __ Sd(a0, MemOperand(s1));  // We come back from 'invoke'. result is in a0.
  __ LoadRoot(a0, RootIndex::kException);
  __ BranchShort(&exit);

  // Invoke: Link this frame into the handler chain.
  __ bind(&invoke);
  __ PushStackHandler();
  // If an exception not caught by another handler occurs, this handler
  // returns control to the code after the bal(&invoke) above, which
  // restores all kCalleeSaved registers (including cp and fp) to their
  // saved values before returning a failure to C.
  //
  // Registers:
  //  either
  //   a0: root register value
  //   a1: entry address
  //   a2: function
  //   a3: receiver
  //   a4: argc
  //   a5: argv
  //  or
  //   a0: root register value
  //   a1: microtask_queue
  //
  // Stack:
  //   handler frame
  //   entry frame
  //   callee saved registers + ra
  //   [ O32: 4 args slots]
  //   args
  //
  // Invoke the function by calling through JS entry trampoline builtin and
  // pop the faked function when we return.

  Handle<Code> trampoline_code =
      masm->isolate()->builtins()->code_handle(entry_trampoline);
  __ Call(trampoline_code, RelocInfo::CODE_TARGET);

  // Unlink this frame from the handler chain.
  __ PopStackHandler();

  __ bind(&exit);  // a0 holds result
  // Check if the current stack frame is marked as the outermost JS frame.
  Label non_outermost_js_2;
  __ pop(a5);
  __ Branch(&non_outermost_js_2, ne, a5,
            Operand(StackFrame::OUTERMOST_JSENTRY_FRAME),
            Label::Distance::kNear);
  __ li(a5, js_entry_sp);
  __ Sd(zero_reg, MemOperand(a5));
  __ bind(&non_outermost_js_2);

  // Restore the top frame descriptors from the stack.
  __ pop(a5);
  __ li(a4, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
                                      masm->isolate()));
  __ Sd(a5, MemOperand(a4));

  // Reset the stack to the callee saved registers.
  __ Add64(sp, sp, -EntryFrameConstants::kCallerFPOffset);

  // Restore callee-saved fpu registers.
  __ MultiPopFPU(kCalleeSavedFPU);

  // Restore callee saved registers from the stack.
  __ MultiPop(kCalleeSaved | ra.bit());
  // Return.
  __ Jump(ra);
}

}  // namespace

void Builtins::Generate_JSEntry(MacroAssembler* masm) {
  Generate_JSEntryVariant(masm, StackFrame::ENTRY, Builtin::kJSEntryTrampoline);
}

void Builtins::Generate_JSConstructEntry(MacroAssembler* masm) {
  Generate_JSEntryVariant(masm, StackFrame::CONSTRUCT_ENTRY,
                          Builtin::kJSConstructEntryTrampoline);
}

void Builtins::Generate_JSRunMicrotasksEntry(MacroAssembler* masm) {
  Generate_JSEntryVariant(masm, StackFrame::ENTRY,
                          Builtin::kRunMicrotasksTrampoline);
}

static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
                                             bool is_construct) {
  // ----------- S t a t e -------------
  //  -- a1: new.target
  //  -- a2: function
  //  -- a3: receiver_pointer
  //  -- a4: argc
  //  -- a5: argv
  // -----------------------------------

  // Enter an internal frame.
  {
    FrameScope scope(masm, StackFrame::INTERNAL);

    // Setup the context (we need to use the caller context from the isolate).
    ExternalReference context_address = ExternalReference::Create(
        IsolateAddressId::kContextAddress, masm->isolate());
    __ li(cp, context_address);
    __ Ld(cp, MemOperand(cp));

    // Push the function onto the stack.
    __ Push(a2);

    // Check if we have enough stack space to push all arguments.
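    // a6 = argc + 1; the extra slot accounts for the receiver pushed below.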
    __ Add64(a6, a4, 1);
    Generate_CheckStackOverflow(masm, a6, a0, s2);

    // Copy arguments to the stack in a loop.
    // a4: argc
    // a5: argv, i.e. points to first arg
    Label loop, entry;
    __ CalcScaledAddress(s1, a5, a4, kSystemPointerSizeLog2);
    __ BranchShort(&entry);
    // s1 points past last arg.
    __ bind(&loop);
    __ Add64(s1, s1, -kSystemPointerSize);
    __ Ld(s2, MemOperand(s1));  // Read next parameter.
    __ Ld(s2, MemOperand(s2));  // Dereference handle.
    __ push(s2);                // Push parameter.
    __ bind(&entry);
    __ Branch(&loop, ne, a5, Operand(s1));

    // Push the receiver.
    __ Push(a3);

    // a0: argc
    // a1: function
    // a3: new.target
    __ Move(a3, a1);
    __ Move(a1, a2);
    __ Move(a0, a4);

    // Initialize all JavaScript callee-saved registers, since they will be
    // seen by the garbage collector as part of handlers.
    __ LoadRoot(a4, RootIndex::kUndefinedValue);
    __ Move(a5, a4);
    __ Move(s1, a4);
    __ Move(s2, a4);
    __ Move(s3, a4);
    __ Move(s4, a4);
    __ Move(s5, a4);
#ifndef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
    __ Move(s11, a4);
#endif
    // s6 holds the root address. Do not clobber.
    // s7 is cp. Do not init.

    // Invoke the code.
    Handle<Code> builtin = is_construct
                               ? BUILTIN_CODE(masm->isolate(), Construct)
                               : masm->isolate()->builtins()->Call();
    __ Call(builtin, RelocInfo::CODE_TARGET);

    // Leave internal frame.
  }
  __ Jump(ra);
}

void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
  Generate_JSEntryTrampolineHelper(masm, false);
}

void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
  Generate_JSEntryTrampolineHelper(masm, true);
}

void Builtins::Generate_RunMicrotasksTrampoline(MacroAssembler* masm) {
  // a1: microtask_queue
  __ Move(RunMicrotasksDescriptor::MicrotaskQueueRegister(), a1);
  __ Jump(BUILTIN_CODE(masm->isolate(), RunMicrotasks), RelocInfo::CODE_TARGET);
}

static void ReplaceClosureCodeWithOptimizedCode(MacroAssembler* masm,
                                                Register optimized_code,
                                                Register closure,
                                                Register scratch1,
                                                Register scratch2) {
  ASM_CODE_COMMENT(masm);
  DCHECK(!AreAliased(optimized_code, closure));
  // Store code entry in the closure.
  __ StoreTaggedField(optimized_code,
                      FieldMemOperand(closure, JSFunction::kCodeOffset));
  __ Move(scratch1, optimized_code);  // Write barrier clobbers scratch1 below.
  __ RecordWriteField(closure, JSFunction::kCodeOffset, scratch1,
                      kRAHasNotBeenSaved, SaveFPRegsMode::kIgnore,
                      RememberedSetAction::kOmit, SmiCheck::kOmit);
}

static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
                                  Register scratch2) {
  ASM_CODE_COMMENT(masm);
  Register params_size = scratch1;

  // Get the size of the formal parameters + receiver (in bytes).
  __ Ld(params_size,
        MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
  __ Lw(params_size,
        FieldMemOperand(params_size, BytecodeArray::kParameterSizeOffset));

  Register actual_params_size = scratch2;
  Label L1;
  // Compute the size of the actual parameters + receiver (in bytes).
  __ Ld(actual_params_size,
        MemOperand(fp, StandardFrameConstants::kArgCOffset));
  __ Sll64(actual_params_size, actual_params_size, kSystemPointerSizeLog2);
  __ Add64(actual_params_size, actual_params_size,
           Operand(kSystemPointerSize));

  // If actual is bigger than formal, then we should use it to free up the
  // stack arguments.
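  // In other words, params_size = max(params_size, actual_params_size); the
  // branch-and-move below keeps whichever is larger.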
  __ Branch(&L1, le, actual_params_size, Operand(params_size),
            Label::Distance::kNear);
  __ Move(params_size, actual_params_size);
  __ bind(&L1);

  // Leave the frame (also dropping the register file).
  __ LeaveFrame(StackFrame::INTERPRETED);

  // Drop receiver + arguments.
  __ Add64(sp, sp, params_size);
}

// Tail-call |function_id| if |actual_marker| == |expected_marker|
static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
                                          Register actual_marker,
                                          OptimizationMarker expected_marker,
                                          Runtime::FunctionId function_id) {
  ASM_CODE_COMMENT(masm);
  Label no_match;
  __ Branch(&no_match, ne, actual_marker, Operand(expected_marker),
            Label::Distance::kNear);
  GenerateTailCallToReturnedCode(masm, function_id);
  __ bind(&no_match);
}

static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
                                      Register optimized_code_entry,
                                      Register scratch1, Register scratch2) {
  // ----------- S t a t e -------------
  //  -- a0 : actual argument count
  //  -- a3 : new target (preserved for callee if needed, and caller)
  //  -- a1 : target function (preserved for callee if needed, and caller)
  // -----------------------------------
  ASM_CODE_COMMENT(masm);
  DCHECK(!AreAliased(optimized_code_entry, a1, a3, scratch1, scratch2));

  Register closure = a1;
  Label heal_optimized_code_slot;

  // If the optimized code is cleared, go to runtime to update the optimization
  // marker field.
  __ LoadWeakValue(optimized_code_entry, optimized_code_entry,
                   &heal_optimized_code_slot);

  // Check if the optimized code is marked for deopt. If it is, call the
  // runtime to clear it.
  __ LoadTaggedPointerField(
      a5,
      FieldMemOperand(optimized_code_entry, Code::kCodeDataContainerOffset));
  __ Lw(a5, FieldMemOperand(a5, CodeDataContainer::kKindSpecificFlagsOffset));
  __ And(a5, a5, Operand(1 << Code::kMarkedForDeoptimizationBit));
  __ Branch(&heal_optimized_code_slot, ne, a5, Operand(zero_reg),
            Label::Distance::kNear);

  // Optimized code is good, get it into the closure and link the closure into
  // the optimized functions list, then tail call the optimized code.
  // The feedback vector is no longer used, so re-use it as a scratch
  // register.
  ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure,
                                      scratch1, scratch2);

  static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
  __ LoadCodeObjectEntry(a2, optimized_code_entry);
  __ Jump(a2);

  // Optimized code slot contains deoptimized code or code is cleared and
  // optimized code marker isn't updated. Evict the code, update the marker
  // and re-enter the closure's code.
  __ bind(&heal_optimized_code_slot);
  GenerateTailCallToReturnedCode(masm, Runtime::kHealOptimizedCodeSlot);
}

static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
                              Register optimization_marker) {
  // ----------- S t a t e -------------
  //  -- a0 : actual argument count
  //  -- a3 : new target (preserved for callee if needed, and caller)
  //  -- a1 : target function (preserved for callee if needed, and caller)
  //  -- feedback vector (preserved for caller if needed)
  //  -- optimization_marker : an int32 containing a non-zero optimization
  //     marker.
  // -----------------------------------
  ASM_CODE_COMMENT(masm);
  DCHECK(!AreAliased(feedback_vector, a1, a3, optimization_marker));

  // TODO(v8:8394): The logging of first execution will break if
  // feedback vectors are not allocated. We need to find a different way of
  // logging these events if required.
  TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
                                OptimizationMarker::kLogFirstExecution,
                                Runtime::kFunctionFirstExecution);
  TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
                                OptimizationMarker::kCompileOptimized,
                                Runtime::kCompileOptimized_NotConcurrent);
  TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
                                OptimizationMarker::kCompileOptimizedConcurrent,
                                Runtime::kCompileOptimized_Concurrent);

  // Marker should be one of LogFirstExecution / CompileOptimized /
  // CompileOptimizedConcurrent. InOptimizationQueue and None shouldn't reach
  // here.
  if (FLAG_debug_code) {
    __ stop();
  }
}

// Advance the current bytecode offset. This simulates what all bytecode
// handlers do upon completion of the underlying operation. Will bail out to a
// label if the bytecode (without prefix) is a return bytecode. Will not advance
// the bytecode offset if the current bytecode is a JumpLoop, instead just
// re-executing the JumpLoop to jump to the correct bytecode.
static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
                                          Register bytecode_array,
                                          Register bytecode_offset,
                                          Register bytecode, Register scratch1,
                                          Register scratch2, Register scratch3,
                                          Label* if_return) {
  ASM_CODE_COMMENT(masm);
  Register bytecode_size_table = scratch1;

  // The bytecode offset value will be increased by one in wide and extra wide
  // cases. In the case of having a wide or extra wide JumpLoop bytecode, we
  // will restore the original bytecode. In order to simplify the code, we have
  // a backup of it.
  Register original_bytecode_offset = scratch3;
  DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode,
                     bytecode_size_table, original_bytecode_offset));
  __ Move(original_bytecode_offset, bytecode_offset);
  __ li(bytecode_size_table, ExternalReference::bytecode_size_table_address());
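  // The size table is assumed to consist of three consecutive sub-tables of
  // interpreter::Bytecodes::kBytecodeCount byte-sized entries each: sizes for
  // unscaled bytecodes, then for Wide-prefixed ones, then for ExtraWide-
  // prefixed ones. The prefix handling below selects a sub-table by offsetting
  // the table pointer by a multiple of kByteSize * kBytecodeCount.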

  // Check if the bytecode is a Wide or ExtraWide prefix bytecode.
  Label process_bytecode, extra_wide;
  STATIC_ASSERT(0 == static_cast<int>(interpreter::Bytecode::kWide));
  STATIC_ASSERT(1 == static_cast<int>(interpreter::Bytecode::kExtraWide));
  STATIC_ASSERT(2 == static_cast<int>(interpreter::Bytecode::kDebugBreakWide));
  STATIC_ASSERT(3 ==
                static_cast<int>(interpreter::Bytecode::kDebugBreakExtraWide));
  __ Branch(&process_bytecode, Ugreater, bytecode, Operand(3),
            Label::Distance::kNear);
  __ And(scratch2, bytecode, Operand(1));
  __ Branch(&extra_wide, ne, scratch2, Operand(zero_reg),
            Label::Distance::kNear);

  // Load the next bytecode and update table to the wide scaled table.
  __ Add64(bytecode_offset, bytecode_offset, Operand(1));
  __ Add64(scratch2, bytecode_array, bytecode_offset);
  __ Lbu(bytecode, MemOperand(scratch2));
  __ Add64(bytecode_size_table, bytecode_size_table,
           Operand(kByteSize * interpreter::Bytecodes::kBytecodeCount));
  __ BranchShort(&process_bytecode);

  __ bind(&extra_wide);
  // Load the next bytecode and update table to the extra wide scaled table.
  __ Add64(bytecode_offset, bytecode_offset, Operand(1));
  __ Add64(scratch2, bytecode_array, bytecode_offset);
  __ Lbu(bytecode, MemOperand(scratch2));
  __ Add64(bytecode_size_table, bytecode_size_table,
           Operand(2 * kByteSize * interpreter::Bytecodes::kBytecodeCount));

  __ bind(&process_bytecode);

// Bailout to the return label if this is a return bytecode.
#define JUMP_IF_EQUAL(NAME)          \
  __ Branch(if_return, eq, bytecode, \
            Operand(static_cast<int64_t>(interpreter::Bytecode::k##NAME)));
  RETURN_BYTECODE_LIST(JUMP_IF_EQUAL)
#undef JUMP_IF_EQUAL

  // If this is a JumpLoop, re-execute it to perform the jump to the beginning
  // of the loop.
  Label end, not_jump_loop;
  __ Branch(&not_jump_loop, ne, bytecode,
            Operand(static_cast<int64_t>(interpreter::Bytecode::kJumpLoop)),
            Label::Distance::kNear);
  // We need to restore the original bytecode_offset since we might have
  // increased it to skip the wide / extra-wide prefix bytecode.
  __ Move(bytecode_offset, original_bytecode_offset);
  __ BranchShort(&end);

  __ bind(&not_jump_loop);
  // Otherwise, load the size of the current bytecode and advance the offset.
  __ Add64(scratch2, bytecode_size_table, bytecode);
  __ Lb(scratch2, MemOperand(scratch2));
  __ Add64(bytecode_offset, bytecode_offset, scratch2);

  __ bind(&end);
}

// Read off the optimization state in the feedback vector and check if there
// is optimized code or an optimization marker that needs to be processed.
static void LoadOptimizationStateAndJumpIfNeedsProcessing(
    MacroAssembler* masm, Register optimization_state,
    Register feedback_vector, Label* has_optimized_code_or_marker) {
  ASM_CODE_COMMENT(masm);
  DCHECK(!AreAliased(optimization_state, feedback_vector));
  UseScratchRegisterScope temps(masm);
  Register scratch = temps.Acquire();
  __ Lw(optimization_state,
        FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
  __ And(
      scratch, optimization_state,
      Operand(FeedbackVector::kHasOptimizedCodeOrCompileOptimizedMarkerMask));
  __ Branch(has_optimized_code_or_marker, ne, scratch, Operand(zero_reg));
}

static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
    MacroAssembler* masm, Register optimization_state,
    Register feedback_vector) {
  ASM_CODE_COMMENT(masm);
  DCHECK(!AreAliased(optimization_state, feedback_vector));
  UseScratchRegisterScope temps(masm);
  temps.Include(t0, t1);
  Label maybe_has_optimized_code;
  // Check if optimized code marker is available
  {
    UseScratchRegisterScope temps(masm);
    Register scratch = temps.Acquire();
    __ And(
        scratch, optimization_state,
        Operand(FeedbackVector::kHasCompileOptimizedOrLogFirstExecutionMarker));
    __ Branch(&maybe_has_optimized_code, eq, scratch, Operand(zero_reg),
              Label::Distance::kNear);
  }
  Register optimization_marker = optimization_state;
  __ DecodeField<FeedbackVector::OptimizationMarkerBits>(optimization_marker);
  MaybeOptimizeCode(masm, feedback_vector, optimization_marker);

  __ bind(&maybe_has_optimized_code);
  Register optimized_code_entry = optimization_state;
  __ LoadAnyTaggedField(
      optimized_code_entry,
      FieldMemOperand(feedback_vector,
                      FeedbackVector::kMaybeOptimizedCodeOffset));
  TailCallOptimizedCodeSlot(masm, optimized_code_entry, temps.Acquire(),
                            temps.Acquire());
}

// static
void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
  UseScratchRegisterScope temps(masm);
  temps.Include(kScratchReg.bit() | kScratchReg2.bit());
  auto descriptor =
      Builtins::CallInterfaceDescriptorFor(Builtin::kBaselineOutOfLinePrologue);
  Register closure = descriptor.GetRegisterParameter(
      BaselineOutOfLinePrologueDescriptor::kClosure);
  // Load the feedback vector from the closure.
  Register feedback_vector = temps.Acquire();
  __ Ld(feedback_vector,
        FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
  __ Ld(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
  if (FLAG_debug_code) {
    UseScratchRegisterScope temps(masm);
    Register type = temps.Acquire();
    __ GetObjectType(feedback_vector, type, type);
    __ Assert(eq, AbortReason::kExpectedFeedbackVector, type,
              Operand(FEEDBACK_VECTOR_TYPE));
  }

  // Check for an optimization marker.
  Label has_optimized_code_or_marker;
  Register optimization_state = temps.Acquire();
  LoadOptimizationStateAndJumpIfNeedsProcessing(
      masm, optimization_state, feedback_vector, &has_optimized_code_or_marker);

  // Increment invocation count for the function.
  {
    UseScratchRegisterScope temps(masm);
    Register invocation_count = temps.Acquire();
    __ Lw(invocation_count,
          FieldMemOperand(feedback_vector,
                          FeedbackVector::kInvocationCountOffset));
    __ Add32(invocation_count, invocation_count, Operand(1));
    __ Sw(invocation_count,
          FieldMemOperand(feedback_vector,
                          FeedbackVector::kInvocationCountOffset));
  }

  FrameScope frame_scope(masm, StackFrame::MANUAL);
  {
    ASM_CODE_COMMENT_STRING(masm, "Frame Setup");
    // Normally the first thing we'd do here is Push(lr, fp), but we already
    // entered the frame in BaselineCompiler::Prologue, as we had to use the
    // value lr before the call to this BaselineOutOfLinePrologue builtin.

    Register callee_context = descriptor.GetRegisterParameter(
        BaselineOutOfLinePrologueDescriptor::kCalleeContext);
    Register callee_js_function = descriptor.GetRegisterParameter(
        BaselineOutOfLinePrologueDescriptor::kClosure);
    __ Push(callee_context, callee_js_function);
    DCHECK_EQ(callee_js_function, kJavaScriptCallTargetRegister);
    DCHECK_EQ(callee_js_function, kJSFunctionRegister);

    Register argc = descriptor.GetRegisterParameter(
        BaselineOutOfLinePrologueDescriptor::kJavaScriptCallArgCount);
    // We'll use the bytecode for both code age/OSR resetting, and pushing onto
    // the frame, so load it into a register.
    Register bytecodeArray = descriptor.GetRegisterParameter(
        BaselineOutOfLinePrologueDescriptor::kInterpreterBytecodeArray);

    // Reset code age and the OSR arming. The OSR field and BytecodeAgeOffset
    // are 8-bit fields next to each other, so we can optimize by doing a
    // single 16-bit write. These static asserts guard that assumption.
    STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
                  BytecodeArray::kOsrLoopNestingLevelOffset + kCharSize);
    STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
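    // The 16-bit store below therefore clears both the OSR loop nesting level
    // and the bytecode age with a single write.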
    __ Sh(zero_reg, FieldMemOperand(bytecodeArray,
                                    BytecodeArray::kOsrLoopNestingLevelOffset));

    __ Push(argc, bytecodeArray);

    // Baseline code frames store the feedback vector where interpreter would
    // store the bytecode offset.
    if (FLAG_debug_code) {
      UseScratchRegisterScope temps(masm);
      Register invocation_count = temps.Acquire();
      __ GetObjectType(feedback_vector, invocation_count, invocation_count);
      __ Assert(eq, AbortReason::kExpectedFeedbackVector, invocation_count,
                Operand(FEEDBACK_VECTOR_TYPE));
    }
    // Our stack is currently aligned. We have to push something along with
    // the feedback vector to keep it that way -- we may as well start
    // initialising the register frame.
    // TODO(v8:11429,leszeks): Consider guaranteeing that this call leaves
    // `undefined` in the accumulator register, to skip the load in the
    // baseline code.
    __ Push(feedback_vector);
  }

  Label call_stack_guard;
  Register frame_size = descriptor.GetRegisterParameter(
      BaselineOutOfLinePrologueDescriptor::kStackFrameSize);
  {
    ASM_CODE_COMMENT_STRING(masm, "Stack/interrupt check");
    // Stack check. This folds the checks for both the interrupt stack limit
    // check and the real stack limit into one by just checking for the
    // interrupt limit. The interrupt limit is either equal to the real stack
    // limit or tighter. By ensuring we have space until that limit after
    // building the frame we can quickly precheck both at once.
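    // That is, we check sp - frame_size >= interrupt_limit; since
    // interrupt_limit >= real_limit, passing this check also implies that the
    // real stack limit check would pass.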
    UseScratchRegisterScope temps(masm);
    Register sp_minus_frame_size = temps.Acquire();
    __ Sub64(sp_minus_frame_size, sp, frame_size);
    Register interrupt_limit = temps.Acquire();
    __ LoadStackLimit(interrupt_limit,
                      MacroAssembler::StackLimitKind::kInterruptStackLimit);
    __ Branch(&call_stack_guard, Uless, sp_minus_frame_size,
              Operand(interrupt_limit));
  }

  // Do "fast" return to the caller pc in lr.
  // TODO(v8:11429): Document this frame setup better.
  __ Ret();

  __ bind(&has_optimized_code_or_marker);
  {
    ASM_CODE_COMMENT_STRING(masm, "Optimized marker check");
    // Drop the frame created by the baseline call.
    __ Pop(ra, fp);
    MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(masm, optimization_state,
                                                 feedback_vector);
    __ Trap();
  }

  __ bind(&call_stack_guard);
  {
    ASM_CODE_COMMENT_STRING(masm, "Stack/interrupt call");
    FrameScope frame_scope(masm, StackFrame::INTERNAL);
    // Save incoming new target or generator
    __ Push(kJavaScriptCallNewTargetRegister);
    __ SmiTag(frame_size);
    __ Push(frame_size);
    __ CallRuntime(Runtime::kStackGuardWithGap);
    __ Pop(kJavaScriptCallNewTargetRegister);
  }
  __ Ret();
  temps.Exclude(kScratchReg.bit() | kScratchReg2.bit());
}

// Generate code for entering a JS function with the interpreter.
// On entry to the function the receiver and arguments have been pushed on the
// stack left to right.
//
// The live registers are:
//   o a0 : actual argument count (not including the receiver)
//   o a1: the JS function object being called.
//   o a3: the incoming new target or generator object
//   o cp: our context
//   o fp: the caller's frame pointer
//   o sp: stack pointer
//   o ra: return address
//
// The function builds an interpreter frame. See InterpreterFrameConstants in
// frames-constants.h for its layout.
void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
  Register closure = a1;
  Register feedback_vector = a2;
  UseScratchRegisterScope temps(masm);
  temps.Include(t0, t1);
  Register scratch = temps.Acquire();
  Register scratch2 = temps.Acquire();
  // Get the bytecode array from the function object and load it into
  // kInterpreterBytecodeArrayRegister.
  __ LoadTaggedPointerField(
      kScratchReg,
      FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
  __ LoadTaggedPointerField(
      kInterpreterBytecodeArrayRegister,
      FieldMemOperand(kScratchReg, SharedFunctionInfo::kFunctionDataOffset));
  Label is_baseline;
  GetSharedFunctionInfoBytecodeOrBaseline(
      masm, kInterpreterBytecodeArrayRegister, kScratchReg, &is_baseline);

  // The bytecode array could have been flushed from the shared function info,
  // if so, call into CompileLazy.
  Label compile_lazy;
  __ GetObjectType(kInterpreterBytecodeArrayRegister, kScratchReg, kScratchReg);
  __ Branch(&compile_lazy, ne, kScratchReg, Operand(BYTECODE_ARRAY_TYPE));

  // Load the feedback vector from the closure.
  __ LoadTaggedPointerField(
      feedback_vector,
      FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
  __ LoadTaggedPointerField(
      feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));

  Label push_stack_frame;
  // Check if feedback vector is valid. If valid, check for optimized code
  // and update invocation count. Otherwise, setup the stack frame.
  __ LoadTaggedPointerField(
      a4, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
  __ Lhu(a4, FieldMemOperand(a4, Map::kInstanceTypeOffset));
  __ Branch(&push_stack_frame, ne, a4, Operand(FEEDBACK_VECTOR_TYPE),
            Label::Distance::kNear);

  // Read off the optimization state in the feedback vector, and if there
  // is optimized code or an optimization marker, call that instead.
  Register optimization_state = a4;
  __ Lw(optimization_state,
        FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));

  // Check if the optimized code slot is not empty or has an optimization
  // marker.
  Label has_optimized_code_or_marker;

  __ And(scratch, optimization_state,
         FeedbackVector::kHasOptimizedCodeOrCompileOptimizedMarkerMask);
  __ Branch(&has_optimized_code_or_marker, ne, scratch, Operand(zero_reg));

  Label not_optimized;
  __ bind(&not_optimized);

  // Increment invocation count for the function.
  __ Lw(a4, FieldMemOperand(feedback_vector,
                            FeedbackVector::kInvocationCountOffset));
  __ Add32(a4, a4, Operand(1));
  __ Sw(a4, FieldMemOperand(feedback_vector,
                            FeedbackVector::kInvocationCountOffset));

  // Open a frame scope to indicate that there is a frame on the stack. The
  // MANUAL indicates that the scope shouldn't actually generate code to set up
  // the frame (that is done below).
  __ bind(&push_stack_frame);
  FrameScope frame_scope(masm, StackFrame::MANUAL);
  __ PushStandardFrame(closure);

  // Reset code age and the OSR arming. The OSR field and BytecodeAgeOffset are
  // 8-bit fields next to each other, so we can optimize by doing a single
  // 16-bit write. These static asserts guard that assumption.
  STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
                BytecodeArray::kOsrLoopNestingLevelOffset + kCharSize);
  STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
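  // As in the baseline prologue above, a single 16-bit store clears both the
  // OSR loop nesting level and the bytecode age.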
  __ Sh(zero_reg, FieldMemOperand(kInterpreterBytecodeArrayRegister,
                                  BytecodeArray::kOsrLoopNestingLevelOffset));

  // Load initial bytecode offset.
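  // BytecodeArray::kHeaderSize - kHeapObjectTag is the offset of the first
  // bytecode relative to the tagged BytecodeArray pointer, so dispatch starts
  // at the first bytecode.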
1335 __ li(kInterpreterBytecodeOffsetRegister,
1336 Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));

  // Push the bytecode array and the Smi-tagged bytecode array offset.
  __ SmiTag(a4, kInterpreterBytecodeOffsetRegister);
  __ Push(kInterpreterBytecodeArrayRegister, a4);

  // Allocate the local and temporary register file on the stack.
  Label stack_overflow;
  {
    // Load the frame size (word) from the BytecodeArray object.
    __ Lw(a4, FieldMemOperand(kInterpreterBytecodeArrayRegister,
                              BytecodeArray::kFrameSizeOffset));

    // Do a stack check to ensure we don't go over the limit.
    __ Sub64(a5, sp, Operand(a4));
    __ LoadStackLimit(a2, MacroAssembler::StackLimitKind::kRealStackLimit);
    __ Branch(&stack_overflow, Uless, a5, Operand(a2));

    // If ok, push undefined as the initial value for all register file
    // entries.
    Label loop_header;
    Label loop_check;
    __ LoadRoot(a5, RootIndex::kUndefinedValue);
    __ BranchShort(&loop_check);
    __ bind(&loop_header);
    // TODO(rmcilroy): Consider doing more than one push per loop iteration.
    __ push(a5);
    // Continue loop if not done.
    __ bind(&loop_check);
    __ Sub64(a4, a4, Operand(kSystemPointerSize));
    __ Branch(&loop_header, ge, a4, Operand(zero_reg));
  }

  // If the bytecode array has a valid incoming new target or generator object
  // register, initialize it with the incoming value, which was passed in a3.
  Label no_incoming_new_target_or_generator_register;
  __ Lw(a5, FieldMemOperand(
                kInterpreterBytecodeArrayRegister,
                BytecodeArray::kIncomingNewTargetOrGeneratorRegisterOffset));
  __ Branch(&no_incoming_new_target_or_generator_register, eq, a5,
            Operand(zero_reg), Label::Distance::kNear);
  __ CalcScaledAddress(a5, fp, a5, kSystemPointerSizeLog2);
  __ Sd(a3, MemOperand(a5));
  __ bind(&no_incoming_new_target_or_generator_register);

  // Perform the interrupt stack check.
  // TODO(solanes): Merge with the real stack limit check above.
  Label stack_check_interrupt, after_stack_check_interrupt;
  __ LoadStackLimit(a5, MacroAssembler::StackLimitKind::kInterruptStackLimit);
  __ Branch(&stack_check_interrupt, Uless, sp, Operand(a5),
            Label::Distance::kNear);
  __ bind(&after_stack_check_interrupt);

  // Load the accumulator with undefined.
  __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);

  // Load the dispatch table into a register and dispatch to the bytecode
  // handler at the current bytecode offset.
  Label do_dispatch;
  __ bind(&do_dispatch);
  __ li(kInterpreterDispatchTableRegister,
        ExternalReference::interpreter_dispatch_table_address(masm->isolate()));
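  // The dispatch below is, in effect:
  //   handler = dispatch_table[bytecode_array[offset]];
  //   call handler;
  // Using a Call (rather than a jump) lets bytecode handlers return here,
  // which is how the Return bytecode and builtin tail calls re-enter this
  // trampoline.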
  __ Add64(a1, kInterpreterBytecodeArrayRegister,
           kInterpreterBytecodeOffsetRegister);
  __ Lbu(a7, MemOperand(a1));
  __ CalcScaledAddress(kScratchReg, kInterpreterDispatchTableRegister, a7,
                       kSystemPointerSizeLog2);
  __ Ld(kJavaScriptCallCodeStartRegister, MemOperand(kScratchReg));
  __ Call(kJavaScriptCallCodeStartRegister);
  masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());

  // Any returns to the entry trampoline are either due to the return bytecode
  // or the interpreter tail calling a builtin and then a dispatch.

  // Get the bytecode array and bytecode offset from the stack frame.
  __ Ld(kInterpreterBytecodeArrayRegister,
        MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
  __ Ld(kInterpreterBytecodeOffsetRegister,
        MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
  __ SmiUntag(kInterpreterBytecodeOffsetRegister);

  // Either return, or advance to the next bytecode and dispatch.
  Label do_return;
  __ Add64(a1, kInterpreterBytecodeArrayRegister,
           kInterpreterBytecodeOffsetRegister);
  __ Lbu(a1, MemOperand(a1));
  AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
                                kInterpreterBytecodeOffsetRegister, a1, a2, a3,
                                a4, &do_return);
  __ Branch(&do_dispatch);

  __ bind(&do_return);
  // The return value is in a0.
  LeaveInterpreterFrame(masm, scratch, scratch2);
  __ Jump(ra);

  __ bind(&stack_check_interrupt);
  // Modify the bytecode offset in the stack to be kFunctionEntryBytecodeOffset
  // for the call to the StackGuard.
  __ li(kInterpreterBytecodeOffsetRegister,
        Operand(Smi::FromInt(BytecodeArray::kHeaderSize - kHeapObjectTag +
                             kFunctionEntryBytecodeOffset)));
  __ Sd(kInterpreterBytecodeOffsetRegister,
        MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
  __ CallRuntime(Runtime::kStackGuard);

  // After the call, restore the bytecode array, bytecode offset and
  // accumulator registers again. Also, restore the bytecode offset in the
  // stack to its previous value.
  __ Ld(kInterpreterBytecodeArrayRegister,
        MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
  __ li(kInterpreterBytecodeOffsetRegister,
        Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
  __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);

  __ SmiTag(a5, kInterpreterBytecodeOffsetRegister);
  __ Sd(a5, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));

  __ Branch(&after_stack_check_interrupt);

  __ bind(&has_optimized_code_or_marker);
  Label maybe_has_optimized_code;
  // Check whether optimized code is available (any optimization tier bits
  // set).
  __ And(scratch, optimization_state,
         FeedbackVector::OptimizationTierBits::kMask);
  __ Branch(&maybe_has_optimized_code, ne, scratch, Operand(zero_reg),
            Label::Distance::kNear);
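  // If any optimization tier bits are set, the feedback vector may hold a
  // cached optimized Code object; otherwise only a compilation marker is
  // present, and MaybeOptimizeCode below decides how to act on it.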

  Register optimization_marker = optimization_state;
  __ DecodeField<FeedbackVector::OptimizationMarkerBits>(optimization_marker);
  MaybeOptimizeCode(masm, feedback_vector, optimization_marker);
  // Fall through if there's no runnable optimized code.
  __ Branch(&not_optimized);

  __ bind(&maybe_has_optimized_code);
  Register optimized_code_entry = optimization_state;
  __ LoadAnyTaggedField(
      optimized_code_entry,
      FieldMemOperand(feedback_vector,
                      FeedbackVector::kMaybeOptimizedCodeOffset));

  TailCallOptimizedCodeSlot(masm, optimized_code_entry, t4, a5);
  __ bind(&is_baseline);
  {
    // Load the feedback vector from the closure.
    __ LoadTaggedPointerField(
        feedback_vector,
        FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
    __ LoadTaggedPointerField(
        feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));

    Label install_baseline_code;
    // Check if the feedback vector is valid. If not, call prepare for baseline
    // to allocate it.
    __ LoadTaggedPointerField(
        scratch, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
    __ Lhu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
    __ Branch(&install_baseline_code, ne, scratch,
              Operand(FEEDBACK_VECTOR_TYPE));

    // Check for an optimization marker.
    LoadOptimizationStateAndJumpIfNeedsProcessing(
        masm, optimization_state, feedback_vector,
        &has_optimized_code_or_marker);

    // Load the baseline code into the closure.
    __ Move(a2, kInterpreterBytecodeArrayRegister);
    static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
    ReplaceClosureCodeWithOptimizedCode(masm, a2, closure, scratch, scratch2);
    __ JumpCodeObject(a2);

    __ bind(&install_baseline_code);
    GenerateTailCallToReturnedCode(masm, Runtime::kInstallBaselineCode);
  }

  __ bind(&compile_lazy);
  GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
  // Unreachable code.
  __ break_(0xCC);

  __ bind(&stack_overflow);
  __ CallRuntime(Runtime::kThrowStackOverflow);
  // Unreachable code.
  __ break_(0xCC);
}

static void GenerateInterpreterPushArgs(MacroAssembler* masm,
                                        Register num_args,
                                        Register start_address,
                                        Register scratch) {
  ASM_CODE_COMMENT(masm);
  // Find the address of the last argument.
  __ Sub64(scratch, num_args, Operand(1));
  __ Sll64(scratch, scratch, kSystemPointerSizeLog2);
  __ Sub64(start_address, start_address, scratch);
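  // Example: for num_args == 3, scratch = 2 * kSystemPointerSize, so
  // start_address moves down two slots to the last argument (argument slots
  // are laid out toward lower addresses); PushArray below then pushes all
  // three slots in reverse order.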

  // Push the arguments.
  __ PushArray(start_address, num_args,
               TurboAssembler::PushArrayOrder::kReverse);
}

// static
void Builtins::Generate_InterpreterPushArgsThenCallImpl(
    MacroAssembler* masm, ConvertReceiverMode receiver_mode,
    InterpreterPushArgsMode mode) {
  DCHECK(mode != InterpreterPushArgsMode::kArrayFunction);
  // ----------- S t a t e -------------
  // -- a0 : the number of arguments (not including the receiver)
  // -- a2 : the address of the first argument to be pushed. Subsequent
  //         arguments should be consecutive above this, in the same order as
  //         they are to be pushed onto the stack.
  // -- a1 : the target to call (can be any Object).
  // -----------------------------------
  Label stack_overflow;
  if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
    // The spread argument should not be pushed.
    __ Sub64(a0, a0, Operand(1));
  }

  __ Add64(a3, a0, Operand(1));  // Add one for the receiver.

  __ StackOverflowCheck(a3, a4, t0, &stack_overflow);

  if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
    // Don't copy the receiver.
    __ Move(a3, a0);
  }

  // This function modifies a2 and a4.
  GenerateInterpreterPushArgs(masm, a3, a2, a4);

  if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
    __ PushRoot(RootIndex::kUndefinedValue);
  }

  if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
    // Pass the spread in register a2. a2 already points to the penultimate
    // argument; the spread lies below that.
    __ Ld(a2, MemOperand(a2, -kSystemPointerSize));
  }

  // Call the target.
  if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
    __ Jump(BUILTIN_CODE(masm->isolate(), CallWithSpread),
            RelocInfo::CODE_TARGET);
  } else {
    __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny),
            RelocInfo::CODE_TARGET);
  }

  __ bind(&stack_overflow);
  {
    __ TailCallRuntime(Runtime::kThrowStackOverflow);
    // Unreachable code.
    __ break_(0xCC);
  }
}

// static
void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
    MacroAssembler* masm, InterpreterPushArgsMode mode) {
  // ----------- S t a t e -------------
  // -- a0 : argument count (not including receiver)
  // -- a3 : new target
  // -- a1 : constructor to call
  // -- a2 : allocation site feedback if available, undefined otherwise.
  // -- a4 : address of the first argument
  // -----------------------------------
  Label stack_overflow;
  __ Add64(a6, a0, 1);
  __ StackOverflowCheck(a6, a5, t0, &stack_overflow);

  if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
    // The spread argument should not be pushed.
    __ Sub64(a0, a0, Operand(1));
  }

  // Push the arguments. This function modifies a4 and a5.
  GenerateInterpreterPushArgs(masm, a0, a4, a5);

  // Push a slot for the receiver.
  __ push(zero_reg);

  if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
    // Pass the spread in register a2. a4 already points to the penultimate
    // argument; the spread lies in the next interpreter register.
    __ Ld(a2, MemOperand(a4, -kSystemPointerSize));
  } else {
    __ AssertUndefinedOrAllocationSite(a2, t0);
  }

  if (mode == InterpreterPushArgsMode::kArrayFunction) {
    __ AssertFunction(a1);

    // Tail call to the function-specific construct stub (still in the caller
    // context at this point).
    __ Jump(BUILTIN_CODE(masm->isolate(), ArrayConstructorImpl),
            RelocInfo::CODE_TARGET);
  } else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
    // Call the constructor with a0, a1, and a3 unmodified.
    __ Jump(BUILTIN_CODE(masm->isolate(), ConstructWithSpread),
            RelocInfo::CODE_TARGET);
  } else {
    DCHECK_EQ(InterpreterPushArgsMode::kOther, mode);
    // Call the constructor with a0, a1, and a3 unmodified.
    __ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET);
  }

  __ bind(&stack_overflow);
  {
    __ TailCallRuntime(Runtime::kThrowStackOverflow);
    // Unreachable code.
    __ break_(0xCC);
  }
}

static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
  // Set the return address to the correct point in the interpreter entry
  // trampoline.
  Label builtin_trampoline, trampoline_loaded;
  Smi interpreter_entry_return_pc_offset(
      masm->isolate()->heap()->interpreter_entry_return_pc_offset());
  DCHECK_NE(interpreter_entry_return_pc_offset, Smi::zero());

  // If the SFI function_data is an InterpreterData, the function will have a
  // custom copy of the interpreter entry trampoline for profiling. If so,
  // get the custom trampoline, otherwise grab the entry address of the global
  // trampoline.
  __ Ld(t0, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
  __ LoadTaggedPointerField(
      t0, FieldMemOperand(t0, JSFunction::kSharedFunctionInfoOffset));
  __ LoadTaggedPointerField(
      t0, FieldMemOperand(t0, SharedFunctionInfo::kFunctionDataOffset));
  __ GetObjectType(t0, kInterpreterDispatchTableRegister,
                   kInterpreterDispatchTableRegister);
  __ Branch(&builtin_trampoline, ne, kInterpreterDispatchTableRegister,
            Operand(INTERPRETER_DATA_TYPE), Label::Distance::kNear);

  __ LoadTaggedPointerField(
      t0, FieldMemOperand(t0, InterpreterData::kInterpreterTrampolineOffset));
  __ Add64(t0, t0, Operand(Code::kHeaderSize - kHeapObjectTag));
  __ BranchShort(&trampoline_loaded);

  __ bind(&builtin_trampoline);
  __ li(t0, ExternalReference::
                address_of_interpreter_entry_trampoline_instruction_start(
                    masm->isolate()));
  __ Ld(t0, MemOperand(t0));

  __ bind(&trampoline_loaded);
  __ Add64(ra, t0, Operand(interpreter_entry_return_pc_offset.value()));
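  // ra now points into the trampoline at the recorded return-pc offset, so
  // when the bytecode handler dispatched to below executes its return,
  // control resumes in the entry trampoline right after its own dispatch
  // sequence.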

  // Initialize the dispatch table register.
  __ li(kInterpreterDispatchTableRegister,
        ExternalReference::interpreter_dispatch_table_address(masm->isolate()));

  // Get the bytecode array pointer from the frame.
  __ Ld(kInterpreterBytecodeArrayRegister,
        MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));

  if (FLAG_debug_code) {
    // Check function data field is actually a BytecodeArray object.
    __ SmiTst(kInterpreterBytecodeArrayRegister, kScratchReg);
    __ Assert(ne,
              AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry,
              kScratchReg, Operand(zero_reg));
    __ GetObjectType(kInterpreterBytecodeArrayRegister, a1, a1);
    __ Assert(eq,
              AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry,
              a1, Operand(BYTECODE_ARRAY_TYPE));
  }

  // Get the target bytecode offset from the frame.
  __ SmiUntag(kInterpreterBytecodeOffsetRegister,
              MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));

  if (FLAG_debug_code) {
    Label okay;
    __ Branch(&okay, ge, kInterpreterBytecodeOffsetRegister,
              Operand(BytecodeArray::kHeaderSize - kHeapObjectTag),
              Label::Distance::kNear);
    // Unreachable code.
    __ break_(0xCC);
    __ bind(&okay);
  }

  // Dispatch to the target bytecode.
  __ Add64(a1, kInterpreterBytecodeArrayRegister,
           kInterpreterBytecodeOffsetRegister);
  __ Lbu(a7, MemOperand(a1));
  __ CalcScaledAddress(a1, kInterpreterDispatchTableRegister, a7,
                       kSystemPointerSizeLog2);
  __ Ld(kJavaScriptCallCodeStartRegister, MemOperand(a1));
  __ Jump(kJavaScriptCallCodeStartRegister);
}

void Builtins::Generate_InterpreterEnterAtNextBytecode(MacroAssembler* masm) {
  // Advance the current bytecode offset stored within the given interpreter
  // stack frame. This simulates what all bytecode handlers do upon completion
  // of the underlying operation.
  __ Ld(kInterpreterBytecodeArrayRegister,
        MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
  __ Ld(kInterpreterBytecodeOffsetRegister,
        MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
  __ SmiUntag(kInterpreterBytecodeOffsetRegister);

  Label enter_bytecode, function_entry_bytecode;
  __ Branch(&function_entry_bytecode, eq, kInterpreterBytecodeOffsetRegister,
            Operand(BytecodeArray::kHeaderSize - kHeapObjectTag +
                    kFunctionEntryBytecodeOffset));

  // Load the current bytecode.
  __ Add64(a1, kInterpreterBytecodeArrayRegister,
           kInterpreterBytecodeOffsetRegister);
  __ Lbu(a1, MemOperand(a1));

  // Advance to the next bytecode.
  Label if_return;
  AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
                                kInterpreterBytecodeOffsetRegister, a1, a2, a3,
                                a4, &if_return);

  __ bind(&enter_bytecode);
  // Convert the new bytecode offset to a Smi and save it in the stack frame.
  __ SmiTag(a2, kInterpreterBytecodeOffsetRegister);
  __ Sd(a2, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));

  Generate_InterpreterEnterBytecode(masm);

  __ bind(&function_entry_bytecode);
  // If the code deoptimizes during the implicit function entry stack interrupt
  // check, it will have a bailout ID of kFunctionEntryBytecodeOffset, which is
  // not a valid bytecode offset. Detect this case and advance to the first
  // actual bytecode.
  __ li(kInterpreterBytecodeOffsetRegister,
        Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
  __ Branch(&enter_bytecode);

  // We should never take the if_return path.
  __ bind(&if_return);
  __ Abort(AbortReason::kInvalidBytecodeAdvance);
}

void Builtins::Generate_InterpreterEnterAtBytecode(MacroAssembler* masm) {
  Generate_InterpreterEnterBytecode(masm);
}

namespace {
void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
                                      bool java_script_builtin,
                                      bool with_result) {
  const RegisterConfiguration* config(RegisterConfiguration::Default());
  int allocatable_register_count = config->num_allocatable_general_registers();
  UseScratchRegisterScope temp(masm);
  Register scratch = temp.Acquire();
  if (with_result) {
    if (java_script_builtin) {
      __ Move(scratch, a0);
    } else {
      // Overwrite the hole inserted by the deoptimizer with the return value
      // from the LAZY deopt point.
      __ Sd(a0,
            MemOperand(sp,
                       config->num_allocatable_general_registers() *
                               kSystemPointerSize +
                           BuiltinContinuationFrameConstants::kFixedFrameSize));
    }
  }
  for (int i = allocatable_register_count - 1; i >= 0; --i) {
    int code = config->GetAllocatableGeneralCode(i);
    __ Pop(Register::from_code(code));
    if (java_script_builtin && code == kJavaScriptCallArgCountRegister.code()) {
      __ SmiUntag(Register::from_code(code));
    }
  }
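  // At this point all allocatable registers have been restored from the
  // continuation frame (popped in reverse allocation order); what remains on
  // the stack is the Smi builtin index and the fixed frame part, which are
  // unwound below.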

  if (with_result && java_script_builtin) {
    // Overwrite the hole inserted by the deoptimizer with the return value
    // from the LAZY deopt point. a0 contains the arguments count, and the
    // return value from LAZY is always the last argument.
    __ Add64(a0, a0,
             Operand(BuiltinContinuationFrameConstants::kFixedSlotCount));
    __ CalcScaledAddress(t0, sp, a0, kSystemPointerSizeLog2);
    __ Sd(scratch, MemOperand(t0));
    // Recover the arguments count.
    __ Sub64(a0, a0,
             Operand(BuiltinContinuationFrameConstants::kFixedSlotCount));
  }

  __ Ld(fp, MemOperand(
                sp, BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
  // Load the builtin index (stored as a Smi) and use it to get the builtin
  // start address from the builtins table.
  __ Pop(t6);
  __ Add64(sp, sp,
           Operand(BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
  __ Pop(ra);
  __ LoadEntryFromBuiltinIndex(t6);
  __ Jump(t6);
}
}  // namespace

void Builtins::Generate_ContinueToCodeStubBuiltin(MacroAssembler* masm) {
  Generate_ContinueToBuiltinHelper(masm, false, false);
}

void Builtins::Generate_ContinueToCodeStubBuiltinWithResult(
    MacroAssembler* masm) {
  Generate_ContinueToBuiltinHelper(masm, false, true);
}

void Builtins::Generate_ContinueToJavaScriptBuiltin(MacroAssembler* masm) {
  Generate_ContinueToBuiltinHelper(masm, true, false);
}

void Builtins::Generate_ContinueToJavaScriptBuiltinWithResult(
    MacroAssembler* masm) {
  Generate_ContinueToBuiltinHelper(masm, true, true);
}

void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
  {
    FrameScope scope(masm, StackFrame::INTERNAL);
    __ CallRuntime(Runtime::kNotifyDeoptimized);
  }

  DCHECK_EQ(kInterpreterAccumulatorRegister.code(), a0.code());
  __ Ld(a0, MemOperand(sp, 0 * kSystemPointerSize));
  __ Add64(sp, sp, Operand(1 * kSystemPointerSize));  // Remove state.
  __ Ret();
}

namespace {

void Generate_OSREntry(MacroAssembler* masm, Register entry_address,
                       Operand offset = Operand(int64_t(0))) {
  __ Add64(ra, entry_address, offset);
  // And "return" to the OSR entry point of the function.
  __ Ret();
}

void OnStackReplacement(MacroAssembler* masm, bool is_interpreter) {
  ASM_CODE_COMMENT(masm);
  {
    FrameScope scope(masm, StackFrame::INTERNAL);
    __ CallRuntime(Runtime::kCompileForOnStackReplacement);
  }

  // If the code object is null, just return to the caller.
  __ Ret(eq, a0, Operand(Smi::zero()));
  if (is_interpreter) {
    // Drop the handler frame that is sitting on top of the actual
    // JavaScript frame. This is the case when OSR is triggered from bytecode.
    __ LeaveFrame(StackFrame::STUB);
  }
  // Load deoptimization data from the code object.
  // <deopt_data> = <code>[#deoptimization_data_offset]
  __ LoadTaggedPointerField(
      a1, MemOperand(a0, Code::kDeoptimizationDataOrInterpreterDataOffset -
                             kHeapObjectTag));

  // Load the OSR entrypoint offset from the deoptimization data.
  // <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
  __ SmiUntag(a1, MemOperand(a1, FixedArray::OffsetOfElementAt(
                                     DeoptimizationData::kOsrPcOffsetIndex) -
                                 kHeapObjectTag));

  // Compute the target address = code_obj + header_size + osr_offset
  // <entry_addr> = <code_obj> + #header_size + <osr_offset>
  __ Add64(a0, a0, a1);
  Generate_OSREntry(masm, a0, Operand(Code::kHeaderSize - kHeapObjectTag));
}
}  // namespace

void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
  return OnStackReplacement(masm, true);
}

void Builtins::Generate_BaselineOnStackReplacement(MacroAssembler* masm) {
  __ Ld(kContextRegister,
        MemOperand(fp, StandardFrameConstants::kContextOffset));
  return OnStackReplacement(masm, false);
}

// static
void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  // -- a0 : argc
  // -- sp[0] : receiver
  // -- sp[8] : thisArg
  // -- sp[16] : argArray
  // -----------------------------------

  Register argc = a0;
  Register arg_array = a2;
  Register receiver = a1;
  Register this_arg = a5;
  Register undefined_value = a3;

  __ LoadRoot(undefined_value, RootIndex::kUndefinedValue);

  // 1. Load receiver into a1, argArray into a2 (if present), remove all
  // arguments from the stack (including the receiver), and push thisArg (if
  // present) instead.
  {
    // Claim (2 - argc) dummy arguments from the stack, to put the stack in a
    // consistent state for a simple pop operation.

    __ Ld(this_arg, MemOperand(sp, kSystemPointerSize));
    __ Ld(arg_array, MemOperand(sp, 2 * kSystemPointerSize));

    Label done0, done1;
    __ Branch(&done0, ne, argc, Operand(zero_reg), Label::Distance::kNear);
    __ Move(arg_array, undefined_value);  // if argc == 0
    __ Move(this_arg, undefined_value);   // if argc == 0
    __ bind(&done0);                      // else (i.e., argc > 0)

    __ Branch(&done1, ne, argc, Operand(1), Label::Distance::kNear);
    __ Move(arg_array, undefined_value);  // if argc == 1
    __ bind(&done1);                      // else (i.e., argc > 1)

    __ Ld(receiver, MemOperand(sp));
    __ CalcScaledAddress(sp, sp, argc, kSystemPointerSizeLog2);
    __ Sd(this_arg, MemOperand(sp));
  }
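  // Example: for argc == 2 the stack was [receiver, thisArg, argArray] from
  // the top down; sp has been advanced past the receiver and thisArg, and the
  // remaining slot (formerly argArray) now holds thisArg, which the callee
  // sees as its receiver.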

  // ----------- S t a t e -------------
  // -- a2 : argArray
  // -- a1 : receiver
  // -- a3 : undefined root value
  // -- sp[0] : thisArg
  // -----------------------------------

  // 2. We don't need to check explicitly for callable receiver here,
  // since that's the first thing the Call/CallWithArrayLike builtins
  // will do.

  // 3. Tail call with no arguments if argArray is null or undefined.
  Label no_arguments;
  __ JumpIfRoot(arg_array, RootIndex::kNullValue, &no_arguments);
  __ Branch(&no_arguments, eq, arg_array, Operand(undefined_value),
            Label::Distance::kNear);

  // 4a. Apply the receiver to the given argArray.
  __ Jump(BUILTIN_CODE(masm->isolate(), CallWithArrayLike),
          RelocInfo::CODE_TARGET);

  // 4b. The argArray is either null or undefined, so we tail call without any
  // arguments to the receiver.
  __ bind(&no_arguments);
  {
    __ Move(a0, zero_reg);
    DCHECK(receiver == a1);
    __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
  }
}

// static
void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
  // 1. Get the callable to call (passed as receiver) from the stack.
  { __ Pop(a1); }

  // 2. Make sure we have at least one argument.
  // a0: actual number of arguments
  {
    Label done;
    __ Branch(&done, ne, a0, Operand(zero_reg), Label::Distance::kNear);
    __ PushRoot(RootIndex::kUndefinedValue);
    __ Add64(a0, a0, Operand(1));
    __ bind(&done);
  }

  // 3. Adjust the actual number of arguments.
  __ Add64(a0, a0, -1);
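  // With the callable popped off, the first pushed argument now sits in the
  // receiver slot, so the callee observes argc - 1 actual arguments.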

  // 4. Call the callable.
  __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
}

void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  // -- a0 : argc
  // -- sp[0] : receiver
  // -- sp[8] : target (if argc >= 1)
  // -- sp[16] : thisArgument (if argc >= 2)
  // -- sp[24] : argumentsList (if argc == 3)
  // -----------------------------------

  Register argc = a0;
  Register arguments_list = a2;
  Register target = a1;
  Register this_argument = a5;
  Register undefined_value = a3;

  __ LoadRoot(undefined_value, RootIndex::kUndefinedValue);

  // 1. Load target into a1 (if present), argumentsList into a2 (if present),
  // remove all arguments from the stack (including the receiver), and push
  // thisArgument (if present) instead.
  {
    // Claim (3 - argc) dummy arguments from the stack, to put the stack in a
    // consistent state for a simple pop operation.

    __ Ld(target, MemOperand(sp, kSystemPointerSize));
    __ Ld(this_argument, MemOperand(sp, 2 * kSystemPointerSize));
    __ Ld(arguments_list, MemOperand(sp, 3 * kSystemPointerSize));

    Label done0, done1, done2;
    __ Branch(&done0, ne, argc, Operand(zero_reg), Label::Distance::kNear);
    __ Move(arguments_list, undefined_value);  // if argc == 0
    __ Move(this_argument, undefined_value);   // if argc == 0
    __ Move(target, undefined_value);          // if argc == 0
    __ bind(&done0);                           // argc != 0

    __ Branch(&done1, ne, argc, Operand(1), Label::Distance::kNear);
    __ Move(arguments_list, undefined_value);  // if argc == 1
    __ Move(this_argument, undefined_value);   // if argc == 1
    __ bind(&done1);                           // argc > 1

    __ Branch(&done2, ne, argc, Operand(2), Label::Distance::kNear);
    __ Move(arguments_list, undefined_value);  // if argc == 2
    __ bind(&done2);                           // argc > 2

    __ CalcScaledAddress(sp, sp, argc, kSystemPointerSizeLog2);
    __ Sd(this_argument, MemOperand(sp, 0));  // Overwrite receiver
  }

  // ----------- S t a t e -------------
  // -- a2 : argumentsList
  // -- a1 : target
  // -- a3 : undefined root value
  // -- sp[0] : thisArgument
  // -----------------------------------

  // 2. We don't need to check explicitly for callable target here,
  // since that's the first thing the Call/CallWithArrayLike builtins
  // will do.

  // 3. Apply the target to the given argumentsList.
  __ Jump(BUILTIN_CODE(masm->isolate(), CallWithArrayLike),
          RelocInfo::CODE_TARGET);
}

void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  // -- a0 : argc
  // -- sp[0] : receiver
  // -- sp[8] : target
  // -- sp[16] : argumentsList
  // -- sp[24] : new.target (optional)
  // -----------------------------------
  Register argc = a0;
  Register arguments_list = a2;
  Register target = a1;
  Register new_target = a3;
  Register undefined_value = a4;

  __ LoadRoot(undefined_value, RootIndex::kUndefinedValue);

  // 1. Load target into a1 (if present), argumentsList into a2 (if present),
  // new.target into a3 (if present, otherwise use target), remove all
  // arguments from the stack (including the receiver), and push thisArgument
  // (if present) instead.
  {
    // Claim (3 - argc) dummy arguments from the stack, to put the stack in a
    // consistent state for a simple pop operation.
    __ Ld(target, MemOperand(sp, kSystemPointerSize));
    __ Ld(arguments_list, MemOperand(sp, 2 * kSystemPointerSize));
    __ Ld(new_target, MemOperand(sp, 3 * kSystemPointerSize));

    Label done0, done1, done2;
    __ Branch(&done0, ne, argc, Operand(zero_reg), Label::Distance::kNear);
    __ Move(arguments_list, undefined_value);  // if argc == 0
    __ Move(new_target, undefined_value);      // if argc == 0
    __ Move(target, undefined_value);          // if argc == 0
    __ bind(&done0);

    __ Branch(&done1, ne, argc, Operand(1), Label::Distance::kNear);
    __ Move(arguments_list, undefined_value);  // if argc == 1
    __ Move(new_target, target);               // if argc == 1
    __ bind(&done1);

    __ Branch(&done2, ne, argc, Operand(2), Label::Distance::kNear);
    __ Move(new_target, target);  // if argc == 2
    __ bind(&done2);

    __ CalcScaledAddress(sp, sp, argc, kSystemPointerSizeLog2);
    __ Sd(undefined_value, MemOperand(sp, 0));  // Overwrite receiver
  }

  // ----------- S t a t e -------------
  // -- a2 : argumentsList
  // -- a1 : target
  // -- a3 : new.target
  // -- sp[0] : receiver (undefined)
  // -----------------------------------

  // 2. We don't need to check explicitly for constructor target here,
  // since that's the first thing the Construct/ConstructWithArrayLike
  // builtins will do.

  // 3. We don't need to check explicitly for constructor new.target here,
  // since that's the second thing the Construct/ConstructWithArrayLike
  // builtins will do.

  // 4. Construct the target with the given new.target and argumentsList.
  __ Jump(BUILTIN_CODE(masm->isolate(), ConstructWithArrayLike),
          RelocInfo::CODE_TARGET);
}

// static
void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
                                               Handle<Code> code) {
  UseScratchRegisterScope temps(masm);
  temps.Include(t1, t0);
  // ----------- S t a t e -------------
  // -- a1 : target
  // -- a0 : number of parameters on the stack (not including the receiver)
  // -- a2 : arguments list (a FixedArray)
  // -- a4 : len (number of elements to push from args)
  // -- a3 : new.target (for [[Construct]])
  // -----------------------------------
  if (FLAG_debug_code) {
    // Allow a2 to be a FixedArray, or a FixedDoubleArray if a4 == 0.
    Label ok, fail;
    __ AssertNotSmi(a2);
    __ GetObjectType(a2, kScratchReg, kScratchReg);
    __ Branch(&ok, eq, kScratchReg, Operand(FIXED_ARRAY_TYPE),
              Label::Distance::kNear);
    __ Branch(&fail, ne, kScratchReg, Operand(FIXED_DOUBLE_ARRAY_TYPE),
              Label::Distance::kNear);
    __ Branch(&ok, eq, a4, Operand(zero_reg), Label::Distance::kNear);
    // Fall through.
    __ bind(&fail);
    __ Abort(AbortReason::kOperandIsNotAFixedArray);

    __ bind(&ok);
  }

  Register args = a2;
  Register len = a4;

  // Check for stack overflow.
  Label stack_overflow;
  __ StackOverflowCheck(len, kScratchReg, a5, &stack_overflow);

  // Move the arguments already in the stack,
  // including the receiver and the return address.
  {
    Label copy;
    Register src = a6, dest = a7;
    UseScratchRegisterScope temps(masm);
    Register size = temps.Acquire();
    Register value = temps.Acquire();
    __ Move(src, sp);
    __ Sll64(size, len, kSystemPointerSizeLog2);
    __ Sub64(sp, sp, Operand(size));
    // Update stack pointer.
    __ Move(dest, sp);
    __ Add64(size, a0, Operand(zero_reg));

    __ bind(&copy);
    __ Ld(value, MemOperand(src, 0));
    __ Sd(value, MemOperand(dest, 0));
    __ Sub64(size, size, Operand(1));
    __ Add64(src, src, Operand(kSystemPointerSize));
    __ Add64(dest, dest, Operand(kSystemPointerSize));
    __ Branch(&copy, ge, size, Operand(zero_reg));
  }
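  // The loop above moved the receiver plus a0 arguments (a0 + 1 slots in
  // total, since 'size' counts down from a0 through zero) to the new, lower
  // sp, opening a gap of 'len' slots above them that is filled from the
  // arguments list below.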

  // Push arguments onto the stack (thisArgument is already on the stack).
  {
    Label done, push, loop;
    Register src = a6;
    Register scratch = len;
    UseScratchRegisterScope temps(masm);
    Register hole_value = temps.Acquire();
    __ Add64(src, args, FixedArray::kHeaderSize - kHeapObjectTag);
    __ Add64(a0, a0, len);  // The 'len' argument for Call() or Construct().
    __ Branch(&done, eq, len, Operand(zero_reg), Label::Distance::kNear);
    __ Sll64(scratch, len, kTaggedSizeLog2);
    __ Sub64(scratch, sp, Operand(scratch));
    __ LoadRoot(hole_value, RootIndex::kTheHoleValue);
    __ bind(&loop);
    __ LoadTaggedPointerField(a5, MemOperand(src));
    __ Add64(src, src, kTaggedSize);
    __ Branch(&push, ne, a5, Operand(hole_value), Label::Distance::kNear);
    __ LoadRoot(a5, RootIndex::kUndefinedValue);
    __ bind(&push);
    __ Sd(a5, MemOperand(a7, 0));
    __ Add64(a7, a7, Operand(kSystemPointerSize));
    __ Add64(scratch, scratch, Operand(kTaggedSize));
    __ Branch(&loop, ne, scratch, Operand(sp));
    __ bind(&done);
  }
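  // Values read from the arguments FixedArray may be the-hole; the loop above
  // substitutes undefined for any hole before pushing, so the callee never
  // observes a hole value.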

  // Tail-call to the actual Call or Construct builtin.
  __ Jump(code, RelocInfo::CODE_TARGET);

  __ bind(&stack_overflow);
  __ TailCallRuntime(Runtime::kThrowStackOverflow);
}

// static
void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
                                                      CallOrConstructMode mode,
                                                      Handle<Code> code) {
  // ----------- S t a t e -------------
  // -- a0 : the number of arguments (not including the receiver)
  // -- a3 : the new.target (for [[Construct]] calls)
  // -- a1 : the target to call (can be any Object)
  // -- a2 : start index (to support rest parameters)
  // -----------------------------------
  UseScratchRegisterScope temps(masm);
  temps.Include(t0, t1);
  temps.Include(t2);
  // Check if new.target has a [[Construct]] internal method.
  if (mode == CallOrConstructMode::kConstruct) {
    Label new_target_constructor, new_target_not_constructor;
    UseScratchRegisterScope temps(masm);
    Register scratch = temps.Acquire();
    __ JumpIfSmi(a3, &new_target_not_constructor);
    __ LoadTaggedPointerField(scratch,
                              FieldMemOperand(a3, HeapObject::kMapOffset));
    __ Lbu(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
    __ And(scratch, scratch, Operand(Map::Bits1::IsConstructorBit::kMask));
    __ Branch(&new_target_constructor, ne, scratch, Operand(zero_reg),
              Label::Distance::kNear);
    __ bind(&new_target_not_constructor);
    {
      FrameScope scope(masm, StackFrame::MANUAL);
      __ EnterFrame(StackFrame::INTERNAL);
      __ Push(a3);
      __ CallRuntime(Runtime::kThrowNotConstructor);
    }
    __ bind(&new_target_constructor);
  }

  // TODO(victorgomes): Remove this copy when all the arguments adaptor frame
  // code is erased.
  __ Move(a6, fp);
  __ Ld(a7, MemOperand(fp, StandardFrameConstants::kArgCOffset));

  Label stack_done, stack_overflow;
  __ Sub32(a7, a7, a2);
  __ Branch(&stack_done, le, a7, Operand(zero_reg));
  {
    // Check for stack overflow.
    __ StackOverflowCheck(a7, a4, a5, &stack_overflow);

    // Forward the arguments from the caller frame.

    // Point to the first argument to copy (skipping the receiver).
    __ Add64(a6, a6,
             Operand(CommonFrameConstants::kFixedFrameSizeAboveFp +
                     kSystemPointerSize));
    __ CalcScaledAddress(a6, a6, a2, kSystemPointerSizeLog2);

    // Move the arguments already in the stack,
    // including the receiver and the return address.
    {
      Label copy;
      UseScratchRegisterScope temps(masm);
      Register src = temps.Acquire(), dest = a2, scratch = temps.Acquire();
      Register count = temps.Acquire();
      __ Move(src, sp);
      // Update stack pointer.
      __ Sll64(scratch, a7, kSystemPointerSizeLog2);
      __ Sub64(sp, sp, Operand(scratch));
      __ Move(dest, sp);
      __ Move(count, a0);

      __ bind(&copy);
      __ Ld(scratch, MemOperand(src, 0));
      __ Sd(scratch, MemOperand(dest, 0));
      __ Sub64(count, count, Operand(1));
      __ Add64(src, src, Operand(kSystemPointerSize));
      __ Add64(dest, dest, Operand(kSystemPointerSize));
      __ Branch(&copy, ge, count, Operand(zero_reg));
    }
    }

    // Copy arguments from the caller frame.
    // TODO(victorgomes): Consider using forward order as potentially more
    // cache friendly.
    {
      Label loop;
      __ Add64(a0, a0, a7);
      __ bind(&loop);
      {
        UseScratchRegisterScope temps(masm);
        Register scratch = temps.Acquire(), addr = temps.Acquire();
        __ Sub32(a7, a7, Operand(1));
        __ CalcScaledAddress(addr, a6, a7, kSystemPointerSizeLog2);
        __ Ld(scratch, MemOperand(addr));
        __ CalcScaledAddress(addr, a2, a7, kSystemPointerSizeLog2);
        __ Sd(scratch, MemOperand(addr));
        __ Branch(&loop, ne, a7, Operand(zero_reg));
      }
    }
  }
  __ BranchShort(&stack_done);
  __ bind(&stack_overflow);
  __ TailCallRuntime(Runtime::kThrowStackOverflow);
  __ bind(&stack_done);

  // Tail-call to the {code} handler.
  __ Jump(code, RelocInfo::CODE_TARGET);
}

// static
void Builtins::Generate_CallFunction(MacroAssembler* masm,
                                     ConvertReceiverMode mode) {
  // ----------- S t a t e -------------
  // -- a0 : the number of arguments (not including the receiver)
  // -- a1 : the function to call (checked to be a JSFunction)
  // -----------------------------------
  __ AssertFunction(a1);

  // See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
  // Check that function is not a "classConstructor".
  Label class_constructor;
  __ LoadTaggedPointerField(
      a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
  __ Lwu(a3, FieldMemOperand(a2, SharedFunctionInfo::kFlagsOffset));
  __ And(kScratchReg, a3,
         Operand(SharedFunctionInfo::IsClassConstructorBit::kMask));
  __ Branch(&class_constructor, ne, kScratchReg, Operand(zero_reg));

  // Enter the context of the function; ToObject has to run in the function
  // context, and we also need to take the global proxy from the function
  // context in case of conversion.
  __ LoadTaggedPointerField(cp,
                            FieldMemOperand(a1, JSFunction::kContextOffset));
  // We need to convert the receiver for non-native sloppy mode functions.
  Label done_convert;
  __ Lwu(a3, FieldMemOperand(a2, SharedFunctionInfo::kFlagsOffset));
  __ And(kScratchReg, a3,
         Operand(SharedFunctionInfo::IsNativeBit::kMask |
                 SharedFunctionInfo::IsStrictBit::kMask));
  __ Branch(&done_convert, ne, kScratchReg, Operand(zero_reg));
  {
    // ----------- S t a t e -------------
    // -- a0 : the number of arguments (not including the receiver)
    // -- a1 : the function to call (checked to be a JSFunction)
    // -- a2 : the shared function info.
    // -- cp : the function context.
    // -----------------------------------

    if (mode == ConvertReceiverMode::kNullOrUndefined) {
      // Patch receiver to global proxy.
      __ LoadGlobalProxy(a3);
    } else {
      Label convert_to_object, convert_receiver;
      __ LoadReceiver(a3, a0);
      __ JumpIfSmi(a3, &convert_to_object);
      STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
      __ GetObjectType(a3, a4, a4);
      __ Branch(&done_convert, Ugreater_equal, a4,
                Operand(FIRST_JS_RECEIVER_TYPE));
      if (mode != ConvertReceiverMode::kNotNullOrUndefined) {
        Label convert_global_proxy;
        __ JumpIfRoot(a3, RootIndex::kUndefinedValue, &convert_global_proxy);
        __ JumpIfNotRoot(a3, RootIndex::kNullValue, &convert_to_object);
        __ bind(&convert_global_proxy);
        {
          // Patch receiver to global proxy.
          __ LoadGlobalProxy(a3);
        }
        __ Branch(&convert_receiver);
      }
      __ bind(&convert_to_object);
      {
        // Convert receiver using ToObject.
        // TODO(bmeurer): Inline the allocation here to avoid building the
        // frame in the fast case? (fall back to AllocateInNewSpace?)
        FrameScope scope(masm, StackFrame::INTERNAL);
        __ SmiTag(a0);
        __ Push(a0, a1);
        __ Move(a0, a3);
        __ Push(cp);
        __ Call(BUILTIN_CODE(masm->isolate(), ToObject),
                RelocInfo::CODE_TARGET);
        __ Pop(cp);
        __ Move(a3, a0);
        __ Pop(a0, a1);
        __ SmiUntag(a0);
      }
      __ LoadTaggedPointerField(
          a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
      __ bind(&convert_receiver);
    }
    __ StoreReceiver(a3, a0, kScratchReg);
  }
  __ bind(&done_convert);

  // ----------- S t a t e -------------
  // -- a0 : the number of arguments (not including the receiver)
  // -- a1 : the function to call (checked to be a JSFunction)
  // -- a2 : the shared function info.
  // -- cp : the function context.
  // -----------------------------------

  __ Lhu(a2,
         FieldMemOperand(a2, SharedFunctionInfo::kFormalParameterCountOffset));
  __ InvokeFunctionCode(a1, no_reg, a2, a0, InvokeType::kJump);

  // The function is a "classConstructor", need to raise an exception.
  __ bind(&class_constructor);
  {
    FrameScope frame(masm, StackFrame::INTERNAL);
    __ Push(a1);
    __ CallRuntime(Runtime::kThrowConstructorNonCallableError);
  }
}

namespace {

void Generate_PushBoundArguments(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  // -- a0 : the number of arguments (not including the receiver)
  // -- a1 : target (checked to be a JSBoundFunction)
  // -- a3 : new.target (only in case of [[Construct]])
  // -----------------------------------
  UseScratchRegisterScope temps(masm);
  temps.Include(t0, t1);
  Register bound_argc = a4;
  Register bound_argv = a2;
  // Load [[BoundArguments]] into a2 and length of that into a4.
  Label no_bound_arguments;
  __ LoadTaggedPointerField(
      bound_argv, FieldMemOperand(a1, JSBoundFunction::kBoundArgumentsOffset));
  __ SmiUntagField(bound_argc,
                   FieldMemOperand(bound_argv, FixedArray::kLengthOffset));
  __ Branch(&no_bound_arguments, eq, bound_argc, Operand(zero_reg));
  {
    // ----------- S t a t e -------------
    // -- a0 : the number of arguments (not including the receiver)
    // -- a1 : target (checked to be a JSBoundFunction)
    // -- a2 : the [[BoundArguments]] (implemented as FixedArray)
    // -- a3 : new.target (only in case of [[Construct]])
    // -- a4 : the number of [[BoundArguments]]
    // -----------------------------------
    UseScratchRegisterScope temps(masm);
    Register scratch = temps.Acquire();
    Label done;
    // Reserve stack space for the [[BoundArguments]].
    {
      // Check the stack for overflow. We are not trying to catch interruptions
      // (i.e. debug break and preemption) here, so check the "real stack
      // limit".
      __ StackOverflowCheck(a4, temps.Acquire(), temps.Acquire(), nullptr,
                            &done);
      {
        FrameScope scope(masm, StackFrame::MANUAL);
        __ EnterFrame(StackFrame::INTERNAL);
        __ CallRuntime(Runtime::kThrowStackOverflow);
      }
      __ bind(&done);
    }

    // Pop receiver.
    __ Pop(scratch);

    // Push [[BoundArguments]].
    {
      Label loop, done_loop;
      __ SmiUntag(a4, FieldMemOperand(a2, FixedArray::kLengthOffset));
      __ Add64(a0, a0, Operand(a4));
      __ Add64(a2, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
      __ bind(&loop);
      __ Sub64(a4, a4, Operand(1));
      __ Branch(&done_loop, lt, a4, Operand(zero_reg), Label::Distance::kNear);
      __ CalcScaledAddress(a5, a2, a4, kTaggedSizeLog2);
      __ LoadAnyTaggedField(kScratchReg, MemOperand(a5));
      __ Push(kScratchReg);
      __ Branch(&loop);
      __ bind(&done_loop);
    }

    // Push receiver.
    __ Push(scratch);
  }
  __ bind(&no_bound_arguments);
}

}  // namespace

// static
void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  // -- a0 : the number of arguments (not including the receiver)
  // -- a1 : the function to call (checked to be a JSBoundFunction)
  // -----------------------------------
  __ AssertBoundFunction(a1);

  // Patch the receiver to [[BoundThis]].
  {
    UseScratchRegisterScope temps(masm);
    Register scratch = temps.Acquire();
    __ LoadAnyTaggedField(
        scratch, FieldMemOperand(a1, JSBoundFunction::kBoundThisOffset));
    __ StoreReceiver(scratch, a0, kScratchReg);
  }

  // Push the [[BoundArguments]] onto the stack.
  Generate_PushBoundArguments(masm);

  // Call the [[BoundTargetFunction]] via the Call builtin.
  __ LoadTaggedPointerField(
      a1, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset));
  __ Jump(BUILTIN_CODE(masm->isolate(), Call_ReceiverIsAny),
          RelocInfo::CODE_TARGET);
}

// static
void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
  // ----------- S t a t e -------------
  // -- a0 : the number of arguments (not including the receiver)
  // -- a1 : the target to call (can be any Object).
  // -----------------------------------

  Label non_callable, non_smi;
  UseScratchRegisterScope temps(masm);
  temps.Include(t1, t2);
  temps.Include(t4);
  Register map = temps.Acquire(), type = temps.Acquire(),
           range = temps.Acquire();
  __ JumpIfSmi(a1, &non_callable);
  __ bind(&non_smi);
  __ LoadMap(map, a1);
  __ GetInstanceTypeRange(map, type, FIRST_JS_FUNCTION_TYPE, range);
  __ Jump(masm->isolate()->builtins()->CallFunction(mode),
          RelocInfo::CODE_TARGET, Uless_equal, range,
          Operand(LAST_JS_FUNCTION_TYPE - FIRST_JS_FUNCTION_TYPE));
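  // GetInstanceTypeRange leaves (instance_type - FIRST_JS_FUNCTION_TYPE) in
  // 'range', so the single unsigned comparison above dispatches every
  // JSFunction subtype to CallFunction.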
  __ Jump(BUILTIN_CODE(masm->isolate(), CallBoundFunction),
          RelocInfo::CODE_TARGET, eq, type, Operand(JS_BOUND_FUNCTION_TYPE));
  Register scratch = map;
  // Check if target has a [[Call]] internal method.
  __ Lbu(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
  __ And(scratch, scratch, Operand(Map::Bits1::IsCallableBit::kMask));
  __ Branch(&non_callable, eq, scratch, Operand(zero_reg),
            Label::Distance::kNear);

  __ Jump(BUILTIN_CODE(masm->isolate(), CallProxy), RelocInfo::CODE_TARGET, eq,
          type, Operand(JS_PROXY_TYPE));

  // 2. Call to something else, which might have a [[Call]] internal method (if
  // not we raise an exception).
  // Overwrite the original receiver with the (original) target.
  __ StoreReceiver(a1, a0, kScratchReg);
  // Let the "call_as_function_delegate" take care of the rest.
  __ LoadNativeContextSlot(a1, Context::CALL_AS_FUNCTION_DELEGATE_INDEX);
  __ Jump(masm->isolate()->builtins()->CallFunction(
              ConvertReceiverMode::kNotNullOrUndefined),
          RelocInfo::CODE_TARGET);

  // 3. Call to something that is not callable.
  __ bind(&non_callable);
  {
    FrameScope scope(masm, StackFrame::INTERNAL);
    __ Push(a1);
    __ CallRuntime(Runtime::kThrowCalledNonCallable);
  }
}

void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  // -- a0 : the number of arguments (not including the receiver)
  // -- a1 : the constructor to call (checked to be a JSFunction)
  // -- a3 : the new target (checked to be a constructor)
  // -----------------------------------
  __ AssertConstructor(a1);
  __ AssertFunction(a1);

  // The calling convention for function-specific ConstructStubs requires
  // a2 to contain either an AllocationSite or undefined.
  __ LoadRoot(a2, RootIndex::kUndefinedValue);

  Label call_generic_stub;

  // Jump to JSBuiltinsConstructStub or JSConstructStubGeneric.
  __ LoadTaggedPointerField(
      a4, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
  __ Lwu(a4, FieldMemOperand(a4, SharedFunctionInfo::kFlagsOffset));
  __ And(a4, a4, Operand(SharedFunctionInfo::ConstructAsBuiltinBit::kMask));
  __ Branch(&call_generic_stub, eq, a4, Operand(zero_reg),
            Label::Distance::kNear);

  __ Jump(BUILTIN_CODE(masm->isolate(), JSBuiltinsConstructStub),
          RelocInfo::CODE_TARGET);

  __ bind(&call_generic_stub);
  __ Jump(BUILTIN_CODE(masm->isolate(), JSConstructStubGeneric),
          RelocInfo::CODE_TARGET);
}

// static
void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  // -- a0 : the number of arguments (not including the receiver)
  // -- a1 : the function to call (checked to be a JSBoundFunction)
  // -- a3 : the new target (checked to be a constructor)
  // -----------------------------------
  __ AssertBoundFunction(a1);

  // Push the [[BoundArguments]] onto the stack.
  Generate_PushBoundArguments(masm);

  // Patch new.target to [[BoundTargetFunction]] if new.target equals target.
  Label skip;
  {
    UseScratchRegisterScope temps(masm);
    Register scratch = temps.Acquire();
    __ CmpTagged(scratch, a1, a3);
    __ Branch(&skip, ne, scratch, Operand(zero_reg), Label::Distance::kNear);
  }
  __ LoadTaggedPointerField(
      a3, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset));
  __ bind(&skip);

  // Construct the [[BoundTargetFunction]] via the Construct builtin.
  __ LoadTaggedPointerField(
      a1, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset));
  __ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET);
}

// static
void Builtins::Generate_Construct(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  // -- a0 : the number of arguments (not including the receiver)
  // -- a1 : the constructor to call (can be any Object)
  // -- a3 : the new target (either the same as the constructor or
  //         the JSFunction on which new was invoked initially)
  // -----------------------------------

  // Check if target is a Smi.
  Label non_constructor, non_proxy;
  __ JumpIfSmi(a1, &non_constructor);

  // Check if target has a [[Construct]] internal method.
  UseScratchRegisterScope temps(masm);
  temps.Include(t0, t1);
  Register map = temps.Acquire();
  Register scratch = temps.Acquire();
  __ LoadTaggedPointerField(map, FieldMemOperand(a1, HeapObject::kMapOffset));
  __ Lbu(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
  __ And(scratch, scratch, Operand(Map::Bits1::IsConstructorBit::kMask));
  __ Branch(&non_constructor, eq, scratch, Operand(zero_reg));
  Register range = temps.Acquire();
  // Dispatch based on instance type.
  __ GetInstanceTypeRange(map, scratch, FIRST_JS_FUNCTION_TYPE, range);
  __ Jump(BUILTIN_CODE(masm->isolate(), ConstructFunction),
          RelocInfo::CODE_TARGET, Uless_equal, range,
          Operand(LAST_JS_FUNCTION_TYPE - FIRST_JS_FUNCTION_TYPE));

  // Only dispatch to bound functions after checking whether they are
  // constructors.
  __ Jump(BUILTIN_CODE(masm->isolate(), ConstructBoundFunction),
          RelocInfo::CODE_TARGET, eq, scratch, Operand(JS_BOUND_FUNCTION_TYPE));

  // Only dispatch to proxies after checking whether they are constructors.
  __ Branch(&non_proxy, ne, scratch, Operand(JS_PROXY_TYPE),
            Label::Distance::kNear);
  __ Jump(BUILTIN_CODE(masm->isolate(), ConstructProxy),
          RelocInfo::CODE_TARGET);

  // Called Construct on an exotic Object with a [[Construct]] internal method.
  __ bind(&non_proxy);
  {
    // Overwrite the original receiver with the (original) target.
    __ StoreReceiver(a1, a0, kScratchReg);
    // Let the "call_as_constructor_delegate" take care of the rest.
    __ LoadNativeContextSlot(a1, Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX);
    __ Jump(masm->isolate()->builtins()->CallFunction(),
            RelocInfo::CODE_TARGET);
  }

  // Called Construct on an Object that doesn't have a [[Construct]] internal
  // method.
  __ bind(&non_constructor);
  __ Jump(BUILTIN_CODE(masm->isolate(), ConstructedNonConstructable),
          RelocInfo::CODE_TARGET);
}
2719
2720 #if V8_ENABLE_WEBASSEMBLY
Generate_WasmCompileLazy(MacroAssembler * masm)2721 void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
2722 // The function index was put in t0 by the jump table trampoline.
2723 // Convert to Smi for the runtime call
2724 __ SmiTag(kWasmCompileLazyFuncIndexRegister);
2725 {
2726 HardAbortScope hard_abort(masm); // Avoid calls to Abort.
2727 FrameScope scope(masm, StackFrame::WASM_COMPILE_LAZY);
2728
2729 // Save all parameter registers (see kGpParamRegisters in wasm-linkage.cc).
2730 // They might be overwritten in the runtime call below. We don't have any
2731 // callee-saved registers in wasm, so no need to store anything else.
2732 RegList gp_regs = 0;
2733 for (Register gp_param_reg : wasm::kGpParamRegisters) {
2734 gp_regs |= gp_param_reg.bit();
2735 }
2736 // Also push a1, because we must push multiples of 16 bytes (see
2737 // {TurboAssembler::PushCPURegList}.
2738 CHECK_EQ(0, NumRegs(gp_regs) % 2);
2739
2740 RegList fp_regs = 0;
2741 for (DoubleRegister fp_param_reg : wasm::kFpParamRegisters) {
2742 fp_regs |= fp_param_reg.bit();
2743 }
2744
2745 CHECK_EQ(NumRegs(gp_regs), arraysize(wasm::kGpParamRegisters));
2746 CHECK_EQ(NumRegs(fp_regs), arraysize(wasm::kFpParamRegisters));
2747 CHECK_EQ(WasmCompileLazyFrameConstants::kNumberOfSavedGpParamRegs,
2748 NumRegs(gp_regs));
2749 CHECK_EQ(WasmCompileLazyFrameConstants::kNumberOfSavedFpParamRegs,
2750 NumRegs(fp_regs));
2751 __ MultiPush(gp_regs);
2752 __ MultiPushFPU(fp_regs);
2753
2754 // Pass instance and function index as an explicit arguments to the runtime
2755 // function.
2756 __ Push(kWasmInstanceRegister, kWasmCompileLazyFuncIndexRegister);
2757 // Initialize the JavaScript context with 0. CEntry will use it to
2758 // set the current context on the isolate.
2759 __ Move(kContextRegister, Smi::zero());
2760 __ CallRuntime(Runtime::kWasmCompileLazy, 2);
2761
2762 __ Move(s1, a0); // move return value to s1 since a0 will be restored to
2763 // the value before the call
2764
2765 // Restore registers.
2766 __ MultiPopFPU(fp_regs);
2767 __ MultiPop(gp_regs);
2768 }
2769 // Finally, jump to the entrypoint.
2770 __ Jump(s1);
2771 }
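
// Editor's note: a small sketch of the alignment invariant checked above,
// assuming 8-byte register slots (kSystemPointerSize on RISCV64): pushing an
// even number of registers keeps sp aligned to 16 bytes.
inline bool PushKeepsStackAlignedSketch(int num_regs_pushed) {
  const int kSlotSize = 8;
  return (num_regs_pushed * kSlotSize) % 16 == 0;  // True iff the count is even.
}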
2772
2773 void Builtins::Generate_WasmDebugBreak(MacroAssembler* masm) {
2774 HardAbortScope hard_abort(masm); // Avoid calls to Abort.
2775 {
2776 FrameScope scope(masm, StackFrame::WASM_DEBUG_BREAK);
2777
2778 // Save all parameter registers. They might hold live values, we restore
2779 // them after the runtime call.
2780 __ MultiPush(WasmDebugBreakFrameConstants::kPushedGpRegs);
2781 __ MultiPushFPU(WasmDebugBreakFrameConstants::kPushedFpRegs);
2782
2783 // Initialize the JavaScript context with 0. CEntry will use it to
2784 // set the current context on the isolate.
2785 __ Move(cp, Smi::zero());
2786 __ CallRuntime(Runtime::kWasmDebugBreak, 0);
2787
2788 // Restore registers.
2789 __ MultiPopFPU(WasmDebugBreakFrameConstants::kPushedFpRegs);
2790 __ MultiPop(WasmDebugBreakFrameConstants::kPushedGpRegs);
2791 }
2792 __ Ret();
2793 }
2794 #endif // V8_ENABLE_WEBASSEMBLY
2795
2796 void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
2797 SaveFPRegsMode save_doubles, ArgvMode argv_mode,
2798 bool builtin_exit_frame) {
2799 // Called from JavaScript; parameters are on stack as if calling JS function
2800 // a0: number of arguments including receiver
2801 // a1: pointer to builtin function
2802 // fp: frame pointer (restored after C call)
2803 // sp: stack pointer (restored as callee's sp after C call)
2804 // cp: current context (C callee-saved)
2805 //
2806 // If argv_mode == ArgvMode::kRegister:
2807 // a2: pointer to the first argument
2808
2809 if (argv_mode == ArgvMode::kRegister) {
2810 // Move argv into the correct register.
2811 __ Move(s1, a2);
2812 } else {
2813 // Compute the argv pointer in a callee-saved register.
2814 __ CalcScaledAddress(s1, sp, a0, kSystemPointerSizeLog2);
2815 __ Sub64(s1, s1, kSystemPointerSize);
2816 }
2817
2818 // Enter the exit frame that transitions from JavaScript to C++.
2819 FrameScope scope(masm, StackFrame::MANUAL);
2820 __ EnterExitFrame(
2821 save_doubles == SaveFPRegsMode::kSave, 0,
2822 builtin_exit_frame ? StackFrame::BUILTIN_EXIT : StackFrame::EXIT);
2823
2824 // s3: number of arguments including receiver (C callee-saved)
2825 // s1: pointer to first argument (C callee-saved)
2826 // s2: pointer to builtin function (C callee-saved)
2827
2828 // Prepare arguments for C routine.
2829 // a0 = argc
2830 __ Move(s3, a0);
2831 __ Move(s2, a1);
2832
2833 // We are calling compiled C/C++ code. argc, argv and the isolate address
2834 // are passed in a0-a2 below; the stack must stay aligned for the C ABI.
2835
2836 __ AssertStackIsAligned();
2837
2838 // a0 = argc, a1 = argv, a2 = isolate
2839 __ li(a2, ExternalReference::isolate_address(masm->isolate()));
2840 __ Move(a1, s1);
2841
2842 __ StoreReturnAddressAndCall(s2);
2843
2844 // Result returned in a0 or a1:a0 - do not destroy these registers!
2845
2846 // Check result for exception sentinel.
2847 Label exception_returned;
2848 __ LoadRoot(a4, RootIndex::kException);
2849 __ Branch(&exception_returned, eq, a4, Operand(a0));
2850
2851 // Check that there is no pending exception, otherwise we
2852 // should have returned the exception sentinel.
2853 if (FLAG_debug_code) {
2854 Label okay;
2855 ExternalReference pending_exception_address = ExternalReference::Create(
2856 IsolateAddressId::kPendingExceptionAddress, masm->isolate());
2857 __ li(a2, pending_exception_address);
2858 __ Ld(a2, MemOperand(a2));
2859 __ LoadRoot(a4, RootIndex::kTheHoleValue);
2860 // Cannot use Check here, as it attempts to generate a call into the runtime.
2861 __ Branch(&okay, eq, a4, Operand(a2), Label::Distance::kNear);
2862 __ stop();
2863 __ bind(&okay);
2864 }
2865
2866 // Exit C frame and return.
2867 // a0:a1: result
2868 // sp: stack pointer
2869 // fp: frame pointer
2870 Register argc = argv_mode == ArgvMode::kRegister
2871 // We don't want to pop arguments so set argc to no_reg.
2872 ? no_reg
2873 // s3: still holds argc (callee-saved).
2874 : s3;
2875 __ LeaveExitFrame(save_doubles == SaveFPRegsMode::kSave, argc, EMIT_RETURN);
2876
2877 // Handling of exception.
2878 __ bind(&exception_returned);
2879
2880 ExternalReference pending_handler_context_address = ExternalReference::Create(
2881 IsolateAddressId::kPendingHandlerContextAddress, masm->isolate());
2882 ExternalReference pending_handler_entrypoint_address =
2883 ExternalReference::Create(
2884 IsolateAddressId::kPendingHandlerEntrypointAddress, masm->isolate());
2885 ExternalReference pending_handler_fp_address = ExternalReference::Create(
2886 IsolateAddressId::kPendingHandlerFPAddress, masm->isolate());
2887 ExternalReference pending_handler_sp_address = ExternalReference::Create(
2888 IsolateAddressId::kPendingHandlerSPAddress, masm->isolate());
2889
2890 // Ask the runtime for help to determine the handler. This will set a0 to
2891 // contain the current pending exception; do not clobber it.
2892 ExternalReference find_handler =
2893 ExternalReference::Create(Runtime::kUnwindAndFindExceptionHandler);
2894 {
2895 FrameScope scope(masm, StackFrame::MANUAL);
2896 __ PrepareCallCFunction(3, 0, a0);
2897 __ Move(a0, zero_reg);
2898 __ Move(a1, zero_reg);
2899 __ li(a2, ExternalReference::isolate_address(masm->isolate()));
2900 __ CallCFunction(find_handler, 3);
2901 }
2902
2903 // Retrieve the handler context, SP and FP.
2904 __ li(cp, pending_handler_context_address);
2905 __ Ld(cp, MemOperand(cp));
2906 __ li(sp, pending_handler_sp_address);
2907 __ Ld(sp, MemOperand(sp));
2908 __ li(fp, pending_handler_fp_address);
2909 __ Ld(fp, MemOperand(fp));
2910
2911 // If the handler is a JS frame, restore the context to the frame. Note that
2912 // cp will be zero for non-JS frames.
2913 Label zero;
2914 __ Branch(&zero, eq, cp, Operand(zero_reg), Label::Distance::kNear);
2915 __ Sd(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
2916 __ bind(&zero);
2917
2918 // Compute the handler entry address and jump to it.
2919 UseScratchRegisterScope temp(masm);
2920 Register scratch = temp.Acquire();
2921 __ li(scratch, pending_handler_entrypoint_address);
2922 __ Ld(scratch, MemOperand(scratch));
2923 __ Jump(scratch);
2924 }
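
// Editor's note: a hedged C-level sketch of the control flow above
// (hypothetical names, not V8 API): the builtin receives (argc, argv,
// isolate); a result equal to the exception sentinel diverts control to the
// pending exception handler instead of a normal return.
struct CEntrySketch {
  using BuiltinFn = intptr_t (*)(int argc, intptr_t* argv, void* isolate);
  using UnwindFn = void (*)(void* isolate);  // Never returns in the real code.
  static intptr_t Call(BuiltinFn target, UnwindFn unwind_to_handler, int argc,
                       intptr_t* argv, void* isolate, intptr_t exception) {
    intptr_t result = target(argc, argv, isolate);  // StoreReturnAddressAndCall.
    if (result == exception) {
      // Runtime::kUnwindAndFindExceptionHandler sets the pending handler's
      // cp/sp/fp and entrypoint; the builtin then jumps to it.
      unwind_to_handler(isolate);
    }
    return result;  // Normal path: LeaveExitFrame + Ret.
  }
};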
2925
2926 void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
2927 Label done;
2928 Register result_reg = t0;
2929
2930 Register scratch = GetRegisterThatIsNotOneOf(result_reg);
2931 Register scratch2 = GetRegisterThatIsNotOneOf(result_reg, scratch);
2932 Register scratch3 = GetRegisterThatIsNotOneOf(result_reg, scratch, scratch2);
2933 DoubleRegister double_scratch = kScratchDoubleReg;
2934
2935 // Account for saved regs.
2936 const int kArgumentOffset = 4 * kSystemPointerSize;
2937
2938 __ Push(result_reg);
2939 __ Push(scratch, scratch2, scratch3);
2940
2941 // Load double input.
2942 __ LoadDouble(double_scratch, MemOperand(sp, kArgumentOffset));
2943
2944 // Try a conversion to a signed integer; if an exception occurs, scratch is
2945 // set to 0.
2946 __ Trunc_w_d(scratch3, double_scratch, scratch);
2947
2948 // If there was no exception, set result_reg and we are done.
2949 Label error;
2950 __ Branch(&error, eq, scratch, Operand(zero_reg), Label::Distance::kNear);
2951 __ Move(result_reg, scratch3);
2952 __ Branch(&done);
2953 __ bind(&error);
2954
2955 // Load the double value and perform a manual truncation.
2956 Register input_high = scratch2;
2957 Register input_low = scratch3;
2958
2959 __ Lw(input_low, MemOperand(sp, kArgumentOffset + Register::kMantissaOffset));
2960 __ Lw(input_high,
2961 MemOperand(sp, kArgumentOffset + Register::kExponentOffset));
2962
2963 Label normal_exponent;
2964 // Extract the biased exponent into result_reg.
2965 __ ExtractBits(result_reg, input_high, HeapNumber::kExponentShift,
2966 HeapNumber::kExponentBits);
2967
2968 // Check for Infinity and NaNs, which should return 0.
2969 __ Sub32(scratch, result_reg, HeapNumber::kExponentMask);
2970 __ LoadZeroIfConditionZero(
2971 result_reg,
2972 scratch); // result_reg = scratch == 0 ? 0 : result_reg
2973 __ Branch(&done, eq, scratch, Operand(zero_reg));
2974
2975 // Express exponent as delta to (number of mantissa bits + 31).
2976 __ Sub32(result_reg, result_reg,
2977 Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 31));
2978
2979 // If the delta is strictly positive, all bits would be shifted away,
2980 // which means that we can return 0.
2981 __ Branch(&normal_exponent, le, result_reg, Operand(zero_reg),
2982 Label::Distance::kNear);
2983 __ Move(result_reg, zero_reg);
2984 __ Branch(&done);
2985
2986 __ bind(&normal_exponent);
2987 const int kShiftBase = HeapNumber::kNonMantissaBitsInTopWord - 1;
2988 // Calculate shift.
2989 __ Add32(scratch, result_reg,
2990 Operand(kShiftBase + HeapNumber::kMantissaBits));
2991
2992 // Save the sign.
2993 Register sign = result_reg;
2994 result_reg = no_reg;
2995 __ And(sign, input_high, Operand(HeapNumber::kSignMask));
2996
2997 // We must specially handle shifts greater than 31.
2998 Label high_shift_needed, high_shift_done;
2999 __ Branch(&high_shift_needed, lt, scratch, Operand(32),
3000 Label::Distance::kNear);
3001 __ Move(input_high, zero_reg);
3002 __ BranchShort(&high_shift_done);
3003 __ bind(&high_shift_needed);
3004
3005 // Set the implicit 1 before the mantissa part in input_high.
3006 __ Or(input_high, input_high,
3007 Operand(1 << HeapNumber::kMantissaBitsInTopWord));
3008 // Shift the mantissa bits to the correct position.
3009 // We don't need to clear non-mantissa bits as they will be shifted away.
3010 // If they weren't, it would mean that the answer is in the 32-bit range.
3011 __ Sll32(input_high, input_high, scratch);
3012
3013 __ bind(&high_shift_done);
3014
3015 // Replace the shifted bits with bits from the lower mantissa word.
3016 Label pos_shift, shift_done, sign_negative;
3017 __ li(kScratchReg, 32);
3018 __ subw(scratch, kScratchReg, scratch);
3019 __ Branch(&pos_shift, ge, scratch, Operand(zero_reg), Label::Distance::kNear);
3020
3021 // Negate scratch.
3022 __ Sub32(scratch, zero_reg, scratch);
3023 __ Sll32(input_low, input_low, scratch);
3024 __ BranchShort(&shift_done);
3025
3026 __ bind(&pos_shift);
3027 __ srlw(input_low, input_low, scratch);
3028
3029 __ bind(&shift_done);
3030 __ Or(input_high, input_high, Operand(input_low));
3031 // Restore sign if necessary.
3032 __ Move(scratch, sign);
3033 result_reg = sign;
3034 sign = no_reg;
3035 __ Sub32(result_reg, zero_reg, input_high);
3036 __ Branch(&sign_negative, ne, scratch, Operand(zero_reg),
3037 Label::Distance::kNear);
3038 __ Move(result_reg, input_high);
3039 __ bind(&sign_negative);
3040
3041 __ bind(&done);
3042
3043 __ Sd(result_reg, MemOperand(sp, kArgumentOffset));
3044 __ Pop(scratch, scratch2, scratch3);
3045 __ Pop(result_reg);
3046 __ Ret();
3047 }
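
// Editor's note: a portable sketch of the manual truncation path above,
// assuming IEEE-754 doubles and that <cstdint>/<cstring> are available via
// the includes at the top of this file. Like the manual path, it keeps the
// low 32 bits of the truncated value and maps NaN/Infinity to 0.
inline int32_t DoubleToInt32Sketch(double input) {
  uint64_t bits;
  memcpy(&bits, &input, sizeof(bits));
  const uint32_t biased_exponent = static_cast<uint32_t>(bits >> 52) & 0x7FF;
  if (biased_exponent == 0x7FF) return 0;  // NaN or Infinity.
  uint64_t mantissa = bits & ((uint64_t{1} << 52) - 1);
  if (biased_exponent != 0) mantissa |= uint64_t{1} << 52;  // Implicit 1.
  // value = mantissa * 2^(biased_exponent - 1075); keep the low 32 bits.
  const int shift = static_cast<int>(biased_exponent) - 1075;
  uint32_t magnitude;
  if (shift >= 32) {
    magnitude = 0;  // All significant bits shifted past bit 31.
  } else if (shift >= 0) {
    magnitude = static_cast<uint32_t>(mantissa << shift);
  } else if (shift > -53) {
    magnitude = static_cast<uint32_t>(mantissa >> -shift);  // Truncate.
  } else {
    magnitude = 0;  // |input| < 1.
  }
  if (bits >> 63) magnitude = 0u - magnitude;  // Apply the sign modulo 2^32.
  return static_cast<int32_t>(magnitude);
}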
3048
3049 void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
3050 // TODO(v8:10701): Implement for this platform.
3051 __ Trap();
3052 }
3053
3054 void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) {
3055 // Only needed on x64.
3056 __ Trap();
3057 }
3058 namespace {
3059
3060 int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
3061 int64_t offset = (ref0.address() - ref1.address());
3062 DCHECK(static_cast<int>(offset) == offset);
3063 return static_cast<int>(offset);
3064 }
3065
3066 // Calls an API function. Allocates a HandleScope, extracts the returned
3067 // value from the handle, and propagates exceptions. Restores the context.
3068 // 'stack_space' is the space to be unwound on exit (it includes the call JS
3069 // arguments space and the additional space allocated for the fast call).
3070 void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address,
3071 ExternalReference thunk_ref, int stack_space,
3072 MemOperand* stack_space_operand,
3073 MemOperand return_value_operand) {
3074 ASM_CODE_COMMENT(masm);
3075 Isolate* isolate = masm->isolate();
3076 ExternalReference next_address =
3077 ExternalReference::handle_scope_next_address(isolate);
3078 const int kNextOffset = 0;
3079 const int kLimitOffset = AddressOffset(
3080 ExternalReference::handle_scope_limit_address(isolate), next_address);
3081 const int kLevelOffset = AddressOffset(
3082 ExternalReference::handle_scope_level_address(isolate), next_address);
3083
3084 DCHECK(function_address == a1 || function_address == a2);
3085
3086 Label profiler_enabled, end_profiler_check;
3087 {
3088 UseScratchRegisterScope temp(masm);
3089 Register scratch = temp.Acquire();
3090 __ li(scratch, ExternalReference::is_profiling_address(isolate));
3091 __ Lb(scratch, MemOperand(scratch, 0));
3092 __ Branch(&profiler_enabled, ne, scratch, Operand(zero_reg),
3093 Label::Distance::kNear);
3094 __ li(scratch, ExternalReference::address_of_runtime_stats_flag());
3095 __ Lw(scratch, MemOperand(scratch, 0));
3096 __ Branch(&profiler_enabled, ne, scratch, Operand(zero_reg),
3097 Label::Distance::kNear);
3098 {
3099 // Call the api function directly.
3100 __ Move(scratch, function_address);
3101 __ BranchShort(&end_profiler_check);
3102 }
3103
3104 __ bind(&profiler_enabled);
3105 {
3106 // Additional parameter is the address of the actual callback.
3107 __ li(scratch, thunk_ref);
3108 }
3109 __ bind(&end_profiler_check);
3110
3111 // Allocate HandleScope in callee-save registers.
3112 __ li(s5, next_address);
3113 __ Ld(s3, MemOperand(s5, kNextOffset));
3114 __ Ld(s1, MemOperand(s5, kLimitOffset));
3115 __ Lw(s2, MemOperand(s5, kLevelOffset));
3116 __ Add32(s2, s2, Operand(1));
3117 __ Sw(s2, MemOperand(s5, kLevelOffset));
3118
3119 __ StoreReturnAddressAndCall(scratch);
3120 }
3121
3122 Label promote_scheduled_exception;
3123 Label delete_allocated_handles;
3124 Label leave_exit_frame;
3125 Label return_value_loaded;
3126
3127 // Load value from ReturnValue.
3128 __ Ld(a0, return_value_operand);
3129 __ bind(&return_value_loaded);
3130
3131 // No more valid handles (the result handle was the last one). Restore
3132 // previous handle scope.
3133 __ Sd(s3, MemOperand(s5, kNextOffset));
3134 if (FLAG_debug_code) {
3135 __ Lw(a1, MemOperand(s5, kLevelOffset));
3136 __ Check(eq, AbortReason::kUnexpectedLevelAfterReturnFromApiCall, a1,
3137 Operand(s2));
3138 }
3139 __ Sub32(s2, s2, Operand(1));
3140 __ Sw(s2, MemOperand(s5, kLevelOffset));
3141 __ Ld(kScratchReg, MemOperand(s5, kLimitOffset));
3142 __ Branch(&delete_allocated_handles, ne, s1, Operand(kScratchReg));
3143
3144 // Leave the API exit frame.
3145 __ bind(&leave_exit_frame);
3146
3147 if (stack_space_operand == nullptr) {
3148 DCHECK_NE(stack_space, 0);
3149 __ li(s3, Operand(stack_space));
3150 } else {
3151 DCHECK_EQ(stack_space, 0);
3152 STATIC_ASSERT(kCArgSlotCount == 0);
3153 __ Ld(s3, *stack_space_operand);
3154 }
3155
3156 static constexpr bool kDontSaveDoubles = false;
3157 static constexpr bool kRegisterContainsSlotCount = false;
3158 __ LeaveExitFrame(kDontSaveDoubles, s3, NO_EMIT_RETURN,
3159 kRegisterContainsSlotCount);
3160
3161 // Check if the function scheduled an exception.
3162 __ LoadRoot(a4, RootIndex::kTheHoleValue);
3163 __ li(kScratchReg, ExternalReference::scheduled_exception_address(isolate));
3164 __ Ld(a5, MemOperand(kScratchReg));
3165 __ Branch(&promote_scheduled_exception, ne, a4, Operand(a5),
3166 Label::Distance::kNear);
3167
3168 __ Ret();
3169
3170 // Re-throw by promoting a scheduled exception.
3171 __ bind(&promote_scheduled_exception);
3172 __ TailCallRuntime(Runtime::kPromoteScheduledException);
3173
3174 // HandleScope limit has changed. Delete allocated extensions.
3175 __ bind(&delete_allocated_handles);
3176 __ Sd(s1, MemOperand(s5, kLimitOffset));
3177 __ Move(s3, a0);
3178 __ PrepareCallCFunction(1, s1);
3179 __ li(a0, ExternalReference::isolate_address(isolate));
3180 __ CallCFunction(ExternalReference::delete_handle_scope_extensions(), 1);
3181 __ Move(a0, s3);
3182 __ Branch(&leave_exit_frame);
3183 }
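
// Editor's note: a sketch of the HandleScope bookkeeping above, with
// hypothetical types (the real data lives behind the handle_scope_*_address
// external references): {next, limit, level} is saved on entry, level is
// bumped, and on exit the triple is restored; a moved 'limit' means handle
// scope extensions were allocated and must be deleted.
struct HandleScopeDataSketch {
  void* next;
  void* limit;
  int level;
};
inline void ApiCallScopeSketch(HandleScopeDataSketch* data, void* isolate,
                               void (*call_api_function)(),
                               void (*delete_extensions)(void* isolate)) {
  HandleScopeDataSketch saved = *data;  // s3 = next, s1 = limit, s2 = level.
  data->level++;
  call_api_function();                  // StoreReturnAddressAndCall(scratch).
  data->next = saved.next;
  data->level--;
  if (data->limit != saved.limit) {     // delete_allocated_handles path.
    data->limit = saved.limit;
    delete_extensions(isolate);
  }
}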
3184
3185 } // namespace
3186
3187 void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
3188 // ----------- S t a t e -------------
3189 // -- cp : context
3190 // -- a1 : api function address
3191 // -- a2 : arguments count (not including the receiver)
3192 // -- a3 : call data
3193 // -- a0 : holder
3194 // --
3195 // -- sp[0] : receiver
3196 // -- sp[8] : first argument
3197 // -- ...
3198 // -- sp[(argc) * 8] : last argument
3199 // -----------------------------------
3200 UseScratchRegisterScope temps(masm);
3201 temps.Include(t0, t1);
3202 Register api_function_address = a1;
3203 Register argc = a2;
3204 Register call_data = a3;
3205 Register holder = a0;
3206 Register scratch = temps.Acquire();
3207 Register base = temps.Acquire(); // For addressing MemOperands on the stack.
3208
3209 DCHECK(!AreAliased(api_function_address, argc, call_data, holder, scratch,
3210 base));
3211
3212 using FCA = FunctionCallbackArguments;
3213
3214 STATIC_ASSERT(FCA::kArgsLength == 6);
3215 STATIC_ASSERT(FCA::kNewTargetIndex == 5);
3216 STATIC_ASSERT(FCA::kDataIndex == 4);
3217 STATIC_ASSERT(FCA::kReturnValueOffset == 3);
3218 STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
3219 STATIC_ASSERT(FCA::kIsolateIndex == 1);
3220 STATIC_ASSERT(FCA::kHolderIndex == 0);
3221
3222 // Set up FunctionCallbackInfo's implicit_args on the stack as follows:
3223 //
3224 // Target state:
3225 // sp[0 * kSystemPointerSize]: kHolder
3226 // sp[1 * kSystemPointerSize]: kIsolate
3227 // sp[2 * kSystemPointerSize]: undefined (kReturnValueDefaultValue)
3228 // sp[3 * kSystemPointerSize]: undefined (kReturnValue)
3229 // sp[4 * kSystemPointerSize]: kData
3230 // sp[5 * kSystemPointerSize]: undefined (kNewTarget)
3231
3232 // Set up the base register for addressing through MemOperands. It will point
3233 // at the receiver (located at sp + argc * kSystemPointerSize).
3234 __ CalcScaledAddress(base, sp, argc, kSystemPointerSizeLog2);
3235
3236 // Reserve space on the stack.
3237 __ Sub64(sp, sp, Operand(FCA::kArgsLength * kSystemPointerSize));
3238
3239 // kHolder.
3240 __ Sd(holder, MemOperand(sp, 0 * kSystemPointerSize));
3241
3242 // kIsolate.
3243 __ li(scratch, ExternalReference::isolate_address(masm->isolate()));
3244 __ Sd(scratch, MemOperand(sp, 1 * kSystemPointerSize));
3245
3246 // kReturnValueDefaultValue and kReturnValue.
3247 __ LoadRoot(scratch, RootIndex::kUndefinedValue);
3248 __ Sd(scratch, MemOperand(sp, 2 * kSystemPointerSize));
3249 __ Sd(scratch, MemOperand(sp, 3 * kSystemPointerSize));
3250
3251 // kData.
3252 __ Sd(call_data, MemOperand(sp, 4 * kSystemPointerSize));
3253
3254 // kNewTarget.
3255 __ Sd(scratch, MemOperand(sp, 5 * kSystemPointerSize));
3256
3257 // Keep a pointer to kHolder (= implicit_args) in a scratch register.
3258 // We use it below to set up the FunctionCallbackInfo object.
3259 __ Move(scratch, sp);
3260
3261 // Allocate the v8::Arguments structure in the arguments' space since
3262 // it's not controlled by GC.
3263 static constexpr int kApiStackSpace = 4;
3264 static constexpr bool kDontSaveDoubles = false;
3265 FrameScope frame_scope(masm, StackFrame::MANUAL);
3266 __ EnterExitFrame(kDontSaveDoubles, kApiStackSpace);
3267
3268 // EnterExitFrame may align the sp.
3269
3270 // FunctionCallbackInfo::implicit_args_ (points at kHolder as set up above).
3271 // Arguments are after the return address (pushed by EnterExitFrame()).
3272 __ Sd(scratch, MemOperand(sp, 1 * kSystemPointerSize));
3273
3274 // FunctionCallbackInfo::values_ (points at the first varargs argument passed
3275 // on the stack).
3276 __ Add64(scratch, scratch,
3277 Operand((FCA::kArgsLength + 1) * kSystemPointerSize));
3278 __ Sd(scratch, MemOperand(sp, 2 * kSystemPointerSize));
3279
3280 // FunctionCallbackInfo::length_.
3281 // It is stored as an int field; 32-bit integers within a struct on the
3282 // stack are always left-justified by the n64 ABI.
3283 __ Sw(argc, MemOperand(sp, 3 * kSystemPointerSize));
3284
3285 // We also store the number of stack slots to drop after returning from the
3286 // API function.
3287 // Note: Unlike on other architectures, this is the number of slots to drop,
3288 // not the number of bytes.
3289 __ Add64(scratch, argc, Operand(FCA::kArgsLength + 1 /* receiver */));
3290 __ Sd(scratch, MemOperand(sp, 4 * kSystemPointerSize));
3291
3292 // v8::InvocationCallback's argument.
3293 DCHECK(!AreAliased(api_function_address, scratch, a0));
3294 __ Add64(a0, sp, Operand(1 * kSystemPointerSize));
3295
3296 ExternalReference thunk_ref = ExternalReference::invoke_function_callback();
3297
3298 // There are two stack slots above the arguments we constructed on the stack.
3299 // TODO(jgruber): Document what these arguments are.
3300 static constexpr int kStackSlotsAboveFCA = 2;
3301 MemOperand return_value_operand(
3302 fp, (kStackSlotsAboveFCA + FCA::kReturnValueOffset) * kSystemPointerSize);
3303
3304 static constexpr int kUseStackSpaceOperand = 0;
3305 MemOperand stack_space_operand(sp, 4 * kSystemPointerSize);
3306
3307 AllowExternalCallThatCantCauseGC scope(masm);
3308 CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
3309 kUseStackSpaceOperand, &stack_space_operand,
3310 return_value_operand);
3311 }
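
// Editor's note: the stack block built above, written out as a hypothetical
// struct (field names are descriptive; the real layout is defined by the
// FCA::k* indices):
struct ImplicitArgsSketch {
  void* holder;                // sp[0]: kHolder
  void* isolate;               // sp[1]: kIsolate
  void* return_value_default;  // sp[2]: undefined
  void* return_value;          // sp[3]: undefined
  void* data;                  // sp[4]: kData
  void* new_target;            // sp[5]: undefined
};
// FunctionCallbackInfo then carries a pointer to this block, a pointer to the
// first JS argument on the stack, and argc:
struct FunctionCallbackInfoSketch {
  void** implicit_args;
  void** values;
  int length;
};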
3312
3313 void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
3314 // Build v8::PropertyCallbackInfo::args_ array on the stack and push property
3315 // name below the exit frame to make GC aware of them.
3316 STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
3317 STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
3318 STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
3319 STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
3320 STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
3321 STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
3322 STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
3323 STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);
3324
3325 Register receiver = ApiGetterDescriptor::ReceiverRegister();
3326 Register holder = ApiGetterDescriptor::HolderRegister();
3327 Register callback = ApiGetterDescriptor::CallbackRegister();
3328 Register scratch = a4;
3329 DCHECK(!AreAliased(receiver, holder, callback, scratch));
3330
3331 Register api_function_address = a2;
3332
3333 // Here and below, +1 is for the name() handle pushed after the args_ array.
3334 using PCA = PropertyCallbackArguments;
3335 __ Sub64(sp, sp, (PCA::kArgsLength + 1) * kSystemPointerSize);
3336 __ Sd(receiver, MemOperand(sp, (PCA::kThisIndex + 1) * kSystemPointerSize));
3337 __ LoadAnyTaggedField(scratch,
3338 FieldMemOperand(callback, AccessorInfo::kDataOffset));
3339 __ Sd(scratch, MemOperand(sp, (PCA::kDataIndex + 1) * kSystemPointerSize));
3340 __ LoadRoot(scratch, RootIndex::kUndefinedValue);
3341 __ Sd(scratch,
3342 MemOperand(sp, (PCA::kReturnValueOffset + 1) * kSystemPointerSize));
3343 __ Sd(scratch, MemOperand(sp, (PCA::kReturnValueDefaultValueIndex + 1) *
3344 kSystemPointerSize));
3345 __ li(scratch, ExternalReference::isolate_address(masm->isolate()));
3346 __ Sd(scratch, MemOperand(sp, (PCA::kIsolateIndex + 1) * kSystemPointerSize));
3347 __ Sd(holder, MemOperand(sp, (PCA::kHolderIndex + 1) * kSystemPointerSize));
3348 // should_throw_on_error -> false
3349 DCHECK_EQ(0, Smi::zero().ptr());
3350 __ Sd(zero_reg, MemOperand(sp, (PCA::kShouldThrowOnErrorIndex + 1) *
3351 kSystemPointerSize));
3352 __ LoadTaggedPointerField(
3353 scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset));
3354 __ Sd(scratch, MemOperand(sp, 0 * kSystemPointerSize));
3355
3356 // v8::PropertyCallbackInfo::args_ array and name handle.
3357 const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
3358
3359 // Load address of v8::PropertyAccessorInfo::args_ array and name handle.
3360 __ Move(a0, sp); // a0 = Handle<Name>
3361 __ Add64(a1, a0, Operand(1 * kSystemPointerSize)); // a1 = v8::PCI::args_
3362
3363 const int kApiStackSpace = 1;
3364 FrameScope frame_scope(masm, StackFrame::MANUAL);
3365 __ EnterExitFrame(false, kApiStackSpace);
3366
3367 // Create the v8::PropertyCallbackInfo object on the stack and initialize
3368 // its args_ field.
3369 __ Sd(a1, MemOperand(sp, 1 * kSystemPointerSize));
3370 __ Add64(a1, sp, Operand(1 * kSystemPointerSize));
3371 // a1 = v8::PropertyCallbackInfo&
3372
3373 ExternalReference thunk_ref =
3374 ExternalReference::invoke_accessor_getter_callback();
3375
3376 __ LoadTaggedPointerField(
3377 scratch, FieldMemOperand(callback, AccessorInfo::kJsGetterOffset));
3378 __ Ld(api_function_address,
3379 FieldMemOperand(scratch, Foreign::kForeignAddressOffset));
3380
3381 // +3 skips the prologue, the return address and the name handle.
3382 MemOperand return_value_operand(
3383 fp,
3384 (PropertyCallbackArguments::kReturnValueOffset + 3) * kSystemPointerSize);
3385 MemOperand* const kUseStackSpaceConstant = nullptr;
3386 CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
3387 kStackUnwindSpace, kUseStackSpaceConstant,
3388 return_value_operand);
3389 }
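
// Editor's note: the companion layout for the getter path above, again as a
// hypothetical struct mirroring the PCA::k* indices; the property name handle
// sits at sp[0], so args_ starts one slot higher.
struct PropertyCallbackArgsSketch {
  void* should_throw_on_error;  // index 0: Smi::zero() here (false).
  void* holder;                 // index 1
  void* isolate;                // index 2
  void* return_value_default;   // index 3: undefined
  void* return_value;           // index 4: undefined
  void* data;                   // index 5: AccessorInfo::data
  void* receiver;               // index 6: kThisIndex
};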
3390
3391 void Builtins::Generate_DirectCEntry(MacroAssembler* masm) {
3392 // The sole purpose of DirectCEntry is for movable callers (e.g. any general
3393 // purpose Code object) to be able to call into C functions that may trigger
3394 // GC and thus move the caller.
3395 //
3396 // DirectCEntry places the return address on the stack (updated by the GC),
3397 // making the call GC safe. The irregexp backend relies on this.
3398
3399 // Make room for the arguments to fit the C calling convention. Callers use
3400 // EnterExitFrame/LeaveExitFrame so they handle stack restoring and we don't
3401 // have to do that here. Any caller must drop kCArgsSlotsSize stack space
3402 // after the call.
3403 __ Add64(sp, sp, -kCArgsSlotsSize);
3404
3405 __ Sd(ra, MemOperand(sp, kCArgsSlotsSize)); // Store the return address.
3406 __ Call(t6); // Call the C++ function.
3407 __ Ld(t6, MemOperand(sp, kCArgsSlotsSize)); // Reload the return address.
3408
3409 if (FLAG_debug_code && FLAG_enable_slow_asserts) {
3410 // In case of an error the return address may point to a memory area
3411 // filled with kZapValue by the GC. Dereference the address and check for
3412 // this.
3413 __ Uld(a4, MemOperand(t6));
3414 __ Assert(ne, AbortReason::kReceivedInvalidReturnAddress, a4,
3415 Operand(reinterpret_cast<uint64_t>(kZapValue)));
3416 }
3417
3418 __ Jump(t6);
3419 }
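
// Editor's note: a conceptual sketch of the GC-safe call protocol above
// (hypothetical signature): the return address is parked in a stack slot that
// the GC can rewrite if the caller's Code object moves, and is reloaded from
// that slot, rather than from 'ra', after the call.
inline void* DirectCEntrySketch(void (*target)(), void** ra_slot,
                                void* return_address) {
  *ra_slot = return_address;  // Sd(ra, MemOperand(sp, kCArgsSlotsSize)).
  target();                   // Call(t6); the GC may move the caller here.
  return *ra_slot;            // Ld(t6, ...) + Jump(t6): possibly updated.
}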
3420
3421 namespace {
3422
3423 // This code tries to be close to ia32 code so that any changes can be
3424 // easily ported.
3425 void Generate_DeoptimizationEntry(MacroAssembler* masm,
3426 DeoptimizeKind deopt_kind) {
3427 Isolate* isolate = masm->isolate();
3428
3429 // Unlike on ARM we don't save all the registers, just the useful ones.
3430 // For the rest, there are gaps on the stack, so the offsets remain the same.
3431 const int kNumberOfRegisters = Register::kNumRegisters;
3432
3433 RegList restored_regs = kJSCallerSaved | kCalleeSaved;
3434 RegList saved_regs = restored_regs | sp.bit() | ra.bit();
3435
3436 const int kDoubleRegsSize = kDoubleSize * DoubleRegister::kNumRegisters;
3437
3438 // Save all double FPU registers before messing with them.
3439 __ Sub64(sp, sp, Operand(kDoubleRegsSize));
3440 const RegisterConfiguration* config = RegisterConfiguration::Default();
3441 for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
3442 int code = config->GetAllocatableDoubleCode(i);
3443 const DoubleRegister fpu_reg = DoubleRegister::from_code(code);
3444 int offset = code * kDoubleSize;
3445 __ StoreDouble(fpu_reg, MemOperand(sp, offset));
3446 }
3447
3448 // Push saved_regs (needed to populate FrameDescription::registers_).
3449 // Leave gaps for other registers.
3450 __ Sub64(sp, sp, kNumberOfRegisters * kSystemPointerSize);
3451 for (int16_t i = kNumberOfRegisters - 1; i >= 0; i--) {
3452 if ((saved_regs & (1 << i)) != 0) {
3453 __ Sd(ToRegister(i), MemOperand(sp, kSystemPointerSize * i));
3454 }
3455 }
3456
3457 __ li(a2,
3458 ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate));
3459 __ Sd(fp, MemOperand(a2));
3460
3461 const int kSavedRegistersAreaSize =
3462 (kNumberOfRegisters * kSystemPointerSize) + kDoubleRegsSize;
3463
3464 __ li(a2, Operand(Deoptimizer::kFixedExitSizeMarker));
3465 // Get the address of the location in the code object (a3) (return
3466 // address for lazy deoptimization) and compute the fp-to-sp delta in
3467 // register a4.
3468 __ Move(a3, ra);
3469 __ Add64(a4, sp, Operand(kSavedRegistersAreaSize));
3470
3471 __ Sub64(a4, fp, a4);
3472
3473 // Allocate a new deoptimizer object.
3474 __ PrepareCallCFunction(6, a5);
3475 // Pass six arguments, according to n64 ABI.
3476 __ Move(a0, zero_reg);
3477 Label context_check;
3478 __ Ld(a1, MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
3479 __ JumpIfSmi(a1, &context_check);
3480 __ Ld(a0, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
3481 __ bind(&context_check);
3482 __ li(a1, Operand(static_cast<int64_t>(deopt_kind)));
3483 // a2: bailout id already loaded.
3484 // a3: code address or 0 already loaded.
3485 // a4: already has fp-to-sp delta.
3486 __ li(a5, ExternalReference::isolate_address(isolate));
3487
3488 // Call Deoptimizer::New().
3489 {
3490 AllowExternalCallThatCantCauseGC scope(masm);
3491 __ CallCFunction(ExternalReference::new_deoptimizer_function(), 6);
3492 }
3493
3494 // Preserve the "deoptimizer" object in register a0 and load the input
3495 // frame descriptor pointer into a1 (deoptimizer->input_).
3496 __ Ld(a1, MemOperand(a0, Deoptimizer::input_offset()));
3497
3498 // Copy core registers into FrameDescription::registers_[kNumRegisters].
3499 DCHECK_EQ(Register::kNumRegisters, kNumberOfRegisters);
3500 for (int i = 0; i < kNumberOfRegisters; i++) {
3501 int offset =
3502 (i * kSystemPointerSize) + FrameDescription::registers_offset();
3503 if ((saved_regs & (1 << i)) != 0) {
3504 __ Ld(a2, MemOperand(sp, i * kSystemPointerSize));
3505 __ Sd(a2, MemOperand(a1, offset));
3506 } else if (FLAG_debug_code) {
3507 __ li(a2, kDebugZapValue);
3508 __ Sd(a2, MemOperand(a1, offset));
3509 }
3510 }
3511
3512 int double_regs_offset = FrameDescription::double_registers_offset();
3513 // Copy FPU registers to
3514 // double_registers_[DoubleRegister::kNumAllocatableRegisters]
3515 for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
3516 int code = config->GetAllocatableDoubleCode(i);
3517 int dst_offset = code * kDoubleSize + double_regs_offset;
3518 int src_offset =
3519 code * kDoubleSize + kNumberOfRegisters * kSystemPointerSize;
3520 __ LoadDouble(ft0, MemOperand(sp, src_offset));
3521 __ StoreDouble(ft0, MemOperand(a1, dst_offset));
3522 }
3523
3524 // Remove the saved registers from the stack.
3525 __ Add64(sp, sp, Operand(kSavedRegistersAreaSize));
3526
3527 // Compute a pointer to the unwinding limit in register a2; that is
3528 // the first stack slot not part of the input frame.
3529 __ Ld(a2, MemOperand(a1, FrameDescription::frame_size_offset()));
3530 __ Add64(a2, a2, sp);
3531
3532 // Unwind the stack down to - but not including - the unwinding
3533 // limit and copy the contents of the activation frame to the input
3534 // frame description.
3535 __ Add64(a3, a1, Operand(FrameDescription::frame_content_offset()));
3536 Label pop_loop;
3537 Label pop_loop_header;
3538 __ BranchShort(&pop_loop_header);
3539 __ bind(&pop_loop);
3540 __ pop(a4);
3541 __ Sd(a4, MemOperand(a3, 0));
3542 __ Add64(a3, a3, sizeof(uint64_t));
3543 __ bind(&pop_loop_header);
3544 __ Branch(&pop_loop, ne, a2, Operand(sp), Label::Distance::kNear);
3545 // Compute the output frame in the deoptimizer.
3546 __ push(a0); // Preserve deoptimizer object across call.
3547 // a0: deoptimizer object; a1: scratch.
3548 __ PrepareCallCFunction(1, a1);
3549 // Call Deoptimizer::ComputeOutputFrames().
3550 {
3551 AllowExternalCallThatCantCauseGC scope(masm);
3552 __ CallCFunction(ExternalReference::compute_output_frames_function(), 1);
3553 }
3554 __ pop(a0); // Restore deoptimizer object (class Deoptimizer).
3555
3556 __ Ld(sp, MemOperand(a0, Deoptimizer::caller_frame_top_offset()));
3557
3558 // Replace the current (input) frame with the output frames.
3559 Label outer_push_loop, inner_push_loop, outer_loop_header, inner_loop_header;
3560 // Outer loop state: a4 = current "FrameDescription** output_",
3561 // a1 = one past the last FrameDescription**.
3562 __ Lw(a1, MemOperand(a0, Deoptimizer::output_count_offset()));
3563 __ Ld(a4, MemOperand(a0, Deoptimizer::output_offset())); // a4 is output_.
3564 __ CalcScaledAddress(a1, a4, a1, kSystemPointerSizeLog2);
3565 __ BranchShort(&outer_loop_header);
3566 __ bind(&outer_push_loop);
3567 // Inner loop state: a2 = current FrameDescription*, a3 = loop index.
3568 __ Ld(a2, MemOperand(a4, 0)); // output_[ix]
3569 __ Ld(a3, MemOperand(a2, FrameDescription::frame_size_offset()));
3570 __ BranchShort(&inner_loop_header);
3571 __ bind(&inner_push_loop);
3572 __ Sub64(a3, a3, Operand(sizeof(uint64_t)));
3573 __ Add64(a6, a2, Operand(a3));
3574 __ Ld(a7, MemOperand(a6, FrameDescription::frame_content_offset()));
3575 __ push(a7);
3576 __ bind(&inner_loop_header);
3577 __ Branch(&inner_push_loop, ne, a3, Operand(zero_reg));
3578
3579 __ Add64(a4, a4, Operand(kSystemPointerSize));
3580 __ bind(&outer_loop_header);
3581 __ Branch(&outer_push_loop, lt, a4, Operand(a1));
3582
3583 __ Ld(a1, MemOperand(a0, Deoptimizer::input_offset()));
3584 for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
3585 int code = config->GetAllocatableDoubleCode(i);
3586 const DoubleRegister fpu_reg = DoubleRegister::from_code(code);
3587 int src_offset = code * kDoubleSize + double_regs_offset;
3588 __ LoadDouble(fpu_reg, MemOperand(a1, src_offset));
3589 }
3590
3591 // Push pc and continuation from the last output frame.
3592 __ Ld(a6, MemOperand(a2, FrameDescription::pc_offset()));
3593 __ push(a6);
3594 __ Ld(a6, MemOperand(a2, FrameDescription::continuation_offset()));
3595 __ push(a6);
3596
3597 // Technically restoring 't3' should work unless zero_reg is also restored,
3598 // but it is safer to check for this.
3599 DCHECK(!(t3.bit() & restored_regs));
3600 // Restore the registers from the last output frame.
3601 __ Move(t3, a2);
3602 for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
3603 int offset =
3604 (i * kSystemPointerSize) + FrameDescription::registers_offset();
3605 if ((restored_regs & (1 << i)) != 0) {
3606 __ Ld(ToRegister(i), MemOperand(t3, offset));
3607 }
3608 }
3609
3610 __ pop(t6); // Get continuation, leave pc on stack.
3611 __ pop(ra);
3612 __ Jump(t6);
3613 __ stop();
3614 }
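
// Editor's note: a sketch of the two frame-copy loops above, with
// hypothetical types: first the live stack (sp up to the unwind limit) is
// copied into the input FrameDescription, then each computed output frame is
// pushed back onto the real stack, highest offset first.
struct FrameDescriptionSketch {
  intptr_t frame_size;  // frame_size_offset()
  intptr_t* contents;   // frame_content_offset() area
};
inline void CopyToInputFrameSketch(intptr_t* sp, intptr_t* unwind_limit,
                                   FrameDescriptionSketch* input) {
  intptr_t* dst = input->contents;            // a3 in the pop_loop above.
  while (sp != unwind_limit) *dst++ = *sp++;  // pop(a4); Sd(a4, ...).
}
inline void PushOutputFrameSketch(const FrameDescriptionSketch* output,
                                  void (*push)(intptr_t)) {
  // inner_push_loop above: 8-byte slots on RISCV64, pushed from the top down.
  for (intptr_t i = output->frame_size / 8 - 1; i >= 0; --i) {
    push(output->contents[i]);
  }
}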
3615
3616 } // namespace
3617
3618 void Builtins::Generate_DeoptimizationEntry_Eager(MacroAssembler* masm) {
3619 Generate_DeoptimizationEntry(masm, DeoptimizeKind::kEager);
3620 }
3621
3622 void Builtins::Generate_DeoptimizationEntry_Soft(MacroAssembler* masm) {
3623 Generate_DeoptimizationEntry(masm, DeoptimizeKind::kSoft);
3624 }
3625
3626 void Builtins::Generate_DeoptimizationEntry_Bailout(MacroAssembler* masm) {
3627 Generate_DeoptimizationEntry(masm, DeoptimizeKind::kBailout);
3628 }
3629
3630 void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
3631 Generate_DeoptimizationEntry(masm, DeoptimizeKind::kLazy);
3632 }
3633
3634 namespace {
3635
3636 // Restarts execution either at the current or next (in execution order)
3637 // bytecode. If there is baseline code on the shared function info, converts an
3638 // interpreter frame into a baseline frame and continues execution in baseline
3639 // code. Otherwise execution continues with bytecode.
3640 void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
3641 bool next_bytecode,
3642 bool is_osr = false) {
3643 Label start;
3644 __ bind(&start);
3645
3646 // Get function from the frame.
3647 Register closure = a1;
3648 __ Ld(closure, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
3649
3650 // Get the Code object from the shared function info.
3651 Register code_obj = s1;
3652 __ LoadTaggedPointerField(
3653 code_obj,
3654 FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
3655 __ LoadTaggedPointerField(
3656 code_obj,
3657 FieldMemOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset));
3658
3659 // Check if we have baseline code. For OSR entry it is safe to assume we
3660 // always have baseline code.
3661 if (!is_osr) {
3662 Label start_with_baseline;
3663 UseScratchRegisterScope temps(masm);
3664 Register scratch = temps.Acquire();
3665 __ GetObjectType(code_obj, scratch, scratch);
3666 __ Branch(&start_with_baseline, eq, scratch, Operand(CODET_TYPE));
3667
3668 // Start with bytecode as there is no baseline code.
3669 Builtin builtin_id = next_bytecode
3670 ? Builtin::kInterpreterEnterAtNextBytecode
3671 : Builtin::kInterpreterEnterAtBytecode;
3672 __ Jump(masm->isolate()->builtins()->code_handle(builtin_id),
3673 RelocInfo::CODE_TARGET);
3674
3675 // Start with baseline code.
3676 __ bind(&start_with_baseline);
3677 } else if (FLAG_debug_code) {
3678 UseScratchRegisterScope temps(masm);
3679 Register scratch = temps.Acquire();
3680 __ GetObjectType(code_obj, scratch, scratch);
3681 __ Assert(eq, AbortReason::kExpectedBaselineData, scratch,
3682 Operand(CODET_TYPE));
3683 }
3684 if (FLAG_debug_code) {
3685 UseScratchRegisterScope temps(masm);
3686 Register scratch = temps.Acquire();
3687 AssertCodeIsBaseline(masm, code_obj, scratch);
3688 }
3689 // Replace BytecodeOffset with the feedback vector.
3690 Register feedback_vector = a2;
3691 __ LoadTaggedPointerField(
3692 feedback_vector,
3693 FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
3694 __ LoadTaggedPointerField(
3695 feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
3696 Label install_baseline_code;
3697 // Check that the feedback vector is valid. If not, call the runtime to
3698 // allocate it (see install_baseline_code below).
3699 UseScratchRegisterScope temps(masm);
3700 Register type = temps.Acquire();
3701 __ GetObjectType(feedback_vector, type, type);
3702 __ Branch(&install_baseline_code, ne, type, Operand(FEEDBACK_VECTOR_TYPE));
3703 // Save BytecodeOffset from the stack frame.
3704 __ SmiUntag(kInterpreterBytecodeOffsetRegister,
3705 MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
3706 // Replace BytecodeOffset with the feedback vector.
3707 __ Sd(feedback_vector,
3708 MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
3709 feedback_vector = no_reg;
3710
3711 // Compute baseline pc for bytecode offset.
3712 ExternalReference get_baseline_pc_extref;
3713 if (next_bytecode || is_osr) {
3714 get_baseline_pc_extref =
3715 ExternalReference::baseline_pc_for_next_executed_bytecode();
3716 } else {
3717 get_baseline_pc_extref =
3718 ExternalReference::baseline_pc_for_bytecode_offset();
3719 }
3720
3721 Register get_baseline_pc = a3;
3722 __ li(get_baseline_pc, get_baseline_pc_extref);
3723
3724 // If the code deoptimizes during the implicit function entry stack interrupt
3725 // check, it will have a bailout ID of kFunctionEntryBytecodeOffset, which is
3726 // not a valid bytecode offset.
3727 // TODO(pthier): Investigate if it is feasible to handle this special case
3728 // in TurboFan instead of here.
3729 Label valid_bytecode_offset, function_entry_bytecode;
3730 if (!is_osr) {
3731 __ Branch(&function_entry_bytecode, eq, kInterpreterBytecodeOffsetRegister,
3732 Operand(BytecodeArray::kHeaderSize - kHeapObjectTag +
3733 kFunctionEntryBytecodeOffset));
3734 }
3735
3736 __ Sub64(kInterpreterBytecodeOffsetRegister,
3737 kInterpreterBytecodeOffsetRegister,
3738 (BytecodeArray::kHeaderSize - kHeapObjectTag));
3739
3740 __ bind(&valid_bytecode_offset);
3741 // Get bytecode array from the stack frame.
3742 __ Ld(kInterpreterBytecodeArrayRegister,
3743 MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
3744 __ Push(kInterpreterAccumulatorRegister);
3745 {
3746 Register arg_reg_1 = a0;
3747 Register arg_reg_2 = a1;
3748 Register arg_reg_3 = a2;
3749 __ Move(arg_reg_1, code_obj);
3750 __ Move(arg_reg_2, kInterpreterBytecodeOffsetRegister);
3751 __ Move(arg_reg_3, kInterpreterBytecodeArrayRegister);
3752 FrameScope scope(masm, StackFrame::INTERNAL);
3753 __ CallCFunction(get_baseline_pc, 3, 0);
3754 }
3755 __ Add64(code_obj, code_obj, kReturnRegister0);
3756 __ Pop(kInterpreterAccumulatorRegister);
3757
3758 if (is_osr) {
3759 // Reset the OSR loop nesting depth to disarm back edges.
3760 // TODO(pthier): Separate baseline Sparkplug from TF arming and don't disarm
3761 // Sparkplug here.
3762 __ Ld(kInterpreterBytecodeArrayRegister,
3763 MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
3764 __ Sh(zero_reg, FieldMemOperand(kInterpreterBytecodeArrayRegister,
3765 BytecodeArray::kOsrLoopNestingLevelOffset));
3766 Generate_OSREntry(masm, code_obj,
3767 Operand(Code::kHeaderSize - kHeapObjectTag));
3768 } else {
3769 __ Add64(code_obj, code_obj, Code::kHeaderSize - kHeapObjectTag);
3770 __ Jump(code_obj);
3771 }
3772 __ Trap(); // Unreachable.
3773
3774 if (!is_osr) {
3775 __ bind(&function_entry_bytecode);
3776 // If the bytecode offset is kFunctionEntryBytecodeOffset, get the start
3777 // address of the first bytecode.
3778 __ li(kInterpreterBytecodeOffsetRegister, Operand(int64_t(0)));
3779 if (next_bytecode) {
3780 __ li(get_baseline_pc,
3781 ExternalReference::baseline_pc_for_bytecode_offset());
3782 }
3783 __ Branch(&valid_bytecode_offset);
3784 }
3785
3786 __ bind(&install_baseline_code);
3787 {
3788 FrameScope scope(masm, StackFrame::INTERNAL);
3789 __ Push(kInterpreterAccumulatorRegister);
3790 __ Push(closure);
3791 __ CallRuntime(Runtime::kInstallBaselineCode, 1);
3792 __ Pop(kInterpreterAccumulatorRegister);
3793 }
3794 // Retry from the start after installing baseline code.
3795 __ Branch(&start);
3796 }
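
// Editor's note: the decision flow above, condensed into a hypothetical
// helper: bytecode-only functions re-enter the interpreter, a missing
// feedback vector triggers Runtime::kInstallBaselineCode and a retry, and
// otherwise the baseline pc is computed and jumped to.
enum class EntryChoiceSketch {
  kInterpreter,
  kInstallBaselineAndRetry,
  kBaseline
};
inline EntryChoiceSketch ChooseEntrySketch(bool has_baseline_code,
                                           bool has_feedback_vector,
                                           bool is_osr) {
  // For OSR entry it is safe to assume baseline code exists (see above).
  if (!has_baseline_code && !is_osr) return EntryChoiceSketch::kInterpreter;
  if (!has_feedback_vector) return EntryChoiceSketch::kInstallBaselineAndRetry;
  return EntryChoiceSketch::kBaseline;
}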
3797
3798 } // namespace
3799
3800 void Builtins::Generate_BaselineOrInterpreterEnterAtBytecode(
3801 MacroAssembler* masm) {
3802 Generate_BaselineOrInterpreterEntry(masm, false);
3803 }
3804
3805 void Builtins::Generate_BaselineOrInterpreterEnterAtNextBytecode(
3806 MacroAssembler* masm) {
3807 Generate_BaselineOrInterpreterEntry(masm, true);
3808 }
3809
3810 void Builtins::Generate_InterpreterOnStackReplacement_ToBaseline(
3811 MacroAssembler* masm) {
3812 Generate_BaselineOrInterpreterEntry(masm, false, true);
3813 }
3814
3815 void Builtins::Generate_DynamicCheckMapsTrampoline(MacroAssembler* masm) {
3816 Generate_DynamicCheckMapsTrampoline<DynamicCheckMapsDescriptor>(
3817 masm, BUILTIN_CODE(masm->isolate(), DynamicCheckMaps));
3818 }
3819
3820 void Builtins::Generate_DynamicCheckMapsWithFeedbackVectorTrampoline(
3821 MacroAssembler* masm) {
3822 Generate_DynamicCheckMapsTrampoline<
3823 DynamicCheckMapsWithFeedbackVectorDescriptor>(
3824 masm, BUILTIN_CODE(masm->isolate(), DynamicCheckMapsWithFeedbackVector));
3825 }
3826
3827 template <class Descriptor>
3828 void Builtins::Generate_DynamicCheckMapsTrampoline(
3829 MacroAssembler* masm, Handle<Code> builtin_target) {
3830 FrameScope scope(masm, StackFrame::MANUAL);
3831 __ EnterFrame(StackFrame::INTERNAL);
3832
3833 // Only save the registers that the DynamicMapChecks builtin can clobber.
3834 Descriptor descriptor;
3835 RegList registers = descriptor.allocatable_registers();
3836 // If FLAG_debug_code is enabled, CSA checks will call a C function, so we
3837 // need to save all caller-saved registers too.
3838 if (FLAG_debug_code) registers |= kJSCallerSaved;
3839 __ MaybeSaveRegisters(registers);
3840
3841 // Load the immediate arguments from the deopt exit to pass to the builtin.
3842 Register slot_arg = descriptor.GetRegisterParameter(Descriptor::kSlot);
3843 Register handler_arg = descriptor.GetRegisterParameter(Descriptor::kHandler);
3844 __ Ld(handler_arg, MemOperand(fp, CommonFrameConstants::kCallerPCOffset));
3845 __ Uld(slot_arg, MemOperand(handler_arg,
3846 Deoptimizer::kEagerWithResumeImmedArgs1PcOffset));
3847 __ Uld(
3848 handler_arg,
3849 MemOperand(handler_arg, Deoptimizer::kEagerWithResumeImmedArgs2PcOffset));
3850 __ Call(builtin_target, RelocInfo::CODE_TARGET);
3851
3852 Label deopt, bailout;
3853 __ Branch(&deopt, ne, a0,
3854 Operand(static_cast<int64_t>(DynamicCheckMapsStatus::kSuccess)),
3855 Label::Distance::kNear);
3856
3857 __ MaybeRestoreRegisters(registers);
3858 __ LeaveFrame(StackFrame::INTERNAL);
3859 __ Ret();
3860
3861 __ bind(&deopt);
3862 __ Branch(&bailout, eq, a0,
3863 Operand(static_cast<int64_t>(DynamicCheckMapsStatus::kBailout)));
3864
3865 if (FLAG_debug_code) {
3866 __ Assert(eq, AbortReason::kUnexpectedDynamicCheckMapsStatus, a0,
3867 Operand(static_cast<int64_t>(DynamicCheckMapsStatus::kDeopt)));
3868 }
3869 __ MaybeRestoreRegisters(registers);
3870 __ LeaveFrame(StackFrame::INTERNAL);
3871 Handle<Code> deopt_eager = masm->isolate()->builtins()->code_handle(
3872 Deoptimizer::GetDeoptimizationEntry(DeoptimizeKind::kEager));
3873 __ Jump(deopt_eager, RelocInfo::CODE_TARGET);
3874
3875 __ bind(&bailout);
3876 __ MaybeRestoreRegisters(registers);
3877 __ LeaveFrame(StackFrame::INTERNAL);
3878 Handle<Code> deopt_bailout = masm->isolate()->builtins()->code_handle(
3879 Deoptimizer::GetDeoptimizationEntry(DeoptimizeKind::kBailout));
3880 __ Jump(deopt_bailout, RelocInfo::CODE_TARGET);
3881 }
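
// Editor's note: the status dispatch above, as a hypothetical table (the real
// enum is DynamicCheckMapsStatus; the values used here are illustrative):
// kSuccess returns to the caller, kBailout tail-calls the bailout deopt entry,
// and kDeopt the eager deopt entry.
inline const char* DynamicCheckMapsDispatchSketch(int status) {
  const int kSuccess = 0, kBailout = 1;  // Illustrative values only.
  if (status == kSuccess) return "Ret";
  if (status == kBailout) return "DeoptimizationEntry_Bailout";
  return "DeoptimizationEntry_Eager";  // kDeopt (asserted in debug code).
}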
3882
3883 #undef __
3884
3885 } // namespace internal
3886 } // namespace v8
3887
3888 #endif // V8_TARGET_ARCH_RISCV64
3889