// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <limits.h>  // For LONG_MIN, LONG_MAX.

#if V8_TARGET_ARCH_ARM

#include "src/base/bits.h"
#include "src/base/division-by-constant.h"
#include "src/base/numbers/double.h"
#include "src/base/utils/random-number-generator.h"
#include "src/codegen/assembler-inl.h"
#include "src/codegen/callable.h"
#include "src/codegen/code-factory.h"
#include "src/codegen/external-reference-table.h"
#include "src/codegen/interface-descriptors-inl.h"
#include "src/codegen/macro-assembler.h"
#include "src/codegen/register-configuration.h"
#include "src/debug/debug.h"
#include "src/deoptimizer/deoptimizer.h"
#include "src/execution/frames-inl.h"
#include "src/heap/memory-chunk.h"
#include "src/init/bootstrapper.h"
#include "src/logging/counters.h"
#include "src/objects/objects-inl.h"
#include "src/runtime/runtime.h"
#include "src/snapshot/snapshot.h"

#if V8_ENABLE_WEBASSEMBLY
#include "src/wasm/wasm-code-manager.h"
#endif  // V8_ENABLE_WEBASSEMBLY

// Satisfy cpplint check, but don't include platform-specific header. It is
// included recursively via macro-assembler.h.
#if 0
#include "src/codegen/arm/macro-assembler-arm.h"
#endif

namespace v8 {
namespace internal {

int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
                                                    Register exclusion1,
                                                    Register exclusion2,
                                                    Register exclusion3) const {
  int bytes = 0;
  RegList exclusions = 0;
  if (exclusion1 != no_reg) {
    exclusions |= exclusion1.bit();
    if (exclusion2 != no_reg) {
      exclusions |= exclusion2.bit();
      if (exclusion3 != no_reg) {
        exclusions |= exclusion3.bit();
      }
    }
  }

  RegList list = (kCallerSaved | lr.bit()) & ~exclusions;

  bytes += NumRegs(list) * kPointerSize;

  if (fp_mode == SaveFPRegsMode::kSave) {
    bytes += DwVfpRegister::kNumRegisters * DwVfpRegister::kSizeInBytes;
  }

  return bytes;
}

int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
                                    Register exclusion2, Register exclusion3) {
  ASM_CODE_COMMENT(this);
  int bytes = 0;
  RegList exclusions = 0;
  if (exclusion1 != no_reg) {
    exclusions |= exclusion1.bit();
    if (exclusion2 != no_reg) {
      exclusions |= exclusion2.bit();
      if (exclusion3 != no_reg) {
        exclusions |= exclusion3.bit();
      }
    }
  }

  RegList list = (kCallerSaved | lr.bit()) & ~exclusions;
  stm(db_w, sp, list);

  bytes += NumRegs(list) * kPointerSize;

  if (fp_mode == SaveFPRegsMode::kSave) {
    SaveFPRegs(sp, lr);
    bytes += DwVfpRegister::kNumRegisters * DwVfpRegister::kSizeInBytes;
  }

  return bytes;
}

int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
                                   Register exclusion2, Register exclusion3) {
  ASM_CODE_COMMENT(this);
  int bytes = 0;
  if (fp_mode == SaveFPRegsMode::kSave) {
    RestoreFPRegs(sp, lr);
    bytes += DwVfpRegister::kNumRegisters * DwVfpRegister::kSizeInBytes;
  }

  RegList exclusions = 0;
  if (exclusion1 != no_reg) {
    exclusions |= exclusion1.bit();
    if (exclusion2 != no_reg) {
      exclusions |= exclusion2.bit();
      if (exclusion3 != no_reg) {
        exclusions |= exclusion3.bit();
      }
    }
  }

  RegList list = (kCallerSaved | lr.bit()) & ~exclusions;
  ldm(ia_w, sp, list);

  bytes += NumRegs(list) * kPointerSize;

  return bytes;
}
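
// Illustrative usage (not part of the original source): PushCallerSaved and
// PopCallerSaved are intended to be used as a matched pair around code that
// clobbers caller-saved registers, e.g.
//   int bytes = PushCallerSaved(SaveFPRegsMode::kSave);
//   ...  // code that may clobber caller-saved registers
//   PopCallerSaved(SaveFPRegsMode::kSave);
// Both calls return the number of bytes pushed/popped, which must match.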

void TurboAssembler::LoadFromConstantsTable(Register destination,
                                            int constant_index) {
  DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kBuiltinsConstantsTable));

  const uint32_t offset =
      FixedArray::kHeaderSize + constant_index * kPointerSize - kHeapObjectTag;

  LoadRoot(destination, RootIndex::kBuiltinsConstantsTable);
  ldr(destination, MemOperand(destination, offset));
}
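
// Worked example (illustrative): on 32-bit ARM, kPointerSize == 4 and
// kHeapObjectTag == 1, so for constant_index == 3 the computed offset is
// FixedArray::kHeaderSize + 12 - 1, i.e. the byte offset of the fourth
// constants-table slot relative to the tagged table pointer.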

void TurboAssembler::LoadRootRelative(Register destination, int32_t offset) {
  ldr(destination, MemOperand(kRootRegister, offset));
}

void TurboAssembler::LoadRootRegisterOffset(Register destination,
                                            intptr_t offset) {
  if (offset == 0) {
    Move(destination, kRootRegister);
  } else {
    add(destination, kRootRegister, Operand(offset));
  }
}

void TurboAssembler::Jump(Register target, Condition cond) { bx(target, cond); }

void TurboAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
                          Condition cond) {
  mov(pc, Operand(target, rmode), LeaveCC, cond);
}

void TurboAssembler::Jump(Address target, RelocInfo::Mode rmode,
                          Condition cond) {
  DCHECK(!RelocInfo::IsCodeTarget(rmode));
  Jump(static_cast<intptr_t>(target), rmode, cond);
}

void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
                          Condition cond) {
  DCHECK(RelocInfo::IsCodeTarget(rmode));
  DCHECK_IMPLIES(options().isolate_independent_code,
                 Builtins::IsIsolateIndependentBuiltin(*code));
  DCHECK_IMPLIES(options().use_pc_relative_calls_and_jumps,
                 Builtins::IsIsolateIndependentBuiltin(*code));

  Builtin builtin = Builtin::kNoBuiltinId;
  bool target_is_builtin =
      isolate()->builtins()->IsBuiltinHandle(code, &builtin);

  if (options().use_pc_relative_calls_and_jumps && target_is_builtin) {
    int32_t code_target_index = AddCodeTarget(code);
    b(code_target_index * kInstrSize, cond, RelocInfo::RELATIVE_CODE_TARGET);
    return;
  } else if (root_array_available_ && options().isolate_independent_code) {
    // This branch is taken only for specific cctests, where we force isolate
    // creation at runtime. At this point, Code space isn't restricted to a
    // size such that pc-relative calls may be used.
    UseScratchRegisterScope temps(this);
    Register scratch = temps.Acquire();
    int offset = IsolateData::BuiltinEntrySlotOffset(code->builtin_id());
    ldr(scratch, MemOperand(kRootRegister, offset));
    Jump(scratch, cond);
    return;
  } else if (options().inline_offheap_trampolines && target_is_builtin) {
    // Inline the trampoline.
    RecordCommentForOffHeapTrampoline(builtin);
    // Use ip directly instead of using UseScratchRegisterScope, as we do not
    // preserve scratch registers across calls.
    mov(ip, Operand(BuiltinEntry(builtin), RelocInfo::OFF_HEAP_TARGET));
    Jump(ip, cond);
    return;
  }

  // 'code' is always generated ARM code, never THUMB code
  Jump(static_cast<intptr_t>(code.address()), rmode, cond);
}

void TurboAssembler::Jump(const ExternalReference& reference) {
  UseScratchRegisterScope temps(this);
  Register scratch = temps.Acquire();
  Move(scratch, reference);
  Jump(scratch);
}

void TurboAssembler::Call(Register target, Condition cond) {
  // Block constant pool for the call instruction sequence.
  BlockConstPoolScope block_const_pool(this);
  blx(target, cond);
}

void TurboAssembler::Call(Address target, RelocInfo::Mode rmode, Condition cond,
                          TargetAddressStorageMode mode,
                          bool check_constant_pool) {
  // Check if we have to emit the constant pool before we block it.
  if (check_constant_pool) MaybeCheckConstPool();
  // Block constant pool for the call instruction sequence.
  BlockConstPoolScope block_const_pool(this);

  bool old_predictable_code_size = predictable_code_size();
  if (mode == NEVER_INLINE_TARGET_ADDRESS) {
    set_predictable_code_size(true);
  }

  // Use ip directly instead of using UseScratchRegisterScope, as we do not
  // preserve scratch registers across calls.
  // Call sequence on V7 or later may be:
  //  movw ip, #...      @ call address low 16
  //  movt ip, #...      @ call address high 16
  //  blx  ip
  //                     @ return address
  // Or for pre-V7 or values that may be back-patched
  // to avoid ICache flushes:
  //  ldr ip, [pc, #...] @ call address
  //  blx ip
  //                     @ return address

  mov(ip, Operand(target, rmode));
  blx(ip, cond);

  if (mode == NEVER_INLINE_TARGET_ADDRESS) {
    set_predictable_code_size(old_predictable_code_size);
  }
}

void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
                          Condition cond, TargetAddressStorageMode mode,
                          bool check_constant_pool) {
  DCHECK(RelocInfo::IsCodeTarget(rmode));
  DCHECK_IMPLIES(options().isolate_independent_code,
                 Builtins::IsIsolateIndependentBuiltin(*code));
  DCHECK_IMPLIES(options().use_pc_relative_calls_and_jumps,
                 Builtins::IsIsolateIndependentBuiltin(*code));

  Builtin builtin = Builtin::kNoBuiltinId;
  bool target_is_builtin =
      isolate()->builtins()->IsBuiltinHandle(code, &builtin);

  if (target_is_builtin && options().use_pc_relative_calls_and_jumps) {
    int32_t code_target_index = AddCodeTarget(code);
    bl(code_target_index * kInstrSize, cond, RelocInfo::RELATIVE_CODE_TARGET);
    return;
  } else if (root_array_available_ && options().isolate_independent_code) {
    // This branch is taken only for specific cctests, where we force isolate
    // creation at runtime. At this point, Code space isn't restricted to a
    // size such that pc-relative calls may be used.
    int offset = IsolateData::BuiltinEntrySlotOffset(code->builtin_id());
    ldr(ip, MemOperand(kRootRegister, offset));
    Call(ip, cond);
    return;
  } else if (target_is_builtin && options().inline_offheap_trampolines) {
    // Inline the trampoline.
    CallBuiltin(builtin);
    return;
  }

  // 'code' is always generated ARM code, never THUMB code
  DCHECK(code->IsExecutable());
  Call(code.address(), rmode, cond, mode);
}

void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) {
  ASM_CODE_COMMENT(this);
  STATIC_ASSERT(kSystemPointerSize == 4);
  STATIC_ASSERT(kSmiShiftSize == 0);
  STATIC_ASSERT(kSmiTagSize == 1);
  STATIC_ASSERT(kSmiTag == 0);

  // The builtin_index register contains the builtin index as a Smi.
  // Untagging is folded into the indexing operand below.
  mov(builtin_index,
      Operand(builtin_index, LSL, kSystemPointerSizeLog2 - kSmiTagSize));
  add(builtin_index, builtin_index,
      Operand(IsolateData::builtin_entry_table_offset()));
  ldr(builtin_index, MemOperand(kRootRegister, builtin_index));
}
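
// Worked example (illustrative): a Smi-encoded builtin index i is stored as
// (i << kSmiTagSize), i.e. i * 2. Shifting it left by
// kSystemPointerSizeLog2 - kSmiTagSize == 2 - 1 == 1 therefore yields i * 4,
// which is exactly the byte offset of entry i in the builtin entry table.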

void TurboAssembler::CallBuiltinByIndex(Register builtin_index) {
  LoadEntryFromBuiltinIndex(builtin_index);
  Call(builtin_index);
}

void TurboAssembler::LoadEntryFromBuiltin(Builtin builtin,
                                          Register destination) {
  ASM_CODE_COMMENT(this);
  ldr(destination, EntryFromBuiltinAsOperand(builtin));
}

MemOperand TurboAssembler::EntryFromBuiltinAsOperand(Builtin builtin) {
  ASM_CODE_COMMENT(this);
  DCHECK(root_array_available());
  return MemOperand(kRootRegister,
                    IsolateData::BuiltinEntrySlotOffset(builtin));
}

void TurboAssembler::CallBuiltin(Builtin builtin, Condition cond) {
  ASM_CODE_COMMENT_STRING(this, CommentForOffHeapTrampoline("call", builtin));
  DCHECK(Builtins::IsBuiltinId(builtin));
  // Use ip directly instead of using UseScratchRegisterScope, as we do not
  // preserve scratch registers across calls.
  mov(ip, Operand(BuiltinEntry(builtin), RelocInfo::OFF_HEAP_TARGET));
  Call(ip, cond);
}

void TurboAssembler::LoadCodeObjectEntry(Register destination,
                                         Register code_object) {
  ASM_CODE_COMMENT(this);
  // Code objects are called differently depending on whether we are generating
  // builtin code (which will later be embedded into the binary) or compiling
  // user JS code at runtime.
  // * Builtin code runs in --jitless mode and thus must not call into on-heap
  //   Code targets. Instead, we dispatch through the builtins entry table.
  // * Codegen at runtime does not have this restriction and we can use the
  //   shorter, branchless instruction sequence. The assumption here is that
  //   targets are usually generated code and not builtin Code objects.

  if (options().isolate_independent_code) {
    DCHECK(root_array_available());
    Label if_code_is_off_heap, out;

    {
      UseScratchRegisterScope temps(this);
      Register scratch = temps.Acquire();

      DCHECK(!AreAliased(destination, scratch));
      DCHECK(!AreAliased(code_object, scratch));

      // Check whether the Code object is an off-heap trampoline. If so, call
      // its (off-heap) entry point directly without going through the
      // (on-heap) trampoline. Otherwise, just call the Code object as always.
      ldr(scratch, FieldMemOperand(code_object, Code::kFlagsOffset));
      tst(scratch, Operand(Code::IsOffHeapTrampoline::kMask));
      b(ne, &if_code_is_off_heap);

      // Not an off-heap trampoline, the entry point is at
      // Code::raw_instruction_start().
      add(destination, code_object,
          Operand(Code::kHeaderSize - kHeapObjectTag));
      jmp(&out);

      // An off-heap trampoline, the entry point is loaded from the builtin
      // entry table.
      bind(&if_code_is_off_heap);
      ldr(scratch, FieldMemOperand(code_object, Code::kBuiltinIndexOffset));
      lsl(destination, scratch, Operand(kSystemPointerSizeLog2));
    }
    add(destination, destination, kRootRegister);
    ldr(destination,
        MemOperand(destination, IsolateData::builtin_entry_table_offset()));

    bind(&out);
  } else {
    add(destination, code_object, Operand(Code::kHeaderSize - kHeapObjectTag));
  }
}

void TurboAssembler::CallCodeObject(Register code_object) {
  ASM_CODE_COMMENT(this);
  LoadCodeObjectEntry(code_object, code_object);
  Call(code_object);
}

void TurboAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) {
  ASM_CODE_COMMENT(this);
  DCHECK_EQ(JumpMode::kJump, jump_mode);
  LoadCodeObjectEntry(code_object, code_object);
  Jump(code_object);
}

void TurboAssembler::StoreReturnAddressAndCall(Register target) {
  ASM_CODE_COMMENT(this);
  // This generates the final instruction sequence for calls to C functions
  // once an exit frame has been constructed.
  //
  // Note that this assumes the caller code (i.e. the Code object currently
  // being generated) is immovable or that the callee function cannot trigger
  // GC, since the callee function will return to it.

  // Compute the return address in lr to return to after the jump below. The pc
  // is already at '+ 8' from the current instruction; but return is after three
  // instructions, so add another 4 to pc to get the return address.
  Assembler::BlockConstPoolScope block_const_pool(this);
  add(lr, pc, Operand(4));
  str(lr, MemOperand(sp));
  Call(target);
}
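
// Illustrative layout of the sequence above (ARM mode, 4-byte instructions,
// where reading pc yields the current instruction address + 8):
//    0: add lr, pc, #4   ; lr = 0 + 8 + 4 = 12
//    4: str lr, [sp]
//    8: blx target       ; emitted by Call(target)
//   12:                  ; <- the stored return address lands here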

void TurboAssembler::Ret(Condition cond) { bx(lr, cond); }

void TurboAssembler::Drop(int count, Condition cond) {
  if (count > 0) {
    add(sp, sp, Operand(count * kPointerSize), LeaveCC, cond);
  }
}

void TurboAssembler::Drop(Register count, Condition cond) {
  add(sp, sp, Operand(count, LSL, kPointerSizeLog2), LeaveCC, cond);
}

void TurboAssembler::Ret(int drop, Condition cond) {
  Drop(drop, cond);
  Ret(cond);
}

void TurboAssembler::Call(Label* target) { bl(target); }

void TurboAssembler::Push(Handle<HeapObject> handle) {
  UseScratchRegisterScope temps(this);
  Register scratch = temps.Acquire();
  mov(scratch, Operand(handle));
  push(scratch);
}

void TurboAssembler::Push(Smi smi) {
  UseScratchRegisterScope temps(this);
  Register scratch = temps.Acquire();
  mov(scratch, Operand(smi));
  push(scratch);
}

void TurboAssembler::PushArray(Register array, Register size, Register scratch,
                               PushArrayOrder order) {
  ASM_CODE_COMMENT(this);
  UseScratchRegisterScope temps(this);
  Register counter = scratch;
  Register tmp = temps.Acquire();
  DCHECK(!AreAliased(array, size, counter, tmp));
  Label loop, entry;
  if (order == PushArrayOrder::kReverse) {
    mov(counter, Operand(0));
    b(&entry);
    bind(&loop);
    ldr(tmp, MemOperand(array, counter, LSL, kSystemPointerSizeLog2));
    push(tmp);
    add(counter, counter, Operand(1));
    bind(&entry);
    cmp(counter, size);
    b(lt, &loop);
  } else {
    mov(counter, size);
    b(&entry);
    bind(&loop);
    ldr(tmp, MemOperand(array, counter, LSL, kSystemPointerSizeLog2));
    push(tmp);
    bind(&entry);
    sub(counter, counter, Operand(1), SetCC);
    b(ge, &loop);
  }
}
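
// Illustrative note: in the default order the loop walks from size - 1 down
// to 0, so array[0] is pushed last and ends up at the top of the stack;
// kReverse walks upwards from 0, leaving array[size - 1] on top instead.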

void TurboAssembler::Move(Register dst, Smi smi) { mov(dst, Operand(smi)); }

void TurboAssembler::Move(Register dst, Handle<HeapObject> value) {
  // TODO(jgruber,v8:8887): Also consider a root-relative load when generating
  // non-isolate-independent code. In many cases it might be cheaper than
  // embedding the relocatable value.
  if (root_array_available_ && options().isolate_independent_code) {
    IndirectLoadConstant(dst, value);
    return;
  }
  mov(dst, Operand(value));
}

void TurboAssembler::Move(Register dst, ExternalReference reference) {
  // TODO(jgruber,v8:8887): Also consider a root-relative load when generating
  // non-isolate-independent code. In many cases it might be cheaper than
  // embedding the relocatable value.
  if (root_array_available_ && options().isolate_independent_code) {
    IndirectLoadExternalReference(dst, reference);
    return;
  }
  mov(dst, Operand(reference));
}

void TurboAssembler::Move(Register dst, Register src, Condition cond) {
  if (dst != src) {
    mov(dst, src, LeaveCC, cond);
  }
}

void TurboAssembler::Move(SwVfpRegister dst, SwVfpRegister src,
                          Condition cond) {
  if (dst != src) {
    vmov(dst, src, cond);
  }
}

void TurboAssembler::Move(DwVfpRegister dst, DwVfpRegister src,
                          Condition cond) {
  if (dst != src) {
    vmov(dst, src, cond);
  }
}

void TurboAssembler::Move(QwNeonRegister dst, QwNeonRegister src) {
  if (dst != src) {
    vmov(dst, src);
  }
}

void TurboAssembler::MovePair(Register dst0, Register src0, Register dst1,
                              Register src1) {
  DCHECK_NE(dst0, dst1);
  if (dst0 != src1) {
    Move(dst0, src0);
    Move(dst1, src1);
  } else if (dst1 != src0) {
    // Swap the order of the moves to resolve the overlap.
    Move(dst1, src1);
    Move(dst0, src0);
  } else {
    // Worst-case scenario, this is a swap.
    Swap(dst0, src0);
  }
}

void TurboAssembler::Swap(Register srcdst0, Register srcdst1) {
  DCHECK(srcdst0 != srcdst1);
  UseScratchRegisterScope temps(this);
  Register scratch = temps.Acquire();
  mov(scratch, srcdst0);
  mov(srcdst0, srcdst1);
  mov(srcdst1, scratch);
}

void TurboAssembler::Swap(DwVfpRegister srcdst0, DwVfpRegister srcdst1) {
  DCHECK(srcdst0 != srcdst1);
  DCHECK(VfpRegisterIsAvailable(srcdst0));
  DCHECK(VfpRegisterIsAvailable(srcdst1));

  if (CpuFeatures::IsSupported(NEON)) {
    vswp(srcdst0, srcdst1);
  } else {
    UseScratchRegisterScope temps(this);
    DwVfpRegister scratch = temps.AcquireD();
    vmov(scratch, srcdst0);
    vmov(srcdst0, srcdst1);
    vmov(srcdst1, scratch);
  }
}

void TurboAssembler::Swap(QwNeonRegister srcdst0, QwNeonRegister srcdst1) {
  DCHECK(srcdst0 != srcdst1);
  vswp(srcdst0, srcdst1);
}

void MacroAssembler::Mls(Register dst, Register src1, Register src2,
                         Register srcA, Condition cond) {
  if (CpuFeatures::IsSupported(ARMv7)) {
    CpuFeatureScope scope(this, ARMv7);
    mls(dst, src1, src2, srcA, cond);
  } else {
    UseScratchRegisterScope temps(this);
    Register scratch = temps.Acquire();
    DCHECK(srcA != scratch);
    mul(scratch, src1, src2, LeaveCC, cond);
    sub(dst, srcA, scratch, LeaveCC, cond);
  }
}
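
// Illustrative note: both paths compute dst = srcA - (src1 * src2). The
// pre-ARMv7 fallback simply materializes the product in a scratch register
// and subtracts it, matching the semantics of the single mls instruction.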

void MacroAssembler::And(Register dst, Register src1, const Operand& src2,
                         Condition cond) {
  if (!src2.IsRegister() && !src2.MustOutputRelocInfo(this) &&
      src2.immediate() == 0) {
    mov(dst, Operand::Zero(), LeaveCC, cond);
  } else if (!(src2.InstructionsRequired(this) == 1) &&
             !src2.MustOutputRelocInfo(this) &&
             CpuFeatures::IsSupported(ARMv7) &&
             base::bits::IsPowerOfTwo(src2.immediate() + 1)) {
    CpuFeatureScope scope(this, ARMv7);
    ubfx(dst, src1, 0,
         base::bits::WhichPowerOfTwo(static_cast<uint32_t>(src2.immediate()) +
                                     1),
         cond);
  } else {
    and_(dst, src1, src2, LeaveCC, cond);
  }
}
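
// Illustrative note: the ubfx path applies when the immediate is a low bit
// mask of the form 2^n - 1 that does not fit in a single and instruction;
// masking with 2^n - 1 is the same as extracting the low n bits, so e.g.
// And(dst, src, Operand(0xFFFF)) can become `ubfx dst, src, #0, #16`.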

void MacroAssembler::Ubfx(Register dst, Register src1, int lsb, int width,
                          Condition cond) {
  DCHECK_LT(lsb, 32);
  if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
    int mask = (1u << (width + lsb)) - 1u - ((1u << lsb) - 1u);
    and_(dst, src1, Operand(mask), LeaveCC, cond);
    if (lsb != 0) {
      mov(dst, Operand(dst, LSR, lsb), LeaveCC, cond);
    }
  } else {
    CpuFeatureScope scope(this, ARMv7);
    ubfx(dst, src1, lsb, width, cond);
  }
}
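
// Worked example (illustrative) for the fallback path: with lsb == 4 and
// width == 8 the mask is (1 << 12) - 1 - ((1 << 4) - 1) == 0xFF0, so the
// field is isolated with the `and` and then moved down with a logical shift
// right by 4, leaving the 8-bit field zero-extended in dst.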

void MacroAssembler::Sbfx(Register dst, Register src1, int lsb, int width,
                          Condition cond) {
  DCHECK_LT(lsb, 32);
  if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
    int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
    and_(dst, src1, Operand(mask), LeaveCC, cond);
    int shift_up = 32 - lsb - width;
    int shift_down = lsb + shift_up;
    if (shift_up != 0) {
      mov(dst, Operand(dst, LSL, shift_up), LeaveCC, cond);
    }
    if (shift_down != 0) {
      mov(dst, Operand(dst, ASR, shift_down), LeaveCC, cond);
    }
  } else {
    CpuFeatureScope scope(this, ARMv7);
    sbfx(dst, src1, lsb, width, cond);
  }
}
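
// Worked example (illustrative) for the fallback path: with lsb == 4 and
// width == 8, shift_up == 20 and shift_down == 24. Shifting the masked field
// left by 20 places its sign bit at bit 31, and the arithmetic shift right
// by 24 then sign-extends the field into the low 8 bits of dst.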

void TurboAssembler::Bfc(Register dst, Register src, int lsb, int width,
                         Condition cond) {
  DCHECK_LT(lsb, 32);
  if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
    int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
    bic(dst, src, Operand(mask));
  } else {
    CpuFeatureScope scope(this, ARMv7);
    Move(dst, src, cond);
    bfc(dst, lsb, width, cond);
  }
}

void TurboAssembler::LoadRoot(Register destination, RootIndex index,
                              Condition cond) {
  ldr(destination,
      MemOperand(kRootRegister, RootRegisterOffsetForRootIndex(index)), cond);
}

void MacroAssembler::RecordWriteField(Register object, int offset,
                                      Register value,
                                      LinkRegisterStatus lr_status,
                                      SaveFPRegsMode save_fp,
                                      RememberedSetAction remembered_set_action,
                                      SmiCheck smi_check) {
  ASM_CODE_COMMENT(this);
  // First, check if a write barrier is even needed. The tests below
  // catch stores of Smis.
  Label done;

  // Skip the barrier if we are writing a smi.
  if (smi_check == SmiCheck::kInline) {
    JumpIfSmi(value, &done);
  }

  // Although the object register is tagged, the offset is relative to the
  // start of the object, so the offset must be a multiple of kPointerSize.
  DCHECK(IsAligned(offset, kPointerSize));

  if (FLAG_debug_code) {
    ASM_CODE_COMMENT_STRING(this, "Verify slot_address");
    Label ok;
    UseScratchRegisterScope temps(this);
    Register scratch = temps.Acquire();
    DCHECK(!AreAliased(object, value, scratch));
    add(scratch, object, Operand(offset - kHeapObjectTag));
    tst(scratch, Operand(kPointerSize - 1));
    b(eq, &ok);
    stop();
    bind(&ok);
  }

  RecordWrite(object, Operand(offset - kHeapObjectTag), value, lr_status,
              save_fp, remembered_set_action, SmiCheck::kOmit);

  bind(&done);
}

void TurboAssembler::MaybeSaveRegisters(RegList registers) {
  if (registers == 0) return;
  ASM_CODE_COMMENT(this);
  RegList regs = 0;
  for (int i = 0; i < Register::kNumRegisters; ++i) {
    if ((registers >> i) & 1u) {
      regs |= Register::from_code(i).bit();
    }
  }
  stm(db_w, sp, regs);
}

void TurboAssembler::MaybeRestoreRegisters(RegList registers) {
  if (registers == 0) return;
  ASM_CODE_COMMENT(this);
  RegList regs = 0;
  for (int i = 0; i < Register::kNumRegisters; ++i) {
    if ((registers >> i) & 1u) {
      regs |= Register::from_code(i).bit();
    }
  }
  ldm(ia_w, sp, regs);
}

void TurboAssembler::CallEphemeronKeyBarrier(Register object, Operand offset,
                                             SaveFPRegsMode fp_mode) {
  ASM_CODE_COMMENT(this);
  RegList registers = WriteBarrierDescriptor::ComputeSavedRegisters(object);
  MaybeSaveRegisters(registers);

  Register object_parameter = WriteBarrierDescriptor::ObjectRegister();
  Register slot_address_parameter =
      WriteBarrierDescriptor::SlotAddressRegister();
  MoveObjectAndSlot(object_parameter, slot_address_parameter, object, offset);

  Call(isolate()->builtins()->code_handle(
           Builtins::GetEphemeronKeyBarrierStub(fp_mode)),
       RelocInfo::CODE_TARGET);
  MaybeRestoreRegisters(registers);
}

void TurboAssembler::CallRecordWriteStubSaveRegisters(
    Register object, Operand offset, RememberedSetAction remembered_set_action,
    SaveFPRegsMode fp_mode, StubCallMode mode) {
  ASM_CODE_COMMENT(this);
  RegList registers = WriteBarrierDescriptor::ComputeSavedRegisters(object);
  MaybeSaveRegisters(registers);

  Register object_parameter = WriteBarrierDescriptor::ObjectRegister();
  Register slot_address_parameter =
      WriteBarrierDescriptor::SlotAddressRegister();
  MoveObjectAndSlot(object_parameter, slot_address_parameter, object, offset);

  CallRecordWriteStub(object_parameter, slot_address_parameter,
                      remembered_set_action, fp_mode, mode);

  MaybeRestoreRegisters(registers);
}

void TurboAssembler::CallRecordWriteStub(
    Register object, Register slot_address,
    RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
    StubCallMode mode) {
  ASM_CODE_COMMENT(this);
  DCHECK_EQ(WriteBarrierDescriptor::ObjectRegister(), object);
  DCHECK_EQ(WriteBarrierDescriptor::SlotAddressRegister(), slot_address);
#if V8_ENABLE_WEBASSEMBLY
  if (mode == StubCallMode::kCallWasmRuntimeStub) {
    auto wasm_target =
        wasm::WasmCode::GetRecordWriteStub(remembered_set_action, fp_mode);
    Call(wasm_target, RelocInfo::WASM_STUB_CALL);
#else
  if (false) {
#endif
  } else {
    Builtin builtin =
        Builtins::GetRecordWriteStub(remembered_set_action, fp_mode);
    if (options().inline_offheap_trampolines) {
      CallBuiltin(builtin);
    } else {
      Handle<Code> code_target = isolate()->builtins()->code_handle(builtin);
      Call(code_target, RelocInfo::CODE_TARGET);
    }
  }
}

void TurboAssembler::MoveObjectAndSlot(Register dst_object, Register dst_slot,
                                       Register object, Operand offset) {
  DCHECK_NE(dst_object, dst_slot);
  DCHECK(offset.IsRegister() || offset.IsImmediate());
  // If `offset` is a register, it cannot overlap with `object`.
  DCHECK_IMPLIES(offset.IsRegister(), offset.rm() != object);

  // If the slot register does not overlap with the object register, we can
  // overwrite it.
  if (dst_slot != object) {
    add(dst_slot, object, offset);
    Move(dst_object, object);
    return;
  }

  DCHECK_EQ(dst_slot, object);

  // If the destination object register does not overlap with the offset
  // register, we can overwrite it.
  if (!offset.IsRegister() || (offset.rm() != dst_object)) {
    Move(dst_object, dst_slot);
    add(dst_slot, dst_slot, offset);
    return;
  }

  DCHECK_EQ(dst_object, offset.rm());

  // We only have `dst_slot` and `dst_object` left as distinct registers, so
  // we have to swap them. We write this as an add+sub sequence to avoid using
  // a scratch register.
  add(dst_slot, dst_slot, dst_object);
  sub(dst_object, dst_slot, dst_object);
}
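
// Worked example (illustrative) of the add+sub swap: let a = dst_slot
// (aliasing `object`) and b = dst_object (aliasing the offset register).
// After `add a, a, b` we have a == object + offset (the slot address), and
// `sub b, a, b` then yields b == object, so both outputs land in the right
// registers without a scratch.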

// The register 'object' contains a heap object pointer. The heap object tag is
// shifted away. A scratch register also needs to be available.
void MacroAssembler::RecordWrite(Register object, Operand offset,
                                 Register value, LinkRegisterStatus lr_status,
                                 SaveFPRegsMode fp_mode,
                                 RememberedSetAction remembered_set_action,
                                 SmiCheck smi_check) {
  DCHECK(!AreAliased(object, value));
  if (FLAG_debug_code) {
    ASM_CODE_COMMENT_STRING(this, "Verify slot_address");
    UseScratchRegisterScope temps(this);
    Register scratch = temps.Acquire();
    DCHECK(!AreAliased(object, value, scratch));
    add(scratch, object, offset);
    ldr(scratch, MemOperand(scratch));
    cmp(scratch, value);
    Check(eq, AbortReason::kWrongAddressOrValuePassedToRecordWrite);
  }

  if ((remembered_set_action == RememberedSetAction::kOmit &&
       !FLAG_incremental_marking) ||
      FLAG_disable_write_barriers) {
    return;
  }

  // First, check if a write barrier is even needed. The tests below
  // catch stores of smis and stores into the young generation.
  Label done;

  if (smi_check == SmiCheck::kInline) {
    JumpIfSmi(value, &done);
  }

  CheckPageFlag(value, MemoryChunk::kPointersToHereAreInterestingMask, eq,
                &done);
  CheckPageFlag(object, MemoryChunk::kPointersFromHereAreInterestingMask, eq,
                &done);

  // Record the actual write.
  if (lr_status == kLRHasNotBeenSaved) {
    push(lr);
  }

  Register slot_address = WriteBarrierDescriptor::SlotAddressRegister();
  DCHECK(!AreAliased(object, value, slot_address));
  DCHECK(!offset.IsRegister());
  add(slot_address, object, offset);
  CallRecordWriteStub(object, slot_address, remembered_set_action, fp_mode);
  if (lr_status == kLRHasNotBeenSaved) {
    pop(lr);
  }

  if (FLAG_debug_code) Move(slot_address, Operand(kZapValue));

  bind(&done);
}

void TurboAssembler::PushCommonFrame(Register marker_reg) {
  ASM_CODE_COMMENT(this);
  if (marker_reg.is_valid()) {
    if (marker_reg.code() > fp.code()) {
      stm(db_w, sp, fp.bit() | lr.bit());
      mov(fp, Operand(sp));
      Push(marker_reg);
    } else {
      stm(db_w, sp, marker_reg.bit() | fp.bit() | lr.bit());
      add(fp, sp, Operand(kPointerSize));
    }
  } else {
    stm(db_w, sp, fp.bit() | lr.bit());
    mov(fp, sp);
  }
}

void TurboAssembler::PushStandardFrame(Register function_reg) {
  ASM_CODE_COMMENT(this);
  DCHECK(!function_reg.is_valid() || function_reg.code() < cp.code());
  stm(db_w, sp,
      (function_reg.is_valid() ? function_reg.bit() : 0) | cp.bit() | fp.bit() |
          lr.bit());
  int offset = -StandardFrameConstants::kContextOffset;
  offset += function_reg.is_valid() ? kPointerSize : 0;
  add(fp, sp, Operand(offset));
  Push(kJavaScriptCallArgCountRegister);
}

void TurboAssembler::VFPCanonicalizeNaN(const DwVfpRegister dst,
                                        const DwVfpRegister src,
                                        const Condition cond) {
  // Subtracting 0.0 preserves all inputs except for signalling NaNs, which
  // become quiet NaNs. We use vsub rather than vadd because vsub preserves
  // -0.0 inputs: -0.0 + 0.0 = 0.0, but -0.0 - 0.0 = -0.0.
  vsub(dst, src, kDoubleRegZero, cond);
}

void TurboAssembler::VFPCompareAndSetFlags(const SwVfpRegister src1,
                                           const SwVfpRegister src2,
                                           const Condition cond) {
  // Compare and move FPSCR flags to the normal condition flags.
  VFPCompareAndLoadFlags(src1, src2, pc, cond);
}

void TurboAssembler::VFPCompareAndSetFlags(const SwVfpRegister src1,
                                           const float src2,
                                           const Condition cond) {
  // Compare and move FPSCR flags to the normal condition flags.
  VFPCompareAndLoadFlags(src1, src2, pc, cond);
}

void TurboAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1,
                                           const DwVfpRegister src2,
                                           const Condition cond) {
  // Compare and move FPSCR flags to the normal condition flags.
  VFPCompareAndLoadFlags(src1, src2, pc, cond);
}

void TurboAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1,
                                           const double src2,
                                           const Condition cond) {
  // Compare and move FPSCR flags to the normal condition flags.
  VFPCompareAndLoadFlags(src1, src2, pc, cond);
}

void TurboAssembler::VFPCompareAndLoadFlags(const SwVfpRegister src1,
                                            const SwVfpRegister src2,
                                            const Register fpscr_flags,
                                            const Condition cond) {
  // Compare and load FPSCR.
  vcmp(src1, src2, cond);
  vmrs(fpscr_flags, cond);
}

void TurboAssembler::VFPCompareAndLoadFlags(const SwVfpRegister src1,
                                            const float src2,
                                            const Register fpscr_flags,
                                            const Condition cond) {
  // Compare and load FPSCR.
  vcmp(src1, src2, cond);
  vmrs(fpscr_flags, cond);
}

void TurboAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1,
                                            const DwVfpRegister src2,
                                            const Register fpscr_flags,
                                            const Condition cond) {
  // Compare and load FPSCR.
  vcmp(src1, src2, cond);
  vmrs(fpscr_flags, cond);
}

void TurboAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1,
                                            const double src2,
                                            const Register fpscr_flags,
                                            const Condition cond) {
  // Compare and load FPSCR.
  vcmp(src1, src2, cond);
  vmrs(fpscr_flags, cond);
}

void TurboAssembler::VmovHigh(Register dst, DwVfpRegister src) {
  if (src.code() < 16) {
    const LowDwVfpRegister loc = LowDwVfpRegister::from_code(src.code());
    vmov(dst, loc.high());
  } else {
    vmov(NeonS32, dst, src, 1);
  }
}

void TurboAssembler::VmovHigh(DwVfpRegister dst, Register src) {
  if (dst.code() < 16) {
    const LowDwVfpRegister loc = LowDwVfpRegister::from_code(dst.code());
    vmov(loc.high(), src);
  } else {
    vmov(NeonS32, dst, 1, src);
  }
}

void TurboAssembler::VmovLow(Register dst, DwVfpRegister src) {
  if (src.code() < 16) {
    const LowDwVfpRegister loc = LowDwVfpRegister::from_code(src.code());
    vmov(dst, loc.low());
  } else {
    vmov(NeonS32, dst, src, 0);
  }
}

void TurboAssembler::VmovLow(DwVfpRegister dst, Register src) {
  if (dst.code() < 16) {
    const LowDwVfpRegister loc = LowDwVfpRegister::from_code(dst.code());
    vmov(loc.low(), src);
  } else {
    vmov(NeonS32, dst, 0, src);
  }
}

void TurboAssembler::VmovExtended(Register dst, int src_code) {
  DCHECK_LE(SwVfpRegister::kNumRegisters, src_code);
  DCHECK_GT(SwVfpRegister::kNumRegisters * 2, src_code);
  if (src_code & 0x1) {
    VmovHigh(dst, DwVfpRegister::from_code(src_code / 2));
  } else {
    VmovLow(dst, DwVfpRegister::from_code(src_code / 2));
  }
}

void TurboAssembler::VmovExtended(int dst_code, Register src) {
  DCHECK_LE(SwVfpRegister::kNumRegisters, dst_code);
  DCHECK_GT(SwVfpRegister::kNumRegisters * 2, dst_code);
  if (dst_code & 0x1) {
    VmovHigh(DwVfpRegister::from_code(dst_code / 2), src);
  } else {
    VmovLow(DwVfpRegister::from_code(dst_code / 2), src);
  }
}

void TurboAssembler::VmovExtended(int dst_code, int src_code) {
  if (src_code == dst_code) return;

  if (src_code < SwVfpRegister::kNumRegisters &&
      dst_code < SwVfpRegister::kNumRegisters) {
    // src and dst are both s-registers.
    vmov(SwVfpRegister::from_code(dst_code),
         SwVfpRegister::from_code(src_code));
    return;
  }
  DwVfpRegister dst_d_reg = DwVfpRegister::from_code(dst_code / 2);
  DwVfpRegister src_d_reg = DwVfpRegister::from_code(src_code / 2);
  int dst_offset = dst_code & 1;
  int src_offset = src_code & 1;
  if (CpuFeatures::IsSupported(NEON)) {
    UseScratchRegisterScope temps(this);
    DwVfpRegister scratch = temps.AcquireD();
    // On Neon we can shift and insert from d-registers.
    if (src_offset == dst_offset) {
      // Offsets are the same, use vdup to copy the source to the opposite
      // lane.
      vdup(Neon32, scratch, src_d_reg, src_offset);
      // Here we are extending the lifetime of scratch.
      src_d_reg = scratch;
      src_offset = dst_offset ^ 1;
    }
    if (dst_offset) {
      if (dst_d_reg == src_d_reg) {
        vdup(Neon32, dst_d_reg, src_d_reg, 0);
      } else {
        vsli(Neon64, dst_d_reg, src_d_reg, 32);
      }
    } else {
      if (dst_d_reg == src_d_reg) {
        vdup(Neon32, dst_d_reg, src_d_reg, 1);
      } else {
        vsri(Neon64, dst_d_reg, src_d_reg, 32);
      }
    }
    return;
  }

  // Without Neon, use the scratch registers to move src and/or dst into
  // s-registers.
  UseScratchRegisterScope temps(this);
  LowDwVfpRegister d_scratch = temps.AcquireLowD();
  LowDwVfpRegister d_scratch2 = temps.AcquireLowD();
  int s_scratch_code = d_scratch.low().code();
  int s_scratch_code2 = d_scratch2.low().code();
  if (src_code < SwVfpRegister::kNumRegisters) {
    // src is an s-register, dst is not.
    vmov(d_scratch, dst_d_reg);
    vmov(SwVfpRegister::from_code(s_scratch_code + dst_offset),
         SwVfpRegister::from_code(src_code));
    vmov(dst_d_reg, d_scratch);
  } else if (dst_code < SwVfpRegister::kNumRegisters) {
    // dst is an s-register, src is not.
    vmov(d_scratch, src_d_reg);
    vmov(SwVfpRegister::from_code(dst_code),
         SwVfpRegister::from_code(s_scratch_code + src_offset));
  } else {
    // Neither src nor dst is an s-register. Both scratch double registers
    // are available when there are 32 VFP registers.
    vmov(d_scratch, src_d_reg);
    vmov(d_scratch2, dst_d_reg);
    vmov(SwVfpRegister::from_code(s_scratch_code + dst_offset),
         SwVfpRegister::from_code(s_scratch_code2 + src_offset));
    vmov(dst_d_reg, d_scratch2);
  }
}

void TurboAssembler::VmovExtended(int dst_code, const MemOperand& src) {
  if (dst_code < SwVfpRegister::kNumRegisters) {
    vldr(SwVfpRegister::from_code(dst_code), src);
  } else {
    UseScratchRegisterScope temps(this);
    LowDwVfpRegister scratch = temps.AcquireLowD();
    // TODO(bbudge) If Neon supported, use load single lane form of vld1.
    int dst_s_code = scratch.low().code() + (dst_code & 1);
    vmov(scratch, DwVfpRegister::from_code(dst_code / 2));
    vldr(SwVfpRegister::from_code(dst_s_code), src);
    vmov(DwVfpRegister::from_code(dst_code / 2), scratch);
  }
}

void TurboAssembler::VmovExtended(const MemOperand& dst, int src_code) {
  if (src_code < SwVfpRegister::kNumRegisters) {
    vstr(SwVfpRegister::from_code(src_code), dst);
  } else {
    // TODO(bbudge) If Neon supported, use store single lane form of vst1.
    UseScratchRegisterScope temps(this);
    LowDwVfpRegister scratch = temps.AcquireLowD();
    int src_s_code = scratch.low().code() + (src_code & 1);
    vmov(scratch, DwVfpRegister::from_code(src_code / 2));
    vstr(SwVfpRegister::from_code(src_s_code), dst);
  }
}

void TurboAssembler::ExtractLane(Register dst, QwNeonRegister src,
                                 NeonDataType dt, int lane) {
  int size = NeonSz(dt);  // 0, 1, 2
  int byte = lane << size;
  int double_word = byte >> kDoubleSizeLog2;
  int double_byte = byte & (kDoubleSize - 1);
  int double_lane = double_byte >> size;
  DwVfpRegister double_source =
      DwVfpRegister::from_code(src.code() * 2 + double_word);
  vmov(dt, dst, double_source, double_lane);
}

void TurboAssembler::ExtractLane(Register dst, DwVfpRegister src,
                                 NeonDataType dt, int lane) {
  int size = NeonSz(dt);  // 0, 1, 2
  int byte = lane << size;
  int double_byte = byte & (kDoubleSize - 1);
  int double_lane = double_byte >> size;
  vmov(dt, dst, src, double_lane);
}

void TurboAssembler::ExtractLane(SwVfpRegister dst, QwNeonRegister src,
                                 int lane) {
  int s_code = src.code() * 4 + lane;
  VmovExtended(dst.code(), s_code);
}

void TurboAssembler::ExtractLane(DwVfpRegister dst, QwNeonRegister src,
                                 int lane) {
  DwVfpRegister double_dst = DwVfpRegister::from_code(src.code() * 2 + lane);
  vmov(dst, double_dst);
}

void TurboAssembler::ReplaceLane(QwNeonRegister dst, QwNeonRegister src,
                                 Register src_lane, NeonDataType dt, int lane) {
  Move(dst, src);
  int size = NeonSz(dt);  // 0, 1, 2
  int byte = lane << size;
  int double_word = byte >> kDoubleSizeLog2;
  int double_byte = byte & (kDoubleSize - 1);
  int double_lane = double_byte >> size;
  DwVfpRegister double_dst =
      DwVfpRegister::from_code(dst.code() * 2 + double_word);
  vmov(dt, double_dst, double_lane, src_lane);
}

void TurboAssembler::ReplaceLane(QwNeonRegister dst, QwNeonRegister src,
                                 SwVfpRegister src_lane, int lane) {
  Move(dst, src);
  int s_code = dst.code() * 4 + lane;
  VmovExtended(s_code, src_lane.code());
}

void TurboAssembler::ReplaceLane(QwNeonRegister dst, QwNeonRegister src,
                                 DwVfpRegister src_lane, int lane) {
  Move(dst, src);
  DwVfpRegister double_dst = DwVfpRegister::from_code(dst.code() * 2 + lane);
  vmov(double_dst, src_lane);
}

void TurboAssembler::LoadLane(NeonSize sz, NeonListOperand dst_list,
                              uint8_t lane, NeonMemOperand src) {
  if (sz == Neon64) {
    // vld1s is not valid for Neon64.
    vld1(Neon64, dst_list, src);
  } else {
    vld1s(sz, dst_list, lane, src);
  }
}

void TurboAssembler::StoreLane(NeonSize sz, NeonListOperand src_list,
                               uint8_t lane, NeonMemOperand dst) {
  if (sz == Neon64) {
    // vst1s is not valid for Neon64.
    vst1(Neon64, src_list, dst);
  } else {
    vst1s(sz, src_list, lane, dst);
  }
}

void TurboAssembler::LslPair(Register dst_low, Register dst_high,
                             Register src_low, Register src_high,
                             Register shift) {
  DCHECK(!AreAliased(dst_high, src_low));
  DCHECK(!AreAliased(dst_high, shift));
  UseScratchRegisterScope temps(this);
  Register scratch = temps.Acquire();

  Label less_than_32;
  Label done;
  rsb(scratch, shift, Operand(32), SetCC);
  b(gt, &less_than_32);
  // If shift >= 32
  and_(scratch, shift, Operand(0x1F));
  lsl(dst_high, src_low, Operand(scratch));
  mov(dst_low, Operand(0));
  jmp(&done);
  bind(&less_than_32);
  // If shift < 32
  lsl(dst_high, src_high, Operand(shift));
  orr(dst_high, dst_high, Operand(src_low, LSR, scratch));
  lsl(dst_low, src_low, Operand(shift));
  bind(&done);
}
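
// Illustrative note: for 0 < shift < 32, the rsb leaves scratch == 32 - shift,
// so the pair shift computes
//   dst_high = (src_high << shift) | (src_low >> (32 - shift))
//   dst_low  = src_low << shift
// i.e. bits shifted out of the low word are carried into the high word.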

void TurboAssembler::LslPair(Register dst_low, Register dst_high,
                             Register src_low, Register src_high,
                             uint32_t shift) {
  DCHECK_GE(63, shift);
  DCHECK(!AreAliased(dst_high, src_low));

  if (shift == 0) {
    Move(dst_high, src_high);
    Move(dst_low, src_low);
  } else if (shift == 32) {
    Move(dst_high, src_low);
    Move(dst_low, Operand(0));
  } else if (shift >= 32) {
    shift &= 0x1F;
    lsl(dst_high, src_low, Operand(shift));
    mov(dst_low, Operand(0));
  } else {
    lsl(dst_high, src_high, Operand(shift));
    orr(dst_high, dst_high, Operand(src_low, LSR, 32 - shift));
    lsl(dst_low, src_low, Operand(shift));
  }
}

void TurboAssembler::LsrPair(Register dst_low, Register dst_high,
                             Register src_low, Register src_high,
                             Register shift) {
  DCHECK(!AreAliased(dst_low, src_high));
  DCHECK(!AreAliased(dst_low, shift));
  UseScratchRegisterScope temps(this);
  Register scratch = temps.Acquire();

  Label less_than_32;
  Label done;
  rsb(scratch, shift, Operand(32), SetCC);
  b(gt, &less_than_32);
  // If shift >= 32
  and_(scratch, shift, Operand(0x1F));
  lsr(dst_low, src_high, Operand(scratch));
  mov(dst_high, Operand(0));
  jmp(&done);
  bind(&less_than_32);
  // If shift < 32
  lsr(dst_low, src_low, Operand(shift));
  orr(dst_low, dst_low, Operand(src_high, LSL, scratch));
  lsr(dst_high, src_high, Operand(shift));
  bind(&done);
}

void TurboAssembler::LsrPair(Register dst_low, Register dst_high,
                             Register src_low, Register src_high,
                             uint32_t shift) {
  DCHECK_GE(63, shift);
  DCHECK(!AreAliased(dst_low, src_high));

  if (shift == 32) {
    mov(dst_low, src_high);
    mov(dst_high, Operand(0));
  } else if (shift > 32) {
    shift &= 0x1F;
    lsr(dst_low, src_high, Operand(shift));
    mov(dst_high, Operand(0));
  } else if (shift == 0) {
    Move(dst_low, src_low);
    Move(dst_high, src_high);
  } else {
    lsr(dst_low, src_low, Operand(shift));
    orr(dst_low, dst_low, Operand(src_high, LSL, 32 - shift));
    lsr(dst_high, src_high, Operand(shift));
  }
}

void TurboAssembler::AsrPair(Register dst_low, Register dst_high,
                             Register src_low, Register src_high,
                             Register shift) {
  DCHECK(!AreAliased(dst_low, src_high));
  DCHECK(!AreAliased(dst_low, shift));
  UseScratchRegisterScope temps(this);
  Register scratch = temps.Acquire();

  Label less_than_32;
  Label done;
  rsb(scratch, shift, Operand(32), SetCC);
  b(gt, &less_than_32);
  // If shift >= 32
  and_(scratch, shift, Operand(0x1F));
  asr(dst_low, src_high, Operand(scratch));
  asr(dst_high, src_high, Operand(31));
  jmp(&done);
  bind(&less_than_32);
  // If shift < 32
  lsr(dst_low, src_low, Operand(shift));
  orr(dst_low, dst_low, Operand(src_high, LSL, scratch));
  asr(dst_high, src_high, Operand(shift));
  bind(&done);
}

void TurboAssembler::AsrPair(Register dst_low, Register dst_high,
                             Register src_low, Register src_high,
                             uint32_t shift) {
  DCHECK_GE(63, shift);
  DCHECK(!AreAliased(dst_low, src_high));

  if (shift == 32) {
    mov(dst_low, src_high);
    asr(dst_high, src_high, Operand(31));
  } else if (shift > 32) {
    shift &= 0x1F;
    asr(dst_low, src_high, Operand(shift));
    asr(dst_high, src_high, Operand(31));
  } else if (shift == 0) {
    Move(dst_low, src_low);
    Move(dst_high, src_high);
  } else {
    lsr(dst_low, src_low, Operand(shift));
    orr(dst_low, dst_low, Operand(src_high, LSL, 32 - shift));
    asr(dst_high, src_high, Operand(shift));
  }
}

void TurboAssembler::StubPrologue(StackFrame::Type type) {
  ASM_CODE_COMMENT(this);
  UseScratchRegisterScope temps(this);
  Register scratch = temps.Acquire();
  mov(scratch, Operand(StackFrame::TypeToMarker(type)));
  PushCommonFrame(scratch);
}

void TurboAssembler::Prologue() { PushStandardFrame(r1); }

void TurboAssembler::DropArguments(Register count, ArgumentsCountType type,
                                   ArgumentsCountMode mode) {
  int receiver_bytes = (mode == kCountExcludesReceiver) ? kPointerSize : 0;
  switch (type) {
    case kCountIsInteger: {
      add(sp, sp, Operand(count, LSL, kPointerSizeLog2), LeaveCC);
      break;
    }
    case kCountIsSmi: {
      STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
      add(sp, sp, Operand(count, LSL, kPointerSizeLog2 - kSmiTagSize), LeaveCC);
      break;
    }
    case kCountIsBytes: {
      add(sp, sp, count, LeaveCC);
      break;
    }
  }
  if (receiver_bytes != 0) {
    add(sp, sp, Operand(receiver_bytes), LeaveCC);
  }
}
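
// Illustrative note: for kCountIsSmi the Smi tag is folded into the shift. A
// Smi count n is encoded as n << 1, so shifting by
// kPointerSizeLog2 - kSmiTagSize == 2 - 1 == 1 adds n * 4 bytes to sp without
// a separate untagging instruction.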

void TurboAssembler::DropArgumentsAndPushNewReceiver(Register argc,
                                                     Register receiver,
                                                     ArgumentsCountType type,
                                                     ArgumentsCountMode mode) {
  DCHECK(!AreAliased(argc, receiver));
  if (mode == kCountExcludesReceiver) {
    // Drop arguments without receiver and override old receiver.
    DropArguments(argc, type, kCountIncludesReceiver);
    str(receiver, MemOperand(sp, 0));
  } else {
    DropArguments(argc, type, mode);
    push(receiver);
  }
}

void TurboAssembler::EnterFrame(StackFrame::Type type,
                                bool load_constant_pool_pointer_reg) {
  ASM_CODE_COMMENT(this);
  // r0-r3: preserved
  UseScratchRegisterScope temps(this);
  Register scratch = no_reg;
  if (!StackFrame::IsJavaScript(type)) {
    scratch = temps.Acquire();
    mov(scratch, Operand(StackFrame::TypeToMarker(type)));
  }
  PushCommonFrame(scratch);
#if V8_ENABLE_WEBASSEMBLY
  if (type == StackFrame::WASM) Push(kWasmInstanceRegister);
#endif  // V8_ENABLE_WEBASSEMBLY
}

int TurboAssembler::LeaveFrame(StackFrame::Type type) {
  ASM_CODE_COMMENT(this);
  // r0: preserved
  // r1: preserved
  // r2: preserved

  // Drop the execution stack down to the frame pointer and restore
  // the caller frame pointer and return address.
  mov(sp, fp);
  int frame_ends = pc_offset();
  ldm(ia_w, sp, fp.bit() | lr.bit());
  return frame_ends;
}

#ifdef V8_OS_WIN
void TurboAssembler::AllocateStackSpace(Register bytes_scratch) {
  // "Functions that allocate 4 KB or more on the stack must ensure that each
  // page prior to the final page is touched in order." Source:
  // https://docs.microsoft.com/en-us/cpp/build/overview-of-arm-abi-conventions?view=vs-2019#stack
  ASM_CODE_COMMENT(this);
  UseScratchRegisterScope temps(this);
  DwVfpRegister scratch = temps.AcquireD();
  Label check_offset;
  Label touch_next_page;
  jmp(&check_offset);
  bind(&touch_next_page);
  sub(sp, sp, Operand(kStackPageSize));
  // Just to touch the page, before we increment further.
  vldr(scratch, MemOperand(sp));
  sub(bytes_scratch, bytes_scratch, Operand(kStackPageSize));

  bind(&check_offset);
  cmp(bytes_scratch, Operand(kStackPageSize));
  b(gt, &touch_next_page);

  sub(sp, sp, bytes_scratch);
}

void TurboAssembler::AllocateStackSpace(int bytes) {
  ASM_CODE_COMMENT(this);
  DCHECK_GE(bytes, 0);
  UseScratchRegisterScope temps(this);
  DwVfpRegister scratch = no_dreg;
  while (bytes > kStackPageSize) {
    if (scratch == no_dreg) {
      scratch = temps.AcquireD();
    }
    sub(sp, sp, Operand(kStackPageSize));
    vldr(scratch, MemOperand(sp));
    bytes -= kStackPageSize;
  }
  if (bytes == 0) return;
  sub(sp, sp, Operand(bytes));
}
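
// Illustrative note: the vldr from [sp] exists only to read the freshly
// exposed page so the Windows stack guard sees pages touched in order; a VFP
// scratch register is used so that no general-purpose register is clobbered.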
1473 #endif
1474
1475 void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
1476 StackFrame::Type frame_type) {
1477 ASM_CODE_COMMENT(this);
1478 DCHECK(frame_type == StackFrame::EXIT ||
1479 frame_type == StackFrame::BUILTIN_EXIT);
1480 UseScratchRegisterScope temps(this);
1481 Register scratch = temps.Acquire();
1482
1483 // Set up the frame structure on the stack.
1484 DCHECK_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement);
1485 DCHECK_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset);
1486 DCHECK_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset);
1487 mov(scratch, Operand(StackFrame::TypeToMarker(frame_type)));
1488 PushCommonFrame(scratch);
1489 // Reserve room for saved entry sp.
1490 sub(sp, fp, Operand(ExitFrameConstants::kFixedFrameSizeFromFp));
1491 if (FLAG_debug_code) {
1492 mov(scratch, Operand::Zero());
1493 str(scratch, MemOperand(fp, ExitFrameConstants::kSPOffset));
1494 }
1495
1496 // Save the frame pointer and the context in top.
1497 Move(scratch, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
1498 isolate()));
1499 str(fp, MemOperand(scratch));
1500 Move(scratch,
1501 ExternalReference::Create(IsolateAddressId::kContextAddress, isolate()));
1502 str(cp, MemOperand(scratch));
1503
1504 // Optionally save all double registers.
1505 if (save_doubles) {
1506 SaveFPRegs(sp, scratch);
1507 // Note that d0 will be accessible at
1508 // fp - ExitFrameConstants::kFrameSize -
1509 // DwVfpRegister::kNumRegisters * kDoubleSize,
1510 // since the sp slot and code slot were pushed after the fp.
1511 }
1512
1513 // Reserve place for the return address and stack space and align the frame
1514 // preparing for calling the runtime function.
1515 const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
1516 AllocateStackSpace((stack_space + 1) * kPointerSize);
1517 if (frame_alignment > 0) {
1518 DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
1519 and_(sp, sp, Operand(-frame_alignment));
1520 }
1521
1522 // Set the exit frame sp value to point just before the return address
1523 // location.
1524 add(scratch, sp, Operand(kPointerSize));
1525 str(scratch, MemOperand(fp, ExitFrameConstants::kSPOffset));
1526 }
1527
1528 int TurboAssembler::ActivationFrameAlignment() {
1529 #if V8_HOST_ARCH_ARM
1530 // Running on the real platform. Use the alignment as mandated by the local
1531 // environment.
1532 // Note: This will break if we ever start generating snapshots on one ARM
1533 // platform for another ARM platform with a different alignment.
1534 return base::OS::ActivationFrameAlignment();
1535 #else // V8_HOST_ARCH_ARM
1536 // If we are using the simulator then we should always align to the expected
1537 // alignment. As the simulator is used to generate snapshots we do not know
1538 // if the target platform will need alignment, so this is controlled from a
1539 // flag.
1540 return FLAG_sim_stack_alignment;
1541 #endif // V8_HOST_ARCH_ARM
1542 }
1543
1544 void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
1545 bool argument_count_is_length) {
1546 ASM_CODE_COMMENT(this);
1547 ConstantPoolUnavailableScope constant_pool_unavailable(this);
1548 UseScratchRegisterScope temps(this);
1549 Register scratch = temps.Acquire();
1550
1551 // Optionally restore all double registers.
1552 if (save_doubles) {
1553 // Calculate the stack location of the saved doubles and restore them.
1554 const int offset = ExitFrameConstants::kFixedFrameSizeFromFp;
1555 sub(r3, fp, Operand(offset + DwVfpRegister::kNumRegisters * kDoubleSize));
1556 RestoreFPRegs(r3, scratch);
1557 }
1558
1559 // Clear top frame.
1560 mov(r3, Operand::Zero());
1561 Move(scratch, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
1562 isolate()));
1563 str(r3, MemOperand(scratch));
1564
1565 // Restore current context from top and clear it in debug mode.
1566 Move(scratch,
1567 ExternalReference::Create(IsolateAddressId::kContextAddress, isolate()));
1568 ldr(cp, MemOperand(scratch));
1569 #ifdef DEBUG
1570 mov(r3, Operand(Context::kInvalidContext));
1571 Move(scratch,
1572 ExternalReference::Create(IsolateAddressId::kContextAddress, isolate()));
1573 str(r3, MemOperand(scratch));
1574 #endif
1575
1576 // Tear down the exit frame, pop the arguments, and return.
1577 mov(sp, Operand(fp));
1578 ldm(ia_w, sp, fp.bit() | lr.bit());
1579 if (argument_count.is_valid()) {
1580 if (argument_count_is_length) {
1581 add(sp, sp, argument_count);
1582 } else {
1583 add(sp, sp, Operand(argument_count, LSL, kPointerSizeLog2));
1584 }
1585 }
1586 }
1587
1588 void TurboAssembler::MovFromFloatResult(const DwVfpRegister dst) {
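// The C ABI returns a double in d0 under the hardfloat variant of EABI, or in
// the r0/r1 register pair under softfloat.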
1589 if (use_eabi_hardfloat()) {
1590 Move(dst, d0);
1591 } else {
1592 vmov(dst, r0, r1);
1593 }
1594 }
1595
1596 // On ARM this is just a synonym to make the purpose clear.
1597 void TurboAssembler::MovFromFloatParameter(DwVfpRegister dst) {
1598 MovFromFloatResult(dst);
1599 }
1600
1601 void MacroAssembler::LoadStackLimit(Register destination, StackLimitKind kind) {
1602 ASM_CODE_COMMENT(this);
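// The limit (real or interrupt-triggering) lives in the isolate data and is
// addressed relative to the root register.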
1603 DCHECK(root_array_available());
1604 Isolate* isolate = this->isolate();
1605 ExternalReference limit =
1606 kind == StackLimitKind::kRealStackLimit
1607 ? ExternalReference::address_of_real_jslimit(isolate)
1608 : ExternalReference::address_of_jslimit(isolate);
1609 DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit));
1610
1611 intptr_t offset =
1612 TurboAssembler::RootRegisterOffsetForExternalReference(isolate, limit);
1613 CHECK(is_int32(offset));
1614 ldr(destination, MemOperand(kRootRegister, offset));
1615 }
1616
1617 void MacroAssembler::StackOverflowCheck(Register num_args, Register scratch,
1618 Label* stack_overflow) {
1619 ASM_CODE_COMMENT(this);
1620 // Check the stack for overflow. We are not trying to catch
1621 // interruptions (e.g. debug break and preemption) here, so the "real stack
1622 // limit" is checked.
1623 LoadStackLimit(scratch, StackLimitKind::kRealStackLimit);
1624 // Make scratch the space we have left. The stack might already be overflowed
1625 // here, which will cause scratch to become negative.
1626 sub(scratch, sp, scratch);
1627 // Check if the arguments will overflow the stack.
1628 cmp(scratch, Operand(num_args, LSL, kPointerSizeLog2));
1629 b(le, stack_overflow); // Signed comparison.
1630 }
1631
1632 void MacroAssembler::InvokePrologue(Register expected_parameter_count,
1633 Register actual_parameter_count,
1634 Label* done, InvokeType type) {
1635 ASM_CODE_COMMENT(this);
1636 Label regular_invoke;
1637 // r0: actual arguments count
1638 // r1: function (passed through to callee)
1639 // r2: expected arguments count
1640 DCHECK_EQ(actual_parameter_count, r0);
1641 DCHECK_EQ(expected_parameter_count, r2);
1642
1643 // If the expected parameter count is equal to the adaptor sentinel, there is
1644 // no need to push undefined values as arguments.
1645 if (kDontAdaptArgumentsSentinel != 0) {
1646 cmp(expected_parameter_count, Operand(kDontAdaptArgumentsSentinel));
1647 b(eq, &regular_invoke);
1648 }
1649
1650 // If overapplication or if the actual argument count is equal to the
1651 // formal parameter count, no need to push extra undefined values.
1652 sub(expected_parameter_count, expected_parameter_count,
1653 actual_parameter_count, SetCC);
1654 b(le, &regular_invoke);
1655
1656 Label stack_overflow;
1657 Register scratch = r4;
1658 StackOverflowCheck(expected_parameter_count, scratch, &stack_overflow);
1659
1660 // Underapplication. Move the arguments already on the stack, including the
1661 // receiver.
1662 {
1663 Label copy, check;
1664 Register num = r5, src = r6, dest = r9; // r7 and r8 are context and root.
1665 mov(src, sp);
1666 // Update stack pointer.
1667 lsl(scratch, expected_parameter_count, Operand(kSystemPointerSizeLog2));
1668 AllocateStackSpace(scratch);
1669 mov(dest, sp);
1670 mov(num, actual_parameter_count);
1671 b(&check);
1672 bind(&copy);
1673 ldr(scratch, MemOperand(src, kSystemPointerSize, PostIndex));
1674 str(scratch, MemOperand(dest, kSystemPointerSize, PostIndex));
1675 sub(num, num, Operand(1), SetCC);
1676 bind(&check);
1677 if (kJSArgcIncludesReceiver) {
1678 b(gt, &copy);
1679 } else {
1680 b(ge, &copy);
1681 }
1682 }
1683
1684 // Fill remaining expected arguments with undefined values.
1685 LoadRoot(scratch, RootIndex::kUndefinedValue);
1686 {
1687 Label loop;
1688 bind(&loop);
1689 str(scratch, MemOperand(r9, kSystemPointerSize, PostIndex));
1690 sub(expected_parameter_count, expected_parameter_count, Operand(1), SetCC);
1691 b(gt, &loop);
1692 }
1693 b(&regular_invoke);
1694
1695 bind(&stack_overflow);
1696 {
1697 FrameScope frame(
1698 this, has_frame() ? StackFrame::NO_FRAME_TYPE : StackFrame::INTERNAL);
1699 CallRuntime(Runtime::kThrowStackOverflow);
1700 bkpt(0);
1701 }
1702
1703 bind(&regular_invoke);
1704 }
1705
1706 void MacroAssembler::CallDebugOnFunctionCall(Register fun, Register new_target,
1707 Register expected_parameter_count,
1708 Register actual_parameter_count) {
1709 ASM_CODE_COMMENT(this);
1710 // Load receiver to pass it later to DebugOnFunctionCall hook.
1711 ldr(r4, ReceiverOperand(actual_parameter_count));
1712 FrameScope frame(
1713 this, has_frame() ? StackFrame::NO_FRAME_TYPE : StackFrame::INTERNAL);
1714
1715 SmiTag(expected_parameter_count);
1716 Push(expected_parameter_count);
1717
1718 SmiTag(actual_parameter_count);
1719 Push(actual_parameter_count);
1720
1721 if (new_target.is_valid()) {
1722 Push(new_target);
1723 }
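// The function is pushed twice: the second push (with the receiver in r4) is
// consumed as arguments by the runtime call; the first is restored below.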
1724 Push(fun);
1725 Push(fun);
1726 Push(r4);
1727 CallRuntime(Runtime::kDebugOnFunctionCall);
1728 Pop(fun);
1729 if (new_target.is_valid()) {
1730 Pop(new_target);
1731 }
1732
1733 Pop(actual_parameter_count);
1734 SmiUntag(actual_parameter_count);
1735
1736 Pop(expected_parameter_count);
1737 SmiUntag(expected_parameter_count);
1738 }
1739
1740 void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
1741 Register expected_parameter_count,
1742 Register actual_parameter_count,
1743 InvokeType type) {
1744 ASM_CODE_COMMENT(this);
1745 // You can't call a function without a valid frame.
1746 DCHECK_IMPLIES(type == InvokeType::kCall, has_frame());
1747 DCHECK_EQ(function, r1);
1748 DCHECK_IMPLIES(new_target.is_valid(), new_target == r3);
1749
1750 // On function call, call into the debugger if necessary.
1751 Label debug_hook, continue_after_hook;
1752 {
1753 ExternalReference debug_hook_active =
1754 ExternalReference::debug_hook_on_function_call_address(isolate());
1755 Move(r4, debug_hook_active);
1756 ldrsb(r4, MemOperand(r4));
1757 cmp(r4, Operand(0));
1758 b(ne, &debug_hook);
1759 }
1760 bind(&continue_after_hook);
1761
1762 // Clear the new.target register if not given.
1763 if (!new_target.is_valid()) {
1764 LoadRoot(r3, RootIndex::kUndefinedValue);
1765 }
1766
1767 Label done;
1768 InvokePrologue(expected_parameter_count, actual_parameter_count, &done, type);
1769 // We call indirectly through the code field in the function to
1770 // allow recompilation to take effect without changing any of the
1771 // call sites.
1772 Register code = kJavaScriptCallCodeStartRegister;
1773 ldr(code, FieldMemOperand(function, JSFunction::kCodeOffset));
1774 switch (type) {
1775 case InvokeType::kCall:
1776 CallCodeObject(code);
1777 break;
1778 case InvokeType::kJump:
1779 JumpCodeObject(code);
1780 break;
1781 }
1782 b(&done);
1783
1784 // Deferred debug hook.
1785 bind(&debug_hook);
1786 CallDebugOnFunctionCall(function, new_target, expected_parameter_count,
1787 actual_parameter_count);
1788 b(&continue_after_hook);
1789
1790 // Continue here if InvokePrologue handles the invocation itself, e.g. due to
1791 // mismatched parameter counts.
1792 bind(&done);
1793 }
1794
1795 void MacroAssembler::InvokeFunctionWithNewTarget(
1796 Register fun, Register new_target, Register actual_parameter_count,
1797 InvokeType type) {
1798 ASM_CODE_COMMENT(this);
1799 // You can't call a function without a valid frame.
1800 DCHECK_IMPLIES(type == InvokeType::kCall, has_frame());
1801
1802 // Contract with called JS functions requires that function is passed in r1.
1803 DCHECK_EQ(fun, r1);
1804
1805 Register expected_reg = r2;
1806 Register temp_reg = r4;
1807
1808 ldr(temp_reg, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
1809 ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
1810 ldrh(expected_reg,
1811 FieldMemOperand(temp_reg,
1812 SharedFunctionInfo::kFormalParameterCountOffset));
1813
1814 InvokeFunctionCode(fun, new_target, expected_reg, actual_parameter_count,
1815 type);
1816 }
1817
1818 void MacroAssembler::InvokeFunction(Register function,
1819 Register expected_parameter_count,
1820 Register actual_parameter_count,
1821 InvokeType type) {
1822 ASM_CODE_COMMENT(this);
1823 // You can't call a function without a valid frame.
1824 DCHECK_IMPLIES(type == InvokeType::kCall, has_frame());
1825
1826 // Contract with called JS functions requires that function is passed in r1.
1827 DCHECK_EQ(function, r1);
1828
1829 // Get the function and setup the context.
1830 ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
1831
1832 InvokeFunctionCode(r1, no_reg, expected_parameter_count,
1833 actual_parameter_count, type);
1834 }
1835
1836 void MacroAssembler::PushStackHandler() {
1837 ASM_CODE_COMMENT(this);
1838 // Adjust this code if the stack handler layout asserted below changes.
1839 STATIC_ASSERT(StackHandlerConstants::kSize == 2 * kPointerSize);
1840 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
1841
1842 Push(Smi::zero()); // Padding.
1843 // Link the current handler as the next handler.
1844 Move(r6,
1845 ExternalReference::Create(IsolateAddressId::kHandlerAddress, isolate()));
1846 ldr(r5, MemOperand(r6));
1847 push(r5);
1848 // Set this new handler as the current one.
1849 str(sp, MemOperand(r6));
1850 }
1851
1852 void MacroAssembler::PopStackHandler() {
1853 ASM_CODE_COMMENT(this);
1854 UseScratchRegisterScope temps(this);
1855 Register scratch = temps.Acquire();
1856 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
1857 pop(r1);
1858 Move(scratch,
1859 ExternalReference::Create(IsolateAddressId::kHandlerAddress, isolate()));
1860 str(r1, MemOperand(scratch));
1861 add(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
1862 }
1863
1864 void MacroAssembler::CompareObjectType(Register object, Register map,
1865 Register type_reg, InstanceType type) {
1866 ASM_CODE_COMMENT(this);
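// Loads the object's map into `map` and sets the condition flags by comparing
// the map's instance type with `type`; if `type_reg` is no_reg, a scratch
// register holds the instance type.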
1867 UseScratchRegisterScope temps(this);
1868 const Register temp = type_reg == no_reg ? temps.Acquire() : type_reg;
1869
1870 LoadMap(map, object);
1871 CompareInstanceType(map, temp, type);
1872 }
1873
1874 void MacroAssembler::CompareInstanceType(Register map, Register type_reg,
1875 InstanceType type) {
1876 ldrh(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
1877 cmp(type_reg, Operand(type));
1878 }
1879
1880 void MacroAssembler::CompareRange(Register value, unsigned lower_limit,
1881 unsigned higher_limit) {
1882 ASM_CODE_COMMENT(this);
1883 DCHECK_LT(lower_limit, higher_limit);
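// Unsigned trick: value - lower_limit is below higher_limit - lower_limit
// (unsigned) exactly when lower_limit <= value <= higher_limit, so callers
// can branch on a single unsigned condition (ls/hi).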
1884 if (lower_limit != 0) {
1885 UseScratchRegisterScope temps(this);
1886 Register scratch = temps.Acquire();
1887 sub(scratch, value, Operand(lower_limit));
1888 cmp(scratch, Operand(higher_limit - lower_limit));
1889 } else {
1890 cmp(value, Operand(higher_limit));
1891 }
1892 }
1893 void MacroAssembler::CompareInstanceTypeRange(Register map, Register type_reg,
1894 InstanceType lower_limit,
1895 InstanceType higher_limit) {
1896 ASM_CODE_COMMENT(this);
1897 DCHECK_LT(lower_limit, higher_limit);
1898 ldrh(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
1899 CompareRange(type_reg, lower_limit, higher_limit);
1900 }
1901
1902 void MacroAssembler::CompareRoot(Register obj, RootIndex index) {
1903 UseScratchRegisterScope temps(this);
1904 Register scratch = temps.Acquire();
1905 DCHECK(obj != scratch);
1906 LoadRoot(scratch, index);
1907 cmp(obj, scratch);
1908 }
1909
1910 void MacroAssembler::JumpIfIsInRange(Register value, unsigned lower_limit,
1911 unsigned higher_limit,
1912 Label* on_in_range) {
1913 ASM_CODE_COMMENT(this);
1914 CompareRange(value, lower_limit, higher_limit);
1915 b(ls, on_in_range);
1916 }
1917
1918 void TurboAssembler::TryInlineTruncateDoubleToI(Register result,
1919 DwVfpRegister double_input,
1920 Label* done) {
1921 ASM_CODE_COMMENT(this);
1922 UseScratchRegisterScope temps(this);
1923 SwVfpRegister single_scratch = SwVfpRegister::no_reg();
1924 if (temps.CanAcquireVfp<SwVfpRegister>()) {
1925 single_scratch = temps.AcquireS();
1926 } else {
1927 // Re-use the input as a scratch register. However, we can only do this if
1928 // the input register is d0-d15 as there are no s32+ registers.
1929 DCHECK_LT(double_input.code(), LowDwVfpRegister::kNumRegisters);
1930 LowDwVfpRegister double_scratch =
1931 LowDwVfpRegister::from_code(double_input.code());
1932 single_scratch = double_scratch.low();
1933 }
1934 vcvt_s32_f64(single_scratch, double_input);
1935 vmov(result, single_scratch);
1936
1937 Register scratch = temps.Acquire();
1938 // If result is not saturated (0x7FFFFFFF or 0x80000000), we are done.
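// Subtracting 1 maps both saturated values (kMinInt wraps to kMaxInt, and
// kMaxInt to 0x7FFFFFFE) to values at or above 0x7FFFFFFE in the signed
// comparison, while every non-saturated result stays below it.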
1939 sub(scratch, result, Operand(1));
1940 cmp(scratch, Operand(0x7FFFFFFE));
1941 b(lt, done);
1942 }
1943
1944 void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
1945 Register result,
1946 DwVfpRegister double_input,
1947 StubCallMode stub_mode) {
1948 ASM_CODE_COMMENT(this);
1949 Label done;
1950
1951 TryInlineTruncateDoubleToI(result, double_input, &done);
1952
1953 // If we fell through, the inline version didn't succeed, so call the stub.
1954 push(lr);
1955 AllocateStackSpace(kDoubleSize); // Put input on stack.
1956 vstr(double_input, MemOperand(sp, 0));
1957
1958 #if V8_ENABLE_WEBASSEMBLY
1959 if (stub_mode == StubCallMode::kCallWasmRuntimeStub) {
1960 Call(wasm::WasmCode::kDoubleToI, RelocInfo::WASM_STUB_CALL);
1961 #else
1962 // For balance.
1963 if (false) {
1964 #endif // V8_ENABLE_WEBASSEMBLY
1965 } else if (options().inline_offheap_trampolines) {
1966 CallBuiltin(Builtin::kDoubleToI);
1967 } else {
1968 Call(BUILTIN_CODE(isolate, DoubleToI), RelocInfo::CODE_TARGET);
1969 }
1970 ldr(result, MemOperand(sp, 0));
1971
1972 add(sp, sp, Operand(kDoubleSize));
1973 pop(lr);
1974
1975 bind(&done);
1976 }
1977
1978 void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
1979 SaveFPRegsMode save_doubles) {
1980 ASM_CODE_COMMENT(this);
1981 // All parameters are on the stack. r0 has the return value after call.
1982
1983 // If the expected number of arguments of the runtime function is
1984 // constant, we check that the actual number of arguments match the
1985 // expectation.
1986 CHECK(f->nargs < 0 || f->nargs == num_arguments);
1987
1988 // TODO(1236192): Most runtime routines don't need the number of
1989 // arguments passed in because it is constant. At some point we
1990 // should remove this need and make the runtime routine entry code
1991 // smarter.
1992 mov(r0, Operand(num_arguments));
1993 Move(r1, ExternalReference::Create(f));
1994 Handle<Code> code =
1995 CodeFactory::CEntry(isolate(), f->result_size, save_doubles);
1996 Call(code, RelocInfo::CODE_TARGET);
1997 }
1998
1999 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
2000 ASM_CODE_COMMENT(this);
2001 const Runtime::Function* function = Runtime::FunctionForId(fid);
2002 DCHECK_EQ(1, function->result_size);
2003 if (function->nargs >= 0) {
2004 // TODO(1236192): Most runtime routines don't need the number of
2005 // arguments passed in because it is constant. At some point we
2006 // should remove this need and make the runtime routine entry code
2007 // smarter.
2008 mov(r0, Operand(function->nargs));
2009 }
2010 JumpToExternalReference(ExternalReference::Create(fid));
2011 }
2012
2013 void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
2014 bool builtin_exit_frame) {
2015 #if defined(__thumb__)
2016 // Thumb mode builtin.
2017 DCHECK_EQ(builtin.address() & 1, 1);
2018 #endif
2019 Move(r1, builtin);
2020 Handle<Code> code = CodeFactory::CEntry(isolate(), 1, SaveFPRegsMode::kIgnore,
2021 ArgvMode::kStack, builtin_exit_frame);
2022 Jump(code, RelocInfo::CODE_TARGET);
2023 }
2024
2025 void MacroAssembler::JumpToInstructionStream(Address entry) {
2026 mov(kOffHeapTrampolineRegister, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
2027 Jump(kOffHeapTrampolineRegister);
2028 }
2029
2030 void MacroAssembler::LoadWeakValue(Register out, Register in,
2031 Label* target_if_cleared) {
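// Branch if `in` holds a cleared weak reference; otherwise strip the weak tag
// so that `out` holds a plain heap object pointer.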
2032 cmp(in, Operand(kClearedWeakHeapObjectLower32));
2033 b(eq, target_if_cleared);
2034
2035 and_(out, in, Operand(~kWeakHeapObjectMask));
2036 }
2037
2038 void MacroAssembler::EmitIncrementCounter(StatsCounter* counter, int value,
2039 Register scratch1,
2040 Register scratch2) {
2041 DCHECK_GT(value, 0);
2042 if (FLAG_native_code_counters && counter->Enabled()) {
2043 ASM_CODE_COMMENT(this);
2044 Move(scratch2, ExternalReference::Create(counter));
2045 ldr(scratch1, MemOperand(scratch2));
2046 add(scratch1, scratch1, Operand(value));
2047 str(scratch1, MemOperand(scratch2));
2048 }
2049 }
2050
2051 void MacroAssembler::EmitDecrementCounter(StatsCounter* counter, int value,
2052 Register scratch1,
2053 Register scratch2) {
2054 DCHECK_GT(value, 0);
2055 if (FLAG_native_code_counters && counter->Enabled()) {
2056 ASM_CODE_COMMENT(this);
2057 Move(scratch2, ExternalReference::Create(counter));
2058 ldr(scratch1, MemOperand(scratch2));
2059 sub(scratch1, scratch1, Operand(value));
2060 str(scratch1, MemOperand(scratch2));
2061 }
2062 }
2063
2064 void TurboAssembler::Assert(Condition cond, AbortReason reason) {
2065 if (FLAG_debug_code) Check(cond, reason);
2066 }
2067
2068 void TurboAssembler::AssertUnreachable(AbortReason reason) {
2069 if (FLAG_debug_code) Abort(reason);
2070 }
2071
2072 void TurboAssembler::Check(Condition cond, AbortReason reason) {
2073 Label L;
2074 b(cond, &L);
2075 Abort(reason);
2076 // will not return here
2077 bind(&L);
2078 }
2079
2080 void TurboAssembler::Abort(AbortReason reason) {
2081 ASM_CODE_COMMENT(this);
2082 Label abort_start;
2083 bind(&abort_start);
2084 if (FLAG_code_comments) {
2085 const char* msg = GetAbortReason(reason);
2086 RecordComment("Abort message: ");
2087 RecordComment(msg);
2088 }
2089
2090 // Avoid emitting call to builtin if requested.
2091 if (trap_on_abort()) {
2092 stop();
2093 return;
2094 }
2095
2096 if (should_abort_hard()) {
2097 // We don't care if we constructed a frame. Just pretend we did.
2098 FrameScope assume_frame(this, StackFrame::NO_FRAME_TYPE);
2099 Move32BitImmediate(r0, Operand(static_cast<int>(reason)));
2100 PrepareCallCFunction(1, 0, r1);
2101 Move(r1, ExternalReference::abort_with_reason());
2102 // Use Call directly to avoid any unneeded overhead. The function won't
2103 // return anyway.
2104 Call(r1);
2105 return;
2106 }
2107
2108 Move(r1, Smi::FromInt(static_cast<int>(reason)));
2109
2110 // Disable stub call restrictions to always allow calls to abort.
2111 if (!has_frame()) {
2112 // We don't actually want to generate a pile of code for this, so just
2113 // claim there is a stack frame, without generating one.
2114 FrameScope scope(this, StackFrame::NO_FRAME_TYPE);
2115 Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
2116 } else {
2117 Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
2118 }
2119 // will not return here
2120 }
2121
2122 void TurboAssembler::LoadMap(Register destination, Register object) {
2123 ldr(destination, FieldMemOperand(object, HeapObject::kMapOffset));
2124 }
2125
2126 void MacroAssembler::LoadGlobalProxy(Register dst) {
2127 ASM_CODE_COMMENT(this);
2128 LoadNativeContextSlot(dst, Context::GLOBAL_PROXY_INDEX);
2129 }
2130
2131 void MacroAssembler::LoadNativeContextSlot(Register dst, int index) {
2132 ASM_CODE_COMMENT(this);
2133 LoadMap(dst, cp);
2134 ldr(dst, FieldMemOperand(
2135 dst, Map::kConstructorOrBackPointerOrNativeContextOffset));
2136 ldr(dst, MemOperand(dst, Context::SlotOffset(index)));
2137 }
2138
2139 void TurboAssembler::InitializeRootRegister() {
2140 ASM_CODE_COMMENT(this);
2141 ExternalReference isolate_root = ExternalReference::isolate_root(isolate());
2142 mov(kRootRegister, Operand(isolate_root));
2143 }
2144
2145 void MacroAssembler::SmiTag(Register reg, SBit s) {
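// A Smi is the 31-bit value shifted left by one with a zero tag bit, so
// tagging is an add of the register to itself; passing SetCC lets callers
// detect payload overflow via the V flag.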
2146 add(reg, reg, Operand(reg), s);
2147 }
2148
2149 void MacroAssembler::SmiTag(Register dst, Register src, SBit s) {
2150 add(dst, src, Operand(src), s);
2151 }
2152
2153 void MacroAssembler::SmiTst(Register value) {
2154 tst(value, Operand(kSmiTagMask));
2155 }
2156
2157 void TurboAssembler::JumpIfSmi(Register value, Label* smi_label) {
2158 tst(value, Operand(kSmiTagMask));
2159 b(eq, smi_label);
2160 }
2161
2162 void TurboAssembler::JumpIfEqual(Register x, int32_t y, Label* dest) {
2163 cmp(x, Operand(y));
2164 b(eq, dest);
2165 }
2166
2167 void TurboAssembler::JumpIfLessThan(Register x, int32_t y, Label* dest) {
2168 cmp(x, Operand(y));
2169 b(lt, dest);
2170 }
2171
2172 void MacroAssembler::JumpIfNotSmi(Register value, Label* not_smi_label) {
2173 tst(value, Operand(kSmiTagMask));
2174 b(ne, not_smi_label);
2175 }
2176
2177 void MacroAssembler::AssertNotSmi(Register object) {
2178 if (!FLAG_debug_code) return;
2179 ASM_CODE_COMMENT(this);
2180 STATIC_ASSERT(kSmiTag == 0);
2181 tst(object, Operand(kSmiTagMask));
2182 Check(ne, AbortReason::kOperandIsASmi);
2183 }
2184
2185 void MacroAssembler::AssertSmi(Register object) {
2186 if (!FLAG_debug_code) return;
2187 ASM_CODE_COMMENT(this);
2188 STATIC_ASSERT(kSmiTag == 0);
2189 tst(object, Operand(kSmiTagMask));
2190 Check(eq, AbortReason::kOperandIsNotASmi);
2191 }
2192
2193 void MacroAssembler::AssertConstructor(Register object) {
2194 if (!FLAG_debug_code) return;
2195 ASM_CODE_COMMENT(this);
2196 STATIC_ASSERT(kSmiTag == 0);
2197 tst(object, Operand(kSmiTagMask));
2198 Check(ne, AbortReason::kOperandIsASmiAndNotAConstructor);
2199 push(object);
2200 LoadMap(object, object);
2201 ldrb(object, FieldMemOperand(object, Map::kBitFieldOffset));
2202 tst(object, Operand(Map::Bits1::IsConstructorBit::kMask));
2203 pop(object);
2204 Check(ne, AbortReason::kOperandIsNotAConstructor);
2205 }
2206
2207 void MacroAssembler::AssertFunction(Register object) {
2208 if (!FLAG_debug_code) return;
2209 ASM_CODE_COMMENT(this);
2210 STATIC_ASSERT(kSmiTag == 0);
2211 tst(object, Operand(kSmiTagMask));
2212 Check(ne, AbortReason::kOperandIsASmiAndNotAFunction);
2213 push(object);
2214 LoadMap(object, object);
2215 CompareInstanceTypeRange(object, object, FIRST_JS_FUNCTION_TYPE,
2216 LAST_JS_FUNCTION_TYPE);
2217 pop(object);
2218 Check(ls, AbortReason::kOperandIsNotAFunction);
2219 }
2220
2221 void MacroAssembler::AssertBoundFunction(Register object) {
2222 if (!FLAG_debug_code) return;
2223 ASM_CODE_COMMENT(this);
2224 STATIC_ASSERT(kSmiTag == 0);
2225 tst(object, Operand(kSmiTagMask));
2226 Check(ne, AbortReason::kOperandIsASmiAndNotABoundFunction);
2227 push(object);
2228 CompareObjectType(object, object, object, JS_BOUND_FUNCTION_TYPE);
2229 pop(object);
2230 Check(eq, AbortReason::kOperandIsNotABoundFunction);
2231 }
2232
2233 void MacroAssembler::AssertGeneratorObject(Register object) {
2234 if (!FLAG_debug_code) return;
2235 ASM_CODE_COMMENT(this);
2236 tst(object, Operand(kSmiTagMask));
2237 Check(ne, AbortReason::kOperandIsASmiAndNotAGeneratorObject);
2238
2239 // Load map
2240 Register map = object;
2241 push(object);
2242 LoadMap(map, object);
2243
2244 // Check if JSGeneratorObject
2245 Label do_check;
2246 Register instance_type = object;
2247 CompareInstanceType(map, instance_type, JS_GENERATOR_OBJECT_TYPE);
2248 b(eq, &do_check);
2249
2250 // Check if JSAsyncFunctionObject (See MacroAssembler::CompareInstanceType)
2251 cmp(instance_type, Operand(JS_ASYNC_FUNCTION_OBJECT_TYPE));
2252 b(eq, &do_check);
2253
2254 // Check if JSAsyncGeneratorObject (See MacroAssembler::CompareInstanceType)
2255 cmp(instance_type, Operand(JS_ASYNC_GENERATOR_OBJECT_TYPE));
2256
2257 bind(&do_check);
2258 // Restore generator object to register and perform assertion
2259 pop(object);
2260 Check(eq, AbortReason::kOperandIsNotAGeneratorObject);
2261 }
2262
2263 void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
2264 Register scratch) {
2265 if (!FLAG_debug_code) return;
2266 ASM_CODE_COMMENT(this);
2267 Label done_checking;
2268 AssertNotSmi(object);
2269 CompareRoot(object, RootIndex::kUndefinedValue);
2270 b(eq, &done_checking);
2271 LoadMap(scratch, object);
2272 CompareInstanceType(scratch, scratch, ALLOCATION_SITE_TYPE);
2273 Assert(eq, AbortReason::kExpectedUndefinedOrCell);
2274 bind(&done_checking);
2275 }
2276
2277 void TurboAssembler::CheckFor32DRegs(Register scratch) {
2278 ASM_CODE_COMMENT(this);
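// Leaves ne set when all 32 D-registers are available and eq when only
// d0-d15 exist; callers predicate their VFP instructions on these flags.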
2279 Move(scratch, ExternalReference::cpu_features());
2280 ldr(scratch, MemOperand(scratch));
2281 tst(scratch, Operand(1u << VFP32DREGS));
2282 }
2283
2284 void TurboAssembler::SaveFPRegs(Register location, Register scratch) {
2285 ASM_CODE_COMMENT(this);
2286 CpuFeatureScope scope(this, VFP32DREGS, CpuFeatureScope::kDontCheckSupported);
2287 CheckFor32DRegs(scratch);
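// Store d16-d31 only when they exist, but still reserve their slots (the
// conditional sub) so the frame layout is the same on every CPU.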
2288 vstm(db_w, location, d16, d31, ne);
2289 sub(location, location, Operand(16 * kDoubleSize), LeaveCC, eq);
2290 vstm(db_w, location, d0, d15);
2291 }
2292
2293 void TurboAssembler::RestoreFPRegs(Register location, Register scratch) {
2294 ASM_CODE_COMMENT(this);
2295 CpuFeatureScope scope(this, VFP32DREGS, CpuFeatureScope::kDontCheckSupported);
2296 CheckFor32DRegs(scratch);
2297 vldm(ia_w, location, d0, d15);
2298 vldm(ia_w, location, d16, d31, ne);
2299 add(location, location, Operand(16 * kDoubleSize), LeaveCC, eq);
2300 }
2301
2302 void TurboAssembler::SaveFPRegsToHeap(Register location, Register scratch) {
2303 ASM_CODE_COMMENT(this);
2304 CpuFeatureScope scope(this, VFP32DREGS, CpuFeatureScope::kDontCheckSupported);
2305 CheckFor32DRegs(scratch);
2306 vstm(ia_w, location, d0, d15);
2307 vstm(ia_w, location, d16, d31, ne);
2308 add(location, location, Operand(16 * kDoubleSize), LeaveCC, eq);
2309 }
2310
2311 void TurboAssembler::RestoreFPRegsFromHeap(Register location,
2312 Register scratch) {
2313 ASM_CODE_COMMENT(this);
2314 CpuFeatureScope scope(this, VFP32DREGS, CpuFeatureScope::kDontCheckSupported);
2315 CheckFor32DRegs(scratch);
2316 vldm(ia_w, location, d0, d15);
2317 vldm(ia_w, location, d16, d31, ne);
2318 add(location, location, Operand(16 * kDoubleSize), LeaveCC, eq);
2319 }
2320
2321 template <typename T>
2322 void TurboAssembler::FloatMaxHelper(T result, T left, T right,
2323 Label* out_of_line) {
2324 // This trivial case is caught sooner, so that the out-of-line code can be
2325 // completely avoided.
2326 DCHECK(left != right);
2327
2328 if (CpuFeatures::IsSupported(ARMv8)) {
2329 CpuFeatureScope scope(this, ARMv8);
2330 VFPCompareAndSetFlags(left, right);
2331 b(vs, out_of_line);
2332 vmaxnm(result, left, right);
2333 } else {
2334 Label done;
2335 VFPCompareAndSetFlags(left, right);
2336 b(vs, out_of_line);
2337 // Avoid a conditional instruction if the result register is unique.
2338 bool aliased_result_reg = result == left || result == right;
2339 Move(result, right, aliased_result_reg ? mi : al);
2340 Move(result, left, gt);
2341 b(ne, &done);
2342 // Left and right are equal, but check for +/-0.
2343 VFPCompareAndSetFlags(left, 0.0);
2344 b(eq, out_of_line);
2345 // The arguments are equal and not zero, so it doesn't matter which input we
2346 // pick. We have already moved one input into the result (if it didn't
2347 // already alias) so there's nothing more to do.
2348 bind(&done);
2349 }
2350 }
2351
2352 template <typename T>
2353 void TurboAssembler::FloatMaxOutOfLineHelper(T result, T left, T right) {
2354 DCHECK(left != right);
2355
2356 // ARMv8: At least one of left and right is a NaN.
2357 // Anything else: At least one of left and right is a NaN, or both left and
2358 // right are zeroes with unknown sign.
2359
2360 // If left and right are +/-0, select the one with the most positive sign.
2361 // If left or right are NaN, vadd propagates the appropriate one.
2362 vadd(result, left, right);
2363 }
2364
2365 template <typename T>
2366 void TurboAssembler::FloatMinHelper(T result, T left, T right,
2367 Label* out_of_line) {
2368 // This trivial case is caught sooner, so that the out-of-line code can be
2369 // completely avoided.
2370 DCHECK(left != right);
2371
2372 if (CpuFeatures::IsSupported(ARMv8)) {
2373 CpuFeatureScope scope(this, ARMv8);
2374 VFPCompareAndSetFlags(left, right);
2375 b(vs, out_of_line);
2376 vminnm(result, left, right);
2377 } else {
2378 Label done;
2379 VFPCompareAndSetFlags(left, right);
2380 b(vs, out_of_line);
2381 // Avoid a conditional instruction if the result register is unique.
2382 bool aliased_result_reg = result == left || result == right;
2383 Move(result, left, aliased_result_reg ? mi : al);
2384 Move(result, right, gt);
2385 b(ne, &done);
2386 // Left and right are equal, but check for +/-0.
2387 VFPCompareAndSetFlags(left, 0.0);
2388 // If the arguments are equal and not zero, it doesn't matter which input we
2389 // pick. We have already moved one input into the result (if it didn't
2390 // already alias) so there's nothing more to do.
2391 b(ne, &done);
2392 // At this point, both left and right are either 0 or -0.
2393 // We could use a single 'vorr' instruction here if we had NEON support.
2394 // The algorithm used is -((-L) + (-R)), which is most efficiently expressed
2395 // as -((-L) - R).
2396 if (left == result) {
2397 DCHECK(right != result);
2398 vneg(result, left);
2399 vsub(result, result, right);
2400 vneg(result, result);
2401 } else {
2402 DCHECK(left != result);
2403 vneg(result, right);
2404 vsub(result, result, left);
2405 vneg(result, result);
2406 }
2407 bind(&done);
2408 }
2409 }
2410
2411 template <typename T>
2412 void TurboAssembler::FloatMinOutOfLineHelper(T result, T left, T right) {
2413 DCHECK(left != right);
2414
2415 // At least one of left and right is a NaN. Use vadd to propagate the NaN
2416 // appropriately. +/-0 is handled inline.
2417 vadd(result, left, right);
2418 }
2419
2420 void TurboAssembler::FloatMax(SwVfpRegister result, SwVfpRegister left,
2421 SwVfpRegister right, Label* out_of_line) {
2422 FloatMaxHelper(result, left, right, out_of_line);
2423 }
2424
2425 void TurboAssembler::FloatMin(SwVfpRegister result, SwVfpRegister left,
2426 SwVfpRegister right, Label* out_of_line) {
2427 FloatMinHelper(result, left, right, out_of_line);
2428 }
2429
2430 void TurboAssembler::FloatMax(DwVfpRegister result, DwVfpRegister left,
2431 DwVfpRegister right, Label* out_of_line) {
2432 FloatMaxHelper(result, left, right, out_of_line);
2433 }
2434
2435 void TurboAssembler::FloatMin(DwVfpRegister result, DwVfpRegister left,
2436 DwVfpRegister right, Label* out_of_line) {
2437 FloatMinHelper(result, left, right, out_of_line);
2438 }
2439
2440 void TurboAssembler::FloatMaxOutOfLine(SwVfpRegister result, SwVfpRegister left,
2441 SwVfpRegister right) {
2442 FloatMaxOutOfLineHelper(result, left, right);
2443 }
2444
2445 void TurboAssembler::FloatMinOutOfLine(SwVfpRegister result, SwVfpRegister left,
2446 SwVfpRegister right) {
2447 FloatMinOutOfLineHelper(result, left, right);
2448 }
2449
2450 void TurboAssembler::FloatMaxOutOfLine(DwVfpRegister result, DwVfpRegister left,
2451 DwVfpRegister right) {
2452 FloatMaxOutOfLineHelper(result, left, right);
2453 }
2454
2455 void TurboAssembler::FloatMinOutOfLine(DwVfpRegister result, DwVfpRegister left,
2456 DwVfpRegister right) {
2457 FloatMinOutOfLineHelper(result, left, right);
2458 }
2459
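// Up to four general-purpose arguments are passed in registers r0-r3.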
2460 static const int kRegisterPassedArguments = 4;
2461 // The hardfloat calling convention passes double arguments in registers d0-d7.
2462 static const int kDoubleRegisterPassedArguments = 8;
2463
2464 int TurboAssembler::CalculateStackPassedWords(int num_reg_arguments,
2465 int num_double_arguments) {
2466 int stack_passed_words = 0;
2467 if (use_eabi_hardfloat()) {
2468 // In the hard floating point calling convention, we can use the first 8
2469 // registers to pass doubles.
2470 if (num_double_arguments > kDoubleRegisterPassedArguments) {
2471 stack_passed_words +=
2472 2 * (num_double_arguments - kDoubleRegisterPassedArguments);
2473 }
2474 } else {
2475 // In the soft floating point calling convention, every double
2476 // argument is passed using two registers.
2477 num_reg_arguments += 2 * num_double_arguments;
2478 }
2479 // Up to four simple arguments are passed in registers r0..r3.
2480 if (num_reg_arguments > kRegisterPassedArguments) {
2481 stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
2482 }
2483 return stack_passed_words;
2484 }
2485
2486 void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
2487 int num_double_arguments,
2488 Register scratch) {
2489 ASM_CODE_COMMENT(this);
2490 int frame_alignment = ActivationFrameAlignment();
2491 int stack_passed_arguments =
2492 CalculateStackPassedWords(num_reg_arguments, num_double_arguments);
2493 if (frame_alignment > kPointerSize) {
2494 UseScratchRegisterScope temps(this);
2495 if (!scratch.is_valid()) scratch = temps.Acquire();
2496 // Make the stack end at the alignment boundary, and make room for the
2497 // stack-passed arguments (num_arguments - 4 words) and the original sp.
2498 mov(scratch, sp);
2499 AllocateStackSpace((stack_passed_arguments + 1) * kPointerSize);
2500 DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
2501 and_(sp, sp, Operand(-frame_alignment));
2502 str(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
2503 } else if (stack_passed_arguments > 0) {
2504 AllocateStackSpace(stack_passed_arguments * kPointerSize);
2505 }
2506 }
2507
2508 void TurboAssembler::MovToFloatParameter(DwVfpRegister src) {
2509 DCHECK(src == d0);
2510 if (!use_eabi_hardfloat()) {
2511 vmov(r0, r1, src);
2512 }
2513 }
2514
2515 // On ARM this is just a synonym to make the purpose clear.
2516 void TurboAssembler::MovToFloatResult(DwVfpRegister src) {
2517 MovToFloatParameter(src);
2518 }
2519
2520 void TurboAssembler::MovToFloatParameters(DwVfpRegister src1,
2521 DwVfpRegister src2) {
2522 DCHECK(src1 == d0);
2523 DCHECK(src2 == d1);
2524 if (!use_eabi_hardfloat()) {
2525 vmov(r0, r1, src1);
2526 vmov(r2, r3, src2);
2527 }
2528 }
2529
2530 void TurboAssembler::CallCFunction(ExternalReference function,
2531 int num_reg_arguments,
2532 int num_double_arguments) {
2533 UseScratchRegisterScope temps(this);
2534 Register scratch = temps.Acquire();
2535 Move(scratch, function);
2536 CallCFunctionHelper(scratch, num_reg_arguments, num_double_arguments);
2537 }
2538
2539 void TurboAssembler::CallCFunction(Register function, int num_reg_arguments,
2540 int num_double_arguments) {
2541 CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
2542 }
2543
2544 void TurboAssembler::CallCFunction(ExternalReference function,
2545 int num_arguments) {
2546 CallCFunction(function, num_arguments, 0);
2547 }
2548
2549 void TurboAssembler::CallCFunction(Register function, int num_arguments) {
2550 CallCFunction(function, num_arguments, 0);
2551 }
2552
2553 void TurboAssembler::CallCFunctionHelper(Register function,
2554 int num_reg_arguments,
2555 int num_double_arguments) {
2556 ASM_CODE_COMMENT(this);
2557 DCHECK_LE(num_reg_arguments + num_double_arguments, kMaxCParameters);
2558 DCHECK(has_frame());
2559 // Make sure that the stack is aligned before calling a C function unless
2560 // running in the simulator. The simulator has its own alignment check which
2561 // provides more information.
2562 #if V8_HOST_ARCH_ARM
2563 if (FLAG_debug_code) {
2564 int frame_alignment = base::OS::ActivationFrameAlignment();
2565 int frame_alignment_mask = frame_alignment - 1;
2566 if (frame_alignment > kPointerSize) {
2567 ASM_CODE_COMMENT_STRING(this, "Check stack alignment");
2568 DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
2569 Label alignment_as_expected;
2570 tst(sp, Operand(frame_alignment_mask));
2571 b(eq, &alignment_as_expected);
2572 // Don't use Check here, as it will call Runtime_Abort possibly
2573 // re-entering here.
2574 stop();
2575 bind(&alignment_as_expected);
2576 }
2577 }
2578 #endif
2579
2580 // Save the frame pointer and PC so that the stack layout remains iterable,
2581 // even without an ExitFrame, which normally exists between JS and C frames.
2582 Register addr_scratch = r4;
2583 // See x64 code for reasoning about how to address the isolate data fields.
2584 if (root_array_available()) {
2585 str(pc,
2586 MemOperand(kRootRegister, IsolateData::fast_c_call_caller_pc_offset()));
2587 str(fp,
2588 MemOperand(kRootRegister, IsolateData::fast_c_call_caller_fp_offset()));
2589 } else {
2590 DCHECK_NOT_NULL(isolate());
2591 Push(addr_scratch);
2592
2593 Move(addr_scratch,
2594 ExternalReference::fast_c_call_caller_pc_address(isolate()));
2595 str(pc, MemOperand(addr_scratch));
2596 Move(addr_scratch,
2597 ExternalReference::fast_c_call_caller_fp_address(isolate()));
2598 str(fp, MemOperand(addr_scratch));
2599
2600 Pop(addr_scratch);
2601 }
2602
2603 // Just call directly. The function called cannot cause a GC, or
2604 // allow preemption, so the return address in the link register
2605 // stays correct.
2606 Call(function);
2607
2608 // We don't unset the PC; the FP is the source of truth.
2609 Register zero_scratch = r5;
2610 Push(zero_scratch);
2611 mov(zero_scratch, Operand::Zero());
2612
2613 if (root_array_available()) {
2614 str(zero_scratch,
2615 MemOperand(kRootRegister, IsolateData::fast_c_call_caller_fp_offset()));
2616 } else {
2617 DCHECK_NOT_NULL(isolate());
2618 Push(addr_scratch);
2619 Move(addr_scratch,
2620 ExternalReference::fast_c_call_caller_fp_address(isolate()));
2621 str(zero_scratch, MemOperand(addr_scratch));
2622 Pop(addr_scratch);
2623 }
2624
2625 Pop(zero_scratch);
2626
2627 int stack_passed_arguments =
2628 CalculateStackPassedWords(num_reg_arguments, num_double_arguments);
2629 if (ActivationFrameAlignment() > kPointerSize) {
2630 ldr(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
2631 } else {
2632 add(sp, sp, Operand(stack_passed_arguments * kPointerSize));
2633 }
2634 }
2635
2636 void TurboAssembler::CheckPageFlag(Register object, int mask, Condition cc,
2637 Label* condition_met) {
2638 ASM_CODE_COMMENT(this);
2639 UseScratchRegisterScope temps(this);
2640 Register scratch = temps.Acquire();
2641 DCHECK(!AreAliased(object, scratch));
2642 DCHECK(cc == eq || cc == ne);
2643 Bfc(scratch, object, 0, kPageSizeBits);
2644 ldr(scratch, MemOperand(scratch, BasicMemoryChunk::kFlagsOffset));
2645 tst(scratch, Operand(mask));
2646 b(cc, condition_met);
2647 }
2648
2649 Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3,
2650 Register reg4, Register reg5,
2651 Register reg6) {
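// Returns the first allocatable general-purpose register that is not among
// the (up to six) given registers.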
2652 RegList regs = 0;
2653 if (reg1.is_valid()) regs |= reg1.bit();
2654 if (reg2.is_valid()) regs |= reg2.bit();
2655 if (reg3.is_valid()) regs |= reg3.bit();
2656 if (reg4.is_valid()) regs |= reg4.bit();
2657 if (reg5.is_valid()) regs |= reg5.bit();
2658 if (reg6.is_valid()) regs |= reg6.bit();
2659
2660 const RegisterConfiguration* config = RegisterConfiguration::Default();
2661 for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
2662 int code = config->GetAllocatableGeneralCode(i);
2663 Register candidate = Register::from_code(code);
2664 if (regs & candidate.bit()) continue;
2665 return candidate;
2666 }
2667 UNREACHABLE();
2668 }
2669
2670 void TurboAssembler::ComputeCodeStartAddress(Register dst) {
2671 ASM_CODE_COMMENT(this);
2672 // pc reads as the address of the current instruction plus kPcLoadDelta
// (8 bytes), so subtracting pc_offset() + kPcLoadDelta yields the code start.
2673 sub(dst, pc, Operand(pc_offset() + Instruction::kPcLoadDelta));
2674 }
2675
2676 void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
2677 DeoptimizeKind kind, Label* ret,
2678 Label*) {
2679 ASM_CODE_COMMENT(this);
2680
2681 // All constants should have been emitted prior to deoptimization exit
2682 // emission. See PrepareForDeoptimizationExits.
2683 DCHECK(!has_pending_constants());
2684 BlockConstPoolScope block_const_pool(this);
2685
2686 CHECK_LE(target, Builtins::kLastTier0);
2687 ldr(ip,
2688 MemOperand(kRootRegister, IsolateData::BuiltinEntrySlotOffset(target)));
2689 Call(ip);
2690 DCHECK_EQ(SizeOfCodeGeneratedSince(exit),
2691 (kind == DeoptimizeKind::kLazy)
2692 ? Deoptimizer::kLazyDeoptExitSize
2693 : Deoptimizer::kNonLazyDeoptExitSize);
2694
2695 if (kind == DeoptimizeKind::kEagerWithResume) {
2696 b(ret);
2697 DCHECK_EQ(SizeOfCodeGeneratedSince(exit),
2698 Deoptimizer::kEagerWithResumeBeforeArgsSize);
2699 }
2700
2701 // The above code must not emit constants either.
2702 DCHECK(!has_pending_constants());
2703 }
2704
2705 void TurboAssembler::Trap() { stop(); }
2706 void TurboAssembler::DebugBreak() { stop(); }
2707
2708 void TurboAssembler::I64x2BitMask(Register dst, QwNeonRegister src) {
2709 UseScratchRegisterScope temps(this);
2710 QwNeonRegister tmp1 = temps.AcquireQ();
2711 Register tmp = temps.Acquire();
2712
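// Shift each 64-bit lane's sign bit down to bit 0, then combine the two lane
// bits: lane 0 into bit 0 of dst, lane 1 into bit 1.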
2713 vshr(NeonU64, tmp1, src, 63);
2714 vmov(NeonU32, dst, tmp1.low(), 0);
2715 vmov(NeonU32, tmp, tmp1.high(), 0);
2716 add(dst, dst, Operand(tmp, LSL, 1));
2717 }
2718
2719 void TurboAssembler::I64x2Eq(QwNeonRegister dst, QwNeonRegister src1,
2720 QwNeonRegister src2) {
2721 UseScratchRegisterScope temps(this);
2722 Simd128Register scratch = temps.AcquireQ();
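// NEON has no 64-bit integer compare, so compare the 32-bit halves and AND
// each half with its partner (vrev64 swaps the halves within each 64-bit
// lane); a lane is all-ones only if both halves matched.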
2723 vceq(Neon32, dst, src1, src2);
2724 vrev64(Neon32, scratch, dst);
2725 vand(dst, dst, scratch);
2726 }
2727
2728 void TurboAssembler::I64x2Ne(QwNeonRegister dst, QwNeonRegister src1,
2729 QwNeonRegister src2) {
2730 UseScratchRegisterScope temps(this);
2731 Simd128Register tmp = temps.AcquireQ();
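// As in I64x2Eq, compare the 32-bit halves; vmvn/vorn then compute
// ~(dst & tmp), so a lane is all-ones when either half differs.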
2732 vceq(Neon32, dst, src1, src2);
2733 vrev64(Neon32, tmp, dst);
2734 vmvn(dst, dst);
2735 vorn(dst, dst, tmp);
2736 }
2737
2738 void TurboAssembler::I64x2GtS(QwNeonRegister dst, QwNeonRegister src1,
2739 QwNeonRegister src2) {
2740 ASM_CODE_COMMENT(this);
2741 vqsub(NeonS64, dst, src2, src1);
2742 vshr(NeonS64, dst, dst, 63);
2743 }
2744
2745 void TurboAssembler::I64x2GeS(QwNeonRegister dst, QwNeonRegister src1,
2746 QwNeonRegister src2) {
2747 ASM_CODE_COMMENT(this);
2748 vqsub(NeonS64, dst, src1, src2);
2749 vshr(NeonS64, dst, dst, 63);
2750 vmvn(dst, dst);
2751 }
2752
2753 void TurboAssembler::I64x2AllTrue(Register dst, QwNeonRegister src) {
2754 ASM_CODE_COMMENT(this);
2755 UseScratchRegisterScope temps(this);
2756 QwNeonRegister tmp = temps.AcquireQ();
2757 // src = | a | b | c | d |
2758 // tmp = | max(a,b) | max(c,d) | ...
2759 vpmax(NeonU32, tmp.low(), src.low(), src.high());
2760 // tmp = | max(a,b) == 0 | max(c,d) == 0 | ...
2761 vceq(Neon32, tmp, tmp, 0);
2762 // tmp = | max(a,b) == 0 or max(c,d) == 0 | ...
2763 vpmax(NeonU32, tmp.low(), tmp.low(), tmp.low());
2764 // dst = (max(a,b) == 0 || max(c,d) == 0)
2765 // dst will either be -1 or 0.
2766 vmov(NeonS32, dst, tmp.low(), 0);
2767 // dst = !dst (-1 -> 0, 0 -> 1)
2768 add(dst, dst, Operand(1));
2769 // This works because:
2770 // !dst
2771 // = !(max(a,b) == 0 || max(c,d) == 0)
2772 // = max(a,b) != 0 && max(c,d) != 0
2773 // = (a != 0 || b != 0) && (c != 0 || d != 0)
2774 // = definition of i64x2.all_true.
2775 }
2776
2777 void TurboAssembler::I64x2Abs(QwNeonRegister dst, QwNeonRegister src) {
2778 ASM_CODE_COMMENT(this);
2779 UseScratchRegisterScope temps(this);
2780 Simd128Register tmp = temps.AcquireQ();
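// abs(x) = (x ^ mask) - mask, where mask = x >> 63 (arithmetic): zero for
// non-negative lanes, all-ones for negative ones.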
2781 vshr(NeonS64, tmp, src, 63);
2782 veor(dst, src, tmp);
2783 vsub(Neon64, dst, dst, tmp);
2784 }
2785
2786 namespace {
2787 using AssemblerFunc = void (Assembler::*)(DwVfpRegister, SwVfpRegister,
2788 VFPConversionMode, const Condition);
2789 // Helper function for f64x2 convert low instructions.
2790 // This ensures that we do not overwrite src, if dst == src.
2791 void F64x2ConvertLowHelper(Assembler* assm, QwNeonRegister dst,
2792 QwNeonRegister src, AssemblerFunc convert_fn) {
2793 LowDwVfpRegister src_d = LowDwVfpRegister::from_code(src.low().code());
2794 UseScratchRegisterScope temps(assm);
2795 if (dst == src) {
2796 LowDwVfpRegister tmp = temps.AcquireLowD();
2797 assm->vmov(tmp, src_d);
2798 src_d = tmp;
2799 }
2800 // Default arguments are not part of the function type
2801 (assm->*convert_fn)(dst.low(), src_d.low(), kDefaultRoundToZero, al);
2802 (assm->*convert_fn)(dst.high(), src_d.high(), kDefaultRoundToZero, al);
2803 }
2804 } // namespace
2805
2806 void TurboAssembler::F64x2ConvertLowI32x4S(QwNeonRegister dst,
2807 QwNeonRegister src) {
2808 F64x2ConvertLowHelper(this, dst, src, &Assembler::vcvt_f64_s32);
2809 }
2810
2811 void TurboAssembler::F64x2ConvertLowI32x4U(QwNeonRegister dst,
2812 QwNeonRegister src) {
2813 F64x2ConvertLowHelper(this, dst, src, &Assembler::vcvt_f64_u32);
2814 }
2815
2816 void TurboAssembler::F64x2PromoteLowF32x4(QwNeonRegister dst,
2817 QwNeonRegister src) {
2818 F64x2ConvertLowHelper(this, dst, src, &Assembler::vcvt_f64_f32);
2819 }
2820
2821 } // namespace internal
2822 } // namespace v8
2823
2824 #endif // V8_TARGET_ARCH_ARM
2825