1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include <cstdint>
6 #if V8_TARGET_ARCH_X64
7
8 #include "src/base/bits.h"
9 #include "src/base/division-by-constant.h"
10 #include "src/base/utils/random-number-generator.h"
11 #include "src/codegen/callable.h"
12 #include "src/codegen/code-factory.h"
13 #include "src/codegen/cpu-features.h"
14 #include "src/codegen/external-reference-table.h"
15 #include "src/codegen/interface-descriptors-inl.h"
16 #include "src/codegen/macro-assembler.h"
17 #include "src/codegen/register-configuration.h"
18 #include "src/codegen/string-constants.h"
19 #include "src/codegen/x64/assembler-x64.h"
20 #include "src/codegen/x64/register-x64.h"
21 #include "src/common/external-pointer.h"
22 #include "src/common/globals.h"
23 #include "src/debug/debug.h"
24 #include "src/deoptimizer/deoptimizer.h"
25 #include "src/execution/frames-inl.h"
26 #include "src/heap/memory-chunk.h"
27 #include "src/init/bootstrapper.h"
28 #include "src/logging/counters.h"
29 #include "src/objects/objects-inl.h"
30 #include "src/objects/smi.h"
31 #include "src/snapshot/snapshot.h"
32
33 // Satisfy cpplint check, but don't include platform-specific header. It is
34 // included recursively via macro-assembler.h.
35 #if 0
36 #include "src/codegen/x64/macro-assembler-x64.h"
37 #endif
38
39 namespace v8 {
40 namespace internal {
41
42 Operand StackArgumentsAccessor::GetArgumentOperand(int index) const {
43 DCHECK_GE(index, 0);
44 // arg[0] = rsp + kPCOnStackSize;
45 // arg[i] = arg[0] + i * kSystemPointerSize;
46 return Operand(rsp, kPCOnStackSize + index * kSystemPointerSize);
47 }
48
49 void MacroAssembler::Load(Register destination, ExternalReference source) {
50 if (root_array_available_ && options().enable_root_relative_access) {
51 intptr_t delta = RootRegisterOffsetForExternalReference(isolate(), source);
52 if (is_int32(delta)) {
53 movq(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
54 return;
55 }
56 }
57 // Safe code.
58 if (destination == rax && !options().isolate_independent_code) {
59 load_rax(source);
60 } else {
61 movq(destination, ExternalReferenceAsOperand(source));
62 }
63 }
64
65 void MacroAssembler::Store(ExternalReference destination, Register source) {
66 if (root_array_available_ && options().enable_root_relative_access) {
67 intptr_t delta =
68 RootRegisterOffsetForExternalReference(isolate(), destination);
69 if (is_int32(delta)) {
70 movq(Operand(kRootRegister, static_cast<int32_t>(delta)), source);
71 return;
72 }
73 }
74 // Safe code.
75 if (source == rax && !options().isolate_independent_code) {
76 store_rax(destination);
77 } else {
78 movq(ExternalReferenceAsOperand(destination), source);
79 }
80 }
81
82 void TurboAssembler::LoadFromConstantsTable(Register destination,
83 int constant_index) {
84 DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kBuiltinsConstantsTable));
85 LoadRoot(destination, RootIndex::kBuiltinsConstantsTable);
86 LoadTaggedPointerField(
87 destination,
88 FieldOperand(destination, FixedArray::OffsetOfElementAt(constant_index)));
89 }
90
91 void TurboAssembler::LoadRootRegisterOffset(Register destination,
92 intptr_t offset) {
93 DCHECK(is_int32(offset));
94 if (offset == 0) {
95 Move(destination, kRootRegister);
96 } else {
97 leaq(destination, Operand(kRootRegister, static_cast<int32_t>(offset)));
98 }
99 }
100
101 void TurboAssembler::LoadRootRelative(Register destination, int32_t offset) {
102 movq(destination, Operand(kRootRegister, offset));
103 }
104
105 void TurboAssembler::LoadAddress(Register destination,
106 ExternalReference source) {
107 if (root_array_available_ && options().enable_root_relative_access) {
108 intptr_t delta = RootRegisterOffsetForExternalReference(isolate(), source);
109 if (is_int32(delta)) {
110 leaq(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
111 return;
112 }
113 }
114 // Safe code.
115 // TODO(jgruber,v8:8887): Also consider a root-relative load when generating
116 // non-isolate-independent code. In many cases it might be cheaper than
117 // embedding the relocatable value.
118 if (root_array_available_ && options().isolate_independent_code) {
119 IndirectLoadExternalReference(destination, source);
120 return;
121 }
122 Move(destination, source);
123 }
124
125 Operand TurboAssembler::ExternalReferenceAsOperand(ExternalReference reference,
126 Register scratch) {
127 if (root_array_available_ && options().enable_root_relative_access) {
128 int64_t delta =
129 RootRegisterOffsetForExternalReference(isolate(), reference);
130 if (is_int32(delta)) {
131 return Operand(kRootRegister, static_cast<int32_t>(delta));
132 }
133 }
134 if (root_array_available_ && options().isolate_independent_code) {
135 if (IsAddressableThroughRootRegister(isolate(), reference)) {
136 // Some external references can be efficiently loaded as an offset from
137 // kRootRegister.
138 intptr_t offset =
139 RootRegisterOffsetForExternalReference(isolate(), reference);
140 CHECK(is_int32(offset));
141 return Operand(kRootRegister, static_cast<int32_t>(offset));
142 } else {
143 // Otherwise, do a memory load from the external reference table.
144 movq(scratch, Operand(kRootRegister,
145 RootRegisterOffsetForExternalReferenceTableEntry(
146 isolate(), reference)));
147 return Operand(scratch, 0);
148 }
149 }
150 Move(scratch, reference);
151 return Operand(scratch, 0);
152 }
153
154 void MacroAssembler::PushAddress(ExternalReference source) {
155 LoadAddress(kScratchRegister, source);
156 Push(kScratchRegister);
157 }
158
159 Operand TurboAssembler::RootAsOperand(RootIndex index) {
160 DCHECK(root_array_available());
161 return Operand(kRootRegister, RootRegisterOffsetForRootIndex(index));
162 }
163
164 void TurboAssembler::LoadRoot(Register destination, RootIndex index) {
165 DCHECK(root_array_available_);
166 movq(destination, RootAsOperand(index));
167 }
168
169 void MacroAssembler::PushRoot(RootIndex index) {
170 DCHECK(root_array_available_);
171 Push(RootAsOperand(index));
172 }
173
174 void TurboAssembler::CompareRoot(Register with, RootIndex index) {
175 DCHECK(root_array_available_);
176 if (base::IsInRange(index, RootIndex::kFirstStrongOrReadOnlyRoot,
177 RootIndex::kLastStrongOrReadOnlyRoot)) {
178 cmp_tagged(with, RootAsOperand(index));
179 } else {
180 // Some smi roots contain system-pointer-sized values, like stack limits.
181 cmpq(with, RootAsOperand(index));
182 }
183 }
184
185 void TurboAssembler::CompareRoot(Operand with, RootIndex index) {
186 DCHECK(root_array_available_);
187 DCHECK(!with.AddressUsesRegister(kScratchRegister));
188 LoadRoot(kScratchRegister, index);
189 if (base::IsInRange(index, RootIndex::kFirstStrongOrReadOnlyRoot,
190 RootIndex::kLastStrongOrReadOnlyRoot)) {
191 cmp_tagged(with, kScratchRegister);
192 } else {
193 // Some smi roots contain system-pointer-sized values, like stack limits.
194 cmpq(with, kScratchRegister);
195 }
196 }
197
198 void TurboAssembler::LoadMap(Register destination, Register object) {
199 LoadTaggedPointerField(destination,
200 FieldOperand(object, HeapObject::kMapOffset));
201 #ifdef V8_MAP_PACKING
202 UnpackMapWord(destination);
203 #endif
204 }
205
206 void TurboAssembler::LoadTaggedPointerField(Register destination,
207 Operand field_operand) {
208 if (COMPRESS_POINTERS_BOOL) {
209 DecompressTaggedPointer(destination, field_operand);
210 } else {
211 mov_tagged(destination, field_operand);
212 }
213 }
214
215 #ifdef V8_MAP_PACKING
216 void TurboAssembler::UnpackMapWord(Register r) {
217 // Clear the top two bytes (which may include metadata). Must be in sync with
218 // MapWord::Unpack, and vice versa.
219 shlq(r, Immediate(16));
220 shrq(r, Immediate(16));
221 xorq(r, Immediate(Internals::kMapWordXorMask));
222 }
223 #endif
224
225 void TurboAssembler::LoadTaggedSignedField(Register destination,
226 Operand field_operand) {
227 if (COMPRESS_POINTERS_BOOL) {
228 DecompressTaggedSigned(destination, field_operand);
229 } else {
230 mov_tagged(destination, field_operand);
231 }
232 }
233
234 void TurboAssembler::LoadAnyTaggedField(Register destination,
235 Operand field_operand) {
236 if (COMPRESS_POINTERS_BOOL) {
237 DecompressAnyTagged(destination, field_operand);
238 } else {
239 mov_tagged(destination, field_operand);
240 }
241 }
242
243 void TurboAssembler::PushTaggedPointerField(Operand field_operand,
244 Register scratch) {
245 if (COMPRESS_POINTERS_BOOL) {
246 DCHECK(!field_operand.AddressUsesRegister(scratch));
247 DecompressTaggedPointer(scratch, field_operand);
248 Push(scratch);
249 } else {
250 Push(field_operand);
251 }
252 }
253
254 void TurboAssembler::PushTaggedAnyField(Operand field_operand,
255 Register scratch) {
256 if (COMPRESS_POINTERS_BOOL) {
257 DCHECK(!field_operand.AddressUsesRegister(scratch));
258 DecompressAnyTagged(scratch, field_operand);
259 Push(scratch);
260 } else {
261 Push(field_operand);
262 }
263 }
264
265 void TurboAssembler::SmiUntagField(Register dst, Operand src) {
266 SmiUntag(dst, src);
267 }
268
269 void TurboAssembler::StoreTaggedField(Operand dst_field_operand,
270 Immediate value) {
271 if (COMPRESS_POINTERS_BOOL) {
272 movl(dst_field_operand, value);
273 } else {
274 movq(dst_field_operand, value);
275 }
276 }
277
278 void TurboAssembler::StoreTaggedField(Operand dst_field_operand,
279 Register value) {
280 if (COMPRESS_POINTERS_BOOL) {
281 movl(dst_field_operand, value);
282 } else {
283 movq(dst_field_operand, value);
284 }
285 }
286
287 void TurboAssembler::StoreTaggedSignedField(Operand dst_field_operand,
288 Smi value) {
289 if (SmiValuesAre32Bits()) {
290 Move(kScratchRegister, value);
291 movq(dst_field_operand, kScratchRegister);
292 } else {
293 StoreTaggedField(dst_field_operand, Immediate(value));
294 }
295 }
296
297 void TurboAssembler::AtomicStoreTaggedField(Operand dst_field_operand,
298 Register value) {
299 if (COMPRESS_POINTERS_BOOL) {
300 movl(kScratchRegister, value);
301 xchgl(kScratchRegister, dst_field_operand);
302 } else {
303 movq(kScratchRegister, value);
304 xchgq(kScratchRegister, dst_field_operand);
305 }
306 }
307
308 void TurboAssembler::DecompressTaggedSigned(Register destination,
309 Operand field_operand) {
310 ASM_CODE_COMMENT(this);
311 movl(destination, field_operand);
312 }
313
314 void TurboAssembler::DecompressTaggedPointer(Register destination,
315 Operand field_operand) {
316 ASM_CODE_COMMENT(this);
317 movl(destination, field_operand);
318 addq(destination, kPtrComprCageBaseRegister);
319 }
320
321 void TurboAssembler::DecompressTaggedPointer(Register destination,
322 Register source) {
323 ASM_CODE_COMMENT(this);
324 movl(destination, source);
325 addq(destination, kPtrComprCageBaseRegister);
326 }
327
328 void TurboAssembler::DecompressAnyTagged(Register destination,
329 Operand field_operand) {
330 ASM_CODE_COMMENT(this);
331 movl(destination, field_operand);
332 addq(destination, kPtrComprCageBaseRegister);
333 }
334
335 void MacroAssembler::RecordWriteField(Register object, int offset,
336 Register value, Register slot_address,
337 SaveFPRegsMode save_fp,
338 RememberedSetAction remembered_set_action,
339 SmiCheck smi_check) {
340 ASM_CODE_COMMENT(this);
341 DCHECK(!AreAliased(object, value, slot_address));
342 // First, check if a write barrier is even needed. The tests below
343 // catch stores of Smis.
344 Label done;
345
346 // Skip barrier if writing a smi.
347 if (smi_check == SmiCheck::kInline) {
348 JumpIfSmi(value, &done);
349 }
350
351 // Although the object register is tagged, the offset is relative to the start
352 // of the object, so the offset must be a multiple of kTaggedSize.
353 DCHECK(IsAligned(offset, kTaggedSize));
354
355 leaq(slot_address, FieldOperand(object, offset));
356 if (FLAG_debug_code) {
357 ASM_CODE_COMMENT_STRING(this, "Debug check slot_address");
358 Label ok;
359 testb(slot_address, Immediate(kTaggedSize - 1));
360 j(zero, &ok, Label::kNear);
361 int3();
362 bind(&ok);
363 }
364
365 RecordWrite(object, slot_address, value, save_fp, remembered_set_action,
366 SmiCheck::kOmit);
367
368 bind(&done);
369
370 // Zap the clobbered input registers when running with the debug-code flag
371 // turned on, to provoke errors.
372 if (FLAG_debug_code) {
373 ASM_CODE_COMMENT_STRING(this, "Zap scratch registers");
374 Move(value, kZapValue, RelocInfo::NONE);
375 Move(slot_address, kZapValue, RelocInfo::NONE);
376 }
377 }
378
379 void TurboAssembler::LoadExternalPointerField(
380 Register destination, Operand field_operand, ExternalPointerTag tag,
381 Register scratch, IsolateRootLocation isolateRootLocation) {
382 DCHECK(!AreAliased(destination, scratch));
383 #ifdef V8_HEAP_SANDBOX
384 DCHECK(!field_operand.AddressUsesRegister(scratch));
385 if (isolateRootLocation == IsolateRootLocation::kInRootRegister) {
386 DCHECK(root_array_available_);
387 movq(scratch, Operand(kRootRegister,
388 IsolateData::external_pointer_table_offset() +
389 Internals::kExternalPointerTableBufferOffset));
390 } else {
391 DCHECK(isolateRootLocation == IsolateRootLocation::kInScratchRegister);
392 movq(scratch,
393 Operand(scratch, IsolateData::external_pointer_table_offset() +
394 Internals::kExternalPointerTableBufferOffset));
395 }
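// The field holds a 32-bit index into the external pointer table; scale it by
// the 8-byte entry size to load the actual external pointer.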
396 movl(destination, field_operand);
397 movq(destination, Operand(scratch, destination, times_8, 0));
398 if (tag != 0) {
399 movq(scratch, Immediate64(~tag));
400 andq(destination, scratch);
401 }
402 #else
403 movq(destination, field_operand);
404 #endif // V8_HEAP_SANDBOX
405 }
406
407 void TurboAssembler::MaybeSaveRegisters(RegList registers) {
408 if (registers == 0) return;
409 DCHECK_GT(NumRegs(registers), 0);
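// Registers are pushed in ascending register-code order; MaybeRestoreRegisters
// pops them in the reverse order.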
410 for (int i = 0; i < Register::kNumRegisters; ++i) {
411 if ((registers >> i) & 1u) {
412 pushq(Register::from_code(i));
413 }
414 }
415 }
416
417 void TurboAssembler::MaybeRestoreRegisters(RegList registers) {
418 if (registers == 0) return;
419 DCHECK_GT(NumRegs(registers), 0);
420 for (int i = Register::kNumRegisters - 1; i >= 0; --i) {
421 if ((registers >> i) & 1u) {
422 popq(Register::from_code(i));
423 }
424 }
425 }
426
427 void TurboAssembler::CallEphemeronKeyBarrier(Register object,
428 Register slot_address,
429 SaveFPRegsMode fp_mode) {
430 ASM_CODE_COMMENT(this);
431 DCHECK(!AreAliased(object, slot_address));
432 RegList registers =
433 WriteBarrierDescriptor::ComputeSavedRegisters(object, slot_address);
434 MaybeSaveRegisters(registers);
435
436 Register object_parameter = WriteBarrierDescriptor::ObjectRegister();
437 Register slot_address_parameter =
438 WriteBarrierDescriptor::SlotAddressRegister();
439 MovePair(slot_address_parameter, slot_address, object_parameter, object);
440
441 Call(isolate()->builtins()->code_handle(
442 Builtins::GetEphemeronKeyBarrierStub(fp_mode)),
443 RelocInfo::CODE_TARGET);
444 MaybeRestoreRegisters(registers);
445 }
446
447 void TurboAssembler::CallRecordWriteStubSaveRegisters(
448 Register object, Register slot_address,
449 RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
450 StubCallMode mode) {
451 ASM_CODE_COMMENT(this);
452 DCHECK(!AreAliased(object, slot_address));
453 RegList registers =
454 WriteBarrierDescriptor::ComputeSavedRegisters(object, slot_address);
455 MaybeSaveRegisters(registers);
456 Register object_parameter = WriteBarrierDescriptor::ObjectRegister();
457 Register slot_address_parameter =
458 WriteBarrierDescriptor::SlotAddressRegister();
459 MovePair(object_parameter, object, slot_address_parameter, slot_address);
460
461 CallRecordWriteStub(object_parameter, slot_address_parameter,
462 remembered_set_action, fp_mode, mode);
463 MaybeRestoreRegisters(registers);
464 }
465
466 void TurboAssembler::CallRecordWriteStub(
467 Register object, Register slot_address,
468 RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
469 StubCallMode mode) {
470 ASM_CODE_COMMENT(this);
471 // Use CallRecordWriteStubSaveRegisters if the object and slot registers
472 // need to be caller saved.
473 DCHECK_EQ(WriteBarrierDescriptor::ObjectRegister(), object);
474 DCHECK_EQ(WriteBarrierDescriptor::SlotAddressRegister(), slot_address);
475 #if V8_ENABLE_WEBASSEMBLY
476 if (mode == StubCallMode::kCallWasmRuntimeStub) {
477 // Use {near_call} for direct Wasm call within a module.
478 auto wasm_target =
479 wasm::WasmCode::GetRecordWriteStub(remembered_set_action, fp_mode);
480 near_call(wasm_target, RelocInfo::WASM_STUB_CALL);
481 #else
482 if (false) {
483 #endif
484 } else {
485 Builtin builtin =
486 Builtins::GetRecordWriteStub(remembered_set_action, fp_mode);
487 if (options().inline_offheap_trampolines) {
488 CallBuiltin(builtin);
489 } else {
490 Handle<Code> code_target = isolate()->builtins()->code_handle(builtin);
491 Call(code_target, RelocInfo::CODE_TARGET);
492 }
493 }
494 }
495
496 #ifdef V8_IS_TSAN
497 void TurboAssembler::CallTSANStoreStub(Register address, Register value,
498 SaveFPRegsMode fp_mode, int size,
499 StubCallMode mode,
500 std::memory_order order) {
501 ASM_CODE_COMMENT(this);
502 DCHECK(!AreAliased(address, value));
503 TSANStoreDescriptor descriptor;
504 RegList registers = descriptor.allocatable_registers();
505
506 MaybeSaveRegisters(registers);
507
508 Register address_parameter(
509 descriptor.GetRegisterParameter(TSANStoreDescriptor::kAddress));
510 Register value_parameter(
511 descriptor.GetRegisterParameter(TSANStoreDescriptor::kValue));
512
513 // Prepare the argument registers for calling the TSAN store stub.
514 MovePair(address_parameter, address, value_parameter, value);
515
516 if (isolate()) {
517 Builtin builtin = CodeFactory::GetTSANStoreStub(fp_mode, size, order);
518 Handle<Code> code_target = isolate()->builtins()->code_handle(builtin);
519 Call(code_target, RelocInfo::CODE_TARGET);
520 }
521 #if V8_ENABLE_WEBASSEMBLY
522 // There are two different kinds of wasm-to-js functions: one lives in the
523 // wasm code space and the other lives on the heap. Both have the same
524 // CodeKind (WASM_TO_JS_FUNCTION), but depending on where they live they
525 // either have to use wasm stub calls or call the builtin through the
526 // isolate, as JS does. To know which kind of wasm-to-js function we are
527 // compiling right now, we check whether the isolate is null.
528 // TODO(solanes, v8:11600): Split CodeKind::WASM_TO_JS_FUNCTION into two
529 // different CodeKinds and pass the CodeKind as a parameter so that we can use
530 // that instead of a nullptr check.
531 // NOLINTNEXTLINE(readability/braces)
532 else {
533 DCHECK_EQ(mode, StubCallMode::kCallWasmRuntimeStub);
534 // Use {near_call} for direct Wasm call within a module.
535 auto wasm_target = wasm::WasmCode::GetTSANStoreStub(fp_mode, size, order);
536 near_call(wasm_target, RelocInfo::WASM_STUB_CALL);
537 }
538 #endif // V8_ENABLE_WEBASSEMBLY
539
540 MaybeRestoreRegisters(registers);
541 }
542
543 void TurboAssembler::CallTSANRelaxedLoadStub(Register address,
544 SaveFPRegsMode fp_mode, int size,
545 StubCallMode mode) {
546 TSANLoadDescriptor descriptor;
547 RegList registers = descriptor.allocatable_registers();
548
549 MaybeSaveRegisters(registers);
550
551 Register address_parameter(
552 descriptor.GetRegisterParameter(TSANLoadDescriptor::kAddress));
553
554 // Prepare argument registers for calling TSANRelaxedLoad.
555 Move(address_parameter, address);
556
557 if (isolate()) {
558 Builtin builtin = CodeFactory::GetTSANRelaxedLoadStub(fp_mode, size);
559 Handle<Code> code_target = isolate()->builtins()->code_handle(builtin);
560 Call(code_target, RelocInfo::CODE_TARGET);
561 }
562 #if V8_ENABLE_WEBASSEMBLY
563 // There are two different kinds of wasm-to-js functions: one lives in the
564 // wasm code space and the other lives on the heap. Both have the same
565 // CodeKind (WASM_TO_JS_FUNCTION), but depending on where they live they
566 // either have to use wasm stub calls or call the builtin through the
567 // isolate, as JS does. To know which kind of wasm-to-js function we are
568 // compiling right now, we check whether the isolate is null.
569 // TODO(solanes, v8:11600): Split CodeKind::WASM_TO_JS_FUNCTION into two
570 // different CodeKinds and pass the CodeKind as a parameter so that we can use
571 // that instead of a nullptr check.
572 // NOLINTNEXTLINE(readability/braces)
573 else {
574 DCHECK_EQ(mode, StubCallMode::kCallWasmRuntimeStub);
575 // Use {near_call} for direct Wasm call within a module.
576 auto wasm_target = wasm::WasmCode::GetTSANRelaxedLoadStub(fp_mode, size);
577 near_call(wasm_target, RelocInfo::WASM_STUB_CALL);
578 }
579 #endif // V8_ENABLE_WEBASSEMBLY
580
581 MaybeRestoreRegisters(registers);
582 }
583 #endif // V8_IS_TSAN
584
585 void MacroAssembler::RecordWrite(Register object, Register slot_address,
586 Register value, SaveFPRegsMode fp_mode,
587 RememberedSetAction remembered_set_action,
588 SmiCheck smi_check) {
589 ASM_CODE_COMMENT(this);
590 DCHECK(!AreAliased(object, slot_address, value));
591 AssertNotSmi(object);
592
593 if ((remembered_set_action == RememberedSetAction::kOmit &&
594 !FLAG_incremental_marking) ||
595 FLAG_disable_write_barriers) {
596 return;
597 }
598
599 if (FLAG_debug_code) {
600 ASM_CODE_COMMENT_STRING(this, "Debug check slot_address");
601 Label ok;
602 cmp_tagged(value, Operand(slot_address, 0));
603 j(equal, &ok, Label::kNear);
604 int3();
605 bind(&ok);
606 }
607
608 // First, check if a write barrier is even needed. The tests below
609 // catch stores of smis and stores into the young generation.
610 Label done;
611
612 if (smi_check == SmiCheck::kInline) {
613 // Skip barrier if writing a smi.
614 JumpIfSmi(value, &done);
615 }
616
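// The barrier is only needed if the value lives on a page that tracks incoming
// pointers and the object lives on a page that tracks outgoing pointers.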
617 CheckPageFlag(value,
618 value, // Used as scratch.
619 MemoryChunk::kPointersToHereAreInterestingMask, zero, &done,
620 Label::kNear);
621
622 CheckPageFlag(object,
623 value, // Used as scratch.
624 MemoryChunk::kPointersFromHereAreInterestingMask, zero, &done,
625 Label::kNear);
626
627 CallRecordWriteStub(object, slot_address, remembered_set_action, fp_mode);
628
629 bind(&done);
630
631 // Zap the clobbered registers when running with the debug-code flag
632 // turned on, to provoke errors.
633 if (FLAG_debug_code) {
634 ASM_CODE_COMMENT_STRING(this, "Zap scratch registers");
635 Move(slot_address, kZapValue, RelocInfo::NONE);
636 Move(value, kZapValue, RelocInfo::NONE);
637 }
638 }
639
640 void TurboAssembler::Assert(Condition cc, AbortReason reason) {
641 if (FLAG_debug_code) Check(cc, reason);
642 }
643
644 void TurboAssembler::AssertUnreachable(AbortReason reason) {
645 if (FLAG_debug_code) Abort(reason);
646 }
647
648 void TurboAssembler::Check(Condition cc, AbortReason reason) {
649 Label L;
650 j(cc, &L, Label::kNear);
651 Abort(reason);
652 // Control will not return here.
653 bind(&L);
654 }
655
656 void TurboAssembler::CheckStackAlignment() {
657 int frame_alignment = base::OS::ActivationFrameAlignment();
658 int frame_alignment_mask = frame_alignment - 1;
659 if (frame_alignment > kSystemPointerSize) {
660 ASM_CODE_COMMENT(this);
661 DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
662 Label alignment_as_expected;
663 testq(rsp, Immediate(frame_alignment_mask));
664 j(zero, &alignment_as_expected, Label::kNear);
665 // Abort if stack is not aligned.
666 int3();
667 bind(&alignment_as_expected);
668 }
669 }
670
671 void TurboAssembler::Abort(AbortReason reason) {
672 ASM_CODE_COMMENT(this);
673 if (FLAG_code_comments) {
674 const char* msg = GetAbortReason(reason);
675 RecordComment("Abort message: ");
676 RecordComment(msg);
677 }
678
679 // Avoid emitting call to builtin if requested.
680 if (trap_on_abort()) {
681 int3();
682 return;
683 }
684
685 if (should_abort_hard()) {
686 // We don't care if we constructed a frame. Just pretend we did.
687 FrameScope assume_frame(this, StackFrame::NO_FRAME_TYPE);
688 Move(arg_reg_1, static_cast<int>(reason));
689 PrepareCallCFunction(1);
690 LoadAddress(rax, ExternalReference::abort_with_reason());
691 call(rax);
692 return;
693 }
694
695 Move(rdx, Smi::FromInt(static_cast<int>(reason)));
696
697 if (!has_frame()) {
698 // We don't actually want to generate a pile of code for this, so just
699 // claim there is a stack frame, without generating one.
700 FrameScope scope(this, StackFrame::NO_FRAME_TYPE);
701 Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
702 } else {
703 Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
704 }
705 // Control will not return here.
706 int3();
707 }
708
709 void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
710 SaveFPRegsMode save_doubles) {
711 ASM_CODE_COMMENT(this);
712 // If the expected number of arguments of the runtime function is
713 // constant, we check that the actual number of arguments matches the
714 // expectation.
715 CHECK(f->nargs < 0 || f->nargs == num_arguments);
716
717 // TODO(1236192): Most runtime routines don't need the number of
718 // arguments passed in because it is constant. At some point we
719 // should remove this need and make the runtime routine entry code
720 // smarter.
721 Move(rax, num_arguments);
722 LoadAddress(rbx, ExternalReference::Create(f));
723 Handle<Code> code =
724 CodeFactory::CEntry(isolate(), f->result_size, save_doubles);
725 Call(code, RelocInfo::CODE_TARGET);
726 }
727
728 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
729 // ----------- S t a t e -------------
730 // -- rsp[0] : return address
731 // -- rsp[8] : argument num_arguments - 1
732 // ...
733 // -- rsp[8 * num_arguments] : argument 0 (receiver)
734 //
735 // For runtime functions with variable arguments:
736 // -- rax : number of arguments
737 // -----------------------------------
738 ASM_CODE_COMMENT(this);
739 const Runtime::Function* function = Runtime::FunctionForId(fid);
740 DCHECK_EQ(1, function->result_size);
741 if (function->nargs >= 0) {
742 Move(rax, function->nargs);
743 }
744 JumpToExternalReference(ExternalReference::Create(fid));
745 }
746
747 void MacroAssembler::JumpToExternalReference(const ExternalReference& ext,
748 bool builtin_exit_frame) {
749 ASM_CODE_COMMENT(this);
750 // Set the entry point and jump to the C entry runtime stub.
751 LoadAddress(rbx, ext);
752 Handle<Code> code = CodeFactory::CEntry(isolate(), 1, SaveFPRegsMode::kIgnore,
753 ArgvMode::kStack, builtin_exit_frame);
754 Jump(code, RelocInfo::CODE_TARGET);
755 }
756
757 static constexpr Register saved_regs[] = {rax, rcx, rdx, rbx, rbp, rsi,
758 rdi, r8, r9, r10, r11};
759
760 static constexpr int kNumberOfSavedRegs = sizeof(saved_regs) / sizeof(Register);
761
762 int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
763 Register exclusion1,
764 Register exclusion2,
765 Register exclusion3) const {
766 int bytes = 0;
767 for (int i = 0; i < kNumberOfSavedRegs; i++) {
768 Register reg = saved_regs[i];
769 if (reg != exclusion1 && reg != exclusion2 && reg != exclusion3) {
770 bytes += kSystemPointerSize;
771 }
772 }
773
774 // R12 to R15 are callee-saved on all platforms.
775 if (fp_mode == SaveFPRegsMode::kSave) {
776 bytes += kStackSavedSavedFPSize * XMMRegister::kNumRegisters;
777 }
778
779 return bytes;
780 }
781
782 int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
783 Register exclusion2, Register exclusion3) {
784 ASM_CODE_COMMENT(this);
785 // We don't allow a GC during a store buffer overflow so there is no need to
786 // store the registers in any particular way, but we do have to store and
787 // restore them.
788 int bytes = 0;
789 for (int i = 0; i < kNumberOfSavedRegs; i++) {
790 Register reg = saved_regs[i];
791 if (reg != exclusion1 && reg != exclusion2 && reg != exclusion3) {
792 pushq(reg);
793 bytes += kSystemPointerSize;
794 }
795 }
796
797 // R12 to R15 are callee-saved on all platforms.
798 if (fp_mode == SaveFPRegsMode::kSave) {
799 const int delta = kStackSavedSavedFPSize * XMMRegister::kNumRegisters;
800 AllocateStackSpace(delta);
801 for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
802 XMMRegister reg = XMMRegister::from_code(i);
803 #if V8_ENABLE_WEBASSEMBLY
804 Movdqu(Operand(rsp, i * kStackSavedSavedFPSize), reg);
805 #else
806 Movsd(Operand(rsp, i * kStackSavedSavedFPSize), reg);
807 #endif // V8_ENABLE_WEBASSEMBLY
808 }
809 bytes += delta;
810 }
811
812 return bytes;
813 }
814
815 int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
816 Register exclusion2, Register exclusion3) {
817 ASM_CODE_COMMENT(this);
818 int bytes = 0;
819 if (fp_mode == SaveFPRegsMode::kSave) {
820 for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
821 XMMRegister reg = XMMRegister::from_code(i);
822 #if V8_ENABLE_WEBASSEMBLY
823 Movdqu(reg, Operand(rsp, i * kStackSavedSavedFPSize));
824 #else
825 Movsd(reg, Operand(rsp, i * kStackSavedSavedFPSize));
826 #endif // V8_ENABLE_WEBASSEMBLY
827 }
828 const int delta = kStackSavedSavedFPSize * XMMRegister::kNumRegisters;
829 addq(rsp, Immediate(delta));
830 bytes += delta;
831 }
832
833 for (int i = kNumberOfSavedRegs - 1; i >= 0; i--) {
834 Register reg = saved_regs[i];
835 if (reg != exclusion1 && reg != exclusion2 && reg != exclusion3) {
836 popq(reg);
837 bytes += kSystemPointerSize;
838 }
839 }
840
841 return bytes;
842 }
843
844 void TurboAssembler::Movq(XMMRegister dst, Register src) {
845 if (CpuFeatures::IsSupported(AVX)) {
846 CpuFeatureScope avx_scope(this, AVX);
847 vmovq(dst, src);
848 } else {
849 movq(dst, src);
850 }
851 }
852
853 void TurboAssembler::Movq(Register dst, XMMRegister src) {
854 if (CpuFeatures::IsSupported(AVX)) {
855 CpuFeatureScope avx_scope(this, AVX);
856 vmovq(dst, src);
857 } else {
858 movq(dst, src);
859 }
860 }
861
862 void TurboAssembler::Pextrq(Register dst, XMMRegister src, int8_t imm8) {
863 if (CpuFeatures::IsSupported(AVX)) {
864 CpuFeatureScope avx_scope(this, AVX);
865 vpextrq(dst, src, imm8);
866 } else {
867 CpuFeatureScope sse_scope(this, SSE4_1);
868 pextrq(dst, src, imm8);
869 }
870 }
871
872 // Helper macro implementing the qfma macro-assembler operation. It handles every
873 // possible case of register aliasing to minimize the number of instructions.
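// The emitted sequence computes dst = src1 + (src2 * src3). With FMA3, the
// 132/213/231 operand orderings are chosen so that whichever source already
// aliases dst serves as the accumulator, avoiding an extra register move.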
874 #define QFMA(ps_or_pd) \
875 if (CpuFeatures::IsSupported(FMA3)) { \
876 CpuFeatureScope fma3_scope(this, FMA3); \
877 if (dst == src1) { \
878 vfmadd231##ps_or_pd(dst, src2, src3); \
879 } else if (dst == src2) { \
880 vfmadd132##ps_or_pd(dst, src1, src3); \
881 } else if (dst == src3) { \
882 vfmadd213##ps_or_pd(dst, src2, src1); \
883 } else { \
884 vmovups(dst, src1); \
885 vfmadd231##ps_or_pd(dst, src2, src3); \
886 } \
887 } else if (CpuFeatures::IsSupported(AVX)) { \
888 CpuFeatureScope avx_scope(this, AVX); \
889 vmul##ps_or_pd(tmp, src2, src3); \
890 vadd##ps_or_pd(dst, src1, tmp); \
891 } else { \
892 if (dst == src1) { \
893 movaps(tmp, src2); \
894 mul##ps_or_pd(tmp, src3); \
895 add##ps_or_pd(dst, tmp); \
896 } else if (dst == src2) { \
897 DCHECK_NE(src2, src1); \
898 mul##ps_or_pd(src2, src3); \
899 add##ps_or_pd(src2, src1); \
900 } else if (dst == src3) { \
901 DCHECK_NE(src3, src1); \
902 mul##ps_or_pd(src3, src2); \
903 add##ps_or_pd(src3, src1); \
904 } else { \
905 movaps(dst, src2); \
906 mul##ps_or_pd(dst, src3); \
907 add##ps_or_pd(dst, src1); \
908 } \
909 }
910
911 // Helper macro implementing the qfms macro-assembler operation. It handles every
912 // possible case of register aliasing to minimize the number of instructions.
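// The emitted sequence computes dst = src1 - (src2 * src3), using the negated
// multiply-add (vfnmadd) forms when FMA3 is available so the product is
// subtracted from src1 without an intermediate negation.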
913 #define QFMS(ps_or_pd) \
914 if (CpuFeatures::IsSupported(FMA3)) { \
915 CpuFeatureScope fma3_scope(this, FMA3); \
916 if (dst == src1) { \
917 vfnmadd231##ps_or_pd(dst, src2, src3); \
918 } else if (dst == src2) { \
919 vfnmadd132##ps_or_pd(dst, src1, src3); \
920 } else if (dst == src3) { \
921 vfnmadd213##ps_or_pd(dst, src2, src1); \
922 } else { \
923 vmovups(dst, src1); \
924 vfnmadd231##ps_or_pd(dst, src2, src3); \
925 } \
926 } else if (CpuFeatures::IsSupported(AVX)) { \
927 CpuFeatureScope avx_scope(this, AVX); \
928 vmul##ps_or_pd(tmp, src2, src3); \
929 vsub##ps_or_pd(dst, src1, tmp); \
930 } else { \
931 movaps(tmp, src2); \
932 mul##ps_or_pd(tmp, src3); \
933 if (dst != src1) { \
934 movaps(dst, src1); \
935 } \
936 sub##ps_or_pd(dst, tmp); \
937 }
938
939 void TurboAssembler::F32x4Qfma(XMMRegister dst, XMMRegister src1,
940 XMMRegister src2, XMMRegister src3,
941 XMMRegister tmp) {
942 QFMA(ps)
943 }
944
945 void TurboAssembler::F32x4Qfms(XMMRegister dst, XMMRegister src1,
946 XMMRegister src2, XMMRegister src3,
947 XMMRegister tmp) {
948 QFMS(ps)
949 }
950
951 void TurboAssembler::F64x2Qfma(XMMRegister dst, XMMRegister src1,
952 XMMRegister src2, XMMRegister src3,
953 XMMRegister tmp) {
954 QFMA(pd);
955 }
956
957 void TurboAssembler::F64x2Qfms(XMMRegister dst, XMMRegister src1,
958 XMMRegister src2, XMMRegister src3,
959 XMMRegister tmp) {
960 QFMS(pd);
961 }
962
963 #undef QFMA
#undef QFMS
964
965 void TurboAssembler::Cvtss2sd(XMMRegister dst, XMMRegister src) {
966 if (CpuFeatures::IsSupported(AVX)) {
967 CpuFeatureScope scope(this, AVX);
968 vcvtss2sd(dst, src, src);
969 } else {
970 cvtss2sd(dst, src);
971 }
972 }
973
974 void TurboAssembler::Cvtss2sd(XMMRegister dst, Operand src) {
975 if (CpuFeatures::IsSupported(AVX)) {
976 CpuFeatureScope scope(this, AVX);
977 vcvtss2sd(dst, dst, src);
978 } else {
979 cvtss2sd(dst, src);
980 }
981 }
982
983 void TurboAssembler::Cvtsd2ss(XMMRegister dst, XMMRegister src) {
984 if (CpuFeatures::IsSupported(AVX)) {
985 CpuFeatureScope scope(this, AVX);
986 vcvtsd2ss(dst, src, src);
987 } else {
988 cvtsd2ss(dst, src);
989 }
990 }
991
992 void TurboAssembler::Cvtsd2ss(XMMRegister dst, Operand src) {
993 if (CpuFeatures::IsSupported(AVX)) {
994 CpuFeatureScope scope(this, AVX);
995 vcvtsd2ss(dst, dst, src);
996 } else {
997 cvtsd2ss(dst, src);
998 }
999 }
1000
1001 void TurboAssembler::Cvtlsi2sd(XMMRegister dst, Register src) {
1002 if (CpuFeatures::IsSupported(AVX)) {
1003 CpuFeatureScope scope(this, AVX);
1004 vcvtlsi2sd(dst, kScratchDoubleReg, src);
1005 } else {
1006 xorpd(dst, dst);
1007 cvtlsi2sd(dst, src);
1008 }
1009 }
1010
1011 void TurboAssembler::Cvtlsi2sd(XMMRegister dst, Operand src) {
1012 if (CpuFeatures::IsSupported(AVX)) {
1013 CpuFeatureScope scope(this, AVX);
1014 vcvtlsi2sd(dst, kScratchDoubleReg, src);
1015 } else {
1016 xorpd(dst, dst);
1017 cvtlsi2sd(dst, src);
1018 }
1019 }
1020
1021 void TurboAssembler::Cvtlsi2ss(XMMRegister dst, Register src) {
1022 if (CpuFeatures::IsSupported(AVX)) {
1023 CpuFeatureScope scope(this, AVX);
1024 vcvtlsi2ss(dst, kScratchDoubleReg, src);
1025 } else {
1026 xorps(dst, dst);
1027 cvtlsi2ss(dst, src);
1028 }
1029 }
1030
1031 void TurboAssembler::Cvtlsi2ss(XMMRegister dst, Operand src) {
1032 if (CpuFeatures::IsSupported(AVX)) {
1033 CpuFeatureScope scope(this, AVX);
1034 vcvtlsi2ss(dst, kScratchDoubleReg, src);
1035 } else {
1036 xorps(dst, dst);
1037 cvtlsi2ss(dst, src);
1038 }
1039 }
1040
1041 void TurboAssembler::Cvtqsi2ss(XMMRegister dst, Register src) {
1042 if (CpuFeatures::IsSupported(AVX)) {
1043 CpuFeatureScope scope(this, AVX);
1044 vcvtqsi2ss(dst, kScratchDoubleReg, src);
1045 } else {
1046 xorps(dst, dst);
1047 cvtqsi2ss(dst, src);
1048 }
1049 }
1050
1051 void TurboAssembler::Cvtqsi2ss(XMMRegister dst, Operand src) {
1052 if (CpuFeatures::IsSupported(AVX)) {
1053 CpuFeatureScope scope(this, AVX);
1054 vcvtqsi2ss(dst, kScratchDoubleReg, src);
1055 } else {
1056 xorps(dst, dst);
1057 cvtqsi2ss(dst, src);
1058 }
1059 }
1060
1061 void TurboAssembler::Cvtqsi2sd(XMMRegister dst, Register src) {
1062 if (CpuFeatures::IsSupported(AVX)) {
1063 CpuFeatureScope scope(this, AVX);
1064 vcvtqsi2sd(dst, kScratchDoubleReg, src);
1065 } else {
1066 xorpd(dst, dst);
1067 cvtqsi2sd(dst, src);
1068 }
1069 }
1070
1071 void TurboAssembler::Cvtqsi2sd(XMMRegister dst, Operand src) {
1072 if (CpuFeatures::IsSupported(AVX)) {
1073 CpuFeatureScope scope(this, AVX);
1074 vcvtqsi2sd(dst, kScratchDoubleReg, src);
1075 } else {
1076 xorpd(dst, dst);
1077 cvtqsi2sd(dst, src);
1078 }
1079 }
1080
1081 void TurboAssembler::Cvtlui2ss(XMMRegister dst, Register src) {
1082 // Zero-extend the 32-bit value to 64 bits.
1083 movl(kScratchRegister, src);
1084 Cvtqsi2ss(dst, kScratchRegister);
1085 }
1086
1087 void TurboAssembler::Cvtlui2ss(XMMRegister dst, Operand src) {
1088 // Zero-extend the 32-bit value to 64 bits.
1089 movl(kScratchRegister, src);
1090 Cvtqsi2ss(dst, kScratchRegister);
1091 }
1092
1093 void TurboAssembler::Cvtlui2sd(XMMRegister dst, Register src) {
1094 // Zero-extend the 32-bit value to 64 bits.
1095 movl(kScratchRegister, src);
1096 Cvtqsi2sd(dst, kScratchRegister);
1097 }
1098
1099 void TurboAssembler::Cvtlui2sd(XMMRegister dst, Operand src) {
1100 // Zero-extend the 32-bit value to 64 bits.
1101 movl(kScratchRegister, src);
1102 Cvtqsi2sd(dst, kScratchRegister);
1103 }
1104
1105 void TurboAssembler::Cvtqui2ss(XMMRegister dst, Register src) {
1106 Label done;
1107 Cvtqsi2ss(dst, src);
1108 testq(src, src);
1109 j(positive, &done, Label::kNear);
1110
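// The MSB of src is set, so the value is at least 2^63 and does not fit in a
// signed int64. Convert half of the value instead and double the result below.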
1111 // Compute {src/2 | (src&1)} (retain the LSB to avoid rounding errors).
1112 if (src != kScratchRegister) movq(kScratchRegister, src);
1113 shrq(kScratchRegister, Immediate(1));
1114 // The LSB is shifted into CF. If it is set, set the LSB in {kScratchRegister}.
1115 Label msb_not_set;
1116 j(not_carry, &msb_not_set, Label::kNear);
1117 orq(kScratchRegister, Immediate(1));
1118 bind(&msb_not_set);
1119 Cvtqsi2ss(dst, kScratchRegister);
1120 Addss(dst, dst);
1121 bind(&done);
1122 }
1123
1124 void TurboAssembler::Cvtqui2ss(XMMRegister dst, Operand src) {
1125 movq(kScratchRegister, src);
1126 Cvtqui2ss(dst, kScratchRegister);
1127 }
1128
1129 void TurboAssembler::Cvtqui2sd(XMMRegister dst, Register src) {
1130 Label done;
1131 Cvtqsi2sd(dst, src);
1132 testq(src, src);
1133 j(positive, &done, Label::kNear);
1134
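// As above: the value is at least 2^63, so convert half of it and double the
// result below.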
1135 // Compute {src/2 | (src&1)} (retain the LSB to avoid rounding errors).
1136 if (src != kScratchRegister) movq(kScratchRegister, src);
1137 shrq(kScratchRegister, Immediate(1));
1138 // The LSB is shifted into CF. If it is set, set the LSB in {kScratchRegister}.
1139 Label msb_not_set;
1140 j(not_carry, &msb_not_set, Label::kNear);
1141 orq(kScratchRegister, Immediate(1));
1142 bind(&msb_not_set);
1143 Cvtqsi2sd(dst, kScratchRegister);
1144 Addsd(dst, dst);
1145 bind(&done);
1146 }
1147
1148 void TurboAssembler::Cvtqui2sd(XMMRegister dst, Operand src) {
1149 movq(kScratchRegister, src);
1150 Cvtqui2sd(dst, kScratchRegister);
1151 }
1152
1153 void TurboAssembler::Cvttss2si(Register dst, XMMRegister src) {
1154 if (CpuFeatures::IsSupported(AVX)) {
1155 CpuFeatureScope scope(this, AVX);
1156 vcvttss2si(dst, src);
1157 } else {
1158 cvttss2si(dst, src);
1159 }
1160 }
1161
1162 void TurboAssembler::Cvttss2si(Register dst, Operand src) {
1163 if (CpuFeatures::IsSupported(AVX)) {
1164 CpuFeatureScope scope(this, AVX);
1165 vcvttss2si(dst, src);
1166 } else {
1167 cvttss2si(dst, src);
1168 }
1169 }
1170
1171 void TurboAssembler::Cvttsd2si(Register dst, XMMRegister src) {
1172 if (CpuFeatures::IsSupported(AVX)) {
1173 CpuFeatureScope scope(this, AVX);
1174 vcvttsd2si(dst, src);
1175 } else {
1176 cvttsd2si(dst, src);
1177 }
1178 }
1179
1180 void TurboAssembler::Cvttsd2si(Register dst, Operand src) {
1181 if (CpuFeatures::IsSupported(AVX)) {
1182 CpuFeatureScope scope(this, AVX);
1183 vcvttsd2si(dst, src);
1184 } else {
1185 cvttsd2si(dst, src);
1186 }
1187 }
1188
1189 void TurboAssembler::Cvttss2siq(Register dst, XMMRegister src) {
1190 if (CpuFeatures::IsSupported(AVX)) {
1191 CpuFeatureScope scope(this, AVX);
1192 vcvttss2siq(dst, src);
1193 } else {
1194 cvttss2siq(dst, src);
1195 }
1196 }
1197
1198 void TurboAssembler::Cvttss2siq(Register dst, Operand src) {
1199 if (CpuFeatures::IsSupported(AVX)) {
1200 CpuFeatureScope scope(this, AVX);
1201 vcvttss2siq(dst, src);
1202 } else {
1203 cvttss2siq(dst, src);
1204 }
1205 }
1206
1207 void TurboAssembler::Cvttsd2siq(Register dst, XMMRegister src) {
1208 if (CpuFeatures::IsSupported(AVX)) {
1209 CpuFeatureScope scope(this, AVX);
1210 vcvttsd2siq(dst, src);
1211 } else {
1212 cvttsd2siq(dst, src);
1213 }
1214 }
1215
1216 void TurboAssembler::Cvttsd2siq(Register dst, Operand src) {
1217 if (CpuFeatures::IsSupported(AVX)) {
1218 CpuFeatureScope scope(this, AVX);
1219 vcvttsd2siq(dst, src);
1220 } else {
1221 cvttsd2siq(dst, src);
1222 }
1223 }
1224
1225 namespace {
1226 template <typename OperandOrXMMRegister, bool is_double>
1227 void ConvertFloatToUint64(TurboAssembler* tasm, Register dst,
1228 OperandOrXMMRegister src, Label* fail) {
1229 Label success;
1230 // There is no native float-to-uint instruction, so we have to use a
1231 // float-to-int conversion and post-process the result.
1232 if (is_double) {
1233 tasm->Cvttsd2siq(dst, src);
1234 } else {
1235 tasm->Cvttss2siq(dst, src);
1236 }
1237 // If the result of the conversion is positive, we are already done.
1238 tasm->testq(dst, dst);
1239 tasm->j(positive, &success);
1240 // The result of the first conversion was negative, which means that the
1241 // input value was not within the positive int64 range. We subtract 2^63
1242 // and convert it again to see if it is within the uint64 range.
1243 if (is_double) {
1244 tasm->Move(kScratchDoubleReg, -9223372036854775808.0);
1245 tasm->Addsd(kScratchDoubleReg, src);
1246 tasm->Cvttsd2siq(dst, kScratchDoubleReg);
1247 } else {
1248 tasm->Move(kScratchDoubleReg, -9223372036854775808.0f);
1249 tasm->Addss(kScratchDoubleReg, src);
1250 tasm->Cvttss2siq(dst, kScratchDoubleReg);
1251 }
1252 tasm->testq(dst, dst);
1253 // The only possible negative value here is 0x8000000000000000, which is
1254 // used on x64 to indicate an integer overflow.
1255 tasm->j(negative, fail ? fail : &success);
1256 // The input value is within uint64 range and the second conversion worked
1257 // successfully, but we still have to undo the subtraction we did
1258 // earlier.
1259 tasm->Move(kScratchRegister, 0x8000000000000000);
1260 tasm->orq(dst, kScratchRegister);
1261 tasm->bind(&success);
1262 }
1263 } // namespace
1264
1265 void TurboAssembler::Cvttsd2uiq(Register dst, Operand src, Label* fail) {
1266 ConvertFloatToUint64<Operand, true>(this, dst, src, fail);
1267 }
1268
1269 void TurboAssembler::Cvttsd2uiq(Register dst, XMMRegister src, Label* fail) {
1270 ConvertFloatToUint64<XMMRegister, true>(this, dst, src, fail);
1271 }
1272
1273 void TurboAssembler::Cvttss2uiq(Register dst, Operand src, Label* fail) {
1274 ConvertFloatToUint64<Operand, false>(this, dst, src, fail);
1275 }
1276
1277 void TurboAssembler::Cvttss2uiq(Register dst, XMMRegister src, Label* fail) {
1278 ConvertFloatToUint64<XMMRegister, false>(this, dst, src, fail);
1279 }
1280
1281
1282 // ----------------------------------------------------------------------------
1283 // Smi tagging, untagging and tag detection.
1284
1285 Register TurboAssembler::GetSmiConstant(Smi source) {
1286 Move(kScratchRegister, source);
1287 return kScratchRegister;
1288 }
1289
1290 void TurboAssembler::Cmp(Register dst, int32_t src) {
1291 if (src == 0) {
1292 testl(dst, dst);
1293 } else {
1294 cmpl(dst, Immediate(src));
1295 }
1296 }
1297
1298 void TurboAssembler::SmiTag(Register reg) {
1299 STATIC_ASSERT(kSmiTag == 0);
1300 DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits());
1301 if (COMPRESS_POINTERS_BOOL) {
1302 shll(reg, Immediate(kSmiShift));
1303 } else {
1304 shlq(reg, Immediate(kSmiShift));
1305 }
1306 }
1307
1308 void TurboAssembler::SmiTag(Register dst, Register src) {
1309 DCHECK(dst != src);
1310 if (COMPRESS_POINTERS_BOOL) {
1311 movl(dst, src);
1312 } else {
1313 movq(dst, src);
1314 }
1315 SmiTag(dst);
1316 }
1317
1318 void TurboAssembler::SmiUntag(Register reg) {
1319 STATIC_ASSERT(kSmiTag == 0);
1320 DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits());
1321 // TODO(v8:7703): Is there a way to avoid this sign extension when pointer
1322 // compression is enabled?
1323 if (COMPRESS_POINTERS_BOOL) {
1324 movsxlq(reg, reg);
1325 }
1326 sarq(reg, Immediate(kSmiShift));
1327 }
1328
1329 void TurboAssembler::SmiUntag(Register dst, Register src) {
1330 DCHECK(dst != src);
1331 if (COMPRESS_POINTERS_BOOL) {
1332 movsxlq(dst, src);
1333 } else {
1334 movq(dst, src);
1335 }
1336 // TODO(v8:7703): Call SmiUntag(reg) if we can find a way to avoid the extra
1337 // mov when pointer compression is enabled.
1338 STATIC_ASSERT(kSmiTag == 0);
1339 DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits());
1340 sarq(dst, Immediate(kSmiShift));
1341 }
1342
1343 void TurboAssembler::SmiUntag(Register dst, Operand src) {
1344 if (SmiValuesAre32Bits()) {
1345 movl(dst, Operand(src, kSmiShift / kBitsPerByte));
1346 // Sign extend to 64-bit.
1347 movsxlq(dst, dst);
1348 } else {
1349 DCHECK(SmiValuesAre31Bits());
1350 if (COMPRESS_POINTERS_BOOL) {
1351 movsxlq(dst, src);
1352 } else {
1353 movq(dst, src);
1354 }
1355 sarq(dst, Immediate(kSmiShift));
1356 }
1357 }
1358
1359 void TurboAssembler::SmiToInt32(Register reg) {
1360 STATIC_ASSERT(kSmiTag == 0);
1361 DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits());
1362 if (COMPRESS_POINTERS_BOOL) {
1363 sarl(reg, Immediate(kSmiShift));
1364 } else {
1365 shrq(reg, Immediate(kSmiShift));
1366 }
1367 }
1368
1369 void TurboAssembler::SmiCompare(Register smi1, Register smi2) {
1370 AssertSmi(smi1);
1371 AssertSmi(smi2);
1372 cmp_tagged(smi1, smi2);
1373 }
1374
1375 void TurboAssembler::SmiCompare(Register dst, Smi src) {
1376 AssertSmi(dst);
1377 Cmp(dst, src);
1378 }
1379
1380 void TurboAssembler::Cmp(Register dst, Smi src) {
1381 if (src.value() == 0) {
1382 test_tagged(dst, dst);
1383 } else {
1384 DCHECK_NE(dst, kScratchRegister);
1385 Register constant_reg = GetSmiConstant(src);
1386 cmp_tagged(dst, constant_reg);
1387 }
1388 }
1389
1390 void TurboAssembler::SmiCompare(Register dst, Operand src) {
1391 AssertSmi(dst);
1392 AssertSmi(src);
1393 cmp_tagged(dst, src);
1394 }
1395
1396 void TurboAssembler::SmiCompare(Operand dst, Register src) {
1397 AssertSmi(dst);
1398 AssertSmi(src);
1399 cmp_tagged(dst, src);
1400 }
1401
1402 void TurboAssembler::SmiCompare(Operand dst, Smi src) {
1403 AssertSmi(dst);
1404 if (SmiValuesAre32Bits()) {
1405 cmpl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(src.value()));
1406 } else {
1407 DCHECK(SmiValuesAre31Bits());
1408 cmpl(dst, Immediate(src));
1409 }
1410 }
1411
1412 void TurboAssembler::Cmp(Operand dst, Smi src) {
1413 // The Operand cannot use the smi register.
1414 Register smi_reg = GetSmiConstant(src);
1415 DCHECK(!dst.AddressUsesRegister(smi_reg));
1416 cmp_tagged(dst, smi_reg);
1417 }
1418
1419 Condition TurboAssembler::CheckSmi(Register src) {
1420 STATIC_ASSERT(kSmiTag == 0);
1421 testb(src, Immediate(kSmiTagMask));
1422 return zero;
1423 }
1424
1425 Condition TurboAssembler::CheckSmi(Operand src) {
1426 STATIC_ASSERT(kSmiTag == 0);
1427 testb(src, Immediate(kSmiTagMask));
1428 return zero;
1429 }
1430
1431 void TurboAssembler::JumpIfSmi(Register src, Label* on_smi,
1432 Label::Distance near_jump) {
1433 Condition smi = CheckSmi(src);
1434 j(smi, on_smi, near_jump);
1435 }
1436
1437 void TurboAssembler::JumpIfNotSmi(Register src, Label* on_not_smi,
1438 Label::Distance near_jump) {
1439 Condition smi = CheckSmi(src);
1440 j(NegateCondition(smi), on_not_smi, near_jump);
1441 }
1442
1443 void TurboAssembler::JumpIfNotSmi(Operand src, Label* on_not_smi,
1444 Label::Distance near_jump) {
1445 Condition smi = CheckSmi(src);
1446 j(NegateCondition(smi), on_not_smi, near_jump);
1447 }
1448
1449 void TurboAssembler::SmiAddConstant(Operand dst, Smi constant) {
1450 if (constant.value() != 0) {
1451 if (SmiValuesAre32Bits()) {
1452 addl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(constant.value()));
1453 } else {
1454 DCHECK(SmiValuesAre31Bits());
1455 if (kTaggedSize == kInt64Size) {
1456 // Sign-extend value after addition
1457 movl(kScratchRegister, dst);
1458 addl(kScratchRegister, Immediate(constant));
1459 movsxlq(kScratchRegister, kScratchRegister);
1460 movq(dst, kScratchRegister);
1461 } else {
1462 DCHECK_EQ(kTaggedSize, kInt32Size);
1463 addl(dst, Immediate(constant));
1464 }
1465 }
1466 }
1467 }
1468
1469 SmiIndex TurboAssembler::SmiToIndex(Register dst, Register src, int shift) {
1470 if (SmiValuesAre32Bits()) {
1471 DCHECK(is_uint6(shift));
1472 // There is a possible optimization if shift is in the range 60-63, but that
1473 // will (and must) never happen.
1474 if (dst != src) {
1475 movq(dst, src);
1476 }
1477 if (shift < kSmiShift) {
1478 sarq(dst, Immediate(kSmiShift - shift));
1479 } else {
1480 shlq(dst, Immediate(shift - kSmiShift));
1481 }
1482 return SmiIndex(dst, times_1);
1483 } else {
1484 DCHECK(SmiValuesAre31Bits());
1485 // We have to sign-extend the index register to 64 bits as the SMI might
1486 // be negative.
1487 movsxlq(dst, src);
1488 if (shift < kSmiShift) {
1489 sarq(dst, Immediate(kSmiShift - shift));
1490 } else if (shift != kSmiShift) {
1491 if (shift - kSmiShift <= static_cast<int>(times_8)) {
1492 return SmiIndex(dst, static_cast<ScaleFactor>(shift - kSmiShift));
1493 }
1494 shlq(dst, Immediate(shift - kSmiShift));
1495 }
1496 return SmiIndex(dst, times_1);
1497 }
1498 }
1499
1500 void TurboAssembler::Push(Smi source) {
1501 intptr_t smi = static_cast<intptr_t>(source.ptr());
1502 if (is_int32(smi)) {
1503 Push(Immediate(static_cast<int32_t>(smi)));
1504 return;
1505 }
1506 int first_byte_set = base::bits::CountTrailingZeros64(smi) / 8;
1507 int last_byte_set = (63 - base::bits::CountLeadingZeros64(smi)) / 8;
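// All of the value's set bits fall within one byte: push zero and patch just
// that byte on the stack.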
1508 if (first_byte_set == last_byte_set) {
1509 // This sequence has only 7 bytes, compared to the 12 bytes below.
1510 Push(Immediate(0));
1511 movb(Operand(rsp, first_byte_set),
1512 Immediate(static_cast<int8_t>(smi >> (8 * first_byte_set))));
1513 return;
1514 }
1515 Register constant = GetSmiConstant(source);
1516 Push(constant);
1517 }
1518
1519 // ----------------------------------------------------------------------------
1520
1521 void TurboAssembler::Move(Register dst, Smi source) {
1522 STATIC_ASSERT(kSmiTag == 0);
1523 int value = source.value();
1524 if (value == 0) {
1525 xorl(dst, dst);
1526 } else if (SmiValuesAre32Bits() || value < 0) {
1527 Move(dst, source.ptr(), RelocInfo::NONE);
1528 } else {
1529 uint32_t uvalue = static_cast<uint32_t>(source.ptr());
1530 Move(dst, uvalue);
1531 }
1532 }
1533
1534 void TurboAssembler::Move(Operand dst, intptr_t x) {
1535 if (is_int32(x)) {
1536 movq(dst, Immediate(static_cast<int32_t>(x)));
1537 } else {
1538 Move(kScratchRegister, x);
1539 movq(dst, kScratchRegister);
1540 }
1541 }
1542
1543 void TurboAssembler::Move(Register dst, ExternalReference ext) {
1544 // TODO(jgruber,v8:8887): Also consider a root-relative load when generating
1545 // non-isolate-independent code. In many cases it might be cheaper than
1546 // embedding the relocatable value.
1547 if (root_array_available_ && options().isolate_independent_code) {
1548 IndirectLoadExternalReference(dst, ext);
1549 return;
1550 }
1551 movq(dst, Immediate64(ext.address(), RelocInfo::EXTERNAL_REFERENCE));
1552 }
1553
1554 void TurboAssembler::Move(Register dst, Register src) {
1555 if (dst != src) {
1556 movq(dst, src);
1557 }
1558 }
1559
1560 void TurboAssembler::Move(Register dst, Operand src) { movq(dst, src); }
1561 void TurboAssembler::Move(Register dst, Immediate src) {
1562 if (src.rmode() == RelocInfo::Mode::NONE) {
1563 Move(dst, src.value());
1564 } else {
1565 movl(dst, src);
1566 }
1567 }
1568
1569 void TurboAssembler::Move(XMMRegister dst, XMMRegister src) {
1570 if (dst != src) {
1571 Movaps(dst, src);
1572 }
1573 }
1574
1575 void TurboAssembler::MovePair(Register dst0, Register src0, Register dst1,
1576 Register src1) {
1577 if (dst0 != src1) {
1578 // Normal case: Writing to dst0 does not destroy src1.
1579 Move(dst0, src0);
1580 Move(dst1, src1);
1581 } else if (dst1 != src0) {
1582 // Only dst0 and src1 are the same register,
1583 // but writing to dst1 does not destroy src0.
1584 Move(dst1, src1);
1585 Move(dst0, src0);
1586 } else {
1587 // dst0 == src1, and dst1 == src0, a swap is required:
1588 // dst0 \/ src0
1589 // dst1 /\ src1
1590 xchgq(dst0, dst1);
1591 }
1592 }
1593
1594 void TurboAssembler::MoveNumber(Register dst, double value) {
1595 int32_t smi;
1596 if (DoubleToSmiInteger(value, &smi)) {
1597 Move(dst, Smi::FromInt(smi));
1598 } else {
1599 movq_heap_number(dst, value);
1600 }
1601 }
1602
1603 void TurboAssembler::Move(XMMRegister dst, uint32_t src) {
1604 if (src == 0) {
1605 Xorps(dst, dst);
1606 } else {
1607 unsigned nlz = base::bits::CountLeadingZeros(src);
1608 unsigned ntz = base::bits::CountTrailingZeros(src);
1609 unsigned pop = base::bits::CountPopulation(src);
1610 DCHECK_NE(0u, pop);
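// If the set bits form one contiguous run, synthesize the constant from an
// all-ones pattern plus two shifts instead of going through a GPR.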
1611 if (pop + ntz + nlz == 32) {
1612 Pcmpeqd(dst, dst);
1613 if (ntz) Pslld(dst, static_cast<byte>(ntz + nlz));
1614 if (nlz) Psrld(dst, static_cast<byte>(nlz));
1615 } else {
1616 movl(kScratchRegister, Immediate(src));
1617 Movd(dst, kScratchRegister);
1618 }
1619 }
1620 }
1621
1622 void TurboAssembler::Move(XMMRegister dst, uint64_t src) {
1623 if (src == 0) {
1624 Xorpd(dst, dst);
1625 } else {
1626 unsigned nlz = base::bits::CountLeadingZeros(src);
1627 unsigned ntz = base::bits::CountTrailingZeros(src);
1628 unsigned pop = base::bits::CountPopulation(src);
1629 DCHECK_NE(0u, pop);
1630 if (pop + ntz + nlz == 64) {
1631 Pcmpeqd(dst, dst);
1632 if (ntz) Psllq(dst, static_cast<byte>(ntz + nlz));
1633 if (nlz) Psrlq(dst, static_cast<byte>(nlz));
1634 } else {
1635 uint32_t lower = static_cast<uint32_t>(src);
1636 uint32_t upper = static_cast<uint32_t>(src >> 32);
1637 if (upper == 0) {
1638 Move(dst, lower);
1639 } else {
1640 movq(kScratchRegister, src);
1641 Movq(dst, kScratchRegister);
1642 }
1643 }
1644 }
1645 }
1646
1647 void TurboAssembler::Move(XMMRegister dst, uint64_t high, uint64_t low) {
1648 if (high == low) {
1649 Move(dst, low);
1650 Punpcklqdq(dst, dst);
1651 return;
1652 }
1653
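// The lanes differ: materialize the low lane first, then insert the high
// 64 bits into lane 1.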
1654 Move(dst, low);
1655 movq(kScratchRegister, high);
1656 Pinsrq(dst, dst, kScratchRegister, uint8_t{1});
1657 }
1658
1659 // ----------------------------------------------------------------------------
1660
1661 void MacroAssembler::Cmp(Register dst, Handle<Object> source) {
1662 if (source->IsSmi()) {
1663 Cmp(dst, Smi::cast(*source));
1664 } else {
1665 Move(kScratchRegister, Handle<HeapObject>::cast(source));
1666 cmp_tagged(dst, kScratchRegister);
1667 }
1668 }
1669
1670 void MacroAssembler::Cmp(Operand dst, Handle<Object> source) {
1671 if (source->IsSmi()) {
1672 Cmp(dst, Smi::cast(*source));
1673 } else {
1674 Move(kScratchRegister, Handle<HeapObject>::cast(source));
1675 cmp_tagged(dst, kScratchRegister);
1676 }
1677 }
1678
1679 void MacroAssembler::CompareRange(Register value, unsigned lower_limit,
1680 unsigned higher_limit) {
1681 ASM_CODE_COMMENT(this);
1682 DCHECK_LT(lower_limit, higher_limit);
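// Subtract lower_limit from value so that a single unsigned comparison
// against (higher_limit - lower_limit) decides membership in
// [lower_limit, higher_limit]; callers branch on below_equal.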
1683 if (lower_limit != 0) {
1684 leal(kScratchRegister, Operand(value, 0u - lower_limit));
1685 cmpl(kScratchRegister, Immediate(higher_limit - lower_limit));
1686 } else {
1687 cmpl(value, Immediate(higher_limit));
1688 }
1689 }
1690
1691 void MacroAssembler::JumpIfIsInRange(Register value, unsigned lower_limit,
1692 unsigned higher_limit, Label* on_in_range,
1693 Label::Distance near_jump) {
1694 CompareRange(value, lower_limit, higher_limit);
1695 j(below_equal, on_in_range, near_jump);
1696 }
1697
1698 void TurboAssembler::Push(Handle<HeapObject> source) {
1699 Move(kScratchRegister, source);
1700 Push(kScratchRegister);
1701 }
1702
1703 void TurboAssembler::PushArray(Register array, Register size, Register scratch,
1704 PushArrayOrder order) {
1705 DCHECK(!AreAliased(array, size, scratch));
1706 Register counter = scratch;
1707 Label loop, entry;
1708 if (order == PushArrayOrder::kReverse) {
1709 Move(counter, 0);
1710 jmp(&entry);
1711 bind(&loop);
1712 Push(Operand(array, counter, times_system_pointer_size, 0));
1713 incq(counter);
1714 bind(&entry);
1715 cmpq(counter, size);
1716 j(less, &loop, Label::kNear);
1717 } else {
1718 movq(counter, size);
1719 jmp(&entry);
1720 bind(&loop);
1721 Push(Operand(array, counter, times_system_pointer_size, 0));
1722 bind(&entry);
1723 decq(counter);
1724 j(greater_equal, &loop, Label::kNear);
1725 }
1726 }
1727
1728 void TurboAssembler::Move(Register result, Handle<HeapObject> object,
1729 RelocInfo::Mode rmode) {
1730 // TODO(jgruber,v8:8887): Also consider a root-relative load when generating
1731 // non-isolate-independent code. In many cases it might be cheaper than
1732 // embedding the relocatable value.
1733 if (root_array_available_ && options().isolate_independent_code) {
1734 // TODO(v8:9706): Fix-it! This load will always uncompress the value
1735 // even when we are loading a compressed embedded object.
1736 IndirectLoadConstant(result, object);
1737 } else if (RelocInfo::IsCompressedEmbeddedObject(rmode)) {
1738 EmbeddedObjectIndex index = AddEmbeddedObject(object);
1739 DCHECK(is_uint32(index));
1740 movl(result, Immediate(static_cast<int>(index), rmode));
1741 } else {
1742 DCHECK(RelocInfo::IsFullEmbeddedObject(rmode));
1743 movq(result, Immediate64(object.address(), rmode));
1744 }
1745 }
1746
1747 void TurboAssembler::Move(Operand dst, Handle<HeapObject> object,
1748 RelocInfo::Mode rmode) {
1749 Move(kScratchRegister, object, rmode);
1750 movq(dst, kScratchRegister);
1751 }
1752
1753 void TurboAssembler::MoveStringConstant(Register result,
1754 const StringConstantBase* string,
1755 RelocInfo::Mode rmode) {
1756 movq_string(result, string);
1757 }
1758
1759 void MacroAssembler::Drop(int stack_elements) {
1760 if (stack_elements > 0) {
1761 addq(rsp, Immediate(stack_elements * kSystemPointerSize));
1762 }
1763 }
1764
1765 void MacroAssembler::DropUnderReturnAddress(int stack_elements,
1766 Register scratch) {
1767 DCHECK_GT(stack_elements, 0);
1768 if (stack_elements == 1) {
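// A single pop with a memory destination moves the return address one slot
// up and bumps rsp, dropping the element beneath it in one instruction.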
1769 popq(MemOperand(rsp, 0));
1770 return;
1771 }
1772
1773 PopReturnAddressTo(scratch);
1774 Drop(stack_elements);
1775 PushReturnAddressFrom(scratch);
1776 }
1777
1778 void TurboAssembler::DropArguments(Register count, ArgumentsCountType type,
1779 ArgumentsCountMode mode) {
1780 int receiver_bytes =
1781 (mode == kCountExcludesReceiver) ? kSystemPointerSize : 0;
1782 switch (type) {
1783 case kCountIsInteger: {
1784 leaq(rsp, Operand(rsp, count, times_system_pointer_size, receiver_bytes));
1785 break;
1786 }
1787 case kCountIsSmi: {
1788 SmiIndex index = SmiToIndex(count, count, kSystemPointerSizeLog2);
1789 leaq(rsp, Operand(rsp, index.reg, index.scale, receiver_bytes));
1790 break;
1791 }
1792 case kCountIsBytes: {
1793 if (receiver_bytes == 0) {
1794 addq(rsp, count);
1795 } else {
1796 leaq(rsp, Operand(rsp, count, times_1, receiver_bytes));
1797 }
1798 break;
1799 }
1800 }
1801 }
1802
1803 void TurboAssembler::DropArguments(Register count, Register scratch,
1804 ArgumentsCountType type,
1805 ArgumentsCountMode mode) {
1806 DCHECK(!AreAliased(count, scratch));
1807 PopReturnAddressTo(scratch);
1808 DropArguments(count, type, mode);
1809 PushReturnAddressFrom(scratch);
1810 }
1811
1812 void TurboAssembler::DropArgumentsAndPushNewReceiver(Register argc,
1813 Register receiver,
1814 Register scratch,
1815 ArgumentsCountType type,
1816 ArgumentsCountMode mode) {
1817 DCHECK(!AreAliased(argc, receiver, scratch));
1818 PopReturnAddressTo(scratch);
1819 DropArguments(argc, type, mode);
1820 Push(receiver);
1821 PushReturnAddressFrom(scratch);
1822 }
1823
1824 void TurboAssembler::DropArgumentsAndPushNewReceiver(Register argc,
1825 Operand receiver,
1826 Register scratch,
1827 ArgumentsCountType type,
1828 ArgumentsCountMode mode) {
1829 DCHECK(!AreAliased(argc, scratch));
1830 DCHECK(!receiver.AddressUsesRegister(scratch));
1831 PopReturnAddressTo(scratch);
1832 DropArguments(argc, type, mode);
1833 Push(receiver);
1834 PushReturnAddressFrom(scratch);
1835 }
1836
1837 void TurboAssembler::Push(Register src) { pushq(src); }
1838
1839 void TurboAssembler::Push(Operand src) { pushq(src); }
1840
1841 void MacroAssembler::PushQuad(Operand src) { pushq(src); }
1842
1843 void TurboAssembler::Push(Immediate value) { pushq(value); }
1844
1845 void MacroAssembler::PushImm32(int32_t imm32) { pushq_imm32(imm32); }
1846
1847 void MacroAssembler::Pop(Register dst) { popq(dst); }
1848
1849 void MacroAssembler::Pop(Operand dst) { popq(dst); }
1850
1851 void MacroAssembler::PopQuad(Operand dst) { popq(dst); }
1852
1853 void TurboAssembler::Jump(const ExternalReference& reference) {
1854 DCHECK(root_array_available());
1855 jmp(Operand(kRootRegister, RootRegisterOffsetForExternalReferenceTableEntry(
1856 isolate(), reference)));
1857 }
1858
1859 void TurboAssembler::Jump(Operand op) { jmp(op); }
1860
1861 void TurboAssembler::Jump(Address destination, RelocInfo::Mode rmode) {
1862 Move(kScratchRegister, destination, rmode);
1863 jmp(kScratchRegister);
1864 }
1865
1866 void TurboAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode,
1867 Condition cc) {
1868 DCHECK_IMPLIES(options().isolate_independent_code,
1869 Builtins::IsIsolateIndependentBuiltin(*code_object));
1870 if (options().inline_offheap_trampolines) {
1871 Builtin builtin = Builtin::kNoBuiltinId;
1872 if (isolate()->builtins()->IsBuiltinHandle(code_object, &builtin)) {
1873 Label skip;
1874 if (cc != always) {
1875 if (cc == never) return;
1876 j(NegateCondition(cc), &skip, Label::kNear);
1877 }
1878 TailCallBuiltin(builtin);
1879 bind(&skip);
1880 return;
1881 }
1882 }
1883 j(cc, code_object, rmode);
1884 }
1885
1886 void MacroAssembler::JumpToInstructionStream(Address entry) {
1887 Move(kOffHeapTrampolineRegister, entry, RelocInfo::OFF_HEAP_TARGET);
1888 jmp(kOffHeapTrampolineRegister);
1889 }
1890
1891 void TurboAssembler::Call(ExternalReference ext) {
1892 LoadAddress(kScratchRegister, ext);
1893 call(kScratchRegister);
1894 }
1895
1896 void TurboAssembler::Call(Operand op) {
1897 if (!CpuFeatures::IsSupported(ATOM)) {
1898 call(op);
1899 } else {
1900 movq(kScratchRegister, op);
1901 call(kScratchRegister);
1902 }
1903 }
1904
1905 void TurboAssembler::Call(Address destination, RelocInfo::Mode rmode) {
1906 Move(kScratchRegister, destination, rmode);
1907 call(kScratchRegister);
1908 }
1909
1910 void TurboAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
1911 DCHECK_IMPLIES(options().isolate_independent_code,
1912 Builtins::IsIsolateIndependentBuiltin(*code_object));
1913 if (options().inline_offheap_trampolines) {
1914 Builtin builtin = Builtin::kNoBuiltinId;
1915 if (isolate()->builtins()->IsBuiltinHandle(code_object, &builtin)) {
1916 // Inline the trampoline.
1917 CallBuiltin(builtin);
1918 return;
1919 }
1920 }
1921 DCHECK(RelocInfo::IsCodeTarget(rmode));
1922 call(code_object, rmode);
1923 }
1924
1925 Operand TurboAssembler::EntryFromBuiltinAsOperand(Builtin builtin) {
1926 DCHECK(root_array_available());
1927 return Operand(kRootRegister, IsolateData::BuiltinEntrySlotOffset(builtin));
1928 }
1929
1930 Operand TurboAssembler::EntryFromBuiltinIndexAsOperand(Register builtin_index) {
1931 if (SmiValuesAre32Bits()) {
1932 // The builtin_index register contains the builtin index as a Smi.
1933 SmiUntag(builtin_index);
1934 return Operand(kRootRegister, builtin_index, times_system_pointer_size,
1935 IsolateData::builtin_entry_table_offset());
1936 } else {
1937 DCHECK(SmiValuesAre31Bits());
1938
1939 // The builtin_index register contains the builtin index as a Smi.
1940 // Untagging is folded into the indexing operand below (we use
1941 // times_half_system_pointer_size since smis are already shifted by one).
1942 return Operand(kRootRegister, builtin_index, times_half_system_pointer_size,
1943 IsolateData::builtin_entry_table_offset());
1944 }
1945 }
1946
1947 void TurboAssembler::CallBuiltinByIndex(Register builtin_index) {
1948 Call(EntryFromBuiltinIndexAsOperand(builtin_index));
1949 }
1950
1951 void TurboAssembler::CallBuiltin(Builtin builtin) {
1952 ASM_CODE_COMMENT_STRING(this, CommentForOffHeapTrampoline("call", builtin));
1953 if (options().short_builtin_calls) {
1954 call(BuiltinEntry(builtin), RelocInfo::RUNTIME_ENTRY);
1955 } else {
1956 Move(kScratchRegister, BuiltinEntry(builtin), RelocInfo::OFF_HEAP_TARGET);
1957 call(kScratchRegister);
1958 }
1959 }
1960
1961 void TurboAssembler::TailCallBuiltin(Builtin builtin) {
1962 ASM_CODE_COMMENT_STRING(this,
1963 CommentForOffHeapTrampoline("tail call", builtin));
1964 if (options().short_builtin_calls) {
1965 jmp(BuiltinEntry(builtin), RelocInfo::RUNTIME_ENTRY);
1966 } else {
1967 Jump(BuiltinEntry(builtin), RelocInfo::OFF_HEAP_TARGET);
1968 }
1969 }
1970
1971 void TurboAssembler::LoadCodeObjectEntry(Register destination,
1972 Register code_object) {
1973 ASM_CODE_COMMENT(this);
1974 // Code objects are called differently depending on whether we are generating
1975 // builtin code (which will later be embedded into the binary) or compiling
1976 // user JS code at runtime.
1977 // * Builtin code runs in --jitless mode and thus must not call into on-heap
1978 // Code targets. Instead, we dispatch through the builtins entry table.
1979 // * Codegen at runtime does not have this restriction and we can use the
1980 // shorter, branchless instruction sequence. The assumption here is that
1981 // targets are usually generated code and not builtin Code objects.
1982
1983 if (options().isolate_independent_code) {
1984 DCHECK(root_array_available());
1985 Label if_code_is_off_heap, out;
1986
1987 // Check whether the Code object is an off-heap trampoline. If so, call its
1988 // (off-heap) entry point directly without going through the (on-heap)
1989 // trampoline. Otherwise, just call the Code object as always.
1990 testl(FieldOperand(code_object, Code::kFlagsOffset),
1991 Immediate(Code::IsOffHeapTrampoline::kMask));
1992 j(not_equal, &if_code_is_off_heap);
1993
1994 // Not an off-heap trampoline, the entry point is at
1995 // Code::raw_instruction_start().
1996 Move(destination, code_object);
1997 addq(destination, Immediate(Code::kHeaderSize - kHeapObjectTag));
1998 jmp(&out);
1999
2000 // An off-heap trampoline, the entry point is loaded from the builtin entry
2001 // table.
2002 bind(&if_code_is_off_heap);
2003 movl(destination, FieldOperand(code_object, Code::kBuiltinIndexOffset));
2004 movq(destination,
2005 Operand(kRootRegister, destination, times_system_pointer_size,
2006 IsolateData::builtin_entry_table_offset()));
2007
2008 bind(&out);
2009 } else {
2010 Move(destination, code_object);
2011 addq(destination, Immediate(Code::kHeaderSize - kHeapObjectTag));
2012 }
2013 }
2014
2015 void TurboAssembler::CallCodeObject(Register code_object) {
2016 LoadCodeObjectEntry(code_object, code_object);
2017 call(code_object);
2018 }
2019
2020 void TurboAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) {
2021 LoadCodeObjectEntry(code_object, code_object);
2022 switch (jump_mode) {
2023 case JumpMode::kJump:
2024 jmp(code_object);
2025 return;
2026 case JumpMode::kPushAndReturn:
2027 pushq(code_object);
2028 Ret();
2029 return;
2030 }
2031 }
2032
2033 void TurboAssembler::LoadCodeDataContainerEntry(
2034 Register destination, Register code_data_container_object) {
2035 ASM_CODE_COMMENT(this);
2036 CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
2037 LoadExternalPointerField(
2038 destination,
2039 FieldOperand(code_data_container_object,
2040 CodeDataContainer::kCodeEntryPointOffset),
2041 kCodeEntryPointTag, kScratchRegister);
2042 }
2043
2044 void TurboAssembler::LoadCodeDataContainerCodeNonBuiltin(
2045 Register destination, Register code_data_container_object) {
2046 ASM_CODE_COMMENT(this);
2047 LoadTaggedPointerField(
2048 destination,
2049 FieldOperand(code_data_container_object, CodeDataContainer::kCodeOffset));
2050 }
2051
2052 void TurboAssembler::CallCodeDataContainerObject(
2053 Register code_data_container_object) {
2054 LoadCodeDataContainerEntry(code_data_container_object,
2055 code_data_container_object);
2056 call(code_data_container_object);
2057 }
2058
2059 void TurboAssembler::JumpCodeDataContainerObject(
2060 Register code_data_container_object, JumpMode jump_mode) {
2061 LoadCodeDataContainerEntry(code_data_container_object,
2062 code_data_container_object);
2063 switch (jump_mode) {
2064 case JumpMode::kJump:
2065 jmp(code_data_container_object);
2066 return;
2067 case JumpMode::kPushAndReturn:
2068 pushq(code_data_container_object);
2069 Ret();
2070 return;
2071 }
2072 }
2073
2074 void TurboAssembler::LoadCodeTEntry(Register destination, Register code) {
2075 ASM_CODE_COMMENT(this);
2076 if (V8_EXTERNAL_CODE_SPACE_BOOL) {
2077 LoadCodeDataContainerEntry(destination, code);
2078 } else {
2079 leaq(destination, Operand(code, Code::kHeaderSize - kHeapObjectTag));
2080 }
2081 }
2082
2083 void TurboAssembler::CallCodeTObject(Register code) {
2084 if (V8_EXTERNAL_CODE_SPACE_BOOL) {
2085 CallCodeDataContainerObject(code);
2086 } else {
2087 CallCodeObject(code);
2088 }
2089 }
2090
2091 void TurboAssembler::JumpCodeTObject(Register code, JumpMode jump_mode) {
2092 if (V8_EXTERNAL_CODE_SPACE_BOOL) {
2093 JumpCodeDataContainerObject(code, jump_mode);
2094 } else {
2095 JumpCodeObject(code, jump_mode);
2096 }
2097 }
2098
2099 void TurboAssembler::PextrdPreSse41(Register dst, XMMRegister src,
2100 uint8_t imm8) {
2101 if (imm8 == 0) {
2102 Movd(dst, src);
2103 return;
2104 }
2105 DCHECK_EQ(1, imm8);
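// Lane 1 occupies bits 63..32 of the register: copy the low quadword to a
// general-purpose register and shift it down.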
2106 movq(dst, src);
2107 shrq(dst, Immediate(32));
2108 }
2109
2110 namespace {
2111 template <typename Op>
2112 void PinsrdPreSse41Helper(TurboAssembler* tasm, XMMRegister dst, Op src,
2113 uint8_t imm8, uint32_t* load_pc_offset) {
2114 tasm->Movd(kScratchDoubleReg, src);
2115 if (load_pc_offset) *load_pc_offset = tasm->pc_offset();
2116 if (imm8 == 1) {
2117 tasm->punpckldq(dst, kScratchDoubleReg);
2118 } else {
2119 DCHECK_EQ(0, imm8);
2120 tasm->Movss(dst, kScratchDoubleReg);
2121 }
2122 }
2123 } // namespace
2124
2125 void TurboAssembler::PinsrdPreSse41(XMMRegister dst, Register src, uint8_t imm8,
2126 uint32_t* load_pc_offset) {
2127 PinsrdPreSse41Helper(this, dst, src, imm8, load_pc_offset);
2128 }
2129
2130 void TurboAssembler::PinsrdPreSse41(XMMRegister dst, Operand src, uint8_t imm8,
2131 uint32_t* load_pc_offset) {
2132 PinsrdPreSse41Helper(this, dst, src, imm8, load_pc_offset);
2133 }
2134
2135 void TurboAssembler::Pinsrq(XMMRegister dst, XMMRegister src1, Register src2,
2136 uint8_t imm8, uint32_t* load_pc_offset) {
2137 PinsrHelper(this, &Assembler::vpinsrq, &Assembler::pinsrq, dst, src1, src2,
2138 imm8, load_pc_offset, {SSE4_1});
2139 }
2140
2141 void TurboAssembler::Pinsrq(XMMRegister dst, XMMRegister src1, Operand src2,
2142 uint8_t imm8, uint32_t* load_pc_offset) {
2143 PinsrHelper(this, &Assembler::vpinsrq, &Assembler::pinsrq, dst, src1, src2,
2144 imm8, load_pc_offset, {SSE4_1});
2145 }
2146
2147 void TurboAssembler::Lzcntl(Register dst, Register src) {
2148 if (CpuFeatures::IsSupported(LZCNT)) {
2149 CpuFeatureScope scope(this, LZCNT);
2150 lzcntl(dst, src);
2151 return;
2152 }
2153 Label not_zero_src;
2154 bsrl(dst, src);
2155 j(not_zero, &not_zero_src, Label::kNear);
2156 Move(dst, 63); // 63^31 == 32
2157 bind(&not_zero_src);
2158 xorl(dst, Immediate(31)); // for x in [0..31], 31^x == 31 - x
2159 }
2160
2161 void TurboAssembler::Lzcntl(Register dst, Operand src) {
2162 if (CpuFeatures::IsSupported(LZCNT)) {
2163 CpuFeatureScope scope(this, LZCNT);
2164 lzcntl(dst, src);
2165 return;
2166 }
2167 Label not_zero_src;
2168 bsrl(dst, src);
2169 j(not_zero, &not_zero_src, Label::kNear);
2170 Move(dst, 63); // 63^31 == 32
2171 bind(&not_zero_src);
2172 xorl(dst, Immediate(31)); // for x in [0..31], 31^x == 31 - x
2173 }
2174
2175 void TurboAssembler::Lzcntq(Register dst, Register src) {
2176 if (CpuFeatures::IsSupported(LZCNT)) {
2177 CpuFeatureScope scope(this, LZCNT);
2178 lzcntq(dst, src);
2179 return;
2180 }
2181 Label not_zero_src;
2182 bsrq(dst, src);
2183 j(not_zero, &not_zero_src, Label::kNear);
2184 Move(dst, 127); // 127^63 == 64
2185 bind(&not_zero_src);
2186 xorl(dst, Immediate(63)); // for x in [0..63], 63^x == 63 - x
2187 }
2188
2189 void TurboAssembler::Lzcntq(Register dst, Operand src) {
2190 if (CpuFeatures::IsSupported(LZCNT)) {
2191 CpuFeatureScope scope(this, LZCNT);
2192 lzcntq(dst, src);
2193 return;
2194 }
2195 Label not_zero_src;
2196 bsrq(dst, src);
2197 j(not_zero, &not_zero_src, Label::kNear);
2198 Move(dst, 127); // 127^63 == 64
2199 bind(&not_zero_src);
2200 xorl(dst, Immediate(63)); // for x in [0..63], 63^x == 63 - x
2201 }
2202
2203 void TurboAssembler::Tzcntq(Register dst, Register src) {
2204 if (CpuFeatures::IsSupported(BMI1)) {
2205 CpuFeatureScope scope(this, BMI1);
2206 tzcntq(dst, src);
2207 return;
2208 }
2209 Label not_zero_src;
2210 bsfq(dst, src);
2211 j(not_zero, &not_zero_src, Label::kNear);
2212 // Define the result of tzcnt(0) separately, because bsf(0) is undefined.
2213 Move(dst, 64);
2214 bind(&not_zero_src);
2215 }
2216
2217 void TurboAssembler::Tzcntq(Register dst, Operand src) {
2218 if (CpuFeatures::IsSupported(BMI1)) {
2219 CpuFeatureScope scope(this, BMI1);
2220 tzcntq(dst, src);
2221 return;
2222 }
2223 Label not_zero_src;
2224 bsfq(dst, src);
2225 j(not_zero, &not_zero_src, Label::kNear);
2226 // Define the result of tzcnt(0) separately, because bsf(0) is undefined.
2227 Move(dst, 64);
2228 bind(&not_zero_src);
2229 }
2230
2231 void TurboAssembler::Tzcntl(Register dst, Register src) {
2232 if (CpuFeatures::IsSupported(BMI1)) {
2233 CpuFeatureScope scope(this, BMI1);
2234 tzcntl(dst, src);
2235 return;
2236 }
2237 Label not_zero_src;
2238 bsfl(dst, src);
2239 j(not_zero, &not_zero_src, Label::kNear);
2240 Move(dst, 32); // The result of tzcnt is 32 if src = 0.
2241 bind(&not_zero_src);
2242 }
2243
2244 void TurboAssembler::Tzcntl(Register dst, Operand src) {
2245 if (CpuFeatures::IsSupported(BMI1)) {
2246 CpuFeatureScope scope(this, BMI1);
2247 tzcntl(dst, src);
2248 return;
2249 }
2250 Label not_zero_src;
2251 bsfl(dst, src);
2252 j(not_zero, &not_zero_src, Label::kNear);
2253 Move(dst, 32); // The result of tzcnt is 32 if src = 0.
2254 bind(&not_zero_src);
2255 }
2256
2257 void TurboAssembler::Popcntl(Register dst, Register src) {
2258 if (CpuFeatures::IsSupported(POPCNT)) {
2259 CpuFeatureScope scope(this, POPCNT);
2260 popcntl(dst, src);
2261 return;
2262 }
2263 UNREACHABLE();
2264 }
2265
2266 void TurboAssembler::Popcntl(Register dst, Operand src) {
2267 if (CpuFeatures::IsSupported(POPCNT)) {
2268 CpuFeatureScope scope(this, POPCNT);
2269 popcntl(dst, src);
2270 return;
2271 }
2272 UNREACHABLE();
2273 }
2274
2275 void TurboAssembler::Popcntq(Register dst, Register src) {
2276 if (CpuFeatures::IsSupported(POPCNT)) {
2277 CpuFeatureScope scope(this, POPCNT);
2278 popcntq(dst, src);
2279 return;
2280 }
2281 UNREACHABLE();
2282 }
2283
2284 void TurboAssembler::Popcntq(Register dst, Operand src) {
2285 if (CpuFeatures::IsSupported(POPCNT)) {
2286 CpuFeatureScope scope(this, POPCNT);
2287 popcntq(dst, src);
2288 return;
2289 }
2290 UNREACHABLE();
2291 }
2292
2293 void MacroAssembler::PushStackHandler() {
2294 // Adjust this code if not the case.
2295 STATIC_ASSERT(StackHandlerConstants::kSize == 2 * kSystemPointerSize);
2296 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
2297
2298 Push(Immediate(0)); // Padding.
2299
2300 // Link the current handler as the next handler.
2301 ExternalReference handler_address =
2302 ExternalReference::Create(IsolateAddressId::kHandlerAddress, isolate());
2303 Push(ExternalReferenceAsOperand(handler_address));
2304
2305 // Set this new handler as the current one.
2306 movq(ExternalReferenceAsOperand(handler_address), rsp);
2307 }
2308
2309 void MacroAssembler::PopStackHandler() {
2310 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
2311 ExternalReference handler_address =
2312 ExternalReference::Create(IsolateAddressId::kHandlerAddress, isolate());
2313 Pop(ExternalReferenceAsOperand(handler_address));
2314 addq(rsp, Immediate(StackHandlerConstants::kSize - kSystemPointerSize));
2315 }
2316
2317 void TurboAssembler::Ret() { ret(0); }
2318
2319 void TurboAssembler::Ret(int bytes_dropped, Register scratch) {
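// ret only takes a 16-bit immediate, so larger adjustments are done by
// rewriting rsp around the return address.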
2320 if (is_uint16(bytes_dropped)) {
2321 ret(bytes_dropped);
2322 } else {
2323 PopReturnAddressTo(scratch);
2324 addq(rsp, Immediate(bytes_dropped));
2325 PushReturnAddressFrom(scratch);
2326 ret(0);
2327 }
2328 }
2329
2330 void MacroAssembler::CmpObjectType(Register heap_object, InstanceType type,
2331 Register map) {
2332 LoadMap(map, heap_object);
2333 CmpInstanceType(map, type);
2334 }
2335
2336 void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
2337 cmpw(FieldOperand(map, Map::kInstanceTypeOffset), Immediate(type));
2338 }
2339
2340 void MacroAssembler::CmpInstanceTypeRange(Register map,
2341 Register instance_type_out,
2342 InstanceType lower_limit,
2343 InstanceType higher_limit) {
2344 DCHECK_LT(lower_limit, higher_limit);
2345 movzxwl(instance_type_out, FieldOperand(map, Map::kInstanceTypeOffset));
2346 CompareRange(instance_type_out, lower_limit, higher_limit);
2347 }
2348
2349 void TurboAssembler::AssertNotSmi(Register object) {
2350 if (!FLAG_debug_code) return;
2351 ASM_CODE_COMMENT(this);
2352 Condition is_smi = CheckSmi(object);
2353 Check(NegateCondition(is_smi), AbortReason::kOperandIsASmi);
2354 }
2355
2356 void TurboAssembler::AssertSmi(Register object) {
2357 if (!FLAG_debug_code) return;
2358 ASM_CODE_COMMENT(this);
2359 Condition is_smi = CheckSmi(object);
2360 Check(is_smi, AbortReason::kOperandIsNotASmi);
2361 }
2362
2363 void TurboAssembler::AssertSmi(Operand object) {
2364 if (!FLAG_debug_code) return;
2365 ASM_CODE_COMMENT(this);
2366 Condition is_smi = CheckSmi(object);
2367 Check(is_smi, AbortReason::kOperandIsNotASmi);
2368 }
2369
2370 void TurboAssembler::AssertZeroExtended(Register int32_register) {
2371 if (!FLAG_debug_code) return;
2372 ASM_CODE_COMMENT(this);
2373 DCHECK_NE(int32_register, kScratchRegister);
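// A zero-extended 32-bit value must compare below 2^32.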
2374 movq(kScratchRegister, int64_t{0x0000000100000000});
2375 cmpq(kScratchRegister, int32_register);
2376 Check(above, AbortReason::k32BitValueInRegisterIsNotZeroExtended);
2377 }
2378
2379 void MacroAssembler::AssertCodeT(Register object) {
2380 if (!FLAG_debug_code) return;
2381 ASM_CODE_COMMENT(this);
2382 testb(object, Immediate(kSmiTagMask));
2383 Check(not_equal, AbortReason::kOperandIsNotACodeT);
2384 Push(object);
2385 LoadMap(object, object);
2386 CmpInstanceType(object, CODET_TYPE);
2387 Pop(object);
2388 Check(equal, AbortReason::kOperandIsNotACodeT);
2389 }
2390
2391 void MacroAssembler::AssertConstructor(Register object) {
2392 if (!FLAG_debug_code) return;
2393 ASM_CODE_COMMENT(this);
2394 testb(object, Immediate(kSmiTagMask));
2395 Check(not_equal, AbortReason::kOperandIsASmiAndNotAConstructor);
2396 Push(object);
2397 LoadMap(object, object);
2398 testb(FieldOperand(object, Map::kBitFieldOffset),
2399 Immediate(Map::Bits1::IsConstructorBit::kMask));
2400 Pop(object);
2401 Check(not_zero, AbortReason::kOperandIsNotAConstructor);
2402 }
2403
2404 void MacroAssembler::AssertFunction(Register object) {
2405 if (!FLAG_debug_code) return;
2406 ASM_CODE_COMMENT(this);
2407 testb(object, Immediate(kSmiTagMask));
2408 Check(not_equal, AbortReason::kOperandIsASmiAndNotAFunction);
2409 Push(object);
2410 LoadMap(object, object);
2411 CmpInstanceTypeRange(object, object, FIRST_JS_FUNCTION_TYPE,
2412 LAST_JS_FUNCTION_TYPE);
2413 Pop(object);
2414 Check(below_equal, AbortReason::kOperandIsNotAFunction);
2415 }
2416
2417 void MacroAssembler::AssertBoundFunction(Register object) {
2418 if (!FLAG_debug_code) return;
2419 ASM_CODE_COMMENT(this);
2420 testb(object, Immediate(kSmiTagMask));
2421 Check(not_equal, AbortReason::kOperandIsASmiAndNotABoundFunction);
2422 Push(object);
2423 CmpObjectType(object, JS_BOUND_FUNCTION_TYPE, object);
2424 Pop(object);
2425 Check(equal, AbortReason::kOperandIsNotABoundFunction);
2426 }
2427
2428 void MacroAssembler::AssertGeneratorObject(Register object) {
2429 if (!FLAG_debug_code) return;
2430 ASM_CODE_COMMENT(this);
2431 testb(object, Immediate(kSmiTagMask));
2432 Check(not_equal, AbortReason::kOperandIsASmiAndNotAGeneratorObject);
2433
2434 // Load map
2435 Register map = object;
2436 Push(object);
2437 LoadMap(map, object);
2438
2439 Label do_check;
2440 // Check if JSGeneratorObject
2441 CmpInstanceType(map, JS_GENERATOR_OBJECT_TYPE);
2442 j(equal, &do_check);
2443
2444 // Check if JSAsyncFunctionObject
2445 CmpInstanceType(map, JS_ASYNC_FUNCTION_OBJECT_TYPE);
2446 j(equal, &do_check);
2447
2448 // Check if JSAsyncGeneratorObject
2449 CmpInstanceType(map, JS_ASYNC_GENERATOR_OBJECT_TYPE);
2450
2451 bind(&do_check);
2452 // Restore generator object to register and perform assertion
2453 Pop(object);
2454 Check(equal, AbortReason::kOperandIsNotAGeneratorObject);
2455 }
2456
2457 void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) {
2458 if (!FLAG_debug_code) return;
2459 ASM_CODE_COMMENT(this);
2460 Label done_checking;
2461 AssertNotSmi(object);
2462 Cmp(object, isolate()->factory()->undefined_value());
2463 j(equal, &done_checking);
2464 Register map = object;
2465 Push(object);
2466 LoadMap(map, object);
2467 Cmp(map, isolate()->factory()->allocation_site_map());
2468 Pop(object);
2469 Assert(equal, AbortReason::kExpectedUndefinedOrCell);
2470 bind(&done_checking);
2471 }
2472
2473 void MacroAssembler::LoadWeakValue(Register in_out, Label* target_if_cleared) {
2474 cmpl(in_out, Immediate(kClearedWeakHeapObjectLower32));
2475 j(equal, target_if_cleared);
2476
2477 andq(in_out, Immediate(~static_cast<int32_t>(kWeakHeapObjectMask)));
2478 }
2479
2480 void MacroAssembler::EmitIncrementCounter(StatsCounter* counter, int value) {
2481 DCHECK_GT(value, 0);
2482 if (FLAG_native_code_counters && counter->Enabled()) {
2483 ASM_CODE_COMMENT(this);
2484 Operand counter_operand =
2485 ExternalReferenceAsOperand(ExternalReference::Create(counter));
2486 // This operation has to be exactly 32-bit wide in case the external
2487 // reference table redirects the counter to a uint32_t dummy_stats_counter_
2488 // field.
2489 if (value == 1) {
2490 incl(counter_operand);
2491 } else {
2492 addl(counter_operand, Immediate(value));
2493 }
2494 }
2495 }
2496
2497 void MacroAssembler::EmitDecrementCounter(StatsCounter* counter, int value) {
2498 DCHECK_GT(value, 0);
2499 if (FLAG_native_code_counters && counter->Enabled()) {
2500 ASM_CODE_COMMENT(this);
2501 Operand counter_operand =
2502 ExternalReferenceAsOperand(ExternalReference::Create(counter));
2503 // This operation has to be exactly 32-bit wide in case the external
2504 // reference table redirects the counter to a uint32_t dummy_stats_counter_
2505 // field.
2506 if (value == 1) {
2507 decl(counter_operand);
2508 } else {
2509 subl(counter_operand, Immediate(value));
2510 }
2511 }
2512 }
2513
2514 void MacroAssembler::InvokeFunction(Register function, Register new_target,
2515 Register actual_parameter_count,
2516 InvokeType type) {
2517 ASM_CODE_COMMENT(this);
2518 LoadTaggedPointerField(
2519 rbx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
2520 movzxwq(rbx,
2521 FieldOperand(rbx, SharedFunctionInfo::kFormalParameterCountOffset));
2522
2523 InvokeFunction(function, new_target, rbx, actual_parameter_count, type);
2524 }
2525
2526 void MacroAssembler::InvokeFunction(Register function, Register new_target,
2527 Register expected_parameter_count,
2528 Register actual_parameter_count,
2529 InvokeType type) {
2530 DCHECK_EQ(function, rdi);
2531 LoadTaggedPointerField(rsi,
2532 FieldOperand(function, JSFunction::kContextOffset));
2533 InvokeFunctionCode(rdi, new_target, expected_parameter_count,
2534 actual_parameter_count, type);
2535 }
2536
2537 void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
2538 Register expected_parameter_count,
2539 Register actual_parameter_count,
2540 InvokeType type) {
2541 ASM_CODE_COMMENT(this);
2542 // You can't call a function without a valid frame.
2543 DCHECK_IMPLIES(type == InvokeType::kCall, has_frame());
2544 DCHECK_EQ(function, rdi);
2545 DCHECK_IMPLIES(new_target.is_valid(), new_target == rdx);
2546
2547 // On function call, call into the debugger if necessary.
2548 Label debug_hook, continue_after_hook;
2549 {
2550 ExternalReference debug_hook_active =
2551 ExternalReference::debug_hook_on_function_call_address(isolate());
2552 Operand debug_hook_active_operand =
2553 ExternalReferenceAsOperand(debug_hook_active);
2554 cmpb(debug_hook_active_operand, Immediate(0));
2555 j(not_equal, &debug_hook);
2556 }
2557 bind(&continue_after_hook);
2558
2559 // Clear the new.target register if not given.
2560 if (!new_target.is_valid()) {
2561 LoadRoot(rdx, RootIndex::kUndefinedValue);
2562 }
2563
2564 Label done;
2565 InvokePrologue(expected_parameter_count, actual_parameter_count, &done, type);
2566 // We call indirectly through the code field in the function to
2567 // allow recompilation to take effect without changing any of the
2568 // call sites.
2569 static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
2570 LoadTaggedPointerField(rcx, FieldOperand(function, JSFunction::kCodeOffset));
2571 switch (type) {
2572 case InvokeType::kCall:
2573 CallCodeTObject(rcx);
2574 break;
2575 case InvokeType::kJump:
2576 JumpCodeTObject(rcx);
2577 break;
2578 }
2579 jmp(&done, Label::kNear);
2580
2581 // Deferred debug hook.
2582 bind(&debug_hook);
2583 CallDebugOnFunctionCall(function, new_target, expected_parameter_count,
2584 actual_parameter_count);
2585 jmp(&continue_after_hook);
2586
2587 bind(&done);
2588 }
2589
2590 Operand MacroAssembler::StackLimitAsOperand(StackLimitKind kind) {
2591 DCHECK(root_array_available());
2592 Isolate* isolate = this->isolate();
2593 ExternalReference limit =
2594 kind == StackLimitKind::kRealStackLimit
2595 ? ExternalReference::address_of_real_jslimit(isolate)
2596 : ExternalReference::address_of_jslimit(isolate);
2597 DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit));
2598
2599 intptr_t offset =
2600 TurboAssembler::RootRegisterOffsetForExternalReference(isolate, limit);
2601 CHECK(is_int32(offset));
2602 return Operand(kRootRegister, static_cast<int32_t>(offset));
2603 }
2604
2605 void MacroAssembler::StackOverflowCheck(
2606 Register num_args, Label* stack_overflow,
2607 Label::Distance stack_overflow_distance) {
2608 ASM_CODE_COMMENT(this);
2609 DCHECK_NE(num_args, kScratchRegister);
2610 // Check the stack for overflow. We are not trying to catch
2611 // interruptions (e.g. debug break and preemption) here, so the "real stack
2612 // limit" is checked.
2613 movq(kScratchRegister, rsp);
2614 // Make kScratchRegister the space we have left. The stack might already be
2615 // overflowed here which will cause kScratchRegister to become negative.
2616 subq(kScratchRegister, StackLimitAsOperand(StackLimitKind::kRealStackLimit));
2617 // TODO(victorgomes): Use ia32 approach with leaq, since it requires less
2618 // instructions.
2619 sarq(kScratchRegister, Immediate(kSystemPointerSizeLog2));
2620 // Check if the arguments will overflow the stack.
2621 cmpq(kScratchRegister, num_args);
2622 // Signed comparison.
2623 // TODO(victorgomes): Save some bytes in the builtins that use stack checks
2624 // by jumping to a builtin that throws the exception.
2625 j(less_equal, stack_overflow, stack_overflow_distance);
2626 }
2627
2628 void MacroAssembler::InvokePrologue(Register expected_parameter_count,
2629 Register actual_parameter_count,
2630 Label* done, InvokeType type) {
2631 ASM_CODE_COMMENT(this);
2632 if (expected_parameter_count == actual_parameter_count) {
2633 Move(rax, actual_parameter_count);
2634 return;
2635 }
2636 Label regular_invoke;
2637 // If the expected parameter count is equal to the adaptor sentinel, no need
2638 // to push undefined value as arguments.
2639 if (kDontAdaptArgumentsSentinel != 0) {
2640 cmpl(expected_parameter_count, Immediate(kDontAdaptArgumentsSentinel));
2641 j(equal, &regular_invoke, Label::kFar);
2642 }
2643
2644 // If overapplication or if the actual argument count is equal to the
2645 // formal parameter count, no need to push extra undefined values.
2646 subq(expected_parameter_count, actual_parameter_count);
2647 j(less_equal, &regular_invoke, Label::kFar);
2648
2649 Label stack_overflow;
2650 StackOverflowCheck(expected_parameter_count, &stack_overflow);
2651
2652 // Underapplication. Move the arguments already in the stack, including the
2653 // receiver and the return address.
2654 {
2655 Label copy, check;
2656 Register src = r8, dest = rsp, num = r9, current = r11;
2657 movq(src, rsp);
2658 leaq(kScratchRegister,
2659 Operand(expected_parameter_count, times_system_pointer_size, 0));
2660 AllocateStackSpace(kScratchRegister);
2661 // Extra words are the receiver (if not already included in argc) and the
2662 // return address (if a jump).
2663 int extra_words =
2664 type == InvokeType::kCall ? 0 : kReturnAddressStackSlotCount;
2665 if (!kJSArgcIncludesReceiver) extra_words++;
2666
2667 leaq(num, Operand(rax, extra_words)); // Number of words to copy.
2668 Move(current, 0);
2669 // Fall-through to the loop body because there are non-zero words to copy.
2670 bind(&copy);
2671 movq(kScratchRegister,
2672 Operand(src, current, times_system_pointer_size, 0));
2673 movq(Operand(dest, current, times_system_pointer_size, 0),
2674 kScratchRegister);
2675 incq(current);
2676 bind(&check);
2677 cmpq(current, num);
2678 j(less, &copy);
2679 leaq(r8, Operand(rsp, num, times_system_pointer_size, 0));
2680 }
2681 // Fill remaining expected arguments with undefined values.
2682 LoadRoot(kScratchRegister, RootIndex::kUndefinedValue);
2683 {
2684 Label loop;
2685 bind(&loop);
2686 decq(expected_parameter_count);
2687 movq(Operand(r8, expected_parameter_count, times_system_pointer_size, 0),
2688 kScratchRegister);
2689 j(greater, &loop, Label::kNear);
2690 }
2691 jmp(&regular_invoke);
2692
2693 bind(&stack_overflow);
2694 {
2695 FrameScope frame(
2696 this, has_frame() ? StackFrame::NO_FRAME_TYPE : StackFrame::INTERNAL);
2697 CallRuntime(Runtime::kThrowStackOverflow);
2698 int3(); // This should be unreachable.
2699 }
2700 bind(&regular_invoke);
2701 }
2702
2703 void MacroAssembler::CallDebugOnFunctionCall(Register fun, Register new_target,
2704 Register expected_parameter_count,
2705 Register actual_parameter_count) {
2706 ASM_CODE_COMMENT(this);
2707 FrameScope frame(
2708 this, has_frame() ? StackFrame::NO_FRAME_TYPE : StackFrame::INTERNAL);
2709
2710 SmiTag(expected_parameter_count);
2711 Push(expected_parameter_count);
2712
2713 SmiTag(actual_parameter_count);
2714 Push(actual_parameter_count);
2715 SmiUntag(actual_parameter_count);
2716
2717 if (new_target.is_valid()) {
2718 Push(new_target);
2719 }
2720 Push(fun);
2721 Push(fun);
2722 // The receiver is located two words above the base pointer (past the
2722 // saved rbp and the return address).
2723 Operand receiver_op = Operand(rbp, kSystemPointerSize * 2);
2724 Push(receiver_op);
2725 CallRuntime(Runtime::kDebugOnFunctionCall);
2726 Pop(fun);
2727 if (new_target.is_valid()) {
2728 Pop(new_target);
2729 }
2730 Pop(actual_parameter_count);
2731 SmiUntag(actual_parameter_count);
2732 Pop(expected_parameter_count);
2733 SmiUntag(expected_parameter_count);
2734 }
2735
2736 void TurboAssembler::StubPrologue(StackFrame::Type type) {
2737 ASM_CODE_COMMENT(this);
2738 pushq(rbp); // Caller's frame pointer.
2739 movq(rbp, rsp);
2740 Push(Immediate(StackFrame::TypeToMarker(type)));
2741 }
2742
2743 void TurboAssembler::Prologue() {
2744 ASM_CODE_COMMENT(this);
2745 pushq(rbp); // Caller's frame pointer.
2746 movq(rbp, rsp);
2747 Push(kContextRegister); // Callee's context.
2748 Push(kJSFunctionRegister); // Callee's JS function.
2749 Push(kJavaScriptCallArgCountRegister); // Actual argument count.
2750 }
2751
2752 void TurboAssembler::EnterFrame(StackFrame::Type type) {
2753 ASM_CODE_COMMENT(this);
2754 pushq(rbp);
2755 movq(rbp, rsp);
2756 if (!StackFrame::IsJavaScript(type)) {
2757 Push(Immediate(StackFrame::TypeToMarker(type)));
2758 }
2759 #if V8_ENABLE_WEBASSEMBLY
2760 if (type == StackFrame::WASM) Push(kWasmInstanceRegister);
2761 #endif // V8_ENABLE_WEBASSEMBLY
2762 }
2763
2764 void TurboAssembler::LeaveFrame(StackFrame::Type type) {
2765 ASM_CODE_COMMENT(this);
2766 // TODO(v8:11429): Consider passing BASELINE instead, and checking for
2767 // IsJSFrame or similar. Could then unify with manual frame leaves in the
2768 // interpreter too.
2769 if (FLAG_debug_code && !StackFrame::IsJavaScript(type)) {
2770 cmpq(Operand(rbp, CommonFrameConstants::kContextOrFrameTypeOffset),
2771 Immediate(StackFrame::TypeToMarker(type)));
2772 Check(equal, AbortReason::kStackFrameTypesMustMatch);
2773 }
2774 movq(rsp, rbp);
2775 popq(rbp);
2776 }
2777
2778 #if defined(V8_TARGET_OS_WIN) || defined(V8_TARGET_OS_MACOSX)
2779 void TurboAssembler::AllocateStackSpace(Register bytes_scratch) {
2780 ASM_CODE_COMMENT(this);
2781 // On Windows and on macOS, we cannot increment the stack size by more than
2782 // one page (minimum page size is 4KB) without accessing at least one byte on
2783 // the page. Check this:
2784 // https://msdn.microsoft.com/en-us/library/aa227153(v=vs.60).aspx.
2785 Label check_offset;
2786 Label touch_next_page;
2787 jmp(&check_offset);
2788 bind(&touch_next_page);
2789 subq(rsp, Immediate(kStackPageSize));
2790 // Just to touch the page, before we increment further.
2791 movb(Operand(rsp, 0), Immediate(0));
2792 subq(bytes_scratch, Immediate(kStackPageSize));
2793
2794 bind(&check_offset);
2795 cmpq(bytes_scratch, Immediate(kStackPageSize));
2796 j(greater_equal, &touch_next_page);
2797
2798 subq(rsp, bytes_scratch);
2799 }
2800
2801 void TurboAssembler::AllocateStackSpace(int bytes) {
2802 ASM_CODE_COMMENT(this);
2803 DCHECK_GE(bytes, 0);
2804 while (bytes >= kStackPageSize) {
2805 subq(rsp, Immediate(kStackPageSize));
2806 movb(Operand(rsp, 0), Immediate(0));
2807 bytes -= kStackPageSize;
2808 }
2809 if (bytes == 0) return;
2810 subq(rsp, Immediate(bytes));
2811 }
2812 #endif
2813
2814 void MacroAssembler::EnterExitFramePrologue(Register saved_rax_reg,
2815 StackFrame::Type frame_type) {
2816 ASM_CODE_COMMENT(this);
2817 DCHECK(frame_type == StackFrame::EXIT ||
2818 frame_type == StackFrame::BUILTIN_EXIT);
2819
2820 // Set up the frame structure on the stack.
2821 // All constants are relative to the frame pointer of the exit frame.
2822 DCHECK_EQ(kFPOnStackSize + kPCOnStackSize,
2823 ExitFrameConstants::kCallerSPDisplacement);
2824 DCHECK_EQ(kFPOnStackSize, ExitFrameConstants::kCallerPCOffset);
2825 DCHECK_EQ(0 * kSystemPointerSize, ExitFrameConstants::kCallerFPOffset);
2826 pushq(rbp);
2827 movq(rbp, rsp);
2828
2829 // Reserve room for entry stack pointer.
2830 Push(Immediate(StackFrame::TypeToMarker(frame_type)));
2831 DCHECK_EQ(-2 * kSystemPointerSize, ExitFrameConstants::kSPOffset);
2832 Push(Immediate(0)); // Saved entry sp, patched before call.
2833
2834 // Save the frame pointer and the context in top.
2835 if (saved_rax_reg != no_reg) {
2836 movq(saved_rax_reg, rax); // Back up rax in a callee-saved register.
2837 }
2838
2839 Store(
2840 ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate()),
2841 rbp);
2842 Store(ExternalReference::Create(IsolateAddressId::kContextAddress, isolate()),
2843 rsi);
2844 Store(
2845 ExternalReference::Create(IsolateAddressId::kCFunctionAddress, isolate()),
2846 rbx);
2847 }
2848
2849 #ifdef V8_TARGET_OS_WIN
2850 static const int kRegisterPassedArguments = 4;
2851 #else
2852 static const int kRegisterPassedArguments = 6;
2853 #endif
2854
2855 void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space,
2856 bool save_doubles) {
2857 ASM_CODE_COMMENT(this);
2858 #ifdef V8_TARGET_OS_WIN
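// The Windows x64 ABI requires the caller to reserve shadow (home) space
// for the four register-passed arguments.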
2859 arg_stack_space += kRegisterPassedArguments;
2860 #endif
2861 // Optionally save all XMM registers.
2862 if (save_doubles) {
2863 int space = XMMRegister::kNumRegisters * kDoubleSize +
2864 arg_stack_space * kSystemPointerSize;
2865 AllocateStackSpace(space);
2866 int offset = -ExitFrameConstants::kFixedFrameSizeFromFp;
2867 const RegisterConfiguration* config = RegisterConfiguration::Default();
2868 for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
2869 DoubleRegister reg =
2870 DoubleRegister::from_code(config->GetAllocatableDoubleCode(i));
2871 Movsd(Operand(rbp, offset - ((i + 1) * kDoubleSize)), reg);
2872 }
2873 } else if (arg_stack_space > 0) {
2874 AllocateStackSpace(arg_stack_space * kSystemPointerSize);
2875 }
2876
2877 // Get the required frame alignment for the OS.
2878 const int kFrameAlignment = base::OS::ActivationFrameAlignment();
2879 if (kFrameAlignment > 0) {
2880 DCHECK(base::bits::IsPowerOfTwo(kFrameAlignment));
2881 DCHECK(is_int8(kFrameAlignment));
2882 andq(rsp, Immediate(-kFrameAlignment));
2883 }
2884
2885 // Patch the saved entry sp.
2886 movq(Operand(rbp, ExitFrameConstants::kSPOffset), rsp);
2887 }
2888
2889 void MacroAssembler::EnterExitFrame(int arg_stack_space, bool save_doubles,
2890 StackFrame::Type frame_type) {
2891 ASM_CODE_COMMENT(this);
2892 Register saved_rax_reg = r12;
2893 EnterExitFramePrologue(saved_rax_reg, frame_type);
2894
2895 // Set up argv in callee-saved register r15. It is reused in LeaveExitFrame,
2896 // so it must be retained across the C-call.
2897 int offset = StandardFrameConstants::kCallerSPOffset - kSystemPointerSize;
2898 leaq(r15, Operand(rbp, saved_rax_reg, times_system_pointer_size, offset));
2899
2900 EnterExitFrameEpilogue(arg_stack_space, save_doubles);
2901 }
2902
2903 void MacroAssembler::EnterApiExitFrame(int arg_stack_space) {
2904 ASM_CODE_COMMENT(this);
2905 EnterExitFramePrologue(no_reg, StackFrame::EXIT);
2906 EnterExitFrameEpilogue(arg_stack_space, false);
2907 }
2908
2909 void MacroAssembler::LeaveExitFrame(bool save_doubles, bool pop_arguments) {
2910 ASM_CODE_COMMENT(this);
2911 // Registers:
2912 // r15 : argv
2913 if (save_doubles) {
2914 int offset = -ExitFrameConstants::kFixedFrameSizeFromFp;
2915 const RegisterConfiguration* config = RegisterConfiguration::Default();
2916 for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
2917 DoubleRegister reg =
2918 DoubleRegister::from_code(config->GetAllocatableDoubleCode(i));
2919 Movsd(reg, Operand(rbp, offset - ((i + 1) * kDoubleSize)));
2920 }
2921 }
2922
2923 if (pop_arguments) {
2924 // Get the return address from the stack and restore the frame pointer.
2925 movq(rcx, Operand(rbp, kFPOnStackSize));
2926 movq(rbp, Operand(rbp, 0 * kSystemPointerSize));
2927
2928 // Drop everything up to and including the arguments and the receiver
2929 // from the caller stack.
2930 leaq(rsp, Operand(r15, 1 * kSystemPointerSize));
2931
2932 PushReturnAddressFrom(rcx);
2933 } else {
2934 // Otherwise just leave the exit frame.
2935 leave();
2936 }
2937
2938 LeaveExitFrameEpilogue();
2939 }
2940
2941 void MacroAssembler::LeaveApiExitFrame() {
2942 ASM_CODE_COMMENT(this);
2943 movq(rsp, rbp);
2944 popq(rbp);
2945
2946 LeaveExitFrameEpilogue();
2947 }
2948
2949 void MacroAssembler::LeaveExitFrameEpilogue() {
2950 ASM_CODE_COMMENT(this);
2951 // Restore current context from top and clear it in debug mode.
2952 ExternalReference context_address =
2953 ExternalReference::Create(IsolateAddressId::kContextAddress, isolate());
2954 Operand context_operand = ExternalReferenceAsOperand(context_address);
2955 movq(rsi, context_operand);
2956 #ifdef DEBUG
2957 Move(context_operand, Context::kInvalidContext);
2958 #endif
2959
2960 // Clear the top frame.
2961 ExternalReference c_entry_fp_address =
2962 ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate());
2963 Operand c_entry_fp_operand = ExternalReferenceAsOperand(c_entry_fp_address);
2964 Move(c_entry_fp_operand, 0);
2965 }
2966
2967 void MacroAssembler::LoadNativeContextSlot(Register dst, int index) {
2968 ASM_CODE_COMMENT(this);
2969 // Load native context.
2970 LoadMap(dst, rsi);
2971 LoadTaggedPointerField(
2972 dst,
2973 FieldOperand(dst, Map::kConstructorOrBackPointerOrNativeContextOffset));
2974 // Load value from native context.
2975 LoadTaggedPointerField(dst, Operand(dst, Context::SlotOffset(index)));
2976 }
2977
2978 int TurboAssembler::ArgumentStackSlotsForCFunctionCall(int num_arguments) {
2979 // On Windows 64 stack slots are reserved by the caller for all arguments
2980 // including the ones passed in registers, and space is always allocated for
2981 // the four register arguments even if the function takes fewer than four
2982 // arguments.
2983 // On AMD64 ABI (Linux/Mac) the first six arguments are passed in registers
2984 // and the caller does not reserve stack slots for them.
2985 DCHECK_GE(num_arguments, 0);
2986 #ifdef V8_TARGET_OS_WIN
2987 const int kMinimumStackSlots = kRegisterPassedArguments;
2988 if (num_arguments < kMinimumStackSlots) return kMinimumStackSlots;
2989 return num_arguments;
2990 #else
2991 if (num_arguments < kRegisterPassedArguments) return 0;
2992 return num_arguments - kRegisterPassedArguments;
2993 #endif
2994 }
2995
2996 void TurboAssembler::PrepareCallCFunction(int num_arguments) {
2997 ASM_CODE_COMMENT(this);
2998 int frame_alignment = base::OS::ActivationFrameAlignment();
2999 DCHECK_NE(frame_alignment, 0);
3000 DCHECK_GE(num_arguments, 0);
3001
3002 // Make stack end at alignment and allocate space for arguments and old rsp.
3003 movq(kScratchRegister, rsp);
3004 DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
3005 int argument_slots_on_stack =
3006 ArgumentStackSlotsForCFunctionCall(num_arguments);
3007 AllocateStackSpace((argument_slots_on_stack + 1) * kSystemPointerSize);
3008 andq(rsp, Immediate(-frame_alignment));
3009 movq(Operand(rsp, argument_slots_on_stack * kSystemPointerSize),
3010 kScratchRegister);
3011 }
3012
3013 void TurboAssembler::CallCFunction(ExternalReference function,
3014 int num_arguments) {
3015 ASM_CODE_COMMENT(this);
3016 LoadAddress(rax, function);
3017 CallCFunction(rax, num_arguments);
3018 }
3019
3020 void TurboAssembler::CallCFunction(Register function, int num_arguments) {
3021 ASM_CODE_COMMENT(this);
3022 DCHECK_LE(num_arguments, kMaxCParameters);
3023 DCHECK(has_frame());
3024 // Check stack alignment.
3025 if (FLAG_debug_code) {
3026 CheckStackAlignment();
3027 }
3028
3029 // Save the frame pointer and PC so that the stack layout remains iterable,
3030 // even without an ExitFrame which normally exists between JS and C frames.
3031 Label get_pc;
3032 DCHECK(!AreAliased(kScratchRegister, function));
3033 leaq(kScratchRegister, Operand(&get_pc, 0));
3034 bind(&get_pc);
3035
3036 // Addressing the following external references is tricky because we need
3037 // this to work in three situations:
3038 // 1. In wasm compilation, the isolate is nullptr and thus no
3039 // ExternalReference can be created, but we can construct the address
3040 // directly using the root register and a static offset.
3041 // 2. In normal JIT (and builtin) compilation, the external reference is
3042 // usually addressed through the root register, so we can use the direct
3043 // offset directly in most cases.
3044 // 3. In regexp compilation, the external reference is embedded into the reloc
3045 // info.
3046 // The solution here is to use root register offsets wherever possible in
3047 // which case we can construct it directly. When falling back to external
3048 // references we need to ensure that the scratch register does not get
3049 // accidentally overwritten. If we run into more such cases in the future, we
3050 // should implement a more general solution.
3051 if (root_array_available()) {
3052 movq(Operand(kRootRegister, IsolateData::fast_c_call_caller_pc_offset()),
3053 kScratchRegister);
3054 movq(Operand(kRootRegister, IsolateData::fast_c_call_caller_fp_offset()),
3055 rbp);
3056 } else {
3057 DCHECK_NOT_NULL(isolate());
3058 // Use alternative scratch register in order not to overwrite
3059 // kScratchRegister.
3060 Register scratch = r12;
3061 pushq(scratch);
3062
3063 movq(ExternalReferenceAsOperand(
3064 ExternalReference::fast_c_call_caller_pc_address(isolate()),
3065 scratch),
3066 kScratchRegister);
3067 movq(ExternalReferenceAsOperand(
3068 ExternalReference::fast_c_call_caller_fp_address(isolate())),
3069 rbp);
3070
3071 popq(scratch);
3072 }
3073
3074 call(function);
3075
3076 // We don't unset the PC; the FP is the source of truth.
3077 if (root_array_available()) {
3078 movq(Operand(kRootRegister, IsolateData::fast_c_call_caller_fp_offset()),
3079 Immediate(0));
3080 } else {
3081 DCHECK_NOT_NULL(isolate());
3082 movq(ExternalReferenceAsOperand(
3083 ExternalReference::fast_c_call_caller_fp_address(isolate())),
3084 Immediate(0));
3085 }
3086
3087 DCHECK_NE(base::OS::ActivationFrameAlignment(), 0);
3088 DCHECK_GE(num_arguments, 0);
3089 int argument_slots_on_stack =
3090 ArgumentStackSlotsForCFunctionCall(num_arguments);
3091 movq(rsp, Operand(rsp, argument_slots_on_stack * kSystemPointerSize));
3092 }
3093
3094 void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
3095 Condition cc, Label* condition_met,
3096 Label::Distance condition_met_distance) {
3097 ASM_CODE_COMMENT(this);
3098 DCHECK(cc == zero || cc == not_zero);
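// Mask off the low bits of the object address to reach the header of the
// BasicMemoryChunk that contains it, where the page flags live.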
3099 if (scratch == object) {
3100 andq(scratch, Immediate(~kPageAlignmentMask));
3101 } else {
3102 movq(scratch, Immediate(~kPageAlignmentMask));
3103 andq(scratch, object);
3104 }
3105 if (mask < (1 << kBitsPerByte)) {
3106 testb(Operand(scratch, BasicMemoryChunk::kFlagsOffset),
3107 Immediate(static_cast<uint8_t>(mask)));
3108 } else {
3109 testl(Operand(scratch, BasicMemoryChunk::kFlagsOffset), Immediate(mask));
3110 }
3111 j(cc, condition_met, condition_met_distance);
3112 }
3113
3114 void TurboAssembler::ComputeCodeStartAddress(Register dst) {
3115 Label current;
3116 bind(&current);
3117 int pc = pc_offset();
3118 // Load effective address to get the address of the current instruction.
3119 leaq(dst, Operand(&current, -pc));
3120 }
3121
3122 void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
3123 DeoptimizeKind kind, Label* ret,
3124 Label*) {
3125 ASM_CODE_COMMENT(this);
3126 // Note: Assembler::call is used here on purpose to guarantee fixed-size
3127 // exits even on Atom CPUs; see TurboAssembler::Call for Atom-specific
3128 // performance tuning which emits a different instruction sequence.
3129 call(EntryFromBuiltinAsOperand(target));
3130 DCHECK_EQ(SizeOfCodeGeneratedSince(exit),
3131 (kind == DeoptimizeKind::kLazy)
3132 ? Deoptimizer::kLazyDeoptExitSize
3133 : Deoptimizer::kNonLazyDeoptExitSize);
3134
3135 if (kind == DeoptimizeKind::kEagerWithResume) {
3136 bool old_predictable_code_size = predictable_code_size();
3137 set_predictable_code_size(true);
3138 jmp(ret);
3139
3140 DCHECK_EQ(SizeOfCodeGeneratedSince(exit),
3141 Deoptimizer::kEagerWithResumeBeforeArgsSize);
3142 set_predictable_code_size(old_predictable_code_size);
3143 }
3144 }
3145
3146 void TurboAssembler::Trap() { int3(); }
3147 void TurboAssembler::DebugBreak() { int3(); }
3148
3149 } // namespace internal
3150 } // namespace v8
3151
3152 #endif // V8_TARGET_ARCH_X64
3153