1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #if V8_TARGET_ARCH_IA32
6
7 #include "src/base/bits.h"
8 #include "src/base/division-by-constant.h"
9 #include "src/base/utils/random-number-generator.h"
10 #include "src/codegen/callable.h"
11 #include "src/codegen/code-factory.h"
12 #include "src/codegen/external-reference-table.h"
13 #include "src/codegen/ia32/assembler-ia32-inl.h"
14 #include "src/codegen/macro-assembler.h"
15 #include "src/debug/debug.h"
16 #include "src/execution/frame-constants.h"
17 #include "src/execution/frames-inl.h"
18 #include "src/heap/heap-inl.h" // For MemoryChunk.
19 #include "src/init/bootstrapper.h"
20 #include "src/logging/counters.h"
21 #include "src/runtime/runtime.h"
22 #include "src/snapshot/embedded/embedded-data.h"
23 #include "src/snapshot/snapshot.h"
24
25 // Satisfy cpplint check, but don't include platform-specific header. It is
26 // included recursively via macro-assembler.h.
27 #if 0
28 #include "src/codegen/ia32/macro-assembler-ia32.h"
29 #endif
30
31 namespace v8 {
32 namespace internal {
33
// Returns a stack operand addressing JS argument |index| of the current
// call, relative to esp (skipping the pushed return address).
Operand StackArgumentsAccessor::GetArgumentOperand(int index) const {
  DCHECK_GE(index, 0);
#ifdef V8_REVERSE_JSARGS
  // Arguments grow upward from esp:
  // arg[0] = esp + kPCOnStackSize;
  // arg[i] = arg[0] + i * kSystemPointerSize;
  return Operand(esp, kPCOnStackSize + index * kSystemPointerSize);
#else
  // Arguments are laid out in reverse; argc_ scales the base:
  // arg[0] = (esp + kPCOnStackSize) + argc * kSystemPointerSize;
  // arg[i] = arg[0] - i * kSystemPointerSize;
  return Operand(esp, argc_, times_system_pointer_size,
                 kPCOnStackSize - index * kSystemPointerSize);
#endif
}
47
48 // -------------------------------------------------------------------------
49 // MacroAssembler implementation.
50
// Loads the isolate root address into kRootRegister, enabling
// root-register-relative addressing for the rest of the code.
void TurboAssembler::InitializeRootRegister() {
  ExternalReference isolate_root = ExternalReference::isolate_root(isolate());
  Move(kRootRegister, Immediate(isolate_root));
}
55
// Loads the root-table entry |index| into |destination|, choosing the
// cheapest available strategy.
void TurboAssembler::LoadRoot(Register destination, RootIndex index) {
  // Fast path: a single load relative to kRootRegister.
  if (root_array_available()) {
    mov(destination,
        Operand(kRootRegister, RootRegisterOffsetForRootIndex(index)));
    return;
  }

  // No root register: immortal-immovable roots can be embedded directly
  // as an immediate (Smi) or embedded-object handle.
  if (RootsTable::IsImmortalImmovable(index)) {
    Handle<Object> object = isolate()->root_handle(index);
    if (object->IsSmi()) {
      mov(destination, Immediate(Smi::cast(*object)));
      return;
    } else {
      DCHECK(object->IsHeapObject());
      mov(destination, Handle<HeapObject>::cast(object));
      return;
    }
  }

  // Slow path: materialize the isolate root address, then load the entry
  // from the roots table through it.
  ExternalReference isolate_root = ExternalReference::isolate_root(isolate());
  lea(destination,
      Operand(isolate_root.address(), RelocInfo::EXTERNAL_REFERENCE));
  mov(destination, Operand(destination, RootRegisterOffsetForRootIndex(index)));
}
80
// Compares |with| against root-table entry |index|. |scratch| is only
// clobbered when the root register is unavailable.
void TurboAssembler::CompareRoot(Register with, Register scratch,
                                 RootIndex index) {
  if (root_array_available()) {
    CompareRoot(with, index);
  } else {
    // Materialize the isolate root in |scratch| and compare through it.
    ExternalReference isolate_root = ExternalReference::isolate_root(isolate());
    lea(scratch,
        Operand(isolate_root.address(), RelocInfo::EXTERNAL_REFERENCE));
    cmp(with, Operand(scratch, RootRegisterOffsetForRootIndex(index)));
  }
}
92
// Compares |with| against root-table entry |index| without a scratch
// register; without the root register this only works for
// immortal-immovable roots, which can be embedded as immediates/handles.
void TurboAssembler::CompareRoot(Register with, RootIndex index) {
  if (root_array_available()) {
    cmp(with, Operand(kRootRegister, RootRegisterOffsetForRootIndex(index)));
    return;
  }

  DCHECK(RootsTable::IsImmortalImmovable(index));
  Handle<Object> object = isolate()->root_handle(index);
  if (object->IsHeapObject()) {
    cmp(with, Handle<HeapObject>::cast(object));
  } else {
    cmp(with, Immediate(Smi::cast(*object)));
  }
}
107
// Pushes root-table entry |index| onto the stack. Only supported for
// immortal-immovable roots, which never move under GC.
void MacroAssembler::PushRoot(RootIndex index) {
  if (root_array_available()) {
    DCHECK(RootsTable::IsImmortalImmovable(index));
    push(Operand(kRootRegister, RootRegisterOffsetForRootIndex(index)));
    return;
  }

  // TODO(v8:6666): Add a scratch register or remove all uses.
  DCHECK(RootsTable::IsImmortalImmovable(index));
  Handle<Object> object = isolate()->root_handle(index);
  if (object->IsHeapObject()) {
    Push(Handle<HeapObject>::cast(object));
  } else {
    Push(Smi::cast(*object));
  }
}
124
// Jumps to |on_in_range| if lower_limit <= value <= higher_limit, using a
// single unsigned compare. |scratch| is clobbered when lower_limit != 0.
void MacroAssembler::JumpIfIsInRange(Register value, unsigned lower_limit,
                                     unsigned higher_limit, Register scratch,
                                     Label* on_in_range,
                                     Label::Distance near_jump) {
  if (lower_limit != 0) {
    // Shift the range down to start at zero: scratch = value - lower_limit,
    // then one unsigned compare covers both bounds.
    lea(scratch, Operand(value, 0u - lower_limit));
    cmp(scratch, Immediate(higher_limit - lower_limit));
  } else {
    cmp(value, Immediate(higher_limit));
  }
  j(below_equal, on_in_range, near_jump);
}
137
// Returns an operand addressing the memory at |reference|. May clobber
// |scratch| when the address cannot be expressed relative to kRootRegister.
Operand TurboAssembler::ExternalReferenceAsOperand(ExternalReference reference,
                                                   Register scratch) {
  // TODO(jgruber): Add support for enable_root_array_delta_access.
  if (root_array_available() && options().isolate_independent_code) {
    if (IsAddressableThroughRootRegister(isolate(), reference)) {
      // Some external references can be efficiently loaded as an offset from
      // kRootRegister.
      intptr_t offset =
          RootRegisterOffsetForExternalReference(isolate(), reference);
      return Operand(kRootRegister, offset);
    } else {
      // Otherwise, do a memory load from the external reference table.
      mov(scratch, Operand(kRootRegister,
                           RootRegisterOffsetForExternalReferenceTableEntry(
                               isolate(), reference)));
      return Operand(scratch, 0);
    }
  }
  // Fallback: materialize the raw address in |scratch|.
  Move(scratch, Immediate(reference));
  return Operand(scratch, 0);
}
159
// TODO(v8:6666): If possible, refactor into a platform-independent function in
// TurboAssembler.
// Returns an operand addressing the external-reference-table entry for
// |reference| (i.e. the slot holding its address), relative to kRootRegister.
Operand TurboAssembler::ExternalReferenceAddressAsOperand(
    ExternalReference reference) {
  DCHECK(root_array_available());
  DCHECK(options().isolate_independent_code);
  return Operand(
      kRootRegister,
      RootRegisterOffsetForExternalReferenceTableEntry(isolate(), reference));
}
170
// TODO(v8:6666): If possible, refactor into a platform-independent function in
// TurboAssembler.
// Returns a kRootRegister-relative operand for |object| when it is a root,
// a builtin, or the code object currently being generated. Objects only
// reachable via the constants table cannot be expressed as one operand.
Operand TurboAssembler::HeapObjectAsOperand(Handle<HeapObject> object) {
  DCHECK(root_array_available());

  int builtin_index;
  RootIndex root_index;
  if (isolate()->roots_table().IsRootHandle(object, &root_index)) {
    return Operand(kRootRegister, RootRegisterOffsetForRootIndex(root_index));
  } else if (isolate()->builtins()->IsBuiltinHandle(object, &builtin_index)) {
    return Operand(kRootRegister,
                   RootRegisterOffsetForBuiltinIndex(builtin_index));
  } else if (object.is_identical_to(code_object_) &&
             Builtins::IsBuiltinId(maybe_builtin_index_)) {
    // Self-reference to the builtin currently being assembled.
    return Operand(kRootRegister,
                   RootRegisterOffsetForBuiltinIndex(maybe_builtin_index_));
  } else {
    // Objects in the constants table need an additional indirection, which
    // cannot be represented as a single Operand.
    UNREACHABLE();
  }
}
193
// Loads element |constant_index| of the builtins constants table (a
// FixedArray root) into |destination|. Clobbers |destination| as scratch.
void TurboAssembler::LoadFromConstantsTable(Register destination,
                                            int constant_index) {
  DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kBuiltinsConstantsTable));
  LoadRoot(destination, RootIndex::kBuiltinsConstantsTable);
  mov(destination,
      FieldOperand(destination, FixedArray::OffsetOfElementAt(constant_index)));
}
201
// Computes kRootRegister + |offset| into |destination|. Uses a plain mov
// for offset 0 instead of lea.
void TurboAssembler::LoadRootRegisterOffset(Register destination,
                                            intptr_t offset) {
  DCHECK(is_int32(offset));
  DCHECK(root_array_available());
  if (offset == 0) {
    mov(destination, kRootRegister);
  } else {
    lea(destination, Operand(kRootRegister, static_cast<int32_t>(offset)));
  }
}
212
// Loads the word at kRootRegister + |offset| into |destination|.
void TurboAssembler::LoadRootRelative(Register destination, int32_t offset) {
  DCHECK(root_array_available());
  mov(destination, Operand(kRootRegister, offset));
}
217
// Loads the address of external reference |source| into |destination|,
// going through the external reference table when the code must be
// isolate-independent.
void TurboAssembler::LoadAddress(Register destination,
                                 ExternalReference source) {
  // TODO(jgruber): Add support for enable_root_array_delta_access.
  if (root_array_available() && options().isolate_independent_code) {
    IndirectLoadExternalReference(destination, source);
    return;
  }
  mov(destination, Immediate(source));
}
227
// Caller-saved general-purpose registers on ia32, used by
// Push/PopCallerSaved below.
static constexpr Register saved_regs[] = {eax, ecx, edx};

static constexpr int kNumberOfSavedRegs = sizeof(saved_regs) / sizeof(Register);
231
// Returns the number of bytes PushCallerSaved would push for the same
// arguments, without emitting any code. Excluded registers are skipped.
int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
                                                    Register exclusion1,
                                                    Register exclusion2,
                                                    Register exclusion3) const {
  int bytes = 0;
  for (int i = 0; i < kNumberOfSavedRegs; i++) {
    Register reg = saved_regs[i];
    if (reg != exclusion1 && reg != exclusion2 && reg != exclusion3) {
      bytes += kSystemPointerSize;
    }
  }

  if (fp_mode == kSaveFPRegs) {
    // Count all XMM registers except XMM0.
    bytes += kDoubleSize * (XMMRegister::kNumRegisters - 1);
  }

  return bytes;
}
251
PushCallerSaved(SaveFPRegsMode fp_mode,Register exclusion1,Register exclusion2,Register exclusion3)252 int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
253 Register exclusion2, Register exclusion3) {
254 // We don't allow a GC during a store buffer overflow so there is no need to
255 // store the registers in any particular way, but we do have to store and
256 // restore them.
257 int bytes = 0;
258 for (int i = 0; i < kNumberOfSavedRegs; i++) {
259 Register reg = saved_regs[i];
260 if (reg != exclusion1 && reg != exclusion2 && reg != exclusion3) {
261 push(reg);
262 bytes += kSystemPointerSize;
263 }
264 }
265
266 if (fp_mode == kSaveFPRegs) {
267 // Save all XMM registers except XMM0.
268 int delta = kDoubleSize * (XMMRegister::kNumRegisters - 1);
269 AllocateStackSpace(delta);
270 for (int i = XMMRegister::kNumRegisters - 1; i > 0; i--) {
271 XMMRegister reg = XMMRegister::from_code(i);
272 movsd(Operand(esp, (i - 1) * kDoubleSize), reg);
273 }
274 bytes += delta;
275 }
276
277 return bytes;
278 }
279
PopCallerSaved(SaveFPRegsMode fp_mode,Register exclusion1,Register exclusion2,Register exclusion3)280 int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
281 Register exclusion2, Register exclusion3) {
282 int bytes = 0;
283 if (fp_mode == kSaveFPRegs) {
284 // Restore all XMM registers except XMM0.
285 int delta = kDoubleSize * (XMMRegister::kNumRegisters - 1);
286 for (int i = XMMRegister::kNumRegisters - 1; i > 0; i--) {
287 XMMRegister reg = XMMRegister::from_code(i);
288 movsd(reg, Operand(esp, (i - 1) * kDoubleSize));
289 }
290 add(esp, Immediate(delta));
291 bytes += delta;
292 }
293
294 for (int i = kNumberOfSavedRegs - 1; i >= 0; i--) {
295 Register reg = saved_regs[i];
296 if (reg != exclusion1 && reg != exclusion2 && reg != exclusion3) {
297 pop(reg);
298 bytes += kSystemPointerSize;
299 }
300 }
301
302 return bytes;
303 }
304
// Emits a write barrier for a store of |value| into the field at |offset|
// of |object|. |dst| is clobbered with the slot address. With debug code
// enabled the input registers are zapped afterwards.
void MacroAssembler::RecordWriteField(Register object, int offset,
                                      Register value, Register dst,
                                      SaveFPRegsMode save_fp,
                                      RememberedSetAction remembered_set_action,
                                      SmiCheck smi_check) {
  // First, check if a write barrier is even needed. The tests below
  // catch stores of Smis.
  Label done;

  // Skip barrier if writing a smi.
  if (smi_check == INLINE_SMI_CHECK) {
    JumpIfSmi(value, &done);
  }

  // Although the object register is tagged, the offset is relative to the start
  // of the object, so the offset must be a multiple of kTaggedSize.
  DCHECK(IsAligned(offset, kTaggedSize));

  // Compute the untagged slot address into |dst|.
  lea(dst, FieldOperand(object, offset));
  if (emit_debug_code()) {
    // Verify the computed slot address is tagged-size aligned.
    Label ok;
    test_b(dst, Immediate(kTaggedSize - 1));
    j(zero, &ok, Label::kNear);
    int3();
    bind(&ok);
  }

  RecordWrite(object, dst, value, save_fp, remembered_set_action,
              OMIT_SMI_CHECK);

  bind(&done);

  // Clobber clobbered input registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    mov(value, Immediate(bit_cast<int32_t>(kZapValue)));
    mov(dst, Immediate(bit_cast<int32_t>(kZapValue)));
  }
}
344
// Pushes every register in the |registers| bit set, from lowest code to
// highest. Paired with RestoreRegisters, which pops in reverse order.
void TurboAssembler::SaveRegisters(RegList registers) {
  DCHECK_GT(NumRegs(registers), 0);
  for (int i = 0; i < Register::kNumRegisters; ++i) {
    if ((registers >> i) & 1u) {
      push(Register::from_code(i));
    }
  }
}
353
// Pops every register in the |registers| bit set, in reverse order of
// SaveRegisters.
void TurboAssembler::RestoreRegisters(RegList registers) {
  DCHECK_GT(NumRegs(registers), 0);
  for (int i = Register::kNumRegisters - 1; i >= 0; --i) {
    if ((registers >> i) & 1u) {
      pop(Register::from_code(i));
    }
  }
}
362
CallEphemeronKeyBarrier(Register object,Register address,SaveFPRegsMode fp_mode)363 void TurboAssembler::CallEphemeronKeyBarrier(Register object, Register address,
364 SaveFPRegsMode fp_mode) {
365 EphemeronKeyBarrierDescriptor descriptor;
366 RegList registers = descriptor.allocatable_registers();
367
368 SaveRegisters(registers);
369
370 Register object_parameter(
371 descriptor.GetRegisterParameter(EphemeronKeyBarrierDescriptor::kObject));
372 Register slot_parameter(descriptor.GetRegisterParameter(
373 EphemeronKeyBarrierDescriptor::kSlotAddress));
374 Register fp_mode_parameter(
375 descriptor.GetRegisterParameter(EphemeronKeyBarrierDescriptor::kFPMode));
376
377 push(object);
378 push(address);
379
380 pop(slot_parameter);
381 pop(object_parameter);
382
383 Move(fp_mode_parameter, Smi::FromEnum(fp_mode));
384 Call(isolate()->builtins()->builtin_handle(Builtins::kEphemeronKeyBarrier),
385 RelocInfo::CODE_TARGET);
386
387 RestoreRegisters(registers);
388 }
389
CallRecordWriteStub(Register object,Register address,RememberedSetAction remembered_set_action,SaveFPRegsMode fp_mode)390 void TurboAssembler::CallRecordWriteStub(
391 Register object, Register address,
392 RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode) {
393 CallRecordWriteStub(
394 object, address, remembered_set_action, fp_mode,
395 isolate()->builtins()->builtin_handle(Builtins::kRecordWrite),
396 kNullAddress);
397 }
398
CallRecordWriteStub(Register object,Register address,RememberedSetAction remembered_set_action,SaveFPRegsMode fp_mode,Address wasm_target)399 void TurboAssembler::CallRecordWriteStub(
400 Register object, Register address,
401 RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
402 Address wasm_target) {
403 CallRecordWriteStub(object, address, remembered_set_action, fp_mode,
404 Handle<Code>::null(), wasm_target);
405 }
406
CallRecordWriteStub(Register object,Register address,RememberedSetAction remembered_set_action,SaveFPRegsMode fp_mode,Handle<Code> code_target,Address wasm_target)407 void TurboAssembler::CallRecordWriteStub(
408 Register object, Register address,
409 RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
410 Handle<Code> code_target, Address wasm_target) {
411 DCHECK_NE(code_target.is_null(), wasm_target == kNullAddress);
412 // TODO(albertnetymk): For now we ignore remembered_set_action and fp_mode,
413 // i.e. always emit remember set and save FP registers in RecordWriteStub. If
414 // large performance regression is observed, we should use these values to
415 // avoid unnecessary work.
416
417 RecordWriteDescriptor descriptor;
418 RegList registers = descriptor.allocatable_registers();
419
420 SaveRegisters(registers);
421
422 Register object_parameter(
423 descriptor.GetRegisterParameter(RecordWriteDescriptor::kObject));
424 Register slot_parameter(
425 descriptor.GetRegisterParameter(RecordWriteDescriptor::kSlot));
426 Register remembered_set_parameter(
427 descriptor.GetRegisterParameter(RecordWriteDescriptor::kRememberedSet));
428 Register fp_mode_parameter(
429 descriptor.GetRegisterParameter(RecordWriteDescriptor::kFPMode));
430
431 push(object);
432 push(address);
433
434 pop(slot_parameter);
435 pop(object_parameter);
436
437 Move(remembered_set_parameter, Smi::FromEnum(remembered_set_action));
438 Move(fp_mode_parameter, Smi::FromEnum(fp_mode));
439 if (code_target.is_null()) {
440 // Use {wasm_call} for direct Wasm call within a module.
441 wasm_call(wasm_target, RelocInfo::WASM_STUB_CALL);
442 } else {
443 Call(code_target, RelocInfo::CODE_TARGET);
444 }
445
446 RestoreRegisters(registers);
447 }
448
RecordWrite(Register object,Register address,Register value,SaveFPRegsMode fp_mode,RememberedSetAction remembered_set_action,SmiCheck smi_check)449 void MacroAssembler::RecordWrite(Register object, Register address,
450 Register value, SaveFPRegsMode fp_mode,
451 RememberedSetAction remembered_set_action,
452 SmiCheck smi_check) {
453 DCHECK(object != value);
454 DCHECK(object != address);
455 DCHECK(value != address);
456 AssertNotSmi(object);
457
458 if ((remembered_set_action == OMIT_REMEMBERED_SET &&
459 !FLAG_incremental_marking) ||
460 FLAG_disable_write_barriers) {
461 return;
462 }
463
464 if (emit_debug_code()) {
465 Label ok;
466 cmp(value, Operand(address, 0));
467 j(equal, &ok, Label::kNear);
468 int3();
469 bind(&ok);
470 }
471
472 // First, check if a write barrier is even needed. The tests below
473 // catch stores of Smis and stores into young gen.
474 Label done;
475
476 if (smi_check == INLINE_SMI_CHECK) {
477 // Skip barrier if writing a smi.
478 JumpIfSmi(value, &done, Label::kNear);
479 }
480
481 CheckPageFlag(value,
482 value, // Used as scratch.
483 MemoryChunk::kPointersToHereAreInterestingMask, zero, &done,
484 Label::kNear);
485 CheckPageFlag(object,
486 value, // Used as scratch.
487 MemoryChunk::kPointersFromHereAreInterestingMask, zero, &done,
488 Label::kNear);
489
490 CallRecordWriteStub(object, address, remembered_set_action, fp_mode);
491
492 bind(&done);
493
494 // Clobber clobbered registers when running with the debug-code flag
495 // turned on to provoke errors.
496 if (emit_debug_code()) {
497 mov(address, Immediate(bit_cast<int32_t>(kZapValue)));
498 mov(value, Immediate(bit_cast<int32_t>(kZapValue)));
499 }
500 }
501
// If the debugger has requested a function restart (non-zero restart fp),
// tail-calls the FrameDropperTrampoline to drop frames; otherwise falls
// through. Clobbers eax.
void MacroAssembler::MaybeDropFrames() {
  // Check whether we need to drop frames to restart a function on the stack.
  Label dont_drop;
  ExternalReference restart_fp =
      ExternalReference::debug_restart_fp_address(isolate());
  mov(eax, ExternalReferenceAsOperand(restart_fp, eax));
  test(eax, eax);
  j(zero, &dont_drop, Label::kNear);

  Jump(BUILTIN_CODE(isolate(), FrameDropperTrampoline), RelocInfo::CODE_TARGET);
  bind(&dont_drop);
}
514
// Converts signed int32 |src| to float in |dst|. The xorps clears |dst|
// first to break the false dependency cvtsi2ss has on its destination.
void TurboAssembler::Cvtsi2ss(XMMRegister dst, Operand src) {
  xorps(dst, dst);
  cvtsi2ss(dst, src);
}
519
// Converts signed int32 |src| to double in |dst|. The xorpd clears |dst|
// first to break the false dependency cvtsi2sd has on its destination.
void TurboAssembler::Cvtsi2sd(XMMRegister dst, Operand src) {
  xorpd(dst, dst);
  cvtsi2sd(dst, src);
}
524
// Converts unsigned int32 |src| to float in |dst|. There is no unsigned
// convert on ia32, so values with the MSB set are halved (keeping the LSB
// for correct rounding), converted, and doubled. Clobbers |tmp|.
void TurboAssembler::Cvtui2ss(XMMRegister dst, Operand src, Register tmp) {
  Label done;
  Register src_reg = src.is_reg_only() ? src.reg() : tmp;
  if (src_reg == tmp) mov(tmp, src);
  cvtsi2ss(dst, src_reg);
  test(src_reg, src_reg);
  // Non-negative values convert correctly as signed.
  j(positive, &done, Label::kNear);

  // Compute {src/2 | (src&1)} (retain the LSB to avoid rounding errors).
  if (src_reg != tmp) mov(tmp, src_reg);
  shr(tmp, 1);
  // The LSB is shifted into CF. If it is set, set the LSB in {tmp}.
  Label msb_not_set;
  j(not_carry, &msb_not_set, Label::kNear);
  or_(tmp, Immediate(1));
  bind(&msb_not_set);
  cvtsi2ss(dst, tmp);
  // Double the halved value to recover the full magnitude.
  addss(dst, dst);
  bind(&done);
}
545
// Truncates float |src| to unsigned int32 in |dst|. Values >= 2^31 are
// handled by biasing down by 2^31 before the signed truncation and
// setting the high bit afterwards. Clobbers |tmp|.
void TurboAssembler::Cvttss2ui(Register dst, Operand src, XMMRegister tmp) {
  Label done;
  cvttss2si(dst, src);
  test(dst, dst);
  // A non-negative signed result means the value fit in [0, 2^31).
  j(positive, &done);
  Move(tmp, static_cast<float>(INT32_MIN));
  addss(tmp, src);
  cvttss2si(dst, tmp);
  or_(dst, Immediate(0x80000000));
  bind(&done);
}
557
// Converts unsigned int32 |src| to double in |dst|. Converts as signed,
// then adds 2^32 (the uint32 bias constant) when the sign bit was set.
// May clobber |scratch| when loading the bias.
void TurboAssembler::Cvtui2sd(XMMRegister dst, Operand src, Register scratch) {
  Label done;
  // The cmp sets SF from the value's top bit; the convert below does not
  // change flags.
  cmp(src, Immediate(0));
  ExternalReference uint32_bias = ExternalReference::address_of_uint32_bias();
  Cvtsi2sd(dst, src);
  j(not_sign, &done, Label::kNear);
  addsd(dst, ExternalReferenceAsOperand(uint32_bias, scratch));
  bind(&done);
}
567
// Truncates double |src| to unsigned int32 in |dst| by biasing down by
// 2^31 (exact in double), truncating signed, then adding 2^31 back.
// Clobbers |tmp|.
void TurboAssembler::Cvttsd2ui(Register dst, Operand src, XMMRegister tmp) {
  Move(tmp, -2147483648.0);
  addsd(tmp, src);
  cvttsd2si(dst, tmp);
  add(dst, Immediate(0x80000000));
}
574
// Shifts the 64-bit value in {high:low} left by |shift| (0..63) using an
// immediate count. Shifts >= 32 move low into high and clear low.
void TurboAssembler::ShlPair(Register high, Register low, uint8_t shift) {
  DCHECK_GE(63, shift);
  if (shift >= 32) {
    mov(high, low);
    if (shift != 32) shl(high, shift - 32);
    xor_(low, low);
  } else {
    // shld shifts bits from low into high; shl handles the low word.
    shld(high, low, shift);
    shl(low, shift);
  }
}
586
// Shifts the 64-bit value in {high:low} left by cl (mod 64). The hardware
// masks the count mod 32, so when bit 5 of ecx is set the words must be
// swapped and low cleared explicitly.
void TurboAssembler::ShlPair_cl(Register high, Register low) {
  shld_cl(high, low);
  shl_cl(low);
  Label done;
  test(ecx, Immediate(0x20));
  j(equal, &done, Label::kNear);
  mov(high, low);
  xor_(low, low);
  bind(&done);
}
597
// Logically shifts the 64-bit value in {high:low} right by |shift|
// (0..63) using an immediate count. Shifts >= 32 move high into low and
// clear high.
void TurboAssembler::ShrPair(Register high, Register low, uint8_t shift) {
  DCHECK_GE(63, shift);
  if (shift >= 32) {
    mov(low, high);
    if (shift != 32) shr(low, shift - 32);
    xor_(high, high);
  } else {
    // shrd shifts bits from high into low; shr handles the high word.
    shrd(low, high, shift);
    shr(high, shift);
  }
}
609
// Logically shifts the 64-bit value in {high:low} right by cl (mod 64).
// The hardware masks the count mod 32, so when bit 5 of ecx is set the
// words must be swapped and high cleared explicitly.
void TurboAssembler::ShrPair_cl(Register high, Register low) {
  shrd_cl(low, high);
  shr_cl(high);
  Label done;
  test(ecx, Immediate(0x20));
  j(equal, &done, Label::kNear);
  mov(low, high);
  xor_(high, high);
  bind(&done);
}
620
// Arithmetically shifts the 64-bit value in {high:low} right by |shift|
// (0..63) using an immediate count. Shifts >= 32 move high into low and
// fill high with the sign bit.
void TurboAssembler::SarPair(Register high, Register low, uint8_t shift) {
  DCHECK_GE(63, shift);
  if (shift >= 32) {
    mov(low, high);
    if (shift != 32) sar(low, shift - 32);
    // Replicate the sign bit across the high word.
    sar(high, 31);
  } else {
    shrd(low, high, shift);
    sar(high, shift);
  }
}
632
// Arithmetically shifts the 64-bit value in {high:low} right by cl
// (mod 64). The hardware masks the count mod 32, so when bit 5 of ecx is
// set the words must be swapped and high sign-filled explicitly.
void TurboAssembler::SarPair_cl(Register high, Register low) {
  shrd_cl(low, high);
  sar_cl(high);
  Label done;
  test(ecx, Immediate(0x20));
  j(equal, &done, Label::kNear);
  mov(low, high);
  sar(high, 31);
  bind(&done);
}
643
// Loads the map of |object| into |destination|.
void TurboAssembler::LoadMap(Register destination, Register object) {
  mov(destination, FieldOperand(object, HeapObject::kMapOffset));
}
647
// Loads |heap_object|'s map into |map| and compares its instance type
// against |type|; sets flags for a following conditional jump.
void MacroAssembler::CmpObjectType(Register heap_object, InstanceType type,
                                   Register map) {
  LoadMap(map, heap_object);
  CmpInstanceType(map, type);
}
653
// Compares the instance-type field of |map| against |type| (16-bit cmp).
void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
  cmpw(FieldOperand(map, Map::kInstanceTypeOffset), Immediate(type));
}
657
// Debug-code-only check that |object| is a Smi; aborts otherwise.
void MacroAssembler::AssertSmi(Register object) {
  if (emit_debug_code()) {
    test(object, Immediate(kSmiTagMask));
    Check(equal, AbortReason::kOperandIsNotASmi);
  }
}
664
// Debug-code-only check that |object| is a heap object whose map has the
// constructor bit set; aborts otherwise. |object| is preserved via the
// stack while it is reused as a map scratch.
void MacroAssembler::AssertConstructor(Register object) {
  if (emit_debug_code()) {
    test(object, Immediate(kSmiTagMask));
    Check(not_equal, AbortReason::kOperandIsASmiAndNotAConstructor);
    Push(object);
    LoadMap(object, object);
    test_b(FieldOperand(object, Map::kBitFieldOffset),
           Immediate(Map::Bits1::IsConstructorBit::kMask));
    Pop(object);
    Check(not_zero, AbortReason::kOperandIsNotAConstructor);
  }
}
677
// Debug-code-only check that |object| is a JSFunction; aborts otherwise.
// |object| is preserved via the stack while reused as a map scratch.
void MacroAssembler::AssertFunction(Register object) {
  if (emit_debug_code()) {
    test(object, Immediate(kSmiTagMask));
    Check(not_equal, AbortReason::kOperandIsASmiAndNotAFunction);
    Push(object);
    CmpObjectType(object, JS_FUNCTION_TYPE, object);
    Pop(object);
    Check(equal, AbortReason::kOperandIsNotAFunction);
  }
}
688
// Debug-code-only check that |object| is a JSBoundFunction; aborts
// otherwise. |object| is preserved via the stack while reused as a map
// scratch.
void MacroAssembler::AssertBoundFunction(Register object) {
  if (emit_debug_code()) {
    test(object, Immediate(kSmiTagMask));
    Check(not_equal, AbortReason::kOperandIsASmiAndNotABoundFunction);
    Push(object);
    CmpObjectType(object, JS_BOUND_FUNCTION_TYPE, object);
    Pop(object);
    Check(equal, AbortReason::kOperandIsNotABoundFunction);
  }
}
699
// Debug-code-only check that |object| is a JSGeneratorObject,
// JSAsyncFunctionObject, or JSAsyncGeneratorObject; aborts otherwise.
// |object| is preserved via the stack while reused as a map scratch.
void MacroAssembler::AssertGeneratorObject(Register object) {
  if (!emit_debug_code()) return;

  test(object, Immediate(kSmiTagMask));
  Check(not_equal, AbortReason::kOperandIsASmiAndNotAGeneratorObject);

  {
    Push(object);
    Register map = object;

    LoadMap(map, object);

    // Any of the three types passes; the final Check(equal) consumes the
    // flags from the last CmpInstanceType (or an early equal jump).
    Label do_check;
    // Check if JSGeneratorObject
    CmpInstanceType(map, JS_GENERATOR_OBJECT_TYPE);
    j(equal, &do_check, Label::kNear);

    // Check if JSAsyncFunctionObject.
    CmpInstanceType(map, JS_ASYNC_FUNCTION_OBJECT_TYPE);
    j(equal, &do_check, Label::kNear);

    // Check if JSAsyncGeneratorObject
    CmpInstanceType(map, JS_ASYNC_GENERATOR_OBJECT_TYPE);

    bind(&do_check);
    Pop(object);
  }

  Check(equal, AbortReason::kOperandIsNotAGeneratorObject);
}
730
// Debug-code-only check that |object| is either the undefined value or an
// AllocationSite (identified by its map); aborts otherwise. Clobbers
// |scratch|.
void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
                                                     Register scratch) {
  if (emit_debug_code()) {
    Label done_checking;
    AssertNotSmi(object);
    CompareRoot(object, scratch, RootIndex::kUndefinedValue);
    j(equal, &done_checking);
    LoadRoot(scratch, RootIndex::kAllocationSiteWithWeakNextMap);
    // FieldOperand(object, 0) is the map slot of the heap object.
    cmp(FieldOperand(object, 0), scratch);
    Assert(equal, AbortReason::kExpectedUndefinedOrCell);
    bind(&done_checking);
  }
}
744
// Debug-code-only check that |object| is not a Smi; aborts otherwise.
void MacroAssembler::AssertNotSmi(Register object) {
  if (emit_debug_code()) {
    test(object, Immediate(kSmiTagMask));
    Check(not_equal, AbortReason::kOperandIsASmi);
  }
}
751
// Emits a stub frame prologue: saved ebp, new frame pointer, and the
// frame-type marker.
void TurboAssembler::StubPrologue(StackFrame::Type type) {
  push(ebp);  // Caller's frame pointer.
  mov(ebp, esp);
  push(Immediate(StackFrame::TypeToMarker(type)));
}
757
// Emits a standard JS frame prologue: saved ebp, new frame pointer,
// context, and the JS function.
void TurboAssembler::Prologue() {
  push(ebp);  // Caller's frame pointer.
  mov(ebp, esp);
  push(esi);  // Callee's context.
  push(edi);  // Callee's JS function.
}
764
// Sets up a new stack frame of the given |type| (saved ebp, frame
// pointer, type marker). Paired with LeaveFrame.
void TurboAssembler::EnterFrame(StackFrame::Type type) {
  push(ebp);
  mov(ebp, esp);
  push(Immediate(StackFrame::TypeToMarker(type)));
}
770
// Tears down the current frame with `leave`; with debug code enabled,
// first verifies the frame-type marker matches |type|.
void TurboAssembler::LeaveFrame(StackFrame::Type type) {
  if (emit_debug_code()) {
    cmp(Operand(ebp, CommonFrameConstants::kContextOrFrameTypeOffset),
        Immediate(StackFrame::TypeToMarker(type)));
    Check(equal, AbortReason::kStackFrameTypesMustMatch);
  }
  leave();
}
779
780 #ifdef V8_OS_WIN
// Grows the stack by the byte count in |bytes_scratch| (which is
// consumed), touching each page so Windows commits the guard pages.
void TurboAssembler::AllocateStackSpace(Register bytes_scratch) {
  // In windows, we cannot increment the stack size by more than one page
  // (minimum page size is 4KB) without accessing at least one byte on the
  // page. Check this:
  // https://msdn.microsoft.com/en-us/library/aa227153(v=vs.60).aspx.
  Label check_offset;
  Label touch_next_page;
  jmp(&check_offset);
  bind(&touch_next_page);
  sub(esp, Immediate(kStackPageSize));
  // Just to touch the page, before we increment further.
  mov(Operand(esp, 0), Immediate(0));
  sub(bytes_scratch, Immediate(kStackPageSize));

  // Loop one page at a time until less than a page remains.
  bind(&check_offset);
  cmp(bytes_scratch, kStackPageSize);
  j(greater, &touch_next_page);

  sub(esp, bytes_scratch);
}
801
// Grows the stack by a constant |bytes|, touching one byte per page so
// Windows commits the guard pages (see the register overload above).
void TurboAssembler::AllocateStackSpace(int bytes) {
  while (bytes > kStackPageSize) {
    sub(esp, Immediate(kStackPageSize));
    mov(Operand(esp, 0), Immediate(0));
    bytes -= kStackPageSize;
  }
  sub(esp, Immediate(bytes));
}
810 #endif
811
// Builds the fixed part of an exit frame (saved ebp, type marker, slot
// for the entry sp) and publishes fp/context/C-function into the
// isolate's per-thread "top" slots. Clobbers |scratch|.
void MacroAssembler::EnterExitFramePrologue(StackFrame::Type frame_type,
                                            Register scratch) {
  DCHECK(frame_type == StackFrame::EXIT ||
         frame_type == StackFrame::BUILTIN_EXIT);

  // Set up the frame structure on the stack.
  DCHECK_EQ(+2 * kSystemPointerSize, ExitFrameConstants::kCallerSPDisplacement);
  DCHECK_EQ(+1 * kSystemPointerSize, ExitFrameConstants::kCallerPCOffset);
  DCHECK_EQ(0 * kSystemPointerSize, ExitFrameConstants::kCallerFPOffset);
  push(ebp);
  mov(ebp, esp);

  // Reserve room for entry stack pointer.
  push(Immediate(StackFrame::TypeToMarker(frame_type)));
  DCHECK_EQ(-2 * kSystemPointerSize, ExitFrameConstants::kSPOffset);
  push(Immediate(0));  // Saved entry sp, patched before call.

  STATIC_ASSERT(edx == kRuntimeCallFunctionRegister);
  STATIC_ASSERT(esi == kContextRegister);

  // Save the frame pointer and the context in top.
  ExternalReference c_entry_fp_address =
      ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate());
  ExternalReference context_address =
      ExternalReference::Create(IsolateAddressId::kContextAddress, isolate());
  ExternalReference c_function_address =
      ExternalReference::Create(IsolateAddressId::kCFunctionAddress, isolate());

  DCHECK(!AreAliased(scratch, ebp, esi, edx));
  mov(ExternalReferenceAsOperand(c_entry_fp_address, scratch), ebp);
  mov(ExternalReferenceAsOperand(context_address, scratch), esi);
  mov(ExternalReferenceAsOperand(c_function_address, scratch), edx);
}
845
// Finishes exit-frame setup: optionally spills all XMM registers,
// reserves |argc| argument slots, aligns esp to the OS frame alignment,
// and patches the saved entry sp slot.
void MacroAssembler::EnterExitFrameEpilogue(int argc, bool save_doubles) {
  // Optionally save all XMM registers.
  if (save_doubles) {
    int space =
        XMMRegister::kNumRegisters * kDoubleSize + argc * kSystemPointerSize;
    AllocateStackSpace(space);
    const int offset = -ExitFrameConstants::kFixedFrameSizeFromFp;
    for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
      XMMRegister reg = XMMRegister::from_code(i);
      movsd(Operand(ebp, offset - ((i + 1) * kDoubleSize)), reg);
    }
  } else {
    AllocateStackSpace(argc * kSystemPointerSize);
  }

  // Get the required frame alignment for the OS.
  const int kFrameAlignment = base::OS::ActivationFrameAlignment();
  if (kFrameAlignment > 0) {
    DCHECK(base::bits::IsPowerOfTwo(kFrameAlignment));
    and_(esp, -kFrameAlignment);
  }

  // Patch the saved entry sp.
  mov(Operand(ebp, ExitFrameConstants::kSPOffset), esp);
}
871
// Enters a full exit frame for a runtime call: prologue, then argc/argv
// captured into callee-saved edi/esi (argc is expected in eax), then the
// epilogue which reserves the argument area.
void MacroAssembler::EnterExitFrame(int argc, bool save_doubles,
                                    StackFrame::Type frame_type) {
  EnterExitFramePrologue(frame_type, edi);

  // Set up argc and argv in callee-saved registers.
  int offset = StandardFrameConstants::kCallerSPOffset - kSystemPointerSize;
  mov(edi, eax);
  lea(esi, Operand(ebp, eax, times_system_pointer_size, offset));

  // Reserve space for argc, argv and isolate.
  EnterExitFrameEpilogue(argc, save_doubles);
}
884
// Builds an EXIT frame for calling an API function. Unlike EnterExitFrame
// this does not set up argc/argv registers and never saves XMM registers.
// |scratch| is used by the prologue for isolate-data stores.
void MacroAssembler::EnterApiExitFrame(int argc, Register scratch) {
  EnterExitFramePrologue(StackFrame::EXIT, scratch);
  EnterExitFrameEpilogue(argc, false);
}
889
// Tears down an exit frame built by EnterExitFrame. |save_doubles| must
// match the value used on entry so the XMM spill area is restored. With
// |pop_arguments| the JS arguments and receiver (located via esi, which
// EnterExitFrame pointed at argv) are removed from the caller's stack too.
void MacroAssembler::LeaveExitFrame(bool save_doubles, bool pop_arguments) {
  // Optionally restore all XMM registers.
  if (save_doubles) {
    const int offset = -ExitFrameConstants::kFixedFrameSizeFromFp;
    for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
      XMMRegister reg = XMMRegister::from_code(i);
      // Mirrors the ebp-relative spill layout in EnterExitFrameEpilogue.
      movsd(reg, Operand(ebp, offset - ((i + 1) * kDoubleSize)));
    }
  }

  if (pop_arguments) {
    // Get the return address from the stack and restore the frame pointer.
    mov(ecx, Operand(ebp, 1 * kSystemPointerSize));
    mov(ebp, Operand(ebp, 0 * kSystemPointerSize));

    // Pop the arguments and the receiver from the caller stack.
    // (+1 slot skips past the receiver as well.)
    lea(esp, Operand(esi, 1 * kSystemPointerSize));

    // Push the return address to get ready to return.
    push(ecx);
  } else {
    // Otherwise just leave the exit frame.
    leave();
  }

  LeaveExitFrameEpilogue();
}
917
// Clears the isolate's c_entry_fp (marking that no exit frame is active) and
// restores esi from the saved context slot. In debug builds the context slot
// is poisoned with kInvalidContext so stale reads are caught. Note esi is
// used as its own scratch for the first store; it is overwritten right after.
void MacroAssembler::LeaveExitFrameEpilogue() {
  // Clear the top frame.
  ExternalReference c_entry_fp_address =
      ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate());
  mov(ExternalReferenceAsOperand(c_entry_fp_address, esi), Immediate(0));

  // Restore current context from top and clear it in debug mode.
  ExternalReference context_address =
      ExternalReference::Create(IsolateAddressId::kContextAddress, isolate());
  mov(esi, ExternalReferenceAsOperand(context_address, esi));
#ifdef DEBUG
  // eax is preserved around the poison store since it may hold a live value.
  push(eax);
  mov(ExternalReferenceAsOperand(context_address, eax),
      Immediate(Context::kInvalidContext));
  pop(eax);
#endif
}
935
// Tears down a frame built by EnterApiExitFrame: unwinds esp/ebp directly
// (no argument popping, no XMM restore) and then clears the isolate's
// exit-frame bookkeeping.
void MacroAssembler::LeaveApiExitFrame() {
  mov(esp, ebp);
  pop(ebp);

  LeaveExitFrameEpilogue();
}
942
// Pushes a new stack handler (padding word + link to the previous handler)
// and makes it the current one by storing esp into the isolate's handler
// slot. |scratch| is only used to address the external reference.
void MacroAssembler::PushStackHandler(Register scratch) {
  // Adjust this code if not the case.
  STATIC_ASSERT(StackHandlerConstants::kSize == 2 * kSystemPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);

  push(Immediate(0));  // Padding.

  // Link the current handler as the next handler.
  ExternalReference handler_address =
      ExternalReference::Create(IsolateAddressId::kHandlerAddress, isolate());
  push(ExternalReferenceAsOperand(handler_address, scratch));

  // Set this new handler as the current one.
  mov(ExternalReferenceAsOperand(handler_address, scratch), esp);
}
958
// Unlinks the topmost stack handler: pops the saved 'next' pointer back into
// the isolate's handler slot and drops the rest of the handler (the padding
// word) from the stack.
void MacroAssembler::PopStackHandler(Register scratch) {
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
  ExternalReference handler_address =
      ExternalReference::Create(IsolateAddressId::kHandlerAddress, isolate());
  pop(ExternalReferenceAsOperand(handler_address, scratch));
  add(esp, Immediate(StackHandlerConstants::kSize - kSystemPointerSize));
}
966
// Calls the runtime function |f| with |num_arguments| arguments via the
// CEntry stub. Loads the argument count and the function's external
// reference into the ABI-designated registers before the call.
void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
                                 SaveFPRegsMode save_doubles) {
  // If the expected number of arguments of the runtime function is
  // constant, we check that the actual number of arguments match the
  // expectation.
  CHECK(f->nargs < 0 || f->nargs == num_arguments);

  // TODO(1236192): Most runtime routines don't need the number of
  // arguments passed in because it is constant. At some point we
  // should remove this need and make the runtime routine entry code
  // smarter.
  Move(kRuntimeCallArgCountRegister, Immediate(num_arguments));
  Move(kRuntimeCallFunctionRegister, Immediate(ExternalReference::Create(f)));
  Handle<Code> code =
      CodeFactory::CEntry(isolate(), f->result_size, save_doubles);
  Call(code, RelocInfo::CODE_TARGET);
}
984
// Tail-calls the runtime function |fid| via the CEntry stub. Only functions
// with a single result are supported here.
// ----------- S t a t e -------------
//  -- esp[0]                                  : return address
//  -- esp[kSystemPointerSize]                 : argument num_arguments - 1
//  ...
//  -- esp[kSystemPointerSize * num_arguments] : argument 0 (receiver)
//
//  For runtime functions with variable arguments:
//  -- eax                                     : number of  arguments
// -----------------------------------
void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
  const Runtime::Function* function = Runtime::FunctionForId(fid);
  DCHECK_EQ(1, function->result_size);
  if (function->nargs >= 0) {
    // TODO(1236192): Most runtime routines don't need the number of
    // arguments passed in because it is constant. At some point we
    // should remove this need and make the runtime routine entry code
    // smarter.
    Move(kRuntimeCallArgCountRegister, Immediate(function->nargs));
  }
  JumpToExternalReference(ExternalReference::Create(fid));
}
1007
// Tail-calls into the C entry runtime stub with |ext| as the target C
// function. |builtin_exit_frame| selects a BUILTIN_EXIT frame variant of
// CEntry so the frame is visible to stack walks as a builtin call.
void MacroAssembler::JumpToExternalReference(const ExternalReference& ext,
                                             bool builtin_exit_frame) {
  // Set the entry point and jump to the C entry runtime stub.
  Move(kRuntimeCallFunctionRegister, Immediate(ext));
  Handle<Code> code = CodeFactory::CEntry(isolate(), 1, kDontSaveFPRegs,
                                          kArgvOnStack, builtin_exit_frame);
  Jump(code, RelocInfo::CODE_TARGET);
}
1016
// Jumps to an off-heap (embedded builtins) instruction stream at |entry|.
void MacroAssembler::JumpToInstructionStream(Address entry) {
  jmp(entry, RelocInfo::OFF_HEAP_TARGET);
}
1020
// Removes the current frame in preparation for a tail call: shifts the
// callee's arguments (plus receiver and return address) down so they occupy
// the caller's argument area, restores the caller's ebp, and points esp at
// the relocated return address. Both count registers are clobbered.
// |number_of_temp_values_after_return_address| accounts for extra words the
// caller pushed above the return address.
void TurboAssembler::PrepareForTailCall(
    Register callee_args_count, Register caller_args_count, Register scratch0,
    Register scratch1, int number_of_temp_values_after_return_address) {
  DCHECK(!AreAliased(callee_args_count, caller_args_count, scratch0, scratch1));

  // Calculate the destination address where we will put the return address
  // after we drop current frame.
  Register new_sp_reg = scratch0;
  // caller_args_count now holds (caller - callee) argument count delta.
  sub(caller_args_count, callee_args_count);
  lea(new_sp_reg, Operand(ebp, caller_args_count, times_system_pointer_size,
                          StandardFrameConstants::kCallerPCOffset -
                              number_of_temp_values_after_return_address *
                                  kSystemPointerSize));

  if (FLAG_debug_code) {
    // The destination must lie above the current esp (we only ever shrink).
    cmp(esp, new_sp_reg);
    Check(below, AbortReason::kStackAccessBelowStackPointer);
  }

  // Copy return address from caller's frame to current frame's return address
  // to avoid its trashing and let the following loop copy it to the right
  // place.
  Register tmp_reg = scratch1;
  mov(tmp_reg, Operand(ebp, StandardFrameConstants::kCallerPCOffset));
  mov(Operand(esp,
              number_of_temp_values_after_return_address * kSystemPointerSize),
      tmp_reg);

  // Restore caller's frame pointer now as it could be overwritten by
  // the copying loop.
  mov(ebp, Operand(ebp, StandardFrameConstants::kCallerFPOffset));

  // +2 here is to copy both receiver and return address.
  Register count_reg = caller_args_count;
  lea(count_reg, Operand(callee_args_count,
                         2 + number_of_temp_values_after_return_address));

  // Now copy callee arguments to the caller frame going backwards to avoid
  // callee arguments corruption (source and destination areas could overlap).
  Label loop, entry;
  jmp(&entry, Label::kNear);
  bind(&loop);
  dec(count_reg);
  mov(tmp_reg, Operand(esp, count_reg, times_system_pointer_size, 0));
  mov(Operand(new_sp_reg, count_reg, times_system_pointer_size, 0), tmp_reg);
  bind(&entry);
  cmp(count_reg, Immediate(0));
  j(not_equal, &loop, Label::kNear);

  // Leave current frame.
  mov(esp, new_sp_reg);
}
1073
// Compares actual vs. expected argument counts before invoking a function.
// On mismatch, control is routed through the ArgumentsAdaptorTrampoline:
// called (then jumping to |done|, which the caller binds after the call
// sequence) for CALL_FUNCTION, or tail-jumped for JUMP_FUNCTION. On match,
// execution simply falls through.
void MacroAssembler::InvokePrologue(Register expected_parameter_count,
                                    Register actual_parameter_count,
                                    Label* done, InvokeFlag flag) {
  DCHECK_EQ(actual_parameter_count, eax);

  if (expected_parameter_count != actual_parameter_count) {
    DCHECK_EQ(expected_parameter_count, ecx);

    Label regular_invoke;
    cmp(expected_parameter_count, actual_parameter_count);
    j(equal, &regular_invoke);
    Handle<Code> adaptor = BUILTIN_CODE(isolate(), ArgumentsAdaptorTrampoline);
    if (flag == CALL_FUNCTION) {
      Call(adaptor, RelocInfo::CODE_TARGET);
      jmp(done, Label::kNear);
    } else {
      Jump(adaptor, RelocInfo::CODE_TARGET);
    }
    bind(&regular_invoke);
  }
}
1095
// Calls Runtime::kDebugOnFunctionCall while preserving all invocation state.
// The parameter-count registers are smi-tagged and pushed so they survive
// the runtime call (and GC); they are restored and untagged afterwards.
void MacroAssembler::CallDebugOnFunctionCall(Register fun, Register new_target,
                                             Register expected_parameter_count,
                                             Register actual_parameter_count) {
  // Only build a frame if we are not already inside one.
  FrameScope frame(this, has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
  SmiTag(expected_parameter_count);
  Push(expected_parameter_count);

  SmiTag(actual_parameter_count);
  Push(actual_parameter_count);
  // Untag again: the value is still needed below to locate the receiver.
  SmiUntag(actual_parameter_count);

  if (new_target.is_valid()) {
    Push(new_target);
  }
  // Pushed twice: once to restore after the call, once consumed as the
  // runtime call's argument.
  Push(fun);
  Push(fun);
  // Arguments are located 2 words below the base pointer.
#ifdef V8_REVERSE_JSARGS
  Operand receiver_op = Operand(ebp, kSystemPointerSize * 2);
#else
  Operand receiver_op =
      Operand(ebp, actual_parameter_count, times_system_pointer_size,
              kSystemPointerSize * 2);
#endif
  Push(receiver_op);
  CallRuntime(Runtime::kDebugOnFunctionCall);
  Pop(fun);
  if (new_target.is_valid()) {
    Pop(new_target);
  }
  Pop(actual_parameter_count);
  SmiUntag(actual_parameter_count);

  Pop(expected_parameter_count);
  SmiUntag(expected_parameter_count);
}
1132
// Invokes (calls or tail-jumps) the code object attached to |function|,
// adapting arguments if the counts differ and calling the debugger hook
// first when it is active. Register assignments are fixed by the JS calling
// convention (see the DCHECKs below).
void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
                                        Register expected_parameter_count,
                                        Register actual_parameter_count,
                                        InvokeFlag flag) {
  // You can't call a function without a valid frame.
  DCHECK(flag == JUMP_FUNCTION || has_frame());
  DCHECK_EQ(function, edi);
  DCHECK_IMPLIES(new_target.is_valid(), new_target == edx);
  DCHECK(expected_parameter_count == ecx || expected_parameter_count == eax);
  DCHECK_EQ(actual_parameter_count, eax);

  // On function call, call into the debugger if necessary.
  Label debug_hook, continue_after_hook;
  {
    ExternalReference debug_hook_active =
        ExternalReference::debug_hook_on_function_call_address(isolate());
    // eax is live (actual count), so preserve it around the scratch use.
    push(eax);
    cmpb(ExternalReferenceAsOperand(debug_hook_active, eax), Immediate(0));
    pop(eax);
    // The hook path is deferred to the end of the function (cold path).
    j(not_equal, &debug_hook, Label::kNear);
  }
  bind(&continue_after_hook);

  // Clear the new.target register if not given.
  if (!new_target.is_valid()) {
    Move(edx, isolate()->factory()->undefined_value());
  }

  Label done;
  InvokePrologue(expected_parameter_count, actual_parameter_count, &done, flag);
  // We call indirectly through the code field in the function to
  // allow recompilation to take effect without changing any of the
  // call sites.
  static_assert(kJavaScriptCallCodeStartRegister == ecx, "ABI mismatch");
  mov(ecx, FieldOperand(function, JSFunction::kCodeOffset));
  if (flag == CALL_FUNCTION) {
    CallCodeObject(ecx);
  } else {
    DCHECK(flag == JUMP_FUNCTION);
    JumpCodeObject(ecx);
  }
  jmp(&done, Label::kNear);

  // Deferred debug hook.
  bind(&debug_hook);
  CallDebugOnFunctionCall(function, new_target, expected_parameter_count,
                          actual_parameter_count);
  jmp(&continue_after_hook, Label::kNear);

  bind(&done);
}
1184
// Invokes the JSFunction in edi: loads its context into esi and its formal
// parameter count (from the SharedFunctionInfo) into ecx, then defers to
// InvokeFunctionCode for the actual call/jump and argument adaptation.
void MacroAssembler::InvokeFunction(Register fun, Register new_target,
                                    Register actual_parameter_count,
                                    InvokeFlag flag) {
  // You can't call a function without a valid frame.
  DCHECK(flag == JUMP_FUNCTION || has_frame());

  DCHECK(fun == edi);
  mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
  mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
  // Formal parameter count is a uint16 field; zero-extend into ecx.
  movzx_w(ecx,
          FieldOperand(ecx, SharedFunctionInfo::kFormalParameterCountOffset));

  InvokeFunctionCode(edi, new_target, ecx, actual_parameter_count, flag);
}
1199
// Loads the global proxy object of the current native context into |dst|.
void MacroAssembler::LoadGlobalProxy(Register dst) {
  LoadNativeContextSlot(dst, Context::GLOBAL_PROXY_INDEX);
}
1203
// Loads slot |index| of the native context into |destination|. The native
// context is reached through the current context's (esi) map.
void MacroAssembler::LoadNativeContextSlot(Register destination, int index) {
  // Load the native context from the current context.
  LoadMap(destination, esi);
  mov(destination,
      FieldOperand(destination,
                   Map::kConstructorOrBackPointerOrNativeContextOffset));
  // Load the function from the native context.
  mov(destination, Operand(destination, Context::SlotOffset(index)));
}
1213
SafepointRegisterStackIndex(int reg_code)1214 int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
1215 // The registers are pushed starting with the lowest encoding,
1216 // which means that lowest encodings are furthest away from
1217 // the stack pointer.
1218 DCHECK(reg_code >= 0 && reg_code < kNumSafepointRegisters);
1219 return kNumSafepointRegisters - reg_code - 1;
1220 }
1221
Ret()1222 void TurboAssembler::Ret() { ret(0); }
1223
Ret(int bytes_dropped,Register scratch)1224 void TurboAssembler::Ret(int bytes_dropped, Register scratch) {
1225 if (is_uint16(bytes_dropped)) {
1226 ret(bytes_dropped);
1227 } else {
1228 pop(scratch);
1229 add(esp, Immediate(bytes_dropped));
1230 push(scratch);
1231 ret(0);
1232 }
1233 }
1234
Push(Immediate value)1235 void TurboAssembler::Push(Immediate value) {
1236 if (root_array_available() && options().isolate_independent_code) {
1237 if (value.is_embedded_object()) {
1238 Push(HeapObjectAsOperand(value.embedded_object()));
1239 return;
1240 } else if (value.is_external_reference()) {
1241 Push(ExternalReferenceAddressAsOperand(value.external_reference()));
1242 return;
1243 }
1244 }
1245 push(value);
1246 }
1247
Drop(int stack_elements)1248 void MacroAssembler::Drop(int stack_elements) {
1249 if (stack_elements > 0) {
1250 add(esp, Immediate(stack_elements * kSystemPointerSize));
1251 }
1252 }
1253
Move(Register dst,Register src)1254 void TurboAssembler::Move(Register dst, Register src) {
1255 if (dst != src) {
1256 mov(dst, src);
1257 }
1258 }
1259
Move(Register dst,const Immediate & src)1260 void TurboAssembler::Move(Register dst, const Immediate& src) {
1261 if (!src.is_heap_object_request() && src.is_zero()) {
1262 xor_(dst, dst); // Shorter than mov of 32-bit immediate 0.
1263 } else if (src.is_external_reference()) {
1264 LoadAddress(dst, src.external_reference());
1265 } else {
1266 mov(dst, src);
1267 }
1268 }
1269
// Stores an immediate into a memory operand. In isolate-independent code,
// values that cannot be encoded as raw immediates (embedded objects,
// external references, heap-object requests) are materialized via a
// push/pop round-trip because no scratch register is available here.
void TurboAssembler::Move(Operand dst, const Immediate& src) {
  // Since there's no scratch register available, take a detour through the
  // stack.
  if (root_array_available() && options().isolate_independent_code) {
    if (src.is_embedded_object() || src.is_external_reference() ||
        src.is_heap_object_request()) {
      Push(src);
      pop(dst);
      return;
    }
  }

  if (src.is_embedded_object()) {
    mov(dst, src.embedded_object());
  } else {
    mov(dst, src);
  }
}
1288
Move(Register dst,Handle<HeapObject> src)1289 void TurboAssembler::Move(Register dst, Handle<HeapObject> src) {
1290 if (root_array_available() && options().isolate_independent_code) {
1291 IndirectLoadConstant(dst, src);
1292 return;
1293 }
1294 mov(dst, src);
1295 }
1296
// Materializes a 32-bit constant in an XMM register without a memory load.
// Fast paths: zero -> pxor; a single contiguous run of set bits -> all-ones
// (pcmpeqd) trimmed into place with shifts. Otherwise the value is routed
// through eax (preserved around the sequence).
void TurboAssembler::Move(XMMRegister dst, uint32_t src) {
  if (src == 0) {
    pxor(dst, dst);
  } else {
    unsigned cnt = base::bits::CountPopulation(src);
    unsigned nlz = base::bits::CountLeadingZeros32(src);
    unsigned ntz = base::bits::CountTrailingZeros32(src);
    // nlz + cnt + ntz == 32 iff the set bits form one contiguous run.
    if (nlz + cnt + ntz == 32) {
      pcmpeqd(dst, dst);  // All ones.
      if (ntz == 0) {
        psrld(dst, 32 - cnt);  // Run is right-aligned: shift zeros in on top.
      } else {
        pslld(dst, 32 - cnt);  // Left-align the run ...
        if (nlz != 0) psrld(dst, nlz);  // ... then shift it into position.
      }
    } else {
      push(eax);
      mov(eax, Immediate(src));
      movd(dst, Operand(eax));
      pop(eax);
    }
  }
}
1320
// Materializes a 64-bit constant in an XMM register. Fast paths mirror the
// 32-bit variant (zero, single contiguous bit run) plus a shifted-upper-half
// case; otherwise the halves are combined via pinsrd (SSE4.1) or, failing
// that, pushed to the stack and loaded with movsd.
void TurboAssembler::Move(XMMRegister dst, uint64_t src) {
  if (src == 0) {
    pxor(dst, dst);
  } else {
    uint32_t lower = static_cast<uint32_t>(src);
    uint32_t upper = static_cast<uint32_t>(src >> 32);
    unsigned cnt = base::bits::CountPopulation(src);
    unsigned nlz = base::bits::CountLeadingZeros64(src);
    unsigned ntz = base::bits::CountTrailingZeros64(src);
    // nlz + cnt + ntz == 64 iff the set bits form one contiguous run.
    if (nlz + cnt + ntz == 64) {
      pcmpeqd(dst, dst);  // All ones.
      if (ntz == 0) {
        psrlq(dst, 64 - cnt);  // Right-aligned run.
      } else {
        psllq(dst, 64 - cnt);  // Left-align the run ...
        if (nlz != 0) psrlq(dst, nlz);  // ... then shift into position.
      }
    } else if (lower == 0) {
      // Only the upper half is set: build it and shift it up.
      Move(dst, upper);
      psllq(dst, 32);
    } else if (CpuFeatures::IsSupported(SSE4_1)) {
      CpuFeatureScope scope(this, SSE4_1);
      push(eax);
      Move(eax, Immediate(lower));
      movd(dst, Operand(eax));
      // Skip reloading eax when both halves are identical.
      if (upper != lower) {
        Move(eax, Immediate(upper));
      }
      pinsrd(dst, Operand(eax), 1);
      pop(eax);
    } else {
      // No SSE4.1: assemble the value on the stack and load it whole.
      push(Immediate(upper));
      push(Immediate(lower));
      movsd(dst, Operand(esp, 0));
      add(esp, Immediate(kDoubleSize));
    }
  }
}
1359
Pshufhw(XMMRegister dst,Operand src,uint8_t shuffle)1360 void TurboAssembler::Pshufhw(XMMRegister dst, Operand src, uint8_t shuffle) {
1361 if (CpuFeatures::IsSupported(AVX)) {
1362 CpuFeatureScope scope(this, AVX);
1363 vpshufhw(dst, src, shuffle);
1364 } else {
1365 pshufhw(dst, src, shuffle);
1366 }
1367 }
1368
Pshuflw(XMMRegister dst,Operand src,uint8_t shuffle)1369 void TurboAssembler::Pshuflw(XMMRegister dst, Operand src, uint8_t shuffle) {
1370 if (CpuFeatures::IsSupported(AVX)) {
1371 CpuFeatureScope scope(this, AVX);
1372 vpshuflw(dst, src, shuffle);
1373 } else {
1374 pshuflw(dst, src, shuffle);
1375 }
1376 }
1377
Pshufd(XMMRegister dst,Operand src,uint8_t shuffle)1378 void TurboAssembler::Pshufd(XMMRegister dst, Operand src, uint8_t shuffle) {
1379 if (CpuFeatures::IsSupported(AVX)) {
1380 CpuFeatureScope scope(this, AVX);
1381 vpshufd(dst, src, shuffle);
1382 } else {
1383 pshufd(dst, src, shuffle);
1384 }
1385 }
1386
Psraw(XMMRegister dst,uint8_t shift)1387 void TurboAssembler::Psraw(XMMRegister dst, uint8_t shift) {
1388 if (CpuFeatures::IsSupported(AVX)) {
1389 CpuFeatureScope scope(this, AVX);
1390 vpsraw(dst, dst, shift);
1391 } else {
1392 psraw(dst, shift);
1393 }
1394 }
1395
Psrlw(XMMRegister dst,uint8_t shift)1396 void TurboAssembler::Psrlw(XMMRegister dst, uint8_t shift) {
1397 if (CpuFeatures::IsSupported(AVX)) {
1398 CpuFeatureScope scope(this, AVX);
1399 vpsrlw(dst, dst, shift);
1400 } else {
1401 psrlw(dst, shift);
1402 }
1403 }
1404
Psrlq(XMMRegister dst,uint8_t shift)1405 void TurboAssembler::Psrlq(XMMRegister dst, uint8_t shift) {
1406 if (CpuFeatures::IsSupported(AVX)) {
1407 CpuFeatureScope scope(this, AVX);
1408 vpsrlq(dst, dst, shift);
1409 } else {
1410 psrlq(dst, shift);
1411 }
1412 }
1413
Psignb(XMMRegister dst,Operand src)1414 void TurboAssembler::Psignb(XMMRegister dst, Operand src) {
1415 if (CpuFeatures::IsSupported(AVX)) {
1416 CpuFeatureScope scope(this, AVX);
1417 vpsignb(dst, dst, src);
1418 return;
1419 }
1420 if (CpuFeatures::IsSupported(SSSE3)) {
1421 CpuFeatureScope sse_scope(this, SSSE3);
1422 psignb(dst, src);
1423 return;
1424 }
1425 FATAL("no AVX or SSE3 support");
1426 }
1427
Psignw(XMMRegister dst,Operand src)1428 void TurboAssembler::Psignw(XMMRegister dst, Operand src) {
1429 if (CpuFeatures::IsSupported(AVX)) {
1430 CpuFeatureScope scope(this, AVX);
1431 vpsignw(dst, dst, src);
1432 return;
1433 }
1434 if (CpuFeatures::IsSupported(SSSE3)) {
1435 CpuFeatureScope sse_scope(this, SSSE3);
1436 psignw(dst, src);
1437 return;
1438 }
1439 FATAL("no AVX or SSE3 support");
1440 }
1441
Psignd(XMMRegister dst,Operand src)1442 void TurboAssembler::Psignd(XMMRegister dst, Operand src) {
1443 if (CpuFeatures::IsSupported(AVX)) {
1444 CpuFeatureScope scope(this, AVX);
1445 vpsignd(dst, dst, src);
1446 return;
1447 }
1448 if (CpuFeatures::IsSupported(SSSE3)) {
1449 CpuFeatureScope sse_scope(this, SSSE3);
1450 psignd(dst, src);
1451 return;
1452 }
1453 FATAL("no AVX or SSE3 support");
1454 }
1455
Pshufb(XMMRegister dst,Operand src)1456 void TurboAssembler::Pshufb(XMMRegister dst, Operand src) {
1457 if (CpuFeatures::IsSupported(AVX)) {
1458 CpuFeatureScope scope(this, AVX);
1459 vpshufb(dst, dst, src);
1460 return;
1461 }
1462 if (CpuFeatures::IsSupported(SSSE3)) {
1463 CpuFeatureScope sse_scope(this, SSSE3);
1464 pshufb(dst, src);
1465 return;
1466 }
1467 FATAL("no AVX or SSE3 support");
1468 }
1469
Pblendw(XMMRegister dst,Operand src,uint8_t imm8)1470 void TurboAssembler::Pblendw(XMMRegister dst, Operand src, uint8_t imm8) {
1471 if (CpuFeatures::IsSupported(AVX)) {
1472 CpuFeatureScope scope(this, AVX);
1473 vpblendw(dst, dst, src, imm8);
1474 return;
1475 }
1476 if (CpuFeatures::IsSupported(SSE4_1)) {
1477 CpuFeatureScope sse_scope(this, SSE4_1);
1478 pblendw(dst, src, imm8);
1479 return;
1480 }
1481 FATAL("no AVX or SSE4.1 support");
1482 }
1483
Palignr(XMMRegister dst,Operand src,uint8_t imm8)1484 void TurboAssembler::Palignr(XMMRegister dst, Operand src, uint8_t imm8) {
1485 if (CpuFeatures::IsSupported(AVX)) {
1486 CpuFeatureScope scope(this, AVX);
1487 vpalignr(dst, dst, src, imm8);
1488 return;
1489 }
1490 if (CpuFeatures::IsSupported(SSSE3)) {
1491 CpuFeatureScope sse_scope(this, SSSE3);
1492 palignr(dst, src, imm8);
1493 return;
1494 }
1495 FATAL("no AVX or SSE3 support");
1496 }
1497
Pextrb(Register dst,XMMRegister src,uint8_t imm8)1498 void TurboAssembler::Pextrb(Register dst, XMMRegister src, uint8_t imm8) {
1499 if (CpuFeatures::IsSupported(AVX)) {
1500 CpuFeatureScope scope(this, AVX);
1501 vpextrb(dst, src, imm8);
1502 return;
1503 }
1504 if (CpuFeatures::IsSupported(SSE4_1)) {
1505 CpuFeatureScope sse_scope(this, SSE4_1);
1506 pextrb(dst, src, imm8);
1507 return;
1508 }
1509 FATAL("no AVX or SSE4.1 support");
1510 }
1511
Pextrw(Register dst,XMMRegister src,uint8_t imm8)1512 void TurboAssembler::Pextrw(Register dst, XMMRegister src, uint8_t imm8) {
1513 if (CpuFeatures::IsSupported(AVX)) {
1514 CpuFeatureScope scope(this, AVX);
1515 vpextrw(dst, src, imm8);
1516 return;
1517 }
1518 if (CpuFeatures::IsSupported(SSE4_1)) {
1519 CpuFeatureScope sse_scope(this, SSE4_1);
1520 pextrw(dst, src, imm8);
1521 return;
1522 }
1523 FATAL("no AVX or SSE4.1 support");
1524 }
1525
// Extracts 32-bit lane |imm8| of |src| into |dst|. Lane 0 is a plain movd;
// other lanes use PEXTRD (AVX/SSE4.1) when available, otherwise the register
// is spilled through the stack and the requested half is reloaded.
void TurboAssembler::Pextrd(Register dst, XMMRegister src, uint8_t imm8) {
  if (imm8 == 0) {
    Movd(dst, src);
    return;
  }
  if (CpuFeatures::IsSupported(AVX)) {
    CpuFeatureScope scope(this, AVX);
    vpextrd(dst, src, imm8);
    return;
  }
  if (CpuFeatures::IsSupported(SSE4_1)) {
    CpuFeatureScope sse_scope(this, SSE4_1);
    pextrd(dst, src, imm8);
    return;
  }
  // Without AVX or SSE, we can only have 64-bit values in xmm registers.
  // We don't have an xmm scratch register, so move the data via the stack. This
  // path is rarely required, so it's acceptable to be slow.
  DCHECK_LT(imm8, 2);
  AllocateStackSpace(kDoubleSize);
  movsd(Operand(esp, 0), src);
  mov(dst, Operand(esp, imm8 * kUInt32Size));
  add(esp, Immediate(kDoubleSize));
}
1550
Pinsrb(XMMRegister dst,Operand src,int8_t imm8)1551 void TurboAssembler::Pinsrb(XMMRegister dst, Operand src, int8_t imm8) {
1552 if (CpuFeatures::IsSupported(AVX)) {
1553 CpuFeatureScope scope(this, AVX);
1554 vpinsrb(dst, dst, src, imm8);
1555 return;
1556 }
1557 if (CpuFeatures::IsSupported(SSE4_1)) {
1558 CpuFeatureScope sse_scope(this, SSE4_1);
1559 pinsrb(dst, src, imm8);
1560 return;
1561 }
1562 FATAL("no AVX or SSE4.1 support");
1563 }
1564
// Inserts the 32-bit value |src| into lane |imm8| of |dst|. Uses PINSRD
// (AVX/SSE4.1) when available; otherwise round-trips |dst| through the stack
// and overwrites the requested half there.
void TurboAssembler::Pinsrd(XMMRegister dst, Operand src, uint8_t imm8) {
  if (CpuFeatures::IsSupported(AVX)) {
    CpuFeatureScope scope(this, AVX);
    vpinsrd(dst, dst, src, imm8);
    return;
  }
  if (CpuFeatures::IsSupported(SSE4_1)) {
    CpuFeatureScope sse_scope(this, SSE4_1);
    pinsrd(dst, src, imm8);
    return;
  }
  // Without AVX or SSE, we can only have 64-bit values in xmm registers.
  // We don't have an xmm scratch register, so move the data via the stack. This
  // path is rarely required, so it's acceptable to be slow.
  DCHECK_LT(imm8, 2);
  AllocateStackSpace(kDoubleSize);
  // Write original content of {dst} to the stack.
  movsd(Operand(esp, 0), dst);
  // Overwrite the portion specified in {imm8}.
  if (src.is_reg_only()) {
    mov(Operand(esp, imm8 * kUInt32Size), src.reg());
  } else {
    // Memory operand: bounce through {dst}, whose original content is
    // already safely saved on the stack above.
    movss(dst, src);
    movss(Operand(esp, imm8 * kUInt32Size), dst);
  }
  // Load back the full value into {dst}.
  movsd(dst, Operand(esp, 0));
  add(esp, Immediate(kDoubleSize));
}
1594
Pinsrw(XMMRegister dst,Operand src,int8_t imm8)1595 void TurboAssembler::Pinsrw(XMMRegister dst, Operand src, int8_t imm8) {
1596 if (CpuFeatures::IsSupported(AVX)) {
1597 CpuFeatureScope scope(this, AVX);
1598 vpinsrw(dst, dst, src, imm8);
1599 return;
1600 } else {
1601 pinsrw(dst, src, imm8);
1602 return;
1603 }
1604 }
1605
Vbroadcastss(XMMRegister dst,Operand src)1606 void TurboAssembler::Vbroadcastss(XMMRegister dst, Operand src) {
1607 if (CpuFeatures::IsSupported(AVX)) {
1608 CpuFeatureScope avx_scope(this, AVX);
1609 vbroadcastss(dst, src);
1610 return;
1611 }
1612 movss(dst, src);
1613 shufps(dst, dst, static_cast<byte>(0));
1614 }
1615
// Counts leading zero bits of |src| into |dst|. Uses LZCNT when available;
// otherwise emulates it with BSR, which returns the index of the highest set
// bit (and leaves dst untouched for zero input, hence the explicit branch).
void TurboAssembler::Lzcnt(Register dst, Operand src) {
  if (CpuFeatures::IsSupported(LZCNT)) {
    CpuFeatureScope scope(this, LZCNT);
    lzcnt(dst, src);
    return;
  }
  Label not_zero_src;
  bsr(dst, src);
  j(not_zero, &not_zero_src, Label::kNear);
  mov(dst, 63);  // 63^31 == 32, the required result for zero input.
  bind(&not_zero_src);
  xor_(dst, Immediate(31));  // for x in [0..31], 31^x == 31-x.
}
1629
// Counts trailing zero bits of |src| into |dst|. Uses TZCNT (BMI1) when
// available; otherwise emulates it with BSF, which leaves dst untouched for
// zero input, so that case is patched explicitly.
void TurboAssembler::Tzcnt(Register dst, Operand src) {
  if (CpuFeatures::IsSupported(BMI1)) {
    CpuFeatureScope scope(this, BMI1);
    tzcnt(dst, src);
    return;
  }
  Label not_zero_src;
  bsf(dst, src);
  j(not_zero, &not_zero_src, Label::kNear);
  mov(dst, 32);  // The result of tzcnt is 32 if src = 0.
  bind(&not_zero_src);
}
1642
Popcnt(Register dst,Operand src)1643 void TurboAssembler::Popcnt(Register dst, Operand src) {
1644 if (CpuFeatures::IsSupported(POPCNT)) {
1645 CpuFeatureScope scope(this, POPCNT);
1646 popcnt(dst, src);
1647 return;
1648 }
1649 FATAL("no POPCNT support");
1650 }
1651
// Converts the weak reference in |in_out| to a strong heap object pointer by
// clearing the weak tag bit. Jumps to |target_if_cleared| if the weak
// reference has been cleared (points to a dead object).
void MacroAssembler::LoadWeakValue(Register in_out, Label* target_if_cleared) {
  cmp(in_out, Immediate(kClearedWeakHeapObjectLower32));
  j(equal, target_if_cleared);

  and_(in_out, Immediate(~kWeakHeapObjectMask));
}
1658
IncrementCounter(StatsCounter * counter,int value,Register scratch)1659 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
1660 Register scratch) {
1661 DCHECK_GT(value, 0);
1662 if (FLAG_native_code_counters && counter->Enabled()) {
1663 Operand operand =
1664 ExternalReferenceAsOperand(ExternalReference::Create(counter), scratch);
1665 if (value == 1) {
1666 inc(operand);
1667 } else {
1668 add(operand, Immediate(value));
1669 }
1670 }
1671 }
1672
DecrementCounter(StatsCounter * counter,int value,Register scratch)1673 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
1674 Register scratch) {
1675 DCHECK_GT(value, 0);
1676 if (FLAG_native_code_counters && counter->Enabled()) {
1677 Operand operand =
1678 ExternalReferenceAsOperand(ExternalReference::Create(counter), scratch);
1679 if (value == 1) {
1680 dec(operand);
1681 } else {
1682 sub(operand, Immediate(value));
1683 }
1684 }
1685 }
1686
// Debug-build-only check: aborts with |reason| if |cc| does not hold.
// Emits nothing in release code.
void TurboAssembler::Assert(Condition cc, AbortReason reason) {
  if (emit_debug_code()) Check(cc, reason);
}
1690
// Debug-build-only: unconditionally aborts with |reason| if reached.
// Emits nothing in release code.
void TurboAssembler::AssertUnreachable(AbortReason reason) {
  if (emit_debug_code()) Abort(reason);
}
1694
// Aborts with |reason| unless condition |cc| holds (emitted in all builds).
void TurboAssembler::Check(Condition cc, AbortReason reason) {
  Label L;
  j(cc, &L);
  Abort(reason);
  // will not return here
  bind(&L);
}
1702
// Emits a runtime check that esp satisfies the OS activation-frame
// alignment, trapping with int3 on violation. No check is emitted when the
// required alignment does not exceed the natural stack slot size.
void TurboAssembler::CheckStackAlignment() {
  int frame_alignment = base::OS::ActivationFrameAlignment();
  int frame_alignment_mask = frame_alignment - 1;
  if (frame_alignment > kSystemPointerSize) {
    DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
    Label alignment_as_expected;
    test(esp, Immediate(frame_alignment_mask));
    j(zero, &alignment_as_expected);
    // Abort if stack is not aligned.
    int3();
    bind(&alignment_as_expected);
  }
}
1716
// Emits an abort sequence for |reason|. Depending on assembler options this
// is either a bare trap (trap_on_abort), a direct C call to abort_with_reason
// (should_abort_hard), or a call to the Abort builtin. Never returns.
void TurboAssembler::Abort(AbortReason reason) {
#ifdef DEBUG
  const char* msg = GetAbortReason(reason);
  RecordComment("Abort message: ");
  RecordComment(msg);
#endif

  // Avoid emitting call to builtin if requested.
  if (trap_on_abort()) {
    int3();
    return;
  }

  if (should_abort_hard()) {
    // We don't care if we constructed a frame. Just pretend we did.
    FrameScope assume_frame(this, StackFrame::NONE);
    PrepareCallCFunction(1, eax);
    mov(Operand(esp, 0), Immediate(static_cast<int>(reason)));
    CallCFunction(ExternalReference::abort_with_reason(), 1);
    return;
  }

  // The Abort builtin expects the reason as a Smi in edx.
  Move(edx, Smi::FromInt(static_cast<int>(reason)));

  // Disable stub call restrictions to always allow calls to abort.
  if (!has_frame()) {
    // We don't actually want to generate a pile of code for this, so just
    // claim there is a stack frame, without generating one.
    FrameScope scope(this, StackFrame::NONE);
    Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
  } else {
    Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
  }
  // will not return here
  int3();
}
1753
// Reserves stack space for |num_arguments| C-call argument slots and, when
// the OS requires it, aligns esp — saving the pre-alignment esp just above
// the argument area so CallCFunction's callee can restore it. |scratch| is
// clobbered.
void TurboAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
  int frame_alignment = base::OS::ActivationFrameAlignment();
  if (frame_alignment != 0) {
    // Make stack end at alignment and make room for num_arguments words
    // and the original value of esp.
    mov(scratch, esp);
    AllocateStackSpace((num_arguments + 1) * kSystemPointerSize);
    DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
    and_(esp, -frame_alignment);
    mov(Operand(esp, num_arguments * kSystemPointerSize), scratch);
  } else {
    AllocateStackSpace(num_arguments * kSystemPointerSize);
  }
}
1768
// Calls the C function referred to by |function|, materializing its address
// in eax first. The stack must already be set up via PrepareCallCFunction.
void TurboAssembler::CallCFunction(ExternalReference function,
                                   int num_arguments) {
  // Trashing eax is ok as it will be the return value.
  Move(eax, Immediate(function));
  CallCFunction(eax, num_arguments);
}
1775
// Calls the C function whose address is in |function| with |num_arguments|
// stack-passed arguments previously stored by the caller (after
// PrepareCallCFunction). Records the current PC and FP in the isolate's
// fast-C-call fields so the stack remains iterable across the transition,
// then deallocates the argument area on return.
void TurboAssembler::CallCFunction(Register function, int num_arguments) {
  DCHECK_LE(num_arguments, kMaxCParameters);
  DCHECK(has_frame());
  // Check stack alignment.
  if (emit_debug_code()) {
    CheckStackAlignment();
  }

  // Save the frame pointer and PC so that the stack layout remains iterable,
  // even without an ExitFrame which normally exists between JS and C frames.
  // Find two caller-saved scratch registers.
  Register pc_scratch = eax;
  Register scratch = ecx;
  // Fall back to edx if |function| collides with a default scratch choice.
  if (function == eax) pc_scratch = edx;
  if (function == ecx) scratch = edx;
  // Materialize the current PC: the emitted call pushes it, the pop reads it.
  PushPC();
  pop(pc_scratch);

  // See x64 code for reasoning about how to address the isolate data fields.
  DCHECK_IMPLIES(!root_array_available(), isolate() != nullptr);
  mov(root_array_available()
          ? Operand(kRootRegister, IsolateData::fast_c_call_caller_pc_offset())
          : ExternalReferenceAsOperand(
                ExternalReference::fast_c_call_caller_pc_address(isolate()),
                scratch),
      pc_scratch);
  mov(root_array_available()
          ? Operand(kRootRegister, IsolateData::fast_c_call_caller_fp_offset())
          : ExternalReferenceAsOperand(
                ExternalReference::fast_c_call_caller_fp_address(isolate()),
                scratch),
      ebp);

  call(function);

  // We don't unset the PC; the FP is the source of truth.
  // Clearing the saved FP marks the fast C call as finished.
  mov(root_array_available()
          ? Operand(kRootRegister, IsolateData::fast_c_call_caller_fp_offset())
          : ExternalReferenceAsOperand(
                ExternalReference::fast_c_call_caller_fp_address(isolate()),
                scratch),
      Immediate(0));

  // Tear down the argument area built by PrepareCallCFunction: if the stack
  // was aligned, reload the original esp saved just above the arguments;
  // otherwise simply pop the argument words.
  if (base::OS::ActivationFrameAlignment() != 0) {
    mov(esp, Operand(esp, num_arguments * kSystemPointerSize));
  } else {
    add(esp, Immediate(num_arguments * kSystemPointerSize));
  }
}
1825
// Pushes the address of the instruction following this sequence onto the
// stack: the emitted call's return address (which is exactly that address)
// is left on the stack because the called label is bound immediately after.
void TurboAssembler::PushPC() {
  // Push the current PC onto the stack as "return address" via calling
  // the next instruction.
  Label get_pc;
  call(&get_pc);
  bind(&get_pc);
}
1833
// Emits a call to |code_object|. When inline off-heap trampolines are
// enabled and the target is an isolate-independent builtin, the embedded
// (off-heap) entry point is called directly instead of the on-heap Code
// object.
void TurboAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
  DCHECK_IMPLIES(options().isolate_independent_code,
                 Builtins::IsIsolateIndependentBuiltin(*code_object));
  if (options().inline_offheap_trampolines) {
    int builtin_index = Builtins::kNoBuiltinId;
    if (isolate()->builtins()->IsBuiltinHandle(code_object, &builtin_index) &&
        Builtins::IsIsolateIndependent(builtin_index)) {
      // Inline the trampoline.
      CallBuiltin(builtin_index);
      return;
    }
  }
  DCHECK(RelocInfo::IsCodeTarget(rmode));
  call(code_object, rmode);
}
1849
// Replaces the Smi-tagged builtin index in |builtin_index| with that
// builtin's entry address, loaded from the isolate's builtin entry table.
void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) {
  STATIC_ASSERT(kSystemPointerSize == 4);
  STATIC_ASSERT(kSmiShiftSize == 0);
  STATIC_ASSERT(kSmiTagSize == 1);
  STATIC_ASSERT(kSmiTag == 0);

  // The builtin_index register contains the builtin index as a Smi.
  // Untagging is folded into the indexing operand below (we use
  // times_half_system_pointer_size instead of times_system_pointer_size since
  // smis are already shifted by one).
  mov(builtin_index,
      Operand(kRootRegister, builtin_index, times_half_system_pointer_size,
              IsolateData::builtin_entry_table_offset()));
}
1864
// Calls the builtin whose index (as a Smi) is held in |builtin_index|.
// Clobbers |builtin_index| with the builtin's entry address.
void TurboAssembler::CallBuiltinByIndex(Register builtin_index) {
  LoadEntryFromBuiltinIndex(builtin_index);
  call(builtin_index);
}
1869
CallBuiltin(int builtin_index)1870 void TurboAssembler::CallBuiltin(int builtin_index) {
1871 DCHECK(Builtins::IsBuiltinId(builtin_index));
1872 RecordCommentForOffHeapTrampoline(builtin_index);
1873 CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
1874 EmbeddedData d = EmbeddedData::FromBlob();
1875 Address entry = d.InstructionStartOfBuiltin(builtin_index);
1876 call(entry, RelocInfo::OFF_HEAP_TARGET);
1877 }
1878
// Loads the entry address of |code_object| into |destination|. For
// isolate-independent code this dispatches on whether the Code object is an
// off-heap trampoline; otherwise the entry is simply the start of the Code
// object's instruction area. |destination| and |code_object| may alias.
void TurboAssembler::LoadCodeObjectEntry(Register destination,
                                         Register code_object) {
  // Code objects are called differently depending on whether we are generating
  // builtin code (which will later be embedded into the binary) or compiling
  // user JS code at runtime.
  // * Builtin code runs in --jitless mode and thus must not call into on-heap
  //   Code targets. Instead, we dispatch through the builtins entry table.
  // * Codegen at runtime does not have this restriction and we can use the
  //   shorter, branchless instruction sequence. The assumption here is that
  //   targets are usually generated code and not builtin Code objects.

  if (options().isolate_independent_code) {
    DCHECK(root_array_available());
    Label if_code_is_off_heap, out;

    // Check whether the Code object is an off-heap trampoline. If so, call its
    // (off-heap) entry point directly without going through the (on-heap)
    // trampoline. Otherwise, just call the Code object as always.
    test(FieldOperand(code_object, Code::kFlagsOffset),
         Immediate(Code::IsOffHeapTrampoline::kMask));
    j(not_equal, &if_code_is_off_heap);

    // Not an off-heap trampoline, the entry point is at
    // Code::raw_instruction_start().
    Move(destination, code_object);
    add(destination, Immediate(Code::kHeaderSize - kHeapObjectTag));
    jmp(&out);

    // An off-heap trampoline, the entry point is loaded from the builtin entry
    // table.
    bind(&if_code_is_off_heap);
    // The builtin index is an untagged int here, so scale by the full pointer
    // size when indexing the entry table.
    mov(destination, FieldOperand(code_object, Code::kBuiltinIndexOffset));
    mov(destination,
        Operand(kRootRegister, destination, times_system_pointer_size,
                IsolateData::builtin_entry_table_offset()));

    bind(&out);
  } else {
    // Entry point is at Code::raw_instruction_start(), i.e. right past the
    // Code header (untag the heap-object pointer at the same time).
    Move(destination, code_object);
    add(destination, Immediate(Code::kHeaderSize - kHeapObjectTag));
  }
}
1921
// Calls |code_object| through its resolved entry address; the register is
// clobbered with that address first.
void TurboAssembler::CallCodeObject(Register code_object) {
  LoadCodeObjectEntry(code_object, code_object);
  call(code_object);
}
1926
// Tail-calls |code_object| through its resolved entry address; the register
// is clobbered with that address first.
void TurboAssembler::JumpCodeObject(Register code_object) {
  LoadCodeObjectEntry(code_object, code_object);
  jmp(code_object);
}
1931
// Jumps to the target behind |reference| by loading it indirectly through
// the root-register-relative external reference table (requires the root
// array to be set up).
void TurboAssembler::Jump(const ExternalReference& reference) {
  DCHECK(root_array_available());
  jmp(Operand(kRootRegister, RootRegisterOffsetForExternalReferenceTableEntry(
                                 isolate(), reference)));
}
1937
// Emits a jump to |code_object|. When inline off-heap trampolines are
// enabled and the target is an isolate-independent builtin, jumps straight
// to the embedded (off-heap) entry point (the jmp analogue of CallBuiltin).
void TurboAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode) {
  DCHECK_IMPLIES(options().isolate_independent_code,
                 Builtins::IsIsolateIndependentBuiltin(*code_object));
  if (options().inline_offheap_trampolines) {
    int builtin_index = Builtins::kNoBuiltinId;
    if (isolate()->builtins()->IsBuiltinHandle(code_object, &builtin_index) &&
        Builtins::IsIsolateIndependent(builtin_index)) {
      // Inline the trampoline.
      RecordCommentForOffHeapTrampoline(builtin_index);
      CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
      EmbeddedData d = EmbeddedData::FromBlob();
      Address entry = d.InstructionStartOfBuiltin(builtin_index);
      jmp(entry, RelocInfo::OFF_HEAP_TARGET);
      return;
    }
  }
  DCHECK(RelocInfo::IsCodeTarget(rmode));
  jmp(code_object, rmode);
}
1957
// Emits a retpoline-style call to the address in |reg|: the indirect branch
// is performed via ret (after overwriting the return address on the stack)
// instead of an indirect call/jmp, and speculative execution is penned in
// the pause/jmp loop at capture_spec.
void TurboAssembler::RetpolineCall(Register reg) {
  Label setup_return, setup_target, inner_indirect_branch, capture_spec;

  jmp(&setup_return);  // Jump past the entire retpoline below.

  bind(&inner_indirect_branch);
  call(&setup_target);

  // Speculative execution of the ret below lands here and spins harmlessly.
  bind(&capture_spec);
  pause();
  jmp(&capture_spec);

  bind(&setup_target);
  // Replace the return address pushed by the call above with the real
  // target, so the ret transfers control there.
  mov(Operand(esp, 0), reg);
  ret(0);

  bind(&setup_return);
  call(&inner_indirect_branch);  // Callee will return after this instruction.
}
1977
// Emits a retpoline-style call to the immediate |destination| (same scheme
// as the register variant: branch via ret with the return address replaced,
// speculation penned in the pause/jmp loop).
void TurboAssembler::RetpolineCall(Address destination, RelocInfo::Mode rmode) {
  Label setup_return, setup_target, inner_indirect_branch, capture_spec;

  jmp(&setup_return);  // Jump past the entire retpoline below.

  bind(&inner_indirect_branch);
  call(&setup_target);

  // Speculative execution of the ret below lands here and spins harmlessly.
  bind(&capture_spec);
  pause();
  jmp(&capture_spec);

  bind(&setup_target);
  // Replace the return address pushed by the call above with the real
  // target, so the ret transfers control there.
  mov(Operand(esp, 0), destination, rmode);
  ret(0);

  bind(&setup_return);
  call(&inner_indirect_branch);  // Callee will return after this instruction.
}
1997
// Emits a retpoline-style jump to the address in |reg|: the call pushes a
// return address that is immediately overwritten with the real target, and
// the ret performs the branch; speculation is penned in the pause/jmp loop.
void TurboAssembler::RetpolineJump(Register reg) {
  Label setup_target, capture_spec;

  call(&setup_target);

  // Speculative execution of the ret below lands here and spins harmlessly.
  bind(&capture_spec);
  pause();
  jmp(&capture_spec);

  bind(&setup_target);
  // Replace the pushed return address with the real target for the ret.
  mov(Operand(esp, 0), reg);
  ret(0);
}
2011
// Tests the MemoryChunk flags of the page containing |object| against |mask|
// and jumps to |condition_met| if the condition |cc| (zero or not_zero)
// holds. |scratch| receives the page start address and may alias |object|.
void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
                                   Condition cc, Label* condition_met,
                                   Label::Distance condition_met_distance) {
  DCHECK(cc == zero || cc == not_zero);
  // Compute the page start by masking off the low (in-page) address bits.
  if (scratch == object) {
    and_(scratch, Immediate(~kPageAlignmentMask));
  } else {
    mov(scratch, Immediate(~kPageAlignmentMask));
    and_(scratch, object);
  }
  // Use a byte-sized test when the mask fits in one byte.
  if (mask < (1 << kBitsPerByte)) {
    test_b(Operand(scratch, MemoryChunk::kFlagsOffset), Immediate(mask));
  } else {
    test(Operand(scratch, MemoryChunk::kFlagsOffset), Immediate(mask));
  }
  j(cc, condition_met, condition_met_distance);
}
2029
ComputeCodeStartAddress(Register dst)2030 void TurboAssembler::ComputeCodeStartAddress(Register dst) {
2031 // In order to get the address of the current instruction, we first need
2032 // to use a call and then use a pop, thus pushing the return address to
2033 // the stack and then popping it into the register.
2034 Label current;
2035 call(¤t);
2036 int pc = pc_offset();
2037 bind(¤t);
2038 pop(dst);
2039 if (pc != 0) {
2040 sub(dst, Immediate(pc));
2041 }
2042 }
2043
// Emits a call to the deoptimizer entry at |target|, passing |deopt_id| in
// ebx. |exit| and |kind| are unused on ia32.
void TurboAssembler::CallForDeoptimization(Address target, int deopt_id,
                                           Label* exit, DeoptimizeKind kind) {
  USE(exit, kind);
  NoRootArrayScope no_root_array(this);
  // Save the deopt id in ebx (we don't need the roots array from now on).
  mov(ebx, deopt_id);
  call(target, RelocInfo::RUNTIME_ENTRY);
}
2052
// Emits a trap instruction (int3 breakpoint).
void TurboAssembler::Trap() { int3(); }
// Emits a debug-break instruction (int3 breakpoint).
void TurboAssembler::DebugBreak() { int3(); }
2055
2056 } // namespace internal
2057 } // namespace v8
2058
2059 #endif // V8_TARGET_ARCH_IA32
2060