// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <assert.h>  // For assert
#include <limits.h>  // For LONG_MIN, LONG_MAX.

#if V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64

#include "src/base/bits.h"
#include "src/base/division-by-constant.h"
#include "src/codegen/callable.h"
#include "src/codegen/code-factory.h"
#include "src/codegen/external-reference-table.h"
#include "src/codegen/macro-assembler.h"
#include "src/codegen/register-configuration.h"
#include "src/debug/debug.h"
#include "src/execution/frames-inl.h"
#include "src/heap/heap-inl.h"  // For MemoryChunk.
#include "src/init/bootstrapper.h"
#include "src/logging/counters.h"
#include "src/runtime/runtime.h"
#include "src/snapshot/embedded/embedded-data.h"
#include "src/snapshot/snapshot.h"
#include "src/wasm/wasm-code-manager.h"

// Satisfy cpplint check, but don't include platform-specific header. It is
// included recursively via macro-assembler.h.
#if 0
#include "src/codegen/ppc/macro-assembler-ppc.h"
#endif

namespace v8 {
namespace internal {

int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
                                                    Register exclusion1,
                                                    Register exclusion2,
                                                    Register exclusion3) const {
  int bytes = 0;
  RegList exclusions = 0;
  if (exclusion1 != no_reg) {
    exclusions |= exclusion1.bit();
    if (exclusion2 != no_reg) {
      exclusions |= exclusion2.bit();
      if (exclusion3 != no_reg) {
        exclusions |= exclusion3.bit();
      }
    }
  }

  RegList list = kJSCallerSaved & ~exclusions;
  bytes += NumRegs(list) * kPointerSize;

  if (fp_mode == kSaveFPRegs) {
    bytes += kNumCallerSavedDoubles * kDoubleSize;
  }

  return bytes;
}

int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
                                    Register exclusion2, Register exclusion3) {
  int bytes = 0;
  RegList exclusions = 0;
  if (exclusion1 != no_reg) {
    exclusions |= exclusion1.bit();
    if (exclusion2 != no_reg) {
      exclusions |= exclusion2.bit();
      if (exclusion3 != no_reg) {
        exclusions |= exclusion3.bit();
      }
    }
  }

  RegList list = kJSCallerSaved & ~exclusions;
  MultiPush(list);
  bytes += NumRegs(list) * kPointerSize;

  if (fp_mode == kSaveFPRegs) {
    MultiPushDoubles(kCallerSavedDoubles);
    bytes += kNumCallerSavedDoubles * kDoubleSize;
  }

  return bytes;
}

int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
                                   Register exclusion2, Register exclusion3) {
  int bytes = 0;
  if (fp_mode == kSaveFPRegs) {
    MultiPopDoubles(kCallerSavedDoubles);
    bytes += kNumCallerSavedDoubles * kDoubleSize;
  }

  RegList exclusions = 0;
  if (exclusion1 != no_reg) {
    exclusions |= exclusion1.bit();
    if (exclusion2 != no_reg) {
      exclusions |= exclusion2.bit();
      if (exclusion3 != no_reg) {
        exclusions |= exclusion3.bit();
      }
    }
  }

  RegList list = kJSCallerSaved & ~exclusions;
  MultiPop(list);
  bytes += NumRegs(list) * kPointerSize;

  return bytes;
}

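// PPC has no branch-to-GPR instruction, so indirect jumps are routed through
// the count register: move the target into CTR, then bctr.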
void TurboAssembler::Jump(Register target) {
  mtctr(target);
  bctr();
}

void TurboAssembler::LoadFromConstantsTable(Register destination,
                                            int constant_index) {
  DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kBuiltinsConstantsTable));

  const uint32_t offset =
      FixedArray::kHeaderSize + constant_index * kPointerSize - kHeapObjectTag;

  CHECK(is_uint19(offset));
  DCHECK_NE(destination, r0);
  LoadRoot(destination, RootIndex::kBuiltinsConstantsTable);
  LoadP(destination, MemOperand(destination, offset), r0);
}

void TurboAssembler::LoadRootRelative(Register destination, int32_t offset) {
  LoadP(destination, MemOperand(kRootRegister, offset), r0);
}

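// addi only accepts a signed 16-bit immediate; larger offsets must be
// materialized into the destination register and added explicitly.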
void TurboAssembler::LoadRootRegisterOffset(Register destination,
                                            intptr_t offset) {
  if (offset == 0) {
    mr(destination, kRootRegister);
  } else if (is_int16(offset)) {
    addi(destination, kRootRegister, Operand(offset));
  } else {
    mov(destination, Operand(offset));
    add(destination, kRootRegister, destination);
  }
}

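// A conditional jump to an absolute target is synthesized by branching over
// the mov/mtctr/bctr sequence when the negated condition holds.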
void TurboAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
                          Condition cond, CRegister cr) {
  Label skip;

  if (cond != al) b(NegateCondition(cond), &skip, cr);

  DCHECK(rmode == RelocInfo::CODE_TARGET || rmode == RelocInfo::RUNTIME_ENTRY);

  mov(ip, Operand(target, rmode));
  mtctr(ip);
  bctr();

  bind(&skip);
}

void TurboAssembler::Jump(Address target, RelocInfo::Mode rmode, Condition cond,
                          CRegister cr) {
  DCHECK(!RelocInfo::IsCodeTarget(rmode));
  Jump(static_cast<intptr_t>(target), rmode, cond, cr);
}

void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
                          Condition cond, CRegister cr) {
  DCHECK(RelocInfo::IsCodeTarget(rmode));
  DCHECK_IMPLIES(options().isolate_independent_code,
                 Builtins::IsIsolateIndependentBuiltin(*code));

  int builtin_index = Builtins::kNoBuiltinId;
  bool target_is_isolate_independent_builtin =
      isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
      Builtins::IsIsolateIndependent(builtin_index);

  if (root_array_available_ && options().isolate_independent_code) {
    Label skip;
    Register scratch = ip;
    int offset = code->builtin_index() * kSystemPointerSize +
                 IsolateData::builtin_entry_table_offset();
    LoadP(scratch, MemOperand(kRootRegister, offset), r0);
    if (cond != al) b(NegateCondition(cond), &skip, cr);
    Jump(scratch);
    bind(&skip);
    return;
  } else if (options().inline_offheap_trampolines &&
             target_is_isolate_independent_builtin) {
    // Inline the trampoline.
    Label skip;
    RecordCommentForOffHeapTrampoline(builtin_index);
    EmbeddedData d = EmbeddedData::FromBlob();
    Address entry = d.InstructionStartOfBuiltin(builtin_index);
    // Use ip directly instead of using UseScratchRegisterScope, as we do
    // not preserve scratch registers across calls.
    mov(ip, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
    if (cond != al) b(NegateCondition(cond), &skip, cr);
    Jump(ip);
    bind(&skip);
    return;
  }
  Jump(static_cast<intptr_t>(code.address()), rmode, cond, cr);
}

void TurboAssembler::Jump(const ExternalReference& reference) {
  UseScratchRegisterScope temps(this);
  Register scratch = temps.Acquire();
  Move(scratch, reference);
  if (ABI_USES_FUNCTION_DESCRIPTORS) {
    // AIX uses a function descriptor. When calling C code be
    // aware of this descriptor and pick up values from it.
    LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(scratch, kPointerSize));
    LoadP(scratch, MemOperand(scratch, 0));
  }
  Jump(scratch);
}

void TurboAssembler::Call(Register target) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  // branch via link register and set LK bit for return point
  mtctr(target);
  bctrl();
}

void MacroAssembler::CallJSEntry(Register target) {
  CHECK(target == r5);
  Call(target);
}

int MacroAssembler::CallSizeNotPredictableCodeSize(Address target,
                                                   RelocInfo::Mode rmode,
                                                   Condition cond) {
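  // A far call is a full-width mov of the target address
  // (kMovInstructionsNoConstantPool instructions) followed by mtctr and bctrl.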
  return (2 + kMovInstructionsNoConstantPool) * kInstrSize;
}

void TurboAssembler::Call(Address target, RelocInfo::Mode rmode,
                          Condition cond) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  DCHECK(cond == al);

  // This can likely be optimized to make use of bc() with a 24-bit relative
  // offset:
  //
  // RecordRelocInfo(x.rmode_, x.immediate);
  // bc( BA, .... offset, LKset);
  //

  mov(ip, Operand(target, rmode));
  mtctr(ip);
  bctrl();
}

void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
                          Condition cond) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  DCHECK(RelocInfo::IsCodeTarget(rmode));
  DCHECK_IMPLIES(options().isolate_independent_code,
                 Builtins::IsIsolateIndependentBuiltin(*code));
  DCHECK_IMPLIES(options().use_pc_relative_calls_and_jumps,
                 Builtins::IsIsolateIndependentBuiltin(*code));

  int builtin_index = Builtins::kNoBuiltinId;
  bool target_is_isolate_independent_builtin =
      isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
      Builtins::IsIsolateIndependent(builtin_index);

  if (root_array_available_ && options().isolate_independent_code) {
    Label skip;
    int offset = code->builtin_index() * kSystemPointerSize +
                 IsolateData::builtin_entry_table_offset();
    LoadP(ip, MemOperand(kRootRegister, offset));
    if (cond != al) b(NegateCondition(cond), &skip);
    Call(ip);
    bind(&skip);
    return;
  } else if (options().inline_offheap_trampolines &&
             target_is_isolate_independent_builtin) {
    // Inline the trampoline.
    RecordCommentForOffHeapTrampoline(builtin_index);
    EmbeddedData d = EmbeddedData::FromBlob();
    Address entry = d.InstructionStartOfBuiltin(builtin_index);
    // Use ip directly instead of using UseScratchRegisterScope, as we do
    // not preserve scratch registers across calls.
    mov(ip, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
    Label skip;
    if (cond != al) b(NegateCondition(cond), &skip);
    Call(ip);
    bind(&skip);
    return;
  }
  DCHECK(code->IsExecutable());
  Call(code.address(), rmode, cond);
}

void TurboAssembler::Drop(int count) {
  if (count > 0) {
    Add(sp, sp, count * kPointerSize, r0);
  }
}

void TurboAssembler::Drop(Register count, Register scratch) {
  ShiftLeftImm(scratch, count, Operand(kPointerSizeLog2));
  add(sp, sp, scratch);
}

void TurboAssembler::Call(Label* target) { b(target, SetLK); }

void TurboAssembler::Push(Handle<HeapObject> handle) {
  mov(r0, Operand(handle));
  push(r0);
}

void TurboAssembler::Push(Smi smi) {
  mov(r0, Operand(smi));
  push(r0);
}

void TurboAssembler::Move(Register dst, Handle<HeapObject> value) {
  // TODO(jgruber,v8:8887): Also consider a root-relative load when generating
  // non-isolate-independent code. In many cases it might be cheaper than
  // embedding the relocatable value.
  if (root_array_available_ && options().isolate_independent_code) {
    IndirectLoadConstant(dst, value);
    return;
  }
  mov(dst, Operand(value));
}

void TurboAssembler::Move(Register dst, ExternalReference reference) {
  // TODO(jgruber,v8:8887): Also consider a root-relative load when generating
  // non-isolate-independent code. In many cases it might be cheaper than
  // embedding the relocatable value.
  if (root_array_available_ && options().isolate_independent_code) {
    IndirectLoadExternalReference(dst, reference);
    return;
  }
  mov(dst, Operand(reference));
}

void TurboAssembler::Move(Register dst, Register src, Condition cond) {
  DCHECK(cond == al);
  if (dst != src) {
    mr(dst, src);
  }
}

void TurboAssembler::Move(DoubleRegister dst, DoubleRegister src) {
  if (dst != src) {
    fmr(dst, src);
  }
}

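// Registers are stored in descending code order, so the lowest-numbered
// register in |regs| ends up at the lowest stack address.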
void TurboAssembler::MultiPush(RegList regs, Register location) {
  int16_t num_to_push = base::bits::CountPopulation(regs);
  int16_t stack_offset = num_to_push * kPointerSize;

  subi(location, location, Operand(stack_offset));
  for (int16_t i = Register::kNumRegisters - 1; i >= 0; i--) {
    if ((regs & (1 << i)) != 0) {
      stack_offset -= kPointerSize;
      StoreP(ToRegister(i), MemOperand(location, stack_offset));
    }
  }
}

void TurboAssembler::MultiPop(RegList regs, Register location) {
  int16_t stack_offset = 0;

  for (int16_t i = 0; i < Register::kNumRegisters; i++) {
    if ((regs & (1 << i)) != 0) {
      LoadP(ToRegister(i), MemOperand(location, stack_offset));
      stack_offset += kPointerSize;
    }
  }
  addi(location, location, Operand(stack_offset));
}

void TurboAssembler::MultiPushDoubles(RegList dregs, Register location) {
  int16_t num_to_push = base::bits::CountPopulation(dregs);
  int16_t stack_offset = num_to_push * kDoubleSize;

  subi(location, location, Operand(stack_offset));
  for (int16_t i = DoubleRegister::kNumRegisters - 1; i >= 0; i--) {
    if ((dregs & (1 << i)) != 0) {
      DoubleRegister dreg = DoubleRegister::from_code(i);
      stack_offset -= kDoubleSize;
      stfd(dreg, MemOperand(location, stack_offset));
    }
  }
}

void TurboAssembler::MultiPopDoubles(RegList dregs, Register location) {
  int16_t stack_offset = 0;

  for (int16_t i = 0; i < DoubleRegister::kNumRegisters; i++) {
    if ((dregs & (1 << i)) != 0) {
      DoubleRegister dreg = DoubleRegister::from_code(i);
      lfd(dreg, MemOperand(location, stack_offset));
      stack_offset += kDoubleSize;
    }
  }
  addi(location, location, Operand(stack_offset));
}

void TurboAssembler::LoadRoot(Register destination, RootIndex index,
                              Condition cond) {
  DCHECK(cond == al);
  LoadP(destination,
        MemOperand(kRootRegister, RootRegisterOffsetForRootIndex(index)), r0);
}

void MacroAssembler::RecordWriteField(Register object, int offset,
                                      Register value, Register dst,
                                      LinkRegisterStatus lr_status,
                                      SaveFPRegsMode save_fp,
                                      RememberedSetAction remembered_set_action,
                                      SmiCheck smi_check) {
  // First, check if a write barrier is even needed. The tests below
  // catch stores of Smis.
  Label done;

  // Skip barrier if writing a smi.
  if (smi_check == INLINE_SMI_CHECK) {
    JumpIfSmi(value, &done);
  }

  // Although the object register is tagged, the offset is relative to the
  // start of the object, so the offset must be a multiple of kPointerSize.
  DCHECK(IsAligned(offset, kPointerSize));

  Add(dst, object, offset - kHeapObjectTag, r0);
  if (emit_debug_code()) {
    Label ok;
    andi(r0, dst, Operand(kPointerSize - 1));
    beq(&ok, cr0);
    stop();
    bind(&ok);
  }

  RecordWrite(object, dst, value, lr_status, save_fp, remembered_set_action,
              OMIT_SMI_CHECK);

  bind(&done);

  // Clobber clobbered input registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    mov(value, Operand(bit_cast<intptr_t>(kZapValue + 4)));
    mov(dst, Operand(bit_cast<intptr_t>(kZapValue + 8)));
  }
}

void TurboAssembler::SaveRegisters(RegList registers) {
  DCHECK_GT(NumRegs(registers), 0);
  RegList regs = 0;
  for (int i = 0; i < Register::kNumRegisters; ++i) {
    if ((registers >> i) & 1u) {
      regs |= Register::from_code(i).bit();
    }
  }

  MultiPush(regs);
}

void TurboAssembler::RestoreRegisters(RegList registers) {
  DCHECK_GT(NumRegs(registers), 0);
  RegList regs = 0;
  for (int i = 0; i < Register::kNumRegisters; ++i) {
    if ((registers >> i) & 1u) {
      regs |= Register::from_code(i).bit();
    }
  }
  MultiPop(regs);
}

void TurboAssembler::CallEphemeronKeyBarrier(Register object, Register address,
                                             SaveFPRegsMode fp_mode) {
  EphemeronKeyBarrierDescriptor descriptor;
  RegList registers = descriptor.allocatable_registers();

  SaveRegisters(registers);

  Register object_parameter(
      descriptor.GetRegisterParameter(EphemeronKeyBarrierDescriptor::kObject));
  Register slot_parameter(descriptor.GetRegisterParameter(
      EphemeronKeyBarrierDescriptor::kSlotAddress));
  Register fp_mode_parameter(
      descriptor.GetRegisterParameter(EphemeronKeyBarrierDescriptor::kFPMode));

  push(object);
  push(address);

  pop(slot_parameter);
  pop(object_parameter);

  Move(fp_mode_parameter, Smi::FromEnum(fp_mode));
  Call(isolate()->builtins()->builtin_handle(Builtins::kEphemeronKeyBarrier),
       RelocInfo::CODE_TARGET);
  RestoreRegisters(registers);
}

void TurboAssembler::CallRecordWriteStub(
    Register object, Register address,
    RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode) {
  CallRecordWriteStub(
      object, address, remembered_set_action, fp_mode,
      isolate()->builtins()->builtin_handle(Builtins::kRecordWrite),
      kNullAddress);
}

void TurboAssembler::CallRecordWriteStub(
    Register object, Register address,
    RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
    Address wasm_target) {
  CallRecordWriteStub(object, address, remembered_set_action, fp_mode,
                      Handle<Code>::null(), wasm_target);
}

void TurboAssembler::CallRecordWriteStub(
    Register object, Register address,
    RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
    Handle<Code> code_target, Address wasm_target) {
  DCHECK_NE(code_target.is_null(), wasm_target == kNullAddress);
  // TODO(albertnetymk): For now we ignore remembered_set_action and fp_mode,
  // i.e. always emit remember set and save FP registers in RecordWriteStub. If
  // large performance regression is observed, we should use these values to
  // avoid unnecessary work.

  RecordWriteDescriptor descriptor;
  RegList registers = descriptor.allocatable_registers();

  SaveRegisters(registers);

  Register object_parameter(
      descriptor.GetRegisterParameter(RecordWriteDescriptor::kObject));
  Register slot_parameter(
      descriptor.GetRegisterParameter(RecordWriteDescriptor::kSlot));
  Register remembered_set_parameter(
      descriptor.GetRegisterParameter(RecordWriteDescriptor::kRememberedSet));
  Register fp_mode_parameter(
      descriptor.GetRegisterParameter(RecordWriteDescriptor::kFPMode));

  push(object);
  push(address);

  pop(slot_parameter);
  pop(object_parameter);

  Move(remembered_set_parameter, Smi::FromEnum(remembered_set_action));
  Move(fp_mode_parameter, Smi::FromEnum(fp_mode));
  if (code_target.is_null()) {
    Call(wasm_target, RelocInfo::WASM_STUB_CALL);
  } else {
    Call(code_target, RelocInfo::CODE_TARGET);
  }

  RestoreRegisters(registers);
}

// Will clobber 4 registers: object, address, scratch, ip.  The
// register 'object' contains a heap object pointer.  The heap object
// tag is shifted away.
void MacroAssembler::RecordWrite(Register object, Register address,
                                 Register value, LinkRegisterStatus lr_status,
                                 SaveFPRegsMode fp_mode,
                                 RememberedSetAction remembered_set_action,
                                 SmiCheck smi_check) {
  DCHECK(object != value);
  if (emit_debug_code()) {
    LoadP(r0, MemOperand(address));
    cmp(r0, value);
    Check(eq, AbortReason::kWrongAddressOrValuePassedToRecordWrite);
  }

  if ((remembered_set_action == OMIT_REMEMBERED_SET &&
       !FLAG_incremental_marking) ||
      FLAG_disable_write_barriers) {
    return;
  }

  // First, check if a write barrier is even needed. The tests below
  // catch stores of smis and stores into the young generation.
  Label done;

  if (smi_check == INLINE_SMI_CHECK) {
    JumpIfSmi(value, &done);
  }

  CheckPageFlag(value,
                value,  // Used as scratch.
                MemoryChunk::kPointersToHereAreInterestingMask, eq, &done);
  CheckPageFlag(object,
                value,  // Used as scratch.
                MemoryChunk::kPointersFromHereAreInterestingMask, eq, &done);

  // Record the actual write.
  if (lr_status == kLRHasNotBeenSaved) {
    mflr(r0);
    push(r0);
  }
  CallRecordWriteStub(object, address, remembered_set_action, fp_mode);
  if (lr_status == kLRHasNotBeenSaved) {
    pop(r0);
    mtlr(r0);
  }

  bind(&done);

  // Clobber clobbered registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    mov(address, Operand(bit_cast<intptr_t>(kZapValue + 12)));
    mov(value, Operand(bit_cast<intptr_t>(kZapValue + 16)));
  }
}

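// fp_delta counts the slots between the final sp and the slot holding the
// saved fp, so that fp ends up pointing at the saved fp slot.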
void TurboAssembler::PushCommonFrame(Register marker_reg) {
  int fp_delta = 0;
  mflr(r0);
  if (FLAG_enable_embedded_constant_pool) {
    if (marker_reg.is_valid()) {
      Push(r0, fp, kConstantPoolRegister, marker_reg);
      fp_delta = 2;
    } else {
      Push(r0, fp, kConstantPoolRegister);
      fp_delta = 1;
    }
  } else {
    if (marker_reg.is_valid()) {
      Push(r0, fp, marker_reg);
      fp_delta = 1;
    } else {
      Push(r0, fp);
      fp_delta = 0;
    }
  }
  addi(fp, sp, Operand(fp_delta * kPointerSize));
}

void TurboAssembler::PushStandardFrame(Register function_reg) {
  int fp_delta = 0;
  mflr(r0);
  if (FLAG_enable_embedded_constant_pool) {
    if (function_reg.is_valid()) {
      Push(r0, fp, kConstantPoolRegister, cp, function_reg);
      fp_delta = 3;
    } else {
      Push(r0, fp, kConstantPoolRegister, cp);
      fp_delta = 2;
    }
  } else {
    if (function_reg.is_valid()) {
      Push(r0, fp, cp, function_reg);
      fp_delta = 2;
    } else {
      Push(r0, fp, cp);
      fp_delta = 1;
    }
  }
  addi(fp, sp, Operand(fp_delta * kPointerSize));
}

void TurboAssembler::RestoreFrameStateForTailCall() {
  if (FLAG_enable_embedded_constant_pool) {
    LoadP(kConstantPoolRegister,
          MemOperand(fp, StandardFrameConstants::kConstantPoolOffset));
    set_constant_pool_available(false);
  }
  LoadP(r0, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
  LoadP(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
  mtlr(r0);
}

void TurboAssembler::CanonicalizeNaN(const DoubleRegister dst,
                                     const DoubleRegister src) {
  // Turn potential sNaN into qNaN.
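  // Subtracting zero preserves all other values, including -0, but the
  // arithmetic operation quiets a signaling NaN.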
  fsub(dst, src, kDoubleRegZero);
}

void TurboAssembler::ConvertIntToDouble(Register src, DoubleRegister dst) {
  MovIntToDouble(dst, src, r0);
  fcfid(dst, dst);
}

void TurboAssembler::ConvertUnsignedIntToDouble(Register src,
                                                DoubleRegister dst) {
  MovUnsignedIntToDouble(dst, src, r0);
  fcfid(dst, dst);
}

void TurboAssembler::ConvertIntToFloat(Register src, DoubleRegister dst) {
  MovIntToDouble(dst, src, r0);
  fcfids(dst, dst);
}

void TurboAssembler::ConvertUnsignedIntToFloat(Register src,
                                               DoubleRegister dst) {
  MovUnsignedIntToDouble(dst, src, r0);
  fcfids(dst, dst);
}

#if V8_TARGET_ARCH_PPC64
void TurboAssembler::ConvertInt64ToDouble(Register src,
                                          DoubleRegister double_dst) {
  MovInt64ToDouble(double_dst, src);
  fcfid(double_dst, double_dst);
}

void TurboAssembler::ConvertUnsignedInt64ToFloat(Register src,
                                                 DoubleRegister double_dst) {
  MovInt64ToDouble(double_dst, src);
  fcfidus(double_dst, double_dst);
}

void TurboAssembler::ConvertUnsignedInt64ToDouble(Register src,
                                                  DoubleRegister double_dst) {
  MovInt64ToDouble(double_dst, src);
  fcfidu(double_dst, double_dst);
}

void TurboAssembler::ConvertInt64ToFloat(Register src,
                                         DoubleRegister double_dst) {
  MovInt64ToDouble(double_dst, src);
  fcfids(double_dst, double_dst);
}
#endif

void TurboAssembler::ConvertDoubleToInt64(const DoubleRegister double_input,
#if !V8_TARGET_ARCH_PPC64
                                          const Register dst_hi,
#endif
                                          const Register dst,
                                          const DoubleRegister double_dst,
                                          FPRoundingMode rounding_mode) {
  if (rounding_mode == kRoundToZero) {
    fctidz(double_dst, double_input);
  } else {
    SetRoundingMode(rounding_mode);
    fctid(double_dst, double_input);
    ResetRoundingMode();
  }

  MovDoubleToInt64(
#if !V8_TARGET_ARCH_PPC64
      dst_hi,
#endif
      dst, double_dst);
}

#if V8_TARGET_ARCH_PPC64
void TurboAssembler::ConvertDoubleToUnsignedInt64(
    const DoubleRegister double_input, const Register dst,
    const DoubleRegister double_dst, FPRoundingMode rounding_mode) {
  if (rounding_mode == kRoundToZero) {
    fctiduz(double_dst, double_input);
  } else {
    SetRoundingMode(rounding_mode);
    fctidu(double_dst, double_input);
    ResetRoundingMode();
  }

  MovDoubleToInt64(dst, double_dst);
}
#endif

#if !V8_TARGET_ARCH_PPC64
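// On 32-bit targets, 64-bit shifts operate on a (low, high) register pair.
// For shift amounts >= 32 the result is formed entirely from the opposite
// word; otherwise the two shifted words are combined with the bits carried
// across the word boundary.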
void TurboAssembler::ShiftLeftPair(Register dst_low, Register dst_high,
                                   Register src_low, Register src_high,
                                   Register scratch, Register shift) {
  DCHECK(!AreAliased(dst_low, src_high));
  DCHECK(!AreAliased(dst_high, src_low));
  DCHECK(!AreAliased(dst_low, dst_high, shift));
  Label less_than_32;
  Label done;
  cmpi(shift, Operand(32));
  blt(&less_than_32);
  // If shift >= 32
  andi(scratch, shift, Operand(0x1F));
  slw(dst_high, src_low, scratch);
  li(dst_low, Operand::Zero());
  b(&done);
  bind(&less_than_32);
  // If shift < 32
  subfic(scratch, shift, Operand(32));
  slw(dst_high, src_high, shift);
  srw(scratch, src_low, scratch);
  orx(dst_high, dst_high, scratch);
  slw(dst_low, src_low, shift);
  bind(&done);
}

void TurboAssembler::ShiftLeftPair(Register dst_low, Register dst_high,
                                   Register src_low, Register src_high,
                                   uint32_t shift) {
  DCHECK(!AreAliased(dst_low, src_high));
  DCHECK(!AreAliased(dst_high, src_low));
  if (shift == 32) {
    Move(dst_high, src_low);
    li(dst_low, Operand::Zero());
  } else if (shift > 32) {
    shift &= 0x1F;
    slwi(dst_high, src_low, Operand(shift));
    li(dst_low, Operand::Zero());
  } else if (shift == 0) {
    Move(dst_low, src_low);
    Move(dst_high, src_high);
  } else {
    slwi(dst_high, src_high, Operand(shift));
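    // rlwimi rotates src_low left by |shift| and inserts the bits carried out
    // of the low word into the low-order bits of dst_high.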
    rlwimi(dst_high, src_low, shift, 32 - shift, 31);
    slwi(dst_low, src_low, Operand(shift));
  }
}

void TurboAssembler::ShiftRightPair(Register dst_low, Register dst_high,
                                    Register src_low, Register src_high,
                                    Register scratch, Register shift) {
  DCHECK(!AreAliased(dst_low, src_high));
  DCHECK(!AreAliased(dst_high, src_low));
  DCHECK(!AreAliased(dst_low, dst_high, shift));
  Label less_than_32;
  Label done;
  cmpi(shift, Operand(32));
  blt(&less_than_32);
  // If shift >= 32
  andi(scratch, shift, Operand(0x1F));
  srw(dst_low, src_high, scratch);
  li(dst_high, Operand::Zero());
  b(&done);
  bind(&less_than_32);
  // If shift < 32
  subfic(scratch, shift, Operand(32));
  srw(dst_low, src_low, shift);
  slw(scratch, src_high, scratch);
  orx(dst_low, dst_low, scratch);
  srw(dst_high, src_high, shift);
  bind(&done);
}

void TurboAssembler::ShiftRightPair(Register dst_low, Register dst_high,
                                    Register src_low, Register src_high,
                                    uint32_t shift) {
  DCHECK(!AreAliased(dst_low, src_high));
  DCHECK(!AreAliased(dst_high, src_low));
  if (shift == 32) {
    Move(dst_low, src_high);
    li(dst_high, Operand::Zero());
  } else if (shift > 32) {
    shift &= 0x1F;
    srwi(dst_low, src_high, Operand(shift));
    li(dst_high, Operand::Zero());
  } else if (shift == 0) {
    Move(dst_low, src_low);
    Move(dst_high, src_high);
  } else {
    srwi(dst_low, src_low, Operand(shift));
    rlwimi(dst_low, src_high, 32 - shift, 0, shift - 1);
    srwi(dst_high, src_high, Operand(shift));
  }
}

void TurboAssembler::ShiftRightAlgPair(Register dst_low, Register dst_high,
                                       Register src_low, Register src_high,
                                       Register scratch, Register shift) {
  DCHECK(!AreAliased(dst_low, src_high, shift));
  DCHECK(!AreAliased(dst_high, src_low, shift));
  Label less_than_32;
  Label done;
  cmpi(shift, Operand(32));
  blt(&less_than_32);
  // If shift >= 32
  andi(scratch, shift, Operand(0x1F));
  sraw(dst_low, src_high, scratch);
  srawi(dst_high, src_high, 31);
  b(&done);
  bind(&less_than_32);
  // If shift < 32
  subfic(scratch, shift, Operand(32));
  srw(dst_low, src_low, shift);
  slw(scratch, src_high, scratch);
  orx(dst_low, dst_low, scratch);
  sraw(dst_high, src_high, shift);
  bind(&done);
}

void TurboAssembler::ShiftRightAlgPair(Register dst_low, Register dst_high,
                                       Register src_low, Register src_high,
                                       uint32_t shift) {
  DCHECK(!AreAliased(dst_low, src_high));
  DCHECK(!AreAliased(dst_high, src_low));
  if (shift == 32) {
    Move(dst_low, src_high);
    srawi(dst_high, src_high, 31);
  } else if (shift > 32) {
    shift &= 0x1F;
    srawi(dst_low, src_high, shift);
    srawi(dst_high, src_high, 31);
  } else if (shift == 0) {
    Move(dst_low, src_low);
    Move(dst_high, src_high);
  } else {
    srwi(dst_low, src_low, Operand(shift));
    rlwimi(dst_low, src_high, 32 - shift, 0, shift - 1);
    srawi(dst_high, src_high, shift);
  }
}
#endif

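// The constant pool offset is stored in the Code object header; load it
// relative to the code entry address, then rebase to an absolute pointer.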
void TurboAssembler::LoadConstantPoolPointerRegisterFromCodeTargetAddress(
    Register code_target_address) {
  lwz(kConstantPoolRegister,
      MemOperand(code_target_address,
                 Code::kConstantPoolOffsetOffset - Code::kHeaderSize));
  add(kConstantPoolRegister, kConstantPoolRegister, code_target_address);
}

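// Branch to the next instruction with the link bit set: LR then holds the
// address of that instruction, i.e. the current PC, which mflr copies out.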
void TurboAssembler::LoadPC(Register dst) {
  b(4, SetLK);
  mflr(dst);
}

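// LoadPC yields the address of an instruction at a known buffer offset;
// subtracting that offset gives the start of the code object. LR is saved in
// r0 because LoadPC clobbers it.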
void TurboAssembler::ComputeCodeStartAddress(Register dst) {
  mflr(r0);
  LoadPC(dst);
  subi(dst, dst, Operand(pc_offset() - kInstrSize));
  mtlr(r0);
}

void TurboAssembler::LoadConstantPoolPointerRegister() {
  LoadPC(kConstantPoolRegister);
  int32_t delta = -pc_offset() + 4;
  add_label_offset(kConstantPoolRegister, kConstantPoolRegister,
                   ConstantPoolPosition(), delta);
}

void TurboAssembler::StubPrologue(StackFrame::Type type) {
  {
    ConstantPoolUnavailableScope constant_pool_unavailable(this);
    mov(r11, Operand(StackFrame::TypeToMarker(type)));
    PushCommonFrame(r11);
  }
  if (FLAG_enable_embedded_constant_pool) {
    LoadConstantPoolPointerRegister();
    set_constant_pool_available(true);
  }
}

void TurboAssembler::Prologue() {
  PushStandardFrame(r4);
  if (FLAG_enable_embedded_constant_pool) {
    // base contains prologue address
    LoadConstantPoolPointerRegister();
    set_constant_pool_available(true);
  }
}

void TurboAssembler::EnterFrame(StackFrame::Type type,
                                bool load_constant_pool_pointer_reg) {
  if (FLAG_enable_embedded_constant_pool && load_constant_pool_pointer_reg) {
    // Push type explicitly so we can leverage the constant pool.
    // This path cannot rely on ip containing code entry.
    PushCommonFrame();
    LoadConstantPoolPointerRegister();
    mov(ip, Operand(StackFrame::TypeToMarker(type)));
    push(ip);
  } else {
    mov(ip, Operand(StackFrame::TypeToMarker(type)));
    PushCommonFrame(ip);
  }
}

int TurboAssembler::LeaveFrame(StackFrame::Type type, int stack_adjustment) {
  ConstantPoolUnavailableScope constant_pool_unavailable(this);
  // r3: preserved
  // r4: preserved
  // r5: preserved

  // Drop the execution stack down to the frame pointer and restore
  // the caller's state.
  int frame_ends;
  LoadP(r0, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
  LoadP(ip, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
  if (FLAG_enable_embedded_constant_pool) {
    LoadP(kConstantPoolRegister,
          MemOperand(fp, StandardFrameConstants::kConstantPoolOffset));
  }
  mtlr(r0);
  frame_ends = pc_offset();
  Add(sp, fp, StandardFrameConstants::kCallerSPOffset + stack_adjustment, r0);
  mr(fp, ip);
  return frame_ends;
}

// ExitFrame layout (probably wrongish.. needs updating)
//
//  SP -> previousSP
//        LK reserved
//        sp_on_exit (for debug?)
// oldSP->prev SP
//        LK
//        <parameters on stack>

// Prior to calling EnterExitFrame, we've got a bunch of parameters
// on the stack that we need to wrap a real frame around.. so first
// we reserve a slot for LK and push the previous SP which is captured
// in the fp register (r31)
// Then - we buy a new frame

void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
                                    StackFrame::Type frame_type) {
  DCHECK(frame_type == StackFrame::EXIT ||
         frame_type == StackFrame::BUILTIN_EXIT);
  // Set up the frame structure on the stack.
  DCHECK_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement);
  DCHECK_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset);
  DCHECK_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset);
  DCHECK_GT(stack_space, 0);

  // This is an opportunity to build a frame to wrap
  // all of the pushes that have happened inside of V8
  // since we were called from C code

  mov(ip, Operand(StackFrame::TypeToMarker(frame_type)));
  PushCommonFrame(ip);
  // Reserve room for saved entry sp.
  subi(sp, fp, Operand(ExitFrameConstants::kFixedFrameSizeFromFp));

  if (emit_debug_code()) {
    li(r8, Operand::Zero());
    StoreP(r8, MemOperand(fp, ExitFrameConstants::kSPOffset));
  }
  if (FLAG_enable_embedded_constant_pool) {
    StoreP(kConstantPoolRegister,
           MemOperand(fp, ExitFrameConstants::kConstantPoolOffset));
  }

  // Save the frame pointer and the context in top.
  Move(r8, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
                                     isolate()));
  StoreP(fp, MemOperand(r8));
  Move(r8,
       ExternalReference::Create(IsolateAddressId::kContextAddress, isolate()));
  StoreP(cp, MemOperand(r8));

  // Optionally save all volatile double registers.
  if (save_doubles) {
    MultiPushDoubles(kCallerSavedDoubles);
    // Note that d0 will be accessible at
    //   fp - ExitFrameConstants::kFrameSize -
    //   kNumCallerSavedDoubles * kDoubleSize,
    // since the sp slot and code slot were pushed after the fp.
  }

  addi(sp, sp, Operand(-stack_space * kPointerSize));

  // Allocate and align the frame preparing for calling the runtime
  // function.
  const int frame_alignment = ActivationFrameAlignment();
  if (frame_alignment > kPointerSize) {
    DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
    ClearRightImm(sp, sp,
                  Operand(base::bits::WhichPowerOfTwo(frame_alignment)));
  }
  li(r0, Operand::Zero());
  StorePU(r0, MemOperand(sp, -kNumRequiredStackFrameSlots * kPointerSize));

  // Set the exit frame sp value to point just before the return address
  // location.
  addi(r8, sp, Operand((kStackFrameExtraParamSlot + 1) * kPointerSize));
  StoreP(r8, MemOperand(fp, ExitFrameConstants::kSPOffset));
}

int TurboAssembler::ActivationFrameAlignment() {
#if !defined(USE_SIMULATOR)
  // Running on the real platform. Use the alignment as mandated by the local
  // environment.
  // Note: This will break if we ever start generating snapshots on one PPC
  // platform for another PPC platform with a different alignment.
  return base::OS::ActivationFrameAlignment();
#else  // Simulated
  // If we are using the simulator then we should always align to the expected
  // alignment. As the simulator is used to generate snapshots we do not know
  // if the target platform will need alignment, so this is controlled from a
  // flag.
  return FLAG_sim_stack_alignment;
#endif
}

void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
                                    bool argument_count_is_length) {
  ConstantPoolUnavailableScope constant_pool_unavailable(this);
  // Optionally restore all double registers.
  if (save_doubles) {
    // Calculate the stack location of the saved doubles and restore them.
    const int kNumRegs = kNumCallerSavedDoubles;
    const int offset =
        (ExitFrameConstants::kFixedFrameSizeFromFp + kNumRegs * kDoubleSize);
    addi(r6, fp, Operand(-offset));
    MultiPopDoubles(kCallerSavedDoubles, r6);
  }

  // Clear top frame.
  li(r6, Operand::Zero());
  Move(ip, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
                                     isolate()));
  StoreP(r6, MemOperand(ip));

  // Restore current context from top and clear it in debug mode.
  Move(ip,
       ExternalReference::Create(IsolateAddressId::kContextAddress, isolate()));
  LoadP(cp, MemOperand(ip));

#ifdef DEBUG
  mov(r6, Operand(Context::kInvalidContext));
  Move(ip,
       ExternalReference::Create(IsolateAddressId::kContextAddress, isolate()));
  StoreP(r6, MemOperand(ip));
#endif

  // Tear down the exit frame, pop the arguments, and return.
  LeaveFrame(StackFrame::EXIT);

  if (argument_count.is_valid()) {
    if (!argument_count_is_length) {
      ShiftLeftImm(argument_count, argument_count, Operand(kPointerSizeLog2));
    }
    add(sp, sp, argument_count);
  }
}

void TurboAssembler::MovFromFloatResult(const DoubleRegister dst) {
  Move(dst, d1);
}

void TurboAssembler::MovFromFloatParameter(const DoubleRegister dst) {
  Move(dst, d1);
}

void TurboAssembler::PrepareForTailCall(Register callee_args_count,
                                        Register caller_args_count,
                                        Register scratch0, Register scratch1) {
  DCHECK(!AreAliased(callee_args_count, caller_args_count, scratch0, scratch1));

  // Calculate the end of destination area where we will put the arguments
  // after we drop current frame. We add kPointerSize to count the receiver
  // argument which is not included into formal parameters count.
  Register dst_reg = scratch0;
  ShiftLeftImm(dst_reg, caller_args_count, Operand(kPointerSizeLog2));
  add(dst_reg, fp, dst_reg);
  addi(dst_reg, dst_reg,
       Operand(StandardFrameConstants::kCallerSPOffset + kPointerSize));

  Register src_reg = caller_args_count;
  // Calculate the end of source area. +kPointerSize is for the receiver.
  ShiftLeftImm(src_reg, callee_args_count, Operand(kPointerSizeLog2));
  add(src_reg, sp, src_reg);
  addi(src_reg, src_reg, Operand(kPointerSize));

  if (FLAG_debug_code) {
    cmpl(src_reg, dst_reg);
    Check(lt, AbortReason::kStackAccessBelowStackPointer);
  }

  // Restore caller's frame pointer and return address now as they will be
  // overwritten by the copying loop.
  RestoreFrameStateForTailCall();

  // Now copy callee arguments to the caller frame going backwards to avoid
  // callee arguments corruption (source and destination areas could overlap).

  // Both src_reg and dst_reg are pointing to the word after the one to copy,
  // so they must be pre-decremented in the loop.
  Register tmp_reg = scratch1;
  Label loop;
  addi(tmp_reg, callee_args_count, Operand(1));  // +1 for receiver
  mtctr(tmp_reg);
  bind(&loop);
  LoadPU(tmp_reg, MemOperand(src_reg, -kPointerSize));
  StorePU(tmp_reg, MemOperand(dst_reg, -kPointerSize));
  bdnz(&loop);

  // Leave current frame.
  mr(sp, dst_reg);
}

void MacroAssembler::InvokePrologue(Register expected_parameter_count,
                                    Register actual_parameter_count,
                                    Label* done, InvokeFlag flag) {
  Label regular_invoke;

  // Check whether the expected and actual arguments count match. If not,
  // setup registers according to contract with ArgumentsAdaptorTrampoline:
  //  r3: actual arguments count
  //  r4: function (passed through to callee)
  //  r5: expected arguments count

  // The code below is made a lot easier because the calling code already sets
  // up actual and expected registers according to the contract if values are
  // passed in registers.
  // ARM has some sanity checks as per below; consider adding them for PPC.
  // DCHECK_EQ(actual_parameter_count, r3);
  // DCHECK_EQ(expected_parameter_count, r5);

  cmp(expected_parameter_count, actual_parameter_count);
  beq(&regular_invoke);

  Handle<Code> adaptor = BUILTIN_CODE(isolate(), ArgumentsAdaptorTrampoline);
  if (flag == CALL_FUNCTION) {
    Call(adaptor);
    b(done);
  } else {
    Jump(adaptor, RelocInfo::CODE_TARGET);
  }
  bind(&regular_invoke);
}
1220 
CheckDebugHook(Register fun,Register new_target,Register expected_parameter_count,Register actual_parameter_count)1221 void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
1222                                     Register expected_parameter_count,
1223                                     Register actual_parameter_count) {
1224   Label skip_hook;
1225 
1226   ExternalReference debug_hook_active =
1227       ExternalReference::debug_hook_on_function_call_address(isolate());
1228   Move(r7, debug_hook_active);
1229   LoadByte(r7, MemOperand(r7), r0);
1230   extsb(r7, r7);
1231   CmpSmiLiteral(r7, Smi::zero(), r0);
1232   beq(&skip_hook);
1233 
1234   {
1235     // Load receiver to pass it later to DebugOnFunctionCall hook.
1236     ShiftLeftImm(r7, actual_parameter_count, Operand(kPointerSizeLog2));
1237     LoadPX(r7, MemOperand(sp, r7));
1238     FrameScope frame(this,
1239                      has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
1240 
1241     SmiTag(expected_parameter_count);
1242     Push(expected_parameter_count);
1243 
1244     SmiTag(actual_parameter_count);
1245     Push(actual_parameter_count);
1246 
1247     if (new_target.is_valid()) {
1248       Push(new_target);
1249     }
1250     Push(fun, fun, r7);
1251     CallRuntime(Runtime::kDebugOnFunctionCall);
1252     Pop(fun);
1253     if (new_target.is_valid()) {
1254       Pop(new_target);
1255     }
1256 
1257     Pop(actual_parameter_count);
1258     SmiUntag(actual_parameter_count);
1259 
1260     Pop(expected_parameter_count);
1261     SmiUntag(expected_parameter_count);
1262   }
1263   bind(&skip_hook);
1264 }
1265 
InvokeFunctionCode(Register function,Register new_target,Register expected_parameter_count,Register actual_parameter_count,InvokeFlag flag)1266 void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
1267                                         Register expected_parameter_count,
1268                                         Register actual_parameter_count,
1269                                         InvokeFlag flag) {
1270   // You can't call a function without a valid frame.
1271   DCHECK_IMPLIES(flag == CALL_FUNCTION, has_frame());
1272   DCHECK_EQ(function, r4);
1273   DCHECK_IMPLIES(new_target.is_valid(), new_target == r6);
1274 
1275   // On function call, call into the debugger if necessary.
1276   CheckDebugHook(function, new_target, expected_parameter_count,
1277                  actual_parameter_count);
1278 
1279   // Clear the new.target register if not given.
1280   if (!new_target.is_valid()) {
1281     LoadRoot(r6, RootIndex::kUndefinedValue);
1282   }
1283 
1284   Label done;
1285   InvokePrologue(expected_parameter_count, actual_parameter_count, &done, flag);
1286   // We call indirectly through the code field in the function to
1287   // allow recompilation to take effect without changing any of the
1288   // call sites.
1289   Register code = kJavaScriptCallCodeStartRegister;
1290   LoadP(code, FieldMemOperand(function, JSFunction::kCodeOffset));
1291   if (flag == CALL_FUNCTION) {
1292     CallCodeObject(code);
1293   } else {
1294     DCHECK(flag == JUMP_FUNCTION);
1295     JumpCodeObject(code);
1296   }
1297 
1298     // Continue here if InvokePrologue does handle the invocation due to
1299     // mismatched parameter counts.
1300     bind(&done);
1301 }
1302 
InvokeFunctionWithNewTarget(Register fun,Register new_target,Register actual_parameter_count,InvokeFlag flag)1303 void MacroAssembler::InvokeFunctionWithNewTarget(
1304     Register fun, Register new_target, Register actual_parameter_count,
1305     InvokeFlag flag) {
1306   // You can't call a function without a valid frame.
1307   DCHECK_IMPLIES(flag == CALL_FUNCTION, has_frame());
1308 
1309   // Contract with called JS functions requires that function is passed in r4.
1310   DCHECK_EQ(fun, r4);
1311 
1312   Register expected_reg = r5;
1313   Register temp_reg = r7;
1314 
1315   LoadP(temp_reg, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
1316   LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
1317   LoadHalfWord(expected_reg,
1318                FieldMemOperand(
1319                    temp_reg, SharedFunctionInfo::kFormalParameterCountOffset));
1320 
1321   InvokeFunctionCode(fun, new_target, expected_reg, actual_parameter_count,
1322                      flag);
1323 }
1324 
InvokeFunction(Register function,Register expected_parameter_count,Register actual_parameter_count,InvokeFlag flag)1325 void MacroAssembler::InvokeFunction(Register function,
1326                                     Register expected_parameter_count,
1327                                     Register actual_parameter_count,
1328                                     InvokeFlag flag) {
1329   // You can't call a function without a valid frame.
1330   DCHECK_IMPLIES(flag == CALL_FUNCTION, has_frame());
1331 
1332   // Contract with called JS functions requires that function is passed in r4.
1333   DCHECK_EQ(function, r4);
1334 
1335   // Get the function and setup the context.
1336   LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
1337 
1338   InvokeFunctionCode(r4, no_reg, expected_parameter_count,
1339                      actual_parameter_count, flag);
1340 }
1341 
MaybeDropFrames()1342 void MacroAssembler::MaybeDropFrames() {
1343   // Check whether we need to drop frames to restart a function on the stack.
1344   ExternalReference restart_fp =
1345       ExternalReference::debug_restart_fp_address(isolate());
1346   Move(r4, restart_fp);
1347   LoadP(r4, MemOperand(r4));
1348   cmpi(r4, Operand::Zero());
1349   Jump(BUILTIN_CODE(isolate(), FrameDropperTrampoline), RelocInfo::CODE_TARGET,
1350        ne);
1351 }
1352 
PushStackHandler()1353 void MacroAssembler::PushStackHandler() {
1354   // Adjust this code if not the case.
1355   STATIC_ASSERT(StackHandlerConstants::kSize == 2 * kPointerSize);
1356   STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
1357 
  Push(Smi::zero());  // Padding.

  // Link the current handler as the next handler.
  // Preserve r4-r8.
  Move(r3,
       ExternalReference::Create(IsolateAddressId::kHandlerAddress, isolate()));
  LoadP(r0, MemOperand(r3));
  push(r0);

  // Set this new handler as the current one.
  StoreP(sp, MemOperand(r3));
}

void MacroAssembler::PopStackHandler() {
  STATIC_ASSERT(StackHandlerConstants::kSize == 2 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);

  pop(r4);
  Move(ip,
       ExternalReference::Create(IsolateAddressId::kHandlerAddress, isolate()));
  StoreP(r4, MemOperand(ip));

  Drop(1);  // Drop padding.
}

void MacroAssembler::CompareObjectType(Register object, Register map,
                                       Register type_reg, InstanceType type) {
  const Register temp = type_reg == no_reg ? r0 : type_reg;

  LoadMap(map, object);
  CompareInstanceType(map, temp, type);
}

void MacroAssembler::CompareInstanceType(Register map, Register type_reg,
                                         InstanceType type) {
  STATIC_ASSERT(Map::kInstanceTypeOffset < 4096);
  STATIC_ASSERT(LAST_TYPE <= 0xFFFF);
  lhz(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
  cmpi(type_reg, Operand(type));
}

void MacroAssembler::CompareRoot(Register obj, RootIndex index) {
  DCHECK(obj != r0);
  LoadRoot(r0, index);
  cmp(obj, r0);
}

void TurboAssembler::AddAndCheckForOverflow(Register dst, Register left,
                                            Register right,
                                            Register overflow_dst,
                                            Register scratch) {
  DCHECK(dst != overflow_dst);
  DCHECK(dst != scratch);
  DCHECK(overflow_dst != scratch);
  DCHECK(overflow_dst != left);
  DCHECK(overflow_dst != right);

  bool left_is_right = left == right;
  RCBit xorRC = left_is_right ? SetRC : LeaveRC;

  // C = A + B; C overflows if A and B have the same sign and C's sign
  // differs from A's.
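  // Equivalently: overflow_dst = (dst ^ left) & (dst ^ right); the sign bit
  // of overflow_dst is set iff the addition overflowed, so callers branch on
  // lt/ge of cr0 after the SetRC below. When left == right the two XOR terms
  // coincide, so the single XOR with xorRC == SetRC already sets cr0 and the
  // final and_ is skipped.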
  if (dst == left) {
    mr(scratch, left);                        // Preserve left.
    add(dst, left, right);                    // Left is overwritten.
    xor_(overflow_dst, dst, scratch, xorRC);  // Original left.
    if (!left_is_right) xor_(scratch, dst, right);
  } else if (dst == right) {
    mr(scratch, right);     // Preserve right.
    add(dst, left, right);  // Right is overwritten.
    xor_(overflow_dst, dst, left, xorRC);
    if (!left_is_right) xor_(scratch, dst, scratch);  // Original right.
  } else {
    add(dst, left, right);
    xor_(overflow_dst, dst, left, xorRC);
    if (!left_is_right) xor_(scratch, dst, right);
  }
  if (!left_is_right) and_(overflow_dst, scratch, overflow_dst, SetRC);
}

void TurboAssembler::AddAndCheckForOverflow(Register dst, Register left,
                                            intptr_t right,
                                            Register overflow_dst,
                                            Register scratch) {
  Register original_left = left;
  DCHECK(dst != overflow_dst);
  DCHECK(dst != scratch);
  DCHECK(overflow_dst != scratch);
  DCHECK(overflow_dst != left);

  // C = A + B; C overflows if A and B have the same sign and C's sign
  // differs from A's.
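  // With an immediate operand the sign of |right| is known statically: if
  // right >= 0, overflow requires dst to be negative, so mask the XOR with
  // dst; if right < 0, overflow requires dst to be non-negative, so mask
  // with ~dst (andc). Either way the sign bit of overflow_dst flags overflow.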
  if (dst == left) {
    // Preserve left.
    original_left = overflow_dst;
    mr(original_left, left);
  }
  Add(dst, left, right, scratch);
  xor_(overflow_dst, dst, original_left);
  if (right >= 0) {
    and_(overflow_dst, overflow_dst, dst, SetRC);
  } else {
    andc(overflow_dst, overflow_dst, dst, SetRC);
  }
}

void TurboAssembler::SubAndCheckForOverflow(Register dst, Register left,
                                            Register right,
                                            Register overflow_dst,
                                            Register scratch) {
  DCHECK(dst != overflow_dst);
  DCHECK(dst != scratch);
  DCHECK(overflow_dst != scratch);
  DCHECK(overflow_dst != left);
  DCHECK(overflow_dst != right);

  // C = A - B; C overflows if A and B have different signs and C's sign
  // differs from A's.
  if (dst == left) {
    mr(scratch, left);      // Preserve left.
    sub(dst, left, right);  // Left is overwritten.
    xor_(overflow_dst, dst, scratch);
    xor_(scratch, scratch, right);
    and_(overflow_dst, overflow_dst, scratch, SetRC);
  } else if (dst == right) {
    mr(scratch, right);     // Preserve right.
    sub(dst, left, right);  // Right is overwritten.
    xor_(overflow_dst, dst, left);
    xor_(scratch, left, scratch);
    and_(overflow_dst, overflow_dst, scratch, SetRC);
  } else {
    sub(dst, left, right);
    xor_(overflow_dst, dst, left);
    xor_(scratch, left, right);
    and_(overflow_dst, scratch, overflow_dst, SetRC);
  }
}

void MacroAssembler::JumpIfIsInRange(Register value, unsigned lower_limit,
                                     unsigned higher_limit,
                                     Label* on_in_range) {
  Register scratch = r0;
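  // Unsigned-compare trick: (value - lower_limit) compared unsigned against
  // (higher_limit - lower_limit) also rejects value < lower_limit, since the
  // subtraction wraps around to a large unsigned number. Hypothetical usage:
  //   JumpIfIsInRange(ch, '0', '9', &is_digit);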
  if (lower_limit != 0) {
    mov(scratch, Operand(lower_limit));
    sub(scratch, value, scratch);
    cmpli(scratch, Operand(higher_limit - lower_limit));
  } else {
    mov(scratch, Operand(higher_limit));
    cmpl(value, scratch);
  }
  ble(on_in_range);
}

void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
                                       Register result,
                                       DoubleRegister double_input,
                                       StubCallMode stub_mode) {
  Label done;

  TryInlineTruncateDoubleToI(result, double_input, &done);

  // If we fell through, the inline version didn't succeed, so call the stub.
  mflr(r0);
  push(r0);
  // Put input on stack.
  stfdu(double_input, MemOperand(sp, -kDoubleSize));

  if (stub_mode == StubCallMode::kCallWasmRuntimeStub) {
    Call(wasm::WasmCode::kDoubleToI, RelocInfo::WASM_STUB_CALL);
  } else {
    Call(BUILTIN_CODE(isolate, DoubleToI), RelocInfo::CODE_TARGET);
  }

  LoadP(result, MemOperand(sp));
  addi(sp, sp, Operand(kDoubleSize));
  pop(r0);
  mtlr(r0);

  bind(&done);
}

void TurboAssembler::TryInlineTruncateDoubleToI(Register result,
                                                DoubleRegister double_input,
                                                Label* done) {
  DoubleRegister double_scratch = kScratchDoubleReg;
#if !V8_TARGET_ARCH_PPC64
  Register scratch = ip;
#endif

  ConvertDoubleToInt64(double_input,
#if !V8_TARGET_ARCH_PPC64
                       scratch,
#endif
                       result, double_scratch);

// Test for overflow
#if V8_TARGET_ARCH_PPC64
  TestIfInt32(result, r0);
#else
  TestIfInt32(scratch, result, r0);
#endif
  beq(done);
}

void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
                                 SaveFPRegsMode save_doubles) {
  // All parameters are on the stack.  r3 has the return value after call.

  // If the expected number of arguments of the runtime function is
  // constant, we check that the actual number of arguments matches the
  // expectation.
  CHECK(f->nargs < 0 || f->nargs == num_arguments);

  // TODO(1236192): Most runtime routines don't need the number of
  // arguments passed in because it is constant. At some point we
  // should remove this need and make the runtime routine entry code
  // smarter.
  mov(r3, Operand(num_arguments));
  Move(r4, ExternalReference::Create(f));
#if V8_TARGET_ARCH_PPC64
  Handle<Code> code =
      CodeFactory::CEntry(isolate(), f->result_size, save_doubles);
#else
  Handle<Code> code = CodeFactory::CEntry(isolate(), 1, save_doubles);
#endif
  Call(code, RelocInfo::CODE_TARGET);
}

void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
  const Runtime::Function* function = Runtime::FunctionForId(fid);
  DCHECK_EQ(1, function->result_size);
  if (function->nargs >= 0) {
    mov(r3, Operand(function->nargs));
  }
  JumpToExternalReference(ExternalReference::Create(fid));
}

void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
                                             bool builtin_exit_frame) {
  Move(r4, builtin);
  Handle<Code> code = CodeFactory::CEntry(isolate(), 1, kDontSaveFPRegs,
                                          kArgvOnStack, builtin_exit_frame);
  Jump(code, RelocInfo::CODE_TARGET);
}

void MacroAssembler::JumpToInstructionStream(Address entry) {
  mov(kOffHeapTrampolineRegister, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
  Jump(kOffHeapTrampolineRegister);
}

void MacroAssembler::LoadWeakValue(Register out, Register in,
                                   Label* target_if_cleared) {
  cmpi(in, Operand(kClearedWeakHeapObjectLower32));
  beq(target_if_cleared);

  mov(r0, Operand(~kWeakHeapObjectMask));
  and_(out, in, r0);
}

void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
                                      Register scratch1, Register scratch2) {
  DCHECK_GT(value, 0);
  if (FLAG_native_code_counters && counter->Enabled()) {
    // This operation has to be exactly 32-bit wide in case the external
    // reference table redirects the counter to a uint32_t dummy_stats_counter_
    // field.
    Move(scratch2, ExternalReference::Create(counter));
    lwz(scratch1, MemOperand(scratch2));
    addi(scratch1, scratch1, Operand(value));
    stw(scratch1, MemOperand(scratch2));
  }
}

void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
                                      Register scratch1, Register scratch2) {
  DCHECK_GT(value, 0);
  if (FLAG_native_code_counters && counter->Enabled()) {
    // This operation has to be exactly 32-bit wide in case the external
    // reference table redirects the counter to a uint32_t dummy_stats_counter_
    // field.
    Move(scratch2, ExternalReference::Create(counter));
    lwz(scratch1, MemOperand(scratch2));
    subi(scratch1, scratch1, Operand(value));
    stw(scratch1, MemOperand(scratch2));
  }
}

void TurboAssembler::Assert(Condition cond, AbortReason reason, CRegister cr) {
  if (emit_debug_code()) Check(cond, reason, cr);
}

void TurboAssembler::Check(Condition cond, AbortReason reason, CRegister cr) {
  Label L;
  b(cond, &L, cr);
  Abort(reason);
  // will not return here
  bind(&L);
}

void TurboAssembler::Abort(AbortReason reason) {
  Label abort_start;
  bind(&abort_start);
#ifdef DEBUG
  const char* msg = GetAbortReason(reason);
  RecordComment("Abort message: ");
  RecordComment(msg);
#endif

  // Avoid emitting the call to the builtin if requested.
  if (trap_on_abort()) {
    stop();
    return;
  }

  if (should_abort_hard()) {
    // We don't care if we constructed a frame. Just pretend we did.
    FrameScope assume_frame(this, StackFrame::NONE);
    mov(r3, Operand(static_cast<int>(reason)));
    PrepareCallCFunction(1, r4);
    CallCFunction(ExternalReference::abort_with_reason(), 1);
    return;
  }

  LoadSmiLiteral(r4, Smi::FromInt(static_cast<int>(reason)));

  // Disable stub call restrictions to always allow calls to abort.
  if (!has_frame_) {
    // We don't actually want to generate a pile of code for this, so just
    // claim there is a stack frame, without generating one.
    FrameScope scope(this, StackFrame::NONE);
    Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
  } else {
    Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
  }
  // will not return here
}

void MacroAssembler::LoadMap(Register destination, Register object) {
  LoadP(destination, FieldMemOperand(object, HeapObject::kMapOffset));
}

void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
  LoadMap(dst, cp);
  LoadP(dst, FieldMemOperand(
                 dst, Map::kConstructorOrBackPointerOrNativeContextOffset));
  LoadP(dst, MemOperand(dst, Context::SlotOffset(index)));
}

void MacroAssembler::AssertNotSmi(Register object) {
  if (emit_debug_code()) {
    STATIC_ASSERT(kSmiTag == 0);
    TestIfSmi(object, r0);
    Check(ne, AbortReason::kOperandIsASmi, cr0);
  }
}

void MacroAssembler::AssertSmi(Register object) {
  if (emit_debug_code()) {
    STATIC_ASSERT(kSmiTag == 0);
    TestIfSmi(object, r0);
    Check(eq, AbortReason::kOperandIsNotASmi, cr0);
  }
}

void MacroAssembler::AssertConstructor(Register object) {
  if (emit_debug_code()) {
    STATIC_ASSERT(kSmiTag == 0);
    TestIfSmi(object, r0);
    Check(ne, AbortReason::kOperandIsASmiAndNotAConstructor, cr0);
    push(object);
    LoadMap(object, object);
    lbz(object, FieldMemOperand(object, Map::kBitFieldOffset));
    andi(object, object, Operand(Map::Bits1::IsConstructorBit::kMask));
    pop(object);
    Check(ne, AbortReason::kOperandIsNotAConstructor, cr0);
  }
}

void MacroAssembler::AssertFunction(Register object) {
  if (emit_debug_code()) {
    STATIC_ASSERT(kSmiTag == 0);
    TestIfSmi(object, r0);
    Check(ne, AbortReason::kOperandIsASmiAndNotAFunction, cr0);
    push(object);
    CompareObjectType(object, object, object, JS_FUNCTION_TYPE);
    pop(object);
    Check(eq, AbortReason::kOperandIsNotAFunction);
  }
}

void MacroAssembler::AssertBoundFunction(Register object) {
  if (emit_debug_code()) {
    STATIC_ASSERT(kSmiTag == 0);
    TestIfSmi(object, r0);
    Check(ne, AbortReason::kOperandIsASmiAndNotABoundFunction, cr0);
    push(object);
    CompareObjectType(object, object, object, JS_BOUND_FUNCTION_TYPE);
    pop(object);
    Check(eq, AbortReason::kOperandIsNotABoundFunction);
  }
}

void MacroAssembler::AssertGeneratorObject(Register object) {
  if (!emit_debug_code()) return;
  TestIfSmi(object, r0);
  Check(ne, AbortReason::kOperandIsASmiAndNotAGeneratorObject, cr0);

  // Load map.
  Register map = object;
  push(object);
  LoadMap(map, object);

  // Check if JSGeneratorObject.
  Label do_check;
  Register instance_type = object;
  CompareInstanceType(map, instance_type, JS_GENERATOR_OBJECT_TYPE);
  beq(&do_check);

  // Check if JSAsyncFunctionObject (see MacroAssembler::CompareInstanceType).
  cmpi(instance_type, Operand(JS_ASYNC_FUNCTION_OBJECT_TYPE));
  beq(&do_check);

  // Check if JSAsyncGeneratorObject (see MacroAssembler::CompareInstanceType).
  cmpi(instance_type, Operand(JS_ASYNC_GENERATOR_OBJECT_TYPE));

  bind(&do_check);
  // Restore the generator object to the register and perform the assertion.
  pop(object);
  Check(eq, AbortReason::kOperandIsNotAGeneratorObject);
}

void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
                                                     Register scratch) {
  if (emit_debug_code()) {
    Label done_checking;
    AssertNotSmi(object);
    CompareRoot(object, RootIndex::kUndefinedValue);
    beq(&done_checking);
    LoadMap(scratch, object);
    CompareInstanceType(scratch, scratch, ALLOCATION_SITE_TYPE);
    Assert(eq, AbortReason::kExpectedUndefinedOrCell);
    bind(&done_checking);
  }
}

static const int kRegisterPassedArguments = 8;

int TurboAssembler::CalculateStackPassedWords(int num_reg_arguments,
                                              int num_double_arguments) {
  int stack_passed_words = 0;
  if (num_double_arguments > DoubleRegister::kNumRegisters) {
    stack_passed_words +=
        2 * (num_double_arguments - DoubleRegister::kNumRegisters);
  }
  // Up to 8 simple arguments are passed in registers r3..r10.
  if (num_reg_arguments > kRegisterPassedArguments) {
    stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
  }
  return stack_passed_words;
}

void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
                                          int num_double_arguments,
                                          Register scratch) {
  int frame_alignment = ActivationFrameAlignment();
  int stack_passed_arguments =
      CalculateStackPassedWords(num_reg_arguments, num_double_arguments);
  int stack_space = kNumRequiredStackFrameSlots;

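  // When the platform wants more than pointer-size alignment, sp is aligned
  // by hand and the original sp is stashed just above the outgoing-argument
  // area; CallCFunctionHelper reloads it from there when tearing down.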
  if (frame_alignment > kPointerSize) {
    // Make stack end at alignment and make room for stack arguments
    // -- preserving original value of sp.
    mr(scratch, sp);
    addi(sp, sp, Operand(-(stack_passed_arguments + 1) * kPointerSize));
    DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
    ClearRightImm(sp, sp,
                  Operand(base::bits::WhichPowerOfTwo(frame_alignment)));
    StoreP(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
  } else {
    // Make room for stack arguments.
    stack_space += stack_passed_arguments;
  }

  // Allocate frame with required slots to make ABI work.
  li(r0, Operand::Zero());
  StorePU(r0, MemOperand(sp, -stack_space * kPointerSize));
}

void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
                                          Register scratch) {
  PrepareCallCFunction(num_reg_arguments, 0, scratch);
}

void TurboAssembler::MovToFloatParameter(DoubleRegister src) { Move(d1, src); }

void TurboAssembler::MovToFloatResult(DoubleRegister src) { Move(d1, src); }

void TurboAssembler::MovToFloatParameters(DoubleRegister src1,
                                          DoubleRegister src2) {
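  // If src2 already occupies d1, move it to d2 first so that writing the
  // first parameter register does not clobber it.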
  if (src2 == d1) {
    DCHECK(src1 != d2);
    Move(d2, src2);
    Move(d1, src1);
  } else {
    Move(d1, src1);
    Move(d2, src2);
  }
}

void TurboAssembler::CallCFunction(ExternalReference function,
                                   int num_reg_arguments,
                                   int num_double_arguments,
                                   bool has_function_descriptor) {
  Move(ip, function);
  CallCFunctionHelper(ip, num_reg_arguments, num_double_arguments,
                      has_function_descriptor);
}

void TurboAssembler::CallCFunction(Register function, int num_reg_arguments,
                                   int num_double_arguments,
                                   bool has_function_descriptor) {
  CallCFunctionHelper(function, num_reg_arguments, num_double_arguments,
                      has_function_descriptor);
}

void TurboAssembler::CallCFunction(ExternalReference function,
                                   int num_arguments,
                                   bool has_function_descriptor) {
  CallCFunction(function, num_arguments, 0, has_function_descriptor);
}

void TurboAssembler::CallCFunction(Register function, int num_arguments,
                                   bool has_function_descriptor) {
  CallCFunction(function, num_arguments, 0, has_function_descriptor);
}

void TurboAssembler::CallCFunctionHelper(Register function,
                                         int num_reg_arguments,
                                         int num_double_arguments,
                                         bool has_function_descriptor) {
  DCHECK_LE(num_reg_arguments + num_double_arguments, kMaxCParameters);
  DCHECK(has_frame());

  // Save the frame pointer and PC so that the stack layout remains iterable,
  // even without an ExitFrame which normally exists between JS and C frames.
  Register addr_scratch = r7;
  Register scratch = r8;
  Push(scratch);
  mflr(scratch);
  // See x64 code for reasoning about how to address the isolate data fields.
  if (root_array_available()) {
    LoadPC(r0);
    StoreP(r0, MemOperand(kRootRegister,
                          IsolateData::fast_c_call_caller_pc_offset()));
    StoreP(fp, MemOperand(kRootRegister,
                          IsolateData::fast_c_call_caller_fp_offset()));
  } else {
    DCHECK_NOT_NULL(isolate());
    Push(addr_scratch);

    Move(addr_scratch,
         ExternalReference::fast_c_call_caller_pc_address(isolate()));
    LoadPC(r0);
    StoreP(r0, MemOperand(addr_scratch));
    Move(addr_scratch,
         ExternalReference::fast_c_call_caller_fp_address(isolate()));
    StoreP(fp, MemOperand(addr_scratch));
    Pop(addr_scratch);
  }
  mtlr(scratch);
  Pop(scratch);

  // Just call directly. The function called cannot cause a GC, or
  // allow preemption, so the return address in the link register
  // stays correct.
  Register dest = function;
  if (ABI_USES_FUNCTION_DESCRIPTORS && has_function_descriptor) {
    // AIX/PPC64BE Linux uses a function descriptor. When calling C code, be
    // aware of this descriptor and pick up values from it.
    LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(function, kPointerSize));
    LoadP(ip, MemOperand(function, 0));
    dest = ip;
  } else if (ABI_CALL_VIA_IP) {
    // PPC Linux and the simulator, not AIX.
    Move(ip, function);
    dest = ip;
  }

  Call(dest);

  // We don't unset the PC; the FP is the source of truth.
  Register zero_scratch = r0;
  mov(zero_scratch, Operand::Zero());

  if (root_array_available()) {
    StoreP(
        zero_scratch,
        MemOperand(kRootRegister, IsolateData::fast_c_call_caller_fp_offset()));
  } else {
    DCHECK_NOT_NULL(isolate());
    Push(addr_scratch);
    Move(addr_scratch,
         ExternalReference::fast_c_call_caller_fp_address(isolate()));
    StoreP(zero_scratch, MemOperand(addr_scratch));
    Pop(addr_scratch);
  }

  // Remove the frame brought up in PrepareCallCFunction.
  int stack_passed_arguments =
      CalculateStackPassedWords(num_reg_arguments, num_double_arguments);
  int stack_space = kNumRequiredStackFrameSlots + stack_passed_arguments;
  if (ActivationFrameAlignment() > kPointerSize) {
    LoadP(sp, MemOperand(sp, stack_space * kPointerSize));
  } else {
    addi(sp, sp, Operand(stack_space * kPointerSize));
  }
}

void TurboAssembler::CheckPageFlag(
    Register object,
    Register scratch,  // scratch may be same register as object
    int mask, Condition cc, Label* condition_met) {
  DCHECK(cc == ne || cc == eq);
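  // Clearing the low kPageSizeBits of the object address yields the start of
  // its MemoryChunk, where the flags word is stored.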
  ClearRightImm(scratch, object, Operand(kPageSizeBits));
  LoadP(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));

  mov(r0, Operand(mask));
  and_(r0, scratch, r0, SetRC);

  if (cc == ne) {
    bne(condition_met, cr0);
  }
  if (cc == eq) {
    beq(condition_met, cr0);
  }
}

void TurboAssembler::SetRoundingMode(FPRoundingMode RN) { mtfsfi(7, RN); }

void TurboAssembler::ResetRoundingMode() {
  mtfsfi(7, kRoundToNearest);  // reset (default is kRoundToNearest)
}

////////////////////////////////////////////////////////////////////////////////
//
// New MacroAssembler Interfaces added for PPC
//
////////////////////////////////////////////////////////////////////////////////
void TurboAssembler::LoadIntLiteral(Register dst, int value) {
  mov(dst, Operand(value));
}

void TurboAssembler::LoadSmiLiteral(Register dst, Smi smi) {
  mov(dst, Operand(smi));
}

void TurboAssembler::LoadDoubleLiteral(DoubleRegister result, Double value,
                                       Register scratch) {
  if (FLAG_enable_embedded_constant_pool && is_constant_pool_available() &&
      !(scratch == r0 && ConstantPoolAccessIsInOverflow())) {
    ConstantPoolEntry::Access access = ConstantPoolAddEntry(value);
    if (access == ConstantPoolEntry::OVERFLOWED) {
      addis(scratch, kConstantPoolRegister, Operand::Zero());
      lfd(result, MemOperand(scratch, 0));
    } else {
      lfd(result, MemOperand(kConstantPoolRegister, 0));
    }
    return;
  }

  // Avoid a GCC strict-aliasing error by using a union cast.
  union {
    uint64_t dval;
#if V8_TARGET_ARCH_PPC64
    intptr_t ival;
#else
    intptr_t ival[2];
#endif
  } litVal;

  litVal.dval = value.AsUint64();

#if V8_TARGET_ARCH_PPC64
  if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
    mov(scratch, Operand(litVal.ival));
    mtfprd(result, scratch);
    return;
  }
#endif

  addi(sp, sp, Operand(-kDoubleSize));
#if V8_TARGET_ARCH_PPC64
  mov(scratch, Operand(litVal.ival));
  std(scratch, MemOperand(sp));
#else
  LoadIntLiteral(scratch, litVal.ival[0]);
  stw(scratch, MemOperand(sp, 0));
  LoadIntLiteral(scratch, litVal.ival[1]);
  stw(scratch, MemOperand(sp, 4));
#endif
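  // The group-ending nop is intended to put the store(s) above and the
  // dependent floating-point load below into separate dispatch groups,
  // avoiding a load-hit-store (read-after-write) flush on POWER cores.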
  nop(GROUP_ENDING_NOP);  // LHS/RAW optimization
  lfd(result, MemOperand(sp, 0));
  addi(sp, sp, Operand(kDoubleSize));
}

void TurboAssembler::MovIntToDouble(DoubleRegister dst, Register src,
                                    Register scratch) {
// sign-extend src to 64-bit
#if V8_TARGET_ARCH_PPC64
  if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
    mtfprwa(dst, src);
    return;
  }
#endif

  DCHECK(src != scratch);
  subi(sp, sp, Operand(kDoubleSize));
#if V8_TARGET_ARCH_PPC64
  extsw(scratch, src);
  std(scratch, MemOperand(sp, 0));
#else
  srawi(scratch, src, 31);
  stw(scratch, MemOperand(sp, Register::kExponentOffset));
  stw(src, MemOperand(sp, Register::kMantissaOffset));
#endif
  nop(GROUP_ENDING_NOP);  // LHS/RAW optimization
  lfd(dst, MemOperand(sp, 0));
  addi(sp, sp, Operand(kDoubleSize));
}

void TurboAssembler::MovUnsignedIntToDouble(DoubleRegister dst, Register src,
                                            Register scratch) {
// zero-extend src to 64-bit
#if V8_TARGET_ARCH_PPC64
  if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
    mtfprwz(dst, src);
    return;
  }
#endif

  DCHECK(src != scratch);
  subi(sp, sp, Operand(kDoubleSize));
#if V8_TARGET_ARCH_PPC64
  clrldi(scratch, src, Operand(32));
  std(scratch, MemOperand(sp, 0));
#else
  li(scratch, Operand::Zero());
  stw(scratch, MemOperand(sp, Register::kExponentOffset));
  stw(src, MemOperand(sp, Register::kMantissaOffset));
#endif
  nop(GROUP_ENDING_NOP);  // LHS/RAW optimization
  lfd(dst, MemOperand(sp, 0));
  addi(sp, sp, Operand(kDoubleSize));
}

void TurboAssembler::MovInt64ToDouble(DoubleRegister dst,
#if !V8_TARGET_ARCH_PPC64
                                      Register src_hi,
#endif
                                      Register src) {
#if V8_TARGET_ARCH_PPC64
  if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
    mtfprd(dst, src);
    return;
  }
#endif

  subi(sp, sp, Operand(kDoubleSize));
#if V8_TARGET_ARCH_PPC64
  std(src, MemOperand(sp, 0));
#else
  stw(src_hi, MemOperand(sp, Register::kExponentOffset));
  stw(src, MemOperand(sp, Register::kMantissaOffset));
#endif
  nop(GROUP_ENDING_NOP);  // LHS/RAW optimization
  lfd(dst, MemOperand(sp, 0));
  addi(sp, sp, Operand(kDoubleSize));
}

#if V8_TARGET_ARCH_PPC64
void TurboAssembler::MovInt64ComponentsToDouble(DoubleRegister dst,
                                                Register src_hi,
                                                Register src_lo,
                                                Register scratch) {
  if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
    sldi(scratch, src_hi, Operand(32));
    rldimi(scratch, src_lo, 0, 32);
    mtfprd(dst, scratch);
    return;
  }

  subi(sp, sp, Operand(kDoubleSize));
  stw(src_hi, MemOperand(sp, Register::kExponentOffset));
  stw(src_lo, MemOperand(sp, Register::kMantissaOffset));
  nop(GROUP_ENDING_NOP);  // LHS/RAW optimization
  lfd(dst, MemOperand(sp));
  addi(sp, sp, Operand(kDoubleSize));
}
#endif

void TurboAssembler::InsertDoubleLow(DoubleRegister dst, Register src,
                                     Register scratch) {
#if V8_TARGET_ARCH_PPC64
  if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
    mffprd(scratch, dst);
    rldimi(scratch, src, 0, 32);
    mtfprd(dst, scratch);
    return;
  }
#endif

  subi(sp, sp, Operand(kDoubleSize));
  stfd(dst, MemOperand(sp));
  stw(src, MemOperand(sp, Register::kMantissaOffset));
  nop(GROUP_ENDING_NOP);  // LHS/RAW optimization
  lfd(dst, MemOperand(sp));
  addi(sp, sp, Operand(kDoubleSize));
}

void TurboAssembler::InsertDoubleHigh(DoubleRegister dst, Register src,
                                      Register scratch) {
#if V8_TARGET_ARCH_PPC64
  if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
    mffprd(scratch, dst);
    rldimi(scratch, src, 32, 0);
    mtfprd(dst, scratch);
    return;
  }
#endif

  subi(sp, sp, Operand(kDoubleSize));
  stfd(dst, MemOperand(sp));
  stw(src, MemOperand(sp, Register::kExponentOffset));
  nop(GROUP_ENDING_NOP);  // LHS/RAW optimization
  lfd(dst, MemOperand(sp));
  addi(sp, sp, Operand(kDoubleSize));
}

void TurboAssembler::MovDoubleLowToInt(Register dst, DoubleRegister src) {
#if V8_TARGET_ARCH_PPC64
  if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
    mffprwz(dst, src);
    return;
  }
#endif

  subi(sp, sp, Operand(kDoubleSize));
  stfd(src, MemOperand(sp));
  nop(GROUP_ENDING_NOP);  // LHS/RAW optimization
  lwz(dst, MemOperand(sp, Register::kMantissaOffset));
  addi(sp, sp, Operand(kDoubleSize));
}

void TurboAssembler::MovDoubleHighToInt(Register dst, DoubleRegister src) {
#if V8_TARGET_ARCH_PPC64
  if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
    mffprd(dst, src);
    srdi(dst, dst, Operand(32));
    return;
  }
#endif

  subi(sp, sp, Operand(kDoubleSize));
  stfd(src, MemOperand(sp));
  nop(GROUP_ENDING_NOP);  // LHS/RAW optimization
  lwz(dst, MemOperand(sp, Register::kExponentOffset));
  addi(sp, sp, Operand(kDoubleSize));
}

void TurboAssembler::MovDoubleToInt64(
#if !V8_TARGET_ARCH_PPC64
    Register dst_hi,
#endif
    Register dst, DoubleRegister src) {
#if V8_TARGET_ARCH_PPC64
  if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
    mffprd(dst, src);
    return;
  }
#endif

  subi(sp, sp, Operand(kDoubleSize));
  stfd(src, MemOperand(sp));
  nop(GROUP_ENDING_NOP);  // LHS/RAW optimization
#if V8_TARGET_ARCH_PPC64
  ld(dst, MemOperand(sp, 0));
#else
  lwz(dst_hi, MemOperand(sp, Register::kExponentOffset));
  lwz(dst, MemOperand(sp, Register::kMantissaOffset));
#endif
  addi(sp, sp, Operand(kDoubleSize));
}

void TurboAssembler::MovIntToFloat(DoubleRegister dst, Register src) {
  subi(sp, sp, Operand(kFloatSize));
  stw(src, MemOperand(sp, 0));
  nop(GROUP_ENDING_NOP);  // LHS/RAW optimization
  lfs(dst, MemOperand(sp, 0));
  addi(sp, sp, Operand(kFloatSize));
}

void TurboAssembler::MovFloatToInt(Register dst, DoubleRegister src) {
  subi(sp, sp, Operand(kFloatSize));
  stfs(src, MemOperand(sp, 0));
  nop(GROUP_ENDING_NOP);  // LHS/RAW optimization
  lwz(dst, MemOperand(sp, 0));
  addi(sp, sp, Operand(kFloatSize));
}

void TurboAssembler::Add(Register dst, Register src, intptr_t value,
                         Register scratch) {
  if (is_int16(value)) {
    addi(dst, src, Operand(value));
  } else {
    mov(scratch, Operand(value));
    add(dst, src, scratch);
  }
}

void TurboAssembler::Cmpi(Register src1, const Operand& src2, Register scratch,
                          CRegister cr) {
  intptr_t value = src2.immediate();
  if (is_int16(value)) {
    cmpi(src1, src2, cr);
  } else {
    mov(scratch, src2);
    cmp(src1, scratch, cr);
  }
}

void TurboAssembler::Cmpli(Register src1, const Operand& src2, Register scratch,
                           CRegister cr) {
  intptr_t value = src2.immediate();
  if (is_uint16(value)) {
    cmpli(src1, src2, cr);
  } else {
    mov(scratch, src2);
    cmpl(src1, scratch, cr);
  }
}

void TurboAssembler::Cmpwi(Register src1, const Operand& src2, Register scratch,
                           CRegister cr) {
  intptr_t value = src2.immediate();
  if (is_int16(value)) {
    cmpwi(src1, src2, cr);
  } else {
    mov(scratch, src2);
    cmpw(src1, scratch, cr);
  }
}

void MacroAssembler::Cmplwi(Register src1, const Operand& src2,
                            Register scratch, CRegister cr) {
  intptr_t value = src2.immediate();
  if (is_uint16(value)) {
    cmplwi(src1, src2, cr);
  } else {
    mov(scratch, src2);
    cmplw(src1, scratch, cr);
  }
}

void MacroAssembler::And(Register ra, Register rs, const Operand& rb,
                         RCBit rc) {
  if (rb.is_reg()) {
    and_(ra, rs, rb.rm(), rc);
  } else {
    if (is_uint16(rb.immediate()) && RelocInfo::IsNone(rb.rmode_) &&
        rc == SetRC) {
      andi(ra, rs, rb);
    } else {
      // mov handles the relocation.
      DCHECK(rs != r0);
      mov(r0, rb);
      and_(ra, rs, r0, rc);
    }
  }
}

void MacroAssembler::Or(Register ra, Register rs, const Operand& rb, RCBit rc) {
  if (rb.is_reg()) {
    orx(ra, rs, rb.rm(), rc);
  } else {
    if (is_uint16(rb.immediate()) && RelocInfo::IsNone(rb.rmode_) &&
        rc == LeaveRC) {
      ori(ra, rs, rb);
    } else {
      // mov handles the relocation.
      DCHECK(rs != r0);
      mov(r0, rb);
      orx(ra, rs, r0, rc);
    }
  }
}

void MacroAssembler::Xor(Register ra, Register rs, const Operand& rb,
                         RCBit rc) {
  if (rb.is_reg()) {
    xor_(ra, rs, rb.rm(), rc);
  } else {
    if (is_uint16(rb.immediate()) && RelocInfo::IsNone(rb.rmode_) &&
        rc == LeaveRC) {
      xori(ra, rs, rb);
    } else {
      // mov handles the relocation.
      DCHECK(rs != r0);
      mov(r0, rb);
      xor_(ra, rs, r0, rc);
    }
  }
}

void MacroAssembler::CmpSmiLiteral(Register src1, Smi smi, Register scratch,
                                   CRegister cr) {
#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
  Cmpi(src1, Operand(smi), scratch, cr);
#else
  LoadSmiLiteral(scratch, smi);
  cmp(src1, scratch, cr);
#endif
}

void MacroAssembler::CmplSmiLiteral(Register src1, Smi smi, Register scratch,
                                    CRegister cr) {
#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
  Cmpli(src1, Operand(smi), scratch, cr);
#else
  LoadSmiLiteral(scratch, smi);
  cmpl(src1, scratch, cr);
#endif
}

void MacroAssembler::AddSmiLiteral(Register dst, Register src, Smi smi,
                                   Register scratch) {
#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
  Add(dst, src, static_cast<intptr_t>(smi.ptr()), scratch);
#else
  LoadSmiLiteral(scratch, smi);
  add(dst, src, scratch);
#endif
}

void MacroAssembler::SubSmiLiteral(Register dst, Register src, Smi smi,
                                   Register scratch) {
#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
  Add(dst, src, -(static_cast<intptr_t>(smi.ptr())), scratch);
#else
  LoadSmiLiteral(scratch, smi);
  sub(dst, src, scratch);
#endif
}

void MacroAssembler::AndSmiLiteral(Register dst, Register src, Smi smi,
                                   Register scratch, RCBit rc) {
#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
  And(dst, src, Operand(smi), rc);
#else
  LoadSmiLiteral(scratch, smi);
  and_(dst, src, scratch, rc);
#endif
}

// Load a "pointer" sized value from the memory location
void TurboAssembler::LoadP(Register dst, const MemOperand& mem,
                           Register scratch) {
  DCHECK_EQ(mem.rb(), no_reg);
  int offset = mem.offset();
  int misaligned = (offset & 3);
  int adj = (offset & 3) - 4;
  int alignedOffset = (offset & ~3) + 4;
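  // On 64-bit, ld uses a DS-form displacement whose low two bits must be
  // zero. A misaligned offset is therefore rewritten as offset ==
  // adj + alignedOffset with alignedOffset a multiple of 4, and adj is
  // folded into the base register below.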

  if (!is_int16(offset) || (misaligned && !is_int16(alignedOffset))) {
    /* cannot use d-form */
    mov(scratch, Operand(offset));
    LoadPX(dst, MemOperand(mem.ra(), scratch));
  } else {
    if (misaligned) {
      // Adjust the base to conform to the offset alignment requirements.
      // TODO: enhance to use scratch if dst is unsuitable.
      DCHECK_NE(dst, r0);
      addi(dst, mem.ra(), Operand(adj));
      ld(dst, MemOperand(dst, alignedOffset));
    } else {
      ld(dst, mem);
    }
  }
}

void TurboAssembler::LoadPU(Register dst, const MemOperand& mem,
                            Register scratch) {
  int offset = mem.offset();

  if (!is_int16(offset)) {
    /* cannot use d-form */
    DCHECK(scratch != no_reg);
    mov(scratch, Operand(offset));
    LoadPUX(dst, MemOperand(mem.ra(), scratch));
  } else {
#if V8_TARGET_ARCH_PPC64
    ldu(dst, mem);
#else
    lwzu(dst, mem);
#endif
  }
}

// Store a "pointer" sized value to the memory location
void TurboAssembler::StoreP(Register src, const MemOperand& mem,
                            Register scratch) {
  int offset = mem.offset();

  if (!is_int16(offset)) {
    /* cannot use d-form */
    DCHECK(scratch != no_reg);
    mov(scratch, Operand(offset));
    StorePX(src, MemOperand(mem.ra(), scratch));
  } else {
#if V8_TARGET_ARCH_PPC64
    int misaligned = (offset & 3);
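    // std is also DS-form, so a misaligned offset needs the same base
    // adjustment as in LoadP above (or an indexed store via scratch).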
    if (misaligned) {
      // Adjust the base to conform to the offset alignment requirements;
      // a suitable scratch is required here.
      DCHECK(scratch != no_reg);
      if (scratch == r0) {
        LoadIntLiteral(scratch, offset);
        stdx(src, MemOperand(mem.ra(), scratch));
      } else {
        addi(scratch, mem.ra(), Operand((offset & 3) - 4));
        std(src, MemOperand(scratch, (offset & ~3) + 4));
      }
    } else {
      std(src, mem);
    }
#else
    stw(src, mem);
#endif
  }
}

void TurboAssembler::StorePU(Register src, const MemOperand& mem,
                             Register scratch) {
  int offset = mem.offset();

  if (!is_int16(offset)) {
    /* cannot use d-form */
    DCHECK(scratch != no_reg);
    mov(scratch, Operand(offset));
    StorePUX(src, MemOperand(mem.ra(), scratch));
  } else {
#if V8_TARGET_ARCH_PPC64
    stdu(src, mem);
#else
    stwu(src, mem);
#endif
  }
}

void TurboAssembler::LoadWordArith(Register dst, const MemOperand& mem,
                                   Register scratch) {
  int offset = mem.offset();

  if (!is_int16(offset)) {
    DCHECK(scratch != no_reg);
    mov(scratch, Operand(offset));
    lwax(dst, MemOperand(mem.ra(), scratch));
  } else {
#if V8_TARGET_ARCH_PPC64
    int misaligned = (offset & 3);
    if (misaligned) {
      // Adjust the base to conform to the offset alignment requirements.
      // TODO: enhance to use scratch if dst is unsuitable.
      DCHECK(dst != r0);
      addi(dst, mem.ra(), Operand((offset & 3) - 4));
      lwa(dst, MemOperand(dst, (offset & ~3) + 4));
    } else {
      lwa(dst, mem);
    }
#else
    lwz(dst, mem);
#endif
  }
}

// Variable length depending on whether offset fits into immediate field
// MemOperand currently only supports d-form
void MacroAssembler::LoadWord(Register dst, const MemOperand& mem,
                              Register scratch) {
  Register base = mem.ra();
  int offset = mem.offset();

  if (!is_int16(offset)) {
    LoadIntLiteral(scratch, offset);
    lwzx(dst, MemOperand(base, scratch));
  } else {
    lwz(dst, mem);
  }
}

// Variable length depending on whether offset fits into immediate field
// MemOperand currently only supports d-form
void MacroAssembler::StoreWord(Register src, const MemOperand& mem,
                               Register scratch) {
  Register base = mem.ra();
  int offset = mem.offset();

  if (!is_int16(offset)) {
    LoadIntLiteral(scratch, offset);
    stwx(src, MemOperand(base, scratch));
  } else {
    stw(src, mem);
  }
}

void MacroAssembler::LoadHalfWordArith(Register dst, const MemOperand& mem,
                                       Register scratch) {
  int offset = mem.offset();

  if (!is_int16(offset)) {
    DCHECK(scratch != no_reg);
    mov(scratch, Operand(offset));
    lhax(dst, MemOperand(mem.ra(), scratch));
  } else {
    lha(dst, mem);
  }
}

// Variable length depending on whether offset fits into immediate field
// MemOperand currently only supports d-form
void MacroAssembler::LoadHalfWord(Register dst, const MemOperand& mem,
                                  Register scratch) {
  Register base = mem.ra();
  int offset = mem.offset();

  if (!is_int16(offset)) {
    DCHECK_NE(scratch, no_reg);
    LoadIntLiteral(scratch, offset);
    lhzx(dst, MemOperand(base, scratch));
  } else {
    lhz(dst, mem);
  }
}

// Variable length depending on whether offset fits into immediate field
// MemOperand currently only supports d-form
void MacroAssembler::StoreHalfWord(Register src, const MemOperand& mem,
                                   Register scratch) {
  Register base = mem.ra();
  int offset = mem.offset();

  if (!is_int16(offset)) {
    LoadIntLiteral(scratch, offset);
    sthx(src, MemOperand(base, scratch));
  } else {
    sth(src, mem);
  }
}

// Variable length depending on whether offset fits into immediate field
// MemOperand currently only supports d-form
void MacroAssembler::LoadByte(Register dst, const MemOperand& mem,
                              Register scratch) {
  Register base = mem.ra();
  int offset = mem.offset();

  if (!is_int16(offset)) {
    LoadIntLiteral(scratch, offset);
    lbzx(dst, MemOperand(base, scratch));
  } else {
    lbz(dst, mem);
  }
}

// Variable length depending on whether offset fits into immediate field
// MemOperand currently only supports d-form
void MacroAssembler::StoreByte(Register src, const MemOperand& mem,
                               Register scratch) {
  Register base = mem.ra();
  int offset = mem.offset();

  if (!is_int16(offset)) {
    LoadIntLiteral(scratch, offset);
    stbx(src, MemOperand(base, scratch));
  } else {
    stb(src, mem);
  }
}

void TurboAssembler::LoadDouble(DoubleRegister dst, const MemOperand& mem,
                                Register scratch) {
  Register base = mem.ra();
  int offset = mem.offset();

  if (!is_int16(offset)) {
    mov(scratch, Operand(offset));
    lfdx(dst, MemOperand(base, scratch));
  } else {
    lfd(dst, mem);
  }
}

void TurboAssembler::LoadFloat32(DoubleRegister dst, const MemOperand& mem,
                                 Register scratch) {
  Register base = mem.ra();
  int offset = mem.offset();

  if (!is_int16(offset)) {
    mov(scratch, Operand(offset));
    lfsx(dst, MemOperand(base, scratch));
  } else {
    lfs(dst, mem);
  }
}

void MacroAssembler::LoadDoubleU(DoubleRegister dst, const MemOperand& mem,
                                 Register scratch) {
  Register base = mem.ra();
  int offset = mem.offset();

  if (!is_int16(offset)) {
    mov(scratch, Operand(offset));
    lfdux(dst, MemOperand(base, scratch));
  } else {
    lfdu(dst, mem);
  }
}

void TurboAssembler::LoadSingle(DoubleRegister dst, const MemOperand& mem,
                                Register scratch) {
  Register base = mem.ra();
  int offset = mem.offset();

  if (!is_int16(offset)) {
    mov(scratch, Operand(offset));
    lfsx(dst, MemOperand(base, scratch));
  } else {
    lfs(dst, mem);
  }
}

void TurboAssembler::LoadSingleU(DoubleRegister dst, const MemOperand& mem,
                                 Register scratch) {
  Register base = mem.ra();
  int offset = mem.offset();

  if (!is_int16(offset)) {
    mov(scratch, Operand(offset));
    lfsux(dst, MemOperand(base, scratch));
  } else {
    lfsu(dst, mem);
  }
}

void TurboAssembler::StoreDouble(DoubleRegister src, const MemOperand& mem,
                                 Register scratch) {
  Register base = mem.ra();
  int offset = mem.offset();

  if (!is_int16(offset)) {
    mov(scratch, Operand(offset));
    stfdx(src, MemOperand(base, scratch));
  } else {
    stfd(src, mem);
  }
}

void TurboAssembler::StoreDoubleU(DoubleRegister src, const MemOperand& mem,
                                  Register scratch) {
  Register base = mem.ra();
  int offset = mem.offset();

  if (!is_int16(offset)) {
    mov(scratch, Operand(offset));
    stfdux(src, MemOperand(base, scratch));
  } else {
    stfdu(src, mem);
  }
}

void TurboAssembler::StoreSingle(DoubleRegister src, const MemOperand& mem,
                                 Register scratch) {
  Register base = mem.ra();
  int offset = mem.offset();

  if (!is_int16(offset)) {
    mov(scratch, Operand(offset));
    stfsx(src, MemOperand(base, scratch));
  } else {
    stfs(src, mem);
  }
}

void TurboAssembler::StoreSingleU(DoubleRegister src, const MemOperand& mem,
                                  Register scratch) {
  Register base = mem.ra();
  int offset = mem.offset();

  if (!is_int16(offset)) {
    mov(scratch, Operand(offset));
    stfsux(src, MemOperand(base, scratch));
  } else {
    stfsu(src, mem);
  }
}
2759 
GetRegisterThatIsNotOneOf(Register reg1,Register reg2,Register reg3,Register reg4,Register reg5,Register reg6)2760 Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3,
2761                                    Register reg4, Register reg5,
2762                                    Register reg6) {
2763   RegList regs = 0;
2764   if (reg1.is_valid()) regs |= reg1.bit();
2765   if (reg2.is_valid()) regs |= reg2.bit();
2766   if (reg3.is_valid()) regs |= reg3.bit();
2767   if (reg4.is_valid()) regs |= reg4.bit();
2768   if (reg5.is_valid()) regs |= reg5.bit();
2769   if (reg6.is_valid()) regs |= reg6.bit();
2770 
2771   const RegisterConfiguration* config = RegisterConfiguration::Default();
2772   for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
2773     int code = config->GetAllocatableGeneralCode(i);
2774     Register candidate = Register::from_code(code);
2775     if (regs & candidate.bit()) continue;
2776     return candidate;
2777   }
2778   UNREACHABLE();
2779 }
2780 
void TurboAssembler::SwapP(Register src, Register dst, Register scratch) {
  if (src == dst) return;
  DCHECK(!AreAliased(src, dst, scratch));
  mr(scratch, src);
  mr(src, dst);
  mr(dst, scratch);
}

void TurboAssembler::SwapP(Register src, MemOperand dst, Register scratch) {
  if (dst.ra() != r0 && dst.ra().is_valid())
    DCHECK(!AreAliased(src, dst.ra(), scratch));
  if (dst.rb() != r0 && dst.rb().is_valid())
    DCHECK(!AreAliased(src, dst.rb(), scratch));
  DCHECK(!AreAliased(src, scratch));
  mr(scratch, src);
  LoadP(src, dst, r0);
  StoreP(scratch, dst, r0);
}

void TurboAssembler::SwapP(MemOperand src, MemOperand dst, Register scratch_0,
                           Register scratch_1) {
  if (src.ra() != r0 && src.ra().is_valid())
    DCHECK(!AreAliased(src.ra(), scratch_0, scratch_1));
  if (src.rb() != r0 && src.rb().is_valid())
    DCHECK(!AreAliased(src.rb(), scratch_0, scratch_1));
  if (dst.ra() != r0 && dst.ra().is_valid())
    DCHECK(!AreAliased(dst.ra(), scratch_0, scratch_1));
  if (dst.rb() != r0 && dst.rb().is_valid())
    DCHECK(!AreAliased(dst.rb(), scratch_0, scratch_1));
  DCHECK(!AreAliased(scratch_0, scratch_1));
  if (is_int16(src.offset()) || is_int16(dst.offset())) {
    if (!is_int16(src.offset())) {
      // Swap the operands so that src is the one whose offset fits in an
      // int16 displacement and can be accessed without a scratch register.
      MemOperand temp = src;
      src = dst;
      dst = temp;
    }
    LoadP(scratch_1, dst, scratch_0);
    LoadP(scratch_0, src);
    StoreP(scratch_1, src);
    StoreP(scratch_0, dst, scratch_1);
  } else {
    LoadP(scratch_1, dst, scratch_0);
    push(scratch_1);
    LoadP(scratch_0, src, scratch_1);
    StoreP(scratch_0, dst, scratch_1);
    pop(scratch_1);
    StoreP(scratch_1, src, scratch_0);
  }
}

void TurboAssembler::SwapFloat32(DoubleRegister src, DoubleRegister dst,
                                 DoubleRegister scratch) {
  if (src == dst) return;
  DCHECK(!AreAliased(src, dst, scratch));
  fmr(scratch, src);
  fmr(src, dst);
  fmr(dst, scratch);
}

void TurboAssembler::SwapFloat32(DoubleRegister src, MemOperand dst,
                                 DoubleRegister scratch) {
  DCHECK(!AreAliased(src, scratch));
  fmr(scratch, src);
  LoadSingle(src, dst, r0);
  StoreSingle(scratch, dst, r0);
}

void TurboAssembler::SwapFloat32(MemOperand src, MemOperand dst,
                                 DoubleRegister scratch_0,
                                 DoubleRegister scratch_1) {
  DCHECK(!AreAliased(scratch_0, scratch_1));
  LoadSingle(scratch_0, src, r0);
  LoadSingle(scratch_1, dst, r0);
  StoreSingle(scratch_0, dst, r0);
  StoreSingle(scratch_1, src, r0);
}

void TurboAssembler::SwapDouble(DoubleRegister src, DoubleRegister dst,
                                DoubleRegister scratch) {
  if (src == dst) return;
  DCHECK(!AreAliased(src, dst, scratch));
  fmr(scratch, src);
  fmr(src, dst);
  fmr(dst, scratch);
}

void TurboAssembler::SwapDouble(DoubleRegister src, MemOperand dst,
                                DoubleRegister scratch) {
  DCHECK(!AreAliased(src, scratch));
  fmr(scratch, src);
  LoadDouble(src, dst, r0);
  StoreDouble(scratch, dst, r0);
}

void TurboAssembler::SwapDouble(MemOperand src, MemOperand dst,
                                DoubleRegister scratch_0,
                                DoubleRegister scratch_1) {
  DCHECK(!AreAliased(scratch_0, scratch_1));
  LoadDouble(scratch_0, src, r0);
  LoadDouble(scratch_1, dst, r0);
  StoreDouble(scratch_0, dst, r0);
  StoreDouble(scratch_1, src, r0);
}

void TurboAssembler::ResetSpeculationPoisonRegister() {
  // An all-ones mask is the identity for the and-based poisoning, i.e. no
  // bits are masked out.
  mov(kSpeculationPoisonRegister, Operand(-1));
}

void TurboAssembler::JumpIfEqual(Register x, int32_t y, Label* dest) {
  Cmpi(x, Operand(y), r0);
  beq(dest);
}

void TurboAssembler::JumpIfLessThan(Register x, int32_t y, Label* dest) {
  Cmpi(x, Operand(y), r0);
  blt(dest);
}

void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) {
  STATIC_ASSERT(kSystemPointerSize == 8);
  STATIC_ASSERT(kSmiTagSize == 1);
  STATIC_ASSERT(kSmiTag == 0);

  // The builtin_index register contains the builtin index as a Smi.
  // Untagging is folded into the indexing operand below.
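  // With kSystemPointerSizeLog2 == 3: under 31-bit Smis (kSmiShift == 1) the
  // tagged index is scaled up via a left shift by (3 - 1); under full 64-bit
  // Smis (kSmiShift == 32) it is scaled down via an arithmetic right shift by
  // (32 - 3). Either way the result is index * kSystemPointerSize, a byte
  // offset into the builtin entry table.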
#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
  ShiftLeftImm(builtin_index, builtin_index,
               Operand(kSystemPointerSizeLog2 - kSmiShift));
#else
  ShiftRightArithImm(builtin_index, builtin_index,
                     kSmiShift - kSystemPointerSizeLog2);
#endif
  addi(builtin_index, builtin_index,
       Operand(IsolateData::builtin_entry_table_offset()));
  LoadPX(builtin_index, MemOperand(kRootRegister, builtin_index));
}

void TurboAssembler::CallBuiltinByIndex(Register builtin_index) {
  LoadEntryFromBuiltinIndex(builtin_index);
  Call(builtin_index);
}

void TurboAssembler::LoadCodeObjectEntry(Register destination,
                                         Register code_object) {
  // Code objects are called differently depending on whether we are generating
  // builtin code (which will later be embedded into the binary) or compiling
  // user JS code at runtime.
  // * Builtin code runs in --jitless mode and thus must not call into on-heap
  //   Code targets. Instead, we dispatch through the builtins entry table.
  // * Codegen at runtime does not have this restriction and we can use the
  //   shorter, branchless instruction sequence. The assumption here is that
  //   targets are usually generated code and not builtin Code objects.

  if (options().isolate_independent_code) {
    DCHECK(root_array_available());
    Label if_code_is_off_heap, out;

    Register scratch = r11;

    DCHECK(!AreAliased(destination, scratch));
    DCHECK(!AreAliased(code_object, scratch));

    // Check whether the Code object is an off-heap trampoline. If so, call its
    // (off-heap) entry point directly without going through the (on-heap)
    // trampoline. Otherwise, just call the Code object as always.
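    // (flags & IsOffHeapTrampoline::kMask) != 0 iff the trampoline flag is
    // set; SetRC makes and_ update condition register field cr0, which the
    // bne below branches on.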
    LoadWordArith(scratch, FieldMemOperand(code_object, Code::kFlagsOffset));
    mov(r0, Operand(Code::IsOffHeapTrampoline::kMask));
    and_(r0, scratch, r0, SetRC);
    bne(&if_code_is_off_heap, cr0);

    // Not an off-heap trampoline, the entry point is at
    // Code::raw_instruction_start().
    addi(destination, code_object, Operand(Code::kHeaderSize - kHeapObjectTag));
    b(&out);

    // An off-heap trampoline, the entry point is loaded from the builtin entry
    // table.
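    // That is, destination =
    //   [kRootRegister + builtin_entry_table_offset +
    //    builtin_index * kSystemPointerSize].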
    bind(&if_code_is_off_heap);
    LoadWordArith(scratch,
                  FieldMemOperand(code_object, Code::kBuiltinIndexOffset));
    ShiftLeftImm(destination, scratch, Operand(kSystemPointerSizeLog2));
    add(destination, destination, kRootRegister);
    LoadP(destination,
          MemOperand(destination, IsolateData::builtin_entry_table_offset()),
          r0);

    bind(&out);
  } else {
    addi(destination, code_object, Operand(Code::kHeaderSize - kHeapObjectTag));
  }
}

void TurboAssembler::CallCodeObject(Register code_object) {
  LoadCodeObjectEntry(code_object, code_object);
  Call(code_object);
}

void TurboAssembler::JumpCodeObject(Register code_object) {
  LoadCodeObjectEntry(code_object, code_object);
  Jump(code_object);
}

void TurboAssembler::StoreReturnAddressAndCall(Register target) {
  // This generates the final instruction sequence for calls to C functions
  // once an exit frame has been constructed.
  //
  // Note that this assumes the caller code (i.e. the Code object currently
  // being generated) is immovable or that the callee function cannot trigger
  // GC, since the callee function will return to it.

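  // after_call_offset is the distance from the address materialized by
  // LoadPC(r7) below to the instruction following the call; adding it to r7
  // yields the return address stored into the caller's frame slot. The DCHECK
  // at the end verifies the length of the generated sequence.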
  static constexpr int after_call_offset = 5 * kInstrSize;
  Label start_call;
  Register dest = target;

  if (ABI_USES_FUNCTION_DESCRIPTORS) {
    // AIX/PPC64BE Linux uses a function descriptor. When calling C code, load
    // the TOC pointer and the entry address from the descriptor.
    LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(target, kPointerSize));
    LoadP(ip, MemOperand(target, 0));
    dest = ip;
  } else if (ABI_CALL_VIA_IP && dest != ip) {
    Move(ip, target);
    dest = ip;
  }

  LoadPC(r7);
  bind(&start_call);
  addi(r7, r7, Operand(after_call_offset));
  StoreP(r7, MemOperand(sp, kStackFrameExtraParamSlot * kPointerSize));
  Call(dest);

  DCHECK_EQ(after_call_offset - kInstrSize,
            SizeOfCodeGeneratedSince(&start_call));
}

void TurboAssembler::CallForDeoptimization(Address target, int deopt_id,
                                           Label* exit, DeoptimizeKind kind) {
  USE(exit, kind);
  NoRootArrayScope no_root_array(this);

  // Save the deopt id in r29 (we don't need the roots array from now on).
  DCHECK_LE(deopt_id, 0xFFFF);

  mov(r29, Operand(deopt_id));
  Call(target, RelocInfo::RUNTIME_ENTRY);
}

void TurboAssembler::ZeroExtByte(Register dst, Register src) {
  clrldi(dst, src, Operand(56));  // Clear the 56 high-order bits.
}

void TurboAssembler::ZeroExtHalfWord(Register dst, Register src) {
  clrldi(dst, src, Operand(48));  // Clear the 48 high-order bits.
}

void TurboAssembler::ZeroExtWord32(Register dst, Register src) {
  clrldi(dst, src, Operand(32));  // Clear the 32 high-order bits.
}

void TurboAssembler::Trap() { stop(); }
void TurboAssembler::DebugBreak() { stop(); }

}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64