// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <assert.h>  // For assert
#include <limits.h>  // For LONG_MIN, LONG_MAX.

#if V8_TARGET_ARCH_PPC

#include "src/base/bits.h"
#include "src/base/division-by-constant.h"
#include "src/bootstrapper.h"
#include "src/builtins/constants-table-builder.h"
#include "src/callable.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/debug/debug.h"
#include "src/external-reference-table.h"
#include "src/frames-inl.h"
#include "src/instruction-stream.h"
#include "src/register-configuration.h"
#include "src/runtime/runtime.h"
#include "src/snapshot/serializer-common.h"

#include "src/ppc/macro-assembler-ppc.h"

namespace v8 {
namespace internal {

MacroAssembler::MacroAssembler(Isolate* isolate, void* buffer, int size,
                               CodeObjectRequired create_code_object)
    : TurboAssembler(isolate, buffer, size, create_code_object) {
  if (create_code_object == CodeObjectRequired::kYes) {
    // Unlike TurboAssembler, which can be used off the main thread and may not
    // allocate, macro assembler creates its own copy of the self-reference
    // marker in order to disambiguate between self-references during nested
    // code generation (e.g.: codegen of the current object triggers stub
    // compilation through CodeStub::GetCode()).
    code_object_ = Handle<HeapObject>::New(
        *isolate->factory()->NewSelfReferenceMarker(), isolate);
  }
}

TurboAssembler::TurboAssembler(Isolate* isolate, void* buffer, int buffer_size,
                               CodeObjectRequired create_code_object)
    : Assembler(isolate, buffer, buffer_size), isolate_(isolate) {
  if (create_code_object == CodeObjectRequired::kYes) {
    code_object_ = Handle<HeapObject>::New(
        isolate->heap()->self_reference_marker(), isolate);
  }
}

int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
                                                    Register exclusion1,
                                                    Register exclusion2,
                                                    Register exclusion3) const {
  int bytes = 0;
  RegList exclusions = 0;
  if (exclusion1 != no_reg) {
    exclusions |= exclusion1.bit();
    if (exclusion2 != no_reg) {
      exclusions |= exclusion2.bit();
      if (exclusion3 != no_reg) {
        exclusions |= exclusion3.bit();
      }
    }
  }

  RegList list = kJSCallerSaved & ~exclusions;
  bytes += NumRegs(list) * kPointerSize;

  if (fp_mode == kSaveFPRegs) {
    bytes += kNumCallerSavedDoubles * kDoubleSize;
  }

  return bytes;
}

int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
                                    Register exclusion2, Register exclusion3) {
  int bytes = 0;
  RegList exclusions = 0;
  if (exclusion1 != no_reg) {
    exclusions |= exclusion1.bit();
    if (exclusion2 != no_reg) {
      exclusions |= exclusion2.bit();
      if (exclusion3 != no_reg) {
        exclusions |= exclusion3.bit();
      }
    }
  }

  RegList list = kJSCallerSaved & ~exclusions;
  MultiPush(list);
  bytes += NumRegs(list) * kPointerSize;

  if (fp_mode == kSaveFPRegs) {
    MultiPushDoubles(kCallerSavedDoubles);
    bytes += kNumCallerSavedDoubles * kDoubleSize;
  }

  return bytes;
}

int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
                                   Register exclusion2, Register exclusion3) {
  int bytes = 0;
  if (fp_mode == kSaveFPRegs) {
    MultiPopDoubles(kCallerSavedDoubles);
    bytes += kNumCallerSavedDoubles * kDoubleSize;
  }

  RegList exclusions = 0;
  if (exclusion1 != no_reg) {
    exclusions |= exclusion1.bit();
    if (exclusion2 != no_reg) {
      exclusions |= exclusion2.bit();
      if (exclusion3 != no_reg) {
        exclusions |= exclusion3.bit();
      }
    }
  }

  RegList list = kJSCallerSaved & ~exclusions;
  MultiPop(list);
  bytes += NumRegs(list) * kPointerSize;

  return bytes;
}
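// Usage sketch (illustrative, not taken from this file; |result_reg| is a
// hypothetical live register): PushCallerSaved and PopCallerSaved are meant
// to be paired around code that may clobber JS caller-saved registers, with
// up to three registers excluded symmetrically on both sides:
//
//   int bytes = PushCallerSaved(kSaveFPRegs, result_reg);
//   ... emit code that may clobber caller-saved registers ...
//   PopCallerSaved(kSaveFPRegs, result_reg);
//
// RequiredStackSizeForCallerSaved computes the same byte count without
// emitting any code.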

void TurboAssembler::Jump(Register target) {
  mtctr(target);
  bctr();
}

#ifdef V8_EMBEDDED_BUILTINS
void TurboAssembler::LookupConstant(Register destination,
                                    Handle<Object> object) {
  CHECK(isolate()->ShouldLoadConstantsFromRootList());
  CHECK(root_array_available_);

  // Ensure the given object is in the builtins constants table and fetch its
  // index.
  BuiltinsConstantsTableBuilder* builder =
      isolate()->builtins_constants_table_builder();
  uint32_t index = builder->AddObject(object);

  // TODO(jgruber): Load builtins from the builtins table.
  // TODO(jgruber): Ensure that code generation can recognize constant targets
  // in kArchCallCodeObject.

  DCHECK(isolate()->heap()->RootCanBeTreatedAsConstant(
      Heap::kBuiltinsConstantsTableRootIndex));

  const uint32_t offset =
      FixedArray::kHeaderSize + index * kPointerSize - kHeapObjectTag;

  CHECK(is_uint19(offset));
  DCHECK_NE(destination, r0);
  LoadRoot(destination, Heap::kBuiltinsConstantsTableRootIndex);
  LoadP(destination, MemOperand(destination, offset), r0);
}

void TurboAssembler::LookupExternalReference(Register destination,
                                             ExternalReference reference) {
  CHECK(reference.address() !=
        ExternalReference::roots_array_start(isolate()).address());
  CHECK(isolate()->ShouldLoadConstantsFromRootList());
  CHECK(root_array_available_);

  // Encode as an index into the external reference table stored on the
  // isolate.

  ExternalReferenceEncoder encoder(isolate());
  ExternalReferenceEncoder::Value v = encoder.Encode(reference.address());
  CHECK(!v.is_from_api());
  uint32_t index = v.index();

  // Generate code to load from the external reference table.

  int32_t roots_to_external_reference_offset =
      Heap::roots_to_external_reference_table_offset() +
      ExternalReferenceTable::OffsetOfEntry(index);

  LoadP(destination,
        MemOperand(kRootRegister, roots_to_external_reference_offset), r0);
}
#endif  // V8_EMBEDDED_BUILTINS

void MacroAssembler::JumpToJSEntry(Register target) {
  Move(ip, target);
  Jump(ip);
}

void TurboAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
                          Condition cond, CRegister cr) {
  Label skip;

  if (cond != al) b(NegateCondition(cond), &skip, cr);

  DCHECK(rmode == RelocInfo::CODE_TARGET || rmode == RelocInfo::RUNTIME_ENTRY);

  mov(ip, Operand(target, rmode));
  mtctr(ip);
  bctr();

  bind(&skip);
}

void TurboAssembler::Jump(Address target, RelocInfo::Mode rmode, Condition cond,
                          CRegister cr) {
  DCHECK(!RelocInfo::IsCodeTarget(rmode));
  Jump(static_cast<intptr_t>(target), rmode, cond, cr);
}

void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
                          Condition cond, CRegister cr) {
  DCHECK(RelocInfo::IsCodeTarget(rmode));
  // 'code' is always generated PPC code, never THUMB code.
#ifdef V8_EMBEDDED_BUILTINS
  if (root_array_available_ && isolate()->ShouldLoadConstantsFromRootList()) {
    Register scratch = ip;
    LookupConstant(scratch, code);
    addi(scratch, scratch, Operand(Code::kHeaderSize - kHeapObjectTag));
    Label skip;
    if (cond != al) b(NegateCondition(cond), &skip, cr);
    Jump(scratch);
    bind(&skip);
    return;
  }
#endif  // V8_EMBEDDED_BUILTINS
  Jump(static_cast<intptr_t>(code.address()), rmode, cond, cr);
}

int TurboAssembler::CallSize(Register target) { return 2 * kInstrSize; }

void TurboAssembler::Call(Register target) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  Label start;
  bind(&start);

  // Branch via the count register and set the LK bit for the return point.
  mtctr(target);
  bctrl();

  DCHECK_EQ(CallSize(target), SizeOfCodeGeneratedSince(&start));
}

void MacroAssembler::CallJSEntry(Register target) {
  CHECK(target == r5);
  Call(target);
}

int TurboAssembler::CallSize(Address target, RelocInfo::Mode rmode,
                             Condition cond) {
  Operand mov_operand = Operand(target, rmode);
  return (2 + instructions_required_for_mov(ip, mov_operand)) * kInstrSize;
}
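// The sequence emitted by Call(Address) below is mov(ip, target) followed by
// mtctr/bctrl, so its predicted size is the size of the materializing mov
// (which varies with constant pool availability) plus two instructions;
// CallSize(Register) above is just the two-instruction mtctr/bctrl pair.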

int MacroAssembler::CallSizeNotPredictableCodeSize(Address target,
                                                   RelocInfo::Mode rmode,
                                                   Condition cond) {
  return (2 + kMovInstructionsNoConstantPool) * kInstrSize;
}

void TurboAssembler::Call(Address target, RelocInfo::Mode rmode,
                          Condition cond) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  DCHECK(cond == al);

#ifdef DEBUG
  // Check the expected size before generating code to ensure we assume the
  // same constant pool availability (e.g., whether constant pool is full or
  // not).
  int expected_size = CallSize(target, rmode, cond);
  Label start;
  bind(&start);
#endif
  // This can likely be optimized to make use of bc() with a 24-bit relative
  // offset:
  //
  // RecordRelocInfo(x.rmode_, x.immediate);
  // bc( BA, .... offset, LKset);
  //

  mov(ip, Operand(target, rmode));
  mtctr(ip);
  bctrl();

  DCHECK_EQ(expected_size, SizeOfCodeGeneratedSince(&start));
}

int TurboAssembler::CallSize(Handle<Code> code, RelocInfo::Mode rmode,
                             Condition cond) {
  return CallSize(code.address(), rmode, cond);
}

void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
                          Condition cond) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  DCHECK(RelocInfo::IsCodeTarget(rmode));

#ifdef V8_EMBEDDED_BUILTINS
  if (root_array_available_ && isolate()->ShouldLoadConstantsFromRootList()) {
    // Use ip directly instead of using UseScratchRegisterScope, as we do not
    // preserve scratch registers across calls.
    LookupConstant(ip, code);
    addi(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
    Label skip;
    if (cond != al) b(NegateCondition(cond), &skip);
    Call(ip);
    bind(&skip);
    return;
  }
#endif  // V8_EMBEDDED_BUILTINS
  Call(code.address(), rmode, cond);
}

void TurboAssembler::Drop(int count) {
  if (count > 0) {
    Add(sp, sp, count * kPointerSize, r0);
  }
}

void TurboAssembler::Drop(Register count, Register scratch) {
  ShiftLeftImm(scratch, count, Operand(kPointerSizeLog2));
  add(sp, sp, scratch);
}

void TurboAssembler::Call(Label* target) { b(target, SetLK); }

void TurboAssembler::Push(Handle<HeapObject> handle) {
  mov(r0, Operand(handle));
  push(r0);
}

void TurboAssembler::Push(Smi* smi) {
  mov(r0, Operand(smi));
  push(r0);
}

void TurboAssembler::Move(Register dst, Handle<HeapObject> value) {
#ifdef V8_EMBEDDED_BUILTINS
  if (root_array_available_ && isolate()->ShouldLoadConstantsFromRootList()) {
    Heap::RootListIndex root_index;
    if (!isolate()->heap()->IsRootHandle(value, &root_index)) {
      LookupConstant(dst, value);
    } else {
      LoadRoot(dst, root_index);
    }
    return;
  }
#endif  // V8_EMBEDDED_BUILTINS
  mov(dst, Operand(value));
}

void TurboAssembler::Move(Register dst, ExternalReference reference) {
#ifdef V8_EMBEDDED_BUILTINS
  if (root_array_available_ && isolate()->ShouldLoadConstantsFromRootList() &&
      reference.address() !=
          ExternalReference::roots_array_start(isolate()).address()) {
    LookupExternalReference(dst, reference);
    return;
  }
#endif  // V8_EMBEDDED_BUILTINS
  mov(dst, Operand(reference));
}
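// Summary inferred from the surrounding code: under V8_EMBEDDED_BUILTINS,
// both Move overloads above avoid embedding raw addresses in the instruction
// stream. Heap objects are rematerialized through the builtins constants
// table and external references through the isolate's external reference
// table, both reached via kRootRegister, which keeps the generated builtin
// code free of isolate-specific immediates.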

void TurboAssembler::Move(Register dst, Register src, Condition cond) {
  DCHECK(cond == al);
  if (dst != src) {
    mr(dst, src);
  }
}

void TurboAssembler::Move(DoubleRegister dst, DoubleRegister src) {
  if (dst != src) {
    fmr(dst, src);
  }
}

void TurboAssembler::MultiPush(RegList regs, Register location) {
  int16_t num_to_push = base::bits::CountPopulation(regs);
  int16_t stack_offset = num_to_push * kPointerSize;

  subi(location, location, Operand(stack_offset));
  for (int16_t i = Register::kNumRegisters - 1; i >= 0; i--) {
    if ((regs & (1 << i)) != 0) {
      stack_offset -= kPointerSize;
      StoreP(ToRegister(i), MemOperand(location, stack_offset));
    }
  }
}

void TurboAssembler::MultiPop(RegList regs, Register location) {
  int16_t stack_offset = 0;

  for (int16_t i = 0; i < Register::kNumRegisters; i++) {
    if ((regs & (1 << i)) != 0) {
      LoadP(ToRegister(i), MemOperand(location, stack_offset));
      stack_offset += kPointerSize;
    }
  }
  addi(location, location, Operand(stack_offset));
}
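// Layout note: MultiPush walks register codes from high to low while the
// store offset decreases, and MultiPop walks from low to high while the load
// offset increases. The two loops are exact inverses, and the lowest-numbered
// register in |regs| always ends up closest to the stack pointer.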

void TurboAssembler::MultiPushDoubles(RegList dregs, Register location) {
  int16_t num_to_push = base::bits::CountPopulation(dregs);
  int16_t stack_offset = num_to_push * kDoubleSize;

  subi(location, location, Operand(stack_offset));
  for (int16_t i = DoubleRegister::kNumRegisters - 1; i >= 0; i--) {
    if ((dregs & (1 << i)) != 0) {
      DoubleRegister dreg = DoubleRegister::from_code(i);
      stack_offset -= kDoubleSize;
      stfd(dreg, MemOperand(location, stack_offset));
    }
  }
}

void TurboAssembler::MultiPopDoubles(RegList dregs, Register location) {
  int16_t stack_offset = 0;

  for (int16_t i = 0; i < DoubleRegister::kNumRegisters; i++) {
    if ((dregs & (1 << i)) != 0) {
      DoubleRegister dreg = DoubleRegister::from_code(i);
      lfd(dreg, MemOperand(location, stack_offset));
      stack_offset += kDoubleSize;
    }
  }
  addi(location, location, Operand(stack_offset));
}

void TurboAssembler::LoadRoot(Register destination, Heap::RootListIndex index,
                              Condition cond) {
  DCHECK(cond == al);
  LoadP(destination, MemOperand(kRootRegister, index << kPointerSizeLog2), r0);
}

void MacroAssembler::RecordWriteField(Register object, int offset,
                                      Register value, Register dst,
                                      LinkRegisterStatus lr_status,
                                      SaveFPRegsMode save_fp,
                                      RememberedSetAction remembered_set_action,
                                      SmiCheck smi_check) {
  // First, check if a write barrier is even needed. The tests below
  // catch stores of Smis.
  Label done;

  // Skip barrier if writing a smi.
  if (smi_check == INLINE_SMI_CHECK) {
    JumpIfSmi(value, &done);
  }

  // Although the object register is tagged, the offset is relative to the
  // start of the object, so the offset must be a multiple of kPointerSize.
  DCHECK(IsAligned(offset, kPointerSize));

  Add(dst, object, offset - kHeapObjectTag, r0);
  if (emit_debug_code()) {
    Label ok;
    andi(r0, dst, Operand(kPointerSize - 1));
    beq(&ok, cr0);
    stop("Unaligned cell in write barrier");
    bind(&ok);
  }

  RecordWrite(object, dst, value, lr_status, save_fp, remembered_set_action,
              OMIT_SMI_CHECK);

  bind(&done);

  // Clobber clobbered input registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    mov(value, Operand(bit_cast<intptr_t>(kZapValue + 4)));
    mov(dst, Operand(bit_cast<intptr_t>(kZapValue + 8)));
  }
}

void TurboAssembler::SaveRegisters(RegList registers) {
  DCHECK_GT(NumRegs(registers), 0);
  RegList regs = 0;
  for (int i = 0; i < Register::kNumRegisters; ++i) {
    if ((registers >> i) & 1u) {
      regs |= Register::from_code(i).bit();
    }
  }

  MultiPush(regs);
}

void TurboAssembler::RestoreRegisters(RegList registers) {
  DCHECK_GT(NumRegs(registers), 0);
  RegList regs = 0;
  for (int i = 0; i < Register::kNumRegisters; ++i) {
    if ((registers >> i) & 1u) {
      regs |= Register::from_code(i).bit();
    }
  }
  MultiPop(regs);
}

void TurboAssembler::CallRecordWriteStub(
    Register object, Register address,
    RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode) {
  // TODO(albertnetymk): For now we ignore remembered_set_action and fp_mode,
  // i.e. always emit the remembered set and save FP registers in
  // RecordWriteStub. If a large performance regression is observed, we should
  // use these values to avoid unnecessary work.

  Callable const callable =
      Builtins::CallableFor(isolate(), Builtins::kRecordWrite);
  RegList registers = callable.descriptor().allocatable_registers();

  SaveRegisters(registers);

  Register object_parameter(callable.descriptor().GetRegisterParameter(
      RecordWriteDescriptor::kObject));
  Register slot_parameter(
      callable.descriptor().GetRegisterParameter(RecordWriteDescriptor::kSlot));
  Register isolate_parameter(callable.descriptor().GetRegisterParameter(
      RecordWriteDescriptor::kIsolate));
  Register remembered_set_parameter(callable.descriptor().GetRegisterParameter(
      RecordWriteDescriptor::kRememberedSet));
  Register fp_mode_parameter(callable.descriptor().GetRegisterParameter(
      RecordWriteDescriptor::kFPMode));

  push(object);
  push(address);

  pop(slot_parameter);
  pop(object_parameter);

  Move(isolate_parameter, ExternalReference::isolate_address(isolate()));
  Move(remembered_set_parameter, Smi::FromEnum(remembered_set_action));
  Move(fp_mode_parameter, Smi::FromEnum(fp_mode));
  Call(callable.code(), RelocInfo::CODE_TARGET);

  RestoreRegisters(registers);
}
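// The push/pop pair above routes |object| and |address| through the stack
// rather than using register moves. This sidesteps any aliasing between the
// incoming registers and the descriptor's parameter registers: |address| is
// popped into the slot parameter and |object| into the object parameter in
// the reverse of push order, so the transfer is correct regardless of which
// physical registers the call descriptor happens to assign.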

// Will clobber 4 registers: object, address, scratch, ip.  The
// register 'object' contains a heap object pointer.  The heap object
// tag is shifted away.
void MacroAssembler::RecordWrite(Register object, Register address,
                                 Register value, LinkRegisterStatus lr_status,
                                 SaveFPRegsMode fp_mode,
                                 RememberedSetAction remembered_set_action,
                                 SmiCheck smi_check) {
  DCHECK(object != value);
  if (emit_debug_code()) {
    LoadP(r0, MemOperand(address));
    cmp(r0, value);
    Check(eq, AbortReason::kWrongAddressOrValuePassedToRecordWrite);
  }

  if (remembered_set_action == OMIT_REMEMBERED_SET &&
      !FLAG_incremental_marking) {
    return;
  }

  // First, check if a write barrier is even needed. The tests below
  // catch stores of smis and stores into the young generation.
  Label done;

  if (smi_check == INLINE_SMI_CHECK) {
    JumpIfSmi(value, &done);
  }

  CheckPageFlag(value,
                value,  // Used as scratch.
                MemoryChunk::kPointersToHereAreInterestingMask, eq, &done);
  CheckPageFlag(object,
                value,  // Used as scratch.
                MemoryChunk::kPointersFromHereAreInterestingMask, eq, &done);

  // Record the actual write.
  if (lr_status == kLRHasNotBeenSaved) {
    mflr(r0);
    push(r0);
  }
  CallRecordWriteStub(object, address, remembered_set_action, fp_mode);
  if (lr_status == kLRHasNotBeenSaved) {
    pop(r0);
    mtlr(r0);
  }

  bind(&done);

  // Count number of write barriers in generated code.
  isolate()->counters()->write_barriers_static()->Increment();
  IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, ip,
                   value);

  // Clobber clobbered registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    mov(address, Operand(bit_cast<intptr_t>(kZapValue + 12)));
    mov(value, Operand(bit_cast<intptr_t>(kZapValue + 16)));
  }
}

void TurboAssembler::PushCommonFrame(Register marker_reg) {
  int fp_delta = 0;
  mflr(r0);
  if (FLAG_enable_embedded_constant_pool) {
    if (marker_reg.is_valid()) {
      Push(r0, fp, kConstantPoolRegister, marker_reg);
      fp_delta = 2;
    } else {
      Push(r0, fp, kConstantPoolRegister);
      fp_delta = 1;
    }
  } else {
    if (marker_reg.is_valid()) {
      Push(r0, fp, marker_reg);
      fp_delta = 1;
    } else {
      Push(r0, fp);
      fp_delta = 0;
    }
  }
  addi(fp, sp, Operand(fp_delta * kPointerSize));
}

void TurboAssembler::PushStandardFrame(Register function_reg) {
  int fp_delta = 0;
  mflr(r0);
  if (FLAG_enable_embedded_constant_pool) {
    if (function_reg.is_valid()) {
      Push(r0, fp, kConstantPoolRegister, cp, function_reg);
      fp_delta = 3;
    } else {
      Push(r0, fp, kConstantPoolRegister, cp);
      fp_delta = 2;
    }
  } else {
    if (function_reg.is_valid()) {
      Push(r0, fp, cp, function_reg);
      fp_delta = 2;
    } else {
      Push(r0, fp, cp);
      fp_delta = 1;
    }
  }
  addi(fp, sp, Operand(fp_delta * kPointerSize));
}

void TurboAssembler::RestoreFrameStateForTailCall() {
  if (FLAG_enable_embedded_constant_pool) {
    LoadP(kConstantPoolRegister,
          MemOperand(fp, StandardFrameConstants::kConstantPoolOffset));
    set_constant_pool_available(false);
  }
  LoadP(r0, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
  LoadP(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
  mtlr(r0);
}

// Push and pop all registers that can hold pointers.
void MacroAssembler::PushSafepointRegisters() {
  // Safepoints expect a block of kNumSafepointRegisters values on the
  // stack, so adjust the stack for unsaved registers.
  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
  DCHECK_GE(num_unsaved, 0);
  if (num_unsaved > 0) {
    subi(sp, sp, Operand(num_unsaved * kPointerSize));
  }
  MultiPush(kSafepointSavedRegisters);
}

void MacroAssembler::PopSafepointRegisters() {
  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
  MultiPop(kSafepointSavedRegisters);
  if (num_unsaved > 0) {
    addi(sp, sp, Operand(num_unsaved * kPointerSize));
  }
}

int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
  // The registers are pushed starting with the highest encoding,
  // which means that lowest encodings are closest to the stack pointer.
  RegList regs = kSafepointSavedRegisters;
  int index = 0;

  DCHECK(reg_code >= 0 && reg_code < kNumRegisters);

  for (int16_t i = 0; i < reg_code; i++) {
    if ((regs & (1 << i)) != 0) {
      index++;
    }
  }

  return index;
}
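// Worked example (with a hypothetical register set, not the actual PPC
// kSafepointSavedRegisters): if the saved set were r3..r10, the index for
// r3's code would be 0 (closest to sp) and for r6's code would be 3. The
// loop simply counts how many saved registers have a lower encoding.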

void TurboAssembler::CanonicalizeNaN(const DoubleRegister dst,
                                     const DoubleRegister src) {
  // Turn potential sNaN into qNaN.
  fsub(dst, src, kDoubleRegZero);
}

void TurboAssembler::ConvertIntToDouble(Register src, DoubleRegister dst) {
  MovIntToDouble(dst, src, r0);
  fcfid(dst, dst);
}

void TurboAssembler::ConvertUnsignedIntToDouble(Register src,
                                                DoubleRegister dst) {
  MovUnsignedIntToDouble(dst, src, r0);
  fcfid(dst, dst);
}

void TurboAssembler::ConvertIntToFloat(Register src, DoubleRegister dst) {
  MovIntToDouble(dst, src, r0);
  fcfids(dst, dst);
}

void TurboAssembler::ConvertUnsignedIntToFloat(Register src,
                                               DoubleRegister dst) {
  MovUnsignedIntToDouble(dst, src, r0);
  fcfids(dst, dst);
}

#if V8_TARGET_ARCH_PPC64
void TurboAssembler::ConvertInt64ToDouble(Register src,
                                          DoubleRegister double_dst) {
  MovInt64ToDouble(double_dst, src);
  fcfid(double_dst, double_dst);
}

void TurboAssembler::ConvertUnsignedInt64ToFloat(Register src,
                                                 DoubleRegister double_dst) {
  MovInt64ToDouble(double_dst, src);
  fcfidus(double_dst, double_dst);
}

void TurboAssembler::ConvertUnsignedInt64ToDouble(Register src,
                                                  DoubleRegister double_dst) {
  MovInt64ToDouble(double_dst, src);
  fcfidu(double_dst, double_dst);
}

void TurboAssembler::ConvertInt64ToFloat(Register src,
                                         DoubleRegister double_dst) {
  MovInt64ToDouble(double_dst, src);
  fcfids(double_dst, double_dst);
}
#endif

void TurboAssembler::ConvertDoubleToInt64(const DoubleRegister double_input,
#if !V8_TARGET_ARCH_PPC64
                                          const Register dst_hi,
#endif
                                          const Register dst,
                                          const DoubleRegister double_dst,
                                          FPRoundingMode rounding_mode) {
  if (rounding_mode == kRoundToZero) {
    fctidz(double_dst, double_input);
  } else {
    SetRoundingMode(rounding_mode);
    fctid(double_dst, double_input);
    ResetRoundingMode();
  }

  MovDoubleToInt64(
#if !V8_TARGET_ARCH_PPC64
      dst_hi,
#endif
      dst, double_dst);
}

#if V8_TARGET_ARCH_PPC64
void TurboAssembler::ConvertDoubleToUnsignedInt64(
    const DoubleRegister double_input, const Register dst,
    const DoubleRegister double_dst, FPRoundingMode rounding_mode) {
  if (rounding_mode == kRoundToZero) {
    fctiduz(double_dst, double_input);
  } else {
    SetRoundingMode(rounding_mode);
    fctidu(double_dst, double_input);
    ResetRoundingMode();
  }

  MovDoubleToInt64(dst, double_dst);
}
#endif

#if !V8_TARGET_ARCH_PPC64
void TurboAssembler::ShiftLeftPair(Register dst_low, Register dst_high,
                                   Register src_low, Register src_high,
                                   Register scratch, Register shift) {
  DCHECK(!AreAliased(dst_low, src_high));
  DCHECK(!AreAliased(dst_high, src_low));
  DCHECK(!AreAliased(dst_low, dst_high, shift));
  Label less_than_32;
  Label done;
  cmpi(shift, Operand(32));
  blt(&less_than_32);
  // If shift >= 32
  andi(scratch, shift, Operand(0x1F));
  slw(dst_high, src_low, scratch);
  li(dst_low, Operand::Zero());
  b(&done);
  bind(&less_than_32);
  // If shift < 32
  subfic(scratch, shift, Operand(32));
  slw(dst_high, src_high, shift);
  srw(scratch, src_low, scratch);
  orx(dst_high, dst_high, scratch);
  slw(dst_low, src_low, shift);
  bind(&done);
}

void TurboAssembler::ShiftLeftPair(Register dst_low, Register dst_high,
                                   Register src_low, Register src_high,
                                   uint32_t shift) {
  DCHECK(!AreAliased(dst_low, src_high));
  DCHECK(!AreAliased(dst_high, src_low));
  if (shift == 32) {
    Move(dst_high, src_low);
    li(dst_low, Operand::Zero());
  } else if (shift > 32) {
    shift &= 0x1F;
    slwi(dst_high, src_low, Operand(shift));
    li(dst_low, Operand::Zero());
  } else if (shift == 0) {
    Move(dst_low, src_low);
    Move(dst_high, src_high);
  } else {
    slwi(dst_high, src_high, Operand(shift));
    rlwimi(dst_high, src_low, shift, 32 - shift, 31);
    slwi(dst_low, src_low, Operand(shift));
  }
}
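// The rlwimi above is the PPC "rotate left word immediate then mask insert"
// idiom: src_low is rotated left by |shift| and only bit positions
// 32 - shift .. 31 of dst_high (the least significant |shift| bits) are
// overwritten, which deposits the top |shift| bits of src_low into the low
// bits of the already-shifted high word. ShiftRightPair and ShiftRightAlgPair
// below use the mirrored form to carry the low bits of the high word into
// the top of the low word.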

void TurboAssembler::ShiftRightPair(Register dst_low, Register dst_high,
                                    Register src_low, Register src_high,
                                    Register scratch, Register shift) {
  DCHECK(!AreAliased(dst_low, src_high));
  DCHECK(!AreAliased(dst_high, src_low));
  DCHECK(!AreAliased(dst_low, dst_high, shift));
  Label less_than_32;
  Label done;
  cmpi(shift, Operand(32));
  blt(&less_than_32);
  // If shift >= 32
  andi(scratch, shift, Operand(0x1F));
  srw(dst_low, src_high, scratch);
  li(dst_high, Operand::Zero());
  b(&done);
  bind(&less_than_32);
  // If shift < 32
  subfic(scratch, shift, Operand(32));
  srw(dst_low, src_low, shift);
  slw(scratch, src_high, scratch);
  orx(dst_low, dst_low, scratch);
  srw(dst_high, src_high, shift);
  bind(&done);
}

void TurboAssembler::ShiftRightPair(Register dst_low, Register dst_high,
                                    Register src_low, Register src_high,
                                    uint32_t shift) {
  DCHECK(!AreAliased(dst_low, src_high));
  DCHECK(!AreAliased(dst_high, src_low));
  if (shift == 32) {
    Move(dst_low, src_high);
    li(dst_high, Operand::Zero());
  } else if (shift > 32) {
    shift &= 0x1F;
    srwi(dst_low, src_high, Operand(shift));
    li(dst_high, Operand::Zero());
  } else if (shift == 0) {
    Move(dst_low, src_low);
    Move(dst_high, src_high);
  } else {
    srwi(dst_low, src_low, Operand(shift));
    rlwimi(dst_low, src_high, 32 - shift, 0, shift - 1);
    srwi(dst_high, src_high, Operand(shift));
  }
}

void TurboAssembler::ShiftRightAlgPair(Register dst_low, Register dst_high,
                                       Register src_low, Register src_high,
                                       Register scratch, Register shift) {
  DCHECK(!AreAliased(dst_low, src_high, shift));
  DCHECK(!AreAliased(dst_high, src_low, shift));
  Label less_than_32;
  Label done;
  cmpi(shift, Operand(32));
  blt(&less_than_32);
  // If shift >= 32
  andi(scratch, shift, Operand(0x1F));
  sraw(dst_low, src_high, scratch);
  srawi(dst_high, src_high, 31);
  b(&done);
  bind(&less_than_32);
  // If shift < 32
  subfic(scratch, shift, Operand(32));
  srw(dst_low, src_low, shift);
  slw(scratch, src_high, scratch);
  orx(dst_low, dst_low, scratch);
  sraw(dst_high, src_high, shift);
  bind(&done);
}

void TurboAssembler::ShiftRightAlgPair(Register dst_low, Register dst_high,
                                       Register src_low, Register src_high,
                                       uint32_t shift) {
  DCHECK(!AreAliased(dst_low, src_high));
  DCHECK(!AreAliased(dst_high, src_low));
  if (shift == 32) {
    Move(dst_low, src_high);
    srawi(dst_high, src_high, 31);
  } else if (shift > 32) {
    shift &= 0x1F;
    srawi(dst_low, src_high, shift);
    srawi(dst_high, src_high, 31);
  } else if (shift == 0) {
    Move(dst_low, src_low);
    Move(dst_high, src_high);
  } else {
    srwi(dst_low, src_low, Operand(shift));
    rlwimi(dst_low, src_high, 32 - shift, 0, shift - 1);
    srawi(dst_high, src_high, shift);
  }
}
#endif

void TurboAssembler::LoadConstantPoolPointerRegisterFromCodeTargetAddress(
    Register code_target_address) {
  lwz(kConstantPoolRegister,
      MemOperand(code_target_address,
                 Code::kConstantPoolOffset - Code::kHeaderSize));
  add(kConstantPoolRegister, kConstantPoolRegister, code_target_address);
}

void TurboAssembler::LoadPC(Register dst) {
  b(4, SetLK);
  mflr(dst);
}
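// LoadPC relies on a classic PPC idiom: b(4, SetLK) "branches" to the very
// next instruction while recording the return address in the link register,
// so the following mflr leaves the address of the mflr instruction itself in
// |dst|. No actual control transfer takes place.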

void TurboAssembler::ComputeCodeStartAddress(Register dst) {
  Label current_pc;
  mov_label_addr(dst, &current_pc);

  bind(&current_pc);
  subi(dst, dst, Operand(pc_offset()));
}

void TurboAssembler::LoadConstantPoolPointerRegister() {
  LoadPC(kConstantPoolRegister);
  int32_t delta = -pc_offset() + 4;
  add_label_offset(kConstantPoolRegister, kConstantPoolRegister,
                   ConstantPoolPosition(), delta);
}
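// In LoadConstantPoolPointerRegister, the PC captured by LoadPC is the
// address of its mflr, which sits at pc_offset() - 4 once LoadPC has been
// emitted. The delta of -pc_offset() + 4 therefore rebases the captured
// address back to the start of the code object, and add_label_offset then
// adds the constant pool's position to yield its absolute address.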

void TurboAssembler::StubPrologue(StackFrame::Type type) {
  {
    ConstantPoolUnavailableScope constant_pool_unavailable(this);
    mov(r11, Operand(StackFrame::TypeToMarker(type)));
    PushCommonFrame(r11);
  }
  if (FLAG_enable_embedded_constant_pool) {
    LoadConstantPoolPointerRegister();
    set_constant_pool_available(true);
  }
}

void TurboAssembler::Prologue() {
  PushStandardFrame(r4);
  if (FLAG_enable_embedded_constant_pool) {
    // base contains prologue address
    LoadConstantPoolPointerRegister();
    set_constant_pool_available(true);
  }
}

void TurboAssembler::EnterFrame(StackFrame::Type type,
                                bool load_constant_pool_pointer_reg) {
  if (FLAG_enable_embedded_constant_pool && load_constant_pool_pointer_reg) {
    // Push type explicitly so we can leverage the constant pool.
    // This path cannot rely on ip containing code entry.
    PushCommonFrame();
    LoadConstantPoolPointerRegister();
    mov(ip, Operand(StackFrame::TypeToMarker(type)));
    push(ip);
  } else {
    mov(ip, Operand(StackFrame::TypeToMarker(type)));
    PushCommonFrame(ip);
  }
  if (type == StackFrame::INTERNAL) {
    Move(ip, CodeObject());
    push(ip);
  }
}

int TurboAssembler::LeaveFrame(StackFrame::Type type, int stack_adjustment) {
  ConstantPoolUnavailableScope constant_pool_unavailable(this);
  // r3: preserved
  // r4: preserved
  // r5: preserved

  // Drop the execution stack down to the frame pointer and restore
  // the caller's state.
  int frame_ends;
  LoadP(r0, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
  LoadP(ip, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
  if (FLAG_enable_embedded_constant_pool) {
    LoadP(kConstantPoolRegister,
          MemOperand(fp, StandardFrameConstants::kConstantPoolOffset));
  }
  mtlr(r0);
  frame_ends = pc_offset();
  Add(sp, fp, StandardFrameConstants::kCallerSPOffset + stack_adjustment, r0);
  mr(fp, ip);
  return frame_ends;
}

// ExitFrame layout (probably wrongish.. needs updating)
//
//  SP -> previousSP
//        LK reserved
//        code
//        sp_on_exit (for debug?)
// oldSP->prev SP
//        LK
//        <parameters on stack>

// Prior to calling EnterExitFrame, we've got a bunch of parameters
// on the stack that we need to wrap a real frame around, so first
// we reserve a slot for LK and push the previous SP, which is captured
// in the fp register (r31).
// Then we buy a new frame.

void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
                                    StackFrame::Type frame_type) {
  DCHECK(frame_type == StackFrame::EXIT ||
         frame_type == StackFrame::BUILTIN_EXIT);
  // Set up the frame structure on the stack.
  DCHECK_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement);
  DCHECK_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset);
  DCHECK_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset);
  DCHECK_GT(stack_space, 0);

  // This is an opportunity to build a frame to wrap
  // all of the pushes that have happened inside of V8
  // since we were called from C code.

  mov(ip, Operand(StackFrame::TypeToMarker(frame_type)));
  PushCommonFrame(ip);
  // Reserve room for saved entry sp and code object.
  subi(sp, fp, Operand(ExitFrameConstants::kFixedFrameSizeFromFp));

  if (emit_debug_code()) {
    li(r8, Operand::Zero());
    StoreP(r8, MemOperand(fp, ExitFrameConstants::kSPOffset));
  }
  if (FLAG_enable_embedded_constant_pool) {
    StoreP(kConstantPoolRegister,
           MemOperand(fp, ExitFrameConstants::kConstantPoolOffset));
  }
  Move(r8, CodeObject());
  StoreP(r8, MemOperand(fp, ExitFrameConstants::kCodeOffset));

  // Save the frame pointer and the context in top.
  Move(r8, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
                                     isolate()));
  StoreP(fp, MemOperand(r8));
  Move(r8,
       ExternalReference::Create(IsolateAddressId::kContextAddress, isolate()));
  StoreP(cp, MemOperand(r8));

  // Optionally save all volatile double registers.
  if (save_doubles) {
    MultiPushDoubles(kCallerSavedDoubles);
    // Note that d0 will be accessible at
    //   fp - ExitFrameConstants::kFrameSize -
    //   kNumCallerSavedDoubles * kDoubleSize,
    // since the sp slot and code slot were pushed after the fp.
  }

  addi(sp, sp, Operand(-stack_space * kPointerSize));

  // Allocate and align the frame preparing for calling the runtime
  // function.
  const int frame_alignment = ActivationFrameAlignment();
  if (frame_alignment > kPointerSize) {
    DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
    ClearRightImm(sp, sp, Operand(WhichPowerOf2(frame_alignment)));
  }
  li(r0, Operand::Zero());
  StorePU(r0, MemOperand(sp, -kNumRequiredStackFrameSlots * kPointerSize));

  // Set the exit frame sp value to point just before the return address
  // location.
  addi(r8, sp, Operand((kStackFrameExtraParamSlot + 1) * kPointerSize));
  StoreP(r8, MemOperand(fp, ExitFrameConstants::kSPOffset));
}

int TurboAssembler::ActivationFrameAlignment() {
#if !defined(USE_SIMULATOR)
  // Running on the real platform. Use the alignment as mandated by the local
  // environment.
  // Note: This will break if we ever start generating snapshots on one PPC
  // platform for another PPC platform with a different alignment.
  return base::OS::ActivationFrameAlignment();
#else  // Simulated
  // If we are using the simulator then we should always align to the expected
  // alignment. As the simulator is used to generate snapshots we do not know
  // if the target platform will need alignment, so this is controlled from a
  // flag.
  return FLAG_sim_stack_alignment;
#endif
}

void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
                                    bool argument_count_is_length) {
  ConstantPoolUnavailableScope constant_pool_unavailable(this);
  // Optionally restore all double registers.
  if (save_doubles) {
    // Calculate the stack location of the saved doubles and restore them.
    const int kNumRegs = kNumCallerSavedDoubles;
    const int offset =
        (ExitFrameConstants::kFixedFrameSizeFromFp + kNumRegs * kDoubleSize);
    addi(r6, fp, Operand(-offset));
    MultiPopDoubles(kCallerSavedDoubles, r6);
  }

  // Clear top frame.
  li(r6, Operand::Zero());
  Move(ip, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
                                     isolate()));
  StoreP(r6, MemOperand(ip));

  // Restore current context from top and clear it in debug mode.
  Move(ip,
       ExternalReference::Create(IsolateAddressId::kContextAddress, isolate()));
  LoadP(cp, MemOperand(ip));

#ifdef DEBUG
  mov(r6, Operand(Context::kInvalidContext));
  Move(ip,
       ExternalReference::Create(IsolateAddressId::kContextAddress, isolate()));
  StoreP(r6, MemOperand(ip));
#endif

  // Tear down the exit frame, pop the arguments, and return.
  LeaveFrame(StackFrame::EXIT);

  if (argument_count.is_valid()) {
    if (!argument_count_is_length) {
      ShiftLeftImm(argument_count, argument_count, Operand(kPointerSizeLog2));
    }
    add(sp, sp, argument_count);
  }
}

void TurboAssembler::MovFromFloatResult(const DoubleRegister dst) {
  Move(dst, d1);
}

void TurboAssembler::MovFromFloatParameter(const DoubleRegister dst) {
  Move(dst, d1);
}

void TurboAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
                                        Register caller_args_count_reg,
                                        Register scratch0, Register scratch1) {
#if DEBUG
  if (callee_args_count.is_reg()) {
    DCHECK(!AreAliased(callee_args_count.reg(), caller_args_count_reg, scratch0,
                       scratch1));
  } else {
    DCHECK(!AreAliased(caller_args_count_reg, scratch0, scratch1));
  }
#endif

  // Calculate the end of the destination area where we will put the arguments
  // after we drop the current frame. We add kPointerSize to count the receiver
  // argument, which is not included in the formal parameter count.
  Register dst_reg = scratch0;
  ShiftLeftImm(dst_reg, caller_args_count_reg, Operand(kPointerSizeLog2));
  add(dst_reg, fp, dst_reg);
  addi(dst_reg, dst_reg,
       Operand(StandardFrameConstants::kCallerSPOffset + kPointerSize));

  Register src_reg = caller_args_count_reg;
  // Calculate the end of the source area. +kPointerSize is for the receiver.
  if (callee_args_count.is_reg()) {
    ShiftLeftImm(src_reg, callee_args_count.reg(), Operand(kPointerSizeLog2));
    add(src_reg, sp, src_reg);
    addi(src_reg, src_reg, Operand(kPointerSize));
  } else {
    Add(src_reg, sp, (callee_args_count.immediate() + 1) * kPointerSize, r0);
  }

  if (FLAG_debug_code) {
    cmpl(src_reg, dst_reg);
    Check(lt, AbortReason::kStackAccessBelowStackPointer);
  }

  // Restore caller's frame pointer and return address now as they will be
  // overwritten by the copying loop.
  RestoreFrameStateForTailCall();

  // Now copy callee arguments to the caller frame going backwards to avoid
  // callee arguments corruption (source and destination areas could overlap).

  // Both src_reg and dst_reg are pointing to the word after the one to copy,
  // so they must be pre-decremented in the loop.
  Register tmp_reg = scratch1;
  Label loop;
  if (callee_args_count.is_reg()) {
    addi(tmp_reg, callee_args_count.reg(), Operand(1));  // +1 for receiver
  } else {
    mov(tmp_reg, Operand(callee_args_count.immediate() + 1));
  }
  mtctr(tmp_reg);
  bind(&loop);
  LoadPU(tmp_reg, MemOperand(src_reg, -kPointerSize));
  StorePU(tmp_reg, MemOperand(dst_reg, -kPointerSize));
  bdnz(&loop);

  // Leave current frame.
  mr(sp, dst_reg);
}
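// Note on the copy loop above: the iteration count is moved into the count
// register with mtctr, which frees tmp_reg to double as the data register
// inside the loop; bdnz then decrements CTR and branches, so no separate
// induction variable is needed.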

void MacroAssembler::InvokePrologue(const ParameterCount& expected,
                                    const ParameterCount& actual, Label* done,
                                    bool* definitely_mismatches,
                                    InvokeFlag flag) {
  bool definitely_matches = false;
  *definitely_mismatches = false;
  Label regular_invoke;

  // Check whether the expected and actual argument counts match. If not,
  // set up registers according to the contract with
  // ArgumentsAdaptorTrampoline:
  //  r3: actual arguments count
  //  r4: function (passed through to callee)
  //  r5: expected arguments count

  // The code below is made a lot easier because the calling code already sets
  // up actual and expected registers according to the contract if values are
  // passed in registers.

  // ARM has some sanity checks as per below; consider adding them for PPC:
  //  DCHECK(actual.is_immediate() || actual.reg() == r3);
  //  DCHECK(expected.is_immediate() || expected.reg() == r5);

  if (expected.is_immediate()) {
    DCHECK(actual.is_immediate());
    mov(r3, Operand(actual.immediate()));
    if (expected.immediate() == actual.immediate()) {
      definitely_matches = true;
    } else {
      const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
      if (expected.immediate() == sentinel) {
        // Don't worry about adapting arguments for builtins that
        // don't want that done. Skip adaptation code by making it look
        // like we have a match between expected and actual number of
        // arguments.
        definitely_matches = true;
      } else {
        *definitely_mismatches = true;
        mov(r5, Operand(expected.immediate()));
      }
    }
  } else {
    if (actual.is_immediate()) {
      mov(r3, Operand(actual.immediate()));
      cmpi(expected.reg(), Operand(actual.immediate()));
      beq(&regular_invoke);
    } else {
      cmp(expected.reg(), actual.reg());
      beq(&regular_invoke);
    }
  }

  if (!definitely_matches) {
    Handle<Code> adaptor = BUILTIN_CODE(isolate(), ArgumentsAdaptorTrampoline);
    if (flag == CALL_FUNCTION) {
      Call(adaptor);
      if (!*definitely_mismatches) {
        b(done);
      }
    } else {
      Jump(adaptor, RelocInfo::CODE_TARGET);
    }
    bind(&regular_invoke);
  }
}

void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
                                    const ParameterCount& expected,
                                    const ParameterCount& actual) {
  Label skip_hook;

  ExternalReference debug_hook_active =
      ExternalReference::debug_hook_on_function_call_address(isolate());
  Move(r7, debug_hook_active);
  LoadByte(r7, MemOperand(r7), r0);
  extsb(r7, r7);
  CmpSmiLiteral(r7, Smi::kZero, r0);
  beq(&skip_hook);

  {
    // Load receiver to pass it later to the DebugOnFunctionCall hook.
    if (actual.is_reg()) {
      mr(r7, actual.reg());
    } else {
      mov(r7, Operand(actual.immediate()));
    }
    ShiftLeftImm(r7, r7, Operand(kPointerSizeLog2));
    LoadPX(r7, MemOperand(sp, r7));
    FrameScope frame(this,
                     has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
    if (expected.is_reg()) {
      SmiTag(expected.reg());
      Push(expected.reg());
    }
    if (actual.is_reg()) {
      SmiTag(actual.reg());
      Push(actual.reg());
    }
    if (new_target.is_valid()) {
      Push(new_target);
    }
    Push(fun, fun, r7);
    CallRuntime(Runtime::kDebugOnFunctionCall);
    Pop(fun);
    if (new_target.is_valid()) {
      Pop(new_target);
    }
    if (actual.is_reg()) {
      Pop(actual.reg());
      SmiUntag(actual.reg());
    }
    if (expected.is_reg()) {
      Pop(expected.reg());
      SmiUntag(expected.reg());
    }
  }
  bind(&skip_hook);
}
1348 
InvokeFunctionCode(Register function,Register new_target,const ParameterCount & expected,const ParameterCount & actual,InvokeFlag flag)1349 void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
1350                                         const ParameterCount& expected,
1351                                         const ParameterCount& actual,
1352                                         InvokeFlag flag) {
1353   // You can't call a function without a valid frame.
1354   DCHECK(flag == JUMP_FUNCTION || has_frame());
1355   DCHECK(function == r4);
1356   DCHECK_IMPLIES(new_target.is_valid(), new_target == r6);
1357 
1358   // On function call, call into the debugger if necessary.
1359   CheckDebugHook(function, new_target, expected, actual);
1360 
1361   // Clear the new.target register if not given.
1362   if (!new_target.is_valid()) {
1363     LoadRoot(r6, Heap::kUndefinedValueRootIndex);
1364   }
1365 
1366   Label done;
1367   bool definitely_mismatches = false;
1368   InvokePrologue(expected, actual, &done, &definitely_mismatches, flag);
1369   if (!definitely_mismatches) {
1370     // We call indirectly through the code field in the function to
1371     // allow recompilation to take effect without changing any of the
1372     // call sites.
1373     Register code = kJavaScriptCallCodeStartRegister;
1374     LoadP(code, FieldMemOperand(function, JSFunction::kCodeOffset));
1375     addi(code, code, Operand(Code::kHeaderSize - kHeapObjectTag));
1376     if (flag == CALL_FUNCTION) {
1377       CallJSEntry(code);
1378     } else {
1379       DCHECK(flag == JUMP_FUNCTION);
1380       JumpToJSEntry(code);
1381     }
1382 
1383     // Continue here if InvokePrologue does handle the invocation due to
1384     // mismatched parameter counts.
1385     bind(&done);
1386   }
1387 }
1388 
InvokeFunction(Register fun,Register new_target,const ParameterCount & actual,InvokeFlag flag)1389 void MacroAssembler::InvokeFunction(Register fun, Register new_target,
1390                                     const ParameterCount& actual,
1391                                     InvokeFlag flag) {
1392   // You can't call a function without a valid frame.
1393   DCHECK(flag == JUMP_FUNCTION || has_frame());
1394 
1395   // Contract with called JS functions requires that function is passed in r4.
1396   DCHECK(fun == r4);
1397 
1398   Register expected_reg = r5;
1399   Register temp_reg = r7;
1400 
1401   LoadP(temp_reg, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
1402   LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
1403   LoadWordArith(expected_reg,
1404                 FieldMemOperand(
1405                     temp_reg, SharedFunctionInfo::kFormalParameterCountOffset));
1406 
1407   ParameterCount expected(expected_reg);
1408   InvokeFunctionCode(fun, new_target, expected, actual, flag);
1409 }
1410 
1411 void MacroAssembler::InvokeFunction(Register function,
1412                                     const ParameterCount& expected,
1413                                     const ParameterCount& actual,
1414                                     InvokeFlag flag) {
1415   // You can't call a function without a valid frame.
1416   DCHECK(flag == JUMP_FUNCTION || has_frame());
1417 
1418   // Contract with called JS functions requires that function is passed in r4.
1419   DCHECK(function == r4);
1420 
1421   // Get the function and setup the context.
1422   LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
1423 
1424   InvokeFunctionCode(r4, no_reg, expected, actual, flag);
1425 }
1426 
1427 void MacroAssembler::MaybeDropFrames() {
1428   // Check whether we need to drop frames to restart a function on the stack.
1429   ExternalReference restart_fp =
1430       ExternalReference::debug_restart_fp_address(isolate());
1431   Move(r4, restart_fp);
1432   LoadP(r4, MemOperand(r4));
1433   cmpi(r4, Operand::Zero());
1434   Jump(BUILTIN_CODE(isolate(), FrameDropperTrampoline), RelocInfo::CODE_TARGET,
1435        ne);
1436 }
1437 
1438 void MacroAssembler::PushStackHandler() {
1439   // Adjust this code if the handler layout asserted below changes.
1440   STATIC_ASSERT(StackHandlerConstants::kSize == 2 * kPointerSize);
1441   STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
1442 
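  // One word of padding plus the next-handler link give the two-word handler
  // asserted above; pushing the padding first leaves the link at offset 0.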
1443   Push(Smi::kZero);  // Padding.
1444 
1445   // Link the current handler as the next handler.
1446   // Preserve r3-r7.
1447   mov(r8, Operand(ExternalReference::Create(IsolateAddressId::kHandlerAddress,
1448                                             isolate())));
1449   LoadP(r0, MemOperand(r8));
1450   push(r0);
1451 
1452   // Set this new handler as the current one.
1453   StoreP(sp, MemOperand(r8));
1454 }
1455 
1456 
1457 void MacroAssembler::PopStackHandler() {
1458   STATIC_ASSERT(StackHandlerConstants::kSize == 2 * kPointerSize);
1459   STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
1460 
1461   pop(r4);
1462   mov(ip, Operand(ExternalReference::Create(IsolateAddressId::kHandlerAddress,
1463                                             isolate())));
1464   StoreP(r4, MemOperand(ip));
1465 
1466   Drop(1);  // Drop padding.
1467 }
1468 
1469 
1470 void MacroAssembler::CompareObjectType(Register object, Register map,
1471                                        Register type_reg, InstanceType type) {
1472   const Register temp = type_reg == no_reg ? r0 : type_reg;
1473 
1474   LoadP(map, FieldMemOperand(object, HeapObject::kMapOffset));
1475   CompareInstanceType(map, temp, type);
1476 }
1477 
1478 
1479 void MacroAssembler::CompareInstanceType(Register map, Register type_reg,
1480                                          InstanceType type) {
1481   STATIC_ASSERT(Map::kInstanceTypeOffset < 4096);
1482   STATIC_ASSERT(LAST_TYPE <= 0xFFFF);
1483   lhz(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
1484   cmpi(type_reg, Operand(type));
1485 }
1486 
1487 
1488 void MacroAssembler::CompareRoot(Register obj, Heap::RootListIndex index) {
1489   DCHECK(obj != r0);
1490   LoadRoot(r0, index);
1491   cmp(obj, r0);
1492 }
1493 
1494 void TurboAssembler::AddAndCheckForOverflow(Register dst, Register left,
1495                                             Register right,
1496                                             Register overflow_dst,
1497                                             Register scratch) {
1498   DCHECK(dst != overflow_dst);
1499   DCHECK(dst != scratch);
1500   DCHECK(overflow_dst != scratch);
1501   DCHECK(overflow_dst != left);
1502   DCHECK(overflow_dst != right);
1503 
1504   bool left_is_right = left == right;
1505   RCBit xorRC = left_is_right ? SetRC : LeaveRC;
1506 
1507   // C = A+B; C overflows if A/B have same sign and C has diff sign than A
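  // With C = A + B, the expression (C ^ A) & (C ^ B) has its sign bit set
  // exactly when A and B share a sign that differs from C's, i.e. on signed
  // overflow; SetRC copies that sign bit into CR0 via the recording forms.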
1508   if (dst == left) {
1509     mr(scratch, left);            // Preserve left.
1510     add(dst, left, right);        // Left is overwritten.
1511     xor_(overflow_dst, dst, scratch, xorRC);  // Original left.
1512     if (!left_is_right) xor_(scratch, dst, right);
1513   } else if (dst == right) {
1514     mr(scratch, right);           // Preserve right.
1515     add(dst, left, right);        // Right is overwritten.
1516     xor_(overflow_dst, dst, left, xorRC);
1517     if (!left_is_right) xor_(scratch, dst, scratch);  // Original right.
1518   } else {
1519     add(dst, left, right);
1520     xor_(overflow_dst, dst, left, xorRC);
1521     if (!left_is_right) xor_(scratch, dst, right);
1522   }
1523   if (!left_is_right) and_(overflow_dst, scratch, overflow_dst, SetRC);
1524 }
1525 
1526 void TurboAssembler::AddAndCheckForOverflow(Register dst, Register left,
1527                                             intptr_t right,
1528                                             Register overflow_dst,
1529                                             Register scratch) {
1530   Register original_left = left;
1531   DCHECK(dst != overflow_dst);
1532   DCHECK(dst != scratch);
1533   DCHECK(overflow_dst != scratch);
1534   DCHECK(overflow_dst != left);
1535 
1536   // C = A+B; C overflows if A/B have same sign and C has diff sign than A
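  // With an immediate B whose sign is known statically, only one side of the
  // usual (C ^ A) & (C ^ B) test is needed: for B >= 0 overflow means A >= 0
  // and C < 0 (and_ with dst); for B < 0 it means A < 0 and C >= 0 (andc).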
1537   if (dst == left) {
1538     // Preserve left.
1539     original_left = overflow_dst;
1540     mr(original_left, left);
1541   }
1542   Add(dst, left, right, scratch);
1543   xor_(overflow_dst, dst, original_left);
1544   if (right >= 0) {
1545     and_(overflow_dst, overflow_dst, dst, SetRC);
1546   } else {
1547     andc(overflow_dst, overflow_dst, dst, SetRC);
1548   }
1549 }
1550 
1551 void TurboAssembler::SubAndCheckForOverflow(Register dst, Register left,
1552                                             Register right,
1553                                             Register overflow_dst,
1554                                             Register scratch) {
1555   DCHECK(dst != overflow_dst);
1556   DCHECK(dst != scratch);
1557   DCHECK(overflow_dst != scratch);
1558   DCHECK(overflow_dst != left);
1559   DCHECK(overflow_dst != right);
1560 
1561   // C = A-B; C overflows if A/B have diff signs and C has diff sign than A
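  // With C = A - B, the expression (A ^ B) & (C ^ A) has its sign bit set
  // exactly when A and B have opposite signs and C's sign differs from A's,
  // i.e. on signed overflow.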
1562   if (dst == left) {
1563     mr(scratch, left);      // Preserve left.
1564     sub(dst, left, right);  // Left is overwritten.
1565     xor_(overflow_dst, dst, scratch);
1566     xor_(scratch, scratch, right);
1567     and_(overflow_dst, overflow_dst, scratch, SetRC);
1568   } else if (dst == right) {
1569     mr(scratch, right);     // Preserve right.
1570     sub(dst, left, right);  // Right is overwritten.
1571     xor_(overflow_dst, dst, left);
1572     xor_(scratch, left, scratch);
1573     and_(overflow_dst, overflow_dst, scratch, SetRC);
1574   } else {
1575     sub(dst, left, right);
1576     xor_(overflow_dst, dst, left);
1577     xor_(scratch, left, right);
1578     and_(overflow_dst, scratch, overflow_dst, SetRC);
1579   }
1580 }
1581 
1582 
1583 void MacroAssembler::CallStub(CodeStub* stub, Condition cond) {
1584   DCHECK(AllowThisStubCall(stub));  // Stub calls are not allowed in some stubs.
1585   Call(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
1586 }
1587 
1588 void TurboAssembler::CallStubDelayed(CodeStub* stub) {
1589   DCHECK(AllowThisStubCall(stub));  // Stub calls are not allowed in some stubs.
1590 
1591   // Block constant pool for the call instruction sequence.
1592   ConstantPoolUnavailableScope constant_pool_unavailable(this);
1593 
1594   mov(ip, Operand::EmbeddedCode(stub));
1595   mtctr(ip);
1596   bctrl();
1597 }
1598 
1599 void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) {
1600   Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
1601 }
1602 
1603 bool TurboAssembler::AllowThisStubCall(CodeStub* stub) {
1604   return has_frame_ || !stub->SometimesSetsUpAFrame();
1605 }
1606 
1607 void MacroAssembler::TryDoubleToInt32Exact(Register result,
1608                                            DoubleRegister double_input,
1609                                            Register scratch,
1610                                            DoubleRegister double_scratch) {
1611   Label done;
1612   DCHECK(double_input != double_scratch);
1613 
1614   ConvertDoubleToInt64(double_input,
1615 #if !V8_TARGET_ARCH_PPC64
1616                        scratch,
1617 #endif
1618                        result, double_scratch);
1619 
1620 #if V8_TARGET_ARCH_PPC64
1621   TestIfInt32(result, r0);
1622 #else
1623   TestIfInt32(scratch, result, r0);
1624 #endif
1625   bne(&done);
1626 
1627   // Convert back and compare; eq indicates the conversion was exact.
1628   fcfid(double_scratch, double_scratch);
1629   fcmpu(double_scratch, double_input);
1630   bind(&done);
1631 }
1632 
1633 void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
1634                                        Register result,
1635                                        DoubleRegister double_input) {
1636   Label done;
1637 
1638   TryInlineTruncateDoubleToI(result, double_input, &done);
1639 
1640   // If we fell through, the inline version didn't succeed, so call the stub.
1641   mflr(r0);
1642   push(r0);
1643   // Put input on stack.
1644   stfdu(double_input, MemOperand(sp, -kDoubleSize));
1645 
1646   Call(BUILTIN_CODE(isolate, DoubleToI), RelocInfo::CODE_TARGET);
1647 
1648   LoadP(result, MemOperand(sp));
1649   addi(sp, sp, Operand(kDoubleSize));
1650   pop(r0);
1651   mtlr(r0);
1652 
1653   bind(&done);
1654 }
1655 
1656 void TurboAssembler::TryInlineTruncateDoubleToI(Register result,
1657                                                 DoubleRegister double_input,
1658                                                 Label* done) {
1659   DoubleRegister double_scratch = kScratchDoubleReg;
1660 #if !V8_TARGET_ARCH_PPC64
1661   Register scratch = ip;
1662 #endif
1663 
1664   ConvertDoubleToInt64(double_input,
1665 #if !V8_TARGET_ARCH_PPC64
1666                        scratch,
1667 #endif
1668                        result, double_scratch);
1669 
1670 // Test for overflow
1671 #if V8_TARGET_ARCH_PPC64
1672   TestIfInt32(result, r0);
1673 #else
1674   TestIfInt32(scratch, result, r0);
1675 #endif
1676   beq(done);
1677 }
1678 
1679 void TurboAssembler::CallRuntimeDelayed(Zone* zone, Runtime::FunctionId fid,
1680                                         SaveFPRegsMode save_doubles) {
1681   const Runtime::Function* f = Runtime::FunctionForId(fid);
1682   // TODO(1236192): Most runtime routines don't need the number of
1683   // arguments passed in because it is constant. At some point we
1684   // should remove this need and make the runtime routine entry code
1685   // smarter.
1686   mov(r3, Operand(f->nargs));
1687   Move(r4, ExternalReference::Create(f));
1688 #if V8_TARGET_ARCH_PPC64
1689   Handle<Code> code =
1690       CodeFactory::CEntry(isolate(), f->result_size, save_doubles);
1691 #else
1692   Handle<Code> code = CodeFactory::CEntry(isolate(), 1, save_doubles);
1693 #endif
1694   Call(code, RelocInfo::CODE_TARGET);
1695 }
1696 
1697 void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
1698                                  SaveFPRegsMode save_doubles) {
1699   // All parameters are on the stack.  r3 has the return value after call.
1700 
1701   // If the expected number of arguments of the runtime function is
1702   // constant, we check that the actual number of arguments match the
1703   // expectation.
1704   CHECK(f->nargs < 0 || f->nargs == num_arguments);
1705 
1706   // TODO(1236192): Most runtime routines don't need the number of
1707   // arguments passed in because it is constant. At some point we
1708   // should remove this need and make the runtime routine entry code
1709   // smarter.
1710   mov(r3, Operand(num_arguments));
1711   Move(r4, ExternalReference::Create(f));
1712 #if V8_TARGET_ARCH_PPC64
1713   Handle<Code> code =
1714       CodeFactory::CEntry(isolate(), f->result_size, save_doubles);
1715 #else
1716   Handle<Code> code = CodeFactory::CEntry(isolate(), 1, save_doubles);
1717 #endif
1718   Call(code, RelocInfo::CODE_TARGET);
1719 }
1720 
1721 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
1722   const Runtime::Function* function = Runtime::FunctionForId(fid);
1723   DCHECK_EQ(1, function->result_size);
1724   if (function->nargs >= 0) {
1725     mov(r3, Operand(function->nargs));
1726   }
1727   JumpToExternalReference(ExternalReference::Create(fid));
1728 }
1729 
1730 
1731 void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
1732                                              bool builtin_exit_frame) {
1733   Move(r4, builtin);
1734   Handle<Code> code = CodeFactory::CEntry(isolate(), 1, kDontSaveFPRegs,
1735                                           kArgvOnStack, builtin_exit_frame);
1736   Jump(code, RelocInfo::CODE_TARGET);
1737 }
1738 
1739 void MacroAssembler::JumpToInstructionStream(Address entry) {
1740   mov(kOffHeapTrampolineRegister, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
1741   Jump(kOffHeapTrampolineRegister);
1742 }
1743 
1744 void MacroAssembler::LoadWeakValue(Register out, Register in,
1745                                    Label* target_if_cleared) {
1746   cmpi(in, Operand(kClearedWeakHeapObject));
1747   beq(target_if_cleared);
1748 
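  // Otherwise strip the weak-reference tag bits to recover the plain heap
  // object pointer.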
1749   mov(r0, Operand(~kWeakHeapObjectMask));
1750   and_(out, in, r0);
1751 }
1752 
1753 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
1754                                       Register scratch1, Register scratch2) {
1755   DCHECK_GT(value, 0);
1756   if (FLAG_native_code_counters && counter->Enabled()) {
1757     Move(scratch2, ExternalReference::Create(counter));
1758     lwz(scratch1, MemOperand(scratch2));
1759     addi(scratch1, scratch1, Operand(value));
1760     stw(scratch1, MemOperand(scratch2));
1761   }
1762 }
1763 
1764 
1765 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
1766                                       Register scratch1, Register scratch2) {
1767   DCHECK_GT(value, 0);
1768   if (FLAG_native_code_counters && counter->Enabled()) {
1769     Move(scratch2, ExternalReference::Create(counter));
1770     lwz(scratch1, MemOperand(scratch2));
1771     subi(scratch1, scratch1, Operand(value));
1772     stw(scratch1, MemOperand(scratch2));
1773   }
1774 }
1775 
1776 void TurboAssembler::Assert(Condition cond, AbortReason reason,
1777                             CRegister cr) {
1778   if (emit_debug_code()) Check(cond, reason, cr);
1779 }
1780 
1781 void TurboAssembler::Check(Condition cond, AbortReason reason, CRegister cr) {
1782   Label L;
1783   b(cond, &L, cr);
1784   Abort(reason);
1785   // will not return here
1786   bind(&L);
1787 }
1788 
1789 void TurboAssembler::Abort(AbortReason reason) {
1790   Label abort_start;
1791   bind(&abort_start);
1792 #ifdef DEBUG
1793   const char* msg = GetAbortReason(reason);
1794   if (msg != nullptr) {
1795     RecordComment("Abort message: ");
1796     RecordComment(msg);
1797   }
1798 
1799   if (FLAG_trap_on_abort) {
1800     stop(msg);
1801     return;
1802   }
1803 #endif
1804 
1805   LoadSmiLiteral(r4, Smi::FromInt(static_cast<int>(reason)));
1806 
1807   // Disable stub call restrictions to always allow calls to abort.
1808   if (!has_frame_) {
1809     // We don't actually want to generate a pile of code for this, so just
1810     // claim there is a stack frame, without generating one.
1811     FrameScope scope(this, StackFrame::NONE);
1812     Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
1813   } else {
1814     Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
1815   }
1816   // will not return here
1817 }
1818 
1819 void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
1820   LoadP(dst, NativeContextMemOperand());
1821   LoadP(dst, ContextMemOperand(dst, index));
1822 }
1823 
1824 
1825 void MacroAssembler::UntagAndJumpIfSmi(Register dst, Register src,
1826                                        Label* smi_case) {
1827   STATIC_ASSERT(kSmiTag == 0);
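  // The low tag bits of a Smi are all zero, so the eq branch below takes the
  // Smi path; SmiUntag does not touch CR0, so it can run first.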
1828   TestBitRange(src, kSmiTagSize - 1, 0, r0);
1829   SmiUntag(dst, src);
1830   beq(smi_case, cr0);
1831 }
1832 
1833 void MacroAssembler::JumpIfEitherSmi(Register reg1, Register reg2,
1834                                      Label* on_either_smi) {
1835   STATIC_ASSERT(kSmiTag == 0);
1836   JumpIfSmi(reg1, on_either_smi);
1837   JumpIfSmi(reg2, on_either_smi);
1838 }
1839 
1840 void MacroAssembler::AssertNotSmi(Register object) {
1841   if (emit_debug_code()) {
1842     STATIC_ASSERT(kSmiTag == 0);
1843     TestIfSmi(object, r0);
1844     Check(ne, AbortReason::kOperandIsASmi, cr0);
1845   }
1846 }
1847 
1848 
1849 void MacroAssembler::AssertSmi(Register object) {
1850   if (emit_debug_code()) {
1851     STATIC_ASSERT(kSmiTag == 0);
1852     TestIfSmi(object, r0);
1853     Check(eq, AbortReason::kOperandIsNotASmi, cr0);
1854   }
1855 }
1856 
1857 void MacroAssembler::AssertFixedArray(Register object) {
1858   if (emit_debug_code()) {
1859     STATIC_ASSERT(kSmiTag == 0);
1860     TestIfSmi(object, r0);
1861     Check(ne, AbortReason::kOperandIsASmiAndNotAFixedArray, cr0);
1862     push(object);
1863     CompareObjectType(object, object, object, FIXED_ARRAY_TYPE);
1864     pop(object);
1865     Check(eq, AbortReason::kOperandIsNotAFixedArray);
1866   }
1867 }
1868 
1869 void MacroAssembler::AssertConstructor(Register object) {
1870   if (emit_debug_code()) {
1871     STATIC_ASSERT(kSmiTag == 0);
1872     TestIfSmi(object, r0);
1873     Check(ne, AbortReason::kOperandIsASmiAndNotAConstructor, cr0);
1874     push(object);
1875     LoadP(object, FieldMemOperand(object, HeapObject::kMapOffset));
1876     lbz(object, FieldMemOperand(object, Map::kBitFieldOffset));
1877     andi(object, object, Operand(Map::IsConstructorBit::kMask));
1878     pop(object);
1879     Check(ne, AbortReason::kOperandIsNotAConstructor, cr0);
1880   }
1881 }
1882 
1883 void MacroAssembler::AssertFunction(Register object) {
1884   if (emit_debug_code()) {
1885     STATIC_ASSERT(kSmiTag == 0);
1886     TestIfSmi(object, r0);
1887     Check(ne, AbortReason::kOperandIsASmiAndNotAFunction, cr0);
1888     push(object);
1889     CompareObjectType(object, object, object, JS_FUNCTION_TYPE);
1890     pop(object);
1891     Check(eq, AbortReason::kOperandIsNotAFunction);
1892   }
1893 }
1894 
1895 
1896 void MacroAssembler::AssertBoundFunction(Register object) {
1897   if (emit_debug_code()) {
1898     STATIC_ASSERT(kSmiTag == 0);
1899     TestIfSmi(object, r0);
1900     Check(ne, AbortReason::kOperandIsASmiAndNotABoundFunction, cr0);
1901     push(object);
1902     CompareObjectType(object, object, object, JS_BOUND_FUNCTION_TYPE);
1903     pop(object);
1904     Check(eq, AbortReason::kOperandIsNotABoundFunction);
1905   }
1906 }
1907 
1908 void MacroAssembler::AssertGeneratorObject(Register object) {
1909   if (!emit_debug_code()) return;
1910   TestIfSmi(object, r0);
1911   Check(ne, AbortReason::kOperandIsASmiAndNotAGeneratorObject, cr0);
1912 
1913   // Load map
1914   Register map = object;
1915   push(object);
1916   LoadP(map, FieldMemOperand(object, HeapObject::kMapOffset));
1917 
1918   // Check if JSGeneratorObject
1919   Label do_check;
1920   Register instance_type = object;
1921   CompareInstanceType(map, instance_type, JS_GENERATOR_OBJECT_TYPE);
1922   beq(&do_check);
1923 
1924   // Check if JSAsyncGeneratorObject (See MacroAssembler::CompareInstanceType)
1925   cmpi(instance_type, Operand(JS_ASYNC_GENERATOR_OBJECT_TYPE));
1926 
1927   bind(&do_check);
1928   // Restore generator object to register and perform assertion
1929   pop(object);
1930   Check(eq, AbortReason::kOperandIsNotAGeneratorObject);
1931 }
1932 
1933 void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
1934                                                      Register scratch) {
1935   if (emit_debug_code()) {
1936     Label done_checking;
1937     AssertNotSmi(object);
1938     CompareRoot(object, Heap::kUndefinedValueRootIndex);
1939     beq(&done_checking);
1940     LoadP(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
1941     CompareRoot(scratch, Heap::kAllocationSiteMapRootIndex);
1942     Assert(eq, AbortReason::kExpectedUndefinedOrCell);
1943     bind(&done_checking);
1944   }
1945 }
1946 
1947 
1948 static const int kRegisterPassedArguments = 8;
1949 
1950 int TurboAssembler::CalculateStackPassedWords(int num_reg_arguments,
1951                                               int num_double_arguments) {
1952   int stack_passed_words = 0;
1953   if (num_double_arguments > DoubleRegister::kNumRegisters) {
1954     stack_passed_words +=
1955         2 * (num_double_arguments - DoubleRegister::kNumRegisters);
1956   }
1957   // Up to 8 simple arguments are passed in registers r3..r10.
1958   if (num_reg_arguments > kRegisterPassedArguments) {
1959     stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
1960   }
1961   return stack_passed_words;
1962 }
1963 
1964 void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
1965                                           int num_double_arguments,
1966                                           Register scratch) {
1967   int frame_alignment = ActivationFrameAlignment();
1968   int stack_passed_arguments =
1969       CalculateStackPassedWords(num_reg_arguments, num_double_arguments);
1970   int stack_space = kNumRequiredStackFrameSlots;
1971 
1972   if (frame_alignment > kPointerSize) {
1973     // Make stack end at alignment and make room for stack arguments
1974     // -- preserving original value of sp.
1975     mr(scratch, sp);
1976     addi(sp, sp, Operand(-(stack_passed_arguments + 1) * kPointerSize));
1977     DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
1978     ClearRightImm(sp, sp, Operand(WhichPowerOf2(frame_alignment)));
1979     StoreP(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
1980   } else {
1981     // Make room for stack arguments
1982     stack_space += stack_passed_arguments;
1983   }
1984 
1985   // Allocate frame with required slots to make ABI work.
1986   li(r0, Operand::Zero());
1987   StorePU(r0, MemOperand(sp, -stack_space * kPointerSize));
1988 }
1989 
1990 void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
1991                                           Register scratch) {
1992   PrepareCallCFunction(num_reg_arguments, 0, scratch);
1993 }
1994 
1995 void TurboAssembler::MovToFloatParameter(DoubleRegister src) { Move(d1, src); }
1996 
1997 void TurboAssembler::MovToFloatResult(DoubleRegister src) { Move(d1, src); }
1998 
1999 void TurboAssembler::MovToFloatParameters(DoubleRegister src1,
2000                                           DoubleRegister src2) {
2001   if (src2 == d1) {
2002     DCHECK(src1 != d2);
2003     Move(d2, src2);
2004     Move(d1, src1);
2005   } else {
2006     Move(d1, src1);
2007     Move(d2, src2);
2008   }
2009 }
2010 
2011 void TurboAssembler::CallCFunction(ExternalReference function,
2012                                    int num_reg_arguments,
2013                                    int num_double_arguments) {
2014   Move(ip, function);
2015   CallCFunctionHelper(ip, num_reg_arguments, num_double_arguments);
2016 }
2017 
2018 void TurboAssembler::CallCFunction(Register function, int num_reg_arguments,
2019                                    int num_double_arguments) {
2020   CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
2021 }
2022 
2023 void TurboAssembler::CallCFunction(ExternalReference function,
2024                                    int num_arguments) {
2025   CallCFunction(function, num_arguments, 0);
2026 }
2027 
2028 void TurboAssembler::CallCFunction(Register function, int num_arguments) {
2029   CallCFunction(function, num_arguments, 0);
2030 }
2031 
2032 void TurboAssembler::CallCFunctionHelper(Register function,
2033                                          int num_reg_arguments,
2034                                          int num_double_arguments) {
2035   DCHECK_LE(num_reg_arguments + num_double_arguments, kMaxCParameters);
2036   DCHECK(has_frame());
2037 
2038   // Just call directly. The function called cannot cause a GC, or
2039   // allow preemption, so the return address in the link register
2040   // stays correct.
2041   Register dest = function;
2042   if (ABI_USES_FUNCTION_DESCRIPTORS) {
2043     // AIX/PPC64BE Linux uses a function descriptor. When calling C code, be
2044     // aware of this descriptor and pick up values from it.
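    // The descriptor is three pointers: the code entry at offset 0, the TOC
    // pointer at kPointerSize, and an environment pointer (unused here).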
2045     LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(function, kPointerSize));
2046     LoadP(ip, MemOperand(function, 0));
2047     dest = ip;
2048   } else if (ABI_CALL_VIA_IP) {
2049     Move(ip, function);
2050     dest = ip;
2051   }
2052 
2053   Call(dest);
2054 
2055   // Remove the frame built in PrepareCallCFunction.
2056   int stack_passed_arguments =
2057       CalculateStackPassedWords(num_reg_arguments, num_double_arguments);
2058   int stack_space = kNumRequiredStackFrameSlots + stack_passed_arguments;
2059   if (ActivationFrameAlignment() > kPointerSize) {
2060     LoadP(sp, MemOperand(sp, stack_space * kPointerSize));
2061   } else {
2062     addi(sp, sp, Operand(stack_space * kPointerSize));
2063   }
2064 }
2065 
2066 
2067 void TurboAssembler::CheckPageFlag(
2068     Register object,
2069     Register scratch,  // scratch may be same register as object
2070     int mask, Condition cc, Label* condition_met) {
2071   DCHECK(cc == ne || cc == eq);
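  // Clearing the low kPageSizeBits bits of the object address yields the
  // start of its MemoryChunk, where the page flags live.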
2072   ClearRightImm(scratch, object, Operand(kPageSizeBits));
2073   LoadP(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
2074 
2075   mov(r0, Operand(mask));
2076   and_(r0, scratch, r0, SetRC);
2077 
2078   if (cc == ne) {
2079     bne(condition_met, cr0);
2080   }
2081   if (cc == eq) {
2082     beq(condition_met, cr0);
2083   }
2084 }
2085 
2086 void TurboAssembler::SetRoundingMode(FPRoundingMode RN) { mtfsfi(7, RN); }
2087 
2088 void TurboAssembler::ResetRoundingMode() {
2089   mtfsfi(7, kRoundToNearest);  // reset (default is kRoundToNearest)
2090 }
2091 
2092 
2093 ////////////////////////////////////////////////////////////////////////////////
2094 //
2095 // New MacroAssembler Interfaces added for PPC
2096 //
2097 ////////////////////////////////////////////////////////////////////////////////
2098 void TurboAssembler::LoadIntLiteral(Register dst, int value) {
2099   mov(dst, Operand(value));
2100 }
2101 
2102 void TurboAssembler::LoadSmiLiteral(Register dst, Smi* smi) {
2103   mov(dst, Operand(smi));
2104 }
2105 
2106 void TurboAssembler::LoadDoubleLiteral(DoubleRegister result, Double value,
2107                                        Register scratch) {
2108   if (FLAG_enable_embedded_constant_pool && is_constant_pool_available() &&
2109       !(scratch == r0 && ConstantPoolAccessIsInOverflow())) {
2110     ConstantPoolEntry::Access access = ConstantPoolAddEntry(value);
2111     if (access == ConstantPoolEntry::OVERFLOWED) {
2112       addis(scratch, kConstantPoolRegister, Operand::Zero());
2113       lfd(result, MemOperand(scratch, 0));
2114     } else {
2115       lfd(result, MemOperand(kConstantPoolRegister, 0));
2116     }
2117     return;
2118   }
2119 
2120   // Avoid a gcc strict-aliasing error by type-punning through a union.
2121   union {
2122     uint64_t dval;
2123 #if V8_TARGET_ARCH_PPC64
2124     intptr_t ival;
2125 #else
2126     intptr_t ival[2];
2127 #endif
2128   } litVal;
2129 
2130   litVal.dval = value.AsUint64();
2131 
2132 #if V8_TARGET_ARCH_PPC64
2133   if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
2134     mov(scratch, Operand(litVal.ival));
2135     mtfprd(result, scratch);
2136     return;
2137   }
2138 #endif
2139 
2140   addi(sp, sp, Operand(-kDoubleSize));
2141 #if V8_TARGET_ARCH_PPC64
2142   mov(scratch, Operand(litVal.ival));
2143   std(scratch, MemOperand(sp));
2144 #else
2145   LoadIntLiteral(scratch, litVal.ival[0]);
2146   stw(scratch, MemOperand(sp, 0));
2147   LoadIntLiteral(scratch, litVal.ival[1]);
2148   stw(scratch, MemOperand(sp, 4));
2149 #endif
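  // The group-ending nop forces the stores above and the reload below into
  // separate dispatch groups, avoiding a load-hit-store (RAW) hazard.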
2150   nop(GROUP_ENDING_NOP);  // LHS/RAW optimization
2151   lfd(result, MemOperand(sp, 0));
2152   addi(sp, sp, Operand(kDoubleSize));
2153 }
2154 
2155 void TurboAssembler::MovIntToDouble(DoubleRegister dst, Register src,
2156                                     Register scratch) {
2157 // sign-extend src to 64-bit
2158 #if V8_TARGET_ARCH_PPC64
2159   if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
2160     mtfprwa(dst, src);
2161     return;
2162   }
2163 #endif
2164 
2165   DCHECK(src != scratch);
2166   subi(sp, sp, Operand(kDoubleSize));
2167 #if V8_TARGET_ARCH_PPC64
2168   extsw(scratch, src);
2169   std(scratch, MemOperand(sp, 0));
2170 #else
2171   srawi(scratch, src, 31);
2172   stw(scratch, MemOperand(sp, Register::kExponentOffset));
2173   stw(src, MemOperand(sp, Register::kMantissaOffset));
2174 #endif
2175   nop(GROUP_ENDING_NOP);  // LHS/RAW optimization
2176   lfd(dst, MemOperand(sp, 0));
2177   addi(sp, sp, Operand(kDoubleSize));
2178 }
2179 
2180 void TurboAssembler::MovUnsignedIntToDouble(DoubleRegister dst, Register src,
2181                                             Register scratch) {
2182 // zero-extend src to 64-bit
2183 #if V8_TARGET_ARCH_PPC64
2184   if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
2185     mtfprwz(dst, src);
2186     return;
2187   }
2188 #endif
2189 
2190   DCHECK(src != scratch);
2191   subi(sp, sp, Operand(kDoubleSize));
2192 #if V8_TARGET_ARCH_PPC64
2193   clrldi(scratch, src, Operand(32));
2194   std(scratch, MemOperand(sp, 0));
2195 #else
2196   li(scratch, Operand::Zero());
2197   stw(scratch, MemOperand(sp, Register::kExponentOffset));
2198   stw(src, MemOperand(sp, Register::kMantissaOffset));
2199 #endif
2200   nop(GROUP_ENDING_NOP);  // LHS/RAW optimization
2201   lfd(dst, MemOperand(sp, 0));
2202   addi(sp, sp, Operand(kDoubleSize));
2203 }
2204 
2205 void TurboAssembler::MovInt64ToDouble(DoubleRegister dst,
2206 #if !V8_TARGET_ARCH_PPC64
2207                                       Register src_hi,
2208 #endif
2209                                       Register src) {
2210 #if V8_TARGET_ARCH_PPC64
2211   if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
2212     mtfprd(dst, src);
2213     return;
2214   }
2215 #endif
2216 
2217   subi(sp, sp, Operand(kDoubleSize));
2218 #if V8_TARGET_ARCH_PPC64
2219   std(src, MemOperand(sp, 0));
2220 #else
2221   stw(src_hi, MemOperand(sp, Register::kExponentOffset));
2222   stw(src, MemOperand(sp, Register::kMantissaOffset));
2223 #endif
2224   nop(GROUP_ENDING_NOP);  // LHS/RAW optimization
2225   lfd(dst, MemOperand(sp, 0));
2226   addi(sp, sp, Operand(kDoubleSize));
2227 }
2228 
2229 
2230 #if V8_TARGET_ARCH_PPC64
2231 void TurboAssembler::MovInt64ComponentsToDouble(DoubleRegister dst,
2232                                                 Register src_hi,
2233                                                 Register src_lo,
2234                                                 Register scratch) {
2235   if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
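    // Pack src_hi:src_lo into one 64-bit GPR, then move it directly to the
    // FPR without going through memory.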
2236     sldi(scratch, src_hi, Operand(32));
2237     rldimi(scratch, src_lo, 0, 32);
2238     mtfprd(dst, scratch);
2239     return;
2240   }
2241 
2242   subi(sp, sp, Operand(kDoubleSize));
2243   stw(src_hi, MemOperand(sp, Register::kExponentOffset));
2244   stw(src_lo, MemOperand(sp, Register::kMantissaOffset));
2245   nop(GROUP_ENDING_NOP);  // LHS/RAW optimization
2246   lfd(dst, MemOperand(sp));
2247   addi(sp, sp, Operand(kDoubleSize));
2248 }
2249 #endif
2250 
2251 void TurboAssembler::InsertDoubleLow(DoubleRegister dst, Register src,
2252                                      Register scratch) {
2253 #if V8_TARGET_ARCH_PPC64
2254   if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
2255     mffprd(scratch, dst);
2256     rldimi(scratch, src, 0, 32);
2257     mtfprd(dst, scratch);
2258     return;
2259   }
2260 #endif
2261 
2262   subi(sp, sp, Operand(kDoubleSize));
2263   stfd(dst, MemOperand(sp));
2264   stw(src, MemOperand(sp, Register::kMantissaOffset));
2265   nop(GROUP_ENDING_NOP);  // LHS/RAW optimization
2266   lfd(dst, MemOperand(sp));
2267   addi(sp, sp, Operand(kDoubleSize));
2268 }
2269 
2270 void TurboAssembler::InsertDoubleHigh(DoubleRegister dst, Register src,
2271                                       Register scratch) {
2272 #if V8_TARGET_ARCH_PPC64
2273   if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
2274     mffprd(scratch, dst);
2275     rldimi(scratch, src, 32, 0);
2276     mtfprd(dst, scratch);
2277     return;
2278   }
2279 #endif
2280 
2281   subi(sp, sp, Operand(kDoubleSize));
2282   stfd(dst, MemOperand(sp));
2283   stw(src, MemOperand(sp, Register::kExponentOffset));
2284   nop(GROUP_ENDING_NOP);  // LHS/RAW optimization
2285   lfd(dst, MemOperand(sp));
2286   addi(sp, sp, Operand(kDoubleSize));
2287 }
2288 
2289 void TurboAssembler::MovDoubleLowToInt(Register dst, DoubleRegister src) {
2290 #if V8_TARGET_ARCH_PPC64
2291   if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
2292     mffprwz(dst, src);
2293     return;
2294   }
2295 #endif
2296 
2297   subi(sp, sp, Operand(kDoubleSize));
2298   stfd(src, MemOperand(sp));
2299   nop(GROUP_ENDING_NOP);  // LHS/RAW optimization
2300   lwz(dst, MemOperand(sp, Register::kMantissaOffset));
2301   addi(sp, sp, Operand(kDoubleSize));
2302 }
2303 
2304 void TurboAssembler::MovDoubleHighToInt(Register dst, DoubleRegister src) {
2305 #if V8_TARGET_ARCH_PPC64
2306   if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
2307     mffprd(dst, src);
2308     srdi(dst, dst, Operand(32));
2309     return;
2310   }
2311 #endif
2312 
2313   subi(sp, sp, Operand(kDoubleSize));
2314   stfd(src, MemOperand(sp));
2315   nop(GROUP_ENDING_NOP);  // LHS/RAW optimization
2316   lwz(dst, MemOperand(sp, Register::kExponentOffset));
2317   addi(sp, sp, Operand(kDoubleSize));
2318 }
2319 
2320 void TurboAssembler::MovDoubleToInt64(
2321 #if !V8_TARGET_ARCH_PPC64
2322     Register dst_hi,
2323 #endif
2324     Register dst, DoubleRegister src) {
2325 #if V8_TARGET_ARCH_PPC64
2326   if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
2327     mffprd(dst, src);
2328     return;
2329   }
2330 #endif
2331 
2332   subi(sp, sp, Operand(kDoubleSize));
2333   stfd(src, MemOperand(sp));
2334   nop(GROUP_ENDING_NOP);  // LHS/RAW optimization
2335 #if V8_TARGET_ARCH_PPC64
2336   ld(dst, MemOperand(sp, 0));
2337 #else
2338   lwz(dst_hi, MemOperand(sp, Register::kExponentOffset));
2339   lwz(dst, MemOperand(sp, Register::kMantissaOffset));
2340 #endif
2341   addi(sp, sp, Operand(kDoubleSize));
2342 }
2343 
2344 void TurboAssembler::MovIntToFloat(DoubleRegister dst, Register src) {
2345   subi(sp, sp, Operand(kFloatSize));
2346   stw(src, MemOperand(sp, 0));
2347   nop(GROUP_ENDING_NOP);  // LHS/RAW optimization
2348   lfs(dst, MemOperand(sp, 0));
2349   addi(sp, sp, Operand(kFloatSize));
2350 }
2351 
2352 void TurboAssembler::MovFloatToInt(Register dst, DoubleRegister src) {
2353   subi(sp, sp, Operand(kFloatSize));
2354   stfs(src, MemOperand(sp, 0));
2355   nop(GROUP_ENDING_NOP);  // LHS/RAW optimization
2356   lwz(dst, MemOperand(sp, 0));
2357   addi(sp, sp, Operand(kFloatSize));
2358 }
2359 
2360 void TurboAssembler::Add(Register dst, Register src, intptr_t value,
2361                          Register scratch) {
2362   if (is_int16(value)) {
2363     addi(dst, src, Operand(value));
2364   } else {
2365     mov(scratch, Operand(value));
2366     add(dst, src, scratch);
2367   }
2368 }
2369 
2370 
2371 void MacroAssembler::Cmpi(Register src1, const Operand& src2, Register scratch,
2372                           CRegister cr) {
2373   intptr_t value = src2.immediate();
2374   if (is_int16(value)) {
2375     cmpi(src1, src2, cr);
2376   } else {
2377     mov(scratch, src2);
2378     cmp(src1, scratch, cr);
2379   }
2380 }
2381 
2382 void TurboAssembler::Cmpli(Register src1, const Operand& src2, Register scratch,
2383                            CRegister cr) {
2384   intptr_t value = src2.immediate();
2385   if (is_uint16(value)) {
2386     cmpli(src1, src2, cr);
2387   } else {
2388     mov(scratch, src2);
2389     cmpl(src1, scratch, cr);
2390   }
2391 }
2392 
2393 void TurboAssembler::Cmpwi(Register src1, const Operand& src2, Register scratch,
2394                            CRegister cr) {
2395   intptr_t value = src2.immediate();
2396   if (is_int16(value)) {
2397     cmpwi(src1, src2, cr);
2398   } else {
2399     mov(scratch, src2);
2400     cmpw(src1, scratch, cr);
2401   }
2402 }
2403 
2404 
2405 void MacroAssembler::Cmplwi(Register src1, const Operand& src2,
2406                             Register scratch, CRegister cr) {
2407   intptr_t value = src2.immediate();
2408   if (is_uint16(value)) {
2409     cmplwi(src1, src2, cr);
2410   } else {
2411     mov(scratch, src2);
2412     cmplw(src1, scratch, cr);
2413   }
2414 }
2415 
2416 
2417 void MacroAssembler::And(Register ra, Register rs, const Operand& rb,
2418                          RCBit rc) {
2419   if (rb.is_reg()) {
2420     and_(ra, rs, rb.rm(), rc);
2421   } else {
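    // The immediate AND (andi.) always records to CR0, so it is usable only
    // when the caller asked for SetRC.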
2422     if (is_uint16(rb.immediate()) && RelocInfo::IsNone(rb.rmode_) &&
2423         rc == SetRC) {
2424       andi(ra, rs, rb);
2425     } else {
2426       // mov handles the relocation.
2427       DCHECK(rs != r0);
2428       mov(r0, rb);
2429       and_(ra, rs, r0, rc);
2430     }
2431   }
2432 }
2433 
2434 
2435 void MacroAssembler::Or(Register ra, Register rs, const Operand& rb, RCBit rc) {
2436   if (rb.is_reg()) {
2437     orx(ra, rs, rb.rm(), rc);
2438   } else {
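    // ori never sets CR0, so the immediate form applies only when the caller
    // asked for LeaveRC.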
2439     if (is_uint16(rb.immediate()) && RelocInfo::IsNone(rb.rmode_) &&
2440         rc == LeaveRC) {
2441       ori(ra, rs, rb);
2442     } else {
2443       // mov handles the relocation.
2444       DCHECK(rs != r0);
2445       mov(r0, rb);
2446       orx(ra, rs, r0, rc);
2447     }
2448   }
2449 }
2450 
2451 
2452 void MacroAssembler::Xor(Register ra, Register rs, const Operand& rb,
2453                          RCBit rc) {
2454   if (rb.is_reg()) {
2455     xor_(ra, rs, rb.rm(), rc);
2456   } else {
2457     if (is_uint16(rb.immediate()) && RelocInfo::IsNone(rb.rmode_) &&
2458         rc == LeaveRC) {
2459       xori(ra, rs, rb);
2460     } else {
2461       // mov handles the relocation.
2462       DCHECK(rs != r0);
2463       mov(r0, rb);
2464       xor_(ra, rs, r0, rc);
2465     }
2466   }
2467 }
2468 
2469 
2470 void MacroAssembler::CmpSmiLiteral(Register src1, Smi* smi, Register scratch,
2471                                    CRegister cr) {
2472 #if V8_TARGET_ARCH_PPC64
2473   LoadSmiLiteral(scratch, smi);
2474   cmp(src1, scratch, cr);
2475 #else
2476   Cmpi(src1, Operand(smi), scratch, cr);
2477 #endif
2478 }
2479 
2480 
2481 void MacroAssembler::CmplSmiLiteral(Register src1, Smi* smi, Register scratch,
2482                                     CRegister cr) {
2483 #if V8_TARGET_ARCH_PPC64
2484   LoadSmiLiteral(scratch, smi);
2485   cmpl(src1, scratch, cr);
2486 #else
2487   Cmpli(src1, Operand(smi), scratch, cr);
2488 #endif
2489 }
2490 
2491 
2492 void MacroAssembler::AddSmiLiteral(Register dst, Register src, Smi* smi,
2493                                    Register scratch) {
2494 #if V8_TARGET_ARCH_PPC64
2495   LoadSmiLiteral(scratch, smi);
2496   add(dst, src, scratch);
2497 #else
2498   Add(dst, src, reinterpret_cast<intptr_t>(smi), scratch);
2499 #endif
2500 }
2501 
2502 
2503 void MacroAssembler::SubSmiLiteral(Register dst, Register src, Smi* smi,
2504                                    Register scratch) {
2505 #if V8_TARGET_ARCH_PPC64
2506   LoadSmiLiteral(scratch, smi);
2507   sub(dst, src, scratch);
2508 #else
2509   Add(dst, src, -(reinterpret_cast<intptr_t>(smi)), scratch);
2510 #endif
2511 }
2512 
2513 
2514 void MacroAssembler::AndSmiLiteral(Register dst, Register src, Smi* smi,
2515                                    Register scratch, RCBit rc) {
2516 #if V8_TARGET_ARCH_PPC64
2517   LoadSmiLiteral(scratch, smi);
2518   and_(dst, src, scratch, rc);
2519 #else
2520   And(dst, src, Operand(smi), rc);
2521 #endif
2522 }
2523 
2524 
2525 // Load a "pointer" sized value from the memory location
2526 void TurboAssembler::LoadP(Register dst, const MemOperand& mem,
2527                            Register scratch) {
2528   DCHECK_EQ(mem.rb(), no_reg);
2529   int offset = mem.offset();
2530 
2531   if (!is_int16(offset)) {
2532     /* cannot use d-form */
2533     DCHECK_NE(scratch, no_reg);
2534     mov(scratch, Operand(offset));
2535     LoadPX(dst, MemOperand(mem.ra(), scratch));
2536   } else {
2537 #if V8_TARGET_ARCH_PPC64
2538     int misaligned = (offset & 3);
2539     if (misaligned) {
2540       // Adjust the base so the displacement meets the DS-form alignment that
2541       // ld requires (a multiple of 4). TODO: use scratch if dst is unsuitable.
2542       DCHECK(dst != r0);
2543       addi(dst, mem.ra(), Operand((offset & 3) - 4));
2544       ld(dst, MemOperand(dst, (offset & ~3) + 4));
2545     } else {
2546       ld(dst, mem);
2547     }
2548 #else
2549     lwz(dst, mem);
2550 #endif
2551   }
2552 }
2553 
2554 void TurboAssembler::LoadPU(Register dst, const MemOperand& mem,
2555                             Register scratch) {
2556   int offset = mem.offset();
2557 
2558   if (!is_int16(offset)) {
2559     /* cannot use d-form */
2560     DCHECK(scratch != no_reg);
2561     mov(scratch, Operand(offset));
2562     LoadPUX(dst, MemOperand(mem.ra(), scratch));
2563   } else {
2564 #if V8_TARGET_ARCH_PPC64
2565     ldu(dst, mem);
2566 #else
2567     lwzu(dst, mem);
2568 #endif
2569   }
2570 }
2571 
2572 // Store a "pointer" sized value to the memory location
2573 void TurboAssembler::StoreP(Register src, const MemOperand& mem,
2574                             Register scratch) {
2575   int offset = mem.offset();
2576 
2577   if (!is_int16(offset)) {
2578     /* cannot use d-form */
2579     DCHECK(scratch != no_reg);
2580     mov(scratch, Operand(offset));
2581     StorePX(src, MemOperand(mem.ra(), scratch));
2582   } else {
2583 #if V8_TARGET_ARCH_PPC64
2584     int misaligned = (offset & 3);
2585     if (misaligned) {
2586       // Adjust the base so the displacement meets the DS-form alignment that
2587       // std requires (a multiple of 4); a suitable scratch is required here.
2588       DCHECK(scratch != no_reg);
2589       if (scratch == r0) {
2590         LoadIntLiteral(scratch, offset);
2591         stdx(src, MemOperand(mem.ra(), scratch));
2592       } else {
2593         addi(scratch, mem.ra(), Operand((offset & 3) - 4));
2594         std(src, MemOperand(scratch, (offset & ~3) + 4));
2595       }
2596     } else {
2597       std(src, mem);
2598     }
2599 #else
2600     stw(src, mem);
2601 #endif
2602   }
2603 }
2604 
2605 void TurboAssembler::StorePU(Register src, const MemOperand& mem,
2606                              Register scratch) {
2607   int offset = mem.offset();
2608 
2609   if (!is_int16(offset)) {
2610     /* cannot use d-form */
2611     DCHECK(scratch != no_reg);
2612     mov(scratch, Operand(offset));
2613     StorePUX(src, MemOperand(mem.ra(), scratch));
2614   } else {
2615 #if V8_TARGET_ARCH_PPC64
2616     stdu(src, mem);
2617 #else
2618     stwu(src, mem);
2619 #endif
2620   }
2621 }
2622 
2623 void TurboAssembler::LoadWordArith(Register dst, const MemOperand& mem,
2624                                    Register scratch) {
2625   int offset = mem.offset();
2626 
2627   if (!is_int16(offset)) {
2628     DCHECK(scratch != no_reg);
2629     mov(scratch, Operand(offset));
2630     lwax(dst, MemOperand(mem.ra(), scratch));
2631   } else {
2632 #if V8_TARGET_ARCH_PPC64
2633     int misaligned = (offset & 3);
2634     if (misaligned) {
2635       // Adjust the base so the displacement meets the DS-form alignment that
2636       // lwa requires (a multiple of 4). TODO: use scratch if dst is unsuitable.
2637       DCHECK(dst != r0);
2638       addi(dst, mem.ra(), Operand((offset & 3) - 4));
2639       lwa(dst, MemOperand(dst, (offset & ~3) + 4));
2640     } else {
2641       lwa(dst, mem);
2642     }
2643 #else
2644     lwz(dst, mem);
2645 #endif
2646   }
2647 }
2648 
2649 
2650 // Variable length depending on whether offset fits into immediate field
2651 // MemOperand currently only supports d-form
2652 void MacroAssembler::LoadWord(Register dst, const MemOperand& mem,
2653                               Register scratch) {
2654   Register base = mem.ra();
2655   int offset = mem.offset();
2656 
2657   if (!is_int16(offset)) {
2658     LoadIntLiteral(scratch, offset);
2659     lwzx(dst, MemOperand(base, scratch));
2660   } else {
2661     lwz(dst, mem);
2662   }
2663 }
2664 
2665 
2666 // Variable length depending on whether offset fits into immediate field
2667 // MemOperand currently only supports d-form
2668 void MacroAssembler::StoreWord(Register src, const MemOperand& mem,
2669                                Register scratch) {
2670   Register base = mem.ra();
2671   int offset = mem.offset();
2672 
2673   if (!is_int16(offset)) {
2674     LoadIntLiteral(scratch, offset);
2675     stwx(src, MemOperand(base, scratch));
2676   } else {
2677     stw(src, mem);
2678   }
2679 }
2680 
2681 
2682 void MacroAssembler::LoadHalfWordArith(Register dst, const MemOperand& mem,
2683                                        Register scratch) {
2684   int offset = mem.offset();
2685 
2686   if (!is_int16(offset)) {
2687     DCHECK(scratch != no_reg);
2688     mov(scratch, Operand(offset));
2689     lhax(dst, MemOperand(mem.ra(), scratch));
2690   } else {
2691     lha(dst, mem);
2692   }
2693 }
2694 
2695 
2696 // Variable length depending on whether offset fits into immediate field
2697 // MemOperand currently only supports d-form
2698 void MacroAssembler::LoadHalfWord(Register dst, const MemOperand& mem,
2699                                   Register scratch) {
2700   Register base = mem.ra();
2701   int offset = mem.offset();
2702 
2703   if (!is_int16(offset)) {
2704     LoadIntLiteral(scratch, offset);
2705     lhzx(dst, MemOperand(base, scratch));
2706   } else {
2707     lhz(dst, mem);
2708   }
2709 }
2710 
2711 
2712 // Variable length depending on whether offset fits into immediate field
2713 // MemOperand currently only supports d-form
2714 void MacroAssembler::StoreHalfWord(Register src, const MemOperand& mem,
2715                                    Register scratch) {
2716   Register base = mem.ra();
2717   int offset = mem.offset();
2718 
2719   if (!is_int16(offset)) {
2720     LoadIntLiteral(scratch, offset);
2721     sthx(src, MemOperand(base, scratch));
2722   } else {
2723     sth(src, mem);
2724   }
2725 }
2726 
2727 
2728 // Variable length depending on whether offset fits into immediate field
2729 // MemOperand currently only supports d-form
2730 void MacroAssembler::LoadByte(Register dst, const MemOperand& mem,
2731                               Register scratch) {
2732   Register base = mem.ra();
2733   int offset = mem.offset();
2734 
2735   if (!is_int16(offset)) {
2736     LoadIntLiteral(scratch, offset);
2737     lbzx(dst, MemOperand(base, scratch));
2738   } else {
2739     lbz(dst, mem);
2740   }
2741 }
2742 
2743 
2744 // Variable length depending on whether offset fits into immediate field
2745 // MemOperand currently only supports d-form
2746 void MacroAssembler::StoreByte(Register src, const MemOperand& mem,
2747                                Register scratch) {
2748   Register base = mem.ra();
2749   int offset = mem.offset();
2750 
2751   if (!is_int16(offset)) {
2752     LoadIntLiteral(scratch, offset);
2753     stbx(src, MemOperand(base, scratch));
2754   } else {
2755     stb(src, mem);
2756   }
2757 }
2758 
2759 
2760 void MacroAssembler::LoadRepresentation(Register dst, const MemOperand& mem,
2761                                         Representation r, Register scratch) {
2762   DCHECK(!r.IsDouble());
2763   if (r.IsInteger8()) {
2764     LoadByte(dst, mem, scratch);
2765     extsb(dst, dst);
2766   } else if (r.IsUInteger8()) {
2767     LoadByte(dst, mem, scratch);
2768   } else if (r.IsInteger16()) {
2769     LoadHalfWordArith(dst, mem, scratch);
2770   } else if (r.IsUInteger16()) {
2771     LoadHalfWord(dst, mem, scratch);
2772 #if V8_TARGET_ARCH_PPC64
2773   } else if (r.IsInteger32()) {
2774     LoadWordArith(dst, mem, scratch);
2775 #endif
2776   } else {
2777     LoadP(dst, mem, scratch);
2778   }
2779 }
2780 
2781 
2782 void MacroAssembler::StoreRepresentation(Register src, const MemOperand& mem,
2783                                          Representation r, Register scratch) {
2784   DCHECK(!r.IsDouble());
2785   if (r.IsInteger8() || r.IsUInteger8()) {
2786     StoreByte(src, mem, scratch);
2787   } else if (r.IsInteger16() || r.IsUInteger16()) {
2788     StoreHalfWord(src, mem, scratch);
2789 #if V8_TARGET_ARCH_PPC64
2790   } else if (r.IsInteger32()) {
2791     StoreWord(src, mem, scratch);
2792 #endif
2793   } else {
2794     if (r.IsHeapObject()) {
2795       AssertNotSmi(src);
2796     } else if (r.IsSmi()) {
2797       AssertSmi(src);
2798     }
2799     StoreP(src, mem, scratch);
2800   }
2801 }
2802 
2803 void TurboAssembler::LoadDouble(DoubleRegister dst, const MemOperand& mem,
2804                                 Register scratch) {
2805   Register base = mem.ra();
2806   int offset = mem.offset();
2807 
2808   if (!is_int16(offset)) {
2809     mov(scratch, Operand(offset));
2810     lfdx(dst, MemOperand(base, scratch));
2811   } else {
2812     lfd(dst, mem);
2813   }
2814 }
2815 
2816 void MacroAssembler::LoadDoubleU(DoubleRegister dst, const MemOperand& mem,
2817                                 Register scratch) {
2818   Register base = mem.ra();
2819   int offset = mem.offset();
2820 
2821   if (!is_int16(offset)) {
2822     mov(scratch, Operand(offset));
2823     lfdux(dst, MemOperand(base, scratch));
2824   } else {
2825     lfdu(dst, mem);
2826   }
2827 }
2828 
2829 void TurboAssembler::LoadSingle(DoubleRegister dst, const MemOperand& mem,
2830                                 Register scratch) {
2831   Register base = mem.ra();
2832   int offset = mem.offset();
2833 
2834   if (!is_int16(offset)) {
2835     mov(scratch, Operand(offset));
2836     lfsx(dst, MemOperand(base, scratch));
2837   } else {
2838     lfs(dst, mem);
2839   }
2840 }
2841 
2842 void TurboAssembler::LoadSingleU(DoubleRegister dst, const MemOperand& mem,
2843                                  Register scratch) {
2844   Register base = mem.ra();
2845   int offset = mem.offset();
2846 
2847   if (!is_int16(offset)) {
2848     mov(scratch, Operand(offset));
2849     lfsux(dst, MemOperand(base, scratch));
2850   } else {
2851     lfsu(dst, mem);
2852   }
2853 }
2854 
2855 void TurboAssembler::StoreDouble(DoubleRegister src, const MemOperand& mem,
2856                                  Register scratch) {
2857   Register base = mem.ra();
2858   int offset = mem.offset();
2859 
2860   if (!is_int16(offset)) {
2861     mov(scratch, Operand(offset));
2862     stfdx(src, MemOperand(base, scratch));
2863   } else {
2864     stfd(src, mem);
2865   }
2866 }
2867 
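// Update-form counterpart of StoreDouble (stfdu/stfdux).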
void TurboAssembler::StoreDoubleU(DoubleRegister src, const MemOperand& mem,
                                  Register scratch) {
  Register base = mem.ra();
  int offset = mem.offset();

  if (!is_int16(offset)) {
    mov(scratch, Operand(offset));
    stfdux(src, MemOperand(base, scratch));
  } else {
    stfdu(src, mem);
  }
}

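// Store |src| converted to single-precision format (stfs/stfsx).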
void TurboAssembler::StoreSingle(DoubleRegister src, const MemOperand& mem,
                                 Register scratch) {
  Register base = mem.ra();
  int offset = mem.offset();

  if (!is_int16(offset)) {
    mov(scratch, Operand(offset));
    stfsx(src, MemOperand(base, scratch));
  } else {
    stfs(src, mem);
  }
}

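// Update-form counterpart of StoreSingle (stfsu/stfsux).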
void TurboAssembler::StoreSingleU(DoubleRegister src, const MemOperand& mem,
                                  Register scratch) {
  Register base = mem.ra();
  int offset = mem.offset();

  if (!is_int16(offset)) {
    mov(scratch, Operand(offset));
    stfsux(src, MemOperand(base, scratch));
  } else {
    stfsu(src, mem);
  }
}

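// Return the first allocatable general register that is not one of the given
// registers; invalid (no_reg) arguments are ignored.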
Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3,
                                   Register reg4, Register reg5,
                                   Register reg6) {
  RegList regs = 0;
  if (reg1.is_valid()) regs |= reg1.bit();
  if (reg2.is_valid()) regs |= reg2.bit();
  if (reg3.is_valid()) regs |= reg3.bit();
  if (reg4.is_valid()) regs |= reg4.bit();
  if (reg5.is_valid()) regs |= reg5.bit();
  if (reg6.is_valid()) regs |= reg6.bit();

  const RegisterConfiguration* config = RegisterConfiguration::Default();
  for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
    int code = config->GetAllocatableGeneralCode(i);
    Register candidate = Register::from_code(code);
    if (regs & candidate.bit()) continue;
    return candidate;
  }
  UNREACHABLE();
}

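// Swap two pointer-sized registers via |scratch|.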
void TurboAssembler::SwapP(Register src, Register dst, Register scratch) {
  if (src == dst) return;
  DCHECK(!AreAliased(src, dst, scratch));
  mr(scratch, src);
  mr(src, dst);
  mr(dst, scratch);
}

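// Swap a register with a memory slot, using r0 as the memory scratch.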
void TurboAssembler::SwapP(Register src, MemOperand dst, Register scratch) {
  if (dst.ra() != r0) DCHECK(!AreAliased(src, dst.ra(), scratch));
  if (dst.rb() != r0) DCHECK(!AreAliased(src, dst.rb(), scratch));
  DCHECK(!AreAliased(src, scratch));
  mr(scratch, src);
  LoadP(src, dst, r0);
  StoreP(scratch, dst, r0);
}

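// Swap two memory slots. If at least one offset fits in 16 bits, the swap is
// done entirely in the two scratch registers; otherwise one value is spilled
// to the stack so the scratches can double as address temporaries.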
void TurboAssembler::SwapP(MemOperand src, MemOperand dst, Register scratch_0,
                           Register scratch_1) {
  if (src.ra() != r0 && src.ra().is_valid())
    DCHECK(!AreAliased(src.ra(), scratch_0, scratch_1));
  if (src.rb() != r0 && src.rb().is_valid())
    DCHECK(!AreAliased(src.rb(), scratch_0, scratch_1));
  if (dst.ra() != r0 && dst.ra().is_valid())
    DCHECK(!AreAliased(dst.ra(), scratch_0, scratch_1));
  if (dst.rb() != r0 && dst.rb().is_valid())
    DCHECK(!AreAliased(dst.rb(), scratch_0, scratch_1));
  DCHECK(!AreAliased(scratch_0, scratch_1));
  if (is_int16(src.offset()) || is_int16(dst.offset())) {
    if (!is_int16(src.offset())) {
      // Ensure that src is the operand with the in-range offset.
      MemOperand temp = src;
      src = dst;
      dst = temp;
    }
    LoadP(scratch_1, dst, scratch_0);
    LoadP(scratch_0, src);
    StoreP(scratch_1, src);
    StoreP(scratch_0, dst, scratch_1);
  } else {
    LoadP(scratch_1, dst, scratch_0);
    push(scratch_1);
    LoadP(scratch_0, src, scratch_1);
    StoreP(scratch_0, dst, scratch_1);
    pop(scratch_1);
    StoreP(scratch_1, src, scratch_0);
  }
}

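// Float32 swaps mirror the pointer-sized variants, using fmr and the
// single-precision load/store helpers with r0 as the memory scratch.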
void TurboAssembler::SwapFloat32(DoubleRegister src, DoubleRegister dst,
                                 DoubleRegister scratch) {
  if (src == dst) return;
  DCHECK(!AreAliased(src, dst, scratch));
  fmr(scratch, src);
  fmr(src, dst);
  fmr(dst, scratch);
}

void TurboAssembler::SwapFloat32(DoubleRegister src, MemOperand dst,
                                 DoubleRegister scratch) {
  DCHECK(!AreAliased(src, scratch));
  fmr(scratch, src);
  LoadSingle(src, dst, r0);
  StoreSingle(scratch, dst, r0);
}

void TurboAssembler::SwapFloat32(MemOperand src, MemOperand dst,
                                 DoubleRegister scratch_0,
                                 DoubleRegister scratch_1) {
  DCHECK(!AreAliased(scratch_0, scratch_1));
  LoadSingle(scratch_0, src, r0);
  LoadSingle(scratch_1, dst, r0);
  StoreSingle(scratch_0, dst, r0);
  StoreSingle(scratch_1, src, r0);
}

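// Double-precision swaps, analogous to the Float32 variants above.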
void TurboAssembler::SwapDouble(DoubleRegister src, DoubleRegister dst,
                                DoubleRegister scratch) {
  if (src == dst) return;
  DCHECK(!AreAliased(src, dst, scratch));
  fmr(scratch, src);
  fmr(src, dst);
  fmr(dst, scratch);
}

void TurboAssembler::SwapDouble(DoubleRegister src, MemOperand dst,
                                DoubleRegister scratch) {
  DCHECK(!AreAliased(src, scratch));
  fmr(scratch, src);
  LoadDouble(src, dst, r0);
  StoreDouble(scratch, dst, r0);
}

void TurboAssembler::SwapDouble(MemOperand src, MemOperand dst,
                                DoubleRegister scratch_0,
                                DoubleRegister scratch_1) {
  DCHECK(!AreAliased(scratch_0, scratch_1));
  LoadDouble(scratch_0, src, r0);
  LoadDouble(scratch_1, dst, r0);
  StoreDouble(scratch_0, dst, r0);
  StoreDouble(scratch_1, src, r0);
}

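// Debug-only check for register aliasing: if the number of valid arguments
// differs from the number of distinct bits they set, at least two alias.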
#ifdef DEBUG
bool AreAliased(Register reg1, Register reg2, Register reg3, Register reg4,
                Register reg5, Register reg6, Register reg7, Register reg8,
                Register reg9, Register reg10) {
  int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() + reg3.is_valid() +
                        reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
                        reg7.is_valid() + reg8.is_valid() + reg9.is_valid() +
                        reg10.is_valid();

  RegList regs = 0;
  if (reg1.is_valid()) regs |= reg1.bit();
  if (reg2.is_valid()) regs |= reg2.bit();
  if (reg3.is_valid()) regs |= reg3.bit();
  if (reg4.is_valid()) regs |= reg4.bit();
  if (reg5.is_valid()) regs |= reg5.bit();
  if (reg6.is_valid()) regs |= reg6.bit();
  if (reg7.is_valid()) regs |= reg7.bit();
  if (reg8.is_valid()) regs |= reg8.bit();
  if (reg9.is_valid()) regs |= reg9.bit();
  if (reg10.is_valid()) regs |= reg10.bit();
  int n_of_non_aliasing_regs = NumRegs(regs);

  return n_of_valid_regs != n_of_non_aliasing_regs;
}

bool AreAliased(DoubleRegister reg1, DoubleRegister reg2, DoubleRegister reg3,
                DoubleRegister reg4, DoubleRegister reg5, DoubleRegister reg6,
                DoubleRegister reg7, DoubleRegister reg8, DoubleRegister reg9,
                DoubleRegister reg10) {
  int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() + reg3.is_valid() +
                        reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
                        reg7.is_valid() + reg8.is_valid() + reg9.is_valid() +
                        reg10.is_valid();

  RegList regs = 0;
  if (reg1.is_valid()) regs |= reg1.bit();
  if (reg2.is_valid()) regs |= reg2.bit();
  if (reg3.is_valid()) regs |= reg3.bit();
  if (reg4.is_valid()) regs |= reg4.bit();
  if (reg5.is_valid()) regs |= reg5.bit();
  if (reg6.is_valid()) regs |= reg6.bit();
  if (reg7.is_valid()) regs |= reg7.bit();
  if (reg8.is_valid()) regs |= reg8.bit();
  if (reg9.is_valid()) regs |= reg9.bit();
  if (reg10.is_valid()) regs |= reg10.bit();
  int n_of_non_aliasing_regs = NumRegs(regs);

  return n_of_valid_regs != n_of_non_aliasing_regs;
}
#endif

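// Reset the Spectre mitigation poison to all ones, the state in which masking
// a value with the poison register is a no-op.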
void TurboAssembler::ResetSpeculationPoisonRegister() {
  mov(kSpeculationPoisonRegister, Operand(-1));
}

}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_PPC