// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_PPC_MACRO_ASSEMBLER_PPC_H_
#define V8_PPC_MACRO_ASSEMBLER_PPC_H_

#include "src/assembler.h"
#include "src/bailout-reason.h"
#include "src/double.h"
#include "src/globals.h"
#include "src/ppc/assembler-ppc.h"

namespace v8 {
namespace internal {

// Give alias names to registers for calling conventions.
constexpr Register kReturnRegister0 = r3;
constexpr Register kReturnRegister1 = r4;
constexpr Register kReturnRegister2 = r5;
constexpr Register kJSFunctionRegister = r4;
constexpr Register kContextRegister = r30;
constexpr Register kAllocateSizeRegister = r4;
constexpr Register kSpeculationPoisonRegister = r14;
constexpr Register kInterpreterAccumulatorRegister = r3;
constexpr Register kInterpreterBytecodeOffsetRegister = r15;
constexpr Register kInterpreterBytecodeArrayRegister = r16;
constexpr Register kInterpreterDispatchTableRegister = r17;
constexpr Register kJavaScriptCallArgCountRegister = r3;
constexpr Register kJavaScriptCallNewTargetRegister = r6;
constexpr Register kJavaScriptCallCodeStartRegister = r5;
constexpr Register kOffHeapTrampolineRegister = ip;
constexpr Register kRuntimeCallFunctionRegister = r4;
constexpr Register kRuntimeCallArgCountRegister = r3;
constexpr Register kWasmInstanceRegister = r10;

// ----------------------------------------------------------------------------
// Static helper functions

// Generate a MemOperand for loading a field from an object.
inline MemOperand FieldMemOperand(Register object, int offset) {
  return MemOperand(object, offset - kHeapObjectTag);
}
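
// A minimal usage sketch (illustrative only; assumes the usual "__" shorthand
// for ACCESS_MASM(masm) in a .cc file, and uses HeapObject::kMapOffset just as
// an example field offset):
//   __ LoadP(r4, FieldMemOperand(r3, HeapObject::kMapOffset));
// The helper subtracts kHeapObjectTag so the memory access hits the object's
// untagged address.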


// Flags used for AllocateHeapNumber
enum TaggingMode {
  // Tag the result.
  TAG_RESULT,
  // Don't tag the result.
  DONT_TAG_RESULT
};


enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };


Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2 = no_reg,
                                   Register reg3 = no_reg,
                                   Register reg4 = no_reg,
                                   Register reg5 = no_reg,
                                   Register reg6 = no_reg);


#ifdef DEBUG
bool AreAliased(Register reg1, Register reg2, Register reg3 = no_reg,
                Register reg4 = no_reg, Register reg5 = no_reg,
                Register reg6 = no_reg, Register reg7 = no_reg,
                Register reg8 = no_reg, Register reg9 = no_reg,
                Register reg10 = no_reg);
bool AreAliased(DoubleRegister reg1, DoubleRegister reg2,
                DoubleRegister reg3 = no_dreg, DoubleRegister reg4 = no_dreg,
                DoubleRegister reg5 = no_dreg, DoubleRegister reg6 = no_dreg,
                DoubleRegister reg7 = no_dreg, DoubleRegister reg8 = no_dreg,
                DoubleRegister reg9 = no_dreg, DoubleRegister reg10 = no_dreg);
#endif

// These exist to provide portability between 32- and 64-bit builds.
#if V8_TARGET_ARCH_PPC64
#define LoadPX ldx
#define LoadPUX ldux
#define StorePX stdx
#define StorePUX stdux
#define ShiftLeftImm sldi
#define ShiftRightImm srdi
#define ClearLeftImm clrldi
#define ClearRightImm clrrdi
#define ShiftRightArithImm sradi
#define ShiftLeft_ sld
#define ShiftRight_ srd
#define ShiftRightArith srad
#define Mul mulld
#define Div divd
#else
#define LoadPX lwzx
#define LoadPUX lwzux
#define StorePX stwx
#define StorePUX stwux
#define ShiftLeftImm slwi
#define ShiftRightImm srwi
#define ClearLeftImm clrlwi
#define ClearRightImm clrrwi
#define ShiftRightArithImm srawi
#define ShiftLeft_ slw
#define ShiftRight_ srw
#define ShiftRightArith sraw
#define Mul mullw
#define Div divw
#endif
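
// A usage sketch (illustrative only): code written once against these macros,
// e.g.
//   ShiftLeftImm(r4, r3, Operand(kPointerSizeLog2));
//   LoadPX(r5, MemOperand(r6, r4));
// assembles to sldi/ldx on PPC64 and to slwi/lwzx on 32-bit PPC, so
// pointer-width sequences only need to be written once.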

class TurboAssembler : public Assembler {
 public:
  TurboAssembler(Isolate* isolate, void* buffer, int buffer_size,
                 CodeObjectRequired create_code_object);

  void set_has_frame(bool value) { has_frame_ = value; }
  bool has_frame() { return has_frame_; }

  Isolate* isolate() const { return isolate_; }

  Handle<HeapObject> CodeObject() {
    DCHECK(!code_object_.is_null());
    return code_object_;
  }
  // Converts the integer (untagged smi) in |src| to a double, storing
  // the result to |dst|
  void ConvertIntToDouble(Register src, DoubleRegister dst);

  // Converts the unsigned integer (untagged smi) in |src| to
  // a double, storing the result to |dst|
  void ConvertUnsignedIntToDouble(Register src, DoubleRegister dst);

  // Converts the integer (untagged smi) in |src| to
  // a float, storing the result in |dst|
  void ConvertIntToFloat(Register src, DoubleRegister dst);

  // Converts the unsigned integer (untagged smi) in |src| to
  // a float, storing the result in |dst|
  void ConvertUnsignedIntToFloat(Register src, DoubleRegister dst);

#if V8_TARGET_ARCH_PPC64
  void ConvertInt64ToFloat(Register src, DoubleRegister double_dst);
  void ConvertInt64ToDouble(Register src, DoubleRegister double_dst);
  void ConvertUnsignedInt64ToFloat(Register src, DoubleRegister double_dst);
  void ConvertUnsignedInt64ToDouble(Register src, DoubleRegister double_dst);
#endif

  // Converts the double_input to an integer.  Note that, upon return,
  // the contents of double_dst will also hold the fixed point representation.
  void ConvertDoubleToInt64(const DoubleRegister double_input,
#if !V8_TARGET_ARCH_PPC64
                            const Register dst_hi,
#endif
                            const Register dst, const DoubleRegister double_dst,
                            FPRoundingMode rounding_mode = kRoundToZero);

#if V8_TARGET_ARCH_PPC64
  // Converts the double_input to an unsigned integer.  Note that, upon return,
  // the contents of double_dst will also hold the fixed point representation.
  void ConvertDoubleToUnsignedInt64(
      const DoubleRegister double_input, const Register dst,
      const DoubleRegister double_dst,
      FPRoundingMode rounding_mode = kRoundToZero);
#endif

  // Activation support.
  void EnterFrame(StackFrame::Type type,
                  bool load_constant_pool_pointer_reg = false);

  // Returns the pc offset at which the frame ends.
  int LeaveFrame(StackFrame::Type type, int stack_adjustment = 0);

  // Push a fixed frame, consisting of lr, fp, constant pool.
  void PushCommonFrame(Register marker_reg = no_reg);

  // Generates function and stub prologue code.
  void StubPrologue(StackFrame::Type type);
  void Prologue();

  // Push a standard frame, consisting of lr, fp, constant pool,
  // context and JS function
  void PushStandardFrame(Register function_reg);

  // Restore caller's frame pointer and return address prior to being
  // overwritten by tail call stack preparation.
  void RestoreFrameStateForTailCall();

  // Get the actual activation frame alignment for target environment.
  static int ActivationFrameAlignment();

  void InitializeRootRegister() {
    ExternalReference roots_array_start =
        ExternalReference::roots_array_start(isolate());
    mov(kRootRegister, Operand(roots_array_start));
  }

  // These exist to provide portability between 32- and 64-bit builds.
  void LoadP(Register dst, const MemOperand& mem, Register scratch = no_reg);
  void LoadPU(Register dst, const MemOperand& mem, Register scratch = no_reg);
  void LoadWordArith(Register dst, const MemOperand& mem,
                     Register scratch = no_reg);
  void StoreP(Register src, const MemOperand& mem, Register scratch = no_reg);
  void StorePU(Register src, const MemOperand& mem, Register scratch = no_reg);

  void LoadDouble(DoubleRegister dst, const MemOperand& mem,
                  Register scratch = no_reg);
  void LoadDoubleLiteral(DoubleRegister result, Double value, Register scratch);

  // load a literal signed int value <value> to GPR <dst>
  void LoadIntLiteral(Register dst, int value);
  // load an SMI value <value> to GPR <dst>
  void LoadSmiLiteral(Register dst, Smi* smi);

  void LoadSingle(DoubleRegister dst, const MemOperand& mem,
                  Register scratch = no_reg);
  void LoadSingleU(DoubleRegister dst, const MemOperand& mem,
                   Register scratch = no_reg);
  void LoadPC(Register dst);
  void ComputeCodeStartAddress(Register dst);

  bool root_array_available() const { return root_array_available_; }
  void set_root_array_available(bool v) { root_array_available_ = v; }

  void StoreDouble(DoubleRegister src, const MemOperand& mem,
                   Register scratch = no_reg);
  void StoreDoubleU(DoubleRegister src, const MemOperand& mem,
                    Register scratch = no_reg);

  void StoreSingle(DoubleRegister src, const MemOperand& mem,
                   Register scratch = no_reg);
  void StoreSingleU(DoubleRegister src, const MemOperand& mem,
                    Register scratch = no_reg);

  void Cmpli(Register src1, const Operand& src2, Register scratch,
             CRegister cr = cr7);
  void Cmpwi(Register src1, const Operand& src2, Register scratch,
             CRegister cr = cr7);
  // Set new rounding mode RN to FPSCR
  void SetRoundingMode(FPRoundingMode RN);

  // reset rounding mode to default (kRoundToNearest)
  void ResetRoundingMode();
  void Add(Register dst, Register src, intptr_t value, Register scratch);

  void Push(Register src) { push(src); }
  // Push a handle.
  void Push(Handle<HeapObject> handle);
  void Push(Smi* smi);

  // Push two registers.  Pushes leftmost register first (to highest address).
  void Push(Register src1, Register src2) {
    StorePU(src2, MemOperand(sp, -2 * kPointerSize));
    StoreP(src1, MemOperand(sp, kPointerSize));
  }

  // Push three registers.  Pushes leftmost register first (to highest address).
  void Push(Register src1, Register src2, Register src3) {
    StorePU(src3, MemOperand(sp, -3 * kPointerSize));
    StoreP(src2, MemOperand(sp, kPointerSize));
    StoreP(src1, MemOperand(sp, 2 * kPointerSize));
  }

  // Push four registers.  Pushes leftmost register first (to highest address).
  void Push(Register src1, Register src2, Register src3, Register src4) {
    StorePU(src4, MemOperand(sp, -4 * kPointerSize));
    StoreP(src3, MemOperand(sp, kPointerSize));
    StoreP(src2, MemOperand(sp, 2 * kPointerSize));
    StoreP(src1, MemOperand(sp, 3 * kPointerSize));
  }

  // Push five registers.  Pushes leftmost register first (to highest address).
  void Push(Register src1, Register src2, Register src3, Register src4,
            Register src5) {
    StorePU(src5, MemOperand(sp, -5 * kPointerSize));
    StoreP(src4, MemOperand(sp, kPointerSize));
    StoreP(src3, MemOperand(sp, 2 * kPointerSize));
    StoreP(src2, MemOperand(sp, 3 * kPointerSize));
    StoreP(src1, MemOperand(sp, 4 * kPointerSize));
  }

  void Pop(Register dst) { pop(dst); }

  // Pop two registers. Pops rightmost register first (from lower address).
  void Pop(Register src1, Register src2) {
    LoadP(src2, MemOperand(sp, 0));
    LoadP(src1, MemOperand(sp, kPointerSize));
    addi(sp, sp, Operand(2 * kPointerSize));
  }

  // Pop three registers.  Pops rightmost register first (from lower address).
  void Pop(Register src1, Register src2, Register src3) {
    LoadP(src3, MemOperand(sp, 0));
    LoadP(src2, MemOperand(sp, kPointerSize));
    LoadP(src1, MemOperand(sp, 2 * kPointerSize));
    addi(sp, sp, Operand(3 * kPointerSize));
  }

  // Pop four registers.  Pops rightmost register first (from lower address).
  void Pop(Register src1, Register src2, Register src3, Register src4) {
    LoadP(src4, MemOperand(sp, 0));
    LoadP(src3, MemOperand(sp, kPointerSize));
    LoadP(src2, MemOperand(sp, 2 * kPointerSize));
    LoadP(src1, MemOperand(sp, 3 * kPointerSize));
    addi(sp, sp, Operand(4 * kPointerSize));
  }

  // Pop five registers.  Pops rightmost register first (from lower address).
  void Pop(Register src1, Register src2, Register src3, Register src4,
           Register src5) {
    LoadP(src5, MemOperand(sp, 0));
    LoadP(src4, MemOperand(sp, kPointerSize));
    LoadP(src3, MemOperand(sp, 2 * kPointerSize));
    LoadP(src2, MemOperand(sp, 3 * kPointerSize));
    LoadP(src1, MemOperand(sp, 4 * kPointerSize));
    addi(sp, sp, Operand(5 * kPointerSize));
  }
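
  // A usage sketch (illustrative only): the multi-register Push/Pop overloads
  // preserve left-to-right ordering, so
  //   Push(r3, r4, r5);
  //   ...
  //   Pop(r3, r4, r5);
  // saves and restores all three registers, with r3 ending up at the highest
  // stack address, just as repeated single-register pushes would.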

  void SaveRegisters(RegList registers);
  void RestoreRegisters(RegList registers);

  void CallRecordWriteStub(Register object, Register address,
                           RememberedSetAction remembered_set_action,
                           SaveFPRegsMode fp_mode);

  void MultiPush(RegList regs, Register location = sp);
  void MultiPop(RegList regs, Register location = sp);

  void MultiPushDoubles(RegList dregs, Register location = sp);
  void MultiPopDoubles(RegList dregs, Register location = sp);

  // Calculate how much stack space (in bytes) is required to store caller
  // registers excluding those specified in the arguments.
  int RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
                                      Register exclusion1 = no_reg,
                                      Register exclusion2 = no_reg,
                                      Register exclusion3 = no_reg) const;

  // Push caller saved registers on the stack, and return the number of bytes
  // the stack pointer is adjusted by.
  int PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
                      Register exclusion2 = no_reg,
                      Register exclusion3 = no_reg);
  // Restore caller saved registers from the stack, and return the number of
  // bytes the stack pointer is adjusted by.
  int PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
                     Register exclusion2 = no_reg,
                     Register exclusion3 = no_reg);

  // Load an object from the root table.
  void LoadRoot(Register destination, Heap::RootListIndex index,
                Condition cond = al);

  void SwapP(Register src, Register dst, Register scratch);
  void SwapP(Register src, MemOperand dst, Register scratch);
  void SwapP(MemOperand src, MemOperand dst, Register scratch_0,
             Register scratch_1);
  void SwapFloat32(DoubleRegister src, DoubleRegister dst,
                   DoubleRegister scratch);
  void SwapFloat32(DoubleRegister src, MemOperand dst, DoubleRegister scratch);
  void SwapFloat32(MemOperand src, MemOperand dst, DoubleRegister scratch_0,
                   DoubleRegister scratch_1);
  void SwapDouble(DoubleRegister src, DoubleRegister dst,
                  DoubleRegister scratch);
  void SwapDouble(DoubleRegister src, MemOperand dst, DoubleRegister scratch);
  void SwapDouble(MemOperand src, MemOperand dst, DoubleRegister scratch_0,
                  DoubleRegister scratch_1);

  // Before calling a C-function from generated code, align arguments on stack.
  // After aligning the frame, non-register arguments must be stored in
  // sp[0], sp[4], etc., not pushed. The argument count assumes all arguments
  // are word sized. If double arguments are used, this function assumes that
  // all double arguments are stored before core registers; otherwise the
  // correct alignment of the double values is not guaranteed.
  // Some compilers/platforms require the stack to be aligned when calling
  // C++ code.
  // Needs a scratch register to do some arithmetic. This register will be
  // trashed.
  void PrepareCallCFunction(int num_reg_arguments, int num_double_registers,
                            Register scratch);
  void PrepareCallCFunction(int num_reg_arguments, Register scratch);

  void PrepareForTailCall(const ParameterCount& callee_args_count,
                          Register caller_args_count_reg, Register scratch0,
                          Register scratch1);

  // There are two ways of passing double arguments on ARM, depending on
  // whether soft or hard floating point ABI is used. These functions
  // abstract parameter passing for the three different ways we call
  // C functions from generated code.
  void MovToFloatParameter(DoubleRegister src);
  void MovToFloatParameters(DoubleRegister src1, DoubleRegister src2);
  void MovToFloatResult(DoubleRegister src);

  // Calls a C function and cleans up the space for arguments allocated
  // by PrepareCallCFunction. The called function is not allowed to trigger a
  // garbage collection, since that might move the code and invalidate the
  // return address (unless this is somehow accounted for by the called
  // function).
  void CallCFunction(ExternalReference function, int num_arguments);
  void CallCFunction(Register function, int num_arguments);
  void CallCFunction(ExternalReference function, int num_reg_arguments,
                     int num_double_arguments);
  void CallCFunction(Register function, int num_reg_arguments,
                     int num_double_arguments);
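
  // A usage sketch (illustrative only; ExternalReference::some_c_helper is a
  // hypothetical name standing in for a real external reference): a call that
  // passes two register arguments looks like
  //   PrepareCallCFunction(2, r0);
  //   CallCFunction(ExternalReference::some_c_helper(isolate()), 2);
  // PrepareCallCFunction performs the required stack alignment, and
  // CallCFunction undoes that adjustment after the call returns.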

  // TODO(jgruber): Remove in favor of MacroAssembler::CallRuntime.
  void CallRuntimeDelayed(Zone* zone, Runtime::FunctionId fid,
                          SaveFPRegsMode save_doubles = kDontSaveFPRegs);
  void MovFromFloatParameter(DoubleRegister dst);
  void MovFromFloatResult(DoubleRegister dst);

  // Calls Abort(msg) if the condition cond is not satisfied.
  // Use --debug_code to enable.
  void Assert(Condition cond, AbortReason reason, CRegister cr = cr7);

  // Like Assert(), but always enabled.
  void Check(Condition cond, AbortReason reason, CRegister cr = cr7);

  // Print a message to stdout and abort execution.
  void Abort(AbortReason reason);

  inline bool AllowThisStubCall(CodeStub* stub);
#if !V8_TARGET_ARCH_PPC64
  void ShiftLeftPair(Register dst_low, Register dst_high, Register src_low,
                     Register src_high, Register scratch, Register shift);
  void ShiftLeftPair(Register dst_low, Register dst_high, Register src_low,
                     Register src_high, uint32_t shift);
  void ShiftRightPair(Register dst_low, Register dst_high, Register src_low,
                      Register src_high, Register scratch, Register shift);
  void ShiftRightPair(Register dst_low, Register dst_high, Register src_low,
                      Register src_high, uint32_t shift);
  void ShiftRightAlgPair(Register dst_low, Register dst_high, Register src_low,
                         Register src_high, Register scratch, Register shift);
  void ShiftRightAlgPair(Register dst_low, Register dst_high, Register src_low,
                         Register src_high, uint32_t shift);
#endif

#ifdef V8_EMBEDDED_BUILTINS
  void LookupConstant(Register destination, Handle<Object> object);
  void LookupExternalReference(Register destination,
                               ExternalReference reference);
#endif  // V8_EMBEDDED_BUILTINS

  // Returns the size of a call in instructions. Note, the value returned is
  // only valid as long as no entries are added to the constant pool between
  // checking the call size and emitting the actual call.
  static int CallSize(Register target);
  int CallSize(Address target, RelocInfo::Mode rmode, Condition cond = al);

  // Jump, Call, and Ret pseudo instructions implementing inter-working.
  void Jump(Register target);
  void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al,
            CRegister cr = cr7);
  void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al,
            CRegister cr = cr7);
  void Call(Register target);
  void Call(Address target, RelocInfo::Mode rmode, Condition cond = al);
  int CallSize(Handle<Code> code,
               RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
               Condition cond = al);
  void Call(Handle<Code> code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
            Condition cond = al);
  void Call(Label* target);

  void CallForDeoptimization(Address target, RelocInfo::Mode rmode) {
    Call(target, rmode);
  }

  // Emit code to discard a non-negative number of pointer-sized elements
  // from the stack, clobbering only the sp register.
  void Drop(int count);
  void Drop(Register count, Register scratch = r0);

  void Ret() { blr(); }
  void Ret(Condition cond, CRegister cr = cr7) { bclr(cond, cr); }
  void Ret(int drop) {
    Drop(drop);
    blr();
  }

  // If the value is a NaN, canonicalize the value; otherwise, do nothing.
  void CanonicalizeNaN(const DoubleRegister dst, const DoubleRegister src);
  void CanonicalizeNaN(const DoubleRegister value) {
    CanonicalizeNaN(value, value);
  }
  void CheckPageFlag(Register object, Register scratch, int mask, Condition cc,
                     Label* condition_met);

  // Move values between integer and floating point registers.
  void MovIntToDouble(DoubleRegister dst, Register src, Register scratch);
  void MovUnsignedIntToDouble(DoubleRegister dst, Register src,
                              Register scratch);
  void MovInt64ToDouble(DoubleRegister dst,
#if !V8_TARGET_ARCH_PPC64
                        Register src_hi,
#endif
                        Register src);
#if V8_TARGET_ARCH_PPC64
  void MovInt64ComponentsToDouble(DoubleRegister dst, Register src_hi,
                                  Register src_lo, Register scratch);
#endif
  void InsertDoubleLow(DoubleRegister dst, Register src, Register scratch);
  void InsertDoubleHigh(DoubleRegister dst, Register src, Register scratch);
  void MovDoubleLowToInt(Register dst, DoubleRegister src);
  void MovDoubleHighToInt(Register dst, DoubleRegister src);
  void MovDoubleToInt64(
#if !V8_TARGET_ARCH_PPC64
      Register dst_hi,
#endif
      Register dst, DoubleRegister src);
  void MovIntToFloat(DoubleRegister dst, Register src);
  void MovFloatToInt(Register dst, DoubleRegister src);
  // Register move. May do nothing if the registers are identical.
  void Move(Register dst, Smi* smi) { LoadSmiLiteral(dst, smi); }
  void Move(Register dst, Handle<HeapObject> value);
  void Move(Register dst, ExternalReference reference);
  void Move(Register dst, Register src, Condition cond = al);
  void Move(DoubleRegister dst, DoubleRegister src);

  void SmiUntag(Register reg, RCBit rc = LeaveRC, int scale = 0) {
    SmiUntag(reg, reg, rc, scale);
  }

  void SmiUntag(Register dst, Register src, RCBit rc = LeaveRC, int scale = 0) {
    if (scale > kSmiShift) {
      ShiftLeftImm(dst, src, Operand(scale - kSmiShift), rc);
    } else if (scale < kSmiShift) {
      ShiftRightArithImm(dst, src, kSmiShift - scale, rc);
    } else {
      // do nothing
    }
  }
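
  // A worked example (illustrative only): with the PPC64 smi layout
  // (kSmiShift == 32), SmiUntag(r4, r3) emits ShiftRightArithImm(r4, r3, 32),
  // recovering the signed integer value. Passing scale == kPointerSizeLog2
  // instead shifts by 32 - 3 = 29 and leaves the value pre-scaled for use as
  // a pointer-sized index.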
  // ---------------------------------------------------------------------------
  // Bit testing/extraction
  //
  // Bit numbering is such that the least significant bit is bit 0
  // (for consistency between 32/64-bit).

  // Extract consecutive bits (defined by rangeStart - rangeEnd) from src
  // and, if !test, shift them into the least significant bits of dst.
  inline void ExtractBitRange(Register dst, Register src, int rangeStart,
                              int rangeEnd, RCBit rc = LeaveRC,
                              bool test = false) {
    DCHECK(rangeStart >= rangeEnd && rangeStart < kBitsPerPointer);
    int rotate = (rangeEnd == 0) ? 0 : kBitsPerPointer - rangeEnd;
    int width = rangeStart - rangeEnd + 1;
    if (rc == SetRC && rangeStart < 16 && (rangeEnd == 0 || test)) {
      // Prefer faster andi when applicable.
      andi(dst, src, Operand(((1 << width) - 1) << rangeEnd));
    } else {
#if V8_TARGET_ARCH_PPC64
      rldicl(dst, src, rotate, kBitsPerPointer - width, rc);
#else
      rlwinm(dst, src, rotate, kBitsPerPointer - width, kBitsPerPointer - 1,
             rc);
#endif
    }
  }
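
  // A usage sketch (illustrative only): extracting bits 7..4 of r3 into r4,
  //   ExtractBitRange(r4, r3, 7, 4);
  // rotates the field down into the low bits and masks it, so r4 ends up
  // holding a value in the range 0..15.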

  inline void ExtractBit(Register dst, Register src, uint32_t bitNumber,
                         RCBit rc = LeaveRC, bool test = false) {
    ExtractBitRange(dst, src, bitNumber, bitNumber, rc, test);
  }

  // Extract consecutive bits (defined by mask) from src and place them
  // into the least significant bits of dst.
  inline void ExtractBitMask(Register dst, Register src, uintptr_t mask,
                             RCBit rc = LeaveRC, bool test = false) {
    int start = kBitsPerPointer - 1;
    int end;
    uintptr_t bit = (1L << start);

    while (bit && (mask & bit) == 0) {
      start--;
      bit >>= 1;
    }
    end = start;
    bit >>= 1;

    while (bit && (mask & bit)) {
      end--;
      bit >>= 1;
    }

    // 1-bits in mask must be contiguous
    DCHECK(bit == 0 || (mask & ((bit << 1) - 1)) == 0);

    ExtractBitRange(dst, src, start, end, rc, test);
  }

  // Test single bit in value.
  inline void TestBit(Register value, int bitNumber, Register scratch = r0) {
    ExtractBitRange(scratch, value, bitNumber, bitNumber, SetRC, true);
  }

  // Test consecutive bit range in value.  Range is defined by mask.
  inline void TestBitMask(Register value, uintptr_t mask,
                          Register scratch = r0) {
    ExtractBitMask(scratch, value, mask, SetRC, true);
  }
  // Test consecutive bit range in value.  Range is defined by
  // rangeStart - rangeEnd.
  inline void TestBitRange(Register value, int rangeStart, int rangeEnd,
                           Register scratch = r0) {
    ExtractBitRange(scratch, value, rangeStart, rangeEnd, SetRC, true);
  }

  inline void TestIfSmi(Register value, Register scratch) {
    TestBitRange(value, kSmiTagSize - 1, 0, scratch);
  }
  // Jump if the register contains a smi.
  inline void JumpIfSmi(Register value, Label* smi_label) {
    TestIfSmi(value, r0);
    beq(smi_label, cr0);  // branch if SMI
  }
#if V8_TARGET_ARCH_PPC64
  inline void TestIfInt32(Register value, Register scratch,
                          CRegister cr = cr7) {
    // High bits must be identical to fit into a 32-bit integer
    extsw(scratch, value);
    cmp(scratch, value, cr);
  }
#else
  inline void TestIfInt32(Register hi_word, Register lo_word, Register scratch,
                          CRegister cr = cr7) {
    // High bits must be identical to fit into a 32-bit integer
    srawi(scratch, lo_word, 31);
    cmp(scratch, hi_word, cr);
  }
#endif

  // Overflow handling functions.
  // Usage: call the appropriate arithmetic function and then call one of the
  // flow control functions with the corresponding label.

  // Compute dst = left + right, setting condition codes. dst may be the same
  // as either left or right (or a unique register). left and right must not
  // be the same register.
  void AddAndCheckForOverflow(Register dst, Register left, Register right,
                              Register overflow_dst, Register scratch = r0);
  void AddAndCheckForOverflow(Register dst, Register left, intptr_t right,
                              Register overflow_dst, Register scratch = r0);

  // Compute dst = left - right, setting condition codes. dst may be the same
  // as either left or right (or a unique register). left and right must not
  // be the same register.
  void SubAndCheckForOverflow(Register dst, Register left, Register right,
                              Register overflow_dst, Register scratch = r0);
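
  // A usage sketch (illustrative only; the register choices and label are
  // hypothetical, and the exact overflow encoding is defined by the
  // implementation in macro-assembler-ppc.cc). Since the comment above says
  // condition codes are set, the typical pattern branches on the sign of
  // |overflow_dst|:
  //   Label overflow;
  //   AddAndCheckForOverflow(r3, r4, r5, r6, r0);
  //   blt(&overflow, cr0);  // overflow_dst's sign, mirrored in cr0, flags overflow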

  // Performs a truncating conversion of a floating point number as used by
  // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it
  // succeeds, otherwise falls through if result is saturated. On return
  // 'result' either holds the answer, or is clobbered on fall through.
  //
  // Only public for the test code in test-code-stubs-arm.cc.
  void TryInlineTruncateDoubleToI(Register result, DoubleRegister input,
                                  Label* done);
  void TruncateDoubleToI(Isolate* isolate, Zone* zone, Register result,
                         DoubleRegister double_input);

  // Call a code stub.
  void CallStubDelayed(CodeStub* stub);

  void LoadConstantPoolPointerRegister();

  // Loads the constant pool pointer (kConstantPoolRegister).
  void LoadConstantPoolPointerRegisterFromCodeTargetAddress(
      Register code_target_address);
  void AbortConstantPoolBuilding() {
#ifdef DEBUG
    // Avoid DCHECK(!is_linked()) failure in ~Label()
    bind(ConstantPoolPosition());
#endif
  }

  void ResetSpeculationPoisonRegister();

 protected:
  // This handle will be patched with the code object on installation.
  Handle<HeapObject> code_object_;

 private:
  static const int kSmiShift = kSmiTagSize + kSmiShiftSize;

  bool has_frame_ = false;
  bool root_array_available_ = true;
  Isolate* const isolate_;

  void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al,
            CRegister cr = cr7);
  int CalculateStackPassedWords(int num_reg_arguments,
                                int num_double_arguments);
  void CallCFunctionHelper(Register function, int num_reg_arguments,
                           int num_double_arguments);
};

// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler : public TurboAssembler {
 public:
  MacroAssembler(Isolate* isolate, void* buffer, int size,
                 CodeObjectRequired create_code_object);

  // ---------------------------------------------------------------------------
  // GC Support

  void IncrementalMarkingRecordWriteHelper(Register object, Register value,
                                           Register address);

  // Record in the remembered set the fact that we have a pointer to new space
  // at the address pointed to by the addr register.  Only works if addr is not
  // in new space.
  void RememberedSetHelper(Register object,  // Used for debug code.
                           Register addr, Register scratch,
                           SaveFPRegsMode save_fp);

  void JumpToJSEntry(Register target);
  // Check if object is in new space.  Jumps if the object is not in new space.
  // The register scratch can be object itself, but scratch will be clobbered.
  void JumpIfNotInNewSpace(Register object, Register scratch, Label* branch) {
    InNewSpace(object, scratch, eq, branch);
  }

  // Check if object is in new space.  Jumps if the object is in new space.
  // The register scratch can be object itself, but it will be clobbered.
  void JumpIfInNewSpace(Register object, Register scratch, Label* branch) {
    InNewSpace(object, scratch, ne, branch);
  }

  // Check if an object has a given incremental marking color.
  void HasColor(Register object, Register scratch0, Register scratch1,
                Label* has_color, int first_bit, int second_bit);

  void JumpIfBlack(Register object, Register scratch0, Register scratch1,
                   Label* on_black);

  // Checks the color of an object.  If the object is white we jump to the
  // incremental marker.
  void JumpIfWhite(Register value, Register scratch1, Register scratch2,
                   Register scratch3, Label* value_is_white);

  // Notify the garbage collector that we wrote a pointer into an object.
  // |object| is the object being stored into, |value| is the object being
  // stored.  value and scratch registers are clobbered by the operation.
  // The offset is the offset from the start of the object, not the offset from
  // the tagged HeapObject pointer.  For use with FieldMemOperand(reg, off).
  void RecordWriteField(
      Register object, int offset, Register value, Register scratch,
      LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
      SmiCheck smi_check = INLINE_SMI_CHECK);
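
  // A usage sketch (illustrative only; JSObject::kElementsOffset stands in
  // for any tagged field, and the register choices are hypothetical):
  //   StoreP(r4, FieldMemOperand(r3, JSObject::kElementsOffset), r0);
  //   RecordWriteField(r3, JSObject::kElementsOffset, r4, r5,
  //                    kLRHasNotBeenSaved, kDontSaveFPRegs);
  // The store itself does not inform the GC; the RecordWriteField call emits
  // the write barrier for the field that was just written.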

  // For a given |object| notify the garbage collector that the slot |address|
  // has been written.  |value| is the object being stored. The value and
  // address registers are clobbered by the operation.
  void RecordWrite(
      Register object, Register address, Register value,
      LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
      SmiCheck smi_check = INLINE_SMI_CHECK);

  // Push and pop the registers that can hold pointers, as defined by the
  // RegList constant kSafepointSavedRegisters.
  void PushSafepointRegisters();
  void PopSafepointRegisters();

  // Flush the I-cache from asm code. You should use CpuFeatures::FlushICache
  // from C.
  // Does not handle errors.
  void FlushICache(Register address, size_t size, Register scratch);

  // Enter exit frame.
  // stack_space - extra stack space, used for parameters before call to C.
  // At least one slot (for the return address) should be provided.
  void EnterExitFrame(bool save_doubles, int stack_space = 1,
                      StackFrame::Type frame_type = StackFrame::EXIT);

  // Leave the current exit frame. Expects the return value in r0.
  // Expect the number of values, pushed prior to the exit frame, to
  // remove in a register (or no_reg, if there is nothing to remove).
  void LeaveExitFrame(bool save_doubles, Register argument_count,
                      bool argument_count_is_length = false);

  // Load the global proxy from the current context.
  void LoadGlobalProxy(Register dst) {
    LoadNativeContextSlot(Context::GLOBAL_PROXY_INDEX, dst);
  }

  void LoadNativeContextSlot(int index, Register dst);

  // ----------------------------------------------------------------
  // new PPC macro-assembler interfaces that are slightly higher level
  // than assembler-ppc and may generate variable length sequences

  // load a literal double value <value> to FPR <result>
  void LoadWord(Register dst, const MemOperand& mem, Register scratch);
  void StoreWord(Register src, const MemOperand& mem, Register scratch);

  void LoadHalfWord(Register dst, const MemOperand& mem, Register scratch);
  void LoadHalfWordArith(Register dst, const MemOperand& mem,
                         Register scratch = no_reg);
  void StoreHalfWord(Register src, const MemOperand& mem, Register scratch);

  void LoadByte(Register dst, const MemOperand& mem, Register scratch);
  void StoreByte(Register src, const MemOperand& mem, Register scratch);

  void LoadRepresentation(Register dst, const MemOperand& mem, Representation r,
                          Register scratch = no_reg);
  void StoreRepresentation(Register src, const MemOperand& mem,
                           Representation r, Register scratch = no_reg);
  void LoadDoubleU(DoubleRegister dst, const MemOperand& mem,
                   Register scratch = no_reg);

  void Cmpi(Register src1, const Operand& src2, Register scratch,
            CRegister cr = cr7);
  void Cmplwi(Register src1, const Operand& src2, Register scratch,
              CRegister cr = cr7);
  void And(Register ra, Register rs, const Operand& rb, RCBit rc = LeaveRC);
  void Or(Register ra, Register rs, const Operand& rb, RCBit rc = LeaveRC);
  void Xor(Register ra, Register rs, const Operand& rb, RCBit rc = LeaveRC);

  void AddSmiLiteral(Register dst, Register src, Smi* smi, Register scratch);
  void SubSmiLiteral(Register dst, Register src, Smi* smi, Register scratch);
  void CmpSmiLiteral(Register src1, Smi* smi, Register scratch,
                     CRegister cr = cr7);
  void CmplSmiLiteral(Register src1, Smi* smi, Register scratch,
                      CRegister cr = cr7);
  void AndSmiLiteral(Register dst, Register src, Smi* smi, Register scratch,
                     RCBit rc = LeaveRC);


  // ---------------------------------------------------------------------------
  // JavaScript invokes

  // Removes current frame and its arguments from the stack preserving
  // the arguments and a return address pushed to the stack for the next call.
  // Neither |callee_args_count| nor |caller_args_count_reg| includes the
  // receiver. |callee_args_count| is not modified, |caller_args_count_reg|
  // is trashed.

  // Invoke the JavaScript function code by either calling or jumping.
  void InvokeFunctionCode(Register function, Register new_target,
                          const ParameterCount& expected,
                          const ParameterCount& actual, InvokeFlag flag);

  // On function call, call into the debugger if necessary.
  void CheckDebugHook(Register fun, Register new_target,
                      const ParameterCount& expected,
                      const ParameterCount& actual);

  // Invoke the JavaScript function in the given register. Changes the
  // current context to the context in the function before invoking.
  void InvokeFunction(Register function, Register new_target,
                      const ParameterCount& actual, InvokeFlag flag);

  void InvokeFunction(Register function, const ParameterCount& expected,
                      const ParameterCount& actual, InvokeFlag flag);

  void DebugBreak();
  // Frame restart support
  void MaybeDropFrames();

  // Exception handling

  // Push a new stack handler and link into stack handler chain.
  void PushStackHandler();

  // Unlink the stack handler on top of the stack from the stack handler chain.
  // Must preserve the result register.
  void PopStackHandler();

  // ---------------------------------------------------------------------------
  // Support functions.

  // Compare object type for heap object.  heap_object contains a non-Smi
  // whose object type should be compared with the given type.  This both
  // sets the flags and leaves the object type in the type_reg register.
  // It leaves the map in the map register (unless the type_reg and map register
  // are the same register).  It leaves the heap object in the heap_object
  // register unless the heap_object register is the same register as one of the
  // other registers.
  // Type_reg can be no_reg. In that case ip is used.
  void CompareObjectType(Register heap_object, Register map, Register type_reg,
                         InstanceType type);

  // Compare instance type in a map.  map contains a valid map object whose
  // object type should be compared with the given type.  This both
  // sets the flags and leaves the object type in the type_reg register.
  void CompareInstanceType(Register map, Register type_reg, InstanceType type);

  // Compare the object in a register to a value from the root list.
  // Uses the ip register as scratch.
  void CompareRoot(Register obj, Heap::RootListIndex index);
  void PushRoot(Heap::RootListIndex index) {
    LoadRoot(r0, index);
    Push(r0);
  }

  // Compare the object in a register to a value and jump if they are equal.
  void JumpIfRoot(Register with, Heap::RootListIndex index, Label* if_equal) {
    CompareRoot(with, index);
    beq(if_equal);
  }

  // Compare the object in a register to a value and jump if they are not equal.
  void JumpIfNotRoot(Register with, Heap::RootListIndex index,
                     Label* if_not_equal) {
    CompareRoot(with, index);
    bne(if_not_equal);
  }

  // Try to convert a double to a signed 32-bit integer.
  // CR_EQ in cr7 is set and result assigned if the conversion is exact.
  void TryDoubleToInt32Exact(Register result, DoubleRegister double_input,
                             Register scratch, DoubleRegister double_scratch);

  // ---------------------------------------------------------------------------
  // Runtime calls

  static int CallSizeNotPredictableCodeSize(Address target,
                                            RelocInfo::Mode rmode,
                                            Condition cond = al);
  void CallJSEntry(Register target);

  // Call a code stub.
  void CallStub(CodeStub* stub, Condition cond = al);
  void TailCallStub(CodeStub* stub, Condition cond = al);

  // Call a runtime routine.
  void CallRuntime(const Runtime::Function* f, int num_arguments,
                   SaveFPRegsMode save_doubles = kDontSaveFPRegs);
  void CallRuntimeSaveDoubles(Runtime::FunctionId fid) {
    const Runtime::Function* function = Runtime::FunctionForId(fid);
    CallRuntime(function, function->nargs, kSaveFPRegs);
  }

  // Convenience function: Same as above, but takes the fid instead.
  void CallRuntime(Runtime::FunctionId fid,
                   SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
    const Runtime::Function* function = Runtime::FunctionForId(fid);
    CallRuntime(function, function->nargs, save_doubles);
  }

  // Convenience function: Same as above, but takes the fid instead.
  void CallRuntime(Runtime::FunctionId fid, int num_arguments,
                   SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
    CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles);
  }

  // Convenience function: tail call a runtime routine (jump).
  void TailCallRuntime(Runtime::FunctionId fid);


  // Jump to a runtime routine.
  void JumpToExternalReference(const ExternalReference& builtin,
                               bool builtin_exit_frame = false);

  // Generates a trampoline to jump to the off-heap instruction stream.
  void JumpToInstructionStream(Address entry);

  // ---------------------------------------------------------------------------
  // In-place weak references.
  void LoadWeakValue(Register out, Register in, Label* target_if_cleared);

  // ---------------------------------------------------------------------------
  // StatsCounter support

  void IncrementCounter(StatsCounter* counter, int value, Register scratch1,
                        Register scratch2);
  void DecrementCounter(StatsCounter* counter, int value, Register scratch1,
                        Register scratch2);

  // ---------------------------------------------------------------------------
  // Smi utilities

  // Shift left by kSmiShift
  void SmiTag(Register reg, RCBit rc = LeaveRC) { SmiTag(reg, reg, rc); }
  void SmiTag(Register dst, Register src, RCBit rc = LeaveRC) {
    ShiftLeftImm(dst, src, Operand(kSmiShift), rc);
  }

  void SmiToPtrArrayOffset(Register dst, Register src) {
#if V8_TARGET_ARCH_PPC64
    STATIC_ASSERT(kSmiTag == 0 && kSmiShift > kPointerSizeLog2);
    ShiftRightArithImm(dst, src, kSmiShift - kPointerSizeLog2);
#else
    STATIC_ASSERT(kSmiTag == 0 && kSmiShift < kPointerSizeLog2);
    ShiftLeftImm(dst, src, Operand(kPointerSizeLog2 - kSmiShift));
#endif
  }
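
  // A worked example (illustrative only): on PPC64 a smi holds its payload in
  // the upper 32 bits (kSmiShift == 32), so SmiToPtrArrayOffset(r4, r3) emits
  // an arithmetic right shift by 32 - kPointerSizeLog2 == 29, turning the smi
  // index in r3 directly into a byte offset scaled by kPointerSize.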

  // Untag the source value into destination and jump if source is a smi.
  // Source and destination can be the same register.
  void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case);

  // Jump if the register contains a non-smi.
  inline void JumpIfNotSmi(Register value, Label* not_smi_label) {
    TestIfSmi(value, r0);
    bne(not_smi_label, cr0);
  }
  // Jump if either of the registers contains a smi.
  void JumpIfEitherSmi(Register reg1, Register reg2, Label* on_either_smi);

  // Abort execution if argument is a smi, enabled via --debug-code.
  void AssertNotSmi(Register object);
  void AssertSmi(Register object);


#if V8_TARGET_ARCH_PPC64
  // Ensure it is permissible to read/write int value directly from
  // upper half of the smi.
  STATIC_ASSERT(kSmiTag == 0);
  STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
#endif
#if V8_TARGET_ARCH_PPC64 && V8_TARGET_LITTLE_ENDIAN
#define SmiWordOffset(offset) (offset + kPointerSize / 2)
#else
#define SmiWordOffset(offset) offset
#endif
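
  // A usage sketch (illustrative only; kLengthOffset stands in for any
  // smi-valued field offset). On PPC64 the smi payload lives in the upper
  // 32 bits, so
  //   LoadWordArith(r4, FieldMemOperand(r3, SmiWordOffset(kLengthOffset)));
  // reads the already-untagged int value of the field with a single 32-bit
  // load, with SmiWordOffset adjusting the offset for endianness.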

  // Abort execution if argument is not a FixedArray, enabled via --debug-code.
  void AssertFixedArray(Register object);

  // Abort execution if argument is not a Constructor, enabled via --debug-code.
  void AssertConstructor(Register object);

  // Abort execution if argument is not a JSFunction, enabled via --debug-code.
  void AssertFunction(Register object);

  // Abort execution if argument is not a JSBoundFunction,
  // enabled via --debug-code.
  void AssertBoundFunction(Register object);

  // Abort execution if argument is not a JSGeneratorObject (or subclass),
  // enabled via --debug-code.
  void AssertGeneratorObject(Register object);

  // Abort execution if argument is not undefined or an AllocationSite, enabled
  // via --debug-code.
  void AssertUndefinedOrAllocationSite(Register object, Register scratch);

  // ---------------------------------------------------------------------------
  // Patching helpers.

  template <typename Field>
  void DecodeField(Register dst, Register src, RCBit rc = LeaveRC) {
    ExtractBitRange(dst, src, Field::kShift + Field::kSize - 1, Field::kShift,
                    rc);
  }

  template <typename Field>
  void DecodeField(Register reg, RCBit rc = LeaveRC) {
    DecodeField<Field>(reg, reg, rc);
  }
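
  // A usage sketch (illustrative only; Map::ElementsKindBits is used here
  // merely as an example of a BitField type):
  //   DecodeField<Map::ElementsKindBits>(r4, r3);
  // extracts the field's bit range from r3 and leaves the decoded value in
  // the low bits of r4, as ExtractBitRange above documents.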

 private:
  static const int kSmiShift = kSmiTagSize + kSmiShiftSize;

  // Helper functions for generating invokes.
  void InvokePrologue(const ParameterCount& expected,
                      const ParameterCount& actual, Label* done,
                      bool* definitely_mismatches, InvokeFlag flag);

  // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
  void InNewSpace(Register object, Register scratch,
                  Condition cond,  // eq for new space, ne otherwise.
                  Label* branch);

  // Compute memory operands for safepoint stack slots.
  static int SafepointRegisterStackIndex(int reg_code);

  // Needs access to SafepointRegisterStackIndex for compiled frame
  // traversal.
  friend class StandardFrame;
};

// -----------------------------------------------------------------------------
// Static helper functions.

inline MemOperand ContextMemOperand(Register context, int index = 0) {
  return MemOperand(context, Context::SlotOffset(index));
}


inline MemOperand NativeContextMemOperand() {
  return ContextMemOperand(cp, Context::NATIVE_CONTEXT_INDEX);
}
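
// A usage sketch (illustrative only; assumes cp holds the current context, as
// NativeContextMemOperand above does, and the usual "__" shorthand for
// ACCESS_MASM(masm)):
//   __ LoadP(r3, ContextMemOperand(cp, Context::GLOBAL_PROXY_INDEX));
// loads the slot at Context::SlotOffset(index) from the context object.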

#define ACCESS_MASM(masm) masm->

}  // namespace internal
}  // namespace v8

#endif  // V8_PPC_MACRO_ASSEMBLER_PPC_H_