// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef INCLUDED_FROM_MACRO_ASSEMBLER_H
#error This header must be included via macro-assembler.h
#endif

#ifndef V8_CODEGEN_ARM64_MACRO_ASSEMBLER_ARM64_H_
#define V8_CODEGEN_ARM64_MACRO_ASSEMBLER_ARM64_H_

#include <vector>

#include "src/base/bits.h"
#include "src/codegen/arm64/assembler-arm64.h"
#include "src/codegen/bailout-reason.h"
#include "src/common/globals.h"
#include "src/objects/tagged-index.h"

// Simulator specific helpers.
#if USE_SIMULATOR
#if DEBUG
#define ASM_LOCATION(message) __ Debug("LOCATION: " message, __LINE__, NO_PARAM)
#define ASM_LOCATION_IN_ASSEMBLER(message) \
  Debug("LOCATION: " message, __LINE__, NO_PARAM)
#else
#define ASM_LOCATION(message)
#define ASM_LOCATION_IN_ASSEMBLER(message)
#endif
#else
#define ASM_LOCATION(message)
#define ASM_LOCATION_IN_ASSEMBLER(message)
#endif
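
// A minimal usage sketch (assuming '__' is the usual masm-> shorthand in
// the surrounding code generator): annotate a code path so the simulator
// logs it in debug builds, e.g.
//
//   ASM_LOCATION("MacroAssembler::EnterFrame");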

namespace v8 {
namespace internal {

#define LS_MACRO_LIST(V)                                     \
  V(Ldrb, Register&, rt, LDRB_w)                             \
  V(Strb, Register&, rt, STRB_w)                             \
  V(Ldrsb, Register&, rt, rt.Is64Bits() ? LDRSB_x : LDRSB_w) \
  V(Ldrh, Register&, rt, LDRH_w)                             \
  V(Strh, Register&, rt, STRH_w)                             \
  V(Ldrsh, Register&, rt, rt.Is64Bits() ? LDRSH_x : LDRSH_w) \
  V(Ldr, CPURegister&, rt, LoadOpFor(rt))                    \
  V(Str, CPURegister&, rt, StoreOpFor(rt))                   \
  V(Ldrsw, Register&, rt, LDRSW_x)

#define LSPAIR_MACRO_LIST(V)                             \
  V(Ldp, CPURegister&, rt, rt2, LoadPairOpFor(rt, rt2))  \
  V(Stp, CPURegister&, rt, rt2, StorePairOpFor(rt, rt2)) \
  V(Ldpsw, CPURegister&, rt, rt2, LDPSW_x)

#define LDA_STL_MACRO_LIST(V) \
  V(Ldarb, ldarb)             \
  V(Ldarh, ldarh)             \
  V(Ldar, ldar)               \
  V(Ldaxrb, ldaxrb)           \
  V(Ldaxrh, ldaxrh)           \
  V(Ldaxr, ldaxr)             \
  V(Stlrb, stlrb)             \
  V(Stlrh, stlrh)             \
  V(Stlr, stlr)

#define STLX_MACRO_LIST(V) \
  V(Stlxrb, stlxrb)        \
  V(Stlxrh, stlxrh)        \
  V(Stlxr, stlxr)

// ----------------------------------------------------------------------------
// Static helper functions

// Generate a MemOperand for loading a field from an object.
inline MemOperand FieldMemOperand(Register object, int offset);
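//
// A minimal usage sketch (assuming the usual '__' masm shorthand; the
// HeapObject map offset is just an illustrative field):
//
//     __ Ldr(x0, FieldMemOperand(x1, HeapObject::kMapOffset));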

// ----------------------------------------------------------------------------
// MacroAssembler

enum BranchType {
  // Copies of architectural conditions.
  // The associated conditions can be used in place of these; the code will
  // take care of reinterpreting them with the correct type.
  integer_eq = eq,
  integer_ne = ne,
  integer_hs = hs,
  integer_lo = lo,
  integer_mi = mi,
  integer_pl = pl,
  integer_vs = vs,
  integer_vc = vc,
  integer_hi = hi,
  integer_ls = ls,
  integer_ge = ge,
  integer_lt = lt,
  integer_gt = gt,
  integer_le = le,
  integer_al = al,
  integer_nv = nv,

  // These two are *different* from the architectural codes al and nv.
  // 'always' is used to generate unconditional branches.
  // 'never' is used to not generate a branch (generally as the inverse
  // branch type of 'always').
  always,
  never,
  // cbz and cbnz
  reg_zero,
  reg_not_zero,
  // tbz and tbnz
  reg_bit_clear,
  reg_bit_set,

  // Aliases.
  kBranchTypeFirstCondition = eq,
  kBranchTypeLastCondition = nv,
  kBranchTypeFirstUsingReg = reg_zero,
  kBranchTypeFirstUsingBit = reg_bit_clear
};

inline BranchType InvertBranchType(BranchType type) {
  if (kBranchTypeFirstCondition <= type && type <= kBranchTypeLastCondition) {
    return static_cast<BranchType>(
        NegateCondition(static_cast<Condition>(type)));
  } else {
    return static_cast<BranchType>(type ^ 1);
  }
}
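
// For example, the paired non-condition branch types differ only in their
// low bit, so the XOR above flips between them:
//
//   InvertBranchType(integer_eq);  // -> integer_ne (via NegateCondition).
//   InvertBranchType(reg_zero);    // -> reg_not_zero.
//   InvertBranchType(always);      // -> never.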

enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };
enum DiscardMoveMode { kDontDiscardForSameWReg, kDiscardForSameWReg };

// The macro assembler supports moving automatically pre-shifted immediates for
// arithmetic and logical instructions, and then applying a post shift in the
// instruction to undo the modification, in order to reduce the code emitted for
// an operation. For example:
//
//  Add(x0, x0, 0x1f7de) => movz x16, 0xfbef; add x0, x0, x16, lsl #1.
//
// This optimisation can only be partially applied when the stack pointer is an
// operand or destination, so this enumeration is used to control the shift.
enum PreShiftImmMode {
  kNoShift,          // Don't pre-shift.
  kLimitShiftForSP,  // Limit pre-shift for add/sub extend use.
  kAnyShift          // Allow any pre-shift.
};
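
// To spell out the example above: 0x1f7de == 0xfbef << 1, and 0xfbef fits
// in a 16-bit movz immediate, so one movz plus the 'lsl #1' operand shift
// replaces the movz/movk pair that materialising 0x1f7de directly would
// require.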

// TODO(victorgomes): Move definition to macro-assembler.h, once all other
// platforms are updated.
enum class StackLimitKind { kInterruptStackLimit, kRealStackLimit };

class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
 public:
  using TurboAssemblerBase::TurboAssemblerBase;

#if DEBUG
  void set_allow_macro_instructions(bool value) {
    allow_macro_instructions_ = value;
  }
  bool allow_macro_instructions() const { return allow_macro_instructions_; }
#endif

  // We should not use near calls or jumps for calls to external references,
  // since the code spaces are not guaranteed to be close to each other.
  bool CanUseNearCallOrJump(RelocInfo::Mode rmode) {
    return rmode != RelocInfo::EXTERNAL_REFERENCE;
  }

  static bool IsNearCallOffset(int64_t offset);

  // Activation support.
  void EnterFrame(StackFrame::Type type);
  void EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg) {
    // Out-of-line constant pool not implemented on arm64.
    UNREACHABLE();
  }
  void LeaveFrame(StackFrame::Type type);

  inline void InitializeRootRegister();

  void Mov(const Register& rd, const Operand& operand,
           DiscardMoveMode discard_mode = kDontDiscardForSameWReg);
  void Mov(const Register& rd, uint64_t imm);
  void Mov(const VRegister& vd, int vd_index, const VRegister& vn,
           int vn_index) {
    DCHECK(allow_macro_instructions());
    mov(vd, vd_index, vn, vn_index);
  }
  void Mov(const Register& rd, Smi smi);
  void Mov(const VRegister& vd, const VRegister& vn, int index) {
    DCHECK(allow_macro_instructions());
    mov(vd, vn, index);
  }
  void Mov(const VRegister& vd, int vd_index, const Register& rn) {
    DCHECK(allow_macro_instructions());
    mov(vd, vd_index, rn);
  }
  void Mov(const Register& rd, const VRegister& vn, int vn_index) {
    DCHECK(allow_macro_instructions());
    mov(rd, vn, vn_index);
  }

  // These are required for compatibility with architecture independent code.
  // Remove if not needed.
  void Move(Register dst, Smi src);
  void Move(Register dst, MemOperand src);
  void Move(Register dst, Register src);

  // Move src0 to dst0 and src1 to dst1, handling possible overlaps.
  void MovePair(Register dst0, Register src0, Register dst1, Register src1);

  // Register swap. Note that the register operands should be distinct.
  void Swap(Register lhs, Register rhs);
  void Swap(VRegister lhs, VRegister rhs);

// NEON by element instructions.
#define NEON_BYELEMENT_MACRO_LIST(V) \
  V(fmla, Fmla)                      \
  V(fmls, Fmls)                      \
  V(fmul, Fmul)                      \
  V(fmulx, Fmulx)                    \
  V(mul, Mul)                        \
  V(mla, Mla)                        \
  V(mls, Mls)                        \
  V(sqdmulh, Sqdmulh)                \
  V(sqrdmulh, Sqrdmulh)              \
  V(sqdmull, Sqdmull)                \
  V(sqdmull2, Sqdmull2)              \
  V(sqdmlal, Sqdmlal)                \
  V(sqdmlal2, Sqdmlal2)              \
  V(sqdmlsl, Sqdmlsl)                \
  V(sqdmlsl2, Sqdmlsl2)              \
  V(smull, Smull)                    \
  V(smull2, Smull2)                  \
  V(smlal, Smlal)                    \
  V(smlal2, Smlal2)                  \
  V(smlsl, Smlsl)                    \
  V(smlsl2, Smlsl2)                  \
  V(umull, Umull)                    \
  V(umull2, Umull2)                  \
  V(umlal, Umlal)                    \
  V(umlal2, Umlal2)                  \
  V(umlsl, Umlsl)                    \
  V(umlsl2, Umlsl2)

#define DEFINE_MACRO_ASM_FUNC(ASM, MASM)                                   \
  void MASM(const VRegister& vd, const VRegister& vn, const VRegister& vm, \
            int vm_index) {                                                \
    DCHECK(allow_macro_instructions());                                    \
    ASM(vd, vn, vm, vm_index);                                             \
  }
  NEON_BYELEMENT_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
#undef DEFINE_MACRO_ASM_FUNC

// NEON 2 vector register instructions.
#define NEON_2VREG_MACRO_LIST(V) \
  V(abs, Abs)                    \
  V(addp, Addp)                  \
  V(addv, Addv)                  \
  V(cls, Cls)                    \
  V(clz, Clz)                    \
  V(cnt, Cnt)                    \
  V(faddp, Faddp)                \
  V(fcvtas, Fcvtas)              \
  V(fcvtau, Fcvtau)              \
  V(fcvtl, Fcvtl)                \
  V(fcvtms, Fcvtms)              \
  V(fcvtmu, Fcvtmu)              \
  V(fcvtn, Fcvtn)                \
  V(fcvtns, Fcvtns)              \
  V(fcvtnu, Fcvtnu)              \
  V(fcvtps, Fcvtps)              \
  V(fcvtpu, Fcvtpu)              \
  V(fmaxnmp, Fmaxnmp)            \
  V(fmaxnmv, Fmaxnmv)            \
  V(fmaxp, Fmaxp)                \
  V(fmaxv, Fmaxv)                \
  V(fminnmp, Fminnmp)            \
  V(fminnmv, Fminnmv)            \
  V(fminp, Fminp)                \
  V(fminv, Fminv)                \
  V(fneg, Fneg)                  \
  V(frecpe, Frecpe)              \
  V(frecpx, Frecpx)              \
  V(frinta, Frinta)              \
  V(frinti, Frinti)              \
  V(frintm, Frintm)              \
  V(frintn, Frintn)              \
  V(frintp, Frintp)              \
  V(frintx, Frintx)              \
  V(frintz, Frintz)              \
  V(frsqrte, Frsqrte)            \
  V(fsqrt, Fsqrt)                \
  V(mov, Mov)                    \
  V(mvn, Mvn)                    \
  V(neg, Neg)                    \
  V(not_, Not)                   \
  V(rbit, Rbit)                  \
  V(rev16, Rev16)                \
  V(rev32, Rev32)                \
  V(rev64, Rev64)                \
  V(sadalp, Sadalp)              \
  V(saddlp, Saddlp)              \
  V(saddlv, Saddlv)              \
  V(smaxv, Smaxv)                \
  V(sminv, Sminv)                \
  V(sqabs, Sqabs)                \
  V(sqneg, Sqneg)                \
  V(sqxtn2, Sqxtn2)              \
  V(sqxtn, Sqxtn)                \
  V(sqxtun2, Sqxtun2)            \
  V(sqxtun, Sqxtun)              \
  V(suqadd, Suqadd)              \
  V(sxtl2, Sxtl2)                \
  V(sxtl, Sxtl)                  \
  V(uadalp, Uadalp)              \
  V(uaddlp, Uaddlp)              \
  V(uaddlv, Uaddlv)              \
  V(umaxv, Umaxv)                \
  V(uminv, Uminv)                \
  V(uqxtn2, Uqxtn2)              \
  V(uqxtn, Uqxtn)                \
  V(urecpe, Urecpe)              \
  V(ursqrte, Ursqrte)            \
  V(usqadd, Usqadd)              \
  V(uxtl2, Uxtl2)                \
  V(uxtl, Uxtl)                  \
  V(xtn2, Xtn2)                  \
  V(xtn, Xtn)

#define DEFINE_MACRO_ASM_FUNC(ASM, MASM)                \
  void MASM(const VRegister& vd, const VRegister& vn) { \
    DCHECK(allow_macro_instructions());                 \
    ASM(vd, vn);                                        \
  }
  NEON_2VREG_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
#undef DEFINE_MACRO_ASM_FUNC
#undef NEON_2VREG_MACRO_LIST

// NEON 2 vector register with immediate instructions.
#define NEON_2VREG_FPIMM_MACRO_LIST(V) \
  V(fcmeq, Fcmeq)                      \
  V(fcmge, Fcmge)                      \
  V(fcmgt, Fcmgt)                      \
  V(fcmle, Fcmle)                      \
  V(fcmlt, Fcmlt)

#define DEFINE_MACRO_ASM_FUNC(ASM, MASM)                            \
  void MASM(const VRegister& vd, const VRegister& vn, double imm) { \
    DCHECK(allow_macro_instructions());                             \
    ASM(vd, vn, imm);                                               \
  }
  NEON_2VREG_FPIMM_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
#undef DEFINE_MACRO_ASM_FUNC

// NEON 3 vector register instructions.
#define NEON_3VREG_MACRO_LIST(V) \
  V(add, Add)                    \
  V(addhn2, Addhn2)              \
  V(addhn, Addhn)                \
  V(addp, Addp)                  \
  V(and_, And)                   \
  V(bic, Bic)                    \
  V(bif, Bif)                    \
  V(bit, Bit)                    \
  V(bsl, Bsl)                    \
  V(cmeq, Cmeq)                  \
  V(cmge, Cmge)                  \
  V(cmgt, Cmgt)                  \
  V(cmhi, Cmhi)                  \
  V(cmhs, Cmhs)                  \
  V(cmtst, Cmtst)                \
  V(eor, Eor)                    \
  V(fabd, Fabd)                  \
  V(facge, Facge)                \
  V(facgt, Facgt)                \
  V(faddp, Faddp)                \
  V(fcmeq, Fcmeq)                \
  V(fcmge, Fcmge)                \
  V(fcmgt, Fcmgt)                \
  V(fmaxnmp, Fmaxnmp)            \
  V(fmaxp, Fmaxp)                \
  V(fminnmp, Fminnmp)            \
  V(fminp, Fminp)                \
  V(fmla, Fmla)                  \
  V(fmls, Fmls)                  \
  V(fmulx, Fmulx)                \
  V(fnmul, Fnmul)                \
  V(frecps, Frecps)              \
  V(frsqrts, Frsqrts)            \
  V(mla, Mla)                    \
  V(mls, Mls)                    \
  V(mul, Mul)                    \
  V(orn, Orn)                    \
  V(orr, Orr)                    \
  V(pmull2, Pmull2)              \
  V(pmull, Pmull)                \
  V(pmul, Pmul)                  \
  V(raddhn2, Raddhn2)            \
  V(raddhn, Raddhn)              \
  V(rsubhn2, Rsubhn2)            \
  V(rsubhn, Rsubhn)              \
  V(sabal2, Sabal2)              \
  V(sabal, Sabal)                \
  V(saba, Saba)                  \
  V(sabdl2, Sabdl2)              \
  V(sabdl, Sabdl)                \
  V(sabd, Sabd)                  \
  V(saddl2, Saddl2)              \
  V(saddl, Saddl)                \
  V(saddw2, Saddw2)              \
  V(saddw, Saddw)                \
  V(shadd, Shadd)                \
  V(shsub, Shsub)                \
  V(smaxp, Smaxp)                \
  V(smax, Smax)                  \
  V(sminp, Sminp)                \
  V(smin, Smin)                  \
  V(smlal2, Smlal2)              \
  V(smlal, Smlal)                \
  V(smlsl2, Smlsl2)              \
  V(smlsl, Smlsl)                \
  V(smull2, Smull2)              \
  V(smull, Smull)                \
  V(sqadd, Sqadd)                \
  V(sqdmlal2, Sqdmlal2)          \
  V(sqdmlal, Sqdmlal)            \
  V(sqdmlsl2, Sqdmlsl2)          \
  V(sqdmlsl, Sqdmlsl)            \
  V(sqdmulh, Sqdmulh)            \
  V(sqdmull2, Sqdmull2)          \
  V(sqdmull, Sqdmull)            \
  V(sqrdmulh, Sqrdmulh)          \
  V(sqrshl, Sqrshl)              \
  V(sqshl, Sqshl)                \
  V(sqsub, Sqsub)                \
  V(srhadd, Srhadd)              \
  V(srshl, Srshl)                \
  V(sshl, Sshl)                  \
  V(ssubl2, Ssubl2)              \
  V(ssubl, Ssubl)                \
  V(ssubw2, Ssubw2)              \
  V(ssubw, Ssubw)                \
  V(subhn2, Subhn2)              \
  V(subhn, Subhn)                \
  V(sub, Sub)                    \
  V(trn1, Trn1)                  \
  V(trn2, Trn2)                  \
  V(uabal2, Uabal2)              \
  V(uabal, Uabal)                \
  V(uaba, Uaba)                  \
  V(uabdl2, Uabdl2)              \
  V(uabdl, Uabdl)                \
  V(uabd, Uabd)                  \
  V(uaddl2, Uaddl2)              \
  V(uaddl, Uaddl)                \
  V(uaddw2, Uaddw2)              \
  V(uaddw, Uaddw)                \
  V(uhadd, Uhadd)                \
  V(uhsub, Uhsub)                \
  V(umaxp, Umaxp)                \
  V(umax, Umax)                  \
  V(uminp, Uminp)                \
  V(umin, Umin)                  \
  V(umlal2, Umlal2)              \
  V(umlal, Umlal)                \
  V(umlsl2, Umlsl2)              \
  V(umlsl, Umlsl)                \
  V(umull2, Umull2)              \
  V(umull, Umull)                \
  V(uqadd, Uqadd)                \
  V(uqrshl, Uqrshl)              \
  V(uqshl, Uqshl)                \
  V(uqsub, Uqsub)                \
  V(urhadd, Urhadd)              \
  V(urshl, Urshl)                \
  V(ushl, Ushl)                  \
  V(usubl2, Usubl2)              \
  V(usubl, Usubl)                \
  V(usubw2, Usubw2)              \
  V(usubw, Usubw)                \
  V(uzp1, Uzp1)                  \
  V(uzp2, Uzp2)                  \
  V(zip1, Zip1)                  \
  V(zip2, Zip2)

#define DEFINE_MACRO_ASM_FUNC(ASM, MASM)                                     \
  void MASM(const VRegister& vd, const VRegister& vn, const VRegister& vm) { \
    DCHECK(allow_macro_instructions());                                      \
    ASM(vd, vn, vm);                                                         \
  }
  NEON_3VREG_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
#undef DEFINE_MACRO_ASM_FUNC

  void Bic(const VRegister& vd, const int imm8, const int left_shift = 0) {
    DCHECK(allow_macro_instructions());
    bic(vd, imm8, left_shift);
  }

  // This is required for compatibility in architecture independent code.
  inline void jmp(Label* L);

  void B(Label* label, BranchType type, Register reg = NoReg, int bit = -1);
  inline void B(Label* label);
  inline void B(Condition cond, Label* label);
  void B(Label* label, Condition cond);

  void Tbnz(const Register& rt, unsigned bit_pos, Label* label);
  void Tbz(const Register& rt, unsigned bit_pos, Label* label);

  void Cbnz(const Register& rt, Label* label);
  void Cbz(const Register& rt, Label* label);

  void Pacibsp() {
    DCHECK(allow_macro_instructions_);
    pacibsp();
  }
  void Autibsp() {
    DCHECK(allow_macro_instructions_);
    autibsp();
  }

  // The 1716 pac and aut instructions encourage people to use x16 and x17
  // directly, perhaps without realising that this is forbidden. For example:
  //
  //     UseScratchRegisterScope temps(&masm);
  //     Register temp = temps.AcquireX();  // temp will be x16
  //     __ Mov(x17, ptr);
  //     __ Mov(x16, modifier);  // Will override temp!
  //     __ Pacib1716();
  //
  // To work around this issue, you must exclude x16 and x17 from the scratch
  // register list. You may need to replace them with other registers:
  //
  //     UseScratchRegisterScope temps(&masm);
  //     temps.Exclude(x16, x17);
  //     temps.Include(x10, x11);
  //     __ Mov(x17, ptr);
  //     __ Mov(x16, modifier);
  //     __ Pacib1716();
  void Pacib1716() {
    DCHECK(allow_macro_instructions_);
    DCHECK(!TmpList()->IncludesAliasOf(x16));
    DCHECK(!TmpList()->IncludesAliasOf(x17));
    pacib1716();
  }
  void Autib1716() {
    DCHECK(allow_macro_instructions_);
    DCHECK(!TmpList()->IncludesAliasOf(x16));
    DCHECK(!TmpList()->IncludesAliasOf(x17));
    autib1716();
  }

  inline void Dmb(BarrierDomain domain, BarrierType type);
  inline void Dsb(BarrierDomain domain, BarrierType type);
  inline void Isb();
  inline void Csdb();

  inline void SmiUntag(Register dst, Register src);
  inline void SmiUntag(Register dst, const MemOperand& src);
  inline void SmiUntag(Register smi);

  inline void SmiToInt32(Register smi);

  // Calls Abort(msg) if the condition cond is not satisfied.
  // Use --debug_code to enable.
  void Assert(Condition cond, AbortReason reason);

  // Like Assert(), but without condition.
  // Use --debug_code to enable.
  void AssertUnreachable(AbortReason reason);

  void AssertSmi(Register object,
                 AbortReason reason = AbortReason::kOperandIsNotASmi);

  // Like Assert(), but always enabled.
  void Check(Condition cond, AbortReason reason);

  inline void Debug(const char* message, uint32_t code, Instr params = BREAK);

  void Trap();
  void DebugBreak();

  // Print a message to stderr and abort execution.
  void Abort(AbortReason reason);

  // Like printf, but print at run-time from generated code.
  //
  // The caller must ensure that arguments for floating-point placeholders
  // (such as %e, %f or %g) are VRegisters, and that arguments for integer
  // placeholders are Registers.
  //
  // Format placeholders that refer to more than one argument, or to a specific
  // argument, are not supported. This includes formats like "%1$d" or "%.*d".
  //
  // This function automatically preserves caller-saved registers so that
  // calling code can use Printf at any point without having to worry about
  // corruption. The preservation mechanism generates a lot of code. If this is
  // a problem, preserve the important registers manually and then call
  // PrintfNoPreserve. Callee-saved registers are not used by Printf, and are
  // implicitly preserved.
  void Printf(const char* format, CPURegister arg0 = NoCPUReg,
              CPURegister arg1 = NoCPUReg, CPURegister arg2 = NoCPUReg,
              CPURegister arg3 = NoCPUReg);

  // Like Printf, but don't preserve any caller-saved registers, not even 'lr'.
  //
  // The return code from the system printf call will be returned in x0.
  void PrintfNoPreserve(const char* format, const CPURegister& arg0 = NoCPUReg,
                        const CPURegister& arg1 = NoCPUReg,
                        const CPURegister& arg2 = NoCPUReg,
                        const CPURegister& arg3 = NoCPUReg);
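
  // A minimal usage sketch (illustrative register choices; '__' is the
  // usual masm shorthand):
  //
  //     __ Mov(x0, 42);
  //     __ Fmov(d0, 1.5);
  //     __ Printf("x0: %" PRId64 ", d0: %g\n", x0, d0);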

  // Remaining instructions are simple pass-through calls to the assembler.
  inline void Asr(const Register& rd, const Register& rn, unsigned shift);
  inline void Asr(const Register& rd, const Register& rn, const Register& rm);

  // Try to move an immediate into the destination register in a single
  // instruction. Returns true for success, and updates the contents of dst;
  // returns false otherwise.
  bool TryOneInstrMoveImmediate(const Register& dst, int64_t imm);

  inline void Bind(Label* label,
                   BranchTargetIdentifier id = BranchTargetIdentifier::kNone);

  // Control-flow integrity:

  // Define a function entrypoint.
  inline void CodeEntry();
  // Define an exception handler.
  inline void ExceptionHandler();
  // Define an exception handler and bind a label.
  inline void BindExceptionHandler(Label* label);

  // Control-flow integrity:

  // Define a jump (BR) target.
  inline void JumpTarget();
  // Define a jump (BR) target and bind a label.
  inline void BindJumpTarget(Label* label);
  // Define a call (BLR) target. The target also allows tail calls (via BR)
  // when the target is x16 or x17.
  inline void CallTarget();
  // Define a jump/call target.
  inline void JumpOrCallTarget();
  // Define a jump/call target and bind a label.
  inline void BindJumpOrCallTarget(Label* label);

  static unsigned CountSetHalfWords(uint64_t imm, unsigned reg_size);

  CPURegList* TmpList() { return &tmp_list_; }
  CPURegList* FPTmpList() { return &fptmp_list_; }

  static CPURegList DefaultTmpList();
  static CPURegList DefaultFPTmpList();

  // Move macros.
  inline void Mvn(const Register& rd, uint64_t imm);
  void Mvn(const Register& rd, const Operand& operand);
  static bool IsImmMovn(uint64_t imm, unsigned reg_size);
  static bool IsImmMovz(uint64_t imm, unsigned reg_size);

  void LogicalMacro(const Register& rd, const Register& rn,
                    const Operand& operand, LogicalOp op);
  void AddSubMacro(const Register& rd, const Register& rn,
                   const Operand& operand, FlagsUpdate S, AddSubOp op);
  inline void Orr(const Register& rd, const Register& rn,
                  const Operand& operand);
  void Orr(const VRegister& vd, const int imm8, const int left_shift = 0) {
    DCHECK(allow_macro_instructions());
    orr(vd, imm8, left_shift);
  }
  inline void Orn(const Register& rd, const Register& rn,
                  const Operand& operand);
  inline void Eor(const Register& rd, const Register& rn,
                  const Operand& operand);
  inline void Eon(const Register& rd, const Register& rn,
                  const Operand& operand);
  inline void And(const Register& rd, const Register& rn,
                  const Operand& operand);
  inline void Ands(const Register& rd, const Register& rn,
                   const Operand& operand);
  inline void Tst(const Register& rn, const Operand& operand);
  inline void Bic(const Register& rd, const Register& rn,
                  const Operand& operand);
  inline void Blr(const Register& xn);
  inline void Cmp(const Register& rn, const Operand& operand);
  inline void CmpTagged(const Register& rn, const Operand& operand);
  inline void Subs(const Register& rd, const Register& rn,
                   const Operand& operand);
  void Csel(const Register& rd, const Register& rn, const Operand& operand,
            Condition cond);
  inline void Fcsel(const VRegister& fd, const VRegister& fn,
                    const VRegister& fm, Condition cond);

  // Emits a runtime assert that the stack pointer is aligned.
  void AssertSpAligned();

  // Copy slot_count stack slots from the stack offset specified by src to
  // the stack offset specified by dst. The offsets and count are expressed in
  // slot-sized units. Offset dst must be less than src, or the gap between
  // them must be greater than or equal to slot_count, otherwise the result is
  // unpredictable. The function may corrupt its register arguments. The
  // registers must not alias each other.
  void CopySlots(int dst, Register src, Register slot_count);
  void CopySlots(Register dst, Register src, Register slot_count);

  // Copy count double words from the address in register src to the address
  // in register dst. There are three modes for this function:
  // 1) Address dst must be less than src, or the gap between them must be
  //    greater than or equal to count double words, otherwise the result is
  //    unpredictable. This is the default mode.
  // 2) Address src must be less than dst, or the gap between them must be
  //    greater than or equal to count double words, otherwise the result is
  //    unpredictable. In this mode, src and dst specify the last (highest)
  //    address of the regions to copy from and to.
  // 3) The same as mode 1, but the words are copied in reverse order.
  // The case where src == dst is not supported.
  // The function may corrupt its register arguments. The registers must not
  // alias each other.
  enum CopyDoubleWordsMode {
    kDstLessThanSrc,
    kSrcLessThanDst,
    kDstLessThanSrcAndReverse
  };
  void CopyDoubleWords(Register dst, Register src, Register count,
                       CopyDoubleWordsMode mode = kDstLessThanSrc);
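
  // A minimal usage sketch (illustrative register choices): copy x2 double
  // words from [x1] to [x0], where the regions may overlap but the
  // destination starts below the source:
  //
  //     __ CopyDoubleWords(x0, x1, x2);  // Default kDstLessThanSrc mode.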

  // Calculate the address of a double word-sized slot at slot_offset from the
  // stack pointer, and write it to dst. Positive slot_offsets are at addresses
  // greater than sp, with slot zero at sp.
  void SlotAddress(Register dst, int slot_offset);
  void SlotAddress(Register dst, Register slot_offset);

  // Load a literal from the inline constant pool.
  inline void Ldr(const CPURegister& rt, const Operand& imm);

  // Claim or drop stack space.
  //
  // On Windows, Claim will write a value every 4k, as is required by the stack
  // expansion mechanism.
  //
  // The stack pointer must be aligned to 16 bytes and the size claimed or
  // dropped must be a multiple of 16 bytes.
  //
  // Note that unit_size must be specified in bytes. For variants which take a
  // Register count, the unit size must be a power of two.
  inline void Claim(int64_t count, uint64_t unit_size = kXRegSize);
  inline void Claim(const Register& count, uint64_t unit_size = kXRegSize);
  inline void Drop(int64_t count, uint64_t unit_size = kXRegSize);
  inline void Drop(const Register& count, uint64_t unit_size = kXRegSize);
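
  // A minimal usage sketch: claim two x-register-sized slots (16 bytes, so
  // sp stays 16-byte aligned), use them, then drop them again:
  //
  //     __ Claim(2);
  //     __ Poke(x0, 0);
  //     __ Drop(2);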

  // Drop 'count' arguments from the stack, rounded up to a multiple of two,
  // without actually accessing memory.
  // We assume the size of the arguments is the pointer size.
  // An optional mode argument can indicate that the receiver must be added
  // to the count explicitly.
  enum ArgumentsCountMode { kCountIncludesReceiver, kCountExcludesReceiver };
  inline void DropArguments(const Register& count,
                            ArgumentsCountMode mode = kCountIncludesReceiver);
  inline void DropArguments(int64_t count,
                            ArgumentsCountMode mode = kCountIncludesReceiver);
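
  // For example, dropping three arguments (receiver included) rounds up to
  // four slots, keeping sp 16-byte aligned:
  //
  //     __ DropArguments(3, kCountIncludesReceiver);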

  // Drop 'count' slots from stack, rounded up to a multiple of two, without
  // actually accessing memory.
  inline void DropSlots(int64_t count);

  // Push a single argument, with padding, to the stack.
  inline void PushArgument(const Register& arg);

  // Add and sub macros.
  inline void Add(const Register& rd, const Register& rn,
                  const Operand& operand);
  inline void Adds(const Register& rd, const Register& rn,
                   const Operand& operand);
  inline void Sub(const Register& rd, const Register& rn,
                  const Operand& operand);

  // Abort execution if argument is not a positive or zero integer, enabled via
  // --debug-code.
  void AssertPositiveOrZero(Register value);

#define DECLARE_FUNCTION(FN, REGTYPE, REG, OP) \
  inline void FN(const REGTYPE REG, const MemOperand& addr);
  LS_MACRO_LIST(DECLARE_FUNCTION)
#undef DECLARE_FUNCTION

  // Push or pop up to 4 registers of the same width to or from the stack.
  //
  // If an argument register is 'NoReg', all further arguments are also assumed
  // to be 'NoReg', and are thus not pushed or popped.
  //
  // Arguments are ordered such that "Push(a, b);" is functionally equivalent
  // to "Push(a); Push(b);".
  //
  // It is valid to push the same register more than once, and there is no
  // restriction on the order in which registers are specified.
  //
  // It is not valid to pop into the same register more than once in one
  // operation, not even into the zero register.
  //
  // The stack pointer must be aligned to 16 bytes on entry and the total size
  // of the specified registers must also be a multiple of 16 bytes.
  //
  // These methods do not modify any registers other than those passed into
  // Pop, the stack pointer, (possibly) the system stack pointer and
  // (possibly) the link register.
  //
  // Some of the methods take an optional LoadLRMode or StoreLRMode template
  // argument, which specifies whether we need to sign the link register at the
  // start of the operation, or authenticate it at the end of the operation,
  // when control flow integrity measures are enabled.
  // When the mode is kDontLoadLR or kDontStoreLR, LR must not be passed as an
  // argument to the operation.
  enum LoadLRMode { kAuthLR, kDontLoadLR };
  enum StoreLRMode { kSignLR, kDontStoreLR };
  template <StoreLRMode lr_mode = kDontStoreLR>
  void Push(const CPURegister& src0, const CPURegister& src1 = NoReg,
            const CPURegister& src2 = NoReg, const CPURegister& src3 = NoReg);
  void Push(const CPURegister& src0, const CPURegister& src1,
            const CPURegister& src2, const CPURegister& src3,
            const CPURegister& src4, const CPURegister& src5 = NoReg,
            const CPURegister& src6 = NoReg, const CPURegister& src7 = NoReg);
  template <LoadLRMode lr_mode = kDontLoadLR>
  void Pop(const CPURegister& dst0, const CPURegister& dst1 = NoReg,
           const CPURegister& dst2 = NoReg, const CPURegister& dst3 = NoReg);
  void Pop(const CPURegister& dst0, const CPURegister& dst1,
           const CPURegister& dst2, const CPURegister& dst3,
           const CPURegister& dst4, const CPURegister& dst5 = NoReg,
           const CPURegister& dst6 = NoReg, const CPURegister& dst7 = NoReg);
  template <StoreLRMode lr_mode = kDontStoreLR>
  void Push(const Register& src0, const VRegister& src1);
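
  // A minimal usage sketch: since "Push(a, b);" is equivalent to "Push(a);
  // Push(b);", a matching Pop must name the registers in reverse order:
  //
  //     __ Push(x0, x1);  // 16 bytes total; sp stays aligned.
  //     __ Pop(x1, x0);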

  void MaybeSaveRegisters(RegList registers);
  void MaybeRestoreRegisters(RegList registers);

  void CallEphemeronKeyBarrier(Register object, Operand offset,
                               SaveFPRegsMode fp_mode);

  void CallRecordWriteStubSaveRegisters(
      Register object, Operand offset,
      RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
      StubCallMode mode = StubCallMode::kCallBuiltinPointer);
  void CallRecordWriteStub(
      Register object, Register slot_address,
      RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
      StubCallMode mode = StubCallMode::kCallBuiltinPointer);

  // For a given |object| and |offset|:
  //   - Move |object| to |dst_object|.
  //   - Compute the address of the slot pointed to by |offset| in |object| and
  //     write it to |dst_slot|.
  // This method makes sure |object| and |offset| are allowed to overlap with
  // the destination registers.
  void MoveObjectAndSlot(Register dst_object, Register dst_slot,
                         Register object, Operand offset);

  // Alternative forms of Push and Pop, taking a RegList or CPURegList that
  // specifies the registers that are to be pushed or popped. Higher-numbered
  // registers are associated with higher memory addresses (as in the A32 push
  // and pop instructions).
  //
  // (Push|Pop)SizeRegList allow you to specify the register size as a
  // parameter. Only kXRegSizeInBits, kWRegSizeInBits, kDRegSizeInBits and
  // kSRegSizeInBits are supported.
  //
  // Otherwise, (Push|Pop)(CPU|X|W|D|S)RegList is preferred.
  //
  // The methods take an optional LoadLRMode or StoreLRMode template argument.
  // When control flow integrity measures are enabled and the link register is
  // included in 'registers', passing kSignLR to PushCPURegList will sign the
  // link register before pushing the list, and passing kAuthLR to
  // PopCPURegList will authenticate it after popping the list.
  template <StoreLRMode lr_mode = kDontStoreLR>
  void PushCPURegList(CPURegList registers);
  template <LoadLRMode lr_mode = kDontLoadLR>
  void PopCPURegList(CPURegList registers);
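
  // A minimal usage sketch (illustrative register choice): save and restore
  // a list of x registers whose total size is a multiple of 16 bytes:
  //
  //     CPURegList saved(x19, x20, x21, x22);
  //     __ PushCPURegList(saved);
  //     ...
  //     __ PopCPURegList(saved);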

  // Calculate how much stack space (in bytes) is required to store caller
  // registers excluding those specified in the arguments.
  int RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
                                      Register exclusion) const;

  // Push caller saved registers on the stack, and return the number of bytes
  // the stack pointer is adjusted by.
  int PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion = no_reg);

  // Restore caller saved registers from the stack, and return the number of
  // bytes the stack pointer is adjusted by.
  int PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion = no_reg);

  // Move an immediate into register dst, and return an Operand object for use
  // with a subsequent instruction that accepts a shift. The value moved into
  // dst is not necessarily equal to imm; it may have had a shifting operation
  // applied to it that will be subsequently undone by the shift applied in the
  // Operand.
  Operand MoveImmediateForShiftedOp(const Register& dst, int64_t imm,
                                    PreShiftImmMode mode);
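
  // A minimal usage sketch (normally this helper is called from within the
  // add/sub and logical macros; the immediate matches the Add example near
  // PreShiftImmMode above):
  //
  //     Operand shifted = MoveImmediateForShiftedOp(x16, 0x1f7de, kAnyShift);
  //     // x16 now holds 0xfbef; 'shifted' is x16 with LSL #1, to be applied
  //     // by the instruction that consumes the Operand.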

  void CheckPageFlag(const Register& object, int mask, Condition cc,
                     Label* condition_met);

  // Compare a register with an operand, and branch to label depending on the
  // condition. May corrupt the status flags.
  inline void CompareAndBranch(const Register& lhs, const Operand& rhs,
                               Condition cond, Label* label);
  inline void CompareTaggedAndBranch(const Register& lhs, const Operand& rhs,
                                     Condition cond, Label* label);

  // Test the bits of register defined by bit_pattern, and branch if ANY of
  // those bits are set. May corrupt the status flags.
  inline void TestAndBranchIfAnySet(const Register& reg,
                                    const uint64_t bit_pattern, Label* label);

  // Test the bits of register defined by bit_pattern, and branch if ALL of
  // those bits are clear (i.e. not set). May corrupt the status flags.
  inline void TestAndBranchIfAllClear(const Register& reg,
                                      const uint64_t bit_pattern, Label* label);
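
  // For example, branch if the Smi tag bit of x0 is clear (i.e. x0 holds a
  // Smi):
  //
  //     __ TestAndBranchIfAllClear(x0, kSmiTagMask, &is_smi);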

  inline void Brk(int code);

  inline void JumpIfSmi(Register value, Label* smi_label,
                        Label* not_smi_label = nullptr);

  inline void JumpIfEqual(Register x, int32_t y, Label* dest);
  inline void JumpIfLessThan(Register x, int32_t y, Label* dest);

  void LoadMap(Register dst, Register object);

  inline void Fmov(VRegister fd, VRegister fn);
  inline void Fmov(VRegister fd, Register rn);
  // Provide explicit double and float interfaces for FP immediate moves,
  // rather than relying on implicit C++ casts. This allows signalling NaNs to
  // be preserved when the immediate matches the format of fd. Most systems
  // convert signalling NaNs to quiet NaNs when converting between float and
  // double.
  inline void Fmov(VRegister fd, double imm);
  inline void Fmov(VRegister fd, float imm);
  // Provide a template to allow other types to be converted automatically.
  template <typename T>
  void Fmov(VRegister fd, T imm) {
    DCHECK(allow_macro_instructions());
    Fmov(fd, static_cast<double>(imm));
  }
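
  // For example, use the float overload for S registers so the immediate is
  // not routed through double (which would quieten a signalling NaN):
  //
  //     __ Fmov(s0, 1.5f);  // Float overload, no float->double round-trip.
  //     __ Fmov(d0, 1.5);   // Double overload.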
  inline void Fmov(Register rd, VRegister fn);

  void Movi(const VRegister& vd, uint64_t imm, Shift shift = LSL,
            int shift_amount = 0);
  void Movi(const VRegister& vd, uint64_t hi, uint64_t lo);

  void LoadFromConstantsTable(Register destination, int constant_index) final;
  void LoadRootRegisterOffset(Register destination, intptr_t offset) final;
  void LoadRootRelative(Register destination, int32_t offset) final;

  void Jump(Register target, Condition cond = al);
  void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al);
  void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
  void Jump(const ExternalReference& reference);

  void Call(Register target);
  void Call(Address target, RelocInfo::Mode rmode);
  void Call(Handle<Code> code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET);
  void Call(ExternalReference target);

  // Generate an indirect call (for when a direct call's range is not
  // adequate).
  void IndirectCall(Address target, RelocInfo::Mode rmode);

  // Load the builtin given by the Smi in |builtin| into the same register.
  void LoadEntryFromBuiltinIndex(Register builtin);
  void LoadEntryFromBuiltin(Builtin builtin, Register destination);
  MemOperand EntryFromBuiltinAsOperand(Builtin builtin);
  void CallBuiltinByIndex(Register builtin);
  void CallBuiltin(Builtin builtin);
  void TailCallBuiltin(Builtin builtin);

  void LoadCodeObjectEntry(Register destination, Register code_object);
  void CallCodeObject(Register code_object);
  void JumpCodeObject(Register code_object,
                      JumpMode jump_mode = JumpMode::kJump);

  // Load code entry point from the CodeDataContainer object.
  void LoadCodeDataContainerEntry(Register destination,
                                  Register code_data_container_object);
  // Load code entry point from the CodeDataContainer object and compute
  // Code object pointer out of it. Must not be used for CodeDataContainers
  // corresponding to builtins, because their entry point values point to
  // the embedded instruction stream in the .text section.
  void LoadCodeDataContainerCodeNonBuiltin(Register destination,
                                           Register code_data_container_object);
  void CallCodeDataContainerObject(Register code_data_container_object);
  void JumpCodeDataContainerObject(Register code_data_container_object,
                                   JumpMode jump_mode = JumpMode::kJump);

  // Helper functions that dispatch either to Call/JumpCodeObject or to
  // Call/JumpCodeDataContainerObject.
  void LoadCodeTEntry(Register destination, Register code);
  void CallCodeTObject(Register code);
  void JumpCodeTObject(Register code, JumpMode jump_mode = JumpMode::kJump);

  // Generates an instruction sequence such that the return address points to
  // the instruction following the call.
  // The return address on the stack is used by frame iteration.
  void StoreReturnAddressAndCall(Register target);

  void CallForDeoptimization(Builtin target, int deopt_id, Label* exit,
                             DeoptimizeKind kind, Label* ret,
                             Label* jump_deoptimization_entry_label);

  // Calls a C function.
  // The called function is not allowed to trigger a
  // garbage collection, since that might move the code and invalidate the
  // return address (unless this is somehow accounted for by the called
  // function).
  void CallCFunction(ExternalReference function, int num_reg_arguments);
  void CallCFunction(ExternalReference function, int num_reg_arguments,
                     int num_double_arguments);
  void CallCFunction(Register function, int num_reg_arguments,
                     int num_double_arguments);

  // Performs a truncating conversion of a floating point number as used by
  // the JS bitwise operations. See ECMA-262 9.5: ToInt32.
  // Exits with 'result' holding the answer.
  void TruncateDoubleToI(Isolate* isolate, Zone* zone, Register result,
                         DoubleRegister double_input, StubCallMode stub_mode,
                         LinkRegisterStatus lr_status);

  inline void Mul(const Register& rd, const Register& rn, const Register& rm);

  inline void Fcvtzs(const Register& rd, const VRegister& fn);
  void Fcvtzs(const VRegister& vd, const VRegister& vn, int fbits = 0) {
    DCHECK(allow_macro_instructions());
    fcvtzs(vd, vn, fbits);
  }

  void Fjcvtzs(const Register& rd, const VRegister& vn) {
    DCHECK(allow_macro_instructions());
    DCHECK(!rd.IsZero());
    fjcvtzs(rd, vn);
  }

  inline void Fcvtzu(const Register& rd, const VRegister& fn);
  void Fcvtzu(const VRegister& vd, const VRegister& vn, int fbits = 0) {
    DCHECK(allow_macro_instructions());
    fcvtzu(vd, vn, fbits);
  }

  inline void Madd(const Register& rd, const Register& rn, const Register& rm,
                   const Register& ra);
  inline void Mneg(const Register& rd, const Register& rn, const Register& rm);
  inline void Sdiv(const Register& rd, const Register& rn, const Register& rm);
  inline void Udiv(const Register& rd, const Register& rn, const Register& rm);
  inline void Msub(const Register& rd, const Register& rn, const Register& rm,
                   const Register& ra);

  inline void Lsl(const Register& rd, const Register& rn, unsigned shift);
  inline void Lsl(const Register& rd, const Register& rn, const Register& rm);
  inline void Umull(const Register& rd, const Register& rn, const Register& rm);
  inline void Smull(const Register& rd, const Register& rn, const Register& rm);

  inline void Sxtb(const Register& rd, const Register& rn);
  inline void Sxth(const Register& rd, const Register& rn);
  inline void Sxtw(const Register& rd, const Register& rn);
  inline void Ubfiz(const Register& rd, const Register& rn, unsigned lsb,
                    unsigned width);
  inline void Ubfx(const Register& rd, const Register& rn, unsigned lsb,
                   unsigned width);
  inline void Lsr(const Register& rd, const Register& rn, unsigned shift);
  inline void Lsr(const Register& rd, const Register& rn, const Register& rm);
  inline void Ror(const Register& rd, const Register& rs, unsigned shift);
  inline void Ror(const Register& rd, const Register& rn, const Register& rm);
  inline void Cmn(const Register& rn, const Operand& operand);
  inline void Fadd(const VRegister& fd, const VRegister& fn,
                   const VRegister& fm);
  inline void Fcmp(const VRegister& fn, const VRegister& fm);
  inline void Fcmp(const VRegister& fn, double value);
  inline void Fabs(const VRegister& fd, const VRegister& fn);
  inline void Fmul(const VRegister& fd, const VRegister& fn,
                   const VRegister& fm);
  inline void Fsub(const VRegister& fd, const VRegister& fn,
                   const VRegister& fm);
  inline void Fdiv(const VRegister& fd, const VRegister& fn,
                   const VRegister& fm);
  inline void Fmax(const VRegister& fd, const VRegister& fn,
                   const VRegister& fm);
  inline void Fmin(const VRegister& fd, const VRegister& fn,
                   const VRegister& fm);
  inline void Rbit(const Register& rd, const Register& rn);
  inline void Rev(const Register& rd, const Register& rn);

  enum AdrHint {
    // The target must be within the immediate range of adr.
    kAdrNear,
    // The target may be outside of the immediate range of adr. Additional
    // instructions may be emitted.
    kAdrFar
  };
  void Adr(const Register& rd, Label* label, AdrHint = kAdrNear);

  // Add/sub with carry macros.
  inline void Adc(const Register& rd, const Register& rn,
                  const Operand& operand);

  // Conditional macros.
  inline void Ccmp(const Register& rn, const Operand& operand, StatusFlags nzcv,
                   Condition cond);
  inline void CcmpTagged(const Register& rn, const Operand& operand,
                         StatusFlags nzcv, Condition cond);

  inline void Clz(const Register& rd, const Register& rn);

  // Poke 'src' onto the stack. The offset is in bytes. The stack pointer must
  // be 16 byte aligned.
  // When the optional template argument is kSignLR and control flow integrity
  // measures are enabled, we sign the link register before poking it onto the
  // stack. 'src' must be lr in this case.
  template <StoreLRMode lr_mode = kDontStoreLR>
  void Poke(const CPURegister& src, const Operand& offset);

  // Peek at a value on the stack, and put it in 'dst'. The offset is in bytes.
  // The stack pointer must be aligned to 16 bytes.
  // When the optional template argument is kAuthLR and control flow integrity
  // measures are enabled, we authenticate the link register after peeking the
  // value. 'dst' must be lr in this case.
  template <LoadLRMode lr_mode = kDontLoadLR>
  void Peek(const CPURegister& dst, const Operand& offset);

  // Poke 'src1' and 'src2' onto the stack. The values written will be adjacent
  // with 'src2' at a higher address than 'src1'. The offset is in bytes. The
  // stack pointer must be 16 byte aligned.
  void PokePair(const CPURegister& src1, const CPURegister& src2, int offset);
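
  // A minimal usage sketch: store x0 at [sp] and x1 at [sp + 8] in a single
  // operation, then read the first slot back:
  //
  //     __ PokePair(x0, x1, 0);
  //     __ Peek(x0, 0);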

  inline void Sbfx(const Register& rd, const Register& rn, unsigned lsb,
                   unsigned width);

  inline void Bfi(const Register& rd, const Register& rn, unsigned lsb,
                  unsigned width);

  inline void Scvtf(const VRegister& fd, const Register& rn,
                    unsigned fbits = 0);
  void Scvtf(const VRegister& vd, const VRegister& vn, int fbits = 0) {
    DCHECK(allow_macro_instructions());
    scvtf(vd, vn, fbits);
  }
  inline void Ucvtf(const VRegister& fd, const Register& rn,
                    unsigned fbits = 0);
  void Ucvtf(const VRegister& vd, const VRegister& vn, int fbits = 0) {
    DCHECK(allow_macro_instructions());
    ucvtf(vd, vn, fbits);
  }

  void AssertFPCRState(Register fpcr = NoReg);
  void CanonicalizeNaN(const VRegister& dst, const VRegister& src);
  void CanonicalizeNaN(const VRegister& reg) { CanonicalizeNaN(reg, reg); }

  inline void CmovX(const Register& rd, const Register& rn, Condition cond);
  inline void Cset(const Register& rd, Condition cond);
  inline void Csetm(const Register& rd, Condition cond);
  inline void Fccmp(const VRegister& fn, const VRegister& fm, StatusFlags nzcv,
                    Condition cond);
  inline void Csinc(const Register& rd, const Register& rn, const Register& rm,
                    Condition cond);

  inline void Fcvt(const VRegister& fd, const VRegister& fn);

  int ActivationFrameAlignment();

  void Ins(const VRegister& vd, int vd_index, const VRegister& vn,
           int vn_index) {
    DCHECK(allow_macro_instructions());
    ins(vd, vd_index, vn, vn_index);
  }
  void Ins(const VRegister& vd, int vd_index, const Register& rn) {
    DCHECK(allow_macro_instructions());
    ins(vd, vd_index, rn);
  }

  inline void Bl(Label* label);
  inline void Br(const Register& xn);

  inline void Uxtb(const Register& rd, const Register& rn);
  inline void Uxth(const Register& rd, const Register& rn);
  inline void Uxtw(const Register& rd, const Register& rn);

  void Dup(const VRegister& vd, const VRegister& vn, int index) {
    DCHECK(allow_macro_instructions());
    dup(vd, vn, index);
  }
  void Dup(const VRegister& vd, const Register& rn) {
    DCHECK(allow_macro_instructions());
    dup(vd, rn);
  }

#define DECLARE_FUNCTION(FN, REGTYPE, REG, REG2, OP) \
  inline void FN(const REGTYPE REG, const REGTYPE REG2, const MemOperand& addr);
  LSPAIR_MACRO_LIST(DECLARE_FUNCTION)
#undef DECLARE_FUNCTION

  void St1(const VRegister& vt, const MemOperand& dst) {
    DCHECK(allow_macro_instructions());
    st1(vt, dst);
  }
  void St1(const VRegister& vt, const VRegister& vt2, const MemOperand& dst) {
    DCHECK(allow_macro_instructions());
    st1(vt, vt2, dst);
  }
  void St1(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
           const MemOperand& dst) {
    DCHECK(allow_macro_instructions());
    st1(vt, vt2, vt3, dst);
  }
  void St1(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
           const VRegister& vt4, const MemOperand& dst) {
    DCHECK(allow_macro_instructions());
    st1(vt, vt2, vt3, vt4, dst);
  }
  void St1(const VRegister& vt, int lane, const MemOperand& dst) {
    DCHECK(allow_macro_instructions());
    st1(vt, lane, dst);
  }

#define NEON_2VREG_SHIFT_MACRO_LIST(V) \
  V(rshrn, Rshrn)                      \
  V(rshrn2, Rshrn2)                    \
  V(shl, Shl)                          \
  V(shll, Shll)                        \
  V(shll2, Shll2)                      \
  V(shrn, Shrn)                        \
  V(shrn2, Shrn2)                      \
  V(sli, Sli)                          \
  V(sqrshrn, Sqrshrn)                  \
  V(sqrshrn2, Sqrshrn2)                \
  V(sqrshrun, Sqrshrun)                \
  V(sqrshrun2, Sqrshrun2)              \
  V(sqshl, Sqshl)                      \
  V(sqshlu, Sqshlu)                    \
  V(sqshrn, Sqshrn)                    \
  V(sqshrn2, Sqshrn2)                  \
  V(sqshrun, Sqshrun)                  \
  V(sqshrun2, Sqshrun2)                \
  V(sri, Sri)                          \
  V(srshr, Srshr)                      \
  V(srsra, Srsra)                      \
  V(sshll, Sshll)                      \
  V(sshll2, Sshll2)                    \
  V(sshr, Sshr)                        \
  V(ssra, Ssra)                        \
  V(uqrshrn, Uqrshrn)                  \
  V(uqrshrn2, Uqrshrn2)                \
  V(uqshl, Uqshl)                      \
  V(uqshrn, Uqshrn)                    \
  V(uqshrn2, Uqshrn2)                  \
  V(urshr, Urshr)                      \
  V(ursra, Ursra)                      \
  V(ushll, Ushll)                      \
  V(ushll2, Ushll2)                    \
  V(ushr, Ushr)                        \
  V(usra, Usra)

#define DEFINE_MACRO_ASM_FUNC(ASM, MASM)                           \
  void MASM(const VRegister& vd, const VRegister& vn, int shift) { \
    DCHECK(allow_macro_instructions());                            \
    ASM(vd, vn, shift);                                            \
  }
  NEON_2VREG_SHIFT_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
#undef DEFINE_MACRO_ASM_FUNC

  void Umov(const Register& rd, const VRegister& vn, int vn_index) {
    DCHECK(allow_macro_instructions());
    umov(rd, vn, vn_index);
  }
  void Tbl(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
    DCHECK(allow_macro_instructions());
    tbl(vd, vn, vm);
  }
  void Tbl(const VRegister& vd, const VRegister& vn, const VRegister& vn2,
           const VRegister& vm) {
    DCHECK(allow_macro_instructions());
    tbl(vd, vn, vn2, vm);
  }
  void Tbl(const VRegister& vd, const VRegister& vn, const VRegister& vn2,
           const VRegister& vn3, const VRegister& vm) {
    DCHECK(allow_macro_instructions());
    tbl(vd, vn, vn2, vn3, vm);
  }
  void Tbl(const VRegister& vd, const VRegister& vn, const VRegister& vn2,
           const VRegister& vn3, const VRegister& vn4, const VRegister& vm) {
    DCHECK(allow_macro_instructions());
    tbl(vd, vn, vn2, vn3, vn4, vm);
  }
  void Ext(const VRegister& vd, const VRegister& vn, const VRegister& vm,
           int index) {
    DCHECK(allow_macro_instructions());
    ext(vd, vn, vm, index);
  }

  void Smov(const Register& rd, const VRegister& vn, int vn_index) {
    DCHECK(allow_macro_instructions());
    smov(rd, vn, vn_index);
  }

// Load-acquire/store-release macros.
#define DECLARE_FUNCTION(FN, OP) \
  inline void FN(const Register& rt, const Register& rn);
  LDA_STL_MACRO_LIST(DECLARE_FUNCTION)
#undef DECLARE_FUNCTION

  // Load an object from the root table.
  void LoadRoot(Register destination, RootIndex index) final;
  void PushRoot(RootIndex index);

  inline void Ret(const Register& xn = lr);
1311 
  // Perform a conversion from a double to a signed int64. If the input fits
  // in the range of the 64-bit result, execution branches to done. Otherwise,
  // execution falls through, and the sign of the result can be used to
  // determine if overflow was towards positive or negative infinity.
  //
  // On successful conversion, the least significant 32 bits of the result are
  // equivalent to the ECMA-262 operation "ToInt32".
  void TryConvertDoubleToInt64(Register result, DoubleRegister input,
                               Label* done);
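  // A hedged usage sketch (illustrative only; the registers and the label are
  // arbitrary choices, not a fixed calling convention):
  //
  //   Label done;
  //   TryConvertDoubleToInt64(x0, d0, &done);
  //   // Fell through: the conversion overflowed. The sign of x0 indicates
  //   // whether overflow was towards positive or negative infinity.
  //   Bind(&done);
  //   // On the branch-taken path, x0.W() holds the ToInt32 result.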

  inline void Mrs(const Register& rt, SystemRegister sysreg);
  inline void Msr(SystemRegister sysreg, const Register& rt);

  // Prologue claims an extra slot due to arm64's alignment constraints.
  static constexpr int kExtraSlotClaimedByPrologue = 1;
  // Generates function prologue code.
  void Prologue();

  void Cmgt(const VRegister& vd, const VRegister& vn, int imm) {
    DCHECK(allow_macro_instructions());
    cmgt(vd, vn, imm);
  }
  void Cmge(const VRegister& vd, const VRegister& vn, int imm) {
    DCHECK(allow_macro_instructions());
    cmge(vd, vn, imm);
  }
  void Cmeq(const VRegister& vd, const VRegister& vn, int imm) {
    DCHECK(allow_macro_instructions());
    cmeq(vd, vn, imm);
  }
  void Cmlt(const VRegister& vd, const VRegister& vn, int imm) {
    DCHECK(allow_macro_instructions());
    cmlt(vd, vn, imm);
  }

  inline void Neg(const Register& rd, const Operand& operand);
  inline void Negs(const Register& rd, const Operand& operand);

  // Compute rd = abs(rm).
  // This function clobbers the condition flags. On output the overflow flag is
  // set iff the negation overflowed.
  //
  // If rm is the minimum representable value, the result is not representable.
  // Handlers for each case can be specified using the relevant labels.
  void Abs(const Register& rd, const Register& rm,
           Label* is_not_representable = nullptr,
           Label* is_representable = nullptr);
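  // A hedged usage sketch (illustrative; the registers and label are
  // arbitrary):
  //
  //   Label not_representable;
  //   Abs(x0, x1, &not_representable);
  //   // Fall-through: x0 == abs(x1).
  //   ...
  //   Bind(&not_representable);
  //   // Reached only when x1 was INT64_MIN, whose absolute value does not
  //   // fit in a 64-bit signed integer.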

  inline void Cls(const Register& rd, const Register& rn);
  inline void Cneg(const Register& rd, const Register& rn, Condition cond);
  inline void Rev16(const Register& rd, const Register& rn);
  inline void Rev32(const Register& rd, const Register& rn);
  inline void Fcvtns(const Register& rd, const VRegister& fn);
  inline void Fcvtnu(const Register& rd, const VRegister& fn);
  inline void Fcvtms(const Register& rd, const VRegister& fn);
  inline void Fcvtmu(const Register& rd, const VRegister& fn);
  inline void Fcvtas(const Register& rd, const VRegister& fn);
  inline void Fcvtau(const Register& rd, const VRegister& fn);

  // Compute the start of the generated instruction stream from the current PC.
  // This is an alternative to embedding the {CodeObject} handle as a reference.
  void ComputeCodeStartAddress(const Register& rd);

  // ---------------------------------------------------------------------------
  // Pointer compression support

  // Loads a field containing a HeapObject and decompresses it if pointer
  // compression is enabled.
  void LoadTaggedPointerField(const Register& destination,
                              const MemOperand& field_operand);

  // Loads a field containing any tagged value and decompresses it if necessary.
  void LoadAnyTaggedField(const Register& destination,
                          const MemOperand& field_operand);

  // Loads a field containing a tagged signed value and decompresses it if
  // necessary.
  void LoadTaggedSignedField(const Register& destination,
                             const MemOperand& field_operand);

  // Loads a field containing a smi value and untags it.
  void SmiUntagField(Register dst, const MemOperand& src);

  // Compresses and stores a tagged value to the given on-heap location.
  void StoreTaggedField(const Register& value,
                        const MemOperand& dst_field_operand);
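  // A hedged usage sketch (illustrative; the field offsets are standard V8
  // constants used here only as examples, and the registers are arbitrary):
  //
  //   LoadTaggedPointerField(x0, FieldMemOperand(x1, HeapObject::kMapOffset));
  //   StoreTaggedField(x2,
  //                    FieldMemOperand(x1, JSObject::kPropertiesOrHashOffset));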

  void AtomicStoreTaggedField(const Register& value, const Register& dst_base,
                              const Register& dst_index, const Register& temp);

  void DecompressTaggedSigned(const Register& destination,
                              const MemOperand& field_operand);
  void DecompressTaggedPointer(const Register& destination,
                               const MemOperand& field_operand);
  void DecompressTaggedPointer(const Register& destination,
                               const Register& source);
  void DecompressAnyTagged(const Register& destination,
                           const MemOperand& field_operand);

  void AtomicDecompressTaggedSigned(const Register& destination,
                                    const Register& base, const Register& index,
                                    const Register& temp);
  void AtomicDecompressTaggedPointer(const Register& destination,
                                     const Register& base,
                                     const Register& index,
                                     const Register& temp);
  void AtomicDecompressAnyTagged(const Register& destination,
                                 const Register& base, const Register& index,
                                 const Register& temp);

  // Restore FP and LR from the values stored in the current frame. This will
  // authenticate the LR when pointer authentication is enabled.
  void RestoreFPAndLR();

#if V8_ENABLE_WEBASSEMBLY
  void StoreReturnAddressInWasmExitFrame(Label* return_location);
#endif  // V8_ENABLE_WEBASSEMBLY

  // Wasm helpers. These instructions don't have a direct lowering to native
  // instructions. The helpers below let us define the optimal code sequence
  // once and use it from both TurboFan and Liftoff.
  void PopcntHelper(Register dst, Register src);
  void I64x2BitMask(Register dst, VRegister src);
  void I64x2AllTrue(Register dst, VRegister src);

  // ---------------------------------------------------------------------------
  // V8 Heap sandbox support

  // Loads a field containing an off-heap pointer and does the necessary
  // decoding if the V8 heap sandbox is enabled.
  void LoadExternalPointerField(Register destination, MemOperand field_operand,
                                ExternalPointerTag tag,
                                Register isolate_root = Register::no_reg());

 protected:
  // The actual Push and Pop implementations. These don't generate any code
  // other than that required for the push or pop. This allows
  // (Push|Pop)CPURegList to bundle together run-time assertions for a large
  // block of registers.
  //
  // Note that size is per register, and is specified in bytes.
  void PushHelper(int count, int size, const CPURegister& src0,
                  const CPURegister& src1, const CPURegister& src2,
                  const CPURegister& src3);
  void PopHelper(int count, int size, const CPURegister& dst0,
                 const CPURegister& dst1, const CPURegister& dst2,
                 const CPURegister& dst3);

  void ConditionalCompareMacro(const Register& rn, const Operand& operand,
                               StatusFlags nzcv, Condition cond,
                               ConditionalCompareOp op);

  void AddSubWithCarryMacro(const Register& rd, const Register& rn,
                            const Operand& operand, FlagsUpdate S,
                            AddSubWithCarryOp op);

  // Call Printf. On a native build, a simple call will be generated, but if the
  // simulator is being used then a suitable pseudo-instruction is used. The
  // arguments and stack must be prepared by the caller as for a normal AAPCS64
  // call to 'printf'.
  //
  // The 'args' argument should point to an array of variable arguments in their
  // proper PCS registers (and in calling order). The argument registers can
  // have mixed types. The format string (x0) should not be included.
  void CallPrintf(int arg_count = 0, const CPURegister* args = nullptr);
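  // A hedged usage sketch (illustrative; it assumes the generated code has
  // already placed the format string pointer in x0 and the value to print in
  // x1, per AAPCS64):
  //
  //   CPURegister printf_args[] = {x1};
  //   CallPrintf(1, printf_args);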

 private:
#if DEBUG
  // Tell whether any of the macro instructions can be used. When false, the
  // MacroAssembler will assert if a method which can emit a variable number
  // of instructions is called.
  bool allow_macro_instructions_ = true;
#endif

  // Scratch registers available for use by the MacroAssembler.
  CPURegList tmp_list_ = DefaultTmpList();
  CPURegList fptmp_list_ = DefaultFPTmpList();

  // Helps resolve branching to labels potentially out of range.
  // If the label is not bound, it registers the information necessary to later
  // be able to emit a veneer for this branch if necessary.
  // If the label is bound, it returns true if the label (or the previous link
  // in the label chain) is out of range. In that case the caller is responsible
  // for generating appropriate code.
  // Otherwise it returns false.
  // This function also checks whether veneers need to be emitted.
  bool NeedExtraInstructionsOrRegisterBranch(Label* label,
                                             ImmBranchType branch_type);

  void Movi16bitHelper(const VRegister& vd, uint64_t imm);
  void Movi32bitHelper(const VRegister& vd, uint64_t imm);
  void Movi64bitHelper(const VRegister& vd, uint64_t imm);

  void LoadStoreMacro(const CPURegister& rt, const MemOperand& addr,
                      LoadStoreOp op);

  void LoadStorePairMacro(const CPURegister& rt, const CPURegister& rt2,
                          const MemOperand& addr, LoadStorePairOp op);

  int64_t CalculateTargetOffset(Address target, RelocInfo::Mode rmode,
                                byte* pc);

  void JumpHelper(int64_t offset, RelocInfo::Mode rmode, Condition cond = al);
};

class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
 public:
  using TurboAssembler::TurboAssembler;

  // Instruction set functions ------------------------------------------------
  // Logical macros.
  inline void Bics(const Register& rd, const Register& rn,
                   const Operand& operand);

  inline void Adcs(const Register& rd, const Register& rn,
                   const Operand& operand);
  inline void Sbc(const Register& rd, const Register& rn,
                  const Operand& operand);
  inline void Sbcs(const Register& rd, const Register& rn,
                   const Operand& operand);
  inline void Ngc(const Register& rd, const Operand& operand);
  inline void Ngcs(const Register& rd, const Operand& operand);

  inline void Ccmn(const Register& rn, const Operand& operand, StatusFlags nzcv,
                   Condition cond);

#define DECLARE_FUNCTION(FN, OP) \
  inline void FN(const Register& rs, const Register& rt, const Register& rn);
  STLX_MACRO_LIST(DECLARE_FUNCTION)
#undef DECLARE_FUNCTION

  // Branch type inversion relies on these relations.
  STATIC_ASSERT((reg_zero == (reg_not_zero ^ 1)) &&
                (reg_bit_clear == (reg_bit_set ^ 1)) &&
                (always == (never ^ 1)));

  inline void Bfxil(const Register& rd, const Register& rn, unsigned lsb,
                    unsigned width);
  inline void Cinc(const Register& rd, const Register& rn, Condition cond);
  inline void Cinv(const Register& rd, const Register& rn, Condition cond);
  inline void CzeroX(const Register& rd, Condition cond);
  inline void Csinv(const Register& rd, const Register& rn, const Register& rm,
                    Condition cond);
  inline void Csneg(const Register& rd, const Register& rn, const Register& rm,
                    Condition cond);
  inline void Extr(const Register& rd, const Register& rn, const Register& rm,
                   unsigned lsb);
  void Fcvtl(const VRegister& vd, const VRegister& vn) {
    DCHECK(allow_macro_instructions());
    fcvtl(vd, vn);
  }
  void Fcvtl2(const VRegister& vd, const VRegister& vn) {
    DCHECK(allow_macro_instructions());
    fcvtl2(vd, vn);
  }
  void Fcvtn(const VRegister& vd, const VRegister& vn) {
    DCHECK(allow_macro_instructions());
    fcvtn(vd, vn);
  }
  void Fcvtn2(const VRegister& vd, const VRegister& vn) {
    DCHECK(allow_macro_instructions());
    fcvtn2(vd, vn);
  }
  void Fcvtxn(const VRegister& vd, const VRegister& vn) {
    DCHECK(allow_macro_instructions());
    fcvtxn(vd, vn);
  }
  void Fcvtxn2(const VRegister& vd, const VRegister& vn) {
    DCHECK(allow_macro_instructions());
    fcvtxn2(vd, vn);
  }
  inline void Fmadd(const VRegister& fd, const VRegister& fn,
                    const VRegister& fm, const VRegister& fa);
  inline void Fmaxnm(const VRegister& fd, const VRegister& fn,
                     const VRegister& fm);
  inline void Fminnm(const VRegister& fd, const VRegister& fn,
                     const VRegister& fm);
  inline void Fmsub(const VRegister& fd, const VRegister& fn,
                    const VRegister& fm, const VRegister& fa);
  inline void Fnmadd(const VRegister& fd, const VRegister& fn,
                     const VRegister& fm, const VRegister& fa);
  inline void Fnmsub(const VRegister& fd, const VRegister& fn,
                     const VRegister& fm, const VRegister& fa);
  inline void Hint(SystemHint code);
  inline void Hlt(int code);
  inline void Ldnp(const CPURegister& rt, const CPURegister& rt2,
                   const MemOperand& src);
  inline void Movk(const Register& rd, uint64_t imm, int shift = -1);
  inline void Nop() { nop(); }
  void Mvni(const VRegister& vd, const int imm8, Shift shift = LSL,
            const int shift_amount = 0) {
    DCHECK(allow_macro_instructions());
    mvni(vd, imm8, shift, shift_amount);
  }
  inline void Rev(const Register& rd, const Register& rn);
  inline void Sbfiz(const Register& rd, const Register& rn, unsigned lsb,
                    unsigned width);
  inline void Smaddl(const Register& rd, const Register& rn, const Register& rm,
                     const Register& ra);
  inline void Smsubl(const Register& rd, const Register& rn, const Register& rm,
                     const Register& ra);
  inline void Smulh(const Register& rd, const Register& rn, const Register& rm);
  inline void Stnp(const CPURegister& rt, const CPURegister& rt2,
                   const MemOperand& dst);
  inline void Umaddl(const Register& rd, const Register& rn, const Register& rm,
                     const Register& ra);
  inline void Umsubl(const Register& rd, const Register& rn, const Register& rm,
                     const Register& ra);

  void Cmle(const VRegister& vd, const VRegister& vn, int imm) {
    DCHECK(allow_macro_instructions());
    cmle(vd, vn, imm);
  }

  void Ld1(const VRegister& vt, const MemOperand& src) {
    DCHECK(allow_macro_instructions());
    ld1(vt, src);
  }
  void Ld1(const VRegister& vt, const VRegister& vt2, const MemOperand& src) {
    DCHECK(allow_macro_instructions());
    ld1(vt, vt2, src);
  }
  void Ld1(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
           const MemOperand& src) {
    DCHECK(allow_macro_instructions());
    ld1(vt, vt2, vt3, src);
  }
  void Ld1(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
           const VRegister& vt4, const MemOperand& src) {
    DCHECK(allow_macro_instructions());
    ld1(vt, vt2, vt3, vt4, src);
  }
  void Ld1(const VRegister& vt, int lane, const MemOperand& src) {
    DCHECK(allow_macro_instructions());
    ld1(vt, lane, src);
  }
  void Ld1r(const VRegister& vt, const MemOperand& src) {
    DCHECK(allow_macro_instructions());
    ld1r(vt, src);
  }
  void Ld2(const VRegister& vt, const VRegister& vt2, const MemOperand& src) {
    DCHECK(allow_macro_instructions());
    ld2(vt, vt2, src);
  }
  void Ld2(const VRegister& vt, const VRegister& vt2, int lane,
           const MemOperand& src) {
    DCHECK(allow_macro_instructions());
    ld2(vt, vt2, lane, src);
  }
  void Ld2r(const VRegister& vt, const VRegister& vt2, const MemOperand& src) {
    DCHECK(allow_macro_instructions());
    ld2r(vt, vt2, src);
  }
  void Ld3(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
           const MemOperand& src) {
    DCHECK(allow_macro_instructions());
    ld3(vt, vt2, vt3, src);
  }
  void Ld3(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
           int lane, const MemOperand& src) {
    DCHECK(allow_macro_instructions());
    ld3(vt, vt2, vt3, lane, src);
  }
  void Ld3r(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
            const MemOperand& src) {
    DCHECK(allow_macro_instructions());
    ld3r(vt, vt2, vt3, src);
  }
  void Ld4(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
           const VRegister& vt4, const MemOperand& src) {
    DCHECK(allow_macro_instructions());
    ld4(vt, vt2, vt3, vt4, src);
  }
  void Ld4(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
           const VRegister& vt4, int lane, const MemOperand& src) {
    DCHECK(allow_macro_instructions());
    ld4(vt, vt2, vt3, vt4, lane, src);
  }
  void Ld4r(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
            const VRegister& vt4, const MemOperand& src) {
    DCHECK(allow_macro_instructions());
    ld4r(vt, vt2, vt3, vt4, src);
  }
  void St2(const VRegister& vt, const VRegister& vt2, const MemOperand& dst) {
    DCHECK(allow_macro_instructions());
    st2(vt, vt2, dst);
  }
  void St3(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
           const MemOperand& dst) {
    DCHECK(allow_macro_instructions());
    st3(vt, vt2, vt3, dst);
  }
  void St4(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
           const VRegister& vt4, const MemOperand& dst) {
    DCHECK(allow_macro_instructions());
    st4(vt, vt2, vt3, vt4, dst);
  }
  void St2(const VRegister& vt, const VRegister& vt2, int lane,
           const MemOperand& dst) {
    DCHECK(allow_macro_instructions());
    st2(vt, vt2, lane, dst);
  }
  void St3(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
           int lane, const MemOperand& dst) {
    DCHECK(allow_macro_instructions());
    st3(vt, vt2, vt3, lane, dst);
  }
  void St4(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
           const VRegister& vt4, int lane, const MemOperand& dst) {
    DCHECK(allow_macro_instructions());
    st4(vt, vt2, vt3, vt4, lane, dst);
  }
  void Tbx(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
    DCHECK(allow_macro_instructions());
    tbx(vd, vn, vm);
  }
  void Tbx(const VRegister& vd, const VRegister& vn, const VRegister& vn2,
           const VRegister& vm) {
    DCHECK(allow_macro_instructions());
    tbx(vd, vn, vn2, vm);
  }
  void Tbx(const VRegister& vd, const VRegister& vn, const VRegister& vn2,
           const VRegister& vn3, const VRegister& vm) {
    DCHECK(allow_macro_instructions());
    tbx(vd, vn, vn2, vn3, vm);
  }
  void Tbx(const VRegister& vd, const VRegister& vn, const VRegister& vn2,
           const VRegister& vn3, const VRegister& vn4, const VRegister& vm) {
    DCHECK(allow_macro_instructions());
    tbx(vd, vn, vn2, vn3, vn4, vm);
  }

  // For the 'lr_mode' template argument of the following methods, see
  // PushCPURegList/PopCPURegList.
  template <StoreLRMode lr_mode = kDontStoreLR>
  inline void PushSizeRegList(
      RegList registers, unsigned reg_size,
      CPURegister::RegisterType type = CPURegister::kRegister) {
    PushCPURegList<lr_mode>(CPURegList(type, reg_size, registers));
  }
  template <LoadLRMode lr_mode = kDontLoadLR>
  inline void PopSizeRegList(
      RegList registers, unsigned reg_size,
      CPURegister::RegisterType type = CPURegister::kRegister) {
    PopCPURegList<lr_mode>(CPURegList(type, reg_size, registers));
  }
  template <StoreLRMode lr_mode = kDontStoreLR>
  inline void PushXRegList(RegList regs) {
    PushSizeRegList<lr_mode>(regs, kXRegSizeInBits);
  }
  template <LoadLRMode lr_mode = kDontLoadLR>
  inline void PopXRegList(RegList regs) {
    PopSizeRegList<lr_mode>(regs, kXRegSizeInBits);
  }
  inline void PushWRegList(RegList regs) {
    PushSizeRegList(regs, kWRegSizeInBits);
  }
  inline void PopWRegList(RegList regs) {
    PopSizeRegList(regs, kWRegSizeInBits);
  }
  inline void PushQRegList(RegList regs) {
    PushSizeRegList(regs, kQRegSizeInBits, CPURegister::kVRegister);
  }
  inline void PopQRegList(RegList regs) {
    PopSizeRegList(regs, kQRegSizeInBits, CPURegister::kVRegister);
  }
  inline void PushDRegList(RegList regs) {
    PushSizeRegList(regs, kDRegSizeInBits, CPURegister::kVRegister);
  }
  inline void PopDRegList(RegList regs) {
    PopSizeRegList(regs, kDRegSizeInBits, CPURegister::kVRegister);
  }
  inline void PushSRegList(RegList regs) {
    PushSizeRegList(regs, kSRegSizeInBits, CPURegister::kVRegister);
  }
  inline void PopSRegList(RegList regs) {
    PopSizeRegList(regs, kSRegSizeInBits, CPURegister::kVRegister);
  }
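  // A hedged usage sketch (illustrative; it assumes the bitfield RegList
  // representation of this V8 version, where registers are combined via their
  // bit() masks):
  //
  //   PushXRegList(x19.bit() | x20.bit());
  //   ...
  //   PopXRegList(x19.bit() | x20.bit());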

  // Push the specified register 'count' times.
  void PushMultipleTimes(CPURegister src, Register count);

  // Peek at two values on the stack, and put them in 'dst1' and 'dst2'. The
  // values peeked will be adjacent, with the value in 'dst2' being from a
  // higher address than 'dst1'. The offset is in bytes. The stack pointer must
  // be aligned to 16 bytes.
  void PeekPair(const CPURegister& dst1, const CPURegister& dst2, int offset);
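  // A hedged usage sketch (illustrative; registers are arbitrary and the
  // offset is in bytes):
  //
  //   // Read sp[0] into x0 and sp[8] into x1 without popping.
  //   PeekPair(x0, x1, 0);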

  // Preserve the callee-saved registers (as defined by AAPCS64).
  //
  // Higher-numbered registers are pushed before lower-numbered registers, and
  // thus get higher addresses.
  // Floating-point registers are pushed before general-purpose registers, and
  // thus get higher addresses.
  //
  // When control flow integrity measures are enabled, this method signs the
  // link register before pushing it.
  //
  // Note that registers are not checked for invalid values. Use this method
  // only if you know that the GC won't try to examine the values on the stack.
  void PushCalleeSavedRegisters();

  // Restore the callee-saved registers (as defined by AAPCS64).
  //
  // Higher-numbered registers are popped after lower-numbered registers, and
  // thus come from higher addresses.
  // Floating-point registers are popped after general-purpose registers, and
  // thus come from higher addresses.
  //
  // When control flow integrity measures are enabled, this method
  // authenticates the link register after popping it.
  void PopCalleeSavedRegisters();
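  // A hedged pairing sketch (illustrative):
  //
  //   PushCalleeSavedRegisters();
  //   // ... code that may clobber callee-saved registers ...
  //   PopCalleeSavedRegisters();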

  // Helpers ------------------------------------------------------------------

  template <typename Field>
  void DecodeField(Register dst, Register src) {
    static const int shift = Field::kShift;
    static const int setbits = CountSetBits(Field::kMask, 32);
    Ubfx(dst, src, shift, setbits);
  }

  template <typename Field>
  void DecodeField(Register reg) {
    DecodeField<Field>(reg, reg);
  }
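  // A hedged usage sketch (illustrative; Map::Bits2::ElementsKindBits stands
  // in for any BitField-style Field type providing kShift and kMask):
  //
  //   // Extract the elements-kind bits of the raw bit field in x1 into x0.
  //   DecodeField<Map::Bits2::ElementsKindBits>(x0, x1);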

  Operand ReceiverOperand(const Register arg_count);

  // ---- SMI and Number Utilities ----

  inline void SmiTag(Register dst, Register src);
  inline void SmiTag(Register smi);

  inline void JumpIfNotSmi(Register value, Label* not_smi_label);

  // Abort execution if argument is a smi, enabled via --debug-code.
  void AssertNotSmi(Register object,
                    AbortReason reason = AbortReason::kOperandIsASmi);

  // Abort execution if argument is not a CodeT, enabled via --debug-code.
  void AssertCodeT(Register object);

  // Abort execution if argument is not a Constructor, enabled via --debug-code.
  void AssertConstructor(Register object);

  // Abort execution if argument is not a JSFunction, enabled via --debug-code.
  void AssertFunction(Register object);

  // Abort execution if argument is not a JSGeneratorObject (or subclass),
  // enabled via --debug-code.
  void AssertGeneratorObject(Register object);

  // Abort execution if argument is not a JSBoundFunction,
  // enabled via --debug-code.
  void AssertBoundFunction(Register object);

  // Abort execution if argument is not undefined or an AllocationSite, enabled
  // via --debug-code.
  void AssertUndefinedOrAllocationSite(Register object);

  // ---- Calling / Jumping helpers ----

  void CallRuntime(const Runtime::Function* f, int num_arguments,
                   SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore);

  // Convenience function: Same as above, but takes the fid instead.
  void CallRuntime(Runtime::FunctionId fid, int num_arguments,
                   SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore) {
    CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles);
  }

  // Convenience function: Same as above, but deduces num_arguments from the
  // runtime function's declared nargs.
  void CallRuntime(Runtime::FunctionId fid,
                   SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore) {
    const Runtime::Function* function = Runtime::FunctionForId(fid);
    CallRuntime(function, function->nargs, save_doubles);
  }
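  // A hedged usage sketch (illustrative; the runtime function id is an
  // arbitrary example):
  //
  //   // num_arguments is deduced from the Runtime::Function descriptor.
  //   CallRuntime(Runtime::kStackGuard);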

  void TailCallRuntime(Runtime::FunctionId fid);

  // Jump to a runtime routine.
  void JumpToExternalReference(const ExternalReference& builtin,
                               bool builtin_exit_frame = false);

  // Generates a trampoline to jump to the off-heap instruction stream.
  void JumpToInstructionStream(Address entry);

  // Registers used through the invocation chain are hard-coded.
  // We force passing the parameters to ensure the contracts are correctly
  // honoured by the caller.
  // 'function' must be x1.
  // 'actual' must use an immediate or x0.
  // 'expected' must use an immediate or x2.
  // 'call_kind' must be x5.
  void InvokePrologue(Register expected_parameter_count,
                      Register actual_parameter_count, Label* done,
                      InvokeType type);

  // On function call, call into the debugger.
  void CallDebugOnFunctionCall(Register fun, Register new_target,
                               Register expected_parameter_count,
                               Register actual_parameter_count);
  void InvokeFunctionCode(Register function, Register new_target,
                          Register expected_parameter_count,
                          Register actual_parameter_count, InvokeType type);
  // Invoke the JavaScript function in the given register.
  // Changes the current context to the context in the function before invoking.
  void InvokeFunctionWithNewTarget(Register function, Register new_target,
                                   Register actual_parameter_count,
                                   InvokeType type);
  void InvokeFunction(Register function, Register expected_parameter_count,
                      Register actual_parameter_count, InvokeType type);

  // ---- Code generation helpers ----

  // ---------------------------------------------------------------------------
  // Support functions.

  // Compare object type for heap object.  heap_object contains a non-Smi
  // whose object type should be compared with the given type.  This both
  // sets the flags and leaves the object type in the type_reg register.
  // It leaves the map in the map register (unless the type_reg and map register
  // are the same register).  It leaves the heap object in the heap_object
  // register unless the heap_object register is the same register as one of the
  // other registers.
  void CompareObjectType(Register heap_object, Register map, Register type_reg,
                         InstanceType type);

  // Compare object type for heap object, and branch if equal (or not).
  // heap_object contains a non-Smi whose object type should be compared with
  // the given type.  This both sets the flags and leaves the object type in
  // the type_reg register. It leaves the map in the map register (unless the
  // type_reg and map register are the same register).  It leaves the heap
  // object in the heap_object register unless the heap_object register is the
  // same register as one of the other registers.
  void JumpIfObjectType(Register object, Register map, Register type_reg,
                        InstanceType type, Label* if_cond_pass,
                        Condition cond = eq);
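  // A hedged usage sketch (illustrative; x1 and x2 are arbitrary scratch
  // choices):
  //
  //   Label is_string;
  //   // Taken when x0 holds a heap object whose instance type compares 'lo'
  //   // against FIRST_NONSTRING_TYPE, i.e. when it is a string.
  //   JumpIfObjectType(x0, x1, x2, FIRST_NONSTRING_TYPE, &is_string, lo);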

  // Compare instance type in a map.  map contains a valid map object whose
  // object type should be compared with the given type.  This both
  // sets the flags and leaves the object type in the type_reg register.
  void CompareInstanceType(Register map, Register type_reg, InstanceType type);

  // Compare instance type ranges for a map (lower_limit and higher_limit
  // inclusive).
  //
  // Always use unsigned comparisons: ls for a positive result.
  void CompareInstanceTypeRange(Register map, Register type_reg,
                                InstanceType lower_limit,
                                InstanceType higher_limit);

  // Load the elements kind field from a map, and return it in the result
  // register.
  void LoadElementsKindFromMap(Register result, Register map);

  // Compare the object in a register to a value from the root list.
  void CompareRoot(const Register& obj, RootIndex index);

  // Compare the object in a register to a value and jump if they are equal.
  void JumpIfRoot(const Register& obj, RootIndex index, Label* if_equal);

  // Compare the object in a register to a value and jump if they are not equal.
  void JumpIfNotRoot(const Register& obj, RootIndex index, Label* if_not_equal);

  // Checks if value is in range [lower_limit, higher_limit] using a single
  // comparison.
  void JumpIfIsInRange(const Register& value, unsigned lower_limit,
                       unsigned higher_limit, Label* on_in_range);
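  // A hedged usage sketch (illustrative bounds):
  //
  //   Label in_range;
  //   // Taken when 10 <= w0 <= 20, using a single unsigned comparison.
  //   JumpIfIsInRange(w0, 10, 20, &in_range);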

  // ---------------------------------------------------------------------------
  // Frames.

  void ExitFramePreserveFPRegs();
  void ExitFrameRestoreFPRegs();

  // Enter exit frame. Exit frames are used when calling C code from generated
  // (JavaScript) code.
  //
  // The only registers modified by this function are the provided scratch
  // register, the frame pointer and the stack pointer.
  //
  // The 'extra_space' argument can be used to allocate some space in the exit
  // frame that will be ignored by the GC. This space will be reserved in the
  // bottom of the frame immediately above the return address slot.
  //
  // Set up a stack frame and registers as follows:
  //         fp[8]: CallerPC (lr)
  //   fp -> fp[0]: CallerFP (old fp)
  //         fp[-8]: SPOffset (new sp)
  //         fp[-16]: CodeObject()
  //         fp[-16 - fp-size]: Saved doubles, if save_doubles is true.
  //         sp[8]: Memory reserved for the caller if extra_space != 0.
  //                 Alignment padding, if necessary.
  //   sp -> sp[0]: Space reserved for the return address.
  //
  // This function also stores the new frame information in the top frame, so
  // that the new frame becomes the current frame.
  void EnterExitFrame(bool save_doubles, const Register& scratch,
                      int extra_space = 0,
                      StackFrame::Type frame_type = StackFrame::EXIT);

  // Leave the current exit frame, after a C function has returned to generated
  // (JavaScript) code.
  //
  // This effectively unwinds the operation of EnterExitFrame:
  //  * Preserved doubles are restored (if save_doubles is true).
  //  * The frame information is removed from the top frame.
  //  * The exit frame is dropped.
  void LeaveExitFrame(bool save_doubles, const Register& scratch,
                      const Register& scratch2);
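  // A hedged pairing sketch (illustrative; x10 and x11 are arbitrary scratch
  // registers and extra_space = 2 is an example value):
  //
  //   EnterExitFrame(false, x10, 2);
  //   // ... set up arguments and call into C code ...
  //   LeaveExitFrame(false, x10, x11);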

  // Load the global proxy from the current context.
  void LoadGlobalProxy(Register dst);

  // ---------------------------------------------------------------------------
  // In-place weak references.
  void LoadWeakValue(Register out, Register in, Label* target_if_cleared);

  // ---------------------------------------------------------------------------
  // StatsCounter support

  void IncrementCounter(StatsCounter* counter, int value, Register scratch1,
                        Register scratch2) {
    if (!FLAG_native_code_counters) return;
    EmitIncrementCounter(counter, value, scratch1, scratch2);
  }
  void EmitIncrementCounter(StatsCounter* counter, int value, Register scratch1,
                            Register scratch2);
  void DecrementCounter(StatsCounter* counter, int value, Register scratch1,
                        Register scratch2) {
    if (!FLAG_native_code_counters) return;
    // Decrementing is implemented as incrementing by the negated value.
    EmitIncrementCounter(counter, -value, scratch1, scratch2);
  }

  // ---------------------------------------------------------------------------
  // Stack limit utilities
  void LoadStackLimit(Register destination, StackLimitKind kind);
  void StackOverflowCheck(Register num_args, Label* stack_overflow);

  // ---------------------------------------------------------------------------
  // Garbage collector support (GC).

  // Notify the garbage collector that we wrote a pointer into an object.
  // |object| is the object being stored into, |value| is the object being
  // stored.
  // The offset is the offset from the start of the object, not the offset from
  // the tagged HeapObject pointer.  For use with FieldMemOperand(reg, off).
  void RecordWriteField(
      Register object, int offset, Register value, LinkRegisterStatus lr_status,
      SaveFPRegsMode save_fp,
      RememberedSetAction remembered_set_action = RememberedSetAction::kEmit,
      SmiCheck smi_check = SmiCheck::kInline);

  // For a given |object| notify the garbage collector that the slot at |offset|
  // has been written. |value| is the object being stored.
  void RecordWrite(
      Register object, Operand offset, Register value,
      LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
      RememberedSetAction remembered_set_action = RememberedSetAction::kEmit,
      SmiCheck smi_check = SmiCheck::kInline);
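  // A hedged usage sketch (illustrative; JSObject::kHeaderSize is only an
  // example offset constant, and the link register is assumed to hold no live
  // value):
  //
  //   StoreTaggedField(x2, FieldMemOperand(x1, JSObject::kHeaderSize));
  //   RecordWriteField(x1, JSObject::kHeaderSize, x2, kLRHasNotBeenSaved,
  //                    SaveFPRegsMode::kIgnore);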

  // ---------------------------------------------------------------------------
  // Debugging.

  void LoadNativeContextSlot(Register dst, int index);

  DISALLOW_IMPLICIT_CONSTRUCTORS(MacroAssembler);
};

// Use this scope when you need a one-to-one mapping between methods and
// instructions. This scope prevents the MacroAssembler from being called and
// literal pools from being emitted. It also asserts that the number of
// instructions emitted matches what you specified when creating the scope.
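//
// A hedged usage sketch (illustrative; the instruction count must match what
// the body emits, and only raw assembler mnemonics may be used inside):
//
//   {
//     InstructionAccurateScope scope(&tasm, 2);
//     tasm.add(x0, x0, x1);
//     tasm.sub(x0, x0, x2);
//   }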
class V8_NODISCARD InstructionAccurateScope {
 public:
  explicit InstructionAccurateScope(TurboAssembler* tasm, size_t count = 0)
      : tasm_(tasm),
        block_pool_(tasm, count * kInstrSize)
#ifdef DEBUG
        ,
        size_(count * kInstrSize)
#endif
  {
    tasm_->CheckVeneerPool(false, true, count * kInstrSize);
    tasm_->StartBlockVeneerPool();
#ifdef DEBUG
    if (count != 0) {
      tasm_->bind(&start_);
    }
    previous_allow_macro_instructions_ = tasm_->allow_macro_instructions();
    tasm_->set_allow_macro_instructions(false);
#endif
  }

  ~InstructionAccurateScope() {
    tasm_->EndBlockVeneerPool();
#ifdef DEBUG
    if (start_.is_bound()) {
      DCHECK(tasm_->SizeOfCodeGeneratedSince(&start_) == size_);
    }
    tasm_->set_allow_macro_instructions(previous_allow_macro_instructions_);
#endif
  }

 private:
  TurboAssembler* tasm_;
  TurboAssembler::BlockConstPoolScope block_pool_;
#ifdef DEBUG
  size_t size_;
  Label start_;
  bool previous_allow_macro_instructions_;
#endif
};

// This scope utility allows scratch registers to be managed safely. The
// TurboAssembler's TmpList() (and FPTmpList()) is used as a pool of scratch
// registers. These registers can be allocated on demand, and will be returned
// at the end of the scope.
//
// When the scope ends, the MacroAssembler's lists will be restored to their
// original state, even if the lists were modified by some other means. Note
// that this scope can be nested, but the destructors must run in the opposite
// order to the constructors. We do not have assertions for this.
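//
// A hedged usage sketch (illustrative):
//
//   {
//     UseScratchRegisterScope temps(&tasm);
//     Register scratch = temps.AcquireX();  // Returned when the scope ends.
//     tasm.Mov(scratch, 42);
//   }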
class V8_NODISCARD UseScratchRegisterScope {
 public:
  explicit UseScratchRegisterScope(TurboAssembler* tasm)
      : available_(tasm->TmpList()),
        availablefp_(tasm->FPTmpList()),
        old_available_(available_->list()),
        old_availablefp_(availablefp_->list()) {
    DCHECK_EQ(available_->type(), CPURegister::kRegister);
    DCHECK_EQ(availablefp_->type(), CPURegister::kVRegister);
  }

  V8_EXPORT_PRIVATE ~UseScratchRegisterScope();

  // Take a register from the appropriate temps list. It will be returned
  // automatically when the scope ends.
  Register AcquireW() { return AcquireNextAvailable(available_).W(); }
  Register AcquireX() { return AcquireNextAvailable(available_).X(); }
  VRegister AcquireS() { return AcquireNextAvailable(availablefp_).S(); }
  VRegister AcquireD() { return AcquireNextAvailable(availablefp_).D(); }
  VRegister AcquireQ() { return AcquireNextAvailable(availablefp_).Q(); }
  VRegister AcquireV(VectorFormat format) {
    return VRegister::Create(AcquireNextAvailable(availablefp_).code(), format);
  }

  Register AcquireSameSizeAs(const Register& reg);
  V8_EXPORT_PRIVATE VRegister AcquireSameSizeAs(const VRegister& reg);

  void Include(const CPURegList& list) { available_->Combine(list); }
  void Exclude(const CPURegList& list) {
#if DEBUG
    CPURegList copy(list);
    while (!copy.IsEmpty()) {
      const CPURegister& reg = copy.PopHighestIndex();
      DCHECK(available_->IncludesAliasOf(reg));
    }
#endif
    available_->Remove(list);
  }
  void Include(const Register& reg1, const Register& reg2 = NoReg) {
    CPURegList list(reg1, reg2);
    Include(list);
  }
  void Exclude(const Register& reg1, const Register& reg2 = NoReg) {
    CPURegList list(reg1, reg2);
    Exclude(list);
  }

 private:
  V8_EXPORT_PRIVATE static CPURegister AcquireNextAvailable(
      CPURegList* available);

  // Available scratch registers.
  CPURegList* available_;    // kRegister
  CPURegList* availablefp_;  // kVRegister

  // The state of the available lists at the start of this scope.
  RegList old_available_;    // kRegister
  RegList old_availablefp_;  // kVRegister
};

}  // namespace internal
}  // namespace v8

#define ACCESS_MASM(masm) masm->

#endif  // V8_CODEGEN_ARM64_MACRO_ASSEMBLER_ARM64_H_