1 // Copyright (c) 1994-2006 Sun Microsystems Inc.
2 // All Rights Reserved.
3 //
4 // Redistribution and use in source and binary forms, with or without
5 // modification, are permitted provided that the following conditions
6 // are met:
7 //
8 // - Redistributions of source code must retain the above copyright notice,
9 // this list of conditions and the following disclaimer.
10 //
11 // - Redistribution in binary form must reproduce the above copyright
12 // notice, this list of conditions and the following disclaimer in the
13 // documentation and/or other materials provided with the
14 // distribution.
15 //
16 // - Neither the name of Sun Microsystems or the names of contributors may
17 // be used to endorse or promote products derived from this software without
18 // specific prior written permission.
19 //
20 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23 // FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24 // COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25 // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
26 // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
27 // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 // HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
29 // STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
31 // OF THE POSSIBILITY OF SUCH DAMAGE.
32 
// The original source code covered by the above license has been
// modified significantly by Google Inc.
35 // Copyright 2012 the V8 project authors. All rights reserved.
36 
37 // A light-weight ARM Assembler
38 // Generates user mode instructions for the ARM architecture up to version 5
39 
40 #ifndef V8_CODEGEN_ARM_ASSEMBLER_ARM_H_
41 #define V8_CODEGEN_ARM_ASSEMBLER_ARM_H_
42 
43 #include <stdio.h>
44 #include <memory>
45 #include <vector>
46 
47 #include "src/codegen/arm/constants-arm.h"
48 #include "src/codegen/arm/register-arm.h"
49 #include "src/codegen/assembler.h"
50 #include "src/codegen/constant-pool.h"
51 #include "src/numbers/double.h"
52 #include "src/utils/boxed-float.h"
53 
54 namespace v8 {
55 namespace internal {
56 
57 class SafepointTableBuilder;
58 
// Coprocessor number, p0 through p15, as used in the coprocessor
// instructions declared on Assembler below (cdp, mcr, mrc, ldc, ...).
// The enumerator value is the numeric coprocessor index itself.
enum Coprocessor {
  p0 = 0,
  p1 = 1,
  p2 = 2,
  p3 = 3,
  p4 = 4,
  p5 = 5,
  p6 = 6,
  p7 = 7,
  p8 = 8,
  p9 = 9,
  p10 = 10,
  p11 = 11,
  p12 = 12,
  p13 = 13,
  p14 = 14,
  p15 = 15
};
78 
79 // -----------------------------------------------------------------------------
80 // Machine instruction Operands
81 
// Class Operand represents a shifter operand in data processing instructions.
// It is either an immediate (optionally with relocation info), a plain
// register, or a register shifted by an immediate or by another register.
class V8_EXPORT_PRIVATE Operand {
 public:
  // Immediate operand; |rmode| carries optional relocation information for
  // the constant (e.g. an external reference or code target).
  V8_INLINE explicit Operand(int32_t immediate,
                             RelocInfo::Mode rmode = RelocInfo::NONE)
      : rmode_(rmode) {
    value_.immediate = immediate;
  }
  V8_INLINE static Operand Zero();
  V8_INLINE explicit Operand(const ExternalReference& f);
  explicit Operand(Handle<HeapObject> handle);
  V8_INLINE explicit Operand(Smi value);

  // rm
  V8_INLINE explicit Operand(Register rm);

  // rm <shift_op> shift_imm
  explicit Operand(Register rm, ShiftOp shift_op, int shift_imm);
  // Operand that yields the untagged value of the Smi held in |rm|
  // (arithmetic shift right by the Smi tag size).
  V8_INLINE static Operand SmiUntag(Register rm) {
    return Operand(rm, ASR, kSmiTagSize);
  }
  // Operand that scales the Smi key in |key| to a pointer-sized byte offset.
  V8_INLINE static Operand PointerOffsetFromSmiKey(Register key) {
    STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
    return Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize);
  }
  // Operand that scales the Smi key in |key| to a double-sized byte offset.
  V8_INLINE static Operand DoubleOffsetFromSmiKey(Register key) {
    STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kDoubleSizeLog2);
    return Operand(key, LSL, kDoubleSizeLog2 - kSmiTagSize);
  }

  // rm <shift_op> rs
  explicit Operand(Register rm, ShiftOp shift_op, Register rs);

  static Operand EmbeddedNumber(double number);  // Smi or HeapNumber.
  static Operand EmbeddedStringConstant(const StringConstantBase* str);

  // Return true if this is a register operand (no shift applied).
  bool IsRegister() const {
    return rm_.is_valid() && rs_ == no_reg && shift_op_ == LSL &&
           shift_imm_ == 0;
  }
  // Return true if this is a register operand shifted with an immediate.
  bool IsImmediateShiftedRegister() const {
    return rm_.is_valid() && !rs_.is_valid();
  }
  // Return true if this is a register operand shifted with a register.
  bool IsRegisterShiftedRegister() const {
    return rm_.is_valid() && rs_.is_valid();
  }

  // Return the number of actual instructions required to implement the given
  // instruction for this particular operand. This can be a single instruction,
  // if no load into a scratch register is necessary, or anything between 2 and
  // 4 instructions when we need to load from the constant pool (depending upon
  // whether the constant pool entry is in the small or extended section). If
  // the instruction this operand is used for is a MOV or MVN instruction the
  // actual instruction to use is required for this calculation. For other
  // instructions instr is ignored.
  //
  // The value returned is only valid as long as no entries are added to the
  // constant pool between this call and the actual instruction being emitted.
  int InstructionsRequired(const Assembler* assembler, Instr instr = 0) const;
  bool MustOutputRelocInfo(const Assembler* assembler) const;

  // The immediate value; only valid when IsImmediate() and the operand is not
  // a pending heap object request.
  inline int32_t immediate() const {
    DCHECK(IsImmediate());
    DCHECK(!IsHeapObjectRequest());
    return value_.immediate;
  }
  bool IsImmediate() const { return !rm_.is_valid(); }

  // The pending heap object request; only valid when IsHeapObjectRequest().
  HeapObjectRequest heap_object_request() const {
    DCHECK(IsHeapObjectRequest());
    return value_.heap_object_request;
  }
  bool IsHeapObjectRequest() const {
    // A heap object request is a special kind of immediate and must carry a
    // relocation mode that can refer to an embedded object.
    DCHECK_IMPLIES(is_heap_object_request_, IsImmediate());
    DCHECK_IMPLIES(is_heap_object_request_,
                   rmode_ == RelocInfo::FULL_EMBEDDED_OBJECT ||
                       rmode_ == RelocInfo::CODE_TARGET);
    return is_heap_object_request_;
  }

  Register rm() const { return rm_; }
  Register rs() const { return rs_; }
  ShiftOp shift_op() const { return shift_op_; }

 private:
  Register rm_ = no_reg;
  Register rs_ = no_reg;
  ShiftOp shift_op_;
  int shift_imm_;  // valid if rm_ != no_reg && rs_ == no_reg
  union Value {
    Value() {}
    HeapObjectRequest heap_object_request;  // if is_heap_object_request_
    int32_t immediate;                      // otherwise
  } value_;                                 // valid if rm_ == no_reg
  bool is_heap_object_request_ = false;
  RelocInfo::Mode rmode_;

  friend class Assembler;
};
185 
// Class MemOperand represents a memory operand in load and store instructions:
// a base register plus either an immediate offset or an (optionally shifted)
// index register, combined with an addressing mode.
class V8_EXPORT_PRIVATE MemOperand {
 public:
  // [rn +/- offset]      Offset/NegOffset
  // [rn +/- offset]!     PreIndex/NegPreIndex
  // [rn], +/- offset     PostIndex/NegPostIndex
  // offset is any signed 32-bit value; offset is first loaded to a scratch
  // register if it does not fit the addressing mode (12-bit unsigned and sign
  // bit)
  explicit MemOperand(Register rn, int32_t offset = 0, AddrMode am = Offset);

  // [rn +/- rm]          Offset/NegOffset
  // [rn +/- rm]!         PreIndex/NegPreIndex
  // [rn], +/- rm         PostIndex/NegPostIndex
  explicit MemOperand(Register rn, Register rm, AddrMode am = Offset);

  // [rn +/- rm <shift_op> shift_imm]      Offset/NegOffset
  // [rn +/- rm <shift_op> shift_imm]!     PreIndex/NegPreIndex
  // [rn], +/- rm <shift_op> shift_imm     PostIndex/NegPostIndex
  explicit MemOperand(Register rn, Register rm, ShiftOp shift_op, int shift_imm,
                      AddrMode am = Offset);
  // [array + key scaled to a pointer-sized offset], where |key| holds a Smi.
  V8_INLINE static MemOperand PointerAddressFromSmiKey(Register array,
                                                       Register key,
                                                       AddrMode am = Offset) {
    STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
    return MemOperand(array, key, LSL, kPointerSizeLog2 - kSmiTagSize, am);
  }

  // Replace the immediate offset; only valid for immediate-offset operands.
  void set_offset(int32_t offset) {
    DCHECK(rm_ == no_reg);
    offset_ = offset;
  }

  // NOTE(review): offset_ is a signed int32_t but is returned as uint32_t
  // here, so a negative offset reads back as a large unsigned value --
  // confirm callers rely on this before changing the return type.
  uint32_t offset() const {
    DCHECK(rm_ == no_reg);
    return offset_;
  }

  Register rn() const { return rn_; }
  Register rm() const { return rm_; }
  AddrMode am() const { return am_; }

  // True if the magnitude of the immediate offset fits the 12-bit unsigned
  // offset field of the addressing mode.
  bool OffsetIsUint12Encodable() const {
    return offset_ >= 0 ? is_uint12(offset_) : is_uint12(-offset_);
  }

 private:
  Register rn_;     // base
  Register rm_;     // register offset
  int32_t offset_;  // valid if rm_ == no_reg
  ShiftOp shift_op_;
  int shift_imm_;  // valid if rm_ != no_reg && rs_ == no_reg
  AddrMode am_;    // bits P, U, and W

  friend class Assembler;
};
242 
// Class NeonMemOperand represents a memory operand in load and
// store NEON instructions: a base register with an optional alignment
// hint, advanced either by no register or by a register increment.
class V8_EXPORT_PRIVATE NeonMemOperand {
 public:
  // [rn {:align}]       Offset
  // [rn {:align}]!      PostIndex
  explicit NeonMemOperand(Register rn, AddrMode am = Offset, int align = 0);

  // [rn {:align}], rm   PostIndex
  explicit NeonMemOperand(Register rn, Register rm, int align = 0);

  Register rn() const { return rn_; }
  Register rm() const { return rm_; }
  int align() const { return align_; }

 private:
  // Out-of-line helper that records |align| in align_ (defined in the .cc
  // file; presumably validates/encodes the alignment -- confirm there).
  void SetAlignment(int align);

  Register rn_;  // base
  Register rm_;  // register increment
  int align_;
};
265 
266 // Class NeonListOperand represents a list of NEON registers
267 class NeonListOperand {
268  public:
269   explicit NeonListOperand(DoubleRegister base, int register_count = 1)
base_(base)270       : base_(base), register_count_(register_count) {}
NeonListOperand(QwNeonRegister q_reg)271   explicit NeonListOperand(QwNeonRegister q_reg)
272       : base_(q_reg.low()), register_count_(2) {}
base()273   DoubleRegister base() const { return base_; }
register_count()274   int register_count() { return register_count_; }
length()275   int length() const { return register_count_ - 1; }
type()276   NeonListType type() const {
277     switch (register_count_) {
278       default:
279         UNREACHABLE();
280       // Fall through.
281       case 1:
282         return nlt_1;
283       case 2:
284         return nlt_2;
285       case 3:
286         return nlt_3;
287       case 4:
288         return nlt_4;
289     }
290   }
291 
292  private:
293   DoubleRegister base_;
294   int register_count_;
295 };
296 
297 class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
298  public:
299   // Create an assembler. Instructions and relocation information are emitted
300   // into a buffer, with the instructions starting from the beginning and the
301   // relocation information starting from the end of the buffer. See CodeDesc
302   // for a detailed comment on the layout (globals.h).
303   //
304   // If the provided buffer is nullptr, the assembler allocates and grows its
305   // own buffer. Otherwise it takes ownership of the provided buffer.
306   explicit Assembler(const AssemblerOptions&,
307                      std::unique_ptr<AssemblerBuffer> = {});
308 
309   ~Assembler() override;
310 
AbortedCodeGeneration()311   void AbortedCodeGeneration() override { pending_32_bit_constants_.clear(); }
312 
313   // GetCode emits any pending (non-emitted) code and fills the descriptor desc.
314   static constexpr int kNoHandlerTable = 0;
315   static constexpr SafepointTableBuilder* kNoSafepointTable = nullptr;
316   void GetCode(Isolate* isolate, CodeDesc* desc,
317                SafepointTableBuilder* safepoint_table_builder,
318                int handler_table_offset);
319 
320   // Convenience wrapper for code without safepoint or handler tables.
GetCode(Isolate * isolate,CodeDesc * desc)321   void GetCode(Isolate* isolate, CodeDesc* desc) {
322     GetCode(isolate, desc, kNoSafepointTable, kNoHandlerTable);
323   }
324 
325   // Label operations & relative jumps (PPUM Appendix D)
326   //
327   // Takes a branch opcode (cc) and a label (L) and generates
328   // either a backward branch or a forward branch and links it
329   // to the label fixup chain. Usage:
330   //
331   // Label L;    // unbound label
332   // j(cc, &L);  // forward branch to unbound label
333   // bind(&L);   // bind label to the current pc
334   // j(cc, &L);  // backward branch to bound label
335   // bind(&L);   // illegal: a label may be bound only once
336   //
337   // Note: The same Label can be used for forward and backward branches
338   // but it may be bound only once.
339 
340   void bind(Label* L);  // binds an unbound label L to the current code position
341 
342   // Returns the branch offset to the given label from the current code position
343   // Links the label to the current position if it is still unbound
344   // Manages the jump elimination optimization if the second parameter is true.
345   int branch_offset(Label* L);
346 
347   // Returns true if the given pc address is the start of a constant pool load
348   // instruction sequence.
349   V8_INLINE static bool is_constant_pool_load(Address pc);
350 
351   // Return the address in the constant pool of the code target address used by
352   // the branch/call instruction at pc, or the object in a mov.
353   V8_INLINE static Address constant_pool_entry_address(Address pc,
354                                                        Address constant_pool);
355 
356   // Read/Modify the code target address in the branch/call instruction at pc.
357   // The isolate argument is unused (and may be nullptr) when skipping flushing.
358   V8_INLINE static Address target_address_at(Address pc, Address constant_pool);
359   V8_INLINE static void set_target_address_at(
360       Address pc, Address constant_pool, Address target,
361       ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
362 
363   // This sets the branch destination (which is in the constant pool on ARM).
364   // This is for calls and branches within generated code.
365   inline static void deserialization_set_special_target_at(
366       Address constant_pool_entry, Code code, Address target);
367 
368   // Get the size of the special target encoded at 'location'.
369   inline static int deserialization_special_target_size(Address location);
370 
371   // This sets the internal reference at the pc.
372   inline static void deserialization_set_target_internal_reference_at(
373       Address pc, Address target,
374       RelocInfo::Mode mode = RelocInfo::INTERNAL_REFERENCE);
375 
376   // Here we are patching the address in the constant pool, not the actual call
377   // instruction.  The address in the constant pool is the same size as a
378   // pointer.
379   static constexpr int kSpecialTargetSize = kPointerSize;
380 
GetScratchRegisterList()381   RegList* GetScratchRegisterList() { return &scratch_register_list_; }
GetScratchVfpRegisterList()382   VfpRegList* GetScratchVfpRegisterList() {
383     return &scratch_vfp_register_list_;
384   }
385 
386   // ---------------------------------------------------------------------------
387   // Code generation
388 
389   // Insert the smallest number of nop instructions
390   // possible to align the pc offset to a multiple
391   // of m. m must be a power of 2 (>= 4).
392   void Align(int m);
  // Insert the smallest number of zero bytes possible to align the pc offset
  // to a multiple of m. m must be a power of 2 (>= 2).
395   void DataAlign(int m);
396   // Aligns code to something that's optimal for a jump target for the platform.
397   void CodeTargetAlign();
398 
399   // Branch instructions
400   void b(int branch_offset, Condition cond = al,
401          RelocInfo::Mode rmode = RelocInfo::NONE);
402   void bl(int branch_offset, Condition cond = al,
403           RelocInfo::Mode rmode = RelocInfo::NONE);
404   void blx(int branch_offset);                     // v5 and above
405   void blx(Register target, Condition cond = al);  // v5 and above
406   void bx(Register target, Condition cond = al);   // v5 and above, plus v4t
407 
408   // Convenience branch instructions using labels
409   void b(Label* L, Condition cond = al);
b(Condition cond,Label * L)410   void b(Condition cond, Label* L) { b(L, cond); }
411   void bl(Label* L, Condition cond = al);
bl(Condition cond,Label * L)412   void bl(Condition cond, Label* L) { bl(L, cond); }
413   void blx(Label* L);  // v5 and above
414 
415   // Data-processing instructions
416 
417   void and_(Register dst, Register src1, const Operand& src2, SBit s = LeaveCC,
418             Condition cond = al);
419   void and_(Register dst, Register src1, Register src2, SBit s = LeaveCC,
420             Condition cond = al);
421 
422   void eor(Register dst, Register src1, const Operand& src2, SBit s = LeaveCC,
423            Condition cond = al);
424   void eor(Register dst, Register src1, Register src2, SBit s = LeaveCC,
425            Condition cond = al);
426 
427   void sub(Register dst, Register src1, const Operand& src2, SBit s = LeaveCC,
428            Condition cond = al);
429   void sub(Register dst, Register src1, Register src2, SBit s = LeaveCC,
430            Condition cond = al);
431 
432   void rsb(Register dst, Register src1, const Operand& src2, SBit s = LeaveCC,
433            Condition cond = al);
434 
435   void add(Register dst, Register src1, const Operand& src2, SBit s = LeaveCC,
436            Condition cond = al);
437   void add(Register dst, Register src1, Register src2, SBit s = LeaveCC,
438            Condition cond = al);
439 
440   void adc(Register dst, Register src1, const Operand& src2, SBit s = LeaveCC,
441            Condition cond = al);
442 
443   void sbc(Register dst, Register src1, const Operand& src2, SBit s = LeaveCC,
444            Condition cond = al);
445 
446   void rsc(Register dst, Register src1, const Operand& src2, SBit s = LeaveCC,
447            Condition cond = al);
448 
449   void tst(Register src1, const Operand& src2, Condition cond = al);
450   void tst(Register src1, Register src2, Condition cond = al);
451 
452   void teq(Register src1, const Operand& src2, Condition cond = al);
453 
454   void cmp(Register src1, const Operand& src2, Condition cond = al);
455   void cmp(Register src1, Register src2, Condition cond = al);
456 
457   void cmp_raw_immediate(Register src1, int raw_immediate, Condition cond = al);
458 
459   void cmn(Register src1, const Operand& src2, Condition cond = al);
460 
461   void orr(Register dst, Register src1, const Operand& src2, SBit s = LeaveCC,
462            Condition cond = al);
463   void orr(Register dst, Register src1, Register src2, SBit s = LeaveCC,
464            Condition cond = al);
465 
466   void mov(Register dst, const Operand& src, SBit s = LeaveCC,
467            Condition cond = al);
468   void mov(Register dst, Register src, SBit s = LeaveCC, Condition cond = al);
469 
470   // Load the position of the label relative to the generated code object
471   // pointer in a register.
472   void mov_label_offset(Register dst, Label* label);
473 
474   // ARMv7 instructions for loading a 32 bit immediate in two instructions.
475   // The constant for movw and movt should be in the range 0-0xffff.
476   void movw(Register reg, uint32_t immediate, Condition cond = al);
477   void movt(Register reg, uint32_t immediate, Condition cond = al);
478 
479   void bic(Register dst, Register src1, const Operand& src2, SBit s = LeaveCC,
480            Condition cond = al);
481 
482   void mvn(Register dst, const Operand& src, SBit s = LeaveCC,
483            Condition cond = al);
484 
485   // Shift instructions
486 
487   void asr(Register dst, Register src1, const Operand& src2, SBit s = LeaveCC,
488            Condition cond = al);
489 
490   void lsl(Register dst, Register src1, const Operand& src2, SBit s = LeaveCC,
491            Condition cond = al);
492 
493   void lsr(Register dst, Register src1, const Operand& src2, SBit s = LeaveCC,
494            Condition cond = al);
495 
496   // Multiply instructions
497 
498   void mla(Register dst, Register src1, Register src2, Register srcA,
499            SBit s = LeaveCC, Condition cond = al);
500 
501   void mls(Register dst, Register src1, Register src2, Register srcA,
502            Condition cond = al);
503 
504   void sdiv(Register dst, Register src1, Register src2, Condition cond = al);
505 
506   void udiv(Register dst, Register src1, Register src2, Condition cond = al);
507 
508   void mul(Register dst, Register src1, Register src2, SBit s = LeaveCC,
509            Condition cond = al);
510 
511   void smmla(Register dst, Register src1, Register src2, Register srcA,
512              Condition cond = al);
513 
514   void smmul(Register dst, Register src1, Register src2, Condition cond = al);
515 
516   void smlal(Register dstL, Register dstH, Register src1, Register src2,
517              SBit s = LeaveCC, Condition cond = al);
518 
519   void smull(Register dstL, Register dstH, Register src1, Register src2,
520              SBit s = LeaveCC, Condition cond = al);
521 
522   void umlal(Register dstL, Register dstH, Register src1, Register src2,
523              SBit s = LeaveCC, Condition cond = al);
524 
525   void umull(Register dstL, Register dstH, Register src1, Register src2,
526              SBit s = LeaveCC, Condition cond = al);
527 
528   // Miscellaneous arithmetic instructions
529 
530   void clz(Register dst, Register src, Condition cond = al);  // v5 and above
531 
532   // Saturating instructions. v6 and above.
533 
534   // Unsigned saturate.
535   //
536   // Saturate an optionally shifted signed value to an unsigned range.
537   //
538   //   usat dst, #satpos, src
539   //   usat dst, #satpos, src, lsl #sh
540   //   usat dst, #satpos, src, asr #sh
541   //
542   // Register dst will contain:
543   //
544   //   0,                 if s < 0
545   //   (1 << satpos) - 1, if s > ((1 << satpos) - 1)
546   //   s,                 otherwise
547   //
548   // where s is the contents of src after shifting (if used.)
549   void usat(Register dst, int satpos, const Operand& src, Condition cond = al);
550 
551   // Bitfield manipulation instructions. v7 and above.
552 
553   void ubfx(Register dst, Register src, int lsb, int width,
554             Condition cond = al);
555 
556   void sbfx(Register dst, Register src, int lsb, int width,
557             Condition cond = al);
558 
559   void bfc(Register dst, int lsb, int width, Condition cond = al);
560 
561   void bfi(Register dst, Register src, int lsb, int width, Condition cond = al);
562 
563   void pkhbt(Register dst, Register src1, const Operand& src2,
564              Condition cond = al);
565 
566   void pkhtb(Register dst, Register src1, const Operand& src2,
567              Condition cond = al);
568 
569   void sxtb(Register dst, Register src, int rotate = 0, Condition cond = al);
570   void sxtab(Register dst, Register src1, Register src2, int rotate = 0,
571              Condition cond = al);
572   void sxth(Register dst, Register src, int rotate = 0, Condition cond = al);
573   void sxtah(Register dst, Register src1, Register src2, int rotate = 0,
574              Condition cond = al);
575 
576   void uxtb(Register dst, Register src, int rotate = 0, Condition cond = al);
577   void uxtab(Register dst, Register src1, Register src2, int rotate = 0,
578              Condition cond = al);
579   void uxtb16(Register dst, Register src, int rotate = 0, Condition cond = al);
580   void uxth(Register dst, Register src, int rotate = 0, Condition cond = al);
581   void uxtah(Register dst, Register src1, Register src2, int rotate = 0,
582              Condition cond = al);
583 
584   // Reverse the bits in a register.
585   void rbit(Register dst, Register src, Condition cond = al);
586   void rev(Register dst, Register src, Condition cond = al);
587 
588   // Status register access instructions
589 
590   void mrs(Register dst, SRegister s, Condition cond = al);
591   void msr(SRegisterFieldMask fields, const Operand& src, Condition cond = al);
592 
593   // Load/Store instructions
594   void ldr(Register dst, const MemOperand& src, Condition cond = al);
595   void str(Register src, const MemOperand& dst, Condition cond = al);
596   void ldrb(Register dst, const MemOperand& src, Condition cond = al);
597   void strb(Register src, const MemOperand& dst, Condition cond = al);
598   void ldrh(Register dst, const MemOperand& src, Condition cond = al);
599   void strh(Register src, const MemOperand& dst, Condition cond = al);
600   void ldrsb(Register dst, const MemOperand& src, Condition cond = al);
601   void ldrsh(Register dst, const MemOperand& src, Condition cond = al);
602   void ldrd(Register dst1, Register dst2, const MemOperand& src,
603             Condition cond = al);
604   void strd(Register src1, Register src2, const MemOperand& dst,
605             Condition cond = al);
606 
607   // Load literal from a pc relative address.
608   void ldr_pcrel(Register dst, int imm12, Condition cond = al);
609 
610   // Load/Store exclusive instructions
611   void ldrex(Register dst, Register src, Condition cond = al);
612   void strex(Register src1, Register src2, Register dst, Condition cond = al);
613   void ldrexb(Register dst, Register src, Condition cond = al);
614   void strexb(Register src1, Register src2, Register dst, Condition cond = al);
615   void ldrexh(Register dst, Register src, Condition cond = al);
616   void strexh(Register src1, Register src2, Register dst, Condition cond = al);
617   void ldrexd(Register dst1, Register dst2, Register src, Condition cond = al);
618   void strexd(Register res, Register src1, Register src2, Register dst,
619               Condition cond = al);
620 
621   // Preload instructions
622   void pld(const MemOperand& address);
623 
624   // Load/Store multiple instructions
625   void ldm(BlockAddrMode am, Register base, RegList dst, Condition cond = al);
626   void stm(BlockAddrMode am, Register base, RegList src, Condition cond = al);
627 
628   // Exception-generating instructions and debugging support
629   void stop(Condition cond = al, int32_t code = kDefaultStopCode);
630 
631   void bkpt(uint32_t imm16);  // v5 and above
632   void svc(uint32_t imm24, Condition cond = al);
633 
634   // Synchronization instructions.
635   // On ARMv6, an equivalent CP15 operation will be used.
636   void dmb(BarrierOption option);
637   void dsb(BarrierOption option);
638   void isb(BarrierOption option);
639 
640   // Conditional speculation barrier.
641   void csdb();
642 
643   // Coprocessor instructions
644 
645   void cdp(Coprocessor coproc, int opcode_1, CRegister crd, CRegister crn,
646            CRegister crm, int opcode_2, Condition cond = al);
647 
648   void cdp2(Coprocessor coproc, int opcode_1, CRegister crd, CRegister crn,
649             CRegister crm,
650             int opcode_2);  // v5 and above
651 
652   void mcr(Coprocessor coproc, int opcode_1, Register rd, CRegister crn,
653            CRegister crm, int opcode_2 = 0, Condition cond = al);
654 
655   void mcr2(Coprocessor coproc, int opcode_1, Register rd, CRegister crn,
656             CRegister crm,
657             int opcode_2 = 0);  // v5 and above
658 
659   void mrc(Coprocessor coproc, int opcode_1, Register rd, CRegister crn,
660            CRegister crm, int opcode_2 = 0, Condition cond = al);
661 
662   void mrc2(Coprocessor coproc, int opcode_1, Register rd, CRegister crn,
663             CRegister crm,
664             int opcode_2 = 0);  // v5 and above
665 
666   void ldc(Coprocessor coproc, CRegister crd, const MemOperand& src,
667            LFlag l = Short, Condition cond = al);
668   void ldc(Coprocessor coproc, CRegister crd, Register base, int option,
669            LFlag l = Short, Condition cond = al);
670 
671   void ldc2(Coprocessor coproc, CRegister crd, const MemOperand& src,
672             LFlag l = Short);  // v5 and above
673   void ldc2(Coprocessor coproc, CRegister crd, Register base, int option,
674             LFlag l = Short);  // v5 and above
675 
676   // Support for VFP.
677   // All these APIs support S0 to S31 and D0 to D31.
678 
679   void vldr(const DwVfpRegister dst, const Register base, int offset,
680             const Condition cond = al);
681   void vldr(const DwVfpRegister dst, const MemOperand& src,
682             const Condition cond = al);
683 
684   void vldr(const SwVfpRegister dst, const Register base, int offset,
685             const Condition cond = al);
686   void vldr(const SwVfpRegister dst, const MemOperand& src,
687             const Condition cond = al);
688 
689   void vstr(const DwVfpRegister src, const Register base, int offset,
690             const Condition cond = al);
691   void vstr(const DwVfpRegister src, const MemOperand& dst,
692             const Condition cond = al);
693 
694   void vstr(const SwVfpRegister src, const Register base, int offset,
695             const Condition cond = al);
696   void vstr(const SwVfpRegister src, const MemOperand& dst,
697             const Condition cond = al);
698 
699   void vldm(BlockAddrMode am, Register base, DwVfpRegister first,
700             DwVfpRegister last, Condition cond = al);
701 
702   void vstm(BlockAddrMode am, Register base, DwVfpRegister first,
703             DwVfpRegister last, Condition cond = al);
704 
705   void vldm(BlockAddrMode am, Register base, SwVfpRegister first,
706             SwVfpRegister last, Condition cond = al);
707 
708   void vstm(BlockAddrMode am, Register base, SwVfpRegister first,
709             SwVfpRegister last, Condition cond = al);
710 
  // Move an immediate into a VFP register. The Double overload may need a
  // core register to materialize the bits; extra_scratch optionally supplies
  // one in addition to the assembler's own scratch registers.
  void vmov(const SwVfpRegister dst, Float32 imm);
  void vmov(const DwVfpRegister dst, Double imm,
            const Register extra_scratch = no_reg);
  // Register-to-register moves (ARM VMOV), including transfers between core
  // registers and VFP registers. A D register maps to a core-register pair.
  void vmov(const SwVfpRegister dst, const SwVfpRegister src,
            const Condition cond = al);
  void vmov(const DwVfpRegister dst, const DwVfpRegister src,
            const Condition cond = al);
  void vmov(const DwVfpRegister dst, const Register src1, const Register src2,
            const Condition cond = al);
  void vmov(const Register dst1, const Register dst2, const DwVfpRegister src,
            const Condition cond = al);
  void vmov(const SwVfpRegister dst, const Register src,
            const Condition cond = al);
  void vmov(const Register dst, const SwVfpRegister src,
            const Condition cond = al);
  // Conversions (ARM VCVT). The suffix encodes destination_source formats:
  // e.g. vcvt_f64_s32 converts a signed 32-bit integer (held in an S
  // register) to a double. mode selects the rounding behavior used.
  void vcvt_f64_s32(const DwVfpRegister dst, const SwVfpRegister src,
                    VFPConversionMode mode = kDefaultRoundToZero,
                    const Condition cond = al);
  void vcvt_f32_s32(const SwVfpRegister dst, const SwVfpRegister src,
                    VFPConversionMode mode = kDefaultRoundToZero,
                    const Condition cond = al);
  void vcvt_f64_u32(const DwVfpRegister dst, const SwVfpRegister src,
                    VFPConversionMode mode = kDefaultRoundToZero,
                    const Condition cond = al);
  void vcvt_f32_u32(const SwVfpRegister dst, const SwVfpRegister src,
                    VFPConversionMode mode = kDefaultRoundToZero,
                    const Condition cond = al);
  void vcvt_s32_f32(const SwVfpRegister dst, const SwVfpRegister src,
                    VFPConversionMode mode = kDefaultRoundToZero,
                    const Condition cond = al);
  void vcvt_u32_f32(const SwVfpRegister dst, const SwVfpRegister src,
                    VFPConversionMode mode = kDefaultRoundToZero,
                    const Condition cond = al);
  void vcvt_s32_f64(const SwVfpRegister dst, const DwVfpRegister src,
                    VFPConversionMode mode = kDefaultRoundToZero,
                    const Condition cond = al);
  void vcvt_u32_f64(const SwVfpRegister dst, const DwVfpRegister src,
                    VFPConversionMode mode = kDefaultRoundToZero,
                    const Condition cond = al);
  void vcvt_f64_f32(const DwVfpRegister dst, const SwVfpRegister src,
                    VFPConversionMode mode = kDefaultRoundToZero,
                    const Condition cond = al);
  void vcvt_f32_f64(const SwVfpRegister dst, const DwVfpRegister src,
                    VFPConversionMode mode = kDefaultRoundToZero,
                    const Condition cond = al);
  // Fixed-point variant: converts dst in place, treating it as a signed
  // fixed-point value with the given number of fraction bits.
  void vcvt_f64_s32(const DwVfpRegister dst, int fraction_bits,
                    const Condition cond = al);
758 
  // Transfers between the VFP status/control register (FPSCR) and a core
  // register: vmrs reads the FPSCR into dst; vmsr writes the value of the
  // given core register into the FPSCR (ARM VMRS/VMSR; the vmsr parameter is
  // the source of the transfer despite its name).
  void vmrs(const Register dst, const Condition cond = al);
  void vmsr(const Register dst, const Condition cond = al);

  // VFP arithmetic, each available in double-precision (D register) and
  // single-precision (S register) forms.
  void vneg(const DwVfpRegister dst, const DwVfpRegister src,
            const Condition cond = al);
  void vneg(const SwVfpRegister dst, const SwVfpRegister src,
            const Condition cond = al);
  void vabs(const DwVfpRegister dst, const DwVfpRegister src,
            const Condition cond = al);
  void vabs(const SwVfpRegister dst, const SwVfpRegister src,
            const Condition cond = al);
  void vadd(const DwVfpRegister dst, const DwVfpRegister src1,
            const DwVfpRegister src2, const Condition cond = al);
  void vadd(const SwVfpRegister dst, const SwVfpRegister src1,
            const SwVfpRegister src2, const Condition cond = al);
  void vsub(const DwVfpRegister dst, const DwVfpRegister src1,
            const DwVfpRegister src2, const Condition cond = al);
  void vsub(const SwVfpRegister dst, const SwVfpRegister src1,
            const SwVfpRegister src2, const Condition cond = al);
  void vmul(const DwVfpRegister dst, const DwVfpRegister src1,
            const DwVfpRegister src2, const Condition cond = al);
  void vmul(const SwVfpRegister dst, const SwVfpRegister src1,
            const SwVfpRegister src2, const Condition cond = al);
  // Multiply-accumulate / multiply-subtract: dst +/-= src1 * src2.
  void vmla(const DwVfpRegister dst, const DwVfpRegister src1,
            const DwVfpRegister src2, const Condition cond = al);
  void vmla(const SwVfpRegister dst, const SwVfpRegister src1,
            const SwVfpRegister src2, const Condition cond = al);
  void vmls(const DwVfpRegister dst, const DwVfpRegister src1,
            const DwVfpRegister src2, const Condition cond = al);
  void vmls(const SwVfpRegister dst, const SwVfpRegister src1,
            const SwVfpRegister src2, const Condition cond = al);
  void vdiv(const DwVfpRegister dst, const DwVfpRegister src1,
            const DwVfpRegister src2, const Condition cond = al);
  void vdiv(const SwVfpRegister dst, const SwVfpRegister src1,
            const SwVfpRegister src2, const Condition cond = al);
  // Compare and set the FPSCR flags (ARM VCMP). The immediate forms compare
  // against a literal; the VCMP encoding only supports 0.0 as an immediate.
  void vcmp(const DwVfpRegister src1, const DwVfpRegister src2,
            const Condition cond = al);
  void vcmp(const SwVfpRegister src1, const SwVfpRegister src2,
            const Condition cond = al);
  void vcmp(const DwVfpRegister src1, const double src2,
            const Condition cond = al);
  void vcmp(const SwVfpRegister src1, const float src2,
            const Condition cond = al);
802 
  // ARMv8 IEEE 754-2008 minNum/maxNum operations (VMAXNM/VMINNM);
  // unconditional only.
  void vmaxnm(const DwVfpRegister dst, const DwVfpRegister src1,
              const DwVfpRegister src2);
  void vmaxnm(const SwVfpRegister dst, const SwVfpRegister src1,
              const SwVfpRegister src2);
  void vminnm(const DwVfpRegister dst, const DwVfpRegister src1,
              const DwVfpRegister src2);
  void vminnm(const SwVfpRegister dst, const SwVfpRegister src1,
              const SwVfpRegister src2);

  // ARMv8 conditional select: dst = src1 if cond holds, else src2.
  // VSEL supports cond in {eq, ne, ge, lt, gt, le, vs, vc}.
  void vsel(const Condition cond, const DwVfpRegister dst,
            const DwVfpRegister src1, const DwVfpRegister src2);
  void vsel(const Condition cond, const SwVfpRegister dst,
            const SwVfpRegister src1, const SwVfpRegister src2);

  void vsqrt(const DwVfpRegister dst, const DwVfpRegister src,
             const Condition cond = al);
  void vsqrt(const SwVfpRegister dst, const SwVfpRegister src,
             const Condition cond = al);

  // ARMv8 rounding instructions. Suffix selects the rounding mode:
  // a = to nearest, ties away from zero; n = to nearest, ties to even;
  // m = toward minus infinity; p = toward plus infinity; z = toward zero.
  void vrinta(const SwVfpRegister dst, const SwVfpRegister src);
  void vrinta(const DwVfpRegister dst, const DwVfpRegister src);
  void vrintn(const SwVfpRegister dst, const SwVfpRegister src);
  void vrintn(const DwVfpRegister dst, const DwVfpRegister src);
  void vrintm(const SwVfpRegister dst, const SwVfpRegister src);
  void vrintm(const DwVfpRegister dst, const DwVfpRegister src);
  void vrintp(const SwVfpRegister dst, const SwVfpRegister src);
  void vrintp(const DwVfpRegister dst, const DwVfpRegister src);
  void vrintz(const SwVfpRegister dst, const SwVfpRegister src,
              const Condition cond = al);
  void vrintz(const DwVfpRegister dst, const DwVfpRegister src,
              const Condition cond = al);
836 
  // Support for NEON.

  // All these APIs support D0 to D31 and Q0 to Q15.
  // Load/store a list of NEON registers from/to memory (NEON VLD1/VST1).
  // vld1r loads a single element and replicates it into all lanes.
  void vld1(NeonSize size, const NeonListOperand& dst,
            const NeonMemOperand& src);
  void vld1r(NeonSize size, const NeonListOperand& dst,
             const NeonMemOperand& src);
  void vst1(NeonSize size, const NeonListOperand& src,
            const NeonMemOperand& dst);
  // Widening move (VMOVL); dt represents the narrower type.
  void vmovl(NeonDataType dt, QwNeonRegister dst, DwVfpRegister src);
  // Saturating narrowing move (VQMOVN); dst_dt represents the narrower type,
  // src_dt represents the src type.
  void vqmovn(NeonDataType dst_dt, NeonDataType src_dt, DwVfpRegister dst,
              QwNeonRegister src);

  // Only unconditional core <-> scalar moves are currently supported;
  // index selects the lane of the D register involved.
  void vmov(NeonDataType dt, DwVfpRegister dst, int index, Register src);
  void vmov(NeonDataType dt, Register dst, DwVfpRegister src, int index);

  // Whole-register moves and lane duplication (VMOV/VDUP).
  void vmov(QwNeonRegister dst, uint64_t imm);
  void vmov(QwNeonRegister dst, QwNeonRegister src);
  void vdup(NeonSize size, QwNeonRegister dst, Register src);
  void vdup(NeonSize size, QwNeonRegister dst, DwVfpRegister src, int index);
  void vdup(NeonSize size, DwVfpRegister dst, DwVfpRegister src, int index);

  // Per-lane integer <-> float conversions (VCVT, f32 lanes).
  void vcvt_f32_s32(QwNeonRegister dst, QwNeonRegister src);
  void vcvt_f32_u32(QwNeonRegister dst, QwNeonRegister src);
  void vcvt_s32_f32(QwNeonRegister dst, QwNeonRegister src);
  void vcvt_u32_f32(QwNeonRegister dst, QwNeonRegister src);

  void vmvn(QwNeonRegister dst, QwNeonRegister src);
  void vswp(DwVfpRegister dst, DwVfpRegister src);
  void vswp(QwNeonRegister dst, QwNeonRegister src);
  // NOTE(review): for the unary/binary ops below, the overloads taking a
  // NeonSize/NeonDataType appear to be the integer-lane forms and the
  // size-less overloads the float-lane forms -- confirm against the
  // implementations.
  void vabs(QwNeonRegister dst, QwNeonRegister src);
  void vabs(NeonSize size, QwNeonRegister dst, QwNeonRegister src);
  void vneg(QwNeonRegister dst, QwNeonRegister src);
  void vneg(NeonSize size, QwNeonRegister dst, QwNeonRegister src);

  // Bitwise and arithmetic operations. vqadd/vqsub saturate; vmlal/vmull are
  // the widening multiply(-accumulate) forms; vpadd/vpmin/vpmax operate
  // pairwise across adjacent lanes.
  void vand(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
  void vbic(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
  void veor(DwVfpRegister dst, DwVfpRegister src1, DwVfpRegister src2);
  void veor(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
  void vbsl(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
  void vorr(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
  void vadd(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
  void vadd(NeonSize size, QwNeonRegister dst, QwNeonRegister src1,
            QwNeonRegister src2);
  void vqadd(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src1,
             QwNeonRegister src2);
  void vsub(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
  void vsub(NeonSize size, QwNeonRegister dst, QwNeonRegister src1,
            QwNeonRegister src2);
  void vqsub(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src1,
             QwNeonRegister src2);
  void vmlal(NeonDataType size, QwNeonRegister dst, DwVfpRegister src1,
             DwVfpRegister src2);
  void vmul(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
  void vmul(NeonSize size, QwNeonRegister dst, QwNeonRegister src1,
            QwNeonRegister src2);
  void vmull(NeonDataType size, QwNeonRegister dst, DwVfpRegister src1,
             DwVfpRegister src2);
  void vmin(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
  void vmin(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src1,
            QwNeonRegister src2);
  void vmax(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
  void vmax(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src1,
            QwNeonRegister src2);
  void vpadd(DwVfpRegister dst, DwVfpRegister src1, DwVfpRegister src2);
  void vpadd(NeonSize size, DwVfpRegister dst, DwVfpRegister src1,
             DwVfpRegister src2);
  void vpmin(NeonDataType dt, DwVfpRegister dst, DwVfpRegister src1,
             DwVfpRegister src2);
  void vpmax(NeonDataType dt, DwVfpRegister dst, DwVfpRegister src1,
             DwVfpRegister src2);
  // Shifts by an immediate or by a register (VSHL/VSHR); vsli/vsri shift and
  // insert.
  void vshl(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src, int shift);
  void vshl(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src,
            QwNeonRegister shift);
  void vshr(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src, int shift);
  void vsli(NeonSize size, DwVfpRegister dst, DwVfpRegister src, int shift);
  void vsri(NeonSize size, DwVfpRegister dst, DwVfpRegister src, int shift);
  // Reciprocal and reciprocal-square-root estimates and Newton-Raphson
  // refinement steps. vrecpe and vrsqrte only support floating point lanes.
  void vrecpe(QwNeonRegister dst, QwNeonRegister src);
  void vrsqrte(QwNeonRegister dst, QwNeonRegister src);
  void vrecps(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
  void vrsqrts(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
  // Per-lane comparisons producing all-ones/all-zeros lane masks
  // (VTST/VCEQ/VCGE/VCGT) and rounding halving add (VRHADD).
  void vtst(NeonSize size, QwNeonRegister dst, QwNeonRegister src1,
            QwNeonRegister src2);
  void vceq(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
  void vceq(NeonSize size, QwNeonRegister dst, QwNeonRegister src1,
            QwNeonRegister src2);
  void vcge(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
  void vcge(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src1,
            QwNeonRegister src2);
  void vcgt(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
  void vcgt(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src1,
            QwNeonRegister src2);
  void vrhadd(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src1,
              QwNeonRegister src2);
  // Extract bytes spanning src1:src2 starting at the given byte offset (VEXT).
  void vext(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2,
            int bytes);
  // Permutes operating in place on their operands: interleave (VZIP),
  // de-interleave (VUZP), lane reversal (VREV16/32/64) and transpose (VTRN).
  void vzip(NeonSize size, DwVfpRegister src1, DwVfpRegister src2);
  void vzip(NeonSize size, QwNeonRegister src1, QwNeonRegister src2);
  void vuzp(NeonSize size, DwVfpRegister src1, DwVfpRegister src2);
  void vuzp(NeonSize size, QwNeonRegister src1, QwNeonRegister src2);
  void vrev16(NeonSize size, QwNeonRegister dst, QwNeonRegister src);
  void vrev32(NeonSize size, QwNeonRegister dst, QwNeonRegister src);
  void vrev64(NeonSize size, QwNeonRegister dst, QwNeonRegister src);
  void vtrn(NeonSize size, DwVfpRegister src1, DwVfpRegister src2);
  void vtrn(NeonSize size, QwNeonRegister src1, QwNeonRegister src2);
  // Table lookup (VTBL) / table lookup with existing-value fallback (VTBX).
  void vtbl(DwVfpRegister dst, const NeonListOperand& list,
            DwVfpRegister index);
  void vtbx(DwVfpRegister dst, const NeonListOperand& list,
            DwVfpRegister index);
950 
  // Pseudo instructions

  // Different nop operations are used by the code generator to detect certain
  // states of the generated code. The marker type is encoded into the emitted
  // nop and can be recovered later via IsNop(instr, type).
  enum NopMarkerTypes {
    NON_MARKING_NOP = 0,
    DEBUG_BREAK_NOP,
    // IC markers.
    PROPERTY_ACCESS_INLINED,
    PROPERTY_ACCESS_INLINED_CONTEXT,
    PROPERTY_ACCESS_INLINED_CONTEXT_DONT_DELETE,
    // Helper values.
    LAST_CODE_MARKER,
    FIRST_IC_MARKER = PROPERTY_ACCESS_INLINED
  };

  // Emit a nop carrying the given NopMarkerTypes value.
  void nop(int type = 0);  // 0 is the default non-marking type.
968 
  // Push src on the stack: pre-decrement sp by one 4-byte word, then store
  // src there ("str src, [sp, #-4]!").
  void push(Register src, Condition cond = al) {
    str(src, MemOperand(sp, 4, NegPreIndex), cond);
  }
972 
  // Pop the top of the stack into dst: load from [sp], then post-increment
  // sp by one 4-byte word ("ldr dst, [sp], #4").
  void pop(Register dst, Condition cond = al) {
    ldr(dst, MemOperand(sp, 4, PostIndex), cond);
  }

  // Pop with no destination register; see the definition in the .cc file.
  void pop();
978 
  // Push/pop VFP/NEON registers on the stack. vpush stores with db_w
  // (decrement-before, writeback) so sp moves down past the stored
  // registers; vpop loads with ia_w (increment-after, writeback).
  void vpush(QwNeonRegister src, Condition cond = al) {
    // A Q register occupies the consecutive D-register pair
    // src.low()..src.high().
    vstm(db_w, sp, src.low(), src.high(), cond);
  }

  void vpush(DwVfpRegister src, Condition cond = al) {
    vstm(db_w, sp, src, src, cond);
  }

  void vpush(SwVfpRegister src, Condition cond = al) {
    vstm(db_w, sp, src, src, cond);
  }

  void vpop(DwVfpRegister dst, Condition cond = al) {
    vldm(ia_w, sp, dst, dst, cond);
  }
994 
  // Jump unconditionally to given label.
  void jmp(Label* L) { b(L, al); }

  // Check the code size (in bytes) generated from label to here.
  int SizeOfCodeGeneratedSince(Label* label) {
    return pc_offset() - label->pos();
  }

  // Check the number of instructions generated from label to here
  // (byte distance divided by the fixed instruction size).
  int InstructionsGeneratedSince(Label* label) {
    return SizeOfCodeGeneratedSince(label) / kInstrSize;
  }

  // Check whether an immediate fits an addressing mode 1 instruction.
  static bool ImmediateFitsAddrMode1Instruction(int32_t imm32);

  // Check whether an immediate fits an addressing mode 2 instruction.
  bool ImmediateFitsAddrMode2Instruction(int32_t imm32);
1013 
  // Class for scoping postponing the constant pool generation.
  // RAII wrapper around StartBlockConstPool()/EndBlockConstPool(): constant
  // pool emission is blocked for the lifetime of the scope. Scopes may nest.
  class BlockConstPoolScope {
   public:
    explicit BlockConstPoolScope(Assembler* assem) : assem_(assem) {
      assem_->StartBlockConstPool();
    }
    ~BlockConstPoolScope() { assem_->EndBlockConstPool(); }

   private:
    Assembler* assem_;

    DISALLOW_IMPLICIT_CONSTRUCTORS(BlockConstPoolScope);
  };
1027 
  // Unused on this architecture.
  void MaybeEmitOutOfLineConstantPool() {}

  // Record a deoptimization reason that can be used by a log or cpu profiler.
  // Use --trace-deopt to enable.
  void RecordDeoptReason(DeoptimizeReason reason, SourcePosition position,
                         int id);

  // Record the emission of a constant pool.
  //
  // The emission of constant pool depends on the size of the code generated and
  // the number of RelocInfo recorded.
  // The Debug mechanism needs to map code offsets between two versions of a
  // function, compiled with and without debugger support (see for example
  // Debug::PrepareForBreakPoints()).
  // Compiling functions with debugger support generates additional code
  // (DebugCodegen::GenerateSlot()). This may affect the emission of the
  // constant pools and cause the version of the code with debugger support to
  // have constant pools generated in different places.
  // Recording the position and size of emitted constant pools allows to
  // correctly compute the offset mappings between the different versions of a
  // function in all situations.
  //
  // The parameter indicates the size of the constant pool (in bytes), including
  // the marker and branch over the data.
  void RecordConstPool(int size);

  // Writes a single byte or word of data in the code stream.  Used
  // for inline tables, e.g., jump-tables. CheckConstantPool() should be
  // called before any use of db/dd/dq/dp to ensure that constant pools
  // are not emitted as part of the tables generated.
  void db(uint8_t data);
  void dd(uint32_t data);
  void dq(uint64_t data);
  // Pointer-sized data; on 32-bit ARM a uintptr_t fits in a dd() word.
  void dp(uintptr_t data) { dd(data); }
1063 
  // Read/patch instructions
  // Read the instruction word at the given offset into the code buffer.
  Instr instr_at(int pos) {
    return *reinterpret_cast<Instr*>(buffer_start_ + pos);
  }
  // Overwrite the instruction word at the given offset into the code buffer.
  void instr_at_put(int pos, Instr instr) {
    *reinterpret_cast<Instr*>(buffer_start_ + pos) = instr;
  }
  // Static variants that operate on an absolute address rather than a
  // buffer offset.
  static Instr instr_at(Address pc) { return *reinterpret_cast<Instr*>(pc); }
  static void instr_at_put(Address pc, Instr instr) {
    *reinterpret_cast<Instr*>(pc) = instr;
  }
  // Predicates and accessors for decoding/classifying previously emitted
  // instructions, and Set*/Patch* helpers that return a modified instruction
  // word. Used when inspecting or patching generated code.
  static Condition GetCondition(Instr instr);
  static bool IsLdrRegisterImmediate(Instr instr);
  static bool IsVldrDRegisterImmediate(Instr instr);
  static int GetLdrRegisterImmediateOffset(Instr instr);
  static int GetVldrDRegisterImmediateOffset(Instr instr);
  static Instr SetLdrRegisterImmediateOffset(Instr instr, int offset);
  static Instr SetVldrDRegisterImmediateOffset(Instr instr, int offset);
  static bool IsStrRegisterImmediate(Instr instr);
  static Instr SetStrRegisterImmediateOffset(Instr instr, int offset);
  static bool IsAddRegisterImmediate(Instr instr);
  static Instr SetAddRegisterImmediateOffset(Instr instr, int offset);
  static Register GetRd(Instr instr);
  static Register GetRn(Instr instr);
  static Register GetRm(Instr instr);
  static bool IsPush(Instr instr);
  static bool IsPop(Instr instr);
  static bool IsStrRegFpOffset(Instr instr);
  static bool IsLdrRegFpOffset(Instr instr);
  static bool IsStrRegFpNegOffset(Instr instr);
  static bool IsLdrRegFpNegOffset(Instr instr);
  static bool IsLdrPcImmediateOffset(Instr instr);
  static bool IsBOrBlPcImmediateOffset(Instr instr);
  static bool IsVldrDPcImmediateOffset(Instr instr);
  static bool IsBlxReg(Instr instr);
  static bool IsBlxIp(Instr instr);
  static bool IsTstImmediate(Instr instr);
  static bool IsCmpRegister(Instr instr);
  static bool IsCmpImmediate(Instr instr);
  static Register GetCmpImmediateRegister(Instr instr);
  static int GetCmpImmediateRawImmediate(Instr instr);
  // Recognizes marker nops emitted via nop(type); see NopMarkerTypes.
  static bool IsNop(Instr instr, int type = NON_MARKING_NOP);
  static bool IsMovImmed(Instr instr);
  static bool IsOrrImmed(Instr instr);
  static bool IsMovT(Instr instr);
  static Instr GetMovTPattern();
  static bool IsMovW(Instr instr);
  static Instr GetMovWPattern();
  static Instr EncodeMovwImmediate(uint32_t immediate);
  static Instr PatchMovwImmediate(Instr instruction, uint32_t immediate);
  static int DecodeShiftImm(Instr instr);
  static Instr PatchShiftImm(Instr instr, int immed);
1116 
  // Constants in pools are accessed via pc relative addressing, which can
  // reach +/-4KB for integer PC-relative loads and +/-1KB for floating-point
  // PC-relative loads, thereby defining a maximum distance between the
  // instruction and the accessed constant.
  static constexpr int kMaxDistToIntPool = 4 * KB;
  // All relocations could be integer, it therefore acts as the limit.
  static constexpr int kMinNumPendingConstants = 4;
  static constexpr int kMaxNumPending32Constants =
      kMaxDistToIntPool / kInstrSize;

  // Postpone the generation of the constant pool for the specified number of
  // instructions.
  void BlockConstPoolFor(int instructions);

  // Check if it is time to emit a constant pool.
  void CheckConstPool(bool force_emit, bool require_jump);
1133 
MaybeCheckConstPool()1134   void MaybeCheckConstPool() {
1135     if (pc_offset() >= next_buffer_check_) {
1136       CheckConstPool(false, true);
1137     }
1138   }
1139 
  // Move a 32-bit immediate into a register, potentially via the constant pool.
  void Move32BitImmediate(Register rd, const Operand& x, Condition cond = al);

  // Get the code target object for a pc-relative call or jump.
  V8_INLINE Handle<Code> relative_code_target_object_handle_at(
      Address pc_) const;
1146 
 protected:
  // Bytes remaining between the current emission position and the relocation
  // writer, i.e. the free space left in the buffer.
  int buffer_space() const { return reloc_info_writer.pos() - pc_; }

  // Decode branch instruction at pos and return branch target pos
  int target_at(int pos);

  // Patch branch instruction at pos to branch to given branch target pos
  void target_at_put(int pos, int target_pos);
1155 
1156   // Prevent contant pool emission until EndBlockConstPool is called.
1157   // Calls to this function can be nested but must be followed by an equal
1158   // number of call to EndBlockConstpool.
StartBlockConstPool()1159   void StartBlockConstPool() {
1160     if (const_pool_blocked_nesting_++ == 0) {
1161       // Prevent constant pool checks happening by setting the next check to
1162       // the biggest possible offset.
1163       next_buffer_check_ = kMaxInt;
1164     }
1165   }
1166 
  // Resume constant pool emission. Needs to be called as many times as
  // StartBlockConstPool to have an effect.
  void EndBlockConstPool() {
    // Only the outermost EndBlockConstPool actually re-enables emission.
    if (--const_pool_blocked_nesting_ == 0) {
#ifdef DEBUG
      // Max pool start (if we need a jump and an alignment).
      int start = pc_offset() + kInstrSize + 2 * kPointerSize;
      // Check the constant pool hasn't been blocked for too long.
      DCHECK(pending_32_bit_constants_.empty() ||
             (start < first_const_pool_32_use_ + kMaxDistToIntPool));
#endif
      // Two cases:
      //  * no_const_pool_before_ >= next_buffer_check_ and the emission is
      //    still blocked
      //  * no_const_pool_before_ < next_buffer_check_ and the next emit will
      //    trigger a check.
      next_buffer_check_ = no_const_pool_before_;
    }
  }
1186 
is_const_pool_blocked()1187   bool is_const_pool_blocked() const {
1188     return (const_pool_blocked_nesting_ > 0) ||
1189            (pc_offset() < no_const_pool_before_);
1190   }
1191 
VfpRegisterIsAvailable(DwVfpRegister reg)1192   bool VfpRegisterIsAvailable(DwVfpRegister reg) {
1193     DCHECK(reg.is_valid());
1194     return IsEnabled(VFP32DREGS) ||
1195            (reg.code() < LowDwVfpRegister::kNumRegisters);
1196   }
1197 
VfpRegisterIsAvailable(QwNeonRegister reg)1198   bool VfpRegisterIsAvailable(QwNeonRegister reg) {
1199     DCHECK(reg.is_valid());
1200     return IsEnabled(VFP32DREGS) ||
1201            (reg.code() < LowDwVfpRegister::kNumRegisters / 2);
1202   }
1203 
  // Append the instruction word x to the code buffer.
  inline void emit(Instr x);

  // Code generation
  // The relocation writer's position is at least kGap bytes below the end of
  // the generated instructions. This is so that multi-instruction sequences do
  // not have to check for overflow. The same is true for writes of large
  // relocation info entries.
  static constexpr int kGap = 32;
  STATIC_ASSERT(AssemblerBase::kMinimalBufferSize >= 2 * kGap);

  // Relocation info generation
  // Each relocation is encoded as a variable size value
  static constexpr int kMaxRelocSize = RelocInfoWriter::kMaxSize;
  RelocInfoWriter reloc_info_writer;

  // ConstantPoolEntry records are used during code generation as temporary
  // containers for constants and code target addresses until they are emitted
  // to the constant pool. These records are temporarily stored in a separate
  // buffer until a constant pool is emitted.
  // If every instruction in a long sequence is accessing the pool, we need one
  // pending relocation entry per instruction.

  // The buffers of pending constant pool entries.
  std::vector<ConstantPoolEntry> pending_32_bit_constants_;

  // Scratch registers available for use by the Assembler.
  RegList scratch_register_list_;
  VfpRegList scratch_vfp_register_list_;
1232 
 private:
  // Avoid overflows for displacements etc.
  static const int kMaximalBufferSize = 512 * MB;

  int next_buffer_check_;  // pc offset of next buffer check

  // Constant pool generation
  // Pools are emitted in the instruction stream, preferably after unconditional
  // jumps or after returns from functions (in dead code locations).
  // If a long code sequence does not contain unconditional jumps, it is
  // necessary to emit the constant pool before the pool gets too far from the
  // location it is accessed from. In this case, we emit a jump over the emitted
  // constant pool.
  // Constants in the pool may be addresses of functions that gets relocated;
  // if so, a relocation info entry is associated to the constant pool entry.

  // Repeated checking whether the constant pool should be emitted is rather
  // expensive. By default we only check again once a number of instructions
  // has been generated. That also means that the sizing of the buffers is not
  // an exact science, and that we rely on some slop to not overrun buffers.
  static constexpr int kCheckPoolIntervalInst = 32;
  static constexpr int kCheckPoolInterval = kCheckPoolIntervalInst * kInstrSize;

  // Emission of the constant pool may be blocked in some code sequences.
  int const_pool_blocked_nesting_;  // Block emission if this is not zero.
  int no_const_pool_before_;        // Block emission before this pc offset.

  // Keep track of the first instruction requiring a constant pool entry
  // since the previous constant pool was emitted.
  int first_const_pool_32_use_;

  // The bound position, before this we cannot do instruction elimination.
  int last_bound_pos_;

  inline void CheckBuffer();
  void GrowBuffer();

  // Instruction generation
  // Emit |instr| with operands encoded for the respective ARM addressing mode.
  void AddrMode1(Instr instr, Register rd, Register rn, const Operand& x);
  // Attempt to encode operand |x| for instruction |instr| and return true on
  // success. The result will be encoded in |instr| directly. This method may
  // change the opcode if deemed beneficial, for instance, MOV may be turned
  // into MVN, ADD into SUB, AND into BIC, ...etc.  The only reason this method
  // may fail is that the operand is an immediate that cannot be encoded.
  bool AddrMode1TryEncodeOperand(Instr* instr, const Operand& x);

  void AddrMode2(Instr instr, Register rd, const MemOperand& x);
  void AddrMode3(Instr instr, Register rd, const MemOperand& x);
  void AddrMode4(Instr instr, Register rn, RegList rl);
  void AddrMode5(Instr instr, CRegister crd, const MemOperand& x);

  // Labels
  void print(const Label* L);
  void bind_to(Label* L, int pos);
  void next(Label* L);

  // Record reloc info for current pc_
  void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
  void ConstantPoolAddEntry(int position, RelocInfo::Mode rmode,
                            intptr_t value);
  void AllocateAndInstallRequestedHeapObjects(Isolate* isolate);

  int WriteCodeComments();

  friend class RelocInfo;
  friend class BlockConstPoolScope;
  friend class EnsureSpace;
  friend class UseScratchRegisterScope;
};
1302 
// Helper scope whose constructor verifies the assembler buffer has room for
// the code about to be emitted (constructor defined elsewhere; presumably it
// triggers buffer growth when needed -- see Assembler::kGap).
class EnsureSpace {
 public:
  V8_INLINE explicit EnsureSpace(Assembler* assembler);
};
1307 
// Assembler variant used to patch a fixed number of previously emitted
// instructions in place at a given address.
class PatchingAssembler : public Assembler {
 public:
  PatchingAssembler(const AssemblerOptions& options, byte* address,
                    int instructions);
  ~PatchingAssembler();

  // NOTE(review): declared-only here; presumably Emit() writes addr into the
  // instruction stream and PadWithNops() fills the remaining reserved slots
  // with nops -- confirm against the definitions in the .cc file.
  void Emit(Address addr);
  void PadWithNops();
};
1317 
// This scope utility allows scratch registers to be managed safely. The
// Assembler's GetScratchRegisterList() is used as a pool of scratch
// registers. These registers can be allocated on demand, and will be returned
// at the end of the scope.
//
// When the scope ends, the Assembler's list will be restored to its original
// state, even if the list is modified by some other means. Note that this scope
// can be nested but the destructors need to run in the opposite order as the
// constructors. We do not have assertions for this.
class V8_EXPORT_PRIVATE UseScratchRegisterScope {
 public:
  explicit UseScratchRegisterScope(Assembler* assembler);
  ~UseScratchRegisterScope();

  // Take a register from the list and return it.
  Register Acquire();
  // Acquire VFP/NEON scratch registers of the various widths.
  SwVfpRegister AcquireS() { return AcquireVfp<SwVfpRegister>(); }
  LowDwVfpRegister AcquireLowD() { return AcquireVfp<LowDwVfpRegister>(); }
  DwVfpRegister AcquireD() {
    DwVfpRegister reg = AcquireVfp<DwVfpRegister>();
    // The acquired register must be usable under the current VFP feature set.
    DCHECK(assembler_->VfpRegisterIsAvailable(reg));
    return reg;
  }
  QwNeonRegister AcquireQ() {
    QwNeonRegister reg = AcquireVfp<QwNeonRegister>();
    DCHECK(assembler_->VfpRegisterIsAvailable(reg));
    return reg;
  }

  // Check if we have registers available to acquire.
  bool CanAcquire() const { return *assembler_->GetScratchRegisterList() != 0; }
  bool CanAcquireD() const { return CanAcquireVfp<DwVfpRegister>(); }

 private:
  friend class Assembler;
  friend class TurboAssembler;

  template <typename T>
  bool CanAcquireVfp() const;

  template <typename T>
  T AcquireVfp();

  Assembler* assembler_;
  // Available scratch registers at the start of this scope.
  RegList old_available_;
  VfpRegList old_available_vfp_;
};
1366 
1367 }  // namespace internal
1368 }  // namespace v8
1369 
1370 #endif  // V8_CODEGEN_ARM_ASSEMBLER_ARM_H_
1371