// Copyright (c) 2013, the Dart project authors.  Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.
//
// This is forked from Dart revision df52deea9f25690eb8b66c5995da92b70f7ac1fe
// Please update the (git) revision if we merge changes from Dart.
// https://code.google.com/p/dart/wiki/GettingTheSource

#ifndef VM_ASSEMBLER_ARM_H_
#define VM_ASSEMBLER_ARM_H_

#ifndef VM_ASSEMBLER_H_
#error Do not include assembler_arm.h directly; use assembler.h instead.
#endif

#include "platform/assert.h"
#include "platform/utils.h"
#include "vm/constants_arm.h"
#include "vm/cpu.h"
#include "vm/hash_map.h"
#include "vm/object.h"
#include "vm/simulator.h"

namespace dart {

// Forward declarations.
class RuntimeEntry;
class StubEntry;

#if 0
// Moved to ARM32::AssemblerARM32 as needed
// Instruction encoding bits.
enum {
  H   = 1 << 5,   // halfword (or byte)
  L   = 1 << 20,  // load (or store)
  S   = 1 << 20,  // set condition code (or leave unchanged)
  W   = 1 << 21,  // writeback base register (or leave unchanged)
  A   = 1 << 21,  // accumulate in multiply instruction (or not)
  B   = 1 << 22,  // unsigned byte (or word)
  D   = 1 << 22,  // high/lo bit of start of s/d register range
  N   = 1 << 22,  // long (or short)
  U   = 1 << 23,  // positive (or negative) offset/index
  P   = 1 << 24,  // offset/pre-indexed addressing (or post-indexed addressing)
  I   = 1 << 25,  // immediate shifter operand (or not)

  B0 = 1,
  B1 = 1 << 1,
  B2 = 1 << 2,
  B3 = 1 << 3,
  B4 = 1 << 4,
  B5 = 1 << 5,
  B6 = 1 << 6,
  B7 = 1 << 7,
  B8 = 1 << 8,
  B9 = 1 << 9,
  B10 = 1 << 10,
  B11 = 1 << 11,
  B12 = 1 << 12,
  B16 = 1 << 16,
  B17 = 1 << 17,
  B18 = 1 << 18,
  B19 = 1 << 19,
  B20 = 1 << 20,
  B21 = 1 << 21,
  B22 = 1 << 22,
  B23 = 1 << 23,
  B24 = 1 << 24,
  B25 = 1 << 25,
  B26 = 1 << 26,
  B27 = 1 << 27,
};
#endif

class Label : public ValueObject {
public:
  Label() : position_(0) {}

  ~Label() {
    // Assert if label is being destroyed with unresolved branches pending.
    ASSERT(!IsLinked());
  }

  // Returns the position for bound and linked labels. Cannot be used
  // for unused labels.
  intptr_t Position() const {
    ASSERT(!IsUnused());
    return IsBound() ? -position_ - kWordSize : position_ - kWordSize;
  }

  bool IsBound() const { return position_ < 0; }
  bool IsUnused() const { return position_ == 0; }
  bool IsLinked() const { return position_ > 0; }
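
  // Encoding used by the accessors above (kWordSize is 4 bytes on ARM32):
  //   unused: position_ == 0
  //   linked: position_ == link position + kWordSize     (> 0)
  //   bound:  position_ == -(bound position + kWordSize) (< 0)
  // Biasing by kWordSize keeps position 0 distinguishable from the "unused"
  // state, and the sign distinguishes bound labels from linked ones.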

private:
  intptr_t position_;

  void Reinitialize() { position_ = 0; }

  void BindTo(intptr_t position) {
    ASSERT(!IsBound());
    position_ = -position - kWordSize;
    ASSERT(IsBound());
  }

  void LinkTo(intptr_t position) {
    ASSERT(!IsBound());
    position_ = position + kWordSize;
    ASSERT(IsLinked());
  }

  friend class Assembler;
  DISALLOW_COPY_AND_ASSIGN(Label);
};

// Encodes Addressing Mode 1 - Data-processing operands.
class Operand : public ValueObject {
public:
  // Data-processing operands - Uninitialized.
  Operand() : type_(-1), encoding_(-1) {}

  // Data-processing operands - Copy constructor.
  Operand(const Operand &other)
      : ValueObject(), type_(other.type_), encoding_(other.encoding_) {}

  // Data-processing operands - Assignment operator.
  Operand &operator=(const Operand &other) {
    type_ = other.type_;
    encoding_ = other.encoding_;
    return *this;
  }

#if 0
  // Moved to encodeRotatedImm8() in IceAssemblerARM32.cpp
  // Data-processing operands - Immediate.
  explicit Operand(uint32_t immediate) {
    ASSERT(immediate < (1 << kImmed8Bits));
    type_ = 1;
    encoding_ = immediate;
  }

  // Moved to decodeOperand() and encodeRotatedImm8() in IceAssemblerARM32.cpp
  // Data-processing operands - Rotated immediate.
  Operand(uint32_t rotate, uint32_t immed8) {
    ASSERT((rotate < (1 << kRotateBits)) && (immed8 < (1 << kImmed8Bits)));
    type_ = 1;
    encoding_ = (rotate << kRotateShift) | (immed8 << kImmed8Shift);
  }

  // Moved to decodeOperand() in IceAssemblerARM32.cpp
  // Data-processing operands - Register.
  explicit Operand(Register rm) {
    type_ = 0;
    encoding_ = static_cast<uint32_t>(rm);
  }

  // Moved to encodeShiftRotateImm5() in IceAssemblerARM32.cpp
  // Data-processing operands - Logical shift/rotate by immediate.
  Operand(Register rm, Shift shift, uint32_t shift_imm) {
    ASSERT(shift_imm < (1 << kShiftImmBits));
    type_ = 0;
    encoding_ = shift_imm << kShiftImmShift |
                static_cast<uint32_t>(shift) << kShiftShift |
                static_cast<uint32_t>(rm);
  }

  // Moved to encodeShiftRotateReg() in IceAssemblerARM32.cpp
  // Data-processing operands - Logical shift/rotate by register.
  Operand(Register rm, Shift shift, Register rs) {
    type_ = 0;
    encoding_ = static_cast<uint32_t>(rs) << kShiftRegisterShift |
                static_cast<uint32_t>(shift) << kShiftShift | (1 << 4) |
                static_cast<uint32_t>(rm);
  }

  // Already defined as ARM32::OperandARM32FlexImm::canHoldImm().
  static bool CanHold(uint32_t immediate, Operand* o) {
    // Avoid the more expensive test for frequent small immediate values.
    if (immediate < (1 << kImmed8Bits)) {
      o->type_ = 1;
      o->encoding_ = (0 << kRotateShift) | (immediate << kImmed8Shift);
      return true;
    }
    // Note that immediate must be unsigned for the test to work correctly.
    // Start at rot = 1: a rotation of 0 is covered by the early return above,
    // and would shift right by 32 bits (undefined behavior) here.
    for (int rot = 1; rot < 16; rot++) {
      uint32_t imm8 = (immediate << 2*rot) | (immediate >> (32 - 2*rot));
      if (imm8 < (1 << kImmed8Bits)) {
        o->type_ = 1;
        o->encoding_ = (rot << kRotateShift) | (imm8 << kImmed8Shift);
        return true;
      }
    }
    return false;
  }
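
  // Worked example for the search above: 0xFF000000 does not fit in 8 bits,
  // but rotating it left by 8 bits (rot == 4) yields 0xFF, so it is encoded
  // as imm8 == 0xFF with rotate == 4; the hardware reconstructs the value as
  // imm8 ror (2 * rotate).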
#endif

private:
  bool is_valid() const { return (type_ == 0) || (type_ == 1); }

  uint32_t type() const {
    ASSERT(is_valid());
    return type_;
  }

  uint32_t encoding() const {
    ASSERT(is_valid());
    return encoding_;
  }

  uint32_t type_; // Encodes the type field (bits 27-25) in the instruction.
  uint32_t encoding_;

  friend class Assembler;
  friend class Address;
};

enum OperandSize {
  kByte,
  kUnsignedByte,
  kHalfword,
  kUnsignedHalfword,
  kWord,
  kUnsignedWord,
  kWordPair,
  kSWord,
  kDWord,
  kRegList,
};

// Load/store multiple addressing mode.
enum BlockAddressMode {
  // clang-format off
  // bit encoding P U W
  DA           = (0|0|0) << 21,  // decrement after
  IA           = (0|4|0) << 21,  // increment after
  DB           = (8|0|0) << 21,  // decrement before
  IB           = (8|4|0) << 21,  // increment before
  DA_W         = (0|0|1) << 21,  // decrement after with writeback to base
  IA_W         = (0|4|1) << 21,  // increment after with writeback to base
  DB_W         = (8|0|1) << 21,  // decrement before with writeback to base
  IB_W         = (8|4|1) << 21   // increment before with writeback to base
  // clang-format on
};
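
// For example, the canonical ARM push and pop sequences are block stores and
// loads through SP: "stmdb sp!, {...}" uses DB_W, and "ldmia sp!, {...}"
// uses IA_W.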

class Address : public ValueObject {
public:
  enum OffsetKind {
    Immediate,
    IndexRegister,
    ScaledIndexRegister,
  };

  // Memory operand addressing mode
  enum Mode {
    // clang-format off
    kModeMask    = (8|4|1) << 21,
    // bit encoding P U W
    Offset       = (8|4|0) << 21,  // offset (w/o writeback to base)
    PreIndex     = (8|4|1) << 21,  // pre-indexed addressing with writeback
    PostIndex    = (0|4|0) << 21,  // post-indexed addressing with writeback
    NegOffset    = (8|0|0) << 21,  // negative offset (w/o writeback to base)
    NegPreIndex  = (8|0|1) << 21,  // negative pre-indexed with writeback
    NegPostIndex = (0|0|0) << 21   // negative post-indexed with writeback
    // clang-format on
  };
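
  // In ARM assembly these modes correspond to, e.g.:
  //   Offset:    ldr r0, [r1, #4]    (address r1 + 4, r1 unchanged)
  //   PreIndex:  ldr r0, [r1, #4]!   (address r1 + 4, then r1 += 4)
  //   PostIndex: ldr r0, [r1], #4    (address r1, then r1 += 4)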

  Address(const Address &other)
      : ValueObject(), encoding_(other.encoding_), kind_(other.kind_) {}

  Address &operator=(const Address &other) {
    encoding_ = other.encoding_;
    kind_ = other.kind_;
    return *this;
  }

  bool Equals(const Address &other) const {
    return (encoding_ == other.encoding_) && (kind_ == other.kind_);
  }

#if 0
  // Moved to decodeImmRegOffset() in IceAssemblerARM32.cpp.
  // Used to model stack offsets.
  explicit Address(Register rn, int32_t offset = 0, Mode am = Offset) {
    ASSERT(Utils::IsAbsoluteUint(12, offset));
    kind_ = Immediate;
    if (offset < 0) {
      encoding_ = (am ^ (1 << kUShift)) | -offset;  // Flip U to adjust sign.
    } else {
      encoding_ = am | offset;
    }
    encoding_ |= static_cast<uint32_t>(rn) << kRnShift;
  }
#endif

  // There is no register offset mode unless Mode is Offset, in which case the
  // shifted register case below should be used.
  Address(Register rn, Register r, Mode am);

  Address(Register rn, Register rm, Shift shift = LSL, uint32_t shift_imm = 0,
          Mode am = Offset) {
    Operand o(rm, shift, shift_imm);

    if ((shift == LSL) && (shift_imm == 0)) {
      kind_ = IndexRegister;
    } else {
      kind_ = ScaledIndexRegister;
    }
    encoding_ = o.encoding() | am | (static_cast<uint32_t>(rn) << kRnShift);
  }

  // There is no shifted register mode with a register shift.
  Address(Register rn, Register rm, Shift shift, Register r, Mode am = Offset);

  static OperandSize OperandSizeFor(intptr_t cid);

  static bool CanHoldLoadOffset(OperandSize size, int32_t offset,
                                int32_t *offset_mask);
  static bool CanHoldStoreOffset(OperandSize size, int32_t offset,
                                 int32_t *offset_mask);
  static bool CanHoldImmediateOffset(bool is_load, intptr_t cid,
                                     int64_t offset);
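
  // The encodable offset range depends on the addressing mode implied by the
  // operand size: roughly +/-4095 for word and unsigned byte accesses
  // (mode 2), +/-255 for halfword and signed byte accesses (mode 3), and a
  // word-aligned +/-1020 for VFP S/D registers (mode 5).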

private:
  Register rn() const {
    return Instr::At(reinterpret_cast<uword>(&encoding_))->RnField();
  }

  Register rm() const {
    return ((kind() == IndexRegister) || (kind() == ScaledIndexRegister))
               ? Instr::At(reinterpret_cast<uword>(&encoding_))->RmField()
               : kNoRegister;
  }

  Mode mode() const { return static_cast<Mode>(encoding() & kModeMask); }

  uint32_t encoding() const { return encoding_; }

#if 0
  // Moved to encodeImmRegOffsetEnc3 in IceAssemblerARM32.cpp
  // Encoding for addressing mode 3.
  uint32_t encoding3() const;
#endif

  // Encoding for vfp load/store addressing.
  uint32_t vencoding() const;

  OffsetKind kind() const { return kind_; }

  uint32_t encoding_;

  OffsetKind kind_;

  friend class Assembler;
};

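// A FieldAddress addresses a field in a heap object. Dart heap pointers are
// tagged (kHeapObjectTag == 1), so the displacement is corrected by
// -kHeapObjectTag to account for the tag bit set in the base pointer.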
class FieldAddress : public Address {
public:
  FieldAddress(Register base, int32_t disp)
      : Address(base, disp - kHeapObjectTag) {}

  // This addressing mode does not exist.
  FieldAddress(Register base, Register r);

  FieldAddress(const FieldAddress &other) : Address(other) {}

  FieldAddress &operator=(const FieldAddress &other) {
    Address::operator=(other);
    return *this;
  }
};

class Assembler : public ValueObject {
public:
  explicit Assembler(bool use_far_branches = false)
      : buffer_(), prologue_offset_(-1), use_far_branches_(use_far_branches),
        comments_(), constant_pool_allowed_(false) {}

  ~Assembler() {}

  void PopRegister(Register r) { Pop(r); }

  void Bind(Label *label);
  void Jump(Label *label) { b(label); }

  // Misc. functionality
  intptr_t CodeSize() const { return buffer_.Size(); }
  intptr_t prologue_offset() const { return prologue_offset_; }

  // Count the fixups that produce a pointer offset, without processing
  // the fixups.  On ARM there are no pointers in code.
  intptr_t CountPointerOffsets() const { return 0; }

  const ZoneGrowableArray<intptr_t> &GetPointerOffsets() const {
    ASSERT(buffer_.pointer_offsets().length() == 0); // No pointers in code.
    return buffer_.pointer_offsets();
  }

  ObjectPoolWrapper &object_pool_wrapper() { return object_pool_wrapper_; }

  RawObjectPool *MakeObjectPool() {
    return object_pool_wrapper_.MakeObjectPool();
  }

  bool use_far_branches() const {
    return FLAG_use_far_branches || use_far_branches_;
  }

#if defined(TESTING) || defined(DEBUG)
  // Used in unit tests and to ensure predictable verification code size in
  // FlowGraphCompiler::EmitEdgeCounter.
  void set_use_far_branches(bool b) { use_far_branches_ = b; }
#endif // TESTING || DEBUG

  void FinalizeInstructions(const MemoryRegion &region) {
    buffer_.FinalizeInstructions(region);
  }

  // Debugging and bringup support.
  void Stop(const char *message);
  void Unimplemented(const char *message);
  void Untested(const char *message);
  void Unreachable(const char *message);

  static void InitializeMemoryWithBreakpoints(uword data, intptr_t length);

  void Comment(const char *format, ...) PRINTF_ATTRIBUTE(2, 3);
  static bool EmittingComments();

  const Code::Comments &GetCodeComments() const;

  static const char *RegisterName(Register reg);

  static const char *FpuRegisterName(FpuRegister reg);

#if 0
  // Moved to ARM32::AssemblerARM32::and_()
  // Data-processing instructions.
  void and_(Register rd, Register rn, Operand o, Condition cond = AL);

  // Moved to ARM32::AssemblerARM32::eor()
  void eor(Register rd, Register rn, Operand o, Condition cond = AL);

  // Moved to ARM32::AssemblerARM32::sub()
  void sub(Register rd, Register rn, Operand o, Condition cond = AL);
  void subs(Register rd, Register rn, Operand o, Condition cond = AL);

  // Moved to ARM32::AssemblerARM32::rsb()
  void rsb(Register rd, Register rn, Operand o, Condition cond = AL);
  void rsbs(Register rd, Register rn, Operand o, Condition cond = AL);

  // Moved to ARM32::AssemblerARM32::add()
  void add(Register rd, Register rn, Operand o, Condition cond = AL);

  void adds(Register rd, Register rn, Operand o, Condition cond = AL);

  // Moved to ARM32::AssemblerARM32::adc()
  void adc(Register rd, Register rn, Operand o, Condition cond = AL);

  void adcs(Register rd, Register rn, Operand o, Condition cond = AL);

  // Moved to ARM32::AssemblerARM32::sbc()
  void sbc(Register rd, Register rn, Operand o, Condition cond = AL);

  // Moved to ARM32::AssemblerARM32::sbc()
  void sbcs(Register rd, Register rn, Operand o, Condition cond = AL);

  // Moved to ARM32::AssemblerARM32::rsc()
  void rsc(Register rd, Register rn, Operand o, Condition cond = AL);

  // Moved to ARM32::AssemblerARM32::tst();
  void tst(Register rn, Operand o, Condition cond = AL);
#endif

  void teq(Register rn, Operand o, Condition cond = AL);

#if 0
  // Moved to ARM32::AssemblerARM32::cmp()
  void cmp(Register rn, Operand o, Condition cond = AL);

  // Moved to ARM32::AssemblerARM32::cmn()
  void cmn(Register rn, Operand o, Condition cond = AL);

  // Moved to ARM32::AssemblerARM32::orr().
  void orr(Register rd, Register rn, Operand o, Condition cond = AL);
  void orrs(Register rd, Register rn, Operand o, Condition cond = AL);

  // Moved to ARM32::AssemblerARM32::mov()
  void mov(Register rd, Operand o, Condition cond = AL);
  void movs(Register rd, Operand o, Condition cond = AL);

  // Moved to ARM32::AssemblerARM32::bic()
  void bic(Register rd, Register rn, Operand o, Condition cond = AL);
  void bics(Register rd, Register rn, Operand o, Condition cond = AL);

  // Moved to ARM32::AssemblerARM32::mvn()
  void mvn(Register rd, Operand o, Condition cond = AL);
  void mvns(Register rd, Operand o, Condition cond = AL);

  // Miscellaneous data-processing instructions.
  // Moved to ARM32::AssemblerARM32::clz()
  void clz(Register rd, Register rm, Condition cond = AL);

  // Multiply instructions.

  // Moved to ARM32::AssemblerARM32::mul()
  void mul(Register rd, Register rn, Register rm, Condition cond = AL);
  void muls(Register rd, Register rn, Register rm, Condition cond = AL);

  // Moved to ARM32::AssemblerARM32::mla()
  void mla(Register rd, Register rn, Register rm, Register ra,
           Condition cond = AL);
  // Moved to ARM32::AssemblerARM32::mls()
  void mls(Register rd, Register rn, Register rm, Register ra,
           Condition cond = AL);
#endif

  void smull(Register rd_lo, Register rd_hi, Register rn, Register rm,
             Condition cond = AL);

#if 0
  // Moved to ARM32::AssemblerARM32::umull();
  void umull(Register rd_lo, Register rd_hi, Register rn, Register rm,
             Condition cond = AL);
#endif
  void smlal(Register rd_lo, Register rd_hi, Register rn, Register rm,
             Condition cond = AL);
  void umlal(Register rd_lo, Register rd_hi, Register rn, Register rm,
             Condition cond = AL);

  // Emulation of this instruction uses IP and the condition codes. Therefore,
  // none of the registers can be IP, and the instruction can only be used
  // unconditionally.
  void umaal(Register rd_lo, Register rd_hi, Register rn, Register rm);

  // Division instructions.
#if 0
  // Moved to ARM32::AssemblerARM32::sdiv()
  void sdiv(Register rd, Register rn, Register rm, Condition cond = AL);
  // Moved to ARM32::AssemblerARM32::udiv()
  void udiv(Register rd, Register rn, Register rm, Condition cond = AL);

  // Moved to ARM32::AssemblerARM32::ldr()
  // Load/store instructions.
  void ldr(Register rd, Address ad, Condition cond = AL);
  // Moved to ARM32::AssemblerARM32::str()
  void str(Register rd, Address ad, Condition cond = AL);

  // Moved to ARM32::AssemblerARM32::ldr()
  void ldrb(Register rd, Address ad, Condition cond = AL);
  // Moved to ARM32::AssemblerARM32::str()
  void strb(Register rd, Address ad, Condition cond = AL);

  // Moved to ARM32::AssemblerARM32::ldr()
  void ldrh(Register rd, Address ad, Condition cond = AL);
  // Moved to ARM32::AssemblerARM32::str()
  void strh(Register rd, Address ad, Condition cond = AL);
#endif

  void ldrsb(Register rd, Address ad, Condition cond = AL);
  void ldrsh(Register rd, Address ad, Condition cond = AL);

  // ldrd and strd actually support the full range of addressing modes, but
  // we don't use them, and we need to split them up into two instructions for
  // ARMv5TE, so we only support the base + offset mode.
  void ldrd(Register rd, Register rn, int32_t offset, Condition cond = AL);
  void strd(Register rd, Register rn, int32_t offset, Condition cond = AL);

#if 0
  // Folded into ARM32::AssemblerARM32::popList(), since it is its only use (and
  // doesn't implement ARM LDM instructions).
  void ldm(BlockAddressMode am, Register base,
           RegList regs, Condition cond = AL);

  // Folded into ARM32::AssemblerARM32::pushList(), since it is its only use
  // (and doesn't implement ARM STM instruction).
  void stm(BlockAddressMode am, Register base,
           RegList regs, Condition cond = AL);

  // Moved to ARM32::AssemblerARM32::ldrex();
  void ldrex(Register rd, Register rn, Condition cond = AL);
  // Moved to ARM32::AssemblerARM32::strex();
  void strex(Register rd, Register rt, Register rn, Condition cond = AL);
#endif

  // Miscellaneous instructions.
  void clrex();

#if 0
  // Moved to ARM32::AssemblerARM32::nop().
  void nop(Condition cond = AL);

  // Moved to ARM32::AssemblerARM32::bkpt()
  // Note that gdb sets breakpoints using the undefined instruction 0xe7f001f0.
  void bkpt(uint16_t imm16);

  static int32_t BkptEncoding(uint16_t imm16) {
    // bkpt requires that the cond field is AL.
    return (AL << kConditionShift) | B24 | B21 |
           ((imm16 >> 4) << 8) | B6 | B5 | B4 | (imm16 & 0xf);
  }

  // Not ported. PNaCl doesn't allow breakpoint instructions.
  static uword GetBreakInstructionFiller() {
    return BkptEncoding(0);
  }

  // Floating point instructions (VFPv3-D16 and VFPv3-D32 profiles).

  // Moved to ARM32::AssemblerARM32::vmovsr().
  void vmovsr(SRegister sn, Register rt, Condition cond = AL);
  // Moved to ARM32::AssemblerARM32::vmovrs().
  void vmovrs(Register rt, SRegister sn, Condition cond = AL);
#endif
  void vmovsrr(SRegister sm, Register rt, Register rt2, Condition cond = AL);
  void vmovrrs(Register rt, Register rt2, SRegister sm, Condition cond = AL);
#if 0
  // Moved to ARM32::AssemblerARM32::vmovdrr().
  void vmovdrr(DRegister dm, Register rt, Register rt2, Condition cond = AL);
  // Moved to ARM32::AssemblerARM32::vmovrrd().
  void vmovrrd(Register rt, Register rt2, DRegister dm, Condition cond = AL);
  // Moved to ARM32::AssemblerARM32::vmovqir().
  void vmovdr(DRegister dd, int i, Register rt, Condition cond = AL);
  // Moved to ARM32::AssemblerARM32::vmovss().
  void vmovs(SRegister sd, SRegister sm, Condition cond = AL);
  // Moved to ARM32::AssemblerARM32::vmovdd().
  void vmovd(DRegister dd, DRegister dm, Condition cond = AL);
#endif
  void vmovq(QRegister qd, QRegister qm);

#if 0
  // Returns false if the immediate cannot be encoded.
  // Moved to ARM32::AssemblerARM32::vmovs();
  bool vmovs(SRegister sd, float s_imm, Condition cond = AL);
  // Moved to ARM32::AssemblerARM32::vmovd();
  bool vmovd(DRegister dd, double d_imm, Condition cond = AL);

  // Moved to ARM32::AssemblerARM32::vldrs()
  void vldrs(SRegister sd, Address ad, Condition cond = AL);
  // Moved to ARM32::AssemblerARM32::vstrs()
  void vstrs(SRegister sd, Address ad, Condition cond = AL);
#endif
  // Moved to ARM32::AssemblerARM32::vldrd()
  void vldrd(DRegister dd, Address ad, Condition cond = AL);
#if 0
  // Moved to ARM32::AssemblerARM32::vstrd()
  void vstrd(DRegister dd, Address ad, Condition cond = AL);
#endif

  void vldms(BlockAddressMode am, Register base, SRegister first,
             SRegister last, Condition cond = AL);
  void vstms(BlockAddressMode am, Register base, SRegister first,
             SRegister last, Condition cond = AL);

  void vldmd(BlockAddressMode am, Register base, DRegister first,
             intptr_t count, Condition cond = AL);
  void vstmd(BlockAddressMode am, Register base, DRegister first,
             intptr_t count, Condition cond = AL);

#if 0
  // Moved to ARM32::AssemblerARM32::vadds()
  void vadds(SRegister sd, SRegister sn, SRegister sm, Condition cond = AL);
  // Moved to ARM32::AssemblerARM32::vaddd()
  void vaddd(DRegister dd, DRegister dn, DRegister dm, Condition cond = AL);
  // Moved to ARM32::AssemblerARM32::vaddqi().
  void vaddqi(OperandSize sz, QRegister qd, QRegister qn, QRegister qm);
  // Moved to ARM32::AssemblerARM32::vaddqf().
  void vaddqs(QRegister qd, QRegister qn, QRegister qm);
  // Moved to ARM32::AssemblerARM32::vsubs()
  void vsubs(SRegister sd, SRegister sn, SRegister sm, Condition cond = AL);
  // Moved to ARM32::AssemblerARM32::vsubd()
  void vsubd(DRegister dd, DRegister dn, DRegister dm, Condition cond = AL);
  // Moved to ARM32::AssemblerARM32::vsubqi().
  void vsubqi(OperandSize sz, QRegister qd, QRegister qn, QRegister qm);
  // Moved to ARM32::AssemblerARM32::vsubqf().
  void vsubqs(QRegister qd, QRegister qn, QRegister qm);
  // Moved to ARM32::AssemblerARM32::vmuls()
  void vmuls(SRegister sd, SRegister sn, SRegister sm, Condition cond = AL);
  // Moved to ARM32::AssemblerARM32::vmuld()
  void vmuld(DRegister dd, DRegister dn, DRegister dm, Condition cond = AL);
  // Moved to ARM32::AssemblerARM32::vmulqi().
  void vmulqi(OperandSize sz, QRegister qd, QRegister qn, QRegister qm);
  // Moved to ARM32::AssemblerARM32::vmulqf().
  void vmulqs(QRegister qd, QRegister qn, QRegister qm);
  // Moved to ARM32::AssemblerARM32::vshlqi().
  void vshlqi(OperandSize sz, QRegister qd, QRegister qm, QRegister qn);
  // Moved to ARM32::AssemblerARM32::vshlqu().
  void vshlqu(OperandSize sz, QRegister qd, QRegister qm, QRegister qn);
  // Moved to ARM32::AssemblerARM32::vmlas()
  void vmlas(SRegister sd, SRegister sn, SRegister sm, Condition cond = AL);
  // Moved to ARM32::AssemblerARM32::vmlad()
  void vmlad(DRegister dd, DRegister dn, DRegister dm, Condition cond = AL);
  // Moved to ARM32::AssemblerARM32::vmlss()
  void vmlss(SRegister sd, SRegister sn, SRegister sm, Condition cond = AL);
  // Moved to ARM32::AssemblerARM32::vmlsd()
  void vmlsd(DRegister dd, DRegister dn, DRegister dm, Condition cond = AL);
  // Moved to ARM32::AssemblerARM32::vdivs()
  void vdivs(SRegister sd, SRegister sn, SRegister sm, Condition cond = AL);
  // Moved to ARM32::AssemblerARM32::vdivd()
  void vdivd(DRegister dd, DRegister dn, DRegister dm, Condition cond = AL);
#endif
  void vminqs(QRegister qd, QRegister qn, QRegister qm);
  void vmaxqs(QRegister qd, QRegister qn, QRegister qm);
  void vrecpeqs(QRegister qd, QRegister qm);
  void vrecpsqs(QRegister qd, QRegister qn, QRegister qm);
  void vrsqrteqs(QRegister qd, QRegister qm);
  void vrsqrtsqs(QRegister qd, QRegister qn, QRegister qm);

#if 0
  // Moved to ARM32::AssemblerARM32::veorq()
  void veorq(QRegister qd, QRegister qn, QRegister qm);
  // Moved to ARM32::AssemblerARM32::vorrq()
  void vorrq(QRegister qd, QRegister qn, QRegister qm);
#endif
  void vornq(QRegister qd, QRegister qn, QRegister qm);
#if 0
  // Moved to ARM32::AssemblerARM32::vandq().
  void vandq(QRegister qd, QRegister qn, QRegister qm);
  // Moved to ARM32::AssemblerARM32::vmvnq().
  void vmvnq(QRegister qd, QRegister qm);

  // Moved to ARM32::AssemblerARM32::vceqqi().
  void vceqqi(OperandSize sz, QRegister qd, QRegister qn, QRegister qm);
  // Moved to ARM32::AssemblerARM32::vceqqs().
  void vceqqs(QRegister qd, QRegister qn, QRegister qm);
  // Moved to ARM32::AssemblerARM32::vcgeqi().
  void vcgeqi(OperandSize sz, QRegister qd, QRegister qn, QRegister qm);
  // Moved to ARM32::AssemblerARM32::vcugeqi().
  void vcugeqi(OperandSize sz, QRegister qd, QRegister qn, QRegister qm);
  // Moved to ARM32::AssemblerARM32::vcgeqs().
  void vcgeqs(QRegister qd, QRegister qn, QRegister qm);
  // Moved to ARM32::AssemblerARM32::vcgtqi().
  void vcgtqi(OperandSize sz, QRegister qd, QRegister qn, QRegister qm);
  // Moved to ARM32::AssemblerARM32::vcugtqi().
  void vcugtqi(OperandSize sz, QRegister qd, QRegister qn, QRegister qm);
  // Moved to ARM32::AssemblerARM32::vcgtqs().
  void vcgtqs(QRegister qd, QRegister qn, QRegister qm);

  // Moved to ARM32::AssemblerARM32::vabss().
  void vabss(SRegister sd, SRegister sm, Condition cond = AL);
  // Moved to ARM32::AssemblerARM32::vabsd().
  void vabsd(DRegister dd, DRegister dm, Condition cond = AL);
  // Moved to ARM32::AssemblerARM32::vabsq().
  void vabsqs(QRegister qd, QRegister qm);
#endif
  void vnegs(SRegister sd, SRegister sm, Condition cond = AL);
  void vnegd(DRegister dd, DRegister dm, Condition cond = AL);
#if 0
  // Moved to ARM32::AssemblerARM32::vnegqs().
  void vnegqs(QRegister qd, QRegister qm);
  // Moved to ARM32::AssemblerARM32::vsqrts().
  void vsqrts(SRegister sd, SRegister sm, Condition cond = AL);
  // Moved to ARM32::AssemblerARM32::vsqrtd().
  void vsqrtd(DRegister dd, DRegister dm, Condition cond = AL);

  // Moved to ARM32::AssemblerARM32::vcvtsd().
  void vcvtsd(SRegister sd, DRegister dm, Condition cond = AL);
  // Moved to ARM32::AssemblerARM32::vcvtds().
  void vcvtds(DRegister dd, SRegister sm, Condition cond = AL);
  // Moved to ARM32::AssemblerARM32::vcvtis()
  void vcvtis(SRegister sd, SRegister sm, Condition cond = AL);
  // Moved to ARM32::AssemblerARM32::vcvtid()
  void vcvtid(SRegister sd, DRegister dm, Condition cond = AL);
  // Moved to ARM32::AssemblerARM32::vcvtsi()
  void vcvtsi(SRegister sd, SRegister sm, Condition cond = AL);
  // Moved to ARM32::AssemblerARM32::vcvtdi()
  void vcvtdi(DRegister dd, SRegister sm, Condition cond = AL);
  // Moved to ARM32::AssemblerARM32::vcvtus().
  void vcvtus(SRegister sd, SRegister sm, Condition cond = AL);
  // Moved to ARM32::AssemblerARM32::vcvtud().
  void vcvtud(SRegister sd, DRegister dm, Condition cond = AL);
  // Moved to ARM32::AssemblerARM32::vcvtsu()
  void vcvtsu(SRegister sd, SRegister sm, Condition cond = AL);
  // Moved to ARM32::AssemblerARM32::vcvtdu()
  void vcvtdu(DRegister dd, SRegister sm, Condition cond = AL);

  // Moved to ARM32::AssemblerARM32::vcmps().
  void vcmps(SRegister sd, SRegister sm, Condition cond = AL);
  // Moved to ARM32::AssemblerARM32::vcmpd().
  void vcmpd(DRegister dd, DRegister dm, Condition cond = AL);
  // Moved to ARM32::AssemblerARM32::vcmpsz().
  void vcmpsz(SRegister sd, Condition cond = AL);
  // Moved to ARM32::AssemblerARM32::vcmpdz().
  void vcmpdz(DRegister dd, Condition cond = AL);

  // APSR_nzcv version moved to ARM32::AssemblerARM32::vmrsAPSR_nzcv()
  void vmrs(Register rd, Condition cond = AL);
#endif
  void vmstat(Condition cond = AL);

  // Duplicates the operand of size sz at index idx from dm to all elements of
  // qd. This is a special case of vtbl.
  void vdup(OperandSize sz, QRegister qd, DRegister dm, int idx);

  // Each byte of dm is an index into the table of bytes formed by concatenating
  // a list of 'length' registers starting with dn. The result is placed in dd.
  void vtbl(DRegister dd, DRegister dn, int length, DRegister dm);

  // The words of qd and qm are interleaved with the low words of the result
  // in qd and the high words in qm.
  void vzipqw(QRegister qd, QRegister qm);

  // Branch instructions.
#if 0
  // Moved to ARM32::AssemblerARM32::b();
  void b(Label* label, Condition cond = AL);
  // Moved to ARM32::AssemblerARM32::bl()
  void bl(Label* label, Condition cond = AL);
  // Moved to ARM32::AssemblerARM32::bx()
  void bx(Register rm, Condition cond = AL);
  // Moved to ARM32::AssemblerARM32::blx()
  void blx(Register rm, Condition cond = AL);
#endif

  void Branch(const StubEntry &stub_entry,
              Patchability patchable = kNotPatchable, Register pp = PP,
              Condition cond = AL);

  void BranchLink(const StubEntry &stub_entry,
                  Patchability patchable = kNotPatchable);
  void BranchLink(const Code &code, Patchability patchable);

  // Branch and link to an entry address. Call sequence can be patched.
  void BranchLinkPatchable(const StubEntry &stub_entry);
  void BranchLinkPatchable(const Code &code);

  // Branch and link to [base + offset]. Call sequence is never patched.
  void BranchLinkOffset(Register base, int32_t offset);

  // Add signed immediate value to rd. May clobber IP.
  void AddImmediate(Register rd, int32_t value, Condition cond = AL);
  void AddImmediate(Register rd, Register rn, int32_t value,
                    Condition cond = AL);
  void AddImmediateSetFlags(Register rd, Register rn, int32_t value,
                            Condition cond = AL);
  void SubImmediateSetFlags(Register rd, Register rn, int32_t value,
                            Condition cond = AL);
  void AndImmediate(Register rd, Register rs, int32_t imm, Condition cond = AL);

  // Test rn and immediate. May clobber IP.
  void TestImmediate(Register rn, int32_t imm, Condition cond = AL);

  // Compare rn with signed immediate value. May clobber IP.
  void CompareImmediate(Register rn, int32_t value, Condition cond = AL);
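
  // "May clobber IP": if the immediate cannot be encoded as a rotated 8-bit
  // operand (see Operand::CanHold), these helpers fall back to materializing
  // the value in the scratch register IP and using the register form.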

  // Signed integer division of left by right. Checks to see if integer
  // division is supported. If not, uses the FPU for division with
  // temporary registers tmpl and tmpr. tmpl and tmpr must be different
  // registers.
  void IntegerDivide(Register result, Register left, Register right,
                     DRegister tmpl, DRegister tmpr);

  // Load and Store.
  // These three do not clobber IP.
  void LoadPatchableImmediate(Register rd, int32_t value, Condition cond = AL);
  void LoadDecodableImmediate(Register rd, int32_t value, Condition cond = AL);
  void LoadImmediate(Register rd, int32_t value, Condition cond = AL);
  // These two may clobber IP.
  void LoadSImmediate(SRegister sd, float value, Condition cond = AL);
  void LoadDImmediate(DRegister dd, double value, Register scratch,
                      Condition cond = AL);

  void MarkExceptionHandler(Label *label);

  void Drop(intptr_t stack_elements);

  void RestoreCodePointer();
  void LoadPoolPointer(Register reg = PP);

  void LoadIsolate(Register rd);

  void LoadObject(Register rd, const Object &object, Condition cond = AL);
  void LoadUniqueObject(Register rd, const Object &object, Condition cond = AL);
  void LoadFunctionFromCalleePool(Register dst, const Function &function,
                                  Register new_pp);
  void LoadNativeEntry(Register dst, const ExternalLabel *label,
                       Patchability patchable, Condition cond = AL);
  void PushObject(const Object &object);
  void CompareObject(Register rn, const Object &object);

  // When storing into a heap object field, knowledge of the previous content
  // is expressed through these constants.
  enum FieldContent {
    kEmptyOrSmiOrNull, // Empty = garbage/zapped in release/debug mode.
    kHeapObjectOrSmi,
    kOnlySmi,
  };

  void StoreIntoObject(Register object,     // Object we are storing into.
                       const Address &dest, // Where we are storing into.
                       Register value,      // Value we are storing.
                       bool can_value_be_smi = true);
  void StoreIntoObjectOffset(Register object, int32_t offset, Register value,
                             bool can_value_be_smi = true);

  void StoreIntoObjectNoBarrier(Register object, const Address &dest,
                                Register value,
                                FieldContent old_content = kHeapObjectOrSmi);
  void InitializeFieldNoBarrier(Register object, const Address &dest,
                                Register value) {
    StoreIntoObjectNoBarrier(object, dest, value, kEmptyOrSmiOrNull);
  }
  void
  StoreIntoObjectNoBarrierOffset(Register object, int32_t offset,
                                 Register value,
                                 FieldContent old_content = kHeapObjectOrSmi);
  void StoreIntoObjectNoBarrier(Register object, const Address &dest,
                                const Object &value,
                                FieldContent old_content = kHeapObjectOrSmi);
  void
  StoreIntoObjectNoBarrierOffset(Register object, int32_t offset,
                                 const Object &value,
                                 FieldContent old_content = kHeapObjectOrSmi);

  // Store value_even, value_odd, value_even, ... into the words in the address
  // range [begin, end), assumed to be uninitialized fields in object (tagged).
  // The stores must not need a generational store barrier (e.g., smi/null),
  // and (value_even, value_odd) must be a valid register pair.
  // Destroys register 'begin'.
  void InitializeFieldsNoBarrier(Register object, Register begin, Register end,
                                 Register value_even, Register value_odd);
  // Like above, for the range [base+begin_offset, base+end_offset), unrolled.
  void InitializeFieldsNoBarrierUnrolled(Register object, Register base,
                                         intptr_t begin_offset,
                                         intptr_t end_offset,
                                         Register value_even,
                                         Register value_odd);

  // Stores a Smi value into a heap object field that always contains a Smi.
  void StoreIntoSmiField(const Address &dest, Register value);

  void LoadClassId(Register result, Register object, Condition cond = AL);
  void LoadClassById(Register result, Register class_id);
  void LoadClass(Register result, Register object, Register scratch);
  void CompareClassId(Register object, intptr_t class_id, Register scratch);
  void LoadClassIdMayBeSmi(Register result, Register object);
  void LoadTaggedClassIdMayBeSmi(Register result, Register object);

  void ComputeRange(Register result, Register value, Register scratch,
                    Label *miss);

  void UpdateRangeFeedback(Register value, intptr_t idx, Register ic_data,
                           Register scratch1, Register scratch2, Label *miss);

  intptr_t FindImmediate(int32_t imm);
  bool CanLoadFromObjectPool(const Object &object) const;
  void LoadFromOffset(OperandSize type, Register reg, Register base,
                      int32_t offset, Condition cond = AL);
  void LoadFieldFromOffset(OperandSize type, Register reg, Register base,
                           int32_t offset, Condition cond = AL) {
    LoadFromOffset(type, reg, base, offset - kHeapObjectTag, cond);
  }
  void StoreToOffset(OperandSize type, Register reg, Register base,
                     int32_t offset, Condition cond = AL);
  void LoadSFromOffset(SRegister reg, Register base, int32_t offset,
                       Condition cond = AL);
  void StoreSToOffset(SRegister reg, Register base, int32_t offset,
                      Condition cond = AL);
  void LoadDFromOffset(DRegister reg, Register base, int32_t offset,
                       Condition cond = AL);
  void StoreDToOffset(DRegister reg, Register base, int32_t offset,
                      Condition cond = AL);

  void LoadMultipleDFromOffset(DRegister first, intptr_t count, Register base,
                               int32_t offset);
  void StoreMultipleDToOffset(DRegister first, intptr_t count, Register base,
                              int32_t offset);

  void CopyDoubleField(Register dst, Register src, Register tmp1, Register tmp2,
                       DRegister dtmp);
  void CopyFloat32x4Field(Register dst, Register src, Register tmp1,
                          Register tmp2, DRegister dtmp);
  void CopyFloat64x2Field(Register dst, Register src, Register tmp1,
                          Register tmp2, DRegister dtmp);

#if 0
  // Moved to ARM32::AssemblerARM32::push().
  void Push(Register rd, Condition cond = AL);

  // Moved to ARM32::AssemblerARM32::pop().
  void Pop(Register rd, Condition cond = AL);

  // Moved to ARM32::AssemblerARM32::pushList().
  void PushList(RegList regs, Condition cond = AL);

  // Moved to ARM32::AssemblerARM32::popList().
  void PopList(RegList regs, Condition cond = AL);
#endif
  void MoveRegister(Register rd, Register rm, Condition cond = AL);

  // Convenience shift instructions. Use mov instruction with shifter operand
  // for variants setting the status flags.
#if 0
  // Moved to ARM32::AssemblerARM32::lsl()
  void Lsl(Register rd, Register rm, const Operand& shift_imm,
           Condition cond = AL);
  // Moved to ARM32::AssemblerARM32::lsl()
  void Lsl(Register rd, Register rm, Register rs, Condition cond = AL);
  // Moved to ARM32::AssemblerARM32::lsr()
  void Lsr(Register rd, Register rm, const Operand& shift_imm,
           Condition cond = AL);
  // Moved to ARM32::AssemblerARM32::lsr()
  void Lsr(Register rd, Register rm, Register rs, Condition cond = AL);
  // Moved to ARM32::AssemblerARM32::asr()
  void Asr(Register rd, Register rm, const Operand& shift_imm,
           Condition cond = AL);
  // Moved to ARM32::AssemblerARM32::asr()
  void Asr(Register rd, Register rm, Register rs, Condition cond = AL);
#endif
  void Asrs(Register rd, Register rm, const Operand &shift_imm,
            Condition cond = AL);
  void Ror(Register rd, Register rm, const Operand &shift_imm,
           Condition cond = AL);
  void Ror(Register rd, Register rm, Register rs, Condition cond = AL);
  void Rrx(Register rd, Register rm, Condition cond = AL);

  // Fill rd with the sign of rm.
  void SignFill(Register rd, Register rm, Condition cond = AL);

  void Vreciprocalqs(QRegister qd, QRegister qm);
  void VreciprocalSqrtqs(QRegister qd, QRegister qm);
  // If qm must be preserved, then provide a (non-QTMP) temporary.
  void Vsqrtqs(QRegister qd, QRegister qm, QRegister temp);
  void Vdivqs(QRegister qd, QRegister qn, QRegister qm);

  void SmiTag(Register reg, Condition cond = AL) {
    Lsl(reg, reg, Operand(kSmiTagSize), cond);
  }

  void SmiTag(Register dst, Register src, Condition cond = AL) {
    Lsl(dst, src, Operand(kSmiTagSize), cond);
  }

  void SmiUntag(Register reg, Condition cond = AL) {
    Asr(reg, reg, Operand(kSmiTagSize), cond);
  }

  void SmiUntag(Register dst, Register src, Condition cond = AL) {
    Asr(dst, src, Operand(kSmiTagSize), cond);
  }

  // Untag the value in the register, assuming it is a Smi.
  // Untagging shifts the tag bit into the carry flag: if carry is clear, the
  // assumption was correct and we jump to the is_smi label; otherwise we
  // fall through.
  void SmiUntag(Register dst, Register src, Label *is_smi) {
    ASSERT(kSmiTagSize == 1);
    Asrs(dst, src, Operand(kSmiTagSize));
    b(is_smi, CC);
  }
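
  // Smi tagging scheme (a sketch): a Smi holds the integer shifted left by
  // kSmiTagSize == 1 with a 0 tag bit (e.g. the integer 3 is stored as 6),
  // while heap object pointers have the low bit set; the Asrs above moves
  // that bit into the carry flag.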

  void CheckCodePointer();

  // Function frame setup and tear down.
  void EnterFrame(RegList regs, intptr_t frame_space);
  void LeaveFrame(RegList regs);
  void Ret();
  void ReserveAlignedFrameSpace(intptr_t frame_space);

  // Create a frame for calling into runtime that preserves all volatile
  // registers.  Frame's SP is guaranteed to be correctly aligned and
  // frame_space bytes are reserved under it.
  void EnterCallRuntimeFrame(intptr_t frame_space);
  void LeaveCallRuntimeFrame();

  void CallRuntime(const RuntimeEntry &entry, intptr_t argument_count);

  // Set up a Dart frame on entry with a frame pointer and PC information to
  // enable easy access to the RawInstruction object of code corresponding
  // to this frame.
  void EnterDartFrame(intptr_t frame_size);
  void LeaveDartFrame(RestorePP restore_pp = kRestoreCallerPP);

  // Set up a Dart frame for a function compiled for on-stack replacement.
  // The frame layout is a normal Dart frame, but the frame is partially set
  // up on entry (it is the frame of the unoptimized code).
  void EnterOsrFrame(intptr_t extra_size);

  // Set up a stub frame so that the stack traversal code can easily identify
  // a stub frame.
  void EnterStubFrame();
  void LeaveStubFrame();

  // The register into which the allocation stats table is loaded with
  // LoadAllocationStatsAddress should be passed to
  // IncrementAllocationStats(WithSize) as stats_addr_reg to update the
  // allocation stats. These are separate assembler macros so we can
  // avoid a dependent load too soon after the load of the table address.
  void LoadAllocationStatsAddress(Register dest, intptr_t cid,
                                  bool inline_isolate = true);
  void IncrementAllocationStats(Register stats_addr, intptr_t cid,
                                Heap::Space space);
  void IncrementAllocationStatsWithSize(Register stats_addr_reg,
                                        Register size_reg, Heap::Space space);

  Address ElementAddressForIntIndex(bool is_load, bool is_external,
                                    intptr_t cid, intptr_t index_scale,
                                    Register array, intptr_t index,
                                    Register temp);

  Address ElementAddressForRegIndex(bool is_load, bool is_external,
                                    intptr_t cid, intptr_t index_scale,
                                    Register array, Register index);

  // If allocation tracing for |cid| is enabled, will jump to |trace| label,
  // which will allocate in the runtime where tracing occurs.
  void MaybeTraceAllocation(intptr_t cid, Register temp_reg, Label *trace,
                            bool inline_isolate = true);

  // Inlined allocation of an instance of class 'cls'; the code makes no
  // runtime calls. Jump to 'failure' if the instance cannot be allocated
  // here. The allocated instance is returned in 'instance_reg'.
  // Only the tags field of the object is initialized.
  void TryAllocate(const Class &cls, Label *failure, Register instance_reg,
                   Register temp_reg);

  void TryAllocateArray(intptr_t cid, intptr_t instance_size, Label *failure,
                        Register instance, Register end_address, Register temp1,
                        Register temp2);
  // Emit data (e.g., an encoded instruction or immediate) in the instruction
  // stream.
  void Emit(int32_t value);

  // On some other platforms, we draw a distinction between safe and unsafe
  // smis.
  static bool IsSafe(const Object &object) { return true; }
  static bool IsSafeSmi(const Object &object) { return object.IsSmi(); }

  bool constant_pool_allowed() const { return constant_pool_allowed_; }
  void set_constant_pool_allowed(bool b) { constant_pool_allowed_ = b; }

private:
  AssemblerBuffer buffer_; // Contains position independent code.
  ObjectPoolWrapper object_pool_wrapper_;

  int32_t prologue_offset_;

  bool use_far_branches_;

#if 0
  // If you are thinking of using one or both of these instructions directly,
  // you should probably use LoadImmediate instead.
  // Moved to ARM32::AssemblerARM32::movw
  void movw(Register rd, uint16_t imm16, Condition cond = AL);
  // Moved to ARM32::AssemblerARM32::movt
  void movt(Register rd, uint16_t imm16, Condition cond = AL);
#endif

  void BindARMv6(Label *label);
  void BindARMv7(Label *label);

  void LoadWordFromPoolOffset(Register rd, int32_t offset, Register pp,
                              Condition cond);

  void BranchLink(const ExternalLabel *label);

  class CodeComment : public ZoneAllocated {
  public:
    CodeComment(intptr_t pc_offset, const String &comment)
        : pc_offset_(pc_offset), comment_(comment) {}

    intptr_t pc_offset() const { return pc_offset_; }
    const String &comment() const { return comment_; }

  private:
    intptr_t pc_offset_;
    const String &comment_;

    DISALLOW_COPY_AND_ASSIGN(CodeComment);
  };

  GrowableArray<CodeComment *> comments_;

  bool constant_pool_allowed_;

  void LoadObjectHelper(Register rd, const Object &object, Condition cond,
                        bool is_unique, Register pp);

#if 0
  // Moved to ARM32::AssemblerARM32::emitType01()
  void EmitType01(Condition cond,
                  int type,
                  Opcode opcode,
                  int set_cc,
                  Register rn,
                  Register rd,
                  Operand o);

  // Moved to ARM32::AssemblerARM32::emitType05()
  void EmitType5(Condition cond, int32_t offset, bool link);

  // Moved to ARM32::AssemblerARM32::emitMemOp()
  void EmitMemOp(Condition cond,
                 bool load,
                 bool byte,
                 Register rd,
                 Address ad);

  // Moved to ARM32::AssemblerARM32::emitMemOpEnc3();
  void EmitMemOpAddressMode3(Condition cond,
                             int32_t mode,
                             Register rd,
                             Address ad);

  // Moved to ARM32::AssemblerARM32::emitMultiMemOp()
  void EmitMultiMemOp(Condition cond,
                      BlockAddressMode am,
                      bool load,
                      Register base,
                      RegList regs);
#endif

  void EmitShiftImmediate(Condition cond, Shift opcode, Register rd,
                          Register rm, Operand o);

  void EmitShiftRegister(Condition cond, Shift opcode, Register rd, Register rm,
                         Operand o);

#if 0
  // Moved to ARM32::AssemblerARM32::emitMulOp()
  void EmitMulOp(Condition cond,
                 int32_t opcode,
                 Register rd,
                 Register rn,
                 Register rm,
                 Register rs);

  // Moved to ARM32::AssemblerARM32::emitDivOp();
  void EmitDivOp(Condition cond,
                 int32_t opcode,
                 Register rd,
                 Register rn,
                 Register rm);
#endif

  void EmitMultiVSMemOp(Condition cond, BlockAddressMode am, bool load,
                        Register base, SRegister start, uint32_t count);

  void EmitMultiVDMemOp(Condition cond, BlockAddressMode am, bool load,
                        Register base, DRegister start, int32_t count);

#if 0
  // Moved to ARM32::AssemblerARM32::emitVFPsss
  void EmitVFPsss(Condition cond,
                  int32_t opcode,
                  SRegister sd,
                  SRegister sn,
                  SRegister sm);

  // Moved to ARM32::AssemblerARM32::emitVFPddd
  void EmitVFPddd(Condition cond,
                  int32_t opcode,
                  DRegister dd,
                  DRegister dn,
                  DRegister dm);

  // Moved to ARM32::AssemblerARM32::emitVFPsd
  void EmitVFPsd(Condition cond,
                 int32_t opcode,
                 SRegister sd,
                 DRegister dm);

  // Moved to ARM32::AssemblerARM32::emitVFPds
  void EmitVFPds(Condition cond,
                 int32_t opcode,
                 DRegister dd,
                 SRegister sm);

  // Moved to ARM32::AssemblerARM32::emitSIMDqqq()
  void EmitSIMDqqq(int32_t opcode, OperandSize sz,
                   QRegister qd, QRegister qn, QRegister qm);
#endif

  void EmitSIMDddd(int32_t opcode, OperandSize sz, DRegister dd, DRegister dn,
                   DRegister dm);

  void EmitFarBranch(Condition cond, int32_t offset, bool link);
#if 0
  // Moved to ARM32::AssemblerARM32::emitBranch()
  void EmitBranch(Condition cond, Label* label, bool link);
  // Moved to ARM32::AssemblerARM32::encodeBranchOffset().
  int32_t EncodeBranchOffset(int32_t offset, int32_t inst);
  // Moved to ARM32::AssemblerARM32::decodeBranchOffset().
  static int32_t DecodeBranchOffset(int32_t inst);
#endif
  int32_t EncodeTstOffset(int32_t offset, int32_t inst);
  int32_t DecodeTstOffset(int32_t inst);

  void StoreIntoObjectFilter(Register object, Register value, Label *no_update);

  // Shorter filtering sequence that assumes that value is not a smi.
  void StoreIntoObjectFilterNoSmi(Register object, Register value,
                                  Label *no_update);

  // Helpers for write-barrier verification.

  // Returns VerifiedMemory::offset() as an Operand.
  Operand GetVerifiedMemoryShadow();
  // Writes value to [base + offset] and also its shadow location, if enabled.
  void WriteShadowedField(Register base, intptr_t offset, Register value,
                          Condition cond = AL);
  void WriteShadowedFieldPair(Register base, intptr_t offset,
                              Register value_even, Register value_odd,
                              Condition cond = AL);
  // Writes new_value to address and its shadow location, if enabled, after
  // verifying that its old value matches its shadow.
  void VerifiedWrite(const Address &address, Register new_value,
                     FieldContent old_content);

#if 0
  // Added the following missing operations:
  //
  // ARM32::AssemblerARM32::uxt() (uxtb and uxth)
  // ARM32::AssemblerARM32::vpop()
  // ARM32::AssemblerARM32::vpush()
  // ARM32::AssemblerARM32::rbit()
  // ARM32::AssemblerARM32::vbslq()
  // ARM32::AssemblerARM32::veord()
  // ARM32::AssemblerARM32::vld1qr()
  // ARM32::AssemblerARM32::vshlqc()
  // ARM32::AssemblerARM32::vshrqic()
  // ARM32::AssemblerARM32::vshrquc()
  // ARM32::AssemblerARM32::vst1qr()
  // ARM32::AssemblerARM32::vmorqi()
  // ARM32::AssemblerARM32::vmovqc()
#endif

  DISALLOW_ALLOCATION();
  DISALLOW_COPY_AND_ASSIGN(Assembler);
};

} // namespace dart

#endif // VM_ASSEMBLER_ARM_H_