1 // Copyright 2015, ARM Limited
2 // All rights reserved.
3 //
4 // Redistribution and use in source and binary forms, with or without
5 // modification, are permitted provided that the following conditions are met:
6 //
7 //   * Redistributions of source code must retain the above copyright notice,
8 //     this list of conditions and the following disclaimer.
9 //   * Redistributions in binary form must reproduce the above copyright notice,
10 //     this list of conditions and the following disclaimer in the documentation
11 //     and/or other materials provided with the distribution.
12 //   * Neither the name of ARM Limited nor the names of its contributors may be
13 //     used to endorse or promote products derived from this software without
14 //     specific prior written permission.
15 //
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
17 // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
18 // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
19 // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
20 // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
22 // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
23 // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
24 // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
25 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 
27 #include "jit/arm64/vixl/MacroAssembler-vixl.h"
28 
29 #include <ctype.h>
30 
31 namespace vixl {
32 
MacroAssembler::MacroAssembler()
34     : js::jit::Assembler(),
35       sp_(x28),
36       tmp_list_(ip0, ip1),
37       fptmp_list_(d31)
38 {
39 }
40 
41 
void MacroAssembler::FinalizeCode() {
43   Assembler::FinalizeCode();
44 }
45 
46 
int MacroAssembler::MoveImmediateHelper(MacroAssembler* masm,
48                                         const Register &rd,
49                                         uint64_t imm) {
50   bool emit_code = (masm != NULL);
51   VIXL_ASSERT(is_uint32(imm) || is_int32(imm) || rd.Is64Bits());
52   // The worst case for size is mov 64-bit immediate to sp:
53   //  * up to 4 instructions to materialise the constant
54   //  * 1 instruction to move to sp
55   MacroEmissionCheckScope guard(masm);
56 
  // Immediates on AArch64 can be produced using an initial value, and zero to
  // three move-keep operations.
59   //
60   // Initial values can be generated with:
61   //  1. 64-bit move zero (movz).
62   //  2. 32-bit move inverted (movn).
63   //  3. 64-bit move inverted.
64   //  4. 32-bit orr immediate.
65   //  5. 64-bit orr immediate.
66   // Move-keep may then be used to modify each of the 16-bit half words.
67   //
68   // The code below supports all five initial value generators, and
69   // applying move-keep operations to move-zero and move-inverted initial
70   // values.
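  // For illustration: 0x0000cafe0000beef has two non-zero halfwords and can
  // be built as "movz x0, #0xbeef" followed by "movk x0, #0xcafe, lsl #32",
  // while 0xffffffffcafeffff is mostly ones, so move-inverted is shorter:
  // "movn x0, #0x3501, lsl #16" (0x3501 being ~0xcafe).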
71 
72   // Try to move the immediate in one instruction, and if that fails, switch to
73   // using multiple instructions.
74   if (OneInstrMoveImmediateHelper(masm, rd, imm)) {
75     return 1;
76   } else {
77     int instruction_count = 0;
78     unsigned reg_size = rd.size();
79 
80     // Generic immediate case. Imm will be represented by
81     //   [imm3, imm2, imm1, imm0], where each imm is 16 bits.
82     // A move-zero or move-inverted is generated for the first non-zero or
83     // non-0xffff immX, and a move-keep for subsequent non-zero immX.
84 
85     uint64_t ignored_halfword = 0;
86     bool invert_move = false;
87     // If the number of 0xffff halfwords is greater than the number of 0x0000
88     // halfwords, it's more efficient to use move-inverted.
89     if (CountClearHalfWords(~imm, reg_size) >
90         CountClearHalfWords(imm, reg_size)) {
91       ignored_halfword = 0xffff;
92       invert_move = true;
93     }
94 
95     // Mov instructions can't move values into the stack pointer, so set up a
96     // temporary register, if needed.
97     UseScratchRegisterScope temps;
98     Register temp;
99     if (emit_code) {
100       temps.Open(masm);
101       temp = rd.IsSP() ? temps.AcquireSameSizeAs(rd) : rd;
102     }
103 
104     // Iterate through the halfwords. Use movn/movz for the first non-ignored
105     // halfword, and movk for subsequent halfwords.
106     VIXL_ASSERT((reg_size % 16) == 0);
107     bool first_mov_done = false;
    // Note: use reg_size here; when masm is NULL (size-estimation mode) no
    // temp register is acquired, so temp.size() would be zero.
    for (unsigned i = 0; i < (reg_size / 16); i++) {
109       uint64_t imm16 = (imm >> (16 * i)) & 0xffff;
110       if (imm16 != ignored_halfword) {
111         if (!first_mov_done) {
112           if (invert_move) {
113             if (emit_code) masm->movn(temp, ~imm16 & 0xffff, 16 * i);
114             instruction_count++;
115           } else {
116             if (emit_code) masm->movz(temp, imm16, 16 * i);
117             instruction_count++;
118           }
119           first_mov_done = true;
120         } else {
121           // Construct a wider constant.
122           if (emit_code) masm->movk(temp, imm16, 16 * i);
123           instruction_count++;
124         }
125       }
126     }
127 
128     VIXL_ASSERT(first_mov_done);
129 
130     // Move the temporary if the original destination register was the stack
131     // pointer.
132     if (rd.IsSP()) {
133       if (emit_code) masm->mov(rd, temp);
134       instruction_count++;
135     }
136     return instruction_count;
137   }
138 }
139 
140 
bool MacroAssembler::OneInstrMoveImmediateHelper(MacroAssembler* masm,
142                                                  const Register& dst,
143                                                  int64_t imm) {
144   bool emit_code = masm != NULL;
145   unsigned n, imm_s, imm_r;
146   int reg_size = dst.size();
147 
148   if (IsImmMovz(imm, reg_size) && !dst.IsSP()) {
149     // Immediate can be represented in a move zero instruction. Movz can't write
150     // to the stack pointer.
151     if (emit_code) {
152       masm->movz(dst, imm);
153     }
154     return true;
155   } else if (IsImmMovn(imm, reg_size) && !dst.IsSP()) {
156     // Immediate can be represented in a move negative instruction. Movn can't
157     // write to the stack pointer.
158     if (emit_code) {
159       masm->movn(dst, dst.Is64Bits() ? ~imm : (~imm & kWRegMask));
160     }
161     return true;
162   } else if (IsImmLogical(imm, reg_size, &n, &imm_s, &imm_r)) {
163     // Immediate can be represented in a logical orr instruction.
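    // For example, repeating patterns such as 0x00ff00ff00ff00ff or
    // 0x0101010101010101 are valid bitmask immediates, so they can be
    // materialised with a single "orr dst, xzr, #imm".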
164     VIXL_ASSERT(!dst.IsZero());
165     if (emit_code) {
166       masm->LogicalImmediate(
167           dst, AppropriateZeroRegFor(dst), n, imm_s, imm_r, ORR);
168     }
169     return true;
170   }
171   return false;
172 }
173 
174 
void MacroAssembler::B(Label* label, BranchType type, Register reg, int bit) {
176   VIXL_ASSERT((reg.Is(NoReg) || (type >= kBranchTypeFirstUsingReg)) &&
177               ((bit == -1) || (type >= kBranchTypeFirstUsingBit)));
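  // Usage sketch: B(&label, reg_not_zero, x0) dispatches to Cbnz(x0, &label),
  // and B(&label, reg_bit_set, x0, 3) dispatches to Tbnz(x0, 3, &label).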
178   if (kBranchTypeFirstCondition <= type && type <= kBranchTypeLastCondition) {
179     B(static_cast<Condition>(type), label);
180   } else {
181     switch (type) {
182       case always:        B(label);              break;
183       case never:         break;
184       case reg_zero:      Cbz(reg, label);       break;
185       case reg_not_zero:  Cbnz(reg, label);      break;
186       case reg_bit_clear: Tbz(reg, bit, label);  break;
187       case reg_bit_set:   Tbnz(reg, bit, label); break;
188       default:
189         VIXL_UNREACHABLE();
190     }
191   }
192 }
193 
194 
void MacroAssembler::B(Label* label) {
196   SingleEmissionCheckScope guard(this);
197   b(label);
198 }
199 
200 
void MacroAssembler::B(Label* label, Condition cond) {
202   VIXL_ASSERT((cond != al) && (cond != nv));
203   EmissionCheckScope guard(this, 2 * kInstructionSize);
204 
205   if (label->bound() && LabelIsOutOfRange(label, CondBranchType)) {
206     Label done;
207     b(&done, InvertCondition(cond));
208     b(label);
209     bind(&done);
210   } else {
211     // TODO: Need to register a slot in a literal pool, so that we can
212     // write a branch instruction there and use that to branch in case
213     // the unbound label winds up being out of range.
214     b(label, cond);
215   }
216 }
217 
218 
void MacroAssembler::Cbnz(const Register& rt, Label* label) {
220   VIXL_ASSERT(!rt.IsZero());
221   EmissionCheckScope guard(this, 2 * kInstructionSize);
222 
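  // cbnz reaches only about +/-1 MiB, whereas an unconditional b reaches
  // about +/-128 MiB. For a bound label that is out of range, the test is
  // inverted around a long branch:
  //   cbz  rt, done
  //   b    label
  //  done: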
223   if (label->bound() && LabelIsOutOfRange(label, CondBranchType)) {
224     Label done;
225     cbz(rt, &done);
226     b(label);
227     bind(&done);
228   } else {
229     // TODO: Need to register a slot in a literal pool, so that we can
230     // write a branch instruction there and use that to branch in case
231     // the unbound label winds up being out of range.
232     cbnz(rt, label);
233   }
234 }
235 
236 
void MacroAssembler::Cbz(const Register& rt, Label* label) {
238   VIXL_ASSERT(!rt.IsZero());
239   EmissionCheckScope guard(this, 2 * kInstructionSize);
240 
241   if (label->bound() && LabelIsOutOfRange(label, CondBranchType)) {
242     Label done;
243     cbnz(rt, &done);
244     b(label);
245     bind(&done);
246   } else {
    // TODO: Need to register a slot in a literal pool, so that we can
248     // write a branch instruction there and use that to branch in case
249     // the unbound label winds up being out of range.
250     cbz(rt, label);
251   }
252 }
253 
254 
void MacroAssembler::Tbnz(const Register& rt, unsigned bit_pos, Label* label) {
256   VIXL_ASSERT(!rt.IsZero());
257   EmissionCheckScope guard(this, 2 * kInstructionSize);
258 
259   if (label->bound() && LabelIsOutOfRange(label, TestBranchType)) {
260     Label done;
261     tbz(rt, bit_pos, &done);
262     b(label);
263     bind(&done);
264   } else {
    // TODO: Need to register a slot in a literal pool, so that we can
266     // write a branch instruction there and use that to branch in case
267     // the unbound label winds up being out of range.
268     tbnz(rt, bit_pos, label);
269   }
270 }
271 
272 
void MacroAssembler::Tbz(const Register& rt, unsigned bit_pos, Label* label) {
274   VIXL_ASSERT(!rt.IsZero());
275   EmissionCheckScope guard(this, 2 * kInstructionSize);
276 
277   if (label->bound() && LabelIsOutOfRange(label, TestBranchType)) {
278     Label done;
279     tbnz(rt, bit_pos, &done);
280     b(label);
281     bind(&done);
282   } else {
    // TODO: Need to register a slot in a literal pool, so that we can
284     // write a branch instruction there and use that to branch in case
285     // the unbound label winds up being out of range.
286     tbz(rt, bit_pos, label);
287   }
288 }
289 
290 
void MacroAssembler::And(const Register& rd,
292                          const Register& rn,
293                          const Operand& operand) {
294   LogicalMacro(rd, rn, operand, AND);
295 }
296 
297 
void MacroAssembler::Ands(const Register& rd,
299                           const Register& rn,
300                           const Operand& operand) {
301   LogicalMacro(rd, rn, operand, ANDS);
302 }
303 
304 
void MacroAssembler::Tst(const Register& rn,
306                          const Operand& operand) {
307   Ands(AppropriateZeroRegFor(rn), rn, operand);
308 }
309 
310 
void MacroAssembler::Bic(const Register& rd,
312                          const Register& rn,
313                          const Operand& operand) {
314   LogicalMacro(rd, rn, operand, BIC);
315 }
316 
317 
void MacroAssembler::Bics(const Register& rd,
319                           const Register& rn,
320                           const Operand& operand) {
321   LogicalMacro(rd, rn, operand, BICS);
322 }
323 
324 
void MacroAssembler::Orr(const Register& rd,
326                          const Register& rn,
327                          const Operand& operand) {
328   LogicalMacro(rd, rn, operand, ORR);
329 }
330 
331 
void MacroAssembler::Orn(const Register& rd,
333                          const Register& rn,
334                          const Operand& operand) {
335   LogicalMacro(rd, rn, operand, ORN);
336 }
337 
338 
void MacroAssembler::Eor(const Register& rd,
340                          const Register& rn,
341                          const Operand& operand) {
342   LogicalMacro(rd, rn, operand, EOR);
343 }
344 
345 
void MacroAssembler::Eon(const Register& rd,
347                          const Register& rn,
348                          const Operand& operand) {
349   LogicalMacro(rd, rn, operand, EON);
350 }
351 
352 
void MacroAssembler::LogicalMacro(const Register& rd,
354                                   const Register& rn,
355                                   const Operand& operand,
356                                   LogicalOp op) {
357   // The worst case for size is logical immediate to sp:
358   //  * up to 4 instructions to materialise the constant
359   //  * 1 instruction to do the operation
360   //  * 1 instruction to move to sp
361   MacroEmissionCheckScope guard(this);
362   UseScratchRegisterScope temps(this);
363 
364   if (operand.IsImmediate()) {
365     int64_t immediate = operand.immediate();
366     unsigned reg_size = rd.size();
367 
368     // If the operation is NOT, invert the operation and immediate.
369     if ((op & NOT) == NOT) {
370       op = static_cast<LogicalOp>(op & ~NOT);
371       immediate = ~immediate;
372     }
373 
374     // Ignore the top 32 bits of an immediate if we're moving to a W register.
375     if (rd.Is32Bits()) {
376       // Check that the top 32 bits are consistent.
377       VIXL_ASSERT(((immediate >> kWRegSize) == 0) ||
378                   ((immediate >> kWRegSize) == -1));
379       immediate &= kWRegMask;
380     }
381 
382     VIXL_ASSERT(rd.Is64Bits() || is_uint32(immediate));
383 
384     // Special cases for all set or all clear immediates.
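    // For example, And(x0, x1, 0) reduces to Mov(x0, 0), Orr(x0, x1, 0)
    // reduces to Mov(x0, x1), and Eor(x0, x1, 0xffffffffffffffff) reduces to
    // Mvn(x0, x1).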
385     if (immediate == 0) {
386       switch (op) {
387         case AND:
388           Mov(rd, 0);
389           return;
390         case ORR:
391           VIXL_FALLTHROUGH();
392         case EOR:
393           Mov(rd, rn);
394           return;
395         case ANDS:
396           VIXL_FALLTHROUGH();
397         case BICS:
398           break;
399         default:
400           VIXL_UNREACHABLE();
401       }
402     } else if ((rd.Is64Bits() && (immediate == -1)) ||
403                (rd.Is32Bits() && (immediate == 0xffffffff))) {
404       switch (op) {
405         case AND:
406           Mov(rd, rn);
407           return;
408         case ORR:
409           Mov(rd, immediate);
410           return;
411         case EOR:
412           Mvn(rd, rn);
413           return;
414         case ANDS:
415           VIXL_FALLTHROUGH();
416         case BICS:
417           break;
418         default:
419           VIXL_UNREACHABLE();
420       }
421     }
422 
423     unsigned n, imm_s, imm_r;
424     if (IsImmLogical(immediate, reg_size, &n, &imm_s, &imm_r)) {
425       // Immediate can be encoded in the instruction.
426       LogicalImmediate(rd, rn, n, imm_s, imm_r, op);
427     } else {
428       // Immediate can't be encoded: synthesize using move immediate.
429       Register temp = temps.AcquireSameSizeAs(rn);
430       Operand imm_operand = MoveImmediateForShiftedOp(temp, immediate);
431 
432       // VIXL can acquire temp registers. Assert that the caller is aware.
433       VIXL_ASSERT(!temp.Is(rd) && !temp.Is(rn));
434       VIXL_ASSERT(!temp.Is(operand.maybeReg()));
435 
436       if (rd.Is(sp)) {
437         // If rd is the stack pointer we cannot use it as the destination
438         // register so we use the temp register as an intermediate again.
439         Logical(temp, rn, imm_operand, op);
440         Mov(sp, temp);
441       } else {
442         Logical(rd, rn, imm_operand, op);
443       }
444     }
445   } else if (operand.IsExtendedRegister()) {
446     VIXL_ASSERT(operand.reg().size() <= rd.size());
447     // Add/sub extended supports shift <= 4. We want to support exactly the
448     // same modes here.
449     VIXL_ASSERT(operand.shift_amount() <= 4);
450     VIXL_ASSERT(operand.reg().Is64Bits() ||
451            ((operand.extend() != UXTX) && (operand.extend() != SXTX)));
452 
453     temps.Exclude(operand.reg());
454     Register temp = temps.AcquireSameSizeAs(rn);
455 
456     // VIXL can acquire temp registers. Assert that the caller is aware.
457     VIXL_ASSERT(!temp.Is(rd) && !temp.Is(rn));
458     VIXL_ASSERT(!temp.Is(operand.maybeReg()));
459 
460     EmitExtendShift(temp, operand.reg(), operand.extend(),
461                     operand.shift_amount());
462     Logical(rd, rn, Operand(temp), op);
463   } else {
464     // The operand can be encoded in the instruction.
465     VIXL_ASSERT(operand.IsShiftedRegister());
466     Logical(rd, rn, operand, op);
467   }
468 }
469 
470 
void MacroAssembler::Mov(const Register& rd,
472                          const Operand& operand,
473                          DiscardMoveMode discard_mode) {
474   // The worst case for size is mov immediate with up to 4 instructions.
475   MacroEmissionCheckScope guard(this);
476 
477   if (operand.IsImmediate()) {
478     // Call the macro assembler for generic immediates.
479     Mov(rd, operand.immediate());
480   } else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) {
481     // Emit a shift instruction if moving a shifted register. This operation
482     // could also be achieved using an orr instruction (like orn used by Mvn),
483     // but using a shift instruction makes the disassembly clearer.
484     EmitShift(rd, operand.reg(), operand.shift(), operand.shift_amount());
485   } else if (operand.IsExtendedRegister()) {
486     // Emit an extend instruction if moving an extended register. This handles
487     // extend with post-shift operations, too.
488     EmitExtendShift(rd, operand.reg(), operand.extend(),
489                     operand.shift_amount());
490   } else {
491     // Otherwise, emit a register move only if the registers are distinct, or
492     // if they are not X registers.
493     //
494     // Note that mov(w0, w0) is not a no-op because it clears the top word of
495     // x0. A flag is provided (kDiscardForSameWReg) if a move between the same W
496     // registers is not required to clear the top word of the X register. In
497     // this case, the instruction is discarded.
498     //
499     // If the sp is an operand, add #0 is emitted, otherwise, orr #0.
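    // For example, with kDontDiscardForSameWReg, Mov(w0, Operand(w0)) still
    // emits "mov w0, w0" so that the top 32 bits of x0 are cleared; with
    // kDiscardForSameWReg the move is elided entirely.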
500     if (!rd.Is(operand.reg()) || (rd.Is32Bits() &&
501                                   (discard_mode == kDontDiscardForSameWReg))) {
502       mov(rd, operand.reg());
503     }
504   }
505 }
506 
507 
void MacroAssembler::Movi16bitHelper(const VRegister& vd, uint64_t imm) {
509   VIXL_ASSERT(is_uint16(imm));
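  // For example (roughly): 0x5a5a becomes "movi vd.16b, #0x5a", 0xab00
  // becomes "movi vd, #0xab, lsl #8", and 0x12ff uses the inverted form
  // "mvni vd, #0xed, lsl #8" (0xed being ~0x12).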
510   int byte1 = (imm & 0xff);
511   int byte2 = ((imm >> 8) & 0xff);
512   if (byte1 == byte2) {
513     movi(vd.Is64Bits() ? vd.V8B() : vd.V16B(), byte1);
514   } else if (byte1 == 0) {
515     movi(vd, byte2, LSL, 8);
516   } else if (byte2 == 0) {
517     movi(vd, byte1);
518   } else if (byte1 == 0xff) {
519     mvni(vd, ~byte2 & 0xff, LSL, 8);
520   } else if (byte2 == 0xff) {
521     mvni(vd, ~byte1 & 0xff);
522   } else {
523     UseScratchRegisterScope temps(this);
524     Register temp = temps.AcquireW();
525     movz(temp, imm);
526     dup(vd, temp);
527   }
528 }
529 
530 
void MacroAssembler::Movi32bitHelper(const VRegister& vd, uint64_t imm) {
532   VIXL_ASSERT(is_uint32(imm));
533 
534   uint8_t bytes[sizeof(imm)];
535   memcpy(bytes, &imm, sizeof(imm));
536 
537   // All bytes are either 0x00 or 0xff.
538   {
539     bool all0orff = true;
540     for (int i = 0; i < 4; ++i) {
541       if ((bytes[i] != 0) && (bytes[i] != 0xff)) {
542         all0orff = false;
543         break;
544       }
545     }
546 
547     if (all0orff == true) {
548       movi(vd.Is64Bits() ? vd.V1D() : vd.V2D(), ((imm << 32) | imm));
549       return;
550     }
551   }
552 
553   // Of the 4 bytes, only one byte is non-zero.
554   for (int i = 0; i < 4; i++) {
555     if ((imm & (0xff << (i * 8))) == imm) {
556       movi(vd, bytes[i], LSL, i * 8);
557       return;
558     }
559   }
560 
561   // Of the 4 bytes, only one byte is not 0xff.
562   for (int i = 0; i < 4; i++) {
563     uint32_t mask = ~(0xff << (i * 8));
564     if ((imm & mask) == mask) {
565       mvni(vd, ~bytes[i] & 0xff, LSL, i * 8);
566       return;
567     }
568   }
569 
570   // Immediate is of the form 0x00MMFFFF.
571   if ((imm & 0xff00ffff) == 0x0000ffff) {
572     movi(vd, bytes[2], MSL, 16);
573     return;
574   }
575 
576   // Immediate is of the form 0x0000MMFF.
577   if ((imm & 0xffff00ff) == 0x000000ff) {
578     movi(vd, bytes[1], MSL, 8);
579     return;
580   }
581 
582   // Immediate is of the form 0xFFMM0000.
583   if ((imm & 0xff00ffff) == 0xff000000) {
584     mvni(vd, ~bytes[2] & 0xff, MSL, 16);
585     return;
586   }
587   // Immediate is of the form 0xFFFFMM00.
588   if ((imm & 0xffff00ff) == 0xffff0000) {
589     mvni(vd, ~bytes[1] & 0xff, MSL, 8);
590     return;
591   }
592 
593   // Top and bottom 16-bits are equal.
594   if (((imm >> 16) & 0xffff) == (imm & 0xffff)) {
595     Movi16bitHelper(vd.Is64Bits() ? vd.V4H() : vd.V8H(), imm & 0xffff);
596     return;
597   }
598 
599   // Default case.
600   {
601     UseScratchRegisterScope temps(this);
602     Register temp = temps.AcquireW();
603     Mov(temp, imm);
604     dup(vd, temp);
605   }
606 }
607 
608 
void MacroAssembler::Movi64bitHelper(const VRegister& vd, uint64_t imm) {
610   // All bytes are either 0x00 or 0xff.
611   {
612     bool all0orff = true;
613     for (int i = 0; i < 8; ++i) {
614       int byteval = (imm >> (i * 8)) & 0xff;
615       if (byteval != 0 && byteval != 0xff) {
616         all0orff = false;
617         break;
618       }
619     }
620     if (all0orff == true) {
621       movi(vd, imm);
622       return;
623     }
624   }
625 
626   // Top and bottom 32-bits are equal.
627   if (((imm >> 32) & 0xffffffff) == (imm & 0xffffffff)) {
628     Movi32bitHelper(vd.Is64Bits() ? vd.V2S() : vd.V4S(), imm & 0xffffffff);
629     return;
630   }
631 
632   // Default case.
633   {
634     UseScratchRegisterScope temps(this);
635     Register temp = temps.AcquireX();
636     Mov(temp, imm);
637     if (vd.Is1D()) {
638       mov(vd.D(), 0, temp);
639     } else {
640       dup(vd.V2D(), temp);
641     }
642   }
643 }
644 
645 
void MacroAssembler::Movi(const VRegister& vd,
647                           uint64_t imm,
648                           Shift shift,
649                           int shift_amount) {
650   MacroEmissionCheckScope guard(this);
651   if (shift_amount != 0 || shift != LSL) {
652     movi(vd, imm, shift, shift_amount);
653   } else if (vd.Is8B() || vd.Is16B()) {
654     // 8-bit immediate.
655     VIXL_ASSERT(is_uint8(imm));
656     movi(vd, imm);
657   } else if (vd.Is4H() || vd.Is8H()) {
658     // 16-bit immediate.
659     Movi16bitHelper(vd, imm);
660   } else if (vd.Is2S() || vd.Is4S()) {
661     // 32-bit immediate.
662     Movi32bitHelper(vd, imm);
663   } else {
664     // 64-bit immediate.
665     Movi64bitHelper(vd, imm);
666   }
667 }
668 
669 
void MacroAssembler::Movi(const VRegister& vd,
671                           uint64_t hi,
672                           uint64_t lo) {
673   // TODO: Move 128-bit values in a more efficient way.
674   VIXL_ASSERT(vd.Is128Bits());
675   UseScratchRegisterScope temps(this);
676   Movi(vd.V2D(), lo);
677   Register temp = temps.AcquireX();
678   Mov(temp, hi);
679   Ins(vd.V2D(), 1, temp);
680 }
681 
682 
void MacroAssembler::Mvn(const Register& rd, const Operand& operand) {
684   // The worst case for size is mvn immediate with up to 4 instructions.
685   MacroEmissionCheckScope guard(this);
686 
687   if (operand.IsImmediate()) {
688     // Call the macro assembler for generic immediates.
689     Mvn(rd, operand.immediate());
690   } else if (operand.IsExtendedRegister()) {
691     UseScratchRegisterScope temps(this);
692     temps.Exclude(operand.reg());
693 
694     // Emit two instructions for the extend case. This differs from Mov, as
695     // the extend and invert can't be achieved in one instruction.
696     Register temp = temps.AcquireSameSizeAs(rd);
697 
698     // VIXL can acquire temp registers. Assert that the caller is aware.
699     VIXL_ASSERT(!temp.Is(rd) && !temp.Is(operand.maybeReg()));
700 
701     EmitExtendShift(temp, operand.reg(), operand.extend(),
702                     operand.shift_amount());
703     mvn(rd, Operand(temp));
704   } else {
705     // Otherwise, register and shifted register cases can be handled by the
706     // assembler directly, using orn.
707     mvn(rd, operand);
708   }
709 }
710 
711 
void MacroAssembler::Mov(const Register& rd, uint64_t imm) {
713   MoveImmediateHelper(this, rd, imm);
714 }
715 
716 
void MacroAssembler::Ccmp(const Register& rn,
718                           const Operand& operand,
719                           StatusFlags nzcv,
720                           Condition cond) {
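  // For example, Ccmp(x0, -7, NoFlag, eq) is emitted as a ccmn with
  // immediate 7, since the 5-bit ccmp immediate field cannot encode a
  // negative value.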
721   if (operand.IsImmediate() && (operand.immediate() < 0)) {
722     ConditionalCompareMacro(rn, -operand.immediate(), nzcv, cond, CCMN);
723   } else {
724     ConditionalCompareMacro(rn, operand, nzcv, cond, CCMP);
725   }
726 }
727 
728 
void MacroAssembler::Ccmn(const Register& rn,
730                           const Operand& operand,
731                           StatusFlags nzcv,
732                           Condition cond) {
733   if (operand.IsImmediate() && (operand.immediate() < 0)) {
734     ConditionalCompareMacro(rn, -operand.immediate(), nzcv, cond, CCMP);
735   } else {
736     ConditionalCompareMacro(rn, operand, nzcv, cond, CCMN);
737   }
738 }
739 
740 
void MacroAssembler::ConditionalCompareMacro(const Register& rn,
742                                              const Operand& operand,
743                                              StatusFlags nzcv,
744                                              Condition cond,
745                                              ConditionalCompareOp op) {
746   VIXL_ASSERT((cond != al) && (cond != nv));
747   // The worst case for size is ccmp immediate:
748   //  * up to 4 instructions to materialise the constant
749   //  * 1 instruction for ccmp
750   MacroEmissionCheckScope guard(this);
751 
752   if ((operand.IsShiftedRegister() && (operand.shift_amount() == 0)) ||
753       (operand.IsImmediate() && IsImmConditionalCompare(operand.immediate()))) {
754     // The immediate can be encoded in the instruction, or the operand is an
755     // unshifted register: call the assembler.
756     ConditionalCompare(rn, operand, nzcv, cond, op);
757   } else {
758     UseScratchRegisterScope temps(this);
759     // The operand isn't directly supported by the instruction: perform the
760     // operation on a temporary register.
761     Register temp = temps.AcquireSameSizeAs(rn);
762     VIXL_ASSERT(!temp.Is(rn) && !temp.Is(operand.maybeReg()));
763     Mov(temp, operand);
764     ConditionalCompare(rn, temp, nzcv, cond, op);
765   }
766 }
767 
768 
void MacroAssembler::Csel(const Register& rd,
770                           const Register& rn,
771                           const Operand& operand,
772                           Condition cond) {
773   VIXL_ASSERT(!rd.IsZero());
774   VIXL_ASSERT(!rn.IsZero());
775   VIXL_ASSERT((cond != al) && (cond != nv));
776   // The worst case for size is csel immediate:
777   //  * up to 4 instructions to materialise the constant
778   //  * 1 instruction for csel
779   MacroEmissionCheckScope guard(this);
780 
781   if (operand.IsImmediate()) {
782     // Immediate argument. Handle special cases of 0, 1 and -1 using zero
783     // register.
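    // For example, Csel(x0, x1, 1, eq) becomes "csinc x0, x1, xzr, eq" and
    // Csel(x0, x1, -1, lt) becomes "csinv x0, x1, xzr, lt".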
784     int64_t imm = operand.immediate();
785     Register zr = AppropriateZeroRegFor(rn);
786     if (imm == 0) {
787       csel(rd, rn, zr, cond);
788     } else if (imm == 1) {
789       csinc(rd, rn, zr, cond);
790     } else if (imm == -1) {
791       csinv(rd, rn, zr, cond);
792     } else {
793       UseScratchRegisterScope temps(this);
794       Register temp = temps.AcquireSameSizeAs(rn);
795       VIXL_ASSERT(!temp.Is(rd) && !temp.Is(rn));
796       VIXL_ASSERT(!temp.Is(operand.maybeReg()));
797       Mov(temp, operand.immediate());
798       csel(rd, rn, temp, cond);
799     }
800   } else if (operand.IsShiftedRegister() && (operand.shift_amount() == 0)) {
801     // Unshifted register argument.
802     csel(rd, rn, operand.reg(), cond);
803   } else {
804     // All other arguments.
805     UseScratchRegisterScope temps(this);
806     Register temp = temps.AcquireSameSizeAs(rn);
807     VIXL_ASSERT(!temp.Is(rd) && !temp.Is(rn));
808     VIXL_ASSERT(!temp.Is(operand.maybeReg()));
809     Mov(temp, operand);
810     csel(rd, rn, temp, cond);
811   }
812 }
813 
814 
void MacroAssembler::Add(const Register& rd,
816                          const Register& rn,
817                          const Operand& operand,
818                          FlagsUpdate S) {
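  // A negative immediate that fits the add/sub field is folded into the
  // opposite operation: Add(x0, x1, -8), for instance, is emitted as
  // "sub x0, x1, #8".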
819   if (operand.IsImmediate() && (operand.immediate() < 0) &&
820       IsImmAddSub(-operand.immediate())) {
821     AddSubMacro(rd, rn, -operand.immediate(), S, SUB);
822   } else {
823     AddSubMacro(rd, rn, operand, S, ADD);
824   }
825 }
826 
827 
void MacroAssembler::Adds(const Register& rd,
829                           const Register& rn,
830                           const Operand& operand) {
831   Add(rd, rn, operand, SetFlags);
832 }
833 
834 
void MacroAssembler::Sub(const Register& rd,
836                          const Register& rn,
837                          const Operand& operand,
838                          FlagsUpdate S) {
839   if (operand.IsImmediate() && (operand.immediate() < 0) &&
840       IsImmAddSub(-operand.immediate())) {
841     AddSubMacro(rd, rn, -operand.immediate(), S, ADD);
842   } else {
843     AddSubMacro(rd, rn, operand, S, SUB);
844   }
845 }
846 
847 
void MacroAssembler::Subs(const Register& rd,
849                           const Register& rn,
850                           const Operand& operand) {
851   Sub(rd, rn, operand, SetFlags);
852 }
853 
854 
void MacroAssembler::Cmn(const Register& rn, const Operand& operand) {
856   Adds(AppropriateZeroRegFor(rn), rn, operand);
857 }
858 
859 
void MacroAssembler::Cmp(const Register& rn, const Operand& operand) {
861   Subs(AppropriateZeroRegFor(rn), rn, operand);
862 }
863 
864 
void MacroAssembler::Fcmp(const FPRegister& fn, double value,
866                           FPTrapFlags trap) {
867   // The worst case for size is:
868   //  * 1 to materialise the constant, using literal pool if necessary
869   //  * 1 instruction for fcmp{e}
870   MacroEmissionCheckScope guard(this);
871   if (value != 0.0) {
872     UseScratchRegisterScope temps(this);
873     FPRegister tmp = temps.AcquireSameSizeAs(fn);
874     VIXL_ASSERT(!tmp.Is(fn));
875     Fmov(tmp, value);
876     FPCompareMacro(fn, tmp, trap);
877   } else {
878     FPCompareMacro(fn, value, trap);
879   }
880 }
881 
882 
void MacroAssembler::Fcmpe(const FPRegister& fn, double value) {
884   Fcmp(fn, value, EnableTrap);
885 }
886 
887 
void MacroAssembler::Fmov(VRegister vd, double imm) {
889   // Floating point immediates are loaded through the literal pool.
890   MacroEmissionCheckScope guard(this);
891 
892   if (vd.Is1S() || vd.Is2S() || vd.Is4S()) {
893     Fmov(vd, static_cast<float>(imm));
894     return;
895   }
896 
897   VIXL_ASSERT(vd.Is1D() || vd.Is2D());
898   if (IsImmFP64(imm)) {
899     fmov(vd, imm);
900   } else {
901     uint64_t rawbits = double_to_rawbits(imm);
902     if (vd.IsScalar()) {
903       if (rawbits == 0) {
904         fmov(vd, xzr);
905       } else {
906         Assembler::fImmPool64(vd, imm);
907       }
908     } else {
909       // TODO: consider NEON support for load literal.
910       Movi(vd, rawbits);
911     }
912   }
913 }
914 
915 
void MacroAssembler::Fmov(VRegister vd, float imm) {
917   // Floating point immediates are loaded through the literal pool.
918   MacroEmissionCheckScope guard(this);
919 
920   if (vd.Is1D() || vd.Is2D()) {
921     Fmov(vd, static_cast<double>(imm));
922     return;
923   }
924 
925   VIXL_ASSERT(vd.Is1S() || vd.Is2S() || vd.Is4S());
926   if (IsImmFP32(imm)) {
927     fmov(vd, imm);
928   } else {
929     uint32_t rawbits = float_to_rawbits(imm);
930     if (vd.IsScalar()) {
931       if (rawbits == 0) {
932         fmov(vd, wzr);
933       } else {
934         Assembler::fImmPool32(vd, imm);
935       }
936     } else {
937       // TODO: consider NEON support for load literal.
938       Movi(vd, rawbits);
939     }
940   }
941 }
942 
943 
944 
void MacroAssembler::Neg(const Register& rd,
946                          const Operand& operand) {
947   if (operand.IsImmediate()) {
948     Mov(rd, -operand.immediate());
949   } else {
950     Sub(rd, AppropriateZeroRegFor(rd), operand);
951   }
952 }
953 
954 
void MacroAssembler::Negs(const Register& rd,
956                           const Operand& operand) {
957   Subs(rd, AppropriateZeroRegFor(rd), operand);
958 }
959 
960 
bool MacroAssembler::TryOneInstrMoveImmediate(const Register& dst,
962                                               int64_t imm) {
963   return OneInstrMoveImmediateHelper(this, dst, imm);
964 }
965 
966 
Operand MacroAssembler::MoveImmediateForShiftedOp(const Register& dst,
968                                                   int64_t imm) {
969   int reg_size = dst.size();
970 
971   // Encode the immediate in a single move instruction, if possible.
972   if (TryOneInstrMoveImmediate(dst, imm)) {
973     // The move was successful; nothing to do here.
974   } else {
975     // Pre-shift the immediate to the least-significant bits of the register.
976     int shift_low = CountTrailingZeros(imm, reg_size);
977     int64_t imm_low = imm >> shift_low;
978 
979     // Pre-shift the immediate to the most-significant bits of the register,
980     // inserting set bits in the least-significant bits.
981     int shift_high = CountLeadingZeros(imm, reg_size);
982     int64_t imm_high = (imm << shift_high) | ((INT64_C(1) << shift_high) - 1);
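    // Worked example (a sketch): for imm == 0x000000cafe000000, shift_low is
    // 25 and imm_low is 0x657f, which fits a single movz, so the caller gets
    // back Operand(dst, LSL, 25) rather than a multi-instruction Mov.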
983 
984     if (TryOneInstrMoveImmediate(dst, imm_low)) {
985       // The new immediate has been moved into the destination's low bits:
986       // return a new leftward-shifting operand.
987       return Operand(dst, LSL, shift_low);
988     } else if (TryOneInstrMoveImmediate(dst, imm_high)) {
989       // The new immediate has been moved into the destination's high bits:
990       // return a new rightward-shifting operand.
991       return Operand(dst, LSR, shift_high);
992     } else {
993       Mov(dst, imm);
994     }
995   }
996   return Operand(dst);
997 }
998 
999 
void MacroAssembler::ComputeAddress(const Register& dst,
1001                                     const MemOperand& mem_op) {
1002   // We cannot handle pre-indexing or post-indexing.
1003   VIXL_ASSERT(mem_op.addrmode() == Offset);
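  // Usage sketch: ComputeAddress(x0, MemOperand(x1, 16)) emits
  // "add x0, x1, #16", and ComputeAddress(x0, MemOperand(x1, w2, SXTW, 2))
  // emits "add x0, x1, w2, sxtw #2".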
1004   Register base = mem_op.base();
1005   if (mem_op.IsImmediateOffset()) {
1006     Add(dst, base, mem_op.offset());
1007   } else {
1008     VIXL_ASSERT(mem_op.IsRegisterOffset());
1009     Register reg_offset = mem_op.regoffset();
1010     Shift shift = mem_op.shift();
1011     Extend extend = mem_op.extend();
1012     if (shift == NO_SHIFT) {
1013       VIXL_ASSERT(extend != NO_EXTEND);
1014       Add(dst, base, Operand(reg_offset, extend, mem_op.shift_amount()));
1015     } else {
1016       VIXL_ASSERT(extend == NO_EXTEND);
1017       Add(dst, base, Operand(reg_offset, shift, mem_op.shift_amount()));
1018     }
1019   }
1020 }
1021 
1022 
void MacroAssembler::AddSubMacro(const Register& rd,
1024                                  const Register& rn,
1025                                  const Operand& operand,
1026                                  FlagsUpdate S,
1027                                  AddSubOp op) {
1028   // Worst case is add/sub immediate:
1029   //  * up to 4 instructions to materialise the constant
1030   //  * 1 instruction for add/sub
1031   MacroEmissionCheckScope guard(this);
1032 
1033   if (operand.IsZero() && rd.Is(rn) && rd.Is64Bits() && rn.Is64Bits() &&
1034       (S == LeaveFlags)) {
1035     // The instruction would be a nop. Avoid generating useless code.
1036     return;
1037   }
1038 
1039   if ((operand.IsImmediate() && !IsImmAddSub(operand.immediate())) ||
1040       (rn.IsZero() && !operand.IsShiftedRegister())                ||
1041       (operand.IsShiftedRegister() && (operand.shift() == ROR))) {
1042     UseScratchRegisterScope temps(this);
1043     Register temp = temps.AcquireSameSizeAs(rn);
1044 
1045     // VIXL can acquire temp registers. Assert that the caller is aware.
1046     VIXL_ASSERT(!temp.Is(rd) && !temp.Is(rn));
1047     VIXL_ASSERT(!temp.Is(operand.maybeReg()));
1048 
1049     if (operand.IsImmediate()) {
1050       Operand imm_operand =
1051           MoveImmediateForShiftedOp(temp, operand.immediate());
1052       AddSub(rd, rn, imm_operand, S, op);
1053     } else {
1054       Mov(temp, operand);
1055       AddSub(rd, rn, temp, S, op);
1056     }
1057   } else {
1058     AddSub(rd, rn, operand, S, op);
1059   }
1060 }
1061 
1062 
void MacroAssembler::Adc(const Register& rd,
1064                          const Register& rn,
1065                          const Operand& operand) {
1066   AddSubWithCarryMacro(rd, rn, operand, LeaveFlags, ADC);
1067 }
1068 
1069 
void MacroAssembler::Adcs(const Register& rd,
1071                           const Register& rn,
1072                           const Operand& operand) {
1073   AddSubWithCarryMacro(rd, rn, operand, SetFlags, ADC);
1074 }
1075 
1076 
void MacroAssembler::Sbc(const Register& rd,
1078                          const Register& rn,
1079                          const Operand& operand) {
1080   AddSubWithCarryMacro(rd, rn, operand, LeaveFlags, SBC);
1081 }
1082 
1083 
void MacroAssembler::Sbcs(const Register& rd,
1085                           const Register& rn,
1086                           const Operand& operand) {
1087   AddSubWithCarryMacro(rd, rn, operand, SetFlags, SBC);
1088 }
1089 
1090 
void MacroAssembler::Ngc(const Register& rd,
1092                          const Operand& operand) {
1093   Register zr = AppropriateZeroRegFor(rd);
1094   Sbc(rd, zr, operand);
1095 }
1096 
1097 
void MacroAssembler::Ngcs(const Register& rd,
1099                          const Operand& operand) {
1100   Register zr = AppropriateZeroRegFor(rd);
1101   Sbcs(rd, zr, operand);
1102 }
1103 
1104 
void MacroAssembler::AddSubWithCarryMacro(const Register& rd,
1106                                           const Register& rn,
1107                                           const Operand& operand,
1108                                           FlagsUpdate S,
1109                                           AddSubWithCarryOp op) {
1110   VIXL_ASSERT(rd.size() == rn.size());
1111   // Worst case is addc/subc immediate:
1112   //  * up to 4 instructions to materialise the constant
1113   //  * 1 instruction for add/sub
1114   MacroEmissionCheckScope guard(this);
1115   UseScratchRegisterScope temps(this);
1116 
1117   if (operand.IsImmediate() ||
1118       (operand.IsShiftedRegister() && (operand.shift() == ROR))) {
1119     // Add/sub with carry (immediate or ROR shifted register.)
1120     Register temp = temps.AcquireSameSizeAs(rn);
1121     VIXL_ASSERT(!temp.Is(rd) && !temp.Is(rn) && !temp.Is(operand.maybeReg()));
1122     Mov(temp, operand);
1123     AddSubWithCarry(rd, rn, Operand(temp), S, op);
1124   } else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) {
1125     // Add/sub with carry (shifted register).
1126     VIXL_ASSERT(operand.reg().size() == rd.size());
1127     VIXL_ASSERT(operand.shift() != ROR);
1128     VIXL_ASSERT(is_uintn(rd.size() == kXRegSize ? kXRegSizeLog2 : kWRegSizeLog2,
1129                     operand.shift_amount()));
1130     temps.Exclude(operand.reg());
1131     Register temp = temps.AcquireSameSizeAs(rn);
1132     VIXL_ASSERT(!temp.Is(rd) && !temp.Is(rn) && !temp.Is(operand.maybeReg()));
1133     EmitShift(temp, operand.reg(), operand.shift(), operand.shift_amount());
1134     AddSubWithCarry(rd, rn, Operand(temp), S, op);
1135   } else if (operand.IsExtendedRegister()) {
1136     // Add/sub with carry (extended register).
1137     VIXL_ASSERT(operand.reg().size() <= rd.size());
1138     // Add/sub extended supports a shift <= 4. We want to support exactly the
1139     // same modes.
1140     VIXL_ASSERT(operand.shift_amount() <= 4);
1141     VIXL_ASSERT(operand.reg().Is64Bits() ||
1142            ((operand.extend() != UXTX) && (operand.extend() != SXTX)));
1143     temps.Exclude(operand.reg());
1144     Register temp = temps.AcquireSameSizeAs(rn);
1145     VIXL_ASSERT(!temp.Is(rd) && !temp.Is(rn) && !temp.Is(operand.maybeReg()));
1146     EmitExtendShift(temp, operand.reg(), operand.extend(),
1147                     operand.shift_amount());
1148     AddSubWithCarry(rd, rn, Operand(temp), S, op);
1149   } else {
1150     // The addressing mode is directly supported by the instruction.
1151     AddSubWithCarry(rd, rn, operand, S, op);
1152   }
1153 }
1154 
1155 
1156 #define DEFINE_FUNCTION(FN, REGTYPE, REG, OP)                         \
1157 void MacroAssembler::FN(const REGTYPE REG, const MemOperand& addr) {  \
1158   LoadStoreMacro(REG, addr, OP);                                      \
1159 }
LS_MACRO_LIST(DEFINE_FUNCTION)
1161 #undef DEFINE_FUNCTION
1162 
1163 
1164 void MacroAssembler::LoadStoreMacro(const CPURegister& rt,
1165                                     const MemOperand& addr,
1166                                     LoadStoreOp op) {
1167   // Worst case is ldr/str pre/post index:
1168   //  * 1 instruction for ldr/str
1169   //  * up to 4 instructions to materialise the constant
1170   //  * 1 instruction to update the base
1171   MacroEmissionCheckScope guard(this);
1172 
1173   int64_t offset = addr.offset();
1174   unsigned access_size = CalcLSDataSize(op);
1175 
1176   // Check if an immediate offset fits in the immediate field of the
1177   // appropriate instruction. If not, emit two instructions to perform
1178   // the operation.
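  // For example, Ldr(x0, MemOperand(x1, 1 << 20)) cannot be encoded in either
  // form, so the offset is materialised first (roughly "mov temp, #0x100000")
  // and the load becomes "ldr x0, [x1, temp]".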
1179   if (addr.IsImmediateOffset() && !IsImmLSScaled(offset, access_size) &&
1180       !IsImmLSUnscaled(offset)) {
1181     // Immediate offset that can't be encoded using unsigned or unscaled
1182     // addressing modes.
1183     UseScratchRegisterScope temps(this);
1184     Register temp = temps.AcquireSameSizeAs(addr.base());
1185     VIXL_ASSERT(!temp.Is(rt));
1186     VIXL_ASSERT(!temp.Is(addr.base()) && !temp.Is(addr.regoffset()));
1187     Mov(temp, addr.offset());
1188     LoadStore(rt, MemOperand(addr.base(), temp), op);
1189   } else if (addr.IsPostIndex() && !IsImmLSUnscaled(offset)) {
1190     // Post-index beyond unscaled addressing range.
1191     LoadStore(rt, MemOperand(addr.base()), op);
1192     Add(addr.base(), addr.base(), Operand(offset));
1193   } else if (addr.IsPreIndex() && !IsImmLSUnscaled(offset)) {
1194     // Pre-index beyond unscaled addressing range.
1195     Add(addr.base(), addr.base(), Operand(offset));
1196     LoadStore(rt, MemOperand(addr.base()), op);
1197   } else {
1198     // Encodable in one load/store instruction.
1199     LoadStore(rt, addr, op);
1200   }
1201 }
1202 
1203 
1204 #define DEFINE_FUNCTION(FN, REGTYPE, REG, REG2, OP)  \
1205 void MacroAssembler::FN(const REGTYPE REG,           \
1206                         const REGTYPE REG2,          \
1207                         const MemOperand& addr) {    \
1208   LoadStorePairMacro(REG, REG2, addr, OP);           \
1209 }
LSPAIR_MACRO_LIST(DEFINE_FUNCTION)
1211 #undef DEFINE_FUNCTION
1212 
1213 void MacroAssembler::LoadStorePairMacro(const CPURegister& rt,
1214                                         const CPURegister& rt2,
1215                                         const MemOperand& addr,
1216                                         LoadStorePairOp op) {
1217   // TODO(all): Should we support register offset for load-store-pair?
1218   VIXL_ASSERT(!addr.IsRegisterOffset());
1219   // Worst case is ldp/stp immediate:
1220   //  * 1 instruction for ldp/stp
1221   //  * up to 4 instructions to materialise the constant
1222   //  * 1 instruction to update the base
1223   MacroEmissionCheckScope guard(this);
1224 
1225   int64_t offset = addr.offset();
1226   unsigned access_size = CalcLSPairDataSize(op);
1227 
1228   // Check if the offset fits in the immediate field of the appropriate
1229   // instruction. If not, emit two instructions to perform the operation.
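  // For example, Ldp(x0, x1, MemOperand(x2, 1024)) is outside the scaled
  // 7-bit range (roughly +/-512 bytes for X registers), so the base is first
  // advanced with "add temp, x2, #1024" and the pair is loaded from [temp].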
1230   if (IsImmLSPair(offset, access_size)) {
1231     // Encodable in one load/store pair instruction.
1232     LoadStorePair(rt, rt2, addr, op);
1233   } else {
1234     Register base = addr.base();
1235     if (addr.IsImmediateOffset()) {
1236       UseScratchRegisterScope temps(this);
1237       Register temp = temps.AcquireSameSizeAs(base);
1238       Add(temp, base, offset);
1239       LoadStorePair(rt, rt2, MemOperand(temp), op);
1240     } else if (addr.IsPostIndex()) {
1241       LoadStorePair(rt, rt2, MemOperand(base), op);
1242       Add(base, base, offset);
1243     } else {
1244       VIXL_ASSERT(addr.IsPreIndex());
1245       Add(base, base, offset);
1246       LoadStorePair(rt, rt2, MemOperand(base), op);
1247     }
1248   }
1249 }
1250 
1251 
void MacroAssembler::Prfm(PrefetchOperation op, const MemOperand& addr) {
1253   MacroEmissionCheckScope guard(this);
1254 
1255   // There are no pre- or post-index modes for prfm.
1256   VIXL_ASSERT(addr.IsImmediateOffset() || addr.IsRegisterOffset());
1257 
1258   // The access size is implicitly 8 bytes for all prefetch operations.
1259   unsigned size = kXRegSizeInBytesLog2;
1260 
1261   // Check if an immediate offset fits in the immediate field of the
1262   // appropriate instruction. If not, emit two instructions to perform
1263   // the operation.
1264   if (addr.IsImmediateOffset() && !IsImmLSScaled(addr.offset(), size) &&
1265       !IsImmLSUnscaled(addr.offset())) {
1266     // Immediate offset that can't be encoded using unsigned or unscaled
1267     // addressing modes.
1268     UseScratchRegisterScope temps(this);
1269     Register temp = temps.AcquireSameSizeAs(addr.base());
1270     Mov(temp, addr.offset());
1271     Prefetch(op, MemOperand(addr.base(), temp));
1272   } else {
1273     // Simple register-offsets are encodable in one instruction.
1274     Prefetch(op, addr);
1275   }
1276 }
1277 
1278 
void MacroAssembler::PushStackPointer() {
1280   PrepareForPush(1, 8);
1281 
1282   // Pushing a stack pointer leads to implementation-defined
1283   // behavior, which may be surprising. In particular,
1284   //   str x28, [x28, #-8]!
1285   // pre-decrements the stack pointer, storing the decremented value.
1286   // Additionally, sp is read as xzr in this context, so it cannot be pushed.
1287   // So we must use a scratch register.
1288   UseScratchRegisterScope temps(this);
1289   Register scratch = temps.AcquireX();
1290 
1291   Mov(scratch, GetStackPointer64());
1292   str(scratch, MemOperand(GetStackPointer64(), -8, PreIndex));
1293 }
1294 
1295 
void MacroAssembler::Push(const CPURegister& src0, const CPURegister& src1,
1297                           const CPURegister& src2, const CPURegister& src3) {
1298   VIXL_ASSERT(AreSameSizeAndType(src0, src1, src2, src3));
1299   VIXL_ASSERT(src0.IsValid());
1300 
1301   int count = 1 + src1.IsValid() + src2.IsValid() + src3.IsValid();
1302   int size = src0.SizeInBytes();
1303 
1304   if (src0.Is(GetStackPointer64())) {
1305     VIXL_ASSERT(count == 1);
1306     VIXL_ASSERT(size == 8);
1307     PushStackPointer();
1308     return;
1309   }
1310 
1311   PrepareForPush(count, size);
1312   PushHelper(count, size, src0, src1, src2, src3);
1313 }
1314 
1315 
void MacroAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1,
1317                          const CPURegister& dst2, const CPURegister& dst3) {
1318   // It is not valid to pop into the same register more than once in one
1319   // instruction, not even into the zero register.
1320   VIXL_ASSERT(!AreAliased(dst0, dst1, dst2, dst3));
1321   VIXL_ASSERT(AreSameSizeAndType(dst0, dst1, dst2, dst3));
1322   VIXL_ASSERT(dst0.IsValid());
1323 
1324   int count = 1 + dst1.IsValid() + dst2.IsValid() + dst3.IsValid();
1325   int size = dst0.SizeInBytes();
1326 
1327   PrepareForPop(count, size);
1328   PopHelper(count, size, dst0, dst1, dst2, dst3);
1329 }
1330 
1331 
void MacroAssembler::PushCPURegList(CPURegList registers) {
1333   VIXL_ASSERT(!registers.Overlaps(*TmpList()));
1334   VIXL_ASSERT(!registers.Overlaps(*FPTmpList()));
1335 
1336   int reg_size = registers.RegisterSizeInBytes();
1337   PrepareForPush(registers.Count(), reg_size);
1338 
1339   // Bump the stack pointer and store two registers at the bottom.
1340   int size = registers.TotalSizeInBytes();
1341   const CPURegister& bottom_0 = registers.PopLowestIndex();
1342   const CPURegister& bottom_1 = registers.PopLowestIndex();
1343   if (bottom_0.IsValid() && bottom_1.IsValid()) {
1344     Stp(bottom_0, bottom_1, MemOperand(GetStackPointer64(), -size, PreIndex));
1345   } else if (bottom_0.IsValid()) {
1346     Str(bottom_0, MemOperand(GetStackPointer64(), -size, PreIndex));
1347   }
1348 
1349   int offset = 2 * reg_size;
1350   while (!registers.IsEmpty()) {
1351     const CPURegister& src0 = registers.PopLowestIndex();
1352     const CPURegister& src1 = registers.PopLowestIndex();
1353     if (src1.IsValid()) {
1354       Stp(src0, src1, MemOperand(GetStackPointer64(), offset));
1355     } else {
1356       Str(src0, MemOperand(GetStackPointer64(), offset));
1357     }
1358     offset += 2 * reg_size;
1359   }
1360 }
1361 
1362 
void MacroAssembler::PopCPURegList(CPURegList registers) {
1364   VIXL_ASSERT(!registers.Overlaps(*TmpList()));
1365   VIXL_ASSERT(!registers.Overlaps(*FPTmpList()));
1366 
1367   int reg_size = registers.RegisterSizeInBytes();
1368   PrepareForPop(registers.Count(), reg_size);
1369 
1370 
1371   int size = registers.TotalSizeInBytes();
1372   const CPURegister& bottom_0 = registers.PopLowestIndex();
1373   const CPURegister& bottom_1 = registers.PopLowestIndex();
1374 
1375   int offset = 2 * reg_size;
1376   while (!registers.IsEmpty()) {
1377     const CPURegister& dst0 = registers.PopLowestIndex();
1378     const CPURegister& dst1 = registers.PopLowestIndex();
1379     if (dst1.IsValid()) {
1380       Ldp(dst0, dst1, MemOperand(GetStackPointer64(), offset));
1381     } else {
1382       Ldr(dst0, MemOperand(GetStackPointer64(), offset));
1383     }
1384     offset += 2 * reg_size;
1385   }
1386 
1387   // Load the two registers at the bottom and drop the stack pointer.
1388   if (bottom_0.IsValid() && bottom_1.IsValid()) {
1389     Ldp(bottom_0, bottom_1, MemOperand(GetStackPointer64(), size, PostIndex));
1390   } else if (bottom_0.IsValid()) {
1391     Ldr(bottom_0, MemOperand(GetStackPointer64(), size, PostIndex));
1392   }
1393 }
1394 
1395 
void MacroAssembler::PushMultipleTimes(int count, Register src) {
1397   int size = src.SizeInBytes();
1398 
1399   PrepareForPush(count, size);
1400   // Push up to four registers at a time if possible because if the current
1401   // stack pointer is sp and the register size is 32, registers must be pushed
1402   // in blocks of four in order to maintain the 16-byte alignment for sp.
1403   while (count >= 4) {
1404     PushHelper(4, size, src, src, src, src);
1405     count -= 4;
1406   }
1407   if (count >= 2) {
1408     PushHelper(2, size, src, src, NoReg, NoReg);
1409     count -= 2;
1410   }
1411   if (count == 1) {
1412     PushHelper(1, size, src, NoReg, NoReg, NoReg);
1413     count -= 1;
1414   }
1415   VIXL_ASSERT(count == 0);
1416 }
1417 
1418 
void MacroAssembler::PushHelper(int count, int size,
1420                                 const CPURegister& src0,
1421                                 const CPURegister& src1,
1422                                 const CPURegister& src2,
1423                                 const CPURegister& src3) {
1424   // Ensure that we don't unintentionally modify scratch or debug registers.
1425   // Worst case for size is 2 stp.
1426   InstructionAccurateScope scope(this, 2,
1427                                  InstructionAccurateScope::kMaximumSize);
1428 
1429   VIXL_ASSERT(AreSameSizeAndType(src0, src1, src2, src3));
1430   VIXL_ASSERT(size == src0.SizeInBytes());
1431 
1432   // Pushing the stack pointer has unexpected behavior. See PushStackPointer().
1433   VIXL_ASSERT(!src0.Is(GetStackPointer64()) && !src0.Is(sp));
1434   VIXL_ASSERT(!src1.Is(GetStackPointer64()) && !src1.Is(sp));
1435   VIXL_ASSERT(!src2.Is(GetStackPointer64()) && !src2.Is(sp));
1436   VIXL_ASSERT(!src3.Is(GetStackPointer64()) && !src3.Is(sp));
1437 
1438   // The JS engine should never push 4 bytes.
1439   VIXL_ASSERT(size >= 8);
1440 
1441   // When pushing multiple registers, the store order is chosen such that
1442   // Push(a, b) is equivalent to Push(a) followed by Push(b).
1443   switch (count) {
1444     case 1:
1445       VIXL_ASSERT(src1.IsNone() && src2.IsNone() && src3.IsNone());
1446       str(src0, MemOperand(GetStackPointer64(), -1 * size, PreIndex));
1447       break;
1448     case 2:
1449       VIXL_ASSERT(src2.IsNone() && src3.IsNone());
1450       stp(src1, src0, MemOperand(GetStackPointer64(), -2 * size, PreIndex));
1451       break;
1452     case 3:
1453       VIXL_ASSERT(src3.IsNone());
1454       stp(src2, src1, MemOperand(GetStackPointer64(), -3 * size, PreIndex));
1455       str(src0, MemOperand(GetStackPointer64(), 2 * size));
1456       break;
1457     case 4:
1458       // Skip over 4 * size, then fill in the gap. This allows four W registers
1459       // to be pushed using sp, whilst maintaining 16-byte alignment for sp at
1460       // all times.
1461       stp(src3, src2, MemOperand(GetStackPointer64(), -4 * size, PreIndex));
1462       stp(src1, src0, MemOperand(GetStackPointer64(), 2 * size));
1463       break;
1464     default:
1465       VIXL_UNREACHABLE();
1466   }
1467 }
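
// For reference, the sequence emitted by case 4 above for X registers
// (assuming sp is the current stack pointer):
//
//   PushHelper(4, 8, x0, x1, x2, x3);
//     // stp x3, x2, [sp, #-32]!  ; claim 32 bytes; x3 at the new sp, x2 at sp+8
//     // stp x1, x0, [sp, #16]    ; x1 at sp+16, x0 at sp+24 (highest address)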


void MacroAssembler::PopHelper(int count, int size,
                               const CPURegister& dst0,
                               const CPURegister& dst1,
                               const CPURegister& dst2,
                               const CPURegister& dst3) {
  // Ensure that we don't unintentionally modify scratch or debug registers.
  // Worst case for size is 2 ldp.
  InstructionAccurateScope scope(this, 2,
                                 InstructionAccurateScope::kMaximumSize);

  VIXL_ASSERT(AreSameSizeAndType(dst0, dst1, dst2, dst3));
  VIXL_ASSERT(size == dst0.SizeInBytes());

  // When popping multiple registers, the load order is chosen such that
  // Pop(a, b) is equivalent to Pop(a) followed by Pop(b).
  switch (count) {
    case 1:
      VIXL_ASSERT(dst1.IsNone() && dst2.IsNone() && dst3.IsNone());
      ldr(dst0, MemOperand(GetStackPointer64(), 1 * size, PostIndex));
      break;
    case 2:
      VIXL_ASSERT(dst2.IsNone() && dst3.IsNone());
      ldp(dst0, dst1, MemOperand(GetStackPointer64(), 2 * size, PostIndex));
      break;
    case 3:
      VIXL_ASSERT(dst3.IsNone());
      ldr(dst2, MemOperand(GetStackPointer64(), 2 * size));
      ldp(dst0, dst1, MemOperand(GetStackPointer64(), 3 * size, PostIndex));
      break;
    case 4:
      // Load the higher addresses first, then load the lower addresses and skip
      // the whole block in the second instruction. This allows four W registers
      // to be popped using sp, whilst maintaining 16-byte alignment for sp at
      // all times.
      ldp(dst2, dst3, MemOperand(GetStackPointer64(), 2 * size));
      ldp(dst0, dst1, MemOperand(GetStackPointer64(), 4 * size, PostIndex));
      break;
    default:
      VIXL_UNREACHABLE();
  }
}
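
// The matching restore for the PushHelper() example above (assuming sp is the
// current stack pointer):
//
//   PopHelper(4, 8, x3, x2, x1, x0);
//     // ldp x1, x0, [sp, #16]   ; higher addresses first
//     // ldp x3, x2, [sp], #32   ; lower addresses, then drop the 32 bytes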


void MacroAssembler::PrepareForPush(int count, int size) {
  if (sp.Is(GetStackPointer64())) {
    // If the current stack pointer is sp, then it must be aligned to 16 bytes
    // on entry and the total size of the specified registers must also be a
    // multiple of 16 bytes.
    VIXL_ASSERT((count * size) % 16 == 0);
  } else {
    // Even if the current stack pointer is not the system stack pointer (sp),
    // the system stack pointer will still be modified in order to comply with
    // ABI rules about accessing memory below the system stack pointer.
    BumpSystemStackPointer(count * size);
  }
}


void MacroAssembler::PrepareForPop(int count, int size) {
  USE(count, size);
  if (sp.Is(GetStackPointer64())) {
    // If the current stack pointer is sp, then it must be aligned to 16 bytes
    // on entry and the total size of the specified registers must also be a
    // multiple of 16 bytes.
    VIXL_ASSERT((count * size) % 16 == 0);
  }
}

void MacroAssembler::Poke(const Register& src, const Operand& offset) {
  if (offset.IsImmediate()) {
    VIXL_ASSERT(offset.immediate() >= 0);
  }

  Str(src, MemOperand(GetStackPointer64(), offset));
}


void MacroAssembler::Peek(const Register& dst, const Operand& offset) {
  if (offset.IsImmediate()) {
    VIXL_ASSERT(offset.immediate() >= 0);
  }

  Ldr(dst, MemOperand(GetStackPointer64(), offset));
}
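
// For illustration: Poke() and Peek() access existing stack slots without
// moving the stack pointer. A sketch, assuming a MacroAssembler 'masm':
//
//   masm.Claim(16);      // reserve two 8-byte slots
//   masm.Poke(x0, 0);    // str x0, [<stack pointer>]
//   masm.Poke(x1, 8);    // str x1, [<stack pointer>, #8]
//   masm.Peek(x2, 0);    // ldr x2, [<stack pointer>]
//   masm.Drop(16);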


void MacroAssembler::Claim(const Operand& size) {
  if (size.IsZero()) {
    return;
  }

  if (size.IsImmediate()) {
    VIXL_ASSERT(size.immediate() > 0);
    if (sp.Is(GetStackPointer64())) {
      VIXL_ASSERT((size.immediate() % 16) == 0);
    }
  }

  Sub(GetStackPointer64(), GetStackPointer64(), size);

  // Make sure the real stack pointer reflects the claimed stack space.
  // We can't use stack memory below the stack pointer; it could be clobbered
  // by interrupts and signal handlers.
  if (!sp.Is(GetStackPointer64())) {
    Mov(sp, GetStackPointer64());
  }
}


void MacroAssembler::Drop(const Operand& size) {
  if (size.IsZero()) {
    return;
  }

  if (size.IsImmediate()) {
    VIXL_ASSERT(size.immediate() > 0);
    if (sp.Is(GetStackPointer64())) {
      VIXL_ASSERT((size.immediate() % 16) == 0);
    }
  }

  Add(GetStackPointer64(), GetStackPointer64(), size);
}
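
// A sketch of the code generated by Claim()/Drop() for an immediate size:
//
//   masm.Claim(32);
//     // sub <stack pointer>, <stack pointer>, #32
//     // mov sp, <stack pointer>   (only when a pseudo stack pointer is used)
//   masm.Drop(32);
//     // add <stack pointer>, <stack pointer>, #32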


void MacroAssembler::PushCalleeSavedRegisters() {
  // Ensure that the macro-assembler doesn't use any scratch registers.
  // 10 stp will be emitted.
  // TODO(all): Should we use GetCalleeSaved and SavedFP?
  InstructionAccurateScope scope(this, 10);

  // This method must not be called unless the current stack pointer is sp.
  VIXL_ASSERT(sp.Is(GetStackPointer64()));

  MemOperand tos(sp, -2 * static_cast<int>(kXRegSizeInBytes), PreIndex);

  stp(x29, x30, tos);
  stp(x27, x28, tos);
  stp(x25, x26, tos);
  stp(x23, x24, tos);
  stp(x21, x22, tos);
  stp(x19, x20, tos);

  stp(d14, d15, tos);
  stp(d12, d13, tos);
  stp(d10, d11, tos);
  stp(d8, d9, tos);
}
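
// The resulting frame, from high to low addresses (each pre-indexed stp claims
// 16 bytes, 160 bytes in total):
//
//   [old sp -   8]  x30
//   [old sp -  16]  x29
//   [old sp -  24]  x28
//   ...
//   [old sp - 152]  d9
//   [old sp - 160]  d8   <- new sp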


void MacroAssembler::PopCalleeSavedRegisters() {
  // Ensure that the macro-assembler doesn't use any scratch registers.
  // 10 ldp will be emitted.
  // TODO(all): Should we use GetCalleeSaved and SavedFP?
  InstructionAccurateScope scope(this, 10);

  // This method must not be called unless the current stack pointer is sp.
  VIXL_ASSERT(sp.Is(GetStackPointer64()));

  MemOperand tos(sp, 2 * kXRegSizeInBytes, PostIndex);

  ldp(d8, d9, tos);
  ldp(d10, d11, tos);
  ldp(d12, d13, tos);
  ldp(d14, d15, tos);

  ldp(x19, x20, tos);
  ldp(x21, x22, tos);
  ldp(x23, x24, tos);
  ldp(x25, x26, tos);
  ldp(x27, x28, tos);
  ldp(x29, x30, tos);
}

void MacroAssembler::LoadCPURegList(CPURegList registers,
                                    const MemOperand& src) {
  LoadStoreCPURegListHelper(kLoad, registers, src);
}

void MacroAssembler::StoreCPURegList(CPURegList registers,
                                     const MemOperand& dst) {
  LoadStoreCPURegListHelper(kStore, registers, dst);
}
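
// For illustration, a sketch: storing three X registers to a buffer whose base
// address is in x10. The registers land in ascending index order at
// consecutive 8-byte slots.
//
//   masm.StoreCPURegList(CPURegList(x19, x20, x21), MemOperand(x10));
//     // stp x19, x20, [x10]
//     // str x21, [x10, #16]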


void MacroAssembler::LoadStoreCPURegListHelper(LoadStoreCPURegListAction op,
                                               CPURegList registers,
                                               const MemOperand& mem) {
  // We do not handle pre-indexing or post-indexing.
  VIXL_ASSERT(!(mem.IsPreIndex() || mem.IsPostIndex()));
  VIXL_ASSERT(!registers.Overlaps(tmp_list_));
  VIXL_ASSERT(!registers.Overlaps(fptmp_list_));
  VIXL_ASSERT(!registers.IncludesAliasOf(sp));

  UseScratchRegisterScope temps(this);

  MemOperand loc = BaseMemOperandForLoadStoreCPURegList(registers,
                                                        mem,
                                                        &temps);

  while (registers.Count() >= 2) {
    const CPURegister& dst0 = registers.PopLowestIndex();
    const CPURegister& dst1 = registers.PopLowestIndex();
    if (op == kStore) {
      Stp(dst0, dst1, loc);
    } else {
      VIXL_ASSERT(op == kLoad);
      Ldp(dst0, dst1, loc);
    }
    loc.AddOffset(2 * registers.RegisterSizeInBytes());
  }
  if (!registers.IsEmpty()) {
    if (op == kStore) {
      Str(registers.PopLowestIndex(), loc);
    } else {
      VIXL_ASSERT(op == kLoad);
      Ldr(registers.PopLowestIndex(), loc);
    }
  }
}

MemOperand MacroAssembler::BaseMemOperandForLoadStoreCPURegList(
    const CPURegList& registers,
    const MemOperand& mem,
    UseScratchRegisterScope* scratch_scope) {
  // If necessary, pre-compute the base address for the accesses.
  if (mem.IsRegisterOffset()) {
    Register reg_base = scratch_scope->AcquireX();
    ComputeAddress(reg_base, mem);
    return MemOperand(reg_base);

  } else if (mem.IsImmediateOffset()) {
    int reg_size = registers.RegisterSizeInBytes();
    int total_size = registers.TotalSizeInBytes();
    int64_t min_offset = mem.offset();
    int64_t max_offset = mem.offset() + std::max(0, total_size - 2 * reg_size);
    if ((registers.Count() >= 2) &&
        (!Assembler::IsImmLSPair(min_offset, WhichPowerOf2(reg_size)) ||
         !Assembler::IsImmLSPair(max_offset, WhichPowerOf2(reg_size)))) {
      Register reg_base = scratch_scope->AcquireX();
      ComputeAddress(reg_base, mem);
      return MemOperand(reg_base);
    }
  }

  return mem;
}
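
// For illustration: for 64-bit registers the scaled ldp/stp immediate only
// covers multiples of 8 in [-512, 504], so a call such as
//
//   masm.StoreCPURegList(CPURegList(x19, x20), MemOperand(x10, 1024));
//
// is expected to first compute 'x10 + 1024' into a scratch base register via
// ComputeAddress() and then access memory relative to that base.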

void MacroAssembler::BumpSystemStackPointer(const Operand& space) {
  VIXL_ASSERT(!sp.Is(GetStackPointer64()));
  // TODO: Several callers rely on this not using scratch registers, so we use
  // the assembler directly here. However, this means that large immediate
  // values of 'space' cannot be handled.
  InstructionAccurateScope scope(this, 1);
  sub(sp, GetStackPointer64(), space);
}


void MacroAssembler::Trace(TraceParameters parameters, TraceCommand command) {
#ifdef JS_SIMULATOR_ARM64
  // The arguments to the trace pseudo instruction need to be contiguous in
  // memory, so make sure we don't try to emit a literal pool.
  InstructionAccurateScope scope(this, kTraceLength / kInstructionSize);

  Label start;
  bind(&start);

  // Refer to simulator-a64.h for a description of the marker and its
  // arguments.
  hlt(kTraceOpcode);

  // VIXL_ASSERT(SizeOfCodeGeneratedSince(&start) == kTraceParamsOffset);
  dc32(parameters);

  // VIXL_ASSERT(SizeOfCodeGeneratedSince(&start) == kTraceCommandOffset);
  dc32(command);
#else
  // Emit nothing on real hardware.
  USE(parameters, command);
#endif
}


void MacroAssembler::Log(TraceParameters parameters) {
#ifdef JS_SIMULATOR_ARM64
  // The arguments to the log pseudo instruction need to be contiguous in
  // memory, so make sure we don't try to emit a literal pool.
  InstructionAccurateScope scope(this, kLogLength / kInstructionSize);

  Label start;
  bind(&start);

  // Refer to simulator-a64.h for a description of the marker and its
  // arguments.
  hlt(kLogOpcode);

  // VIXL_ASSERT(SizeOfCodeGeneratedSince(&start) == kLogParamsOffset);
  dc32(parameters);
#else
  // Emit nothing on real hardware.
  USE(parameters);
#endif
}
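
// For reference, the in-memory layout of the trace/log pseudo instructions
// consumed by the simulator (each dc32() emits one 32-bit word):
//
//   offset 0:  hlt #kTraceOpcode (or #kLogOpcode)
//   offset 4:  parameters
//   offset 8:  command   (Trace only)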


void MacroAssembler::EnableInstrumentation() {
  VIXL_ASSERT(!isprint(InstrumentStateEnable));
  InstructionAccurateScope scope(this, 1);
  movn(xzr, InstrumentStateEnable);
}


void MacroAssembler::DisableInstrumentation() {
  VIXL_ASSERT(!isprint(InstrumentStateDisable));
  InstructionAccurateScope scope(this, 1);
  movn(xzr, InstrumentStateDisable);
}


void MacroAssembler::AnnotateInstrumentation(const char* marker_name) {
  VIXL_ASSERT(strlen(marker_name) == 2);

  // We allow only printable characters in the marker names. Unprintable
  // characters are reserved for controlling features of the instrumentation.
  VIXL_ASSERT(isprint(marker_name[0]) && isprint(marker_name[1]));

  InstructionAccurateScope scope(this, 1);
  movn(xzr, (marker_name[1] << 8) | marker_name[0]);
}
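
// For illustration: a two-character marker is packed into the movn immediate
// with the first character in the low byte, e.g.
//
//   masm.AnnotateInstrumentation("AB");
//     // movn xzr, #0x4241   ('A' == 0x41, 'B' == 0x42)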


void UseScratchRegisterScope::Open(MacroAssembler* masm) {
  VIXL_ASSERT(!initialised_);
  available_ = masm->TmpList();
  availablefp_ = masm->FPTmpList();
  old_available_ = available_->list();
  old_availablefp_ = availablefp_->list();
  VIXL_ASSERT(available_->type() == CPURegister::kRegister);
  VIXL_ASSERT(availablefp_->type() == CPURegister::kVRegister);
#ifdef DEBUG
  initialised_ = true;
#endif
}


void UseScratchRegisterScope::Close() {
  if (available_) {
    available_->set_list(old_available_);
    available_ = NULL;
  }
  if (availablefp_) {
    availablefp_->set_list(old_availablefp_);
    availablefp_ = NULL;
  }
#ifdef DEBUG
  initialised_ = false;
#endif
}


UseScratchRegisterScope::UseScratchRegisterScope(MacroAssembler* masm) {
#ifdef DEBUG
  initialised_ = false;
#endif
  Open(masm);
}

// This allows deferred (and optional) initialisation of the scope.
UseScratchRegisterScope::UseScratchRegisterScope()
    : available_(NULL), availablefp_(NULL),
      old_available_(0), old_availablefp_(0) {
#ifdef DEBUG
  initialised_ = false;
#endif
}

UseScratchRegisterScope::~UseScratchRegisterScope() {
  Close();
}
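
// Typical usage (a sketch): registers acquired from the scope come from the
// macro assembler's scratch lists and are only valid until the scope is
// closed, at which point the lists are restored.
//
//   {
//     UseScratchRegisterScope temps(&masm);
//     Register scratch = temps.AcquireX();   // one of the registers in TmpList()
//     masm.Add(scratch, x0, x1);
//   }   // 'scratch' must no longer be used here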


bool UseScratchRegisterScope::IsAvailable(const CPURegister& reg) const {
  return available_->IncludesAliasOf(reg) || availablefp_->IncludesAliasOf(reg);
}


Register UseScratchRegisterScope::AcquireSameSizeAs(const Register& reg) {
  int code = AcquireNextAvailable(available_).code();
  return Register(code, reg.size());
}


FPRegister UseScratchRegisterScope::AcquireSameSizeAs(const FPRegister& reg) {
  int code = AcquireNextAvailable(availablefp_).code();
  return FPRegister(code, reg.size());
}


void UseScratchRegisterScope::Release(const CPURegister& reg) {
  VIXL_ASSERT(initialised_);
  if (reg.IsRegister()) {
    ReleaseByCode(available_, reg.code());
  } else if (reg.IsFPRegister()) {
    ReleaseByCode(availablefp_, reg.code());
  } else {
    VIXL_ASSERT(reg.IsNone());
  }
}


void UseScratchRegisterScope::Include(const CPURegList& list) {
  VIXL_ASSERT(initialised_);
  if (list.type() == CPURegister::kRegister) {
    // Make sure that neither sp nor xzr is included in the list.
    IncludeByRegList(available_, list.list() & ~(xzr.Bit() | sp.Bit()));
  } else {
    VIXL_ASSERT(list.type() == CPURegister::kVRegister);
    IncludeByRegList(availablefp_, list.list());
  }
}


void UseScratchRegisterScope::Include(const Register& reg1,
                                      const Register& reg2,
                                      const Register& reg3,
                                      const Register& reg4) {
  VIXL_ASSERT(initialised_);
  RegList include = reg1.Bit() | reg2.Bit() | reg3.Bit() | reg4.Bit();
  // Make sure that neither sp nor xzr is included in the list.
  include &= ~(xzr.Bit() | sp.Bit());

  IncludeByRegList(available_, include);
}


void UseScratchRegisterScope::Include(const FPRegister& reg1,
                                      const FPRegister& reg2,
                                      const FPRegister& reg3,
                                      const FPRegister& reg4) {
  RegList include = reg1.Bit() | reg2.Bit() | reg3.Bit() | reg4.Bit();
  IncludeByRegList(availablefp_, include);
}


void UseScratchRegisterScope::Exclude(const CPURegList& list) {
  if (list.type() == CPURegister::kRegister) {
    ExcludeByRegList(available_, list.list());
  } else {
    VIXL_ASSERT(list.type() == CPURegister::kVRegister);
    ExcludeByRegList(availablefp_, list.list());
  }
}


void UseScratchRegisterScope::Exclude(const Register& reg1,
                                      const Register& reg2,
                                      const Register& reg3,
                                      const Register& reg4) {
  RegList exclude = reg1.Bit() | reg2.Bit() | reg3.Bit() | reg4.Bit();
  ExcludeByRegList(available_, exclude);
}


void UseScratchRegisterScope::Exclude(const FPRegister& reg1,
                                      const FPRegister& reg2,
                                      const FPRegister& reg3,
                                      const FPRegister& reg4) {
  RegList excludefp = reg1.Bit() | reg2.Bit() | reg3.Bit() | reg4.Bit();
  ExcludeByRegList(availablefp_, excludefp);
}


void UseScratchRegisterScope::Exclude(const CPURegister& reg1,
                                      const CPURegister& reg2,
                                      const CPURegister& reg3,
                                      const CPURegister& reg4) {
  RegList exclude = 0;
  RegList excludefp = 0;

  const CPURegister regs[] = {reg1, reg2, reg3, reg4};

  for (unsigned i = 0; i < (sizeof(regs) / sizeof(regs[0])); i++) {
    if (regs[i].IsRegister()) {
      exclude |= regs[i].Bit();
    } else if (regs[i].IsFPRegister()) {
      excludefp |= regs[i].Bit();
    } else {
      VIXL_ASSERT(regs[i].IsNone());
    }
  }

  ExcludeByRegList(available_, exclude);
  ExcludeByRegList(availablefp_, excludefp);
}


void UseScratchRegisterScope::ExcludeAll() {
  ExcludeByRegList(available_, available_->list());
  ExcludeByRegList(availablefp_, availablefp_->list());
}


CPURegister UseScratchRegisterScope::AcquireNextAvailable(
    CPURegList* available) {
  VIXL_CHECK(!available->IsEmpty());
  CPURegister result = available->PopLowestIndex();
  VIXL_ASSERT(!AreAliased(result, xzr, sp));
  return result;
}


void UseScratchRegisterScope::ReleaseByCode(CPURegList* available, int code) {
  ReleaseByRegList(available, static_cast<RegList>(1) << code);
}


void UseScratchRegisterScope::ReleaseByRegList(CPURegList* available,
                                               RegList regs) {
  available->set_list(available->list() | regs);
}


void UseScratchRegisterScope::IncludeByRegList(CPURegList* available,
                                               RegList regs) {
  available->set_list(available->list() | regs);
}


void UseScratchRegisterScope::ExcludeByRegList(CPURegList* available,
                                               RegList exclude) {
  available->set_list(available->list() & ~exclude);
}

}  // namespace vixl