/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/arm/MoveEmitter-arm.h"

#include "jit/MacroAssembler-inl.h"

using namespace js;
using namespace js::jit;

MoveEmitterARM::MoveEmitterARM(MacroAssembler& masm)
    : inCycle_(0),
      masm(masm),
      pushedAtCycle_(-1),
      pushedAtSpill_(-1),
      spilledReg_(InvalidReg),
      spilledFloatReg_(InvalidFloatReg) {
  pushedAtStart_ = masm.framePushed();
}
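
// pushedAtStart_ records the frame depth before the emitter pushes anything
// of its own; finish() pops back to exactly this depth, releasing both the
// cycle-resolution slots and the lr spill slot (if one was created).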

void MoveEmitterARM::emit(const MoveResolver& moves) {
  if (moves.numCycles()) {
    // Reserve stack for cycle resolution
    masm.reserveStack(moves.numCycles() * sizeof(double));
    pushedAtCycle_ = masm.framePushed();
  }

  for (size_t i = 0; i < moves.numMoves(); i++) {
    emit(moves.getMove(i));
  }
}
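
// For example, a resolver reporting two cycles reserves 2 * sizeof(double)
// == 16 bytes up front, giving each cycle its own double-sized save slot,
// later addressed through cycleSlot(slotId, subslot).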

MoveEmitterARM::~MoveEmitterARM() { assertDone(); }

Address MoveEmitterARM::cycleSlot(uint32_t slot, uint32_t subslot) const {
  int32_t offset = masm.framePushed() - pushedAtCycle_;
  MOZ_ASSERT(offset < 4096 && offset > -4096);
  return Address(StackPointer, offset + slot * sizeof(double) + subslot);
}
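
// Worked example: if nothing has been pushed since the reservation
// (framePushed() == pushedAtCycle_), cycleSlot(1, 4) is [sp, #12]:
// a zero base offset, plus one 8-byte slot, plus a 4-byte subslot.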

Address MoveEmitterARM::spillSlot() const {
  int32_t offset = masm.framePushed() - pushedAtSpill_;
  MOZ_ASSERT(offset < 4096 && offset > -4096);
  return Address(StackPointer, offset);
}

Address MoveEmitterARM::toAddress(const MoveOperand& operand) const {
  MOZ_ASSERT(operand.isMemoryOrEffectiveAddress());

  if (operand.base() != StackPointer) {
    MOZ_ASSERT(operand.disp() < 1024 && operand.disp() > -1024);
    return Operand(operand.base(), operand.disp()).toAddress();
  }

  MOZ_ASSERT(operand.disp() >= 0);

  // Otherwise, the stack offset may need to be adjusted.
  return Address(StackPointer,
                 operand.disp() + (masm.framePushed() - pushedAtStart_));
}
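
// The adjustment above compensates for pushes the emitter itself has made
// since resolution: an SP-relative displacement recorded by the resolver is
// biased by framePushed() - pushedAtStart_ (e.g. 4 after the lr spill in
// tempReg()) so it still names the same stack word.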

Register MoveEmitterARM::tempReg() {
  if (spilledReg_ != InvalidReg) {
    return spilledReg_;
  }

  // For now, just pick r12/ip as the eviction point. This is totally random,
  // and if it ends up being bad, we can use actual heuristics later. r12 is
  // actually a bad choice. It is the scratch register, which is frequently
  // used for address computations, such as those found when we attempt to
  // access values more than 4096 off of the stack pointer. Instead, use lr,
  // the LinkRegister.
  spilledReg_ = r14;
  if (pushedAtSpill_ == -1) {
    masm.Push(spilledReg_);
    pushedAtSpill_ = masm.framePushed();
  } else {
    ScratchRegisterScope scratch(masm);
    masm.ma_str(spilledReg_, spillSlot(), scratch);
  }
  return spilledReg_;
}
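
// Usage sketch: the first tempReg() call pushes lr and records
// pushedAtSpill_. If lr is later restored (spilledReg_ reset to InvalidReg)
// and a temp is needed again, the existing slot is reused with a plain
// store rather than a second push, so the frame depth stays stable until
// finish() reloads lr.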

void MoveEmitterARM::breakCycle(const MoveOperand& from, const MoveOperand& to,
                                MoveOp::Type type, uint32_t slotId) {
  // A cycle of moves has the pattern:
  //   (A -> B)
  //   (B -> A)
  //
  // This case handles (A -> B), which we reach first. We save B, then allow
  // the original move to continue.

  ScratchRegisterScope scratch(masm);

  switch (type) {
    case MoveOp::FLOAT32:
      if (to.isMemory()) {
        ScratchFloat32Scope scratchFloat32(masm);
        masm.ma_vldr(toAddress(to), scratchFloat32, scratch);
        // Since it is uncertain whether the reload in completeCycle will
        // use the aligned (offset 0) or the unaligned (offset 4) subslot,
        // fill both with the same value.
        masm.ma_vstr(scratchFloat32, cycleSlot(slotId, 0), scratch);
        masm.ma_vstr(scratchFloat32, cycleSlot(slotId, 4), scratch);
      } else if (to.isGeneralReg()) {
        // Since it is uncertain whether the reload in completeCycle will
        // use the aligned (offset 0) or the unaligned (offset 4) subslot,
        // fill both with the same value.
        masm.ma_str(to.reg(), cycleSlot(slotId, 0), scratch);
        masm.ma_str(to.reg(), cycleSlot(slotId, 4), scratch);
      } else {
        FloatRegister src = to.floatReg();
        // Just always store the largest possible size. Currently, this is
        // a double. When SIMD is added, two doubles will need to be stored.
        masm.ma_vstr(src.doubleOverlay(), cycleSlot(slotId, 0), scratch);
      }
      break;
    case MoveOp::DOUBLE:
      if (to.isMemory()) {
        ScratchDoubleScope scratchDouble(masm);
        masm.ma_vldr(toAddress(to), scratchDouble, scratch);
        masm.ma_vstr(scratchDouble, cycleSlot(slotId, 0), scratch);
      } else if (to.isGeneralRegPair()) {
        ScratchDoubleScope scratchDouble(masm);
        masm.ma_vxfer(to.evenReg(), to.oddReg(), scratchDouble);
        masm.ma_vstr(scratchDouble, cycleSlot(slotId, 0), scratch);
      } else {
        masm.ma_vstr(to.floatReg().doubleOverlay(), cycleSlot(slotId, 0),
                     scratch);
      }
      break;
    case MoveOp::INT32:
    case MoveOp::GENERAL:
      // A non-VFP value; these cycles always use slot 0 (see the matching
      // assert in completeCycle).
      if (to.isMemory()) {
        Register temp = tempReg();
        masm.ma_ldr(toAddress(to), temp, scratch);
        masm.ma_str(temp, cycleSlot(0, 0), scratch);
      } else {
        if (to.reg() == spilledReg_) {
          // If the destination was spilled, restore it first.
          masm.ma_ldr(spillSlot(), spilledReg_, scratch);
          spilledReg_ = InvalidReg;
        }
        masm.ma_str(to.reg(), cycleSlot(0, 0), scratch);
      }
      break;
    default:
      MOZ_CRASH("Unexpected move type");
  }
}
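
// Concrete illustration (register names arbitrary) for the GPR cycle
//   (r0 -> r1), (r1 -> r0):
// the first move is the cycle begin, so breakCycle stores r1 (the value
// about to be clobbered) into cycleSlot(0, 0); the plain move r0 -> r1
// then runs; finally completeCycle reloads the saved value into r0.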

void MoveEmitterARM::completeCycle(const MoveOperand& from,
                                   const MoveOperand& to, MoveOp::Type type,
                                   uint32_t slotId) {
  // A cycle of moves has the pattern:
  //   (A -> B)
  //   (B -> A)
  //
  // This case handles (B -> A), which we reach last. We emit a move from the
  // saved value of B, to A.

  ScratchRegisterScope scratch(masm);

  switch (type) {
    case MoveOp::FLOAT32:
      MOZ_ASSERT(!to.isGeneralRegPair());
      if (to.isMemory()) {
        ScratchFloat32Scope scratchFloat32(masm);
        masm.ma_vldr(cycleSlot(slotId, 0), scratchFloat32, scratch);
        masm.ma_vstr(scratchFloat32, toAddress(to), scratch);
      } else if (to.isGeneralReg()) {
        MOZ_ASSERT(type == MoveOp::FLOAT32);
        masm.ma_ldr(toAddress(from), to.reg(), scratch);
      } else {
        uint32_t offset = 0;
        if ((!from.isMemory()) && from.floatReg().numAlignedAliased() == 1) {
          offset = sizeof(float);
        }
        masm.ma_vldr(cycleSlot(slotId, offset), to.floatReg(), scratch);
      }
      break;
    case MoveOp::DOUBLE:
      MOZ_ASSERT(!to.isGeneralReg());
      if (to.isMemory()) {
        ScratchDoubleScope scratchDouble(masm);
        masm.ma_vldr(cycleSlot(slotId, 0), scratchDouble, scratch);
        masm.ma_vstr(scratchDouble, toAddress(to), scratch);
      } else if (to.isGeneralRegPair()) {
        MOZ_ASSERT(type == MoveOp::DOUBLE);
        ScratchDoubleScope scratchDouble(masm);
        masm.ma_vldr(toAddress(from), scratchDouble, scratch);
        masm.ma_vxfer(scratchDouble, to.evenReg(), to.oddReg());
      } else {
        uint32_t offset = 0;
        if ((!from.isMemory()) && from.floatReg().numAlignedAliased() == 1) {
          offset = sizeof(float);
        }
        masm.ma_vldr(cycleSlot(slotId, offset), to.floatReg(), scratch);
      }
      break;
    case MoveOp::INT32:
    case MoveOp::GENERAL:
      MOZ_ASSERT(slotId == 0);
      if (to.isMemory()) {
        Register temp = tempReg();
        masm.ma_ldr(cycleSlot(slotId, 0), temp, scratch);
        masm.ma_str(temp, toAddress(to), scratch);
      } else {
        if (to.reg() == spilledReg_) {
          // Make sure we don't re-clobber the spilled register later.
          spilledReg_ = InvalidReg;
        }
        masm.ma_ldr(cycleSlot(slotId, 0), to.reg(), scratch);
      }
      break;
    default:
      MOZ_CRASH("Unexpected move type");
  }
}
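
// The two halves cooperate: for FLOAT32, breakCycle either duplicated the
// value into both 4-byte subslots or stored a full double, so whichever
// subslot offset is computed above finds the saved bits.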

void MoveEmitterARM::emitMove(const MoveOperand& from, const MoveOperand& to) {
  // Register pairs are used to store Double values during calls.
  MOZ_ASSERT(!from.isGeneralRegPair());
  MOZ_ASSERT(!to.isGeneralRegPair());

  ScratchRegisterScope scratch(masm);

  if (to.isGeneralReg() && to.reg() == spilledReg_) {
    // If the destination is the spilled register, make sure we
    // don't re-clobber its value.
    spilledReg_ = InvalidReg;
  }

  if (from.isGeneralReg()) {
    if (from.reg() == spilledReg_) {
      // If the source is a register that has been spilled, make sure
      // to load the source back into that register.
      masm.ma_ldr(spillSlot(), spilledReg_, scratch);
      spilledReg_ = InvalidReg;
    }
    if (to.isMemoryOrEffectiveAddress()) {
      masm.ma_str(from.reg(), toAddress(to), scratch);
    } else {
      masm.ma_mov(from.reg(), to.reg());
    }
  } else if (to.isGeneralReg()) {
    MOZ_ASSERT(from.isMemoryOrEffectiveAddress());
    if (from.isMemory()) {
      masm.ma_ldr(toAddress(from), to.reg(), scratch);
    } else {
      masm.ma_add(from.base(), Imm32(from.disp()), to.reg(), scratch);
    }
  } else {
    // Memory-to-memory GPR move.
    Register reg = tempReg();

    MOZ_ASSERT(from.isMemoryOrEffectiveAddress());
    if (from.isMemory()) {
      masm.ma_ldr(toAddress(from), reg, scratch);
    } else {
      masm.ma_add(from.base(), Imm32(from.disp()), reg, scratch);
    }
    MOZ_ASSERT(to.base() != reg);
    masm.ma_str(reg, toAddress(to), scratch);
  }
}
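
// Example (illustrative operands): with an effective-address source based
// at r5 with disp 8 and a stack-slot destination, the ma_add branch emits
// "add temp, r5, #8" and then stores temp, materializing the address
// itself rather than loading through it.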

void MoveEmitterARM::emitFloat32Move(const MoveOperand& from,
                                     const MoveOperand& to) {
  // Register pairs are used to store Double values during calls.
  MOZ_ASSERT(!from.isGeneralRegPair());
  MOZ_ASSERT(!to.isGeneralRegPair());

  ScratchRegisterScope scratch(masm);

  if (from.isFloatReg()) {
    if (to.isFloatReg()) {
      masm.ma_vmov_f32(from.floatReg(), to.floatReg());
    } else if (to.isGeneralReg()) {
      masm.ma_vxfer(from.floatReg(), to.reg());
    } else {
      masm.ma_vstr(VFPRegister(from.floatReg()).singleOverlay(), toAddress(to),
                   scratch);
    }
  } else if (from.isGeneralReg()) {
    if (to.isFloatReg()) {
      masm.ma_vxfer(from.reg(), to.floatReg());
    } else if (to.isGeneralReg()) {
      masm.ma_mov(from.reg(), to.reg());
    } else {
      masm.ma_str(from.reg(), toAddress(to), scratch);
    }
  } else if (to.isFloatReg()) {
    masm.ma_vldr(toAddress(from), VFPRegister(to.floatReg()).singleOverlay(),
                 scratch);
  } else if (to.isGeneralReg()) {
    masm.ma_ldr(toAddress(from), to.reg(), scratch);
  } else {
    // Memory to memory move.
    MOZ_ASSERT(from.isMemory());
    ScratchFloat32Scope scratchFloat32(masm);
    masm.ma_vldr(toAddress(from), scratchFloat32, scratch);
    masm.ma_vstr(scratchFloat32, toAddress(to), scratch);
  }
}
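
// The branches above cover all nine source/destination combinations of
// float register, general register, and memory; only memory-to-memory
// needs two instructions, round-tripping through the scratch single.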

void MoveEmitterARM::emitDoubleMove(const MoveOperand& from,
                                    const MoveOperand& to) {
  // Registers are used to store pointers / int32 / float32 values.
  MOZ_ASSERT(!from.isGeneralReg());
  MOZ_ASSERT(!to.isGeneralReg());

  ScratchRegisterScope scratch(masm);

  if (from.isFloatReg()) {
    if (to.isFloatReg()) {
      masm.ma_vmov(from.floatReg(), to.floatReg());
    } else if (to.isGeneralRegPair()) {
      masm.ma_vxfer(from.floatReg(), to.evenReg(), to.oddReg());
    } else {
      masm.ma_vstr(from.floatReg(), toAddress(to), scratch);
    }
  } else if (from.isGeneralRegPair()) {
    if (to.isFloatReg()) {
      masm.ma_vxfer(from.evenReg(), from.oddReg(), to.floatReg());
    } else if (to.isGeneralRegPair()) {
      MOZ_ASSERT(!from.aliases(to));
      masm.ma_mov(from.evenReg(), to.evenReg());
      masm.ma_mov(from.oddReg(), to.oddReg());
    } else {
      ScratchDoubleScope scratchDouble(masm);
      masm.ma_vxfer(from.evenReg(), from.oddReg(), scratchDouble);
      masm.ma_vstr(scratchDouble, toAddress(to), scratch);
    }
  } else if (to.isFloatReg()) {
    masm.ma_vldr(toAddress(from), to.floatReg(), scratch);
  } else if (to.isGeneralRegPair()) {
    MOZ_ASSERT(from.isMemory());
    Address src = toAddress(from);
    // Note: We can safely use the MoveOperand's displacement here,
    // even if the base is SP: MoveEmitter::toOperand adjusts
    // SP-relative operands by the difference between the current
    // stack usage and stackAdjust, which emitter.finish() resets to
    // 0.
    //
    // Warning: if the offset isn't within [-255,+255] then this
    // will assert-fail (or, if non-debug, load the wrong words).
    // Nothing uses such an offset at the time of this writing.
    masm.ma_ldrd(EDtrAddr(src.base, EDtrOffImm(src.offset)), to.evenReg(),
                 to.oddReg());
  } else {
    // Memory to memory move.
    MOZ_ASSERT(from.isMemory());
    ScratchDoubleScope scratchDouble(masm);
    masm.ma_vldr(toAddress(from), scratchDouble, scratch);
    masm.ma_vstr(scratchDouble, toAddress(to), scratch);
  }
}
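
// The [-255,+255] bound above reflects ldrd's addressing mode: its
// immediate is an 8-bit magnitude with a separate add/subtract bit. A
// hypothetical larger offset would first need the address formed in a
// scratch register.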

void MoveEmitterARM::emit(const MoveOp& move) {
  const MoveOperand& from = move.from();
  const MoveOperand& to = move.to();

  if (move.isCycleEnd() && move.isCycleBegin()) {
    // A fun consequence of aliased registers is you can have multiple
    // cycles at once, and one can end exactly where another begins.
    breakCycle(from, to, move.endCycleType(), move.cycleBeginSlot());
    completeCycle(from, to, move.type(), move.cycleEndSlot());
    return;
  }

  if (move.isCycleEnd()) {
    MOZ_ASSERT(inCycle_);
    completeCycle(from, to, move.type(), move.cycleEndSlot());
    MOZ_ASSERT(inCycle_ > 0);
    inCycle_--;
    return;
  }

  if (move.isCycleBegin()) {
    breakCycle(from, to, move.endCycleType(), move.cycleBeginSlot());
    inCycle_++;
  }

  switch (move.type()) {
    case MoveOp::FLOAT32:
      emitFloat32Move(from, to);
      break;
    case MoveOp::DOUBLE:
      emitDoubleMove(from, to);
      break;
    case MoveOp::INT32:
    case MoveOp::GENERAL:
      emitMove(from, to);
      break;
    default:
      MOZ_CRASH("Unexpected move type");
  }
}
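
// Sequencing sketch for one cycle (illustrative):
//   begin move:  breakCycle(...) saves the destination, inCycle_++,
//                then the move itself is emitted by the switch above;
//   inner moves: emitted as plain moves;
//   end move:    completeCycle(...) restores from the slot, inCycle_--.
// A move that both begins and ends a cycle takes the first branch and
// emits no plain move at all.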

void MoveEmitterARM::assertDone() { MOZ_ASSERT(inCycle_ == 0); }

void MoveEmitterARM::finish() {
  assertDone();

  if (pushedAtSpill_ != -1 && spilledReg_ != InvalidReg) {
    ScratchRegisterScope scratch(masm);
    masm.ma_ldr(spillSlot(), spilledReg_, scratch);
  }
  masm.freeStack(masm.framePushed() - pushedAtStart_);
}