/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/arm/MoveEmitter-arm.h"

#include "jit/MacroAssembler-inl.h"

using namespace js;
using namespace js::jit;
13 
// Construct an emitter over |masm| with no spilled scratch register and no
// cycle storage reserved yet (both sentinels are -1 / Invalid*).
MoveEmitterARM::MoveEmitterARM(MacroAssembler& masm)
  : inCycle_(0),
    masm(masm),
    pushedAtCycle_(-1),
    pushedAtSpill_(-1),
    spilledReg_(InvalidReg),
    spilledFloatReg_(InvalidFloatReg)
{
    // Record the frame depth at construction; everything reserved past this
    // point (spill slot, cycle slots) is released again in finish().
    pushedAtStart_ = masm.framePushed();
}
24 
25 void
emit(const MoveResolver & moves)26 MoveEmitterARM::emit(const MoveResolver& moves)
27 {
28     if (moves.numCycles()) {
29         // Reserve stack for cycle resolution
30         masm.reserveStack(moves.numCycles() * sizeof(double));
31         pushedAtCycle_ = masm.framePushed();
32     }
33 
34     for (size_t i = 0; i < moves.numMoves(); i++)
35         emit(moves.getMove(i));
36 }
37 
MoveEmitterARM::~MoveEmitterARM()
{
    // Every cycle begun must have been completed before the emitter dies.
    assertDone();
}
42 
43 Address
cycleSlot(uint32_t slot,uint32_t subslot) const44 MoveEmitterARM::cycleSlot(uint32_t slot, uint32_t subslot) const
45 {
46     int32_t offset =  masm.framePushed() - pushedAtCycle_;
47     MOZ_ASSERT(offset < 4096 && offset > -4096);
48     return Address(StackPointer, offset + slot * sizeof(double) + subslot);
49 }
50 
51 Address
spillSlot() const52 MoveEmitterARM::spillSlot() const
53 {
54     int32_t offset =  masm.framePushed() - pushedAtSpill_;
55     MOZ_ASSERT(offset < 4096 && offset > -4096);
56     return Address(StackPointer, offset);
57 }
58 
59 Address
toAddress(const MoveOperand & operand) const60 MoveEmitterARM::toAddress(const MoveOperand& operand) const
61 {
62     MOZ_ASSERT(operand.isMemoryOrEffectiveAddress());
63 
64     if (operand.base() != StackPointer) {
65         MOZ_ASSERT(operand.disp() < 1024 && operand.disp() > -1024);
66         return Operand(operand.base(), operand.disp()).toAddress();
67     }
68 
69     MOZ_ASSERT(operand.disp() >= 0);
70 
71     // Otherwise, the stack offset may need to be adjusted.
72     return Address(StackPointer, operand.disp() + (masm.framePushed() - pushedAtStart_));
73 }
74 
75 Register
tempReg()76 MoveEmitterARM::tempReg()
77 {
78     if (spilledReg_ != InvalidReg)
79         return spilledReg_;
80 
81     // For now, just pick r12/ip as the eviction point. This is totally random,
82     // and if it ends up being bad, we can use actual heuristics later. r12 is
83     // actually a bad choice. It is the scratch register, which is frequently
84     // used for address computations, such as those found when we attempt to
85     // access values more than 4096 off of the stack pointer. Instead, use lr,
86     // the LinkRegister.
87     spilledReg_ = r14;
88     if (pushedAtSpill_ == -1) {
89         masm.Push(spilledReg_);
90         pushedAtSpill_ = masm.framePushed();
91     } else {
92         masm.ma_str(spilledReg_, spillSlot());
93     }
94     return spilledReg_;
95 }
96 
// Save the destination of the first move in a cycle into a cycle slot so the
// overwriting move can proceed; completeCycle() later restores it.
void
MoveEmitterARM::breakCycle(const MoveOperand& from, const MoveOperand& to,
                           MoveOp::Type type, uint32_t slotId)
{
    // There is some pattern:
    //   (A -> B)
    //   (B -> A)
    //
    // This case handles (A -> B), which we reach first. We save B, then allow
    // the original move to continue.
    switch (type) {
      case MoveOp::FLOAT32:
        if (to.isMemory()) {
            VFPRegister temp = ScratchFloat32Reg;
            masm.ma_vldr(toAddress(to), temp);
            // Since it is uncertain if the load will be aligned or not
            // just fill both of them with the same value.
            masm.ma_vstr(temp, cycleSlot(slotId, 0));
            masm.ma_vstr(temp, cycleSlot(slotId, 4));
        } else if (to.isGeneralReg()) {
            // Since it is uncertain if the load will be aligned or not
            // just fill both of them with the same value.
            masm.ma_str(to.reg(), cycleSlot(slotId, 0));
            masm.ma_str(to.reg(), cycleSlot(slotId, 4));
        } else {
            FloatRegister src = to.floatReg();
            // Just always store the largest possible size. Currently, this is
            // a double. When SIMD is added, two doubles will need to be stored.
            masm.ma_vstr(src.doubleOverlay(), cycleSlot(slotId, 0));
        }
        break;
      case MoveOp::DOUBLE:
        if (to.isMemory()) {
            // Bounce the memory destination through the double scratch reg.
            ScratchDoubleScope scratch(masm);
            masm.ma_vldr(toAddress(to), scratch);
            masm.ma_vstr(scratch, cycleSlot(slotId, 0));
        } else if (to.isGeneralRegPair()) {
            // A double held in an even/odd GPR pair (soft-FP calls).
            ScratchDoubleScope scratch(masm);
            masm.ma_vxfer(to.evenReg(), to.oddReg(), scratch);
            masm.ma_vstr(scratch, cycleSlot(slotId, 0));
        } else {
            masm.ma_vstr(to.floatReg().doubleOverlay(), cycleSlot(slotId, 0));
        }
        break;
      case MoveOp::INT32:
      case MoveOp::GENERAL:
        // an non-vfp value
        // NOTE(review): GPR cycles always use slot 0 (not slotId) here;
        // completeCycle() asserts slotId == 0 for these types, so the two
        // agree — confirm the resolver never allocates a GPR cycle elsewhere.
        if (to.isMemory()) {
            Register temp = tempReg();
            masm.ma_ldr(toAddress(to), temp);
            masm.ma_str(temp, cycleSlot(0,0));
        } else {
            if (to.reg() == spilledReg_) {
                // If the destination was spilled, restore it first.
                masm.ma_ldr(spillSlot(), spilledReg_);
                spilledReg_ = InvalidReg;
            }
            masm.ma_str(to.reg(), cycleSlot(0,0));
        }
        break;
      default:
        MOZ_CRASH("Unexpected move type");
    }
}
161 
// Emit the final move of a cycle: restore the value that breakCycle() saved
// (or, for some register destinations, reload it from |from| directly).
void
MoveEmitterARM::completeCycle(const MoveOperand& from, const MoveOperand& to, MoveOp::Type type, uint32_t slotId)
{
    // There is some pattern:
    //   (A -> B)
    //   (B -> A)
    //
    // This case handles (B -> A), which we reach last. We emit a move from the
    // saved value of B, to A.
    switch (type) {
      case MoveOp::FLOAT32:
      case MoveOp::DOUBLE:
        if (to.isMemory()) {
            // Slot -> memory via the double scratch register.
            ScratchDoubleScope scratch(masm);
            masm.ma_vldr(cycleSlot(slotId, 0), scratch);
            masm.ma_vstr(scratch, toAddress(to));
        } else if (to.isGeneralReg()) {
            MOZ_ASSERT(type == MoveOp::FLOAT32);
            // NOTE(review): loads from |from| rather than the cycle slot —
            // presumably |from| is still intact at this point; confirm
            // against the resolver's ordering guarantees.
            masm.ma_ldr(toAddress(from), to.reg());
        } else if (to.isGeneralRegPair()) {
            MOZ_ASSERT(type == MoveOp::DOUBLE);
            // Same pattern as above, widened through the double scratch reg.
            ScratchDoubleScope scratch(masm);
            masm.ma_vldr(toAddress(from), scratch);
            masm.ma_vxfer(scratch, to.evenReg(), to.oddReg());
        } else {
            uint32_t offset = 0;
            // Pick the 4-byte subslot matching the single register's
            // alignment within its aliased double (breakCycle stored the
            // value into both subslots for FLOAT32).
            if ((!from.isMemory()) && from.floatReg().numAlignedAliased() == 1)
                offset = sizeof(float);
            masm.ma_vldr(cycleSlot(slotId, offset), to.floatReg());
        }
        break;
      case MoveOp::INT32:
      case MoveOp::GENERAL:
        // GPR cycles always live in slot 0 (see breakCycle).
        MOZ_ASSERT(slotId == 0);
        if (to.isMemory()) {
            Register temp = tempReg();
            masm.ma_ldr(cycleSlot(slotId, 0), temp);
            masm.ma_str(temp, toAddress(to));
        } else {
            if (to.reg() == spilledReg_) {
                // Make sure we don't re-clobber the spilled register later.
                spilledReg_ = InvalidReg;
            }
            masm.ma_ldr(cycleSlot(slotId, 0), to.reg());
        }
        break;
      default:
        MOZ_CRASH("Unexpected move type");
    }
}
212 
213 void
emitMove(const MoveOperand & from,const MoveOperand & to)214 MoveEmitterARM::emitMove(const MoveOperand& from, const MoveOperand& to)
215 {
216     // Register pairs are used to store Double values during calls.
217     MOZ_ASSERT(!from.isGeneralRegPair());
218     MOZ_ASSERT(!to.isGeneralRegPair());
219 
220     if (to.isGeneralReg() && to.reg() == spilledReg_) {
221         // If the destination is the spilled register, make sure we
222         // don't re-clobber its value.
223         spilledReg_ = InvalidReg;
224     }
225 
226     if (from.isGeneralReg()) {
227         if (from.reg() == spilledReg_) {
228             // If the source is a register that has been spilled, make sure
229             // to load the source back into that register.
230             masm.ma_ldr(spillSlot(), spilledReg_);
231             spilledReg_ = InvalidReg;
232         }
233         if (to.isMemoryOrEffectiveAddress())
234             masm.ma_str(from.reg(), toAddress(to));
235         else
236             masm.ma_mov(from.reg(), to.reg());
237     } else if (to.isGeneralReg()) {
238         MOZ_ASSERT(from.isMemoryOrEffectiveAddress());
239         if (from.isMemory())
240             masm.ma_ldr(toAddress(from), to.reg());
241         else
242             masm.ma_add(from.base(), Imm32(from.disp()), to.reg());
243     } else {
244         // Memory to memory gpr move.
245         Register reg = tempReg();
246 
247         MOZ_ASSERT(from.isMemoryOrEffectiveAddress());
248         if (from.isMemory())
249             masm.ma_ldr(toAddress(from), reg);
250         else
251             masm.ma_add(from.base(), Imm32(from.disp()), reg);
252         MOZ_ASSERT(to.base() != reg);
253         masm.ma_str(reg, toAddress(to));
254     }
255 }
256 
257 void
emitFloat32Move(const MoveOperand & from,const MoveOperand & to)258 MoveEmitterARM::emitFloat32Move(const MoveOperand& from, const MoveOperand& to)
259 {
260     // Register pairs are used to store Double values during calls.
261     MOZ_ASSERT(!from.isGeneralRegPair());
262     MOZ_ASSERT(!to.isGeneralRegPair());
263 
264     if (from.isFloatReg()) {
265         if (to.isFloatReg())
266             masm.ma_vmov_f32(from.floatReg(), to.floatReg());
267         else if (to.isGeneralReg())
268             masm.ma_vxfer(from.floatReg(), to.reg());
269         else
270             masm.ma_vstr(VFPRegister(from.floatReg()).singleOverlay(), toAddress(to));
271     } else if (from.isGeneralReg()) {
272         if (to.isFloatReg())
273             masm.ma_vxfer(from.reg(), to.floatReg());
274         else if (to.isGeneralReg())
275             masm.ma_mov(from.reg(), to.reg());
276         else
277             masm.ma_str(from.reg(), toAddress(to));
278     } else if (to.isFloatReg()) {
279         masm.ma_vldr(toAddress(from), VFPRegister(to.floatReg()).singleOverlay());
280     } else if (to.isGeneralReg()) {
281         masm.ma_ldr(toAddress(from), to.reg());
282     } else {
283         // Memory to memory move.
284         MOZ_ASSERT(from.isMemory());
285         FloatRegister reg = ScratchFloat32Reg;
286         masm.ma_vldr(toAddress(from), VFPRegister(reg).singleOverlay());
287         masm.ma_vstr(VFPRegister(reg).singleOverlay(), toAddress(to));
288     }
289 }
290 
291 void
emitDoubleMove(const MoveOperand & from,const MoveOperand & to)292 MoveEmitterARM::emitDoubleMove(const MoveOperand& from, const MoveOperand& to)
293 {
294     // Registers are used to store pointers / int32 / float32 values.
295     MOZ_ASSERT(!from.isGeneralReg());
296     MOZ_ASSERT(!to.isGeneralReg());
297 
298     if (from.isFloatReg()) {
299         if (to.isFloatReg())
300             masm.ma_vmov(from.floatReg(), to.floatReg());
301         else if (to.isGeneralRegPair())
302             masm.ma_vxfer(from.floatReg(), to.evenReg(), to.oddReg());
303         else
304             masm.ma_vstr(from.floatReg(), toAddress(to));
305     } else if (from.isGeneralRegPair()) {
306         if (to.isFloatReg())
307             masm.ma_vxfer(from.evenReg(), from.oddReg(), to.floatReg());
308         else if (to.isGeneralRegPair()) {
309             MOZ_ASSERT(!from.aliases(to));
310             masm.ma_mov(from.evenReg(), to.evenReg());
311             masm.ma_mov(from.oddReg(), to.oddReg());
312         } else {
313             FloatRegister reg = ScratchDoubleReg;
314             masm.ma_vxfer(from.evenReg(), from.oddReg(), reg);
315             masm.ma_vstr(reg, toAddress(to));
316         }
317     } else if (to.isFloatReg()) {
318         masm.ma_vldr(toAddress(from), to.floatReg());
319     } else if (to.isGeneralRegPair()) {
320         MOZ_ASSERT(from.isMemory());
321         Address src = toAddress(from);
322         // Note: We can safely use the MoveOperand's displacement here,
323         // even if the base is SP: MoveEmitter::toOperand adjusts
324         // SP-relative operands by the difference between the current
325         // stack usage and stackAdjust, which emitter.finish() resets to
326         // 0.
327         //
328         // Warning: if the offset isn't within [-255,+255] then this
329         // will assert-fail (or, if non-debug, load the wrong words).
330         // Nothing uses such an offset at the time of this writing.
331         masm.ma_ldrd(EDtrAddr(src.base, EDtrOffImm(src.offset)), to.evenReg(), to.oddReg());
332     } else {
333         // Memory to memory move.
334         MOZ_ASSERT(from.isMemory());
335         ScratchDoubleScope scratch(masm);
336         masm.ma_vldr(toAddress(from), scratch);
337         masm.ma_vstr(scratch, toAddress(to));
338     }
339 }
340 
341 void
emit(const MoveOp & move)342 MoveEmitterARM::emit(const MoveOp& move)
343 {
344     const MoveOperand& from = move.from();
345     const MoveOperand& to = move.to();
346 
347     if (move.isCycleEnd() && move.isCycleBegin()) {
348         // A fun consequence of aliased registers is you can have multiple
349         // cycles at once, and one can end exactly where another begins.
350         breakCycle(from, to, move.endCycleType(), move.cycleBeginSlot());
351         completeCycle(from, to, move.type(), move.cycleEndSlot());
352         return;
353     }
354 
355     if (move.isCycleEnd()) {
356         MOZ_ASSERT(inCycle_);
357         completeCycle(from, to, move.type(), move.cycleEndSlot());
358         MOZ_ASSERT(inCycle_ > 0);
359         inCycle_--;
360         return;
361     }
362 
363     if (move.isCycleBegin()) {
364         breakCycle(from, to, move.endCycleType(), move.cycleBeginSlot());
365         inCycle_++;
366     }
367 
368     switch (move.type()) {
369       case MoveOp::FLOAT32:
370         emitFloat32Move(from, to);
371         break;
372       case MoveOp::DOUBLE:
373         emitDoubleMove(from, to);
374         break;
375       case MoveOp::INT32:
376       case MoveOp::GENERAL:
377         emitMove(from, to);
378         break;
379       default:
380         MOZ_CRASH("Unexpected move type");
381     }
382 }
383 
void
MoveEmitterARM::assertDone()
{
    // Every breakCycle must have been paired with a completeCycle.
    MOZ_ASSERT(inCycle_ == 0);
}
389 
390 void
finish()391 MoveEmitterARM::finish()
392 {
393     assertDone();
394 
395     if (pushedAtSpill_ != -1 && spilledReg_ != InvalidReg)
396         masm.ma_ldr(spillSlot(), spilledReg_);
397     masm.freeStack(masm.framePushed() - pushedAtStart_);
398 }
399