/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
6 
7 #ifndef jit_arm_Assembler_arm_h
8 #define jit_arm_Assembler_arm_h
9 
10 #include "mozilla/Attributes.h"
11 #include "mozilla/MathAlgorithms.h"
12 
13 #include <algorithm>
14 #include <iterator>
15 
16 #include "jit/arm/Architecture-arm.h"
17 #include "jit/arm/disasm/Disasm-arm.h"
18 #include "jit/CompactBuffer.h"
19 #include "jit/JitCode.h"
20 #include "jit/shared/Assembler-shared.h"
21 #include "jit/shared/Disassembler-shared.h"
22 #include "jit/shared/IonAssemblerBufferWithConstantPools.h"
23 #include "wasm/WasmTypes.h"
24 
25 union PoolHintPun;
26 
27 namespace js {
28 namespace jit {
29 
30 using LiteralDoc = DisassemblerSpew::LiteralDoc;
31 using LabelDoc = DisassemblerSpew::LabelDoc;
32 
33 // NOTE: there are duplicates in this list! Sometimes we want to specifically
34 // refer to the link register as a link register (bl lr is much clearer than bl
35 // r14). HOWEVER, this register can easily be a gpr when it is not busy holding
36 // the return address.
37 static constexpr Register r0{Registers::r0};
38 static constexpr Register r1{Registers::r1};
39 static constexpr Register r2{Registers::r2};
40 static constexpr Register r3{Registers::r3};
41 static constexpr Register r4{Registers::r4};
42 static constexpr Register r5{Registers::r5};
43 static constexpr Register r6{Registers::r6};
44 static constexpr Register r7{Registers::r7};
45 static constexpr Register r8{Registers::r8};
46 static constexpr Register r9{Registers::r9};
47 static constexpr Register r10{Registers::r10};
48 static constexpr Register r11{Registers::r11};
49 static constexpr Register r12{Registers::ip};
50 static constexpr Register ip{Registers::ip};
51 static constexpr Register sp{Registers::sp};
52 static constexpr Register r14{Registers::lr};
53 static constexpr Register lr{Registers::lr};
54 static constexpr Register pc{Registers::pc};
55 
56 static constexpr Register ScratchRegister{Registers::ip};
57 
58 // Helper class for ScratchRegister usage. Asserts that only one piece
59 // of code thinks it has exclusive ownership of the scratch register.
60 struct ScratchRegisterScope : public AutoRegisterScope {
ScratchRegisterScopeScratchRegisterScope61   explicit ScratchRegisterScope(MacroAssembler& masm)
62       : AutoRegisterScope(masm, ScratchRegister) {}
63 };
64 
65 struct SecondScratchRegisterScope : public AutoRegisterScope {
66   explicit SecondScratchRegisterScope(MacroAssembler& masm);
67 };
68 
69 static constexpr Register OsrFrameReg = r3;
70 static constexpr Register CallTempReg0 = r5;
71 static constexpr Register CallTempReg1 = r6;
72 static constexpr Register CallTempReg2 = r7;
73 static constexpr Register CallTempReg3 = r8;
74 static constexpr Register CallTempReg4 = r0;
75 static constexpr Register CallTempReg5 = r1;
76 
77 static constexpr Register IntArgReg0 = r0;
78 static constexpr Register IntArgReg1 = r1;
79 static constexpr Register IntArgReg2 = r2;
80 static constexpr Register IntArgReg3 = r3;
81 static constexpr Register HeapReg = r10;
82 static constexpr Register CallTempNonArgRegs[] = {r5, r6, r7, r8};
83 static const uint32_t NumCallTempNonArgRegs = std::size(CallTempNonArgRegs);
84 
85 // These register assignments for the 64-bit atomic ops are frequently too
86 // constraining, but we have no way of expressing looser constraints to the
87 // register allocator.
88 
89 // CompareExchange: Any two odd/even pairs would do for `new` and `out`, and any
90 // pair would do for `old`, so long as none of them overlap.
91 
92 static constexpr Register CmpXchgOldLo = r4;
93 static constexpr Register CmpXchgOldHi = r5;
94 static constexpr Register64 CmpXchgOld64 =
95     Register64(CmpXchgOldHi, CmpXchgOldLo);
96 static constexpr Register CmpXchgNewLo = IntArgReg2;
97 static constexpr Register CmpXchgNewHi = IntArgReg3;
98 static constexpr Register64 CmpXchgNew64 =
99     Register64(CmpXchgNewHi, CmpXchgNewLo);
100 static constexpr Register CmpXchgOutLo = IntArgReg0;
101 static constexpr Register CmpXchgOutHi = IntArgReg1;
102 static constexpr Register64 CmpXchgOut64 =
103     Register64(CmpXchgOutHi, CmpXchgOutLo);
104 
105 // Exchange: Any two non-equal odd/even pairs would do for `new` and `out`.
106 
107 static constexpr Register XchgNewLo = IntArgReg2;
108 static constexpr Register XchgNewHi = IntArgReg3;
109 static constexpr Register64 XchgNew64 = Register64(XchgNewHi, XchgNewLo);
110 static constexpr Register XchgOutLo = IntArgReg0;
111 static constexpr Register XchgOutHi = IntArgReg1;
112 
113 // Atomic rmw operations: Any two odd/even pairs would do for `tmp` and `out`,
114 // and any pair would do for `val`, so long as none of them overlap.
115 
116 static constexpr Register FetchOpValLo = r4;
117 static constexpr Register FetchOpValHi = r5;
118 static constexpr Register64 FetchOpVal64 =
119     Register64(FetchOpValHi, FetchOpValLo);
120 static constexpr Register FetchOpTmpLo = IntArgReg2;
121 static constexpr Register FetchOpTmpHi = IntArgReg3;
122 static constexpr Register64 FetchOpTmp64 =
123     Register64(FetchOpTmpHi, FetchOpTmpLo);
124 static constexpr Register FetchOpOutLo = IntArgReg0;
125 static constexpr Register FetchOpOutHi = IntArgReg1;
126 static constexpr Register64 FetchOpOut64 =
127     Register64(FetchOpOutHi, FetchOpOutLo);
128 
129 class ABIArgGenerator {
130   unsigned intRegIndex_;
131   unsigned floatRegIndex_;
132   uint32_t stackOffset_;
133   ABIArg current_;
134 
135   // ARM can either use HardFp (use float registers for float arguments), or
136   // SoftFp (use general registers for float arguments) ABI.  We keep this
137   // switch as a runtime switch because wasm always use the HardFp back-end
138   // while the calls to native functions have to use the one provided by the
139   // system.
140   bool useHardFp_;
141 
142   ABIArg softNext(MIRType argType);
143   ABIArg hardNext(MIRType argType);
144 
145  public:
146   ABIArgGenerator();
147 
setUseHardFp(bool useHardFp)148   void setUseHardFp(bool useHardFp) {
149     MOZ_ASSERT(intRegIndex_ == 0 && floatRegIndex_ == 0);
150     useHardFp_ = useHardFp;
151   }
152   ABIArg next(MIRType argType);
current()153   ABIArg& current() { return current_; }
stackBytesConsumedSoFar()154   uint32_t stackBytesConsumedSoFar() const { return stackOffset_; }
increaseStackOffset(uint32_t bytes)155   void increaseStackOffset(uint32_t bytes) { stackOffset_ += bytes; }
156 };
157 
158 bool IsUnaligned(const wasm::MemoryAccessDesc& access);
159 
160 // These registers may be volatile or nonvolatile.
161 static constexpr Register ABINonArgReg0 = r4;
162 static constexpr Register ABINonArgReg1 = r5;
163 static constexpr Register ABINonArgReg2 = r6;
164 static constexpr Register ABINonArgReg3 = r7;
165 
166 // This register may be volatile or nonvolatile. Avoid d15 which is the
167 // ScratchDoubleReg_.
168 static constexpr FloatRegister ABINonArgDoubleReg{FloatRegisters::d8,
169                                                   VFPRegister::Double};
170 
171 // These registers may be volatile or nonvolatile.
172 // Note: these three registers are all guaranteed to be different
173 static constexpr Register ABINonArgReturnReg0 = r4;
174 static constexpr Register ABINonArgReturnReg1 = r5;
175 static constexpr Register ABINonVolatileReg = r6;
176 
177 // This register is guaranteed to be clobberable during the prologue and
178 // epilogue of an ABI call which must preserve both ABI argument, return
179 // and non-volatile registers.
180 static constexpr Register ABINonArgReturnVolatileReg = lr;
181 
182 // TLS pointer argument register for WebAssembly functions. This must not alias
183 // any other register used for passing function arguments or return values.
184 // Preserved by WebAssembly functions.
185 static constexpr Register WasmTlsReg = r9;
186 
187 // Registers used for wasm table calls. These registers must be disjoint
188 // from the ABI argument registers, WasmTlsReg and each other.
189 static constexpr Register WasmTableCallScratchReg0 = ABINonArgReg0;
190 static constexpr Register WasmTableCallScratchReg1 = ABINonArgReg1;
191 static constexpr Register WasmTableCallSigReg = ABINonArgReg2;
192 static constexpr Register WasmTableCallIndexReg = ABINonArgReg3;
193 
194 // Register used as a scratch along the return path in the fast js -> wasm stub
195 // code.  This must not overlap ReturnReg, JSReturnOperand, or WasmTlsReg.  It
196 // must be a volatile register.
197 static constexpr Register WasmJitEntryReturnScratch = r5;
198 
199 // Register used to store a reference to an exception thrown by Wasm to an
200 // exception handling block. Should not overlap with WasmTlsReg.
201 static constexpr Register WasmExceptionReg = ABINonArgReg2;
202 
203 static constexpr Register PreBarrierReg = r1;
204 
205 static constexpr Register InterpreterPCReg = r9;
206 
207 static constexpr Register InvalidReg{Registers::invalid_reg};
208 static constexpr FloatRegister InvalidFloatReg;
209 
210 static constexpr Register JSReturnReg_Type = r3;
211 static constexpr Register JSReturnReg_Data = r2;
212 static constexpr Register StackPointer = sp;
213 static constexpr Register FramePointer = r11;
214 static constexpr Register ReturnReg = r0;
215 static constexpr Register64 ReturnReg64(r1, r0);
216 
217 // The attribute '__value_in_regs' alters the calling convention of a function
218 // so that a structure of up to four elements can be returned via the argument
219 // registers rather than being written to memory.
220 static constexpr Register ReturnRegVal0 = IntArgReg0;
221 static constexpr Register ReturnRegVal1 = IntArgReg1;
222 static constexpr Register ReturnRegVal2 = IntArgReg2;
223 static constexpr Register ReturnRegVal3 = IntArgReg3;
224 
225 static constexpr FloatRegister ReturnFloat32Reg = {FloatRegisters::d0,
226                                                    VFPRegister::Single};
227 static constexpr FloatRegister ReturnDoubleReg = {FloatRegisters::d0,
228                                                   VFPRegister::Double};
229 static constexpr FloatRegister ReturnSimd128Reg = InvalidFloatReg;
230 static constexpr FloatRegister ScratchFloat32Reg_ = {FloatRegisters::s30,
231                                                      VFPRegister::Single};
232 static constexpr FloatRegister ScratchDoubleReg_ = {FloatRegisters::d15,
233                                                     VFPRegister::Double};
234 static constexpr FloatRegister ScratchSimd128Reg = InvalidFloatReg;
235 static constexpr FloatRegister ScratchUIntReg = {FloatRegisters::d15,
236                                                  VFPRegister::UInt};
237 static constexpr FloatRegister ScratchIntReg = {FloatRegisters::d15,
238                                                 VFPRegister::Int};
239 
240 // Do not reference ScratchFloat32Reg_ directly, use ScratchFloat32Scope
241 // instead.
242 struct ScratchFloat32Scope : public AutoFloatRegisterScope {
ScratchFloat32ScopeScratchFloat32Scope243   explicit ScratchFloat32Scope(MacroAssembler& masm)
244       : AutoFloatRegisterScope(masm, ScratchFloat32Reg_) {}
245 };
246 
247 // Do not reference ScratchDoubleReg_ directly, use ScratchDoubleScope instead.
248 struct ScratchDoubleScope : public AutoFloatRegisterScope {
ScratchDoubleScopeScratchDoubleScope249   explicit ScratchDoubleScope(MacroAssembler& masm)
250       : AutoFloatRegisterScope(masm, ScratchDoubleReg_) {}
251 };
252 
253 // Registerd used in RegExpMatcher instruction (do not use JSReturnOperand).
254 static constexpr Register RegExpMatcherRegExpReg = CallTempReg0;
255 static constexpr Register RegExpMatcherStringReg = CallTempReg1;
256 static constexpr Register RegExpMatcherLastIndexReg = CallTempReg2;
257 
258 // Registerd used in RegExpTester instruction (do not use ReturnReg).
259 static constexpr Register RegExpTesterRegExpReg = CallTempReg0;
260 static constexpr Register RegExpTesterStringReg = CallTempReg1;
261 static constexpr Register RegExpTesterLastIndexReg = CallTempReg2;
262 
263 static constexpr FloatRegister d0 = {FloatRegisters::d0, VFPRegister::Double};
264 static constexpr FloatRegister d1 = {FloatRegisters::d1, VFPRegister::Double};
265 static constexpr FloatRegister d2 = {FloatRegisters::d2, VFPRegister::Double};
266 static constexpr FloatRegister d3 = {FloatRegisters::d3, VFPRegister::Double};
267 static constexpr FloatRegister d4 = {FloatRegisters::d4, VFPRegister::Double};
268 static constexpr FloatRegister d5 = {FloatRegisters::d5, VFPRegister::Double};
269 static constexpr FloatRegister d6 = {FloatRegisters::d6, VFPRegister::Double};
270 static constexpr FloatRegister d7 = {FloatRegisters::d7, VFPRegister::Double};
271 static constexpr FloatRegister d8 = {FloatRegisters::d8, VFPRegister::Double};
272 static constexpr FloatRegister d9 = {FloatRegisters::d9, VFPRegister::Double};
273 static constexpr FloatRegister d10 = {FloatRegisters::d10, VFPRegister::Double};
274 static constexpr FloatRegister d11 = {FloatRegisters::d11, VFPRegister::Double};
275 static constexpr FloatRegister d12 = {FloatRegisters::d12, VFPRegister::Double};
276 static constexpr FloatRegister d13 = {FloatRegisters::d13, VFPRegister::Double};
277 static constexpr FloatRegister d14 = {FloatRegisters::d14, VFPRegister::Double};
278 static constexpr FloatRegister d15 = {FloatRegisters::d15, VFPRegister::Double};
279 
280 // For maximal awesomeness, 8 should be sufficent. ldrd/strd (dual-register
281 // load/store) operate in a single cycle when the address they are dealing with
282 // is 8 byte aligned. Also, the ARM abi wants the stack to be 8 byte aligned at
283 // function boundaries. I'm trying to make sure this is always true.
284 static constexpr uint32_t ABIStackAlignment = 8;
285 static constexpr uint32_t CodeAlignment = 8;
286 static constexpr uint32_t JitStackAlignment = 8;
287 
288 static constexpr uint32_t JitStackValueAlignment =
289     JitStackAlignment / sizeof(Value);
290 static_assert(JitStackAlignment % sizeof(Value) == 0 &&
291                   JitStackValueAlignment >= 1,
292               "Stack alignment should be a non-zero multiple of sizeof(Value)");
293 
294 static constexpr uint32_t SimdMemoryAlignment = 8;
295 
296 static_assert(CodeAlignment % SimdMemoryAlignment == 0,
297               "Code alignment should be larger than any of the alignments "
298               "which are used for "
299               "the constant sections of the code buffer.  Thus it should be "
300               "larger than the "
301               "alignment for SIMD constants.");
302 
303 static_assert(JitStackAlignment % SimdMemoryAlignment == 0,
304               "Stack alignment should be larger than any of the alignments "
305               "which are used for "
306               "spilled values.  Thus it should be larger than the alignment "
307               "for SIMD accesses.");
308 
309 static const uint32_t WasmStackAlignment = SimdMemoryAlignment;
310 static const uint32_t WasmTrapInstructionLength = 4;
311 
312 // See comments in wasm::GenerateFunctionPrologue.  The difference between these
313 // is the size of the largest callable prologue on the platform.
314 static constexpr uint32_t WasmCheckedCallEntryOffset = 0u;
315 static constexpr uint32_t WasmCheckedTailEntryOffset = 12u;
316 
317 static const Scale ScalePointer = TimesFour;
318 
319 class Instruction;
320 class InstBranchImm;
321 uint32_t RM(Register r);
322 uint32_t RS(Register r);
323 uint32_t RD(Register r);
324 uint32_t RT(Register r);
325 uint32_t RN(Register r);
326 
327 uint32_t maybeRD(Register r);
328 uint32_t maybeRT(Register r);
329 uint32_t maybeRN(Register r);
330 
331 Register toRN(Instruction i);
332 Register toRM(Instruction i);
333 Register toRD(Instruction i);
334 Register toR(Instruction i);
335 
336 class VFPRegister;
337 uint32_t VD(VFPRegister vr);
338 uint32_t VN(VFPRegister vr);
339 uint32_t VM(VFPRegister vr);
340 
341 // For being passed into the generic vfp instruction generator when there is an
342 // instruction that only takes two registers.
343 static constexpr VFPRegister NoVFPRegister(VFPRegister::Double, 0, false, true);
344 
345 struct ImmTag : public Imm32 {
ImmTagImmTag346   explicit ImmTag(JSValueTag mask) : Imm32(int32_t(mask)) {}
347 };
348 
349 struct ImmType : public ImmTag {
ImmTypeImmType350   explicit ImmType(JSValueType type) : ImmTag(JSVAL_TYPE_TO_TAG(type)) {}
351 };
352 
enum Index {
  Offset = 0 << 21 | 1 << 24,
  PreIndex = 1 << 21 | 1 << 24,
  PostIndex = 0 << 21 | 0 << 24
  // The docs were rather unclear on this. It sounds like
  // 1 << 21 | 0 << 24 encodes dtrt.
};

enum IsImmOp2_ { IsImmOp2 = 1 << 25, IsNotImmOp2 = 0 << 25 };
enum IsImmDTR_ { IsImmDTR = 0 << 25, IsNotImmDTR = 1 << 25 };
// For the extra memory operations, ldrd, ldrsb, ldrh.
enum IsImmEDTR_ { IsImmEDTR = 1 << 22, IsNotImmEDTR = 0 << 22 };

enum ShiftType {
  LSL = 0,   // << 5
  LSR = 1,   // << 5
  ASR = 2,   // << 5
  ROR = 3,   // << 5
  RRX = ROR  // RRX is encoded as ROR with a 0 offset.
};

// Modes for STM/LDM. Names are the suffixes applied to the instruction.
enum DTMMode {
  A = 0 << 24,  // empty / after
  B = 1 << 24,  // full / before
  D = 0 << 23,  // decrement
  I = 1 << 23,  // increment
  DA = D | A,
  DB = D | B,
  IA = I | A,
  IB = I | B
};

enum DTMWriteBack { WriteBack = 1 << 21, NoWriteBack = 0 << 21 };

// Condition code updating mode.
enum SBit {
  SetCC = 1 << 20,   // Set condition code.
  LeaveCC = 0 << 20  // Leave condition code unchanged.
};

enum LoadStore { IsLoad = 1 << 20, IsStore = 0 << 20 };

// You almost never want to use this directly. Instead, you want to pass in a
// signed constant, and let this bit be implicitly set for you. This is however,
// necessary if we want a negative index.
enum IsUp_ { IsUp = 1 << 23, IsDown = 0 << 23 };
// ALU opcodes, pre-shifted into the data-processing opcode field (bits 21-24).
enum ALUOp {
  OpMov = 0xd << 21,
  OpMvn = 0xf << 21,
  OpAnd = 0x0 << 21,
  OpBic = 0xe << 21,
  OpEor = 0x1 << 21,
  OpOrr = 0xc << 21,
  OpAdc = 0x5 << 21,
  OpAdd = 0x4 << 21,
  OpSbc = 0x6 << 21,
  OpSub = 0x2 << 21,
  OpRsb = 0x3 << 21,
  OpRsc = 0x7 << 21,
  OpCmn = 0xb << 21,
  OpCmp = 0xa << 21,
  OpTeq = 0x9 << 21,
  OpTst = 0x8 << 21,
  OpInvalid = -1
};

// Multiply opcodes, pre-shifted into bits 21-23.
enum MULOp {
  OpmMul = 0 << 21,
  OpmMla = 1 << 21,
  OpmUmaal = 2 << 21,
  OpmMls = 3 << 21,
  OpmUmull = 4 << 21,
  OpmUmlal = 5 << 21,
  OpmSmull = 6 << 21,
  OpmSmlal = 7 << 21
};

// Branch instruction encodings and masks.
enum BranchTag {
  OpB = 0x0a000000,
  OpBMask = 0x0f000000,
  OpBDestMask = 0x00ffffff,
  OpBl = 0x0b000000,
  OpBlx = 0x012fff30,
  OpBx = 0x012fff10
};

// Just like ALUOp, but for the vfp instruction set.
enum VFPOp {
  OpvMul = 0x2 << 20,
  OpvAdd = 0x3 << 20,
  OpvSub = 0x3 << 20 | 0x1 << 6,
  OpvDiv = 0x8 << 20,
  OpvMov = 0xB << 20 | 0x1 << 6,
  OpvAbs = 0xB << 20 | 0x3 << 6,
  OpvNeg = 0xB << 20 | 0x1 << 6 | 0x1 << 16,
  OpvSqrt = 0xB << 20 | 0x3 << 6 | 0x1 << 16,
  OpvCmp = 0xB << 20 | 0x1 << 6 | 0x4 << 16,
  OpvCmpz = 0xB << 20 | 0x1 << 6 | 0x5 << 16
};
452 
453 // Negate the operation, AND negate the immediate that we were passed in.
454 ALUOp ALUNeg(ALUOp op, Register dest, Register scratch, Imm32* imm,
455              Register* negDest);
456 bool can_dbl(ALUOp op);
457 bool condsAreSafe(ALUOp op);
458 
459 // If there is a variant of op that has a dest (think cmp/sub) return that
460 // variant of it.
461 ALUOp getDestVariant(ALUOp op);
462 
463 static constexpr ValueOperand JSReturnOperand{JSReturnReg_Type,
464                                               JSReturnReg_Data};
465 static const ValueOperand softfpReturnOperand = ValueOperand(r1, r0);
466 
467 // All of these classes exist solely to shuffle data into the various operands.
468 // For example Operand2 can be an imm8, a register-shifted-by-a-constant or a
469 // register-shifted-by-a-register. We represent this in C++ by having a base
470 // class Operand2, which just stores the 32 bits of data as they will be encoded
471 // in the instruction. You cannot directly create an Operand2 since it is
472 // tricky, and not entirely sane to do so. Instead, you create one of its child
473 // classes, e.g. Imm8. Imm8's constructor takes a single integer argument. Imm8
474 // will verify that its argument can be encoded as an ARM 12 bit imm8, encode it
475 // using an Imm8data, and finally call its parent's (Operand2) constructor with
476 // the Imm8data. The Operand2 constructor will then call the Imm8data's encode()
477 // function to extract the raw bits from it.
478 //
479 // In the future, we should be able to extract data from the Operand2 by asking
480 // it for its component Imm8data structures. The reason this is so horribly
481 // round-about is we wanted to have Imm8 and RegisterShiftedRegister inherit
482 // directly from Operand2 but have all of them take up only a single word of
483 // storage. We also wanted to avoid passing around raw integers at all since
484 // they are error prone.
485 class Op2Reg;
486 class O2RegImmShift;
487 class O2RegRegShift;
488 
489 namespace datastore {
490 
491 class Reg {
492   // The "second register".
493   uint32_t rm_ : 4;
494   // Do we get another register for shifting.
495   uint32_t rrs_ : 1;
496   uint32_t type_ : 2;
497   // We'd like this to be a more sensible encoding, but that would need to be
498   // a struct and that would not pack :(
499   uint32_t shiftAmount_ : 5;
500 
501  protected:
502   // Mark as a protected field to avoid unused private field warnings.
503   uint32_t pad_ : 20;
504 
505  public:
Reg(uint32_t rm,ShiftType type,uint32_t rsr,uint32_t shiftAmount)506   Reg(uint32_t rm, ShiftType type, uint32_t rsr, uint32_t shiftAmount)
507       : rm_(rm), rrs_(rsr), type_(type), shiftAmount_(shiftAmount), pad_(0) {}
Reg(const Op2Reg & op)508   explicit Reg(const Op2Reg& op) { memcpy(this, &op, sizeof(*this)); }
509 
shiftAmount()510   uint32_t shiftAmount() const { return shiftAmount_; }
511 
encode()512   uint32_t encode() const {
513     return rm_ | (rrs_ << 4) | (type_ << 5) | (shiftAmount_ << 7);
514   }
515 };
516 
517 // Op2 has a mode labelled "<imm8m>", which is arm's magical immediate encoding.
518 // Some instructions actually get 8 bits of data, which is called Imm8Data
519 // below. These should have edit distance > 1, but this is how it is for now.
520 class Imm8mData {
521   uint32_t data_ : 8;
522   uint32_t rot_ : 4;
523 
524  protected:
525   // Mark as a protected field to avoid unused private field warnings.
526   uint32_t buff_ : 19;
527 
528  private:
529   // Throw in an extra bit that will be 1 if we can't encode this properly.
530   // if we can encode it properly, a simple "|" will still suffice to meld it
531   // into the instruction.
532   uint32_t invalid_ : 1;
533 
534  public:
535   // Default constructor makes an invalid immediate.
Imm8mData()536   Imm8mData() : data_(0xff), rot_(0xf), buff_(0), invalid_(true) {}
537 
Imm8mData(uint32_t data,uint32_t rot)538   Imm8mData(uint32_t data, uint32_t rot)
539       : data_(data), rot_(rot), buff_(0), invalid_(false) {
540     MOZ_ASSERT(data == data_);
541     MOZ_ASSERT(rot == rot_);
542   }
543 
invalid()544   bool invalid() const { return invalid_; }
545 
encode()546   uint32_t encode() const {
547     MOZ_ASSERT(!invalid_);
548     return data_ | (rot_ << 8);
549   };
550 };
551 
552 class Imm8Data {
553   uint32_t imm4L_ : 4;
554 
555  protected:
556   // Mark as a protected field to avoid unused private field warnings.
557   uint32_t pad_ : 4;
558 
559  private:
560   uint32_t imm4H_ : 4;
561 
562  public:
Imm8Data(uint32_t imm)563   explicit Imm8Data(uint32_t imm) : imm4L_(imm & 0xf), imm4H_(imm >> 4) {
564     MOZ_ASSERT(imm <= 0xff);
565   }
566 
encode()567   uint32_t encode() const { return imm4L_ | (imm4H_ << 8); };
568 };
569 
570 // VLDR/VSTR take an 8 bit offset, which is implicitly left shifted by 2.
571 class Imm8VFPOffData {
572   uint32_t data_;
573 
574  public:
Imm8VFPOffData(uint32_t imm)575   explicit Imm8VFPOffData(uint32_t imm) : data_(imm) {
576     MOZ_ASSERT((imm & ~(0xff)) == 0);
577   }
encode()578   uint32_t encode() const { return data_; };
579 };
580 
581 // ARM can magically encode 256 very special immediates to be moved into a
582 // register.
583 struct Imm8VFPImmData {
584   // This structure's members are public and it has no constructor to
585   // initialize them, for a very special reason. Were this structure to
586   // have a constructor, the initialization for DoubleEncoder's internal
587   // table (see below) would require a rather large static constructor on
588   // some of our supported compilers. The known solution to this is to mark
589   // the constructor constexpr, but, again, some of our supported
590   // compilers don't support constexpr! So we are reduced to public
591   // members and eschewing a constructor in hopes that the initialization
592   // of DoubleEncoder's table is correct.
593   uint32_t imm4L : 4;
594   uint32_t imm4H : 4;
595   int32_t isInvalid : 24;
596 
encodeImm8VFPImmData597   uint32_t encode() const {
598     // This assert is an attempting at ensuring that we don't create random
599     // instances of this structure and then asking to encode() it.
600     MOZ_ASSERT(isInvalid == 0);
601     return imm4L | (imm4H << 16);
602   };
603 };
604 
605 class Imm12Data {
606   uint32_t data_ : 12;
607 
608  public:
Imm12Data(uint32_t imm)609   explicit Imm12Data(uint32_t imm) : data_(imm) { MOZ_ASSERT(data_ == imm); }
610 
encode()611   uint32_t encode() const { return data_; }
612 };
613 
614 class RIS {
615   uint32_t shiftAmount_ : 5;
616 
617  public:
RIS(uint32_t imm)618   explicit RIS(uint32_t imm) : shiftAmount_(imm) {
619     MOZ_ASSERT(shiftAmount_ == imm);
620   }
621 
RIS(Reg r)622   explicit RIS(Reg r) : shiftAmount_(r.shiftAmount()) {}
623 
encode()624   uint32_t encode() const { return shiftAmount_; }
625 };
626 
627 class RRS {
628  protected:
629   // Mark as a protected field to avoid unused private field warnings.
630   uint32_t mustZero_ : 1;
631 
632  private:
633   // The register that holds the shift amount.
634   uint32_t rs_ : 4;
635 
636  public:
RRS(uint32_t rs)637   explicit RRS(uint32_t rs) : rs_(rs) { MOZ_ASSERT(rs_ == rs); }
638 
encode()639   uint32_t encode() const { return rs_ << 1; }
640 };
641 
642 }  // namespace datastore
643 
644 class MacroAssemblerARM;
645 class Operand;
646 
647 class Operand2 {
648   friend class Operand;
649   friend class MacroAssemblerARM;
650   friend class InstALU;
651 
652   uint32_t oper_ : 31;
653   uint32_t invalid_ : 1;
654 
655  protected:
Operand2(datastore::Imm8mData base)656   explicit Operand2(datastore::Imm8mData base)
657       : oper_(base.invalid() ? -1 : (base.encode() | uint32_t(IsImmOp2))),
658         invalid_(base.invalid()) {}
659 
Operand2(datastore::Reg base)660   explicit Operand2(datastore::Reg base)
661       : oper_(base.encode() | uint32_t(IsNotImmOp2)), invalid_(false) {}
662 
663  private:
Operand2(uint32_t blob)664   explicit Operand2(uint32_t blob) : oper_(blob), invalid_(false) {}
665 
666  public:
isO2Reg()667   bool isO2Reg() const { return !(oper_ & IsImmOp2); }
668 
669   Op2Reg toOp2Reg() const;
670 
isImm8()671   bool isImm8() const { return oper_ & IsImmOp2; }
672 
invalid()673   bool invalid() const { return invalid_; }
674 
encode()675   uint32_t encode() const { return oper_; }
676 };
677 
678 class Imm8 : public Operand2 {
679  public:
Imm8(uint32_t imm)680   explicit Imm8(uint32_t imm) : Operand2(EncodeImm(imm)) {}
681 
EncodeImm(uint32_t imm)682   static datastore::Imm8mData EncodeImm(uint32_t imm) {
683     // RotateLeft below may not be called with a shift of zero.
684     if (imm <= 0xFF) {
685       return datastore::Imm8mData(imm, 0);
686     }
687 
688     // An encodable integer has a maximum of 8 contiguous set bits,
689     // with an optional wrapped left rotation to even bit positions.
690     for (int rot = 1; rot < 16; rot++) {
691       uint32_t rotimm = mozilla::RotateLeft(imm, rot * 2);
692       if (rotimm <= 0xFF) {
693         return datastore::Imm8mData(rotimm, rot);
694       }
695     }
696     return datastore::Imm8mData();
697   }
698 
699   // Pair template?
700   struct TwoImm8mData {
701     datastore::Imm8mData fst_, snd_;
702 
703     TwoImm8mData() = default;
704 
TwoImm8mDataTwoImm8mData705     TwoImm8mData(datastore::Imm8mData fst, datastore::Imm8mData snd)
706         : fst_(fst), snd_(snd) {}
707 
fstTwoImm8mData708     datastore::Imm8mData fst() const { return fst_; }
sndTwoImm8mData709     datastore::Imm8mData snd() const { return snd_; }
710   };
711 
712   static TwoImm8mData EncodeTwoImms(uint32_t);
713 };
714 
715 class Op2Reg : public Operand2 {
716  public:
Op2Reg(Register rm,ShiftType type,datastore::RIS shiftImm)717   explicit Op2Reg(Register rm, ShiftType type, datastore::RIS shiftImm)
718       : Operand2(datastore::Reg(rm.code(), type, 0, shiftImm.encode())) {}
719 
Op2Reg(Register rm,ShiftType type,datastore::RRS shiftReg)720   explicit Op2Reg(Register rm, ShiftType type, datastore::RRS shiftReg)
721       : Operand2(datastore::Reg(rm.code(), type, 1, shiftReg.encode())) {}
722 };
723 
724 static_assert(sizeof(Op2Reg) == sizeof(datastore::Reg),
725               "datastore::Reg(const Op2Reg&) constructor relies on Reg/Op2Reg "
726               "having same size");
727 
728 class O2RegImmShift : public Op2Reg {
729  public:
O2RegImmShift(Register rn,ShiftType type,uint32_t shift)730   explicit O2RegImmShift(Register rn, ShiftType type, uint32_t shift)
731       : Op2Reg(rn, type, datastore::RIS(shift)) {}
732 };
733 
734 class O2RegRegShift : public Op2Reg {
735  public:
O2RegRegShift(Register rn,ShiftType type,Register rs)736   explicit O2RegRegShift(Register rn, ShiftType type, Register rs)
737       : Op2Reg(rn, type, datastore::RRS(rs.code())) {}
738 };
739 
740 O2RegImmShift O2Reg(Register r);
741 O2RegImmShift lsl(Register r, int amt);
742 O2RegImmShift lsr(Register r, int amt);
743 O2RegImmShift asr(Register r, int amt);
744 O2RegImmShift rol(Register r, int amt);
745 O2RegImmShift ror(Register r, int amt);
746 
747 O2RegRegShift lsl(Register r, Register amt);
748 O2RegRegShift lsr(Register r, Register amt);
749 O2RegRegShift asr(Register r, Register amt);
750 O2RegRegShift ror(Register r, Register amt);
751 
752 // An offset from a register to be used for ldr/str. This should include the
753 // sign bit, since ARM has "signed-magnitude" offsets. That is it encodes an
754 // unsigned offset, then the instruction specifies if the offset is positive or
755 // negative. The +/- bit is necessary if the instruction set wants to be able to
756 // have a negative register offset e.g. ldr pc, [r1,-r2];
757 class DtrOff {
758   uint32_t data_;
759 
760  protected:
DtrOff(datastore::Imm12Data immdata,IsUp_ iu)761   explicit DtrOff(datastore::Imm12Data immdata, IsUp_ iu)
762       : data_(immdata.encode() | uint32_t(IsImmDTR) | uint32_t(iu)) {}
763 
764   explicit DtrOff(datastore::Reg reg, IsUp_ iu = IsUp)
765       : data_(reg.encode() | uint32_t(IsNotImmDTR) | iu) {}
766 
767  public:
encode()768   uint32_t encode() const { return data_; }
769 };
770 
// Immediate (12-bit magnitude) ldr/str offset; the sign of |imm| selects the
// U (add/subtract) bit.
class DtrOffImm : public DtrOff {
 public:
  explicit DtrOffImm(int32_t imm)
      : DtrOff(datastore::Imm12Data(mozilla::Abs(imm)),
               imm >= 0 ? IsUp : IsDown) {
    // ARM immediate data-transfer offsets carry 12 bits of magnitude.
    MOZ_ASSERT(mozilla::Abs(imm) < 4096);
  }
};
779 
// Register offset (optionally shifted) for ldr/str.
class DtrOffReg : public DtrOff {
  // These are designed to be called by a constructor of a subclass.
  // Constructing the necessary RIS/RRS structures is annoying.

 protected:
  // Offset register shifted by an immediate amount (RIS encoding); the 0/1
  // argument distinguishes immediate- vs register-specified shift.
  explicit DtrOffReg(Register rn, ShiftType type, datastore::RIS shiftImm,
                     IsUp_ iu = IsUp)
      : DtrOff(datastore::Reg(rn.code(), type, 0, shiftImm.encode()), iu) {}

  // Offset register shifted by another register (RRS encoding).
  explicit DtrOffReg(Register rn, ShiftType type, datastore::RRS shiftReg,
                     IsUp_ iu = IsUp)
      : DtrOff(datastore::Reg(rn.code(), type, 1, shiftReg.encode()), iu) {}
};
793 
794 class DtrRegImmShift : public DtrOffReg {
795  public:
796   explicit DtrRegImmShift(Register rn, ShiftType type, uint32_t shift,
797                           IsUp_ iu = IsUp)
DtrOffReg(rn,type,datastore::RIS (shift),iu)798       : DtrOffReg(rn, type, datastore::RIS(shift), iu) {}
799 };
800 
801 class DtrRegRegShift : public DtrOffReg {
802  public:
803   explicit DtrRegRegShift(Register rn, ShiftType type, Register rs,
804                           IsUp_ iu = IsUp)
805       : DtrOffReg(rn, type, datastore::RRS(rs.code()), iu) {}
806 };
807 
808 // We will frequently want to bundle a register with its offset so that we have
809 // an "operand" to a load instruction.
class DTRAddr {
  friend class Operand;

  uint32_t data_;

 public:
  // Combine a base register (placed in the Rn field, bits 16-19) with an
  // already-encoded offset.
  explicit DTRAddr(Register reg, DtrOff dtr)
      : data_(dtr.encode() | (reg.code() << 16)) {}

  uint32_t encode() const { return data_; }

  // Recover the base register from bits 16-19 of the encoding.
  Register getBase() const { return Register::FromCode((data_ >> 16) & 0xf); }
};
823 
824 // Offsets for the extended data transfer instructions:
825 // ldrsh, ldrd, ldrsb, etc.
826 class EDtrOff {
827   uint32_t data_;
828 
829  protected:
830   explicit EDtrOff(datastore::Imm8Data imm8, IsUp_ iu = IsUp)
831       : data_(imm8.encode() | IsImmEDTR | uint32_t(iu)) {}
832 
833   explicit EDtrOff(Register rm, IsUp_ iu = IsUp)
834       : data_(rm.code() | IsNotImmEDTR | iu) {}
835 
836  public:
encode()837   uint32_t encode() const { return data_; }
838 };
839 
// Immediate offset for the extended transfers; only 8 bits of magnitude are
// available, with the sign carried by the up/down bit.
class EDtrOffImm : public EDtrOff {
 public:
  explicit EDtrOffImm(int32_t imm)
      : EDtrOff(datastore::Imm8Data(mozilla::Abs(imm)),
                (imm >= 0) ? IsUp : IsDown) {
    // Extended-DTR immediates carry 8 bits of magnitude.
    MOZ_ASSERT(mozilla::Abs(imm) < 256);
  }
};
848 
849 // This is the most-derived class, since the extended data transfer instructions
850 // don't support any sort of modifying the "index" operand.
851 class EDtrOffReg : public EDtrOff {
852  public:
EDtrOffReg(Register rm)853   explicit EDtrOffReg(Register rm) : EDtrOff(rm) {}
854 };
855 
// Bundles a base register with an extended-transfer offset, forming the full
// address operand for ldrsh/ldrd/ldrsb and friends.
class EDtrAddr {
  uint32_t data_;

 public:
  explicit EDtrAddr(Register r, EDtrOff off) : data_(RN(r) | off.encode()) {}

  uint32_t encode() const { return data_; }
#ifdef DEBUG
  // Returns the offset register, or InvalidReg when the offset is an
  // immediate.
  Register maybeOffsetRegister() const {
    if (data_ & IsImmEDTR) {
      return InvalidReg;
    }
    return Register::FromCode(data_ & 0xf);
  }
#endif
};
872 
// Offset for the VFP loads/stores: an 8-bit encoded magnitude plus the
// up/down bit.
class VFPOff {
  uint32_t data_;

 protected:
  explicit VFPOff(datastore::Imm8VFPOffData imm, IsUp_ isup)
      : data_(imm.encode() | uint32_t(isup)) {}

 public:
  uint32_t encode() const { return data_; }
};
883 
class VFPOffImm : public VFPOff {
 public:
  // The byte offset is stored word-scaled (divided by 4).
  // NOTE(review): a |imm| that is not a multiple of 4 is silently truncated
  // by the division -- presumably callers always pass word-aligned offsets;
  // confirm before relying on this.
  explicit VFPOffImm(int32_t imm)
      : VFPOff(datastore::Imm8VFPOffData(mozilla::Abs(imm) / 4),
               imm < 0 ? IsDown : IsUp) {
    MOZ_ASSERT(mozilla::Abs(imm) <= 255 * 4);
  }
};
892 
// Base register plus VFP offset: the address operand for VFP loads/stores.
class VFPAddr {
  friend class Operand;

  uint32_t data_;

 public:
  explicit VFPAddr(Register base, VFPOff off)
      : data_(RN(base) | off.encode()) {}

  uint32_t encode() const { return data_; }
};
904 
// An encoded VFP floating-point immediate (for vmov vd, #imm), built from the
// top word of a double.
class VFPImm {
  uint32_t data_;

 public:
  explicit VFPImm(uint32_t topWordOfDouble);

  // The constant 1.0, which has a valid VFP immediate encoding.
  static const VFPImm One;

  uint32_t encode() const { return data_; }
  // All-ones is the sentinel for a value with no VFP immediate encoding.
  bool isValid() const { return data_ != (~0U); }
};
916 
917 // A BOffImm is an immediate that is used for branches. Namely, it is the offset
918 // that will be encoded in the branch instruction. This is the only sane way of
919 // constructing a branch.
class BOffImm {
  friend class InstBranchImm;

  uint32_t data_;

 public:
  // |offset| is relative to the start of the branch instruction. ARM's pc
  // reads as the instruction address plus 8, hence the -8 bias; the word
  // offset is then stored in the low 24 bits.
  explicit BOffImm(int offset) : data_((offset - 8) >> 2 & 0x00ffffff) {
    MOZ_ASSERT((offset & 0x3) == 0);
    if (!IsInRange(offset)) {
      MOZ_CRASH("BOffImm offset out of range");
    }
  }

  // Construct the invalid (sentinel) branch offset.
  explicit BOffImm() : data_(INVALID) {}

 private:
  explicit BOffImm(const Instruction& inst);

 public:
  static const uint32_t INVALID = 0x00800000;

  uint32_t encode() const { return data_; }
  // Invert the encoding: sign-extend the 24-bit field and rescale to bytes
  // (<< 8 then >> 6 nets a << 2 with sign extension), then re-add the pc
  // bias.
  int32_t decode() const { return ((int32_t(data_) << 8) >> 6) + 8; }

  // The biased offset must fit in a signed, word-aligned 26-bit range:
  // offset - 8 in [-2^25, 2^25 - 4].
  static bool IsInRange(int offset) {
    if ((offset - 8) < -33554432) {
      return false;
    }
    if ((offset - 8) > 33554428) {
      return false;
    }
    return true;
  }

  bool isInvalid() const { return data_ == INVALID; }
  Instruction* getDest(Instruction* src) const;
};
957 
// The 16-bit immediate of movw/movt, stored pre-split into the instruction's
// imm12 (bits 0-11) and imm4 (bits 16-19) fields.
class Imm16 {
  uint32_t lower_ : 12;

 protected:
  // Mark as a protected field to avoid unused private field warnings.
  uint32_t pad_ : 4;

 private:
  uint32_t upper_ : 4;
  uint32_t invalid_ : 12;

 public:
  explicit Imm16();
  explicit Imm16(uint32_t imm);
  explicit Imm16(Instruction& inst);

  // Instruction-field placement: imm12 in bits 0-11, imm4 in bits 16-19.
  uint32_t encode() const { return lower_ | (upper_ << 16); }
  // The logical 16-bit value: imm4 supplies the value's bits 12-15.
  uint32_t decode() const { return lower_ | (upper_ << 12); }

  bool isInvalid() const { return invalid_; }
};
979 
// I would prefer that these do not exist, since there are essentially no
// instructions that would ever take more than one of these, however, the MIR
// wants to only have one type of arguments to functions, so bugger.
// A tagged union of the operand flavours the MIR hands to the assembler: a
// core register (OP2), a base+offset memory reference (MEM), or a
// floating-point register (FOP).
class Operand {
  // The encoding of registers is the same for OP2, DTR and EDTR yet the type
  // system doesn't let us express this, so choices must be made.
 public:
  enum class Tag : uint8_t { OP2, MEM, FOP };

 private:
  uint32_t tag_ : 8;
  uint32_t reg_ : 5;  // Register code (core or VFP, depending on tag_).
  int32_t offset_;    // Byte offset; only meaningful for Tag::MEM.

 protected:
  Operand(Tag tag, uint32_t regCode, int32_t offset)
      : tag_(static_cast<uint32_t>(tag)), reg_(regCode), offset_(offset) {}

 public:
  explicit Operand(Register reg) : Operand(Tag::OP2, reg.code(), 0) {}

  explicit Operand(FloatRegister freg) : Operand(Tag::FOP, freg.code(), 0) {}

  explicit Operand(Register base, Imm32 off)
      : Operand(Tag::MEM, base.code(), off.value) {}

  explicit Operand(Register base, int32_t off)
      : Operand(Tag::MEM, base.code(), off) {}

  explicit Operand(const Address& addr)
      : Operand(Tag::MEM, addr.base.code(), addr.offset) {}

 public:
  Tag tag() const { return static_cast<Tag>(tag_); }

  // Each accessor below asserts that the stored tag matches the requested
  // view before re-materializing the operand in that representation.

  Operand2 toOp2() const {
    MOZ_ASSERT(tag() == Tag::OP2);
    return O2Reg(Register::FromCode(reg_));
  }

  Register toReg() const {
    MOZ_ASSERT(tag() == Tag::OP2);
    return Register::FromCode(reg_);
  }

  Address toAddress() const {
    MOZ_ASSERT(tag() == Tag::MEM);
    return Address(Register::FromCode(reg_), offset_);
  }
  int32_t disp() const {
    MOZ_ASSERT(tag() == Tag::MEM);
    return offset_;
  }

  // Raw register code of the base register.
  int32_t base() const {
    MOZ_ASSERT(tag() == Tag::MEM);
    return reg_;
  }
  Register baseReg() const {
    MOZ_ASSERT(tag() == Tag::MEM);
    return Register::FromCode(reg_);
  }
  DTRAddr toDTRAddr() const {
    MOZ_ASSERT(tag() == Tag::MEM);
    return DTRAddr(baseReg(), DtrOffImm(offset_));
  }
  VFPAddr toVFPAddr() const {
    MOZ_ASSERT(tag() == Tag::MEM);
    return VFPAddr(baseReg(), VFPOffImm(offset_));
  }
};
1051 
// An Imm64 splits into two Imm32 halves: the first is the low word, the
// second the high word.
inline Imm32 Imm64::firstHalf() const { return low(); }

inline Imm32 Imm64::secondHalf() const { return hi(); }
1055 
// Forward iterator over emitted Instructions that skips the
// automatically-inserted ones (see maybeSkipAutomaticInstructions).
class InstructionIterator {
 private:
  Instruction* inst_;

 public:
  explicit InstructionIterator(Instruction* inst) : inst_(inst) {
    // Start positioned on an intentionally-emitted instruction.
    maybeSkipAutomaticInstructions();
  }

  // Advances to the next intentionally-inserted instruction.
  Instruction* next();

  // Advances past any automatically-inserted instructions.
  Instruction* maybeSkipAutomaticInstructions();

  // The instruction currently pointed at.
  Instruction* cur() const { return inst_; }

 protected:
  // Advances past the given number of instruction-length bytes.
  inline void advanceRaw(ptrdiff_t instructions = 1);
};
1077 
1078 class Assembler;
1079 typedef js::jit::AssemblerBufferWithConstantPools<1024, 4, Instruction,
1080                                                   Assembler>
1081     ARMBuffer;
1082 
1083 class Assembler : public AssemblerShared {
1084  public:
1085   // ARM conditional constants:
1086   enum ARMCondition : uint32_t {
1087     EQ = 0x00000000,  // Zero
1088     NE = 0x10000000,  // Non-zero
1089     CS = 0x20000000,
1090     CC = 0x30000000,
1091     MI = 0x40000000,
1092     PL = 0x50000000,
1093     VS = 0x60000000,
1094     VC = 0x70000000,
1095     HI = 0x80000000,
1096     LS = 0x90000000,
1097     GE = 0xa0000000,
1098     LT = 0xb0000000,
1099     GT = 0xc0000000,
1100     LE = 0xd0000000,
1101     AL = 0xe0000000
1102   };
1103 
1104   enum Condition : uint32_t {
1105     Equal = EQ,
1106     NotEqual = NE,
1107     Above = HI,
1108     AboveOrEqual = CS,
1109     Below = CC,
1110     BelowOrEqual = LS,
1111     GreaterThan = GT,
1112     GreaterThanOrEqual = GE,
1113     LessThan = LT,
1114     LessThanOrEqual = LE,
1115     Overflow = VS,
1116     CarrySet = CS,
1117     CarryClear = CC,
1118     Signed = MI,
1119     NotSigned = PL,
1120     Zero = EQ,
1121     NonZero = NE,
1122     Always = AL,
1123 
1124     VFP_NotEqualOrUnordered = NE,
1125     VFP_Equal = EQ,
1126     VFP_Unordered = VS,
1127     VFP_NotUnordered = VC,
1128     VFP_GreaterThanOrEqualOrUnordered = CS,
1129     VFP_GreaterThanOrEqual = GE,
1130     VFP_GreaterThanOrUnordered = HI,
1131     VFP_GreaterThan = GT,
1132     VFP_LessThanOrEqualOrUnordered = LE,
1133     VFP_LessThanOrEqual = LS,
1134     VFP_LessThanOrUnordered = LT,
1135     VFP_LessThan = CC  // MI is valid too.
1136   };
1137 
1138   // Bit set when a DoubleCondition does not map to a single ARM condition.
1139   // The macro assembler has to special-case these conditions, or else
1140   // ConditionFromDoubleCondition will complain.
1141   static const int DoubleConditionBitSpecial = 0x1;
1142 
1143   enum DoubleCondition : uint32_t {
1144     // These conditions will only evaluate to true if the comparison is
1145     // ordered - i.e. neither operand is NaN.
1146     DoubleOrdered = VFP_NotUnordered,
1147     DoubleEqual = VFP_Equal,
1148     DoubleNotEqual = VFP_NotEqualOrUnordered | DoubleConditionBitSpecial,
1149     DoubleGreaterThan = VFP_GreaterThan,
1150     DoubleGreaterThanOrEqual = VFP_GreaterThanOrEqual,
1151     DoubleLessThan = VFP_LessThan,
1152     DoubleLessThanOrEqual = VFP_LessThanOrEqual,
1153     // If either operand is NaN, these conditions always evaluate to true.
1154     DoubleUnordered = VFP_Unordered,
1155     DoubleEqualOrUnordered = VFP_Equal | DoubleConditionBitSpecial,
1156     DoubleNotEqualOrUnordered = VFP_NotEqualOrUnordered,
1157     DoubleGreaterThanOrUnordered = VFP_GreaterThanOrUnordered,
1158     DoubleGreaterThanOrEqualOrUnordered = VFP_GreaterThanOrEqualOrUnordered,
1159     DoubleLessThanOrUnordered = VFP_LessThanOrUnordered,
1160     DoubleLessThanOrEqualOrUnordered = VFP_LessThanOrEqualOrUnordered
1161   };
1162 
getCondition(uint32_t inst)1163   Condition getCondition(uint32_t inst) {
1164     return (Condition)(0xf0000000 & inst);
1165   }
  // Narrow a DoubleCondition to the plain ARM Condition with the same bits.
  // Asserts that the condition is not one of the special cases flagged with
  // DoubleConditionBitSpecial (those need macro-assembler handling).
  static inline Condition ConditionFromDoubleCondition(DoubleCondition cond) {
    MOZ_ASSERT(!(cond & DoubleConditionBitSpecial));
    return static_cast<Condition>(cond);
  }
1170 
1171   enum BarrierOption {
1172     BarrierSY = 15,  // Full system barrier
1173     BarrierST = 14   // StoreStore barrier
1174   };
1175 
1176   // This should be protected, but since CodeGenerator wants to use it, it
1177   // needs to go out here :(
1178 
nextOffset()1179   BufferOffset nextOffset() { return m_buffer.nextOffset(); }
1180 
1181  protected:
1182   // Shim around AssemblerBufferWithConstantPools::allocEntry.
1183   BufferOffset allocLiteralLoadEntry(size_t numInst, unsigned numPoolEntries,
1184                                      PoolHintPun& php, uint8_t* data,
1185                                      const LiteralDoc& doc = LiteralDoc(),
1186                                      ARMBuffer::PoolEntry* pe = nullptr,
1187                                      bool loadToPC = false);
1188 
editSrc(BufferOffset bo)1189   Instruction* editSrc(BufferOffset bo) { return m_buffer.getInst(bo); }
1190 
1191 #ifdef JS_DISASM_ARM
1192   typedef disasm::EmbeddedVector<char, disasm::ReasonableBufferSize>
1193       DisasmBuffer;
1194 
1195   static void disassembleInstruction(const Instruction* i,
1196                                      DisasmBuffer& buffer);
1197 
1198   void initDisassembler();
1199   void finishDisassembler();
1200   void spew(Instruction* i);
1201   void spewBranch(Instruction* i, const LabelDoc& target);
1202   void spewLiteralLoad(PoolHintPun& php, bool loadToPC, const Instruction* offs,
1203                        const LiteralDoc& doc);
1204 #endif
1205 
1206  public:
1207   void resetCounter();
1208   static uint32_t NopFill;
1209   static uint32_t GetNopFill();
1210   static uint32_t AsmPoolMaxOffset;
1211   static uint32_t GetPoolMaxOffset();
1212 
1213  protected:
  // Structure for fixing up pc-relative loads/jumps when the machine code
  // gets moved (executable copy, gc, etc.).
  // A jump/load target plus the relocation kind to apply when the code moves.
  class RelativePatch {
    void* target_;
    RelocationKind kind_;

   public:
    RelativePatch(void* target, RelocationKind kind)
        : target_(target), kind_(kind) {}
    void* target() const { return target_; }
    RelocationKind kind() const { return kind_; }
  };
1226 
1227   // TODO: this should actually be a pool-like object. It is currently a big
1228   // hack, and probably shouldn't exist.
1229   js::Vector<RelativePatch, 8, SystemAllocPolicy> jumps_;
1230 
1231   CompactBufferWriter jumpRelocations_;
1232   CompactBufferWriter dataRelocations_;
1233 
1234   ARMBuffer m_buffer;
1235 
1236 #ifdef JS_DISASM_ARM
1237   DisassemblerSpew spew_;
1238 #endif
1239 
1240  public:
  // For the alignment fill use NOP: 0xe320f000, i.e. (Always |
  // InstNOP::NopInst). For the nopFill use a branch to the next instruction:
  // 0xeaffffff.
  // The m_buffer arguments are the pool/buffer geometry (see
  // AssemblerBufferWithConstantPools for their meaning) followed by the two
  // fill patterns: 0xe320f000 is "nop" with the Always condition (alignment
  // fill) and 0xeaffffff is a branch to the next instruction (nop fill).
  Assembler()
      : m_buffer(1, 1, 8, GetPoolMaxOffset(), 8, 0xe320f000, 0xeaffffff,
                 GetNopFill()),
        isFinished(false),
        dtmActive(false),
        dtmCond(Always) {
#ifdef JS_DISASM_ARM
    // Disassembly spew state is maintained alongside the buffer when enabled.
    initDisassembler();
#endif
  }
1253 
~Assembler()1254   ~Assembler() {
1255 #ifdef JS_DISASM_ARM
1256     finishDisassembler();
1257 #endif
1258   }
1259 
1260   // We need to wait until an AutoJitContextAlloc is created by the
1261   // MacroAssembler, before allocating any space.
initWithAllocator()1262   void initWithAllocator() { m_buffer.initWithAllocator(); }
1263 
setUnlimitedBuffer()1264   void setUnlimitedBuffer() { m_buffer.setUnlimited(); }
1265 
1266   static Condition InvertCondition(Condition cond);
1267   static Condition UnsignedCondition(Condition cond);
1268   static Condition ConditionWithoutEqual(Condition cond);
1269 
1270   static DoubleCondition InvertCondition(DoubleCondition cond);
1271 
  // Record |offset| in the data-relocation table when |ptr| is a non-null GC
  // pointer, and flag the code as embedding a nursery pointer when it does.
  void writeDataRelocation(BufferOffset offset, ImmGCPtr ptr) {
    // Raw GC pointer relocations and Value relocations both end up in
    // Assembler::TraceDataRelocations.
    if (ptr.value) {
      if (gc::IsInsideNursery(ptr.value)) {
        embedsNurseryPointers_ = true;
      }
      dataRelocations_.writeUnsigned(offset.getOffset());
    }
  }
1282 
1283   enum RelocBranchStyle { B_MOVWT, B_LDR_BX, B_LDR, B_MOVW_ADD };
1284 
1285   enum RelocStyle { L_MOVWT, L_LDR };
1286 
1287  public:
1288   // Given the start of a Control Flow sequence, grab the value that is
1289   // finally branched to given the start of a function that loads an address
1290   // into a register get the address that ends up in the register.
1291   template <class Iter>
1292   static const uint32_t* GetCF32Target(Iter* iter);
1293 
1294   static uintptr_t GetPointer(uint8_t*);
1295   static const uint32_t* GetPtr32Target(InstructionIterator iter,
1296                                         Register* dest = nullptr,
1297                                         RelocStyle* rs = nullptr);
1298 
1299   bool oom() const;
1300 
setPrinter(Sprinter * sp)1301   void setPrinter(Sprinter* sp) {
1302 #ifdef JS_DISASM_ARM
1303     spew_.setPrinter(sp);
1304 #endif
1305   }
1306 
getStackPointer()1307   Register getStackPointer() const { return StackPointer; }
1308 
1309  private:
1310   bool isFinished;
1311 
1312  protected:
  // Resolve a Label to its disassembler documentation; returns an empty
  // placeholder when the disassembler is compiled out.
  LabelDoc refLabel(const Label* label) {
#ifdef JS_DISASM_ARM
    return spew_.refLabel(label);
#else
    return LabelDoc();
#endif
  }
1320 
1321  public:
1322   void finish();
1323   bool appendRawCode(const uint8_t* code, size_t numBytes);
1324   bool reserve(size_t size);
1325   bool swapBuffer(wasm::Bytes& bytes);
1326   void copyJumpRelocationTable(uint8_t* dest);
1327   void copyDataRelocationTable(uint8_t* dest);
1328 
1329   // Size of the instruction stream, in bytes, after pools are flushed.
1330   size_t size() const;
1331   // Size of the jump relocation table, in bytes.
1332   size_t jumpRelocationTableBytes() const;
1333   size_t dataRelocationTableBytes() const;
1334 
1335   // Size of the data table, in bytes.
1336   size_t bytesNeeded() const;
1337 
1338   // Write a single instruction into the instruction stream.  Very hot,
1339   // inlined for performance
  MOZ_ALWAYS_INLINE BufferOffset writeInst(uint32_t x) {
    BufferOffset offs = m_buffer.putInt(x);
#ifdef JS_DISASM_ARM
    // Spew the instruction just emitted (getInstOrNull may yield null).
    spew(m_buffer.getInstOrNull(offs));
#endif
    return offs;
  }
1347 
1348   // As above, but also mark the instruction as a branch.  Very hot, inlined
1349   // for performance
  MOZ_ALWAYS_INLINE BufferOffset
  writeBranchInst(uint32_t x, const LabelDoc& documentation) {
    BufferOffset offs = m_buffer.putInt(x);
#ifdef JS_DISASM_ARM
    // Branches additionally get their target documented in the spew.
    spewBranch(m_buffer.getInstOrNull(offs), documentation);
#endif
    return offs;
  }
1358 
1359   // Write a placeholder NOP for a branch into the instruction stream
1360   // (in order to adjust assembler addresses and mark it as a branch), it will
1361   // be overwritten subsequently.
1362   BufferOffset allocBranchInst();
1363 
1364   // A static variant for the cases where we don't want to have an assembler
1365   // object.
1366   static void WriteInstStatic(uint32_t x, uint32_t* dest);
1367 
1368  public:
1369   void writeCodePointer(CodeLabel* label);
1370 
1371   void haltingAlign(int alignment);
1372   void nopAlign(int alignment);
1373   BufferOffset as_nop();
1374   BufferOffset as_alu(Register dest, Register src1, Operand2 op2, ALUOp op,
1375                       SBit s = LeaveCC, Condition c = Always);
1376   BufferOffset as_mov(Register dest, Operand2 op2, SBit s = LeaveCC,
1377                       Condition c = Always);
1378   BufferOffset as_mvn(Register dest, Operand2 op2, SBit s = LeaveCC,
1379                       Condition c = Always);
1380 
1381   static void as_alu_patch(Register dest, Register src1, Operand2 op2, ALUOp op,
1382                            SBit s, Condition c, uint32_t* pos);
1383   static void as_mov_patch(Register dest, Operand2 op2, SBit s, Condition c,
1384                            uint32_t* pos);
1385 
1386   // Logical operations:
1387   BufferOffset as_and(Register dest, Register src1, Operand2 op2,
1388                       SBit s = LeaveCC, Condition c = Always);
1389   BufferOffset as_bic(Register dest, Register src1, Operand2 op2,
1390                       SBit s = LeaveCC, Condition c = Always);
1391   BufferOffset as_eor(Register dest, Register src1, Operand2 op2,
1392                       SBit s = LeaveCC, Condition c = Always);
1393   BufferOffset as_orr(Register dest, Register src1, Operand2 op2,
1394                       SBit s = LeaveCC, Condition c = Always);
1395   // Reverse byte operations:
1396   BufferOffset as_rev(Register dest, Register src, Condition c = Always);
1397   BufferOffset as_rev16(Register dest, Register src, Condition c = Always);
1398   BufferOffset as_revsh(Register dest, Register src, Condition c = Always);
1399   // Mathematical operations:
1400   BufferOffset as_adc(Register dest, Register src1, Operand2 op2,
1401                       SBit s = LeaveCC, Condition c = Always);
1402   BufferOffset as_add(Register dest, Register src1, Operand2 op2,
1403                       SBit s = LeaveCC, Condition c = Always);
1404   BufferOffset as_sbc(Register dest, Register src1, Operand2 op2,
1405                       SBit s = LeaveCC, Condition c = Always);
1406   BufferOffset as_sub(Register dest, Register src1, Operand2 op2,
1407                       SBit s = LeaveCC, Condition c = Always);
1408   BufferOffset as_rsb(Register dest, Register src1, Operand2 op2,
1409                       SBit s = LeaveCC, Condition c = Always);
1410   BufferOffset as_rsc(Register dest, Register src1, Operand2 op2,
1411                       SBit s = LeaveCC, Condition c = Always);
1412   // Test operations:
1413   BufferOffset as_cmn(Register src1, Operand2 op2, Condition c = Always);
1414   BufferOffset as_cmp(Register src1, Operand2 op2, Condition c = Always);
1415   BufferOffset as_teq(Register src1, Operand2 op2, Condition c = Always);
1416   BufferOffset as_tst(Register src1, Operand2 op2, Condition c = Always);
1417 
1418   // Sign extension operations:
1419   BufferOffset as_sxtb(Register dest, Register src, int rotate,
1420                        Condition c = Always);
1421   BufferOffset as_sxth(Register dest, Register src, int rotate,
1422                        Condition c = Always);
1423   BufferOffset as_uxtb(Register dest, Register src, int rotate,
1424                        Condition c = Always);
1425   BufferOffset as_uxth(Register dest, Register src, int rotate,
1426                        Condition c = Always);
1427 
  // Not quite ALU worthy, but useful nonetheless: these also have the issue
  // of being formatted completely differently from the standard ALU
  // operations.
1431   BufferOffset as_movw(Register dest, Imm16 imm, Condition c = Always);
1432   BufferOffset as_movt(Register dest, Imm16 imm, Condition c = Always);
1433 
1434   static void as_movw_patch(Register dest, Imm16 imm, Condition c,
1435                             Instruction* pos);
1436   static void as_movt_patch(Register dest, Imm16 imm, Condition c,
1437                             Instruction* pos);
1438 
1439   BufferOffset as_genmul(Register d1, Register d2, Register rm, Register rn,
1440                          MULOp op, SBit s, Condition c = Always);
1441   BufferOffset as_mul(Register dest, Register src1, Register src2,
1442                       SBit s = LeaveCC, Condition c = Always);
1443   BufferOffset as_mla(Register dest, Register acc, Register src1, Register src2,
1444                       SBit s = LeaveCC, Condition c = Always);
1445   BufferOffset as_umaal(Register dest1, Register dest2, Register src1,
1446                         Register src2, Condition c = Always);
1447   BufferOffset as_mls(Register dest, Register acc, Register src1, Register src2,
1448                       Condition c = Always);
1449   BufferOffset as_umull(Register dest1, Register dest2, Register src1,
1450                         Register src2, SBit s = LeaveCC, Condition c = Always);
1451   BufferOffset as_umlal(Register dest1, Register dest2, Register src1,
1452                         Register src2, SBit s = LeaveCC, Condition c = Always);
1453   BufferOffset as_smull(Register dest1, Register dest2, Register src1,
1454                         Register src2, SBit s = LeaveCC, Condition c = Always);
1455   BufferOffset as_smlal(Register dest1, Register dest2, Register src1,
1456                         Register src2, SBit s = LeaveCC, Condition c = Always);
1457 
1458   BufferOffset as_sdiv(Register dest, Register num, Register div,
1459                        Condition c = Always);
1460   BufferOffset as_udiv(Register dest, Register num, Register div,
1461                        Condition c = Always);
1462   BufferOffset as_clz(Register dest, Register src, Condition c = Always);
1463 
1464   // Data transfer instructions: ldr, str, ldrb, strb.
1465   // Using an int to differentiate between 8 bits and 32 bits is overkill.
1466   BufferOffset as_dtr(LoadStore ls, int size, Index mode, Register rt,
1467                       DTRAddr addr, Condition c = Always);
1468 
1469   static void as_dtr_patch(LoadStore ls, int size, Index mode, Register rt,
1470                            DTRAddr addr, Condition c, uint32_t* dest);
1471 
1472   // Handles all of the other integral data transferring functions:
1473   // ldrsb, ldrsh, ldrd, etc. The size is given in bits.
1474   BufferOffset as_extdtr(LoadStore ls, int size, bool IsSigned, Index mode,
1475                          Register rt, EDtrAddr addr, Condition c = Always);
1476 
1477   BufferOffset as_dtm(LoadStore ls, Register rn, uint32_t mask, DTMMode mode,
1478                       DTMWriteBack wb, Condition c = Always);
1479 
1480   // Overwrite a pool entry with new data.
1481   static void WritePoolEntry(Instruction* addr, Condition c, uint32_t data);
1482 
1483   // Load a 32 bit immediate from a pool into a register.
1484   BufferOffset as_Imm32Pool(Register dest, uint32_t value,
1485                             Condition c = Always);
1486 
1487   // Load a 64 bit floating point immediate from a pool into a register.
1488   BufferOffset as_FImm64Pool(VFPRegister dest, double value,
1489                              Condition c = Always);
1490   // Load a 32 bit floating point immediate from a pool into a register.
1491   BufferOffset as_FImm32Pool(VFPRegister dest, float value,
1492                              Condition c = Always);
1493 
1494   // Atomic instructions: ldrexd, ldrex, ldrexh, ldrexb, strexd, strex, strexh,
1495   // strexb.
1496   //
1497   // The doubleword, halfword, and byte versions are available from ARMv6K
1498   // forward.
1499   //
1500   // The word versions are available from ARMv6 forward and can be used to
1501   // implement the halfword and byte versions on older systems.
1502 
1503   // LDREXD rt, rt2, [rn].  Constraint: rt even register, rt2=rt+1.
1504   BufferOffset as_ldrexd(Register rt, Register rt2, Register rn,
1505                          Condition c = Always);
1506 
1507   // LDREX rt, [rn]
1508   BufferOffset as_ldrex(Register rt, Register rn, Condition c = Always);
1509   BufferOffset as_ldrexh(Register rt, Register rn, Condition c = Always);
1510   BufferOffset as_ldrexb(Register rt, Register rn, Condition c = Always);
1511 
1512   // STREXD rd, rt, rt2, [rn].  Constraint: rt even register, rt2=rt+1.
1513   BufferOffset as_strexd(Register rd, Register rt, Register rt2, Register rn,
1514                          Condition c = Always);
1515 
1516   // STREX rd, rt, [rn].  Constraint: rd != rn, rd != rt.
1517   BufferOffset as_strex(Register rd, Register rt, Register rn,
1518                         Condition c = Always);
1519   BufferOffset as_strexh(Register rd, Register rt, Register rn,
1520                          Condition c = Always);
1521   BufferOffset as_strexb(Register rd, Register rt, Register rn,
1522                          Condition c = Always);
1523 
1524   // CLREX
1525   BufferOffset as_clrex();
1526 
1527   // Memory synchronization.
1528   // These are available from ARMv7 forward.
1529   BufferOffset as_dmb(BarrierOption option = BarrierSY);
1530   BufferOffset as_dsb(BarrierOption option = BarrierSY);
1531   BufferOffset as_isb();
1532 
1533   // Memory synchronization for architectures before ARMv7.
1534   BufferOffset as_dsb_trap();
1535   BufferOffset as_dmb_trap();
1536   BufferOffset as_isb_trap();
1537 
1538   // Speculation barrier
1539   BufferOffset as_csdb();
1540 
1541   // Control flow stuff:
1542 
1543   // bx can *only* branch to a register never to an immediate.
1544   BufferOffset as_bx(Register r, Condition c = Always);
1545 
1546   // Branch can branch to an immediate *or* to a register. Branches to
1547   // immediates are pc relative, branches to registers are absolute.
1548   BufferOffset as_b(BOffImm off, Condition c, Label* documentation = nullptr);
1549 
1550   BufferOffset as_b(Label* l, Condition c = Always);
1551   BufferOffset as_b(BOffImm off, Condition c, BufferOffset inst);
1552 
1553   // blx can go to either an immediate or a register. When blx'ing to a
1554   // register, we change processor mode depending on the low bit of the
1555   // register when blx'ing to an immediate, we *always* change processor
1556   // state.
1557   BufferOffset as_blx(Label* l);
1558 
1559   BufferOffset as_blx(Register r, Condition c = Always);
1560   BufferOffset as_bl(BOffImm off, Condition c, Label* documentation = nullptr);
  // bl can only branch+link to an immediate, never to a register; it never
  // changes processor state.
1563   BufferOffset as_bl();
1564   // bl #imm can have a condition code, blx #imm cannot.
1565   // blx reg can be conditional.
1566   BufferOffset as_bl(Label* l, Condition c);
1567   BufferOffset as_bl(BOffImm off, Condition c, BufferOffset inst);
1568 
1569   BufferOffset as_mrs(Register r, Condition c = Always);
1570   BufferOffset as_msr(Register r, Condition c = Always);
1571 
  // VFP instructions!
 private:
  // Bit 8 of a VFP instruction selects double (1) vs. single (0) precision.
  enum vfp_size { IsDouble = 1 << 8, IsSingle = 0 << 8 };

  BufferOffset writeVFPInst(vfp_size sz, uint32_t blob);

  static void WriteVFPInstStatic(vfp_size sz, uint32_t blob, uint32_t* dest);

  // Unityped variants: all registers hold the same (ieee754 single/double)
  // type; notably not included are vcvt; vmov vd, #imm; vmov rt, vn.
  BufferOffset as_vfp_float(VFPRegister vd, VFPRegister vn, VFPRegister vm,
                            VFPOp op, Condition c = Always);

 public:
  // Three-operand VFP arithmetic: vd = vn OP vm.
  BufferOffset as_vadd(VFPRegister vd, VFPRegister vn, VFPRegister vm,
                       Condition c = Always);
  BufferOffset as_vdiv(VFPRegister vd, VFPRegister vn, VFPRegister vm,
                       Condition c = Always);
  BufferOffset as_vmul(VFPRegister vd, VFPRegister vn, VFPRegister vm,
                       Condition c = Always);
  BufferOffset as_vnmul(VFPRegister vd, VFPRegister vn, VFPRegister vm,
                        Condition c = Always);
  BufferOffset as_vnmla(VFPRegister vd, VFPRegister vn, VFPRegister vm,
                        Condition c = Always);
  BufferOffset as_vnmls(VFPRegister vd, VFPRegister vn, VFPRegister vm,
                        Condition c = Always);
  // Two-operand VFP unary operations: vd = OP(vm).
  BufferOffset as_vneg(VFPRegister vd, VFPRegister vm, Condition c = Always);
  BufferOffset as_vsqrt(VFPRegister vd, VFPRegister vm, Condition c = Always);
  BufferOffset as_vabs(VFPRegister vd, VFPRegister vm, Condition c = Always);
  BufferOffset as_vsub(VFPRegister vd, VFPRegister vn, VFPRegister vm,
                       Condition c = Always);
  // Comparisons; vcmpz compares against zero.
  BufferOffset as_vcmp(VFPRegister vd, VFPRegister vm, Condition c = Always);
  BufferOffset as_vcmpz(VFPRegister vd, Condition c = Always);
1605 
  // Specifically, a move between two same sized-registers.
  BufferOffset as_vmov(VFPRegister vd, VFPRegister vsrc, Condition c = Always);

  // Transfer between Core and VFP.
  enum FloatToCore_ { FloatToCore = 1 << 20, CoreToFloat = 0 << 20 };

 private:
  enum VFPXferSize { WordTransfer = 0x02000010, DoubleTransfer = 0x00400010 };

 public:
  // Unlike the next function, moving between the core registers and vfp
  // registers can't be *that* properly typed. Namely, since I don't want to
  // munge the type VFPRegister to also include core registers. Thus, the core
  // and vfp registers are passed in based on their type, and src/dest is
  // determined by the FloatToCore_ flag.

  BufferOffset as_vxfer(Register vt1, Register vt2, VFPRegister vm,
                        FloatToCore_ f2c, Condition c = Always, int idx = 0);

  // Our encoding actually allows just the src and the dest (and their types)
  // to uniquely specify the encoding that we are going to use.
  BufferOffset as_vcvt(VFPRegister vd, VFPRegister vm, bool useFPSCR = false,
                       Condition c = Always);

  // Hard coded to a 32 bit fixed width result for now.
  BufferOffset as_vcvtFixed(VFPRegister vd, bool isSigned, uint32_t fixedPoint,
                            bool toFixed, Condition c = Always);

  // Transfer between VFP and memory.
  BufferOffset as_vdtr(LoadStore ls, VFPRegister vd, VFPAddr addr,
                       Condition c = Always /* vfp doesn't have a wb option*/);

  // Patch an existing vdtr instruction in place at `dest`.
  static void as_vdtr_patch(LoadStore ls, VFPRegister vd, VFPAddr addr,
                            Condition c /* vfp doesn't have a wb option */,
                            uint32_t* dest);

  // VFP's ldm/stm work differently from the standard arm ones. You can only
  // transfer a range.

  BufferOffset as_vdtm(LoadStore st, Register rn, VFPRegister vd, int length,
                       /* also has update conditions */ Condition c = Always);

  // vldr/vstr variants that handle unaligned accesses.  These encode as NEON
  // single-element instructions and can only be used if NEON is available.
  // Here, vd must be tagged as a float or double register.
  BufferOffset as_vldr_unaligned(VFPRegister vd, Register rn);
  BufferOffset as_vstr_unaligned(VFPRegister vd, Register rn);

  // Load a VFPImm-encodable immediate into vd.
  BufferOffset as_vimm(VFPRegister vd, VFPImm imm, Condition c = Always);

  // vmrs/vmsr: transfer between a core register and the VFP status register.
  BufferOffset as_vmrs(Register r, Condition c = Always);
  BufferOffset as_vmsr(Register r, Condition c = Always);
1658 
  // Label operations.
  bool nextLink(BufferOffset b, BufferOffset* next);
  void bind(Label* label, BufferOffset boff = BufferOffset());
  // The current write position in the instruction buffer.
  uint32_t currentOffset() { return nextOffset().getOffset(); }
  void retarget(Label* label, Label* target);
  // I'm going to pretend this doesn't exist for now.
  void retarget(Label* label, void* target, RelocationKind reloc);

  static void Bind(uint8_t* rawCode, const CodeLabel& label);

  void as_bkpt();
  BufferOffset as_illegal_trap();

 public:
  // GC tracing hooks for jump and data relocation tables.
  static void TraceJumpRelocations(JSTracer* trc, JitCode* code,
                                   CompactBufferReader& reader);
  static void TraceDataRelocations(JSTracer* trc, JitCode* code,
                                   CompactBufferReader& reader);
1677 
assertNoGCThings()1678   void assertNoGCThings() const {
1679 #ifdef DEBUG
1680     MOZ_ASSERT(dataRelocations_.length() == 0);
1681     for (auto& j : jumps_) {
1682       MOZ_ASSERT(j.kind() == RelocationKind::HARDCODED);
1683     }
1684 #endif
1685   }
1686 
SupportsFloatingPoint()1687   static bool SupportsFloatingPoint() { return HasVFP(); }
SupportsUnalignedAccesses()1688   static bool SupportsUnalignedAccesses() { return HasARMv7(); }
SupportsFastUnalignedAccesses()1689   static bool SupportsFastUnalignedAccesses() { return false; }
1690 
HasRoundInstruction(RoundingMode mode)1691   static bool HasRoundInstruction(RoundingMode mode) { return false; }
1692 
1693  protected:
addPendingJump(BufferOffset src,ImmPtr target,RelocationKind kind)1694   void addPendingJump(BufferOffset src, ImmPtr target, RelocationKind kind) {
1695     enoughMemory_ &= jumps_.append(RelativePatch(target.value, kind));
1696     if (kind == RelocationKind::JITCODE) {
1697       jumpRelocations_.writeUnsigned(src.getOffset());
1698     }
1699   }
1700 
1701  public:
1702   // The buffer is about to be linked, make sure any constant pools or excess
1703   // bookkeeping has been flushed to the instruction stream.
flush()1704   void flush() {
1705     MOZ_ASSERT(!isFinished);
1706     m_buffer.flushPool();
1707     return;
1708   }
1709 
comment(const char * msg)1710   void comment(const char* msg) {
1711 #ifdef JS_DISASM_ARM
1712     spew_.spew("; %s", msg);
1713 #endif
1714   }
1715 
1716   // Copy the assembly code to the given buffer, and perform any pending
1717   // relocations relying on the target address.
1718   void executableCopy(uint8_t* buffer);
1719 
1720   // Actual assembly emitting functions.
1721 
1722   // Since I can't think of a reasonable default for the mode, I'm going to
1723   // leave it as a required argument.
1724   void startDataTransferM(LoadStore ls, Register rm, DTMMode mode,
1725                           DTMWriteBack update = NoWriteBack,
1726                           Condition c = Always) {
1727     MOZ_ASSERT(!dtmActive);
1728     dtmUpdate = update;
1729     dtmBase = rm;
1730     dtmLoadStore = ls;
1731     dtmLastReg = -1;
1732     dtmRegBitField = 0;
1733     dtmActive = 1;
1734     dtmCond = c;
1735     dtmMode = mode;
1736   }
1737 
transferReg(Register rn)1738   void transferReg(Register rn) {
1739     MOZ_ASSERT(dtmActive);
1740     MOZ_ASSERT(rn.code() > dtmLastReg);
1741     dtmRegBitField |= 1 << rn.code();
1742     if (dtmLoadStore == IsLoad && rn.code() == 13 && dtmBase.code() == 13) {
1743       MOZ_CRASH("ARM Spec says this is invalid");
1744     }
1745   }
finishDataTransfer()1746   void finishDataTransfer() {
1747     dtmActive = false;
1748     as_dtm(dtmLoadStore, dtmBase, dtmRegBitField, dtmMode, dtmUpdate, dtmCond);
1749   }
1750 
1751   void startFloatTransferM(LoadStore ls, Register rm, DTMMode mode,
1752                            DTMWriteBack update = NoWriteBack,
1753                            Condition c = Always) {
1754     MOZ_ASSERT(!dtmActive);
1755     dtmActive = true;
1756     dtmUpdate = update;
1757     dtmLoadStore = ls;
1758     dtmBase = rm;
1759     dtmCond = c;
1760     dtmLastReg = -1;
1761     dtmMode = mode;
1762     dtmDelta = 0;
1763   }
transferFloatReg(VFPRegister rn)1764   void transferFloatReg(VFPRegister rn) {
1765     if (dtmLastReg == -1) {
1766       vdtmFirstReg = rn.code();
1767     } else {
1768       if (dtmDelta == 0) {
1769         dtmDelta = rn.code() - dtmLastReg;
1770         MOZ_ASSERT(dtmDelta == 1 || dtmDelta == -1);
1771       }
1772       MOZ_ASSERT(dtmLastReg >= 0);
1773       MOZ_ASSERT(rn.code() == unsigned(dtmLastReg) + dtmDelta);
1774     }
1775 
1776     dtmLastReg = rn.code();
1777   }
finishFloatTransfer()1778   void finishFloatTransfer() {
1779     MOZ_ASSERT(dtmActive);
1780     dtmActive = false;
1781     MOZ_ASSERT(dtmLastReg != -1);
1782     dtmDelta = dtmDelta ? dtmDelta : 1;
1783     // The operand for the vstr/vldr instruction is the lowest register in the
1784     // range.
1785     int low = std::min(dtmLastReg, vdtmFirstReg);
1786     int high = std::max(dtmLastReg, vdtmFirstReg);
1787     // Fencepost problem.
1788     int len = high - low + 1;
1789     // vdtm can only transfer 16 registers at once.  If we need to transfer
1790     // more, then either hoops are necessary, or we need to be updating the
1791     // register.
1792     MOZ_ASSERT_IF(len > 16, dtmUpdate == WriteBack);
1793 
1794     int adjustLow = dtmLoadStore == IsStore ? 0 : 1;
1795     int adjustHigh = dtmLoadStore == IsStore ? -1 : 0;
1796     while (len > 0) {
1797       // Limit the instruction to 16 registers.
1798       int curLen = std::min(len, 16);
1799       // If it is a store, we want to start at the high end and move down
1800       // (e.g. vpush d16-d31; vpush d0-d15).
1801       int curStart = (dtmLoadStore == IsStore) ? high - curLen + 1 : low;
1802       as_vdtm(dtmLoadStore, dtmBase,
1803               VFPRegister(FloatRegister::FromCode(curStart)), curLen, dtmCond);
1804       // Update the bounds.
1805       low += adjustLow * curLen;
1806       high += adjustHigh * curLen;
1807       // Update the length parameter.
1808       len -= curLen;
1809     }
1810   }
1811 
 private:
  // State for the start*TransferM / transfer*Reg / finish*Transfer protocol
  // above.
  int dtmRegBitField;  // Bit mask of core registers to transfer.
  int vdtmFirstReg;    // First VFP register added to a float transfer.
  int dtmLastReg;      // Code of the most recently added register, or -1.
  int dtmDelta;        // Float-transfer direction: +1, -1, or 0 (unset).
  Register dtmBase;    // Base address register for the transfer.
  DTMWriteBack dtmUpdate;
  DTMMode dtmMode;
  LoadStore dtmLoadStore;
  bool dtmActive;  // True between start*TransferM and finish*Transfer.
  Condition dtmCond;

 public:
  // Instruction patterns used to pad the buffer for alignment.
  enum {
    PadForAlign8 = (int)0x00,
    PadForAlign16 = (int)0x0000,
    PadForAlign32 = (int)0xe12fff7f  // 'bkpt 0xffff'
  };
1830 
  // API for speaking with the IonAssemblerBufferWithConstantPools: generate
  // an initial placeholder instruction that we want to later fix up.
  static void InsertIndexIntoTag(uint8_t* load, uint32_t index);

  // Take the stub value that was written in before, and write in an actual
  // load using the index we'd computed previously as well as the address of
  // the pool start.
  static void PatchConstantPoolLoad(void* loadAddr, void* constPoolAddr);

  // We're not tracking short-range branches for ARM for now, so this must
  // never be called.
  static void PatchShortRangeBranchToVeneer(ARMBuffer*, unsigned rangeIdx,
                                            BufferOffset deadline,
                                            BufferOffset veneer) {
    MOZ_CRASH();
  }
  // END API
1847 
  // Move our entire pool into the instruction stream. This is to force an
  // opportunistic dump of the pool, preferably when it is more convenient to
  // do a dump.
  void flushBuffer();
  // Suppress constant-pool placement for the next maxInst instructions.
  void enterNoPool(size_t maxInst);
  void leaveNoPool();
  // Suppress automatic nop insertion (see AutoForbidNops below).
  void enterNoNops();
  void leaveNoNops();

  static void WritePoolHeader(uint8_t* start, Pool* p, bool isNatural);
  static void WritePoolGuard(BufferOffset branch, Instruction* inst,
                             BufferOffset dest);
NopSize()1862   static uint32_t NopSize() { return 4; }
1863   static void PatchWrite_NearCall(CodeLocationLabel start,
1864                                   CodeLocationLabel toCall);
1865   static void PatchDataWithValueCheck(CodeLocationLabel label,
1866                                       PatchedImmPtr newValue,
1867                                       PatchedImmPtr expectedValue);
1868   static void PatchDataWithValueCheck(CodeLocationLabel label, ImmPtr newValue,
1869                                       ImmPtr expectedValue);
1870   static void PatchWrite_Imm32(CodeLocationLabel label, Imm32 imm);
1871 
AlignDoubleArg(uint32_t offset)1872   static uint32_t AlignDoubleArg(uint32_t offset) { return (offset + 1) & ~1; }
1873   static uint8_t* NextInstruction(uint8_t* instruction,
1874                                   uint32_t* count = nullptr);
1875 
1876   // Toggle a jmp or cmp emitted by toggledJump().
1877   static void ToggleToJmp(CodeLocationLabel inst_);
1878   static void ToggleToCmp(CodeLocationLabel inst_);
1879 
1880   static uint8_t* BailoutTableStart(uint8_t* code);
1881 
1882   static size_t ToggledCallSize(uint8_t* code);
1883   static void ToggleCall(CodeLocationLabel inst_, bool enabled);
1884 
1885   void processCodeLabels(uint8_t* rawCode);
1886 
verifyHeapAccessDisassembly(uint32_t begin,uint32_t end,const Disassembler::HeapAccess & heapAccess)1887   void verifyHeapAccessDisassembly(uint32_t begin, uint32_t end,
1888                                    const Disassembler::HeapAccess& heapAccess) {
1889     // Implement this if we implement a disassembler.
1890   }
1891 };  // Assembler
1892 
1893 // An Instruction is a structure for both encoding and decoding any and all ARM
1894 // instructions. Many classes have not been implemented thus far.
class Instruction {
  // The raw 32-bit ARM instruction word, condition code included.
  uint32_t data;

 protected:
  // This is not for defaulting to always, this is for instructions that
  // cannot be made conditional, and have the usually invalid 4b1111 cond
  // field.
  explicit Instruction(uint32_t data_, bool fake = false)
      : data(data_ | 0xf0000000) {
    MOZ_ASSERT(fake || ((data_ & 0xf0000000) == 0));
  }
  // Standard constructor: ors the condition code into the top four bits,
  // which must be clear in data_.
  Instruction(uint32_t data_, Assembler::Condition c)
      : data(data_ | (uint32_t)c) {
    MOZ_ASSERT((data_ & 0xf0000000) == 0);
  }
  // You should never create an instruction directly. You should create a more
  // specific instruction which will eventually call one of these constructors
  // for you.
 public:
  // The encoded 32-bit instruction word.
  uint32_t encode() const { return data; }
  // Check if this instruction is really a particular case.
  template <class C>
  bool is() const {
    return C::IsTHIS(*this);
  }

  // Safely get a more specific variant of this pointer.
  template <class C>
  C* as() const {
    return C::AsTHIS(*this);
  }

  const Instruction& operator=(Instruction src) {
    data = src.data;
    return *this;
  }
  // Since almost all instructions have condition codes, the condition code
  // extractor resides in the base class. Asserts the cond field is not the
  // special 4b1111 (unconditional) encoding.
  Assembler::Condition extractCond() const {
    MOZ_ASSERT(data >> 28 != 0xf,
               "The instruction does not have condition code");
    return (Assembler::Condition)(data & 0xf0000000);
  }

  // Sometimes, an api wants a uint32_t (or a pointer to it) rather than an
  // instruction. raw() just coerces this into a pointer to a uint32_t.
  const uint32_t* raw() const { return &data; }
  // ARM instructions are always 4 bytes.
  uint32_t size() const { return 4; }
};  // Instruction
1945 
1946 // Make sure that it is the right size.
1947 static_assert(sizeof(Instruction) == 4);
1948 
advanceRaw(ptrdiff_t instructions)1949 inline void InstructionIterator::advanceRaw(ptrdiff_t instructions) {
1950   inst_ = inst_ + instructions;
1951 }
1952 
1953 // Data Transfer Instructions.
class InstDTR : public Instruction {
 public:
  // Bit 22 selects byte (1) vs. word (0) transfers.
  enum IsByte_ { IsByte = 0x00400000, IsWord = 0x00000000 };
  // Fixed opcode bits identifying a single data-transfer instruction, and
  // the mask used to test for them.
  static const int IsDTR = 0x04000000;
  static const int IsDTRMask = 0x0c000000;

  // TODO: Replace the initialization with something that is safer.
  InstDTR(LoadStore ls, IsByte_ ib, Index mode, Register rt, DTRAddr addr,
          Assembler::Condition c)
      : Instruction(ls | ib | mode | RT(rt) | addr.encode() | IsDTR, c) {}

  static bool IsTHIS(const Instruction& i);
  static InstDTR* AsTHIS(const Instruction& i);
};
1968 static_assert(sizeof(InstDTR) == sizeof(Instruction));
1969 
// A word-sized load (ldr) data-transfer instruction.
class InstLDR : public InstDTR {
 public:
  InstLDR(Index mode, Register rt, DTRAddr addr, Assembler::Condition c)
      : InstDTR(IsLoad, IsWord, mode, rt, addr, c) {}

  static bool IsTHIS(const Instruction& i);
  static InstLDR* AsTHIS(const Instruction& i);

  // The 12-bit immediate offset, negated when the U (up) bit is clear.
  int32_t signedOffset() const {
    int32_t offset = encode() & 0xfff;
    if (IsUp_(encode() & IsUp) != IsUp) {
      return -offset;
    }
    return offset;
  }
  // The word this pc-relative load reads, accounting for the 8-byte pc bias.
  uint32_t* dest() const {
    int32_t offset = signedOffset();
    // When patching the load in PatchConstantPoolLoad, we ensure that the
    // offset is a multiple of 4, offset by 8 bytes from the actual
    // location.  Indeed, when the base register is PC, ARM's 3 stages
    // pipeline design makes it that PC is off by 8 bytes (= 2 *
    // sizeof(uint32*)) when we actually executed it.
    MOZ_ASSERT(offset % 4 == 0);
    offset >>= 2;
    return (uint32_t*)raw() + offset + 2;
  }
};
1997 static_assert(sizeof(InstDTR) == sizeof(InstLDR));
1998 
// The canonical nop instruction.
class InstNOP : public Instruction {
 public:
  static const uint32_t NopInst = 0x0320f000;

  InstNOP() : Instruction(NopInst, Assembler::Always) {}

  static bool IsTHIS(const Instruction& i);
  static InstNOP* AsTHIS(Instruction& i);
};
2008 
2009 // Branching to a register, or calling a register
// Branching to a register, or calling a register
class InstBranchReg : public Instruction {
 protected:
  // Don't use BranchTag yourself, use a derived instruction.
  enum BranchTag { IsBX = 0x012fff10, IsBLX = 0x012fff30 };

  // Mask covering everything but the cond field and the target register.
  static const uint32_t IsBRegMask = 0x0ffffff0;

  InstBranchReg(BranchTag tag, Register rm, Assembler::Condition c)
      : Instruction(tag | rm.code(), c) {}

 public:
  static bool IsTHIS(const Instruction& i);
  static InstBranchReg* AsTHIS(const Instruction& i);

  // Get the register that is being branched to
  void extractDest(Register* dest);
  // Make sure we are branching to a pre-known register
  bool checkDest(Register dest);
};
2029 static_assert(sizeof(InstBranchReg) == sizeof(Instruction));
2030 
2031 // Branching to an immediate offset, or calling an immediate offset
// Branching to an immediate offset, or calling an immediate offset
class InstBranchImm : public Instruction {
 protected:
  enum BranchTag { IsB = 0x0a000000, IsBL = 0x0b000000 };

  // Mask covering the opcode bits that distinguish b from bl.
  static const uint32_t IsBImmMask = 0x0f000000;

  InstBranchImm(BranchTag tag, BOffImm off, Assembler::Condition c)
      : Instruction(tag | off.encode(), c) {}

 public:
  static bool IsTHIS(const Instruction& i);
  static InstBranchImm* AsTHIS(const Instruction& i);

  // Extract the encoded branch offset.
  void extractImm(BOffImm* dest);
};
2047 static_assert(sizeof(InstBranchImm) == sizeof(Instruction));
2048 
// Very specific branching instructions.

// bx reg: branch to the address held in a register.
class InstBXReg : public InstBranchReg {
 public:
  static bool IsTHIS(const Instruction& i);
  static InstBXReg* AsTHIS(const Instruction& i);
};
2055 
// blx reg: branch-and-link to the address held in a register.
class InstBLXReg : public InstBranchReg {
 public:
  InstBLXReg(Register reg, Assembler::Condition c)
      : InstBranchReg(IsBLX, reg, c) {}

  static bool IsTHIS(const Instruction& i);
  static InstBLXReg* AsTHIS(const Instruction& i);
};
2064 
// b #imm: branch to a pc-relative immediate offset.
class InstBImm : public InstBranchImm {
 public:
  InstBImm(BOffImm off, Assembler::Condition c) : InstBranchImm(IsB, off, c) {}

  static bool IsTHIS(const Instruction& i);
  static InstBImm* AsTHIS(const Instruction& i);
};
2072 
// bl #imm: branch-and-link to a pc-relative immediate offset.
class InstBLImm : public InstBranchImm {
 public:
  InstBLImm(BOffImm off, Assembler::Condition c)
      : InstBranchImm(IsBL, off, c) {}

  static bool IsTHIS(const Instruction& i);
  static InstBLImm* AsTHIS(const Instruction& i);
};
2081 
// Both movw and movt. The layout of both the immediate and the destination
// register is the same so the code is being shared.
class InstMovWT : public Instruction {
 protected:
  // Opcode bits selecting movw (IsW) vs. movt (IsT).
  enum WT { IsW = 0x03000000, IsT = 0x03400000 };
  static const uint32_t IsWTMask = 0x0ff00000;

  InstMovWT(Register rd, Imm16 imm, WT wt, Assembler::Condition c)
      : Instruction(RD(rd) | imm.encode() | wt, c) {}

 public:
  // Accessors/checkers for the encoded 16-bit immediate and destination.
  void extractImm(Imm16* dest);
  void extractDest(Register* dest);
  bool checkImm(Imm16 dest);
  bool checkDest(Register dest);

  static bool IsTHIS(Instruction& i);
  static InstMovWT* AsTHIS(Instruction& i);
};
2101 static_assert(sizeof(InstMovWT) == sizeof(Instruction));
2102 
// movw rd, #imm16: load a 16-bit immediate into the low half of rd.
class InstMovW : public InstMovWT {
 public:
  InstMovW(Register rd, Imm16 imm, Assembler::Condition c)
      : InstMovWT(rd, imm, IsW, c) {}

  static bool IsTHIS(const Instruction& i);
  static InstMovW* AsTHIS(const Instruction& i);
};
2111 
// movt rd, #imm16: load a 16-bit immediate into the high half of rd.
class InstMovT : public InstMovWT {
 public:
  InstMovT(Register rd, Imm16 imm, Assembler::Condition c)
      : InstMovWT(rd, imm, IsT, c) {}

  static bool IsTHIS(const Instruction& i);
  static InstMovT* AsTHIS(const Instruction& i);
};
2120 
// A data-processing (ALU) instruction.
class InstALU : public Instruction {
  static const int32_t ALUMask = 0xc << 24;

 public:
  InstALU(Register rd, Register rn, Operand2 op2, ALUOp op, SBit s,
          Assembler::Condition c)
      : Instruction(maybeRD(rd) | maybeRN(rn) | op2.encode() | op | s, c) {}

  static bool IsTHIS(const Instruction& i);
  static InstALU* AsTHIS(const Instruction& i);

  // Accessors/checkers for the opcode, destination register, first operand
  // register, and second operand.
  void extractOp(ALUOp* ret);
  bool checkOp(ALUOp op);
  void extractDest(Register* ret);
  bool checkDest(Register rd);
  void extractOp1(Register* ret);
  bool checkOp1(Register rn);
  Operand2 extractOp2();
};
2140 
// An ALU instruction that is specifically a cmp.
class InstCMP : public InstALU {
 public:
  static bool IsTHIS(const Instruction& i);
  static InstCMP* AsTHIS(const Instruction& i);
};
2146 
// An ALU instruction that is specifically a mov.
class InstMOV : public InstALU {
 public:
  static bool IsTHIS(const Instruction& i);
  static InstMOV* AsTHIS(const Instruction& i);
};
2152 
// Compile-time iterator over instructions, with a safe interface that
// references not-necessarily-linear Instructions by linear BufferOffset.
class BufferInstructionIterator
    : public ARMBuffer::AssemblerBufferInstIterator {
 public:
  BufferInstructionIterator(BufferOffset bo, ARMBuffer* buffer)
      : ARMBuffer::AssemblerBufferInstIterator(bo, buffer) {}

  // Advances the buffer to the next intentionally-inserted instruction and
  // returns it.
  Instruction* next() {
    advance(cur()->size());
    maybeSkipAutomaticInstructions();
    return cur();
  }

  // Advances the BufferOffset past any automatically-inserted instructions.
  Instruction* maybeSkipAutomaticInstructions();
};
2171 
// There are 4 integer argument registers (r0-r3; see GetIntArgReg).
static const uint32_t NumIntArgRegs = 4;

// There are 16 *float* registers available for arguments.
// If doubles are used, only half the number of registers are available.
static const uint32_t NumFloatArgRegs = 16;
2177 
GetIntArgReg(uint32_t usedIntArgs,uint32_t usedFloatArgs,Register * out)2178 static inline bool GetIntArgReg(uint32_t usedIntArgs, uint32_t usedFloatArgs,
2179                                 Register* out) {
2180   if (usedIntArgs >= NumIntArgRegs) {
2181     return false;
2182   }
2183 
2184   *out = Register::FromCode(usedIntArgs);
2185   return true;
2186 }
2187 
2188 // Get a register in which we plan to put a quantity that will be used as an
2189 // integer argument. This differs from GetIntArgReg in that if we have no more
2190 // actual argument registers to use we will fall back on using whatever
2191 // CallTempReg* don't overlap the argument registers, and only fail once those
2192 // run out too.
GetTempRegForIntArg(uint32_t usedIntArgs,uint32_t usedFloatArgs,Register * out)2193 static inline bool GetTempRegForIntArg(uint32_t usedIntArgs,
2194                                        uint32_t usedFloatArgs, Register* out) {
2195   if (GetIntArgReg(usedIntArgs, usedFloatArgs, out)) {
2196     return true;
2197   }
2198 
2199   // Unfortunately, we have to assume things about the point at which
2200   // GetIntArgReg returns false, because we need to know how many registers it
2201   // can allocate.
2202   usedIntArgs -= NumIntArgRegs;
2203   if (usedIntArgs >= NumCallTempNonArgRegs) {
2204     return false;
2205   }
2206 
2207   *out = CallTempNonArgRegs[usedIntArgs];
2208   return true;
2209 }
2210 
2211 #if defined(JS_CODEGEN_ARM_HARDFP) || defined(JS_SIMULATOR_ARM)
2212 
GetFloat32ArgReg(uint32_t usedIntArgs,uint32_t usedFloatArgs,FloatRegister * out)2213 static inline bool GetFloat32ArgReg(uint32_t usedIntArgs,
2214                                     uint32_t usedFloatArgs,
2215                                     FloatRegister* out) {
2216   MOZ_ASSERT(UseHardFpABI());
2217   if (usedFloatArgs >= NumFloatArgRegs) {
2218     return false;
2219   }
2220   *out = VFPRegister(usedFloatArgs, VFPRegister::Single);
2221   return true;
2222 }
GetDoubleArgReg(uint32_t usedIntArgs,uint32_t usedFloatArgs,FloatRegister * out)2223 static inline bool GetDoubleArgReg(uint32_t usedIntArgs, uint32_t usedFloatArgs,
2224                                    FloatRegister* out) {
2225   MOZ_ASSERT(UseHardFpABI());
2226   MOZ_ASSERT((usedFloatArgs % 2) == 0);
2227   if (usedFloatArgs >= NumFloatArgRegs) {
2228     return false;
2229   }
2230   *out = VFPRegister(usedFloatArgs >> 1, VFPRegister::Double);
2231   return true;
2232 }
2233 
2234 #endif
2235 
2236 class DoubleEncoder {
2237   struct DoubleEntry {
2238     uint32_t dblTop;
2239     datastore::Imm8VFPImmData data;
2240   };
2241 
2242   static const DoubleEntry table[256];
2243 
2244  public:
lookup(uint32_t top,datastore::Imm8VFPImmData * ret)2245   bool lookup(uint32_t top, datastore::Imm8VFPImmData* ret) const {
2246     for (int i = 0; i < 256; i++) {
2247       if (table[i].dblTop == top) {
2248         *ret = table[i].data;
2249         return true;
2250       }
2251     }
2252     return false;
2253   }
2254 };
2255 
// Forbids nop filling for testing purposes. Not nestable.
class AutoForbidNops {
 protected:
  Assembler* masm_;

 public:
  // RAII: enters the no-nops region on construction, leaves it on
  // destruction.
  explicit AutoForbidNops(Assembler* masm) : masm_(masm) {
    masm_->enterNoNops();
  }
  ~AutoForbidNops() { masm_->leaveNoNops(); }
};
2267 
// Forbids both constant-pool dumps and nop filling within its scope.
class AutoForbidPoolsAndNops : public AutoForbidNops {
 public:
  // The maxInst argument is the maximum number of word sized instructions
  // that will be allocated within this context. It is used to determine if
  // the pool needs to be dumped before entering this content. The debug code
  // checks that no more than maxInst instructions are actually allocated.
  //
  // Allocation of pool entries is not supported within this content so the
  // code can not use large integers or float constants etc.
  AutoForbidPoolsAndNops(Assembler* masm, size_t maxInst)
      : AutoForbidNops(masm) {
    masm_->enterNoPool(maxInst);
  }

  ~AutoForbidPoolsAndNops() { masm_->leaveNoPool(); }
};
2284 
2285 }  // namespace jit
2286 }  // namespace js
2287 
2288 #endif /* jit_arm_Assembler_arm_h */
2289