/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/arm/Assembler-arm.h"

#include "mozilla/DebugOnly.h"
#include "mozilla/MathAlgorithms.h"

#include "jsutil.h"

#include "gc/Marking.h"
#include "jit/arm/disasm/Disasm-arm.h"
#include "jit/arm/MacroAssembler-arm.h"
#include "jit/ExecutableAllocator.h"
#include "jit/JitCompartment.h"
#include "jit/MacroAssembler.h"
#include "vm/JSCompartment.h"

using namespace js;
using namespace js::jit;

using mozilla::CountLeadingZeroes32;

using LabelDoc = DisassemblerSpew::LabelDoc;
using LiteralDoc = DisassemblerSpew::LiteralDoc;

void dbg_break() {}

// The ABIArgGenerator is used for making system ABI calls and for inter-wasm
// calls. The system ABI can either be SoftFp or HardFp, and inter-wasm calls
// are always HardFp calls. The initialization defaults to HardFp, and the ABI
// choice is made before any system ABI calls with the method "setUseHardFp".
ABIArgGenerator::ABIArgGenerator()
    : intRegIndex_(0),
      floatRegIndex_(0),
      stackOffset_(0),
      current_(),
      useHardFp_(true) {}

// See the "Parameter Passing" section of the "Procedure Call Standard for the
// ARM Architecture" documentation.
ABIArg ABIArgGenerator::softNext(MIRType type) {
  switch (type) {
    case MIRType::Int32:
    case MIRType::Pointer:
      if (intRegIndex_ == NumIntArgRegs) {
        current_ = ABIArg(stackOffset_);
        stackOffset_ += sizeof(uint32_t);
        break;
      }
      current_ = ABIArg(Register::FromCode(intRegIndex_));
      intRegIndex_++;
      break;
    case MIRType::Int64:
      // Make sure to use an even register index. Increase to next even number
      // when odd.
      intRegIndex_ = (intRegIndex_ + 1) & ~1;
      if (intRegIndex_ == NumIntArgRegs) {
        // Align the stack on 8 bytes.
        static const uint32_t align = sizeof(uint64_t) - 1;
        stackOffset_ = (stackOffset_ + align) & ~align;
        current_ = ABIArg(stackOffset_);
        stackOffset_ += sizeof(uint64_t);
        break;
      }
      current_ = ABIArg(Register::FromCode(intRegIndex_),
                        Register::FromCode(intRegIndex_ + 1));
      intRegIndex_ += 2;
      break;
    case MIRType::Float32:
      if (intRegIndex_ == NumIntArgRegs) {
        current_ = ABIArg(stackOffset_);
        stackOffset_ += sizeof(uint32_t);
        break;
      }
      current_ = ABIArg(Register::FromCode(intRegIndex_));
      intRegIndex_++;
      break;
    case MIRType::Double:
      // Make sure to use an even register index. Increase to next even number
      // when odd.
      intRegIndex_ = (intRegIndex_ + 1) & ~1;
      if (intRegIndex_ == NumIntArgRegs) {
        // Align the stack on 8 bytes.
        static const uint32_t align = sizeof(double) - 1;
        stackOffset_ = (stackOffset_ + align) & ~align;
        current_ = ABIArg(stackOffset_);
        stackOffset_ += sizeof(double);
        break;
      }
      current_ = ABIArg(Register::FromCode(intRegIndex_),
                        Register::FromCode(intRegIndex_ + 1));
      intRegIndex_ += 2;
      break;
    default:
      MOZ_CRASH("Unexpected argument type");
  }

  return current_;
}
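
// Worked example (illustrative, not part of the original source): under the
// soft-FP rules above, a hypothetical signature (int32, double, int32) is
// assigned as
//   arg0 -> r0
//   arg1 -> (r2, r3)    (r1 is skipped to reach an even register index)
//   arg2 -> stack offset 0
// since a double consumes an aligned core-register pair and r1 cannot start
// one, which then exhausts the NumIntArgRegs argument registers.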

ABIArg ABIArgGenerator::hardNext(MIRType type) {
  switch (type) {
    case MIRType::Int32:
    case MIRType::Pointer:
      if (intRegIndex_ == NumIntArgRegs) {
        current_ = ABIArg(stackOffset_);
        stackOffset_ += sizeof(uint32_t);
        break;
      }
      current_ = ABIArg(Register::FromCode(intRegIndex_));
      intRegIndex_++;
      break;
    case MIRType::Int64:
      // Make sure to use an even register index. Increase to next even number
      // when odd.
      intRegIndex_ = (intRegIndex_ + 1) & ~1;
      if (intRegIndex_ == NumIntArgRegs) {
        // Align the stack on 8 bytes.
        static const uint32_t align = sizeof(uint64_t) - 1;
        stackOffset_ = (stackOffset_ + align) & ~align;
        current_ = ABIArg(stackOffset_);
        stackOffset_ += sizeof(uint64_t);
        break;
      }
      current_ = ABIArg(Register::FromCode(intRegIndex_),
                        Register::FromCode(intRegIndex_ + 1));
      intRegIndex_ += 2;
      break;
    case MIRType::Float32:
      if (floatRegIndex_ == NumFloatArgRegs) {
        current_ = ABIArg(stackOffset_);
        stackOffset_ += sizeof(uint32_t);
        break;
      }
      current_ = ABIArg(VFPRegister(floatRegIndex_, VFPRegister::Single));
      floatRegIndex_++;
      break;
    case MIRType::Double:
      // Double registers are composed of 2 float registers, so we have to
      // skip any float register that cannot begin an aligned pair of float
      // registers in which a double value can be stored.
      floatRegIndex_ = (floatRegIndex_ + 1) & ~1;
      if (floatRegIndex_ == NumFloatArgRegs) {
        static const uint32_t align = sizeof(double) - 1;
        stackOffset_ = (stackOffset_ + align) & ~align;
        current_ = ABIArg(stackOffset_);
        stackOffset_ += sizeof(uint64_t);
        break;
      }
      current_ = ABIArg(VFPRegister(floatRegIndex_ >> 1, VFPRegister::Double));
      floatRegIndex_ += 2;
      break;
    default:
      MOZ_CRASH("Unexpected argument type");
  }

  return current_;
}

ABIArg ABIArgGenerator::next(MIRType type) {
  if (useHardFp_) return hardNext(type);
  return softNext(type);
}

bool js::jit::IsUnaligned(const wasm::MemoryAccessDesc& access) {
  if (!access.align()) return false;

  if (access.type() == Scalar::Float64 && access.align() >= 4) return false;

  return access.align() < access.byteSize();
}
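
// Illustrative examples (assumed values, not from the original source): a
// Float64 access with align() == 4 is treated as aligned above even though 4
// is only half the access size, while an Int32 access with align() == 2 is
// reported unaligned because 2 < 4.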

// Encode a standard register when it is being used as src1, the dest, and an
// extra register. These should never be called with an InvalidReg.
uint32_t js::jit::RT(Register r) {
  MOZ_ASSERT((r.code() & ~0xf) == 0);
  return r.code() << 12;
}

uint32_t js::jit::RN(Register r) {
  MOZ_ASSERT((r.code() & ~0xf) == 0);
  return r.code() << 16;
}

uint32_t js::jit::RD(Register r) {
  MOZ_ASSERT((r.code() & ~0xf) == 0);
  return r.code() << 12;
}

uint32_t js::jit::RM(Register r) {
  MOZ_ASSERT((r.code() & ~0xf) == 0);
  return r.code() << 8;
}
// Encode a standard register when it is being used as src1, the dest, and an
// extra register. For these, an InvalidReg is used to indicate an optional
// register that has been omitted.
uint32_t js::jit::maybeRT(Register r) {
  if (r == InvalidReg) return 0;

  MOZ_ASSERT((r.code() & ~0xf) == 0);
  return r.code() << 12;
}

uint32_t js::jit::maybeRN(Register r) {
  if (r == InvalidReg) return 0;

  MOZ_ASSERT((r.code() & ~0xf) == 0);
  return r.code() << 16;
}

uint32_t js::jit::maybeRD(Register r) {
  if (r == InvalidReg) return 0;

  MOZ_ASSERT((r.code() & ~0xf) == 0);
  return r.code() << 12;
}

Register js::jit::toRD(Instruction i) {
  return Register::FromCode((i.encode() >> 12) & 0xf);
}
Register js::jit::toR(Instruction i) {
  return Register::FromCode(i.encode() & 0xf);
}

Register js::jit::toRM(Instruction i) {
  return Register::FromCode((i.encode() >> 8) & 0xf);
}

Register js::jit::toRN(Instruction i) {
  return Register::FromCode((i.encode() >> 16) & 0xf);
}

uint32_t js::jit::VD(VFPRegister vr) {
  if (vr.isMissing()) return 0;

  // Bits 15,14,13,12, 22.
  VFPRegister::VFPRegIndexSplit s = vr.encode();
  return s.bit << 22 | s.block << 12;
}
uint32_t js::jit::VN(VFPRegister vr) {
  if (vr.isMissing()) return 0;

  // Bits 19,18,17,16, 7.
  VFPRegister::VFPRegIndexSplit s = vr.encode();
  return s.bit << 7 | s.block << 16;
}
uint32_t js::jit::VM(VFPRegister vr) {
  if (vr.isMissing()) return 0;

  // Bits 5, 3,2,1,0.
  VFPRegister::VFPRegIndexSplit s = vr.encode();
  return s.bit << 5 | s.block;
}
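
// Worked example (illustrative only): r5 as Rd encodes as 5 << 12 ==
// 0x00005000 via RD(), and as Rn as 5 << 16 == 0x00050000 via RN(), matching
// the Rd/Rn fields of the standard ARM data-processing instruction layout.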

VFPRegister::VFPRegIndexSplit jit::VFPRegister::encode() {
  MOZ_ASSERT(!_isInvalid);

  switch (kind) {
    case Double:
      return VFPRegIndexSplit(code_ & 0xf, code_ >> 4);
    case Single:
      return VFPRegIndexSplit(code_ >> 1, code_ & 1);
    default:
      // VFP register treated as an integer, NOT a gpr.
      return VFPRegIndexSplit(code_ >> 1, code_ & 1);
  }
}
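
// For illustration (assumed values, not from the original source): double d17
// (code_ == 17 == 0b10001) splits into block == 0b0001 and bit == 1, while
// single s7 (code_ == 7) splits into block == 0b0011 and bit == 1, mirroring
// how VFP instructions store the top bit of a register index separately.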

bool InstDTR::IsTHIS(const Instruction& i) {
  return (i.encode() & IsDTRMask) == (uint32_t)IsDTR;
}

InstDTR* InstDTR::AsTHIS(const Instruction& i) {
  if (IsTHIS(i)) return (InstDTR*)&i;
  return nullptr;
}

bool InstLDR::IsTHIS(const Instruction& i) {
  return (i.encode() & IsDTRMask) == (uint32_t)IsDTR;
}

InstLDR* InstLDR::AsTHIS(const Instruction& i) {
  if (IsTHIS(i)) return (InstLDR*)&i;
  return nullptr;
}

InstNOP* InstNOP::AsTHIS(Instruction& i) {
  if (IsTHIS(i)) return (InstNOP*)&i;
  return nullptr;
}

bool InstNOP::IsTHIS(const Instruction& i) {
  return (i.encode() & 0x0fffffff) == NopInst;
}

bool InstBranchReg::IsTHIS(const Instruction& i) {
  return InstBXReg::IsTHIS(i) || InstBLXReg::IsTHIS(i);
}

InstBranchReg* InstBranchReg::AsTHIS(const Instruction& i) {
  if (IsTHIS(i)) return (InstBranchReg*)&i;
  return nullptr;
}
void InstBranchReg::extractDest(Register* dest) { *dest = toR(*this); }
bool InstBranchReg::checkDest(Register dest) { return dest == toR(*this); }

bool InstBranchImm::IsTHIS(const Instruction& i) {
  return InstBImm::IsTHIS(i) || InstBLImm::IsTHIS(i);
}

InstBranchImm* InstBranchImm::AsTHIS(const Instruction& i) {
  if (IsTHIS(i)) return (InstBranchImm*)&i;
  return nullptr;
}

void InstBranchImm::extractImm(BOffImm* dest) { *dest = BOffImm(*this); }

bool InstBXReg::IsTHIS(const Instruction& i) {
  return (i.encode() & IsBRegMask) == IsBX;
}

InstBXReg* InstBXReg::AsTHIS(const Instruction& i) {
  if (IsTHIS(i)) return (InstBXReg*)&i;
  return nullptr;
}

bool InstBLXReg::IsTHIS(const Instruction& i) {
  return (i.encode() & IsBRegMask) == IsBLX;
}
InstBLXReg* InstBLXReg::AsTHIS(const Instruction& i) {
  if (IsTHIS(i)) return (InstBLXReg*)&i;
  return nullptr;
}

bool InstBImm::IsTHIS(const Instruction& i) {
  return (i.encode() & IsBImmMask) == IsB;
}
InstBImm* InstBImm::AsTHIS(const Instruction& i) {
  if (IsTHIS(i)) return (InstBImm*)&i;
  return nullptr;
}

bool InstBLImm::IsTHIS(const Instruction& i) {
  return (i.encode() & IsBImmMask) == IsBL;
}
InstBLImm* InstBLImm::AsTHIS(const Instruction& i) {
  if (IsTHIS(i)) return (InstBLImm*)&i;
  return nullptr;
}

bool InstMovWT::IsTHIS(Instruction& i) {
  return InstMovW::IsTHIS(i) || InstMovT::IsTHIS(i);
}
InstMovWT* InstMovWT::AsTHIS(Instruction& i) {
  if (IsTHIS(i)) return (InstMovWT*)&i;
  return nullptr;
}

void InstMovWT::extractImm(Imm16* imm) { *imm = Imm16(*this); }
bool InstMovWT::checkImm(Imm16 imm) {
  return imm.decode() == Imm16(*this).decode();
}

void InstMovWT::extractDest(Register* dest) { *dest = toRD(*this); }
bool InstMovWT::checkDest(Register dest) { return dest == toRD(*this); }

bool InstMovW::IsTHIS(const Instruction& i) {
  return (i.encode() & IsWTMask) == IsW;
}

InstMovW* InstMovW::AsTHIS(const Instruction& i) {
  if (IsTHIS(i)) return (InstMovW*)&i;
  return nullptr;
}
InstMovT* InstMovT::AsTHIS(const Instruction& i) {
  if (IsTHIS(i)) return (InstMovT*)&i;
  return nullptr;
}

bool InstMovT::IsTHIS(const Instruction& i) {
  return (i.encode() & IsWTMask) == IsT;
}

InstALU* InstALU::AsTHIS(const Instruction& i) {
  if (IsTHIS(i)) return (InstALU*)&i;
  return nullptr;
}
bool InstALU::IsTHIS(const Instruction& i) {
  return (i.encode() & ALUMask) == 0;
}
void InstALU::extractOp(ALUOp* ret) { *ret = ALUOp(encode() & (0xf << 21)); }
bool InstALU::checkOp(ALUOp op) {
  ALUOp mine;
  extractOp(&mine);
  return mine == op;
}
void InstALU::extractDest(Register* ret) { *ret = toRD(*this); }
bool InstALU::checkDest(Register rd) { return rd == toRD(*this); }
void InstALU::extractOp1(Register* ret) { *ret = toRN(*this); }
bool InstALU::checkOp1(Register rn) { return rn == toRN(*this); }
Operand2 InstALU::extractOp2() { return Operand2(encode()); }

InstCMP* InstCMP::AsTHIS(const Instruction& i) {
  if (IsTHIS(i)) return (InstCMP*)&i;
  return nullptr;
}

bool InstCMP::IsTHIS(const Instruction& i) {
  return InstALU::IsTHIS(i) && InstALU::AsTHIS(i)->checkDest(r0) &&
         InstALU::AsTHIS(i)->checkOp(OpCmp);
}

InstMOV* InstMOV::AsTHIS(const Instruction& i) {
  if (IsTHIS(i)) return (InstMOV*)&i;
  return nullptr;
}

bool InstMOV::IsTHIS(const Instruction& i) {
  return InstALU::IsTHIS(i) && InstALU::AsTHIS(i)->checkOp1(r0) &&
         InstALU::AsTHIS(i)->checkOp(OpMov);
}

Op2Reg Operand2::toOp2Reg() const { return *(Op2Reg*)this; }

Imm16::Imm16(Instruction& inst)
    : lower_(inst.encode() & 0xfff),
      upper_(inst.encode() >> 16),
      invalid_(0xfff) {}

Imm16::Imm16(uint32_t imm)
    : lower_(imm & 0xfff), pad_(0), upper_((imm >> 12) & 0xf), invalid_(0) {
  MOZ_ASSERT(decode() == imm);
}

Imm16::Imm16() : invalid_(0xfff) {}

void jit::PatchJump(CodeLocationJump& jump_, CodeLocationLabel label,
                    ReprotectCode reprotect) {
  // We need to determine if this jump can fit into the standard 24+2 bit
  // address or if we need a larger branch (or just need to use our pool
  // entry).
  Instruction* jump = (Instruction*)jump_.raw();
  // jumpWithPatch() returns the offset of the jump and never a pool or nop.
  Assembler::Condition c = jump->extractCond();
  MOZ_ASSERT(jump->is<InstBranchImm>() || jump->is<InstLDR>());

  int jumpOffset = label.raw() - jump_.raw();
  if (BOffImm::IsInRange(jumpOffset)) {
    // This instruction started off as a branch, and will remain one.
    MaybeAutoWritableJitCode awjc(jump, sizeof(Instruction), reprotect);
    Assembler::RetargetNearBranch(jump, jumpOffset, c);
  } else {
    // This instruction started off as a branch, but now needs to be demoted
    // to an ldr.
    uint8_t** slot = reinterpret_cast<uint8_t**>(jump_.jumpTableEntry());

    // Ensure both the branch and the slot are writable.
    MOZ_ASSERT(uintptr_t(slot) > uintptr_t(jump));
    size_t size = uintptr_t(slot) - uintptr_t(jump) + sizeof(void*);
    MaybeAutoWritableJitCode awjc(jump, size, reprotect);

    Assembler::RetargetFarBranch(jump, slot, label.raw(), c);
  }
}

void Assembler::finish() {
  flush();
  MOZ_ASSERT(!isFinished);
  isFinished = true;
}

bool Assembler::appendRawCode(const uint8_t* code, size_t numBytes) {
  flush();
  return m_buffer.appendRawCode(code, numBytes);
}

bool Assembler::reserve(size_t size) {
  // This buffer uses fixed-size chunks so there's no point in reserving
  // now vs. on-demand.
  return !oom();
}

bool Assembler::swapBuffer(wasm::Bytes& bytes) {
  // For now, specialize to the one use case. As long as wasm::Bytes is a
  // Vector, not a linked-list of chunks, there's not much we can do other
  // than copy.
  MOZ_ASSERT(bytes.empty());
  if (!bytes.resize(bytesNeeded())) return false;
  m_buffer.executableCopy(bytes.begin());
  return true;
}

void Assembler::executableCopy(uint8_t* buffer, bool flushICache) {
  MOZ_ASSERT(isFinished);
  m_buffer.executableCopy(buffer);
  if (flushICache)
    AutoFlushICache::setRange(uintptr_t(buffer), m_buffer.size());
}

uint32_t Assembler::actualIndex(uint32_t idx_) const {
  ARMBuffer::PoolEntry pe(idx_);
  return m_buffer.poolEntryOffset(pe);
}

uint8_t* Assembler::PatchableJumpAddress(JitCode* code, uint32_t pe_) {
  return code->raw() + pe_;
}

class RelocationIterator {
  CompactBufferReader reader_;
  // Offset in bytes.
  uint32_t offset_;

 public:
  RelocationIterator(CompactBufferReader& reader) : reader_(reader) {}

  bool read() {
    if (!reader_.more()) return false;
    offset_ = reader_.readUnsigned();
    return true;
  }

  uint32_t offset() const { return offset_; }
};

template <class Iter>
const uint32_t* Assembler::GetCF32Target(Iter* iter) {
  Instruction* inst1 = iter->cur();

  if (inst1->is<InstBranchImm>()) {
    // See if we have a simple case, b #offset.
    BOffImm imm;
    InstBranchImm* jumpB = inst1->as<InstBranchImm>();
    jumpB->extractImm(&imm);
    return imm.getDest(inst1)->raw();
  }

  if (inst1->is<InstMovW>()) {
    // See if we have the complex case:
    //  movw r_temp, #imm1
    //  movt r_temp, #imm2
    //  bx r_temp
    // OR
    //  movw r_temp, #imm1
    //  movt r_temp, #imm2
    //  str pc, [sp]
    //  bx r_temp

    Imm16 targ_bot;
    Imm16 targ_top;
    Register temp;

    // Extract both the temp register and the bottom immediate.
    InstMovW* bottom = inst1->as<InstMovW>();
    bottom->extractImm(&targ_bot);
    bottom->extractDest(&temp);

    // Extract the top part of the immediate.
    Instruction* inst2 = iter->next();
    MOZ_ASSERT(inst2->is<InstMovT>());
    InstMovT* top = inst2->as<InstMovT>();
    top->extractImm(&targ_top);

    // Make sure they are being loaded into the same register.
    MOZ_ASSERT(top->checkDest(temp));

    // Make sure we're branching to the same register.
#ifdef DEBUG
    // A toggled call sometimes has a NOP instead of a branch for the third
    // instruction. No way to assert that it's valid in that situation.
    Instruction* inst3 = iter->next();
    if (!inst3->is<InstNOP>()) {
      InstBranchReg* realBranch = nullptr;
      if (inst3->is<InstBranchReg>()) {
        realBranch = inst3->as<InstBranchReg>();
      } else {
        Instruction* inst4 = iter->next();
        realBranch = inst4->as<InstBranchReg>();
      }
      MOZ_ASSERT(realBranch->checkDest(temp));
    }
#endif

    uint32_t* dest = (uint32_t*)(targ_bot.decode() | (targ_top.decode() << 16));
    return dest;
  }

  if (inst1->is<InstLDR>()) return *(uint32_t**)inst1->as<InstLDR>()->dest();

  MOZ_CRASH("unsupported branch relocation");
}
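
// Worked example (illustrative values): for the pair
//   movw r12, #0x5678   ; targ_bot.decode() == 0x5678
//   movt r12, #0x1234   ; targ_top.decode() == 0x1234
// the reconstruction above yields 0x5678 | (0x1234 << 16) == 0x12345678 as
// the branch target.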

uintptr_t Assembler::GetPointer(uint8_t* instPtr) {
  InstructionIterator iter((Instruction*)instPtr);
  uintptr_t ret = (uintptr_t)GetPtr32Target(iter, nullptr, nullptr);
  return ret;
}

template <class Iter>
const uint32_t* Assembler::GetPtr32Target(Iter start, Register* dest,
                                          RelocStyle* style) {
  Instruction* load1 = start.cur();
  Instruction* load2 = start.next();

  if (load1->is<InstMovW>() && load2->is<InstMovT>()) {
    if (style) *style = L_MOVWT;

    // See if we have the complex case:
    //  movw r_temp, #imm1
    //  movt r_temp, #imm2

    Imm16 targ_bot;
    Imm16 targ_top;
    Register temp;

    // Extract both the temp register and the bottom immediate.
    InstMovW* bottom = load1->as<InstMovW>();
    bottom->extractImm(&targ_bot);
    bottom->extractDest(&temp);

    // Extract the top part of the immediate.
    InstMovT* top = load2->as<InstMovT>();
    top->extractImm(&targ_top);

    // Make sure they are being loaded into the same register.
    MOZ_ASSERT(top->checkDest(temp));

    if (dest) *dest = temp;

    uint32_t* value =
        (uint32_t*)(targ_bot.decode() | (targ_top.decode() << 16));
    return value;
  }

  if (load1->is<InstLDR>()) {
    if (style) *style = L_LDR;
    if (dest) *dest = toRD(*load1);
    return *(uint32_t**)load1->as<InstLDR>()->dest();
  }

  MOZ_CRASH("unsupported relocation");
}

static JitCode* CodeFromJump(InstructionIterator* jump) {
  uint8_t* target = (uint8_t*)Assembler::GetCF32Target(jump);
  return JitCode::FromExecutable(target);
}

void Assembler::TraceJumpRelocations(JSTracer* trc, JitCode* code,
                                     CompactBufferReader& reader) {
  RelocationIterator iter(reader);
  while (iter.read()) {
    InstructionIterator institer((Instruction*)(code->raw() + iter.offset()));
    JitCode* child = CodeFromJump(&institer);
    TraceManuallyBarrieredEdge(trc, &child, "rel32");
  }
}

template <class Iter>
static void TraceOneDataRelocation(JSTracer* trc, Iter iter) {
  Register dest;
  Assembler::RelocStyle rs;
  const void* prior = Assembler::GetPtr32Target(iter, &dest, &rs);
  void* ptr = const_cast<void*>(prior);

  // No barrier needed since these are constants.
  TraceManuallyBarrieredGenericPointerEdge(
      trc, reinterpret_cast<gc::Cell**>(&ptr), "ion-masm-ptr");

  if (ptr != prior) {
    MacroAssemblerARM::ma_mov_patch(Imm32(int32_t(ptr)), dest,
                                    Assembler::Always, rs, iter);

    // L_LDR won't cause any instructions to be updated.
    if (rs != Assembler::L_LDR) {
      AutoFlushICache::flush(uintptr_t(iter.cur()), 4);
      AutoFlushICache::flush(uintptr_t(iter.next()), 4);
    }
  }
}

static void TraceDataRelocations(JSTracer* trc, uint8_t* buffer,
                                 CompactBufferReader& reader) {
  while (reader.more()) {
    size_t offset = reader.readUnsigned();
    InstructionIterator iter((Instruction*)(buffer + offset));
    TraceOneDataRelocation(trc, iter);
  }
}

static void TraceDataRelocations(JSTracer* trc, ARMBuffer* buffer,
                                 CompactBufferReader& reader) {
  while (reader.more()) {
    BufferOffset offset(reader.readUnsigned());
    BufferInstructionIterator iter(offset, buffer);
    TraceOneDataRelocation(trc, iter);
  }
}

void Assembler::TraceDataRelocations(JSTracer* trc, JitCode* code,
                                     CompactBufferReader& reader) {
  ::TraceDataRelocations(trc, code->raw(), reader);
}

void Assembler::copyJumpRelocationTable(uint8_t* dest) {
  if (jumpRelocations_.length())
    memcpy(dest, jumpRelocations_.buffer(), jumpRelocations_.length());
}

void Assembler::copyDataRelocationTable(uint8_t* dest) {
  if (dataRelocations_.length())
    memcpy(dest, dataRelocations_.buffer(), dataRelocations_.length());
}

void Assembler::trace(JSTracer* trc) {
  for (size_t i = 0; i < jumps_.length(); i++) {
    RelativePatch& rp = jumps_[i];
    if (rp.kind() == Relocation::JITCODE) {
      JitCode* code = JitCode::FromExecutable((uint8_t*)rp.target());
      TraceManuallyBarrieredEdge(trc, &code, "masmrel32");
      MOZ_ASSERT(code == JitCode::FromExecutable((uint8_t*)rp.target()));
    }
  }

  if (dataRelocations_.length()) {
    CompactBufferReader reader(dataRelocations_);
    ::TraceDataRelocations(trc, &m_buffer, reader);
  }
}

void Assembler::processCodeLabels(uint8_t* rawCode) {
  for (const CodeLabel& label : codeLabels_) {
    Bind(rawCode, label);
  }
}

void Assembler::writeCodePointer(CodeLabel* label) {
  BufferOffset off = writeInst(-1);
  label->patchAt()->bind(off.getOffset());
}

void Assembler::Bind(uint8_t* rawCode, const CodeLabel& label) {
  size_t offset = label.patchAt().offset();
  size_t target = label.target().offset();
  *reinterpret_cast<const void**>(rawCode + offset) = rawCode + target;
}

Assembler::Condition Assembler::InvertCondition(Condition cond) {
  const uint32_t ConditionInversionBit = 0x10000000;
  return Condition(ConditionInversionBit ^ cond);
}
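
// Example (illustrative): ARM condition codes come in even/odd pairs that
// differ only in bit 28, so flipping that bit maps Equal (0x00000000) to
// NotEqual (0x10000000) and GreaterThan (0xc0000000) to LessThanOrEqual
// (0xd0000000).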

Assembler::Condition Assembler::UnsignedCondition(Condition cond) {
  switch (cond) {
    case Zero:
    case NonZero:
      return cond;
    case LessThan:
    case Below:
      return Below;
    case LessThanOrEqual:
    case BelowOrEqual:
      return BelowOrEqual;
    case GreaterThan:
    case Above:
      return Above;
    case AboveOrEqual:
    case GreaterThanOrEqual:
      return AboveOrEqual;
    default:
      MOZ_CRASH("unexpected condition");
  }
}

Assembler::Condition Assembler::ConditionWithoutEqual(Condition cond) {
  switch (cond) {
    case LessThan:
    case LessThanOrEqual:
      return LessThan;
    case Below:
    case BelowOrEqual:
      return Below;
    case GreaterThan:
    case GreaterThanOrEqual:
      return GreaterThan;
    case Above:
    case AboveOrEqual:
      return Above;
    default:
      MOZ_CRASH("unexpected condition");
  }
}

Assembler::DoubleCondition Assembler::InvertCondition(DoubleCondition cond) {
  const uint32_t ConditionInversionBit = 0x10000000;
  return DoubleCondition(ConditionInversionBit ^ cond);
}

Imm8::TwoImm8mData Imm8::EncodeTwoImms(uint32_t imm) {
  // In the ideal case, we are looking for a number that (in binary) looks
  // like:
  //   0b((00)*)n_1((00)*)n_2((00)*)
  //      left  n1   mid  n2
  //   where both n_1 and n_2 fit into 8 bits.
  // Since this is being done with rotates, we also need to handle the case
  // that one of these numbers is in fact split between the left and right
  // sides, in which case the constant will look like:
  //   0bn_1a((00)*)n_2((00)*)n_1b
  //     n1a  mid  n2   rgh    n1b
  // Also remember, values are rotated by multiples of two, and left, mid or
  // right can have length zero.
  uint32_t imm1, imm2;
  int left = CountLeadingZeroes32(imm) & 0x1E;
  uint32_t no_n1 = imm & ~(0xff << (24 - left));

  // Not technically needed: this case only happens if we can encode as a
  // single imm8m. There is a perfectly reasonable encoding in this case, but
  // we shouldn't encourage people to do things like this.
  if (no_n1 == 0) return TwoImm8mData();

  int mid = CountLeadingZeroes32(no_n1) & 0x1E;
  uint32_t no_n2 =
      no_n1 & ~((0xff << ((24 - mid) & 0x1f)) | 0xff >> ((8 + mid) & 0x1f));

  if (no_n2 == 0) {
    // We hit the easy case, no wraparound.
    // Note: a single constant *may* look like this.
    int imm1shift = left + 8;
    int imm2shift = mid + 8;
    imm1 = (imm >> (32 - imm1shift)) & 0xff;
    if (imm2shift >= 32) {
      imm2shift = 0;
      // This assert does not always hold; in fact, relying on it would lead
      // to some incredibly subtle bugs.
      // assert((imm & 0xff) == no_n1);
      imm2 = no_n1;
    } else {
      imm2 = ((imm >> (32 - imm2shift)) | (imm << imm2shift)) & 0xff;
      MOZ_ASSERT(((no_n1 >> (32 - imm2shift)) | (no_n1 << imm2shift)) == imm2);
    }
    MOZ_ASSERT((imm1shift & 0x1) == 0);
    MOZ_ASSERT((imm2shift & 0x1) == 0);
    return TwoImm8mData(datastore::Imm8mData(imm1, imm1shift >> 1),
                        datastore::Imm8mData(imm2, imm2shift >> 1));
  }

  // Either it wraps, or it does not fit. If we initially chopped off more
  // than 8 bits, then it won't fit.
  if (left >= 8) return TwoImm8mData();

  int right = 32 - (CountLeadingZeroes32(no_n2) & 30);
  // All remaining set bits *must* fit into the lower 8 bits.
  // The right == 8 case should be handled by the previous case.
  if (right > 8) return TwoImm8mData();

  // Make sure the initial bits that we removed for no_n1 fit into the
  // 8-(32-right) leftmost bits.
  if (((imm & (0xff << (24 - left))) << (8 - right)) != 0) {
    // BUT we may have removed more bits than we needed to for no_n1
    // 0x04104001 e.g. we can encode 0x104 with a single op, then 0x04000001
    // with a second, but we try to encode 0x0410000 and find that we need a
    // second op for 0x4000, and 0x1 cannot be included in the encoding of
    // 0x04100000.
    no_n1 = imm & ~((0xff >> (8 - right)) | (0xff << (24 + right)));
    mid = CountLeadingZeroes32(no_n1) & 30;
    no_n2 = no_n1 & ~((0xff << ((24 - mid) & 31)) | 0xff >> ((8 + mid) & 31));
    if (no_n2 != 0) return TwoImm8mData();
  }

  // Now assemble all of this information into two coherent constants. imm1
  // is a rotate right from the lower 8 bits.
  int imm1shift = 8 - right;
  imm1 = 0xff & ((imm << imm1shift) | (imm >> (32 - imm1shift)));
  MOZ_ASSERT((imm1shift & ~0x1e) == 0);
  // left + 8 + mid is the position of the leftmost bit of n_2.
  // We needed to rotate 0x000000ab right by 8 in order to get 0xab000000,
  // then shift again by the leftmost bit in order to get the constant that we
  // care about.
  int imm2shift = mid + 8;
  imm2 = ((imm >> (32 - imm2shift)) | (imm << imm2shift)) & 0xff;
  MOZ_ASSERT((imm1shift & 0x1) == 0);
  MOZ_ASSERT((imm2shift & 0x1) == 0);
  return TwoImm8mData(datastore::Imm8mData(imm1, imm1shift >> 1),
                      datastore::Imm8mData(imm2, imm2shift >> 1));
}
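
// Worked example (illustrative): EncodeTwoImms(0x00ff00ff) takes the easy
// (non-wrapping) path above and yields imm1 == 0xff with a rotate field of 8
// (0xff ROR 16 == 0x00ff0000) and imm2 == 0xff with a rotate field of 0,
// which together reassemble the original constant.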

ALUOp jit::ALUNeg(ALUOp op, Register dest, Register scratch, Imm32* imm,
                  Register* negDest) {
  // Find an alternate ALUOp to get the job done, and use a different imm.
  *negDest = dest;
  switch (op) {
    case OpMov:
      *imm = Imm32(~imm->value);
      return OpMvn;
    case OpMvn:
      *imm = Imm32(~imm->value);
      return OpMov;
    case OpAnd:
      *imm = Imm32(~imm->value);
      return OpBic;
    case OpBic:
      *imm = Imm32(~imm->value);
      return OpAnd;
    case OpAdd:
      *imm = Imm32(-imm->value);
      return OpSub;
    case OpSub:
      *imm = Imm32(-imm->value);
      return OpAdd;
    case OpCmp:
      *imm = Imm32(-imm->value);
      return OpCmn;
    case OpCmn:
      *imm = Imm32(-imm->value);
      return OpCmp;
    case OpTst:
      MOZ_ASSERT(dest == InvalidReg);
      *imm = Imm32(~imm->value);
      *negDest = scratch;
      return OpBic;
      // orr has orn on thumb2 only.
    default:
      return OpInvalid;
  }
}
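
// Example (illustrative): a requested "add r0, r1, #-1" cannot encode -1 as
// an imm8m, but ALUNeg rewrites it to OpSub with imm == 1, i.e.
// "sub r0, r1, #1", which encodes trivially.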

bool jit::can_dbl(ALUOp op) {
  // Some instructions can't be processed as two separate instructions (such
  // as and), and possibly add (when we're setting ccodes). There is also some
  // hilarity with *reading* condition codes. For example, adc dest, src1,
  // 0xfff (add with carry) can be split up into adc dest, src1, 0xf00; add
  // dest, dest, 0xff, since "reading" the condition code increments the
  // result by one conditionally, and that only needs to be done on one of the
  // two instructions.
  switch (op) {
    case OpBic:
    case OpAdd:
    case OpSub:
    case OpEor:
    case OpOrr:
      return true;
    default:
      return false;
  }
}

bool jit::condsAreSafe(ALUOp op) {
  // Even when we are setting condition codes, sometimes we can get away with
  // splitting an operation into two. For example, if our immediate is
  // 0x00ff00ff, and the operation is eors we can split this in half, since x
  // ^ 0x00ff0000 ^ 0x000000ff should set all of its condition codes exactly
  // the same as x ^ 0x00ff00ff. However, if the operation were adds, we
  // cannot split this in half. If the source on the add is 0xfff00ff0, the
  // result should be 0xef10ef, but do we set the overflow bit or not?
  // Depending on which half is performed first (0x00ff0000 or 0x000000ff) the
  // V bit will be set differently, and *not* updating the V bit would be
  // wrong. Theoretically, the following should work:
  //  adds r0, r1, 0x00ff0000;
  //  addsvs r0, r1, 0x000000ff;
  //  addvc r0, r1, 0x000000ff;
  // But this is 3 instructions, and at that point, we might as well use
  // something else.
  switch (op) {
    case OpBic:
    case OpOrr:
    case OpEor:
      return true;
    default:
      return false;
  }
}

ALUOp jit::getDestVariant(ALUOp op) {
  // All of the compare operations are dest-less variants of a standard
  // operation. Given the dest-less variant, return the dest-ful variant.
  switch (op) {
    case OpCmp:
      return OpSub;
    case OpCmn:
      return OpAdd;
    case OpTst:
      return OpAnd;
    case OpTeq:
      return OpEor;
    default:
      return op;
  }
}

O2RegImmShift jit::O2Reg(Register r) { return O2RegImmShift(r, LSL, 0); }

O2RegImmShift jit::lsl(Register r, int amt) {
  MOZ_ASSERT(0 <= amt && amt <= 31);
  return O2RegImmShift(r, LSL, amt);
}

O2RegImmShift jit::lsr(Register r, int amt) {
  MOZ_ASSERT(1 <= amt && amt <= 32);
  return O2RegImmShift(r, LSR, amt);
}

O2RegImmShift jit::ror(Register r, int amt) {
  MOZ_ASSERT(1 <= amt && amt <= 31);
  return O2RegImmShift(r, ROR, amt);
}
O2RegImmShift jit::rol(Register r, int amt) {
  MOZ_ASSERT(1 <= amt && amt <= 31);
  return O2RegImmShift(r, ROR, 32 - amt);
}
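
// Note (illustrative): ARM has no rotate-left operand, so rol is synthesized
// as a rotate right by the complement; e.g. rol(r0, 8) emits "r0, ROR #24",
// and rotating left by 8 or right by 24 produce the same 32-bit result.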

O2RegImmShift jit::asr(Register r, int amt) {
  MOZ_ASSERT(1 <= amt && amt <= 32);
  return O2RegImmShift(r, ASR, amt);
}

O2RegRegShift jit::lsl(Register r, Register amt) {
  return O2RegRegShift(r, LSL, amt);
}

O2RegRegShift jit::lsr(Register r, Register amt) {
  return O2RegRegShift(r, LSR, amt);
}

O2RegRegShift jit::ror(Register r, Register amt) {
  return O2RegRegShift(r, ROR, amt);
}

O2RegRegShift jit::asr(Register r, Register amt) {
  return O2RegRegShift(r, ASR, amt);
}

static js::jit::DoubleEncoder doubleEncoder;

/* static */ const js::jit::VFPImm js::jit::VFPImm::One(0x3FF00000);

js::jit::VFPImm::VFPImm(uint32_t top) {
  data_ = -1;
  datastore::Imm8VFPImmData tmp;
  if (doubleEncoder.lookup(top, &tmp)) data_ = tmp.encode();
}

BOffImm::BOffImm(const Instruction& inst) : data_(inst.encode() & 0x00ffffff) {}

Instruction* BOffImm::getDest(Instruction* src) const {
  // TODO: It is probably worthwhile to verify that src is actually a branch.
  // NOTE: This does not explicitly shift the offset of the destination left by
  // 2, since it is indexing into an array of instruction sized objects.
  return &src[((int32_t(data_) << 8) >> 8) + 2];
}
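
// Example (illustrative): the "+ 2" above accounts for the ARM pipeline: a
// branch with an encoded offset of 0 lands two instructions (8 bytes) past
// the branch itself, so an encoded offset of -2 targets the branch
// instruction itself.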

const js::jit::DoubleEncoder::DoubleEntry js::jit::DoubleEncoder::table[256] = {
#include "jit/arm/DoubleEntryTable.tbl"
};

// VFPRegister implementation
VFPRegister VFPRegister::doubleOverlay(unsigned int which) const {
  MOZ_ASSERT(!_isInvalid);
  MOZ_ASSERT(which == 0);
  if (kind != Double) return VFPRegister(code_ >> 1, Double);
  return *this;
}
VFPRegister VFPRegister::singleOverlay(unsigned int which) const {
  MOZ_ASSERT(!_isInvalid);
  if (kind == Double) {
    // There are no corresponding float registers for d16-d31.
    MOZ_ASSERT(code_ < 16);
    MOZ_ASSERT(which < 2);
    return VFPRegister((code_ << 1) + which, Single);
  }
  MOZ_ASSERT(which == 0);
  return VFPRegister(code_, Single);
}

VFPRegister VFPRegister::sintOverlay(unsigned int which) const {
  MOZ_ASSERT(!_isInvalid);
  if (kind == Double) {
    // There are no corresponding float registers for d16-d31.
    MOZ_ASSERT(code_ < 16);
    MOZ_ASSERT(which < 2);
    return VFPRegister((code_ << 1) + which, Int);
  }
  MOZ_ASSERT(which == 0);
  return VFPRegister(code_, Int);
}
VFPRegister VFPRegister::uintOverlay(unsigned int which) const {
  MOZ_ASSERT(!_isInvalid);
  if (kind == Double) {
    // There are no corresponding float registers for d16-d31.
    MOZ_ASSERT(code_ < 16);
    MOZ_ASSERT(which < 2);
    return VFPRegister((code_ << 1) + which, UInt);
  }
  MOZ_ASSERT(which == 0);
  return VFPRegister(code_, UInt);
}

bool VFPRegister::isInvalid() const { return _isInvalid; }

bool VFPRegister::isMissing() const {
  MOZ_ASSERT(!_isInvalid);
  return _isMissing;
}

bool Assembler::oom() const {
  return AssemblerShared::oom() || m_buffer.oom() || jumpRelocations_.oom() ||
         dataRelocations_.oom();
}

// Size of the instruction stream, in bytes. Including pools. This function
// expects that all pools that need to be placed have been placed. If they
// haven't, then we need to go and flush the pools :(
size_t Assembler::size() const { return m_buffer.size(); }
// Size of the relocation table, in bytes.
size_t Assembler::jumpRelocationTableBytes() const {
  return jumpRelocations_.length();
}
size_t Assembler::dataRelocationTableBytes() const {
  return dataRelocations_.length();
}

// Size of the data table, in bytes.
size_t Assembler::bytesNeeded() const {
  return size() + jumpRelocationTableBytes() + dataRelocationTableBytes();
}

// Allocate memory for a branch instruction; it will be overwritten
// subsequently and should not be disassembled.

BufferOffset Assembler::allocBranchInst() {
  return m_buffer.putInt(Always | InstNOP::NopInst);
}

void Assembler::WriteInstStatic(uint32_t x, uint32_t* dest) {
  MOZ_ASSERT(dest != nullptr);
  *dest = x;
}

void Assembler::haltingAlign(int alignment) {
  // TODO: Implement a proper halting align.
  nopAlign(alignment);
}

void Assembler::nopAlign(int alignment) { m_buffer.align(alignment); }

BufferOffset Assembler::as_nop() { return writeInst(0xe320f000); }

static uint32_t EncodeAlu(Register dest, Register src1, Operand2 op2, ALUOp op,
                          SBit s, Assembler::Condition c) {
  return (int)op | (int)s | (int)c | op2.encode() |
         ((dest == InvalidReg) ? 0 : RD(dest)) |
         ((src1 == InvalidReg) ? 0 : RN(src1));
}

BufferOffset Assembler::as_alu(Register dest, Register src1, Operand2 op2,
                               ALUOp op, SBit s, Condition c) {
  return writeInst(EncodeAlu(dest, src1, op2, op, s, c));
}

BufferOffset Assembler::as_mov(Register dest, Operand2 op2, SBit s,
                               Condition c) {
  return as_alu(dest, InvalidReg, op2, OpMov, s, c);
}

/* static */ void Assembler::as_alu_patch(Register dest, Register src1,
                                          Operand2 op2, ALUOp op, SBit s,
                                          Condition c, uint32_t* pos) {
  WriteInstStatic(EncodeAlu(dest, src1, op2, op, s, c), pos);
}

/* static */ void Assembler::as_mov_patch(Register dest, Operand2 op2, SBit s,
                                          Condition c, uint32_t* pos) {
  as_alu_patch(dest, InvalidReg, op2, OpMov, s, c, pos);
}

BufferOffset Assembler::as_mvn(Register dest, Operand2 op2, SBit s,
                               Condition c) {
  return as_alu(dest, InvalidReg, op2, OpMvn, s, c);
}

// Logical operations.
BufferOffset Assembler::as_and(Register dest, Register src1, Operand2 op2,
                               SBit s, Condition c) {
  return as_alu(dest, src1, op2, OpAnd, s, c);
}
BufferOffset Assembler::as_bic(Register dest, Register src1, Operand2 op2,
                               SBit s, Condition c) {
  return as_alu(dest, src1, op2, OpBic, s, c);
}
BufferOffset Assembler::as_eor(Register dest, Register src1, Operand2 op2,
                               SBit s, Condition c) {
  return as_alu(dest, src1, op2, OpEor, s, c);
}
BufferOffset Assembler::as_orr(Register dest, Register src1, Operand2 op2,
                               SBit s, Condition c) {
  return as_alu(dest, src1, op2, OpOrr, s, c);
}

// Mathematical operations.
BufferOffset Assembler::as_adc(Register dest, Register src1, Operand2 op2,
                               SBit s, Condition c) {
  return as_alu(dest, src1, op2, OpAdc, s, c);
}
BufferOffset Assembler::as_add(Register dest, Register src1, Operand2 op2,
                               SBit s, Condition c) {
  return as_alu(dest, src1, op2, OpAdd, s, c);
}
BufferOffset Assembler::as_sbc(Register dest, Register src1, Operand2 op2,
                               SBit s, Condition c) {
  return as_alu(dest, src1, op2, OpSbc, s, c);
}
BufferOffset Assembler::as_sub(Register dest, Register src1, Operand2 op2,
                               SBit s, Condition c) {
  return as_alu(dest, src1, op2, OpSub, s, c);
}
BufferOffset Assembler::as_rsb(Register dest, Register src1, Operand2 op2,
                               SBit s, Condition c) {
  return as_alu(dest, src1, op2, OpRsb, s, c);
}
BufferOffset Assembler::as_rsc(Register dest, Register src1, Operand2 op2,
                               SBit s, Condition c) {
  return as_alu(dest, src1, op2, OpRsc, s, c);
}

// Test operations.
BufferOffset Assembler::as_cmn(Register src1, Operand2 op2, Condition c) {
  return as_alu(InvalidReg, src1, op2, OpCmn, SetCC, c);
}
BufferOffset Assembler::as_cmp(Register src1, Operand2 op2, Condition c) {
  return as_alu(InvalidReg, src1, op2, OpCmp, SetCC, c);
}
BufferOffset Assembler::as_teq(Register src1, Operand2 op2, Condition c) {
  return as_alu(InvalidReg, src1, op2, OpTeq, SetCC, c);
}
BufferOffset Assembler::as_tst(Register src1, Operand2 op2, Condition c) {
  return as_alu(InvalidReg, src1, op2, OpTst, SetCC, c);
}

static constexpr Register NoAddend{Registers::pc};

static const int SignExtend = 0x06000070;

enum SignExtend {
  SxSxtb = 10 << 20,
  SxSxth = 11 << 20,
  SxUxtb = 14 << 20,
  SxUxth = 15 << 20
};

// Sign extension operations.
BufferOffset Assembler::as_sxtb(Register dest, Register src, int rotate,
                                Condition c) {
  return writeInst((int)c | SignExtend | SxSxtb | RN(NoAddend) | RD(dest) |
                   ((rotate & 3) << 10) | src.code());
}
BufferOffset Assembler::as_sxth(Register dest, Register src, int rotate,
                                Condition c) {
  return writeInst((int)c | SignExtend | SxSxth | RN(NoAddend) | RD(dest) |
                   ((rotate & 3) << 10) | src.code());
}
BufferOffset Assembler::as_uxtb(Register dest, Register src, int rotate,
                                Condition c) {
  return writeInst((int)c | SignExtend | SxUxtb | RN(NoAddend) | RD(dest) |
                   ((rotate & 3) << 10) | src.code());
}
BufferOffset Assembler::as_uxth(Register dest, Register src, int rotate,
                                Condition c) {
  return writeInst((int)c | SignExtend | SxUxth | RN(NoAddend) | RD(dest) |
                   ((rotate & 3) << 10) | src.code());
}
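
// Note (illustrative): the two-bit rotate field above selects a rotation of
// 0, 8, 16, or 24 bits; e.g. as_uxtb(r0, r1, 1) zero-extends bits 15:8 of r1,
// i.e. "uxtb r0, r1, ror #8".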

static uint32_t EncodeMovW(Register dest, Imm16 imm, Assembler::Condition c) {
  MOZ_ASSERT(HasMOVWT());
  return 0x03000000 | c | imm.encode() | RD(dest);
}

static uint32_t EncodeMovT(Register dest, Imm16 imm, Assembler::Condition c) {
  MOZ_ASSERT(HasMOVWT());
  return 0x03400000 | c | imm.encode() | RD(dest);
}
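
// Worked example (illustrative, assuming the standard movw field layout):
// EncodeMovW(r0, Imm16(0x1234), Always) produces 0xe3011234 -- condition AL
// (0xe0000000), the movw opcode bits (0x03000000), imm4 == 0x1 in bits 19:16,
// Rd == r0 in bits 15:12, and imm12 == 0x234.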

// Not quite ALU worthy, but these are useful nonetheless. These also have the
// issue of being formatted completely differently from the standard ALU
// operations.
BufferOffset Assembler::as_movw(Register dest, Imm16 imm, Condition c) {
  return writeInst(EncodeMovW(dest, imm, c));
}

/* static */ void Assembler::as_movw_patch(Register dest, Imm16 imm,
                                           Condition c, Instruction* pos) {
  WriteInstStatic(EncodeMovW(dest, imm, c), (uint32_t*)pos);
}

BufferOffset Assembler::as_movt(Register dest, Imm16 imm, Condition c) {
  return writeInst(EncodeMovT(dest, imm, c));
}

/* static */ void Assembler::as_movt_patch(Register dest, Imm16 imm,
                                           Condition c, Instruction* pos) {
  WriteInstStatic(EncodeMovT(dest, imm, c), (uint32_t*)pos);
}

static const int mull_tag = 0x90;

BufferOffset Assembler::as_genmul(Register dhi, Register dlo, Register rm,
                                  Register rn, MULOp op, SBit s, Condition c) {
  return writeInst(RN(dhi) | maybeRD(dlo) | RM(rm) | rn.code() | op | s | c |
                   mull_tag);
}
BufferOffset Assembler::as_mul(Register dest, Register src1, Register src2,
                               SBit s, Condition c) {
  return as_genmul(dest, InvalidReg, src1, src2, OpmMul, s, c);
}
BufferOffset Assembler::as_mla(Register dest, Register acc, Register src1,
                               Register src2, SBit s, Condition c) {
  return as_genmul(dest, acc, src1, src2, OpmMla, s, c);
}
BufferOffset Assembler::as_umaal(Register destHI, Register destLO,
                                 Register src1, Register src2, Condition c) {
  return as_genmul(destHI, destLO, src1, src2, OpmUmaal, LeaveCC, c);
}
BufferOffset Assembler::as_mls(Register dest, Register acc, Register src1,
                               Register src2, Condition c) {
  return as_genmul(dest, acc, src1, src2, OpmMls, LeaveCC, c);
}

BufferOffset Assembler::as_umull(Register destHI, Register destLO,
                                 Register src1, Register src2, SBit s,
                                 Condition c) {
  return as_genmul(destHI, destLO, src1, src2, OpmUmull, s, c);
}

BufferOffset Assembler::as_umlal(Register destHI, Register destLO,
                                 Register src1, Register src2, SBit s,
                                 Condition c) {
  return as_genmul(destHI, destLO, src1, src2, OpmUmlal, s, c);
}

BufferOffset Assembler::as_smull(Register destHI, Register destLO,
                                 Register src1, Register src2, SBit s,
                                 Condition c) {
  return as_genmul(destHI, destLO, src1, src2, OpmSmull, s, c);
}

BufferOffset Assembler::as_smlal(Register destHI, Register destLO,
                                 Register src1, Register src2, SBit s,
                                 Condition c) {
  return as_genmul(destHI, destLO, src1, src2, OpmSmlal, s, c);
}

BufferOffset Assembler::as_sdiv(Register rd, Register rn, Register rm,
                                Condition c) {
  return writeInst(0x0710f010 | c | RN(rd) | RM(rm) | rn.code());
}

BufferOffset Assembler::as_udiv(Register rd, Register rn, Register rm,
                                Condition c) {
  return writeInst(0x0730f010 | c | RN(rd) | RM(rm) | rn.code());
}

BufferOffset Assembler::as_clz(Register dest, Register src, Condition c) {
  MOZ_ASSERT(src != pc && dest != pc);
  return writeInst(RD(dest) | src.code() | c | 0x016f0f10);
}

// Data transfer instructions: ldr, str, ldrb, strb. Using an int to
// differentiate between 8 bits and 32 bits is overkill, but meh.

static uint32_t EncodeDtr(LoadStore ls, int size, Index mode, Register rt,
                          DTRAddr addr, Assembler::Condition c) {
  MOZ_ASSERT(mode == Offset || (rt != addr.getBase() && pc != addr.getBase()));
  MOZ_ASSERT(size == 32 || size == 8);
  return 0x04000000 | ls | (size == 8 ? 0x00400000 : 0) | mode | c | RT(rt) |
         addr.encode();
}
1380 
as_dtr(LoadStore ls,int size,Index mode,Register rt,DTRAddr addr,Condition c)1381 BufferOffset Assembler::as_dtr(LoadStore ls, int size, Index mode, Register rt,
1382                                DTRAddr addr, Condition c) {
1383   return writeInst(EncodeDtr(ls, size, mode, rt, addr, c));
1384 }
1385 
as_dtr_patch(LoadStore ls,int size,Index mode,Register rt,DTRAddr addr,Condition c,uint32_t * dest)1386 /* static */ void Assembler::as_dtr_patch(LoadStore ls, int size, Index mode,
1387                                           Register rt, DTRAddr addr,
1388                                           Condition c, uint32_t* dest) {
1389   WriteInstStatic(EncodeDtr(ls, size, mode, rt, addr, c), dest);
1390 }

class PoolHintData {
 public:
  enum LoadType {
    // Make 0 the bogus value, since that is the value most likely to be
    // accidentally left somewhere.
    PoolBOGUS = 0,
    PoolDTR = 1,
    PoolBranch = 2,
    PoolVDTR = 3
  };

 private:
  uint32_t index_ : 16;
  uint32_t cond_ : 4;
  LoadType loadType_ : 2;
  uint32_t destReg_ : 5;
  uint32_t destType_ : 1;
  uint32_t ONES : 4;

  static const uint32_t ExpectedOnes = 0xfu;

 public:
  void init(uint32_t index, Assembler::Condition cond, LoadType lt,
            Register destReg) {
    index_ = index;
    MOZ_ASSERT(index_ == index);
    cond_ = cond >> 28;
    MOZ_ASSERT(cond_ == cond >> 28);
    loadType_ = lt;
    ONES = ExpectedOnes;
    destReg_ = destReg.code();
    destType_ = 0;
  }
  void init(uint32_t index, Assembler::Condition cond, LoadType lt,
            const VFPRegister& destReg) {
    MOZ_ASSERT(destReg.isFloat());
    index_ = index;
    MOZ_ASSERT(index_ == index);
    cond_ = cond >> 28;
    MOZ_ASSERT(cond_ == cond >> 28);
    loadType_ = lt;
    ONES = ExpectedOnes;
    destReg_ = destReg.id();
    destType_ = destReg.isDouble();
  }
  Assembler::Condition getCond() const {
    return Assembler::Condition(cond_ << 28);
  }

  Register getReg() const { return Register::FromCode(destReg_); }
  VFPRegister getVFPReg() const {
    VFPRegister r = VFPRegister(
        destReg_, destType_ ? VFPRegister::Double : VFPRegister::Single);
    return r;
  }

  int32_t getIndex() const { return index_; }
  void setIndex(uint32_t index) {
    MOZ_ASSERT(ONES == ExpectedOnes && loadType_ != PoolBOGUS);
    index_ = index;
    MOZ_ASSERT(index_ == index);
  }

  LoadType getLoadType() const {
    // If this *was* a PoolBranch, but the branch has already been bound,
    // then this isn't going to look like a real PoolHintData, but we still
    // want to lie about it so everyone knows it *used* to be a branch.
    if (ONES != ExpectedOnes) return PoolHintData::PoolBranch;
    return loadType_;
  }

  bool isValidPoolHint() const {
    // Most instructions cannot have a condition that is 0xf. Notable
    // exceptions are blx and the entire NEON instruction set. For the
    // purposes of pool loads, and possibly patched branches, the possible
    // instructions are ldr and b, neither of which can have a condition
    // code of 0xf.
    return ONES == ExpectedOnes;
  }
};

union PoolHintPun {
  PoolHintData phd;
  uint32_t raw;
};
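
// A minimal sketch of how the pun above is used (illustration only, not
// extra assembler machinery): a pool hint is this bitfield punned into one
// 32-bit instruction slot, and is later rewritten into a real load.
//
//   PoolHintPun pun;
//   pun.phd.init(/* index = */ 0, Assembler::Always,
//                PoolHintData::PoolDTR, r0);
//   // pun.raw is what gets written into the instruction stream; the pool
//   // machinery can re-number it later:
//   Assembler::InsertIndexIntoTag((uint8_t*)&pun.raw, /* index = */ 3);
//
// The ONES field overlaps the condition-code bits of a real instruction;
// no ldr or b can carry condition 0xf, which is what isValidPoolHint()
// exploits to tell hints apart from already-patched branches.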

// Handles all of the other integral data transferring functions: ldrsb, ldrsh,
// ldrd, etc. The size is given in bits.
BufferOffset Assembler::as_extdtr(LoadStore ls, int size, bool IsSigned,
                                  Index mode, Register rt, EDtrAddr addr,
                                  Condition c) {
  int extra_bits2 = 0;
  int extra_bits1 = 0;
  switch (size) {
    case 8:
      MOZ_ASSERT(IsSigned);
      MOZ_ASSERT(ls != IsStore);
      extra_bits1 = 0x1;
      extra_bits2 = 0x2;
      break;
    case 16:
      // 'case 32' doesn't need to be handled here; it is handled by the
      // default ldr/str.
      extra_bits2 = 0x01;
      extra_bits1 = (ls == IsStore) ? 0 : 1;
      if (IsSigned) {
        MOZ_ASSERT(ls != IsStore);
        extra_bits2 |= 0x2;
      }
      break;
    case 64:
      extra_bits2 = (ls == IsStore) ? 0x3 : 0x2;
      extra_bits1 = 0;
      break;
    default:
      MOZ_CRASH("unexpected size in as_extdtr");
  }
  return writeInst(extra_bits2 << 5 | extra_bits1 << 20 | 0x90 | addr.encode() |
                   RT(rt) | mode | c);
}

BufferOffset Assembler::as_dtm(LoadStore ls, Register rn, uint32_t mask,
                               DTMMode mode, DTMWriteBack wb, Condition c) {
  return writeInst(0x08000000 | RN(rn) | ls | mode | mask | c | wb);
}

BufferOffset Assembler::allocLiteralLoadEntry(
    size_t numInst, unsigned numPoolEntries, PoolHintPun& php, uint8_t* data,
    const LiteralDoc& doc, ARMBuffer::PoolEntry* pe, bool loadToPC) {
  uint8_t* inst = (uint8_t*)&php.raw;

  MOZ_ASSERT(inst);
  MOZ_ASSERT(numInst == 1);  // Or fix the disassembly.

  BufferOffset offs =
      m_buffer.allocEntry(numInst, numPoolEntries, inst, data, pe);
  propagateOOM(offs.assigned());
#ifdef JS_DISASM_ARM
  Instruction* instruction = m_buffer.getInstOrNull(offs);
  if (instruction) spewLiteralLoad(php, loadToPC, instruction, doc);
#endif
  return offs;
}

// This is also used for instructions that might be resolved into branches,
// or might not. If dest == pc then it is effectively a branch.

BufferOffset Assembler::as_Imm32Pool(Register dest, uint32_t value,
                                     Condition c) {
  PoolHintPun php;
  php.phd.init(0, c, PoolHintData::PoolDTR, dest);
  BufferOffset offs = allocLiteralLoadEntry(
      1, 1, php, (uint8_t*)&value, LiteralDoc(value), nullptr, dest == pc);
  return offs;
}

/* static */ void Assembler::WritePoolEntry(Instruction* addr, Condition c,
                                            uint32_t data) {
  MOZ_ASSERT(addr->is<InstLDR>());
  *addr->as<InstLDR>()->dest() = data;
  MOZ_ASSERT(addr->extractCond() == c);
}

BufferOffset Assembler::as_BranchPool(uint32_t value, RepatchLabel* label,
                                      const LabelDoc& documentation,
                                      ARMBuffer::PoolEntry* pe, Condition c) {
  PoolHintPun php;
  php.phd.init(0, c, PoolHintData::PoolBranch, pc);
  BufferOffset ret =
      allocLiteralLoadEntry(1, 1, php, (uint8_t*)&value, LiteralDoc(), pe,
                            /* loadToPC = */ true);
  // If this label is already bound, then immediately replace the stub load
  // with a correct branch.
  if (label->bound()) {
    BufferOffset dest(label);
    BOffImm offset = dest.diffB<BOffImm>(ret);
    if (offset.isInvalid()) {
      m_buffer.fail_bail();
      return ret;
    }
    as_b(offset, c, ret);
  } else if (!oom()) {
    label->use(ret.getOffset());
  }
#ifdef JS_DISASM_ARM
  spew_.spewRef(documentation);
#endif
  return ret;
}

BufferOffset Assembler::as_FImm64Pool(VFPRegister dest, double d, Condition c) {
  MOZ_ASSERT(dest.isDouble());
  PoolHintPun php;
  php.phd.init(0, c, PoolHintData::PoolVDTR, dest);
  return allocLiteralLoadEntry(1, 2, php, (uint8_t*)&d, LiteralDoc(d));
}

BufferOffset Assembler::as_FImm32Pool(VFPRegister dest, float f, Condition c) {
  // Insert floats into the double pool as they have the same limitations on
  // immediate offset. This wastes 4 bytes of padding per float. An
  // alternative would be to have a separate pool for floats.
  MOZ_ASSERT(dest.isSingle());
  PoolHintPun php;
  php.phd.init(0, c, PoolHintData::PoolVDTR, dest);
  return allocLiteralLoadEntry(1, 1, php, (uint8_t*)&f, LiteralDoc(f));
}
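
// A short usage sketch (illustration only; the register choices are
// arbitrary): materializing constants through the pool instead of
// mov/movt pairs.
//
//   masm.as_Imm32Pool(r0, 0xdeadbeef, Assembler::Always);  // ldr r0, [pc, #..]
//   masm.as_FImm64Pool(d0, 3.14, Assembler::Always);       // vldr d0, [pc, #..]
//
// Each call reserves a pool entry and emits a PoolHintData stub that
// PatchConstantPoolLoad() below rewrites into a real pc-relative load once
// the pool's final placement is known.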

// Pool callbacks stuff:
void Assembler::InsertIndexIntoTag(uint8_t* load_, uint32_t index) {
  uint32_t* load = (uint32_t*)load_;
  PoolHintPun php;
  php.raw = *load;
  php.phd.setIndex(index);
  *load = php.raw;
}

// PatchConstantPoolLoad takes the address of the instruction that wants to be
// patched, and the address of the start of the constant pool, and figures
// things out from there.
void Assembler::PatchConstantPoolLoad(void* loadAddr, void* constPoolAddr) {
  PoolHintData data = *(PoolHintData*)loadAddr;
  uint32_t* instAddr = (uint32_t*)loadAddr;
  int offset = (char*)constPoolAddr - (char*)loadAddr;
  switch (data.getLoadType()) {
    case PoolHintData::PoolBOGUS:
      MOZ_CRASH("bogus load type!");
    case PoolHintData::PoolDTR:
      Assembler::as_dtr_patch(
          IsLoad, 32, Offset, data.getReg(),
          DTRAddr(pc, DtrOffImm(offset + 4 * data.getIndex() - 8)),
          data.getCond(), instAddr);
      break;
    case PoolHintData::PoolBranch:
      // Either this used to be a poolBranch, and the label was already bound,
      // so it was replaced with a real branch, or this may happen in the
      // future. If this is going to happen in the future, then the actual
      // bits that are written here don't matter (except the condition code,
      // since that is always preserved across patchings), but if it does not
      // get bound later, then we want to make sure this is a load from the
      // pool entry (and the pool entry should be nullptr so it will crash).
      if (data.isValidPoolHint()) {
        Assembler::as_dtr_patch(
            IsLoad, 32, Offset, pc,
            DTRAddr(pc, DtrOffImm(offset + 4 * data.getIndex() - 8)),
            data.getCond(), instAddr);
      }
      break;
    case PoolHintData::PoolVDTR: {
      VFPRegister dest = data.getVFPReg();
      int32_t imm = offset + (data.getIndex() * 4) - 8;
      MOZ_ASSERT(-1024 < imm && imm < 1024);
      Assembler::as_vdtr_patch(IsLoad, dest, VFPAddr(pc, VFPOffImm(imm)),
                               data.getCond(), instAddr);
      break;
    }
  }
}
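
// A worked example of the "offset + 4 * index - 8" arithmetic above (the
// concrete numbers are assumptions for illustration): reading pc on ARM
// yields the instruction's address plus 8, so if the stub load sits 64
// bytes before the pool start and wants entry 2:
//
//   displacement = 64 + 4 * 2 - 8 = 64
//   target       = loadAddr + 8 + 64 = poolStart + 8   // the third word
//
// The -8 exactly cancels the pc read-ahead; without it every load would
// land two words past its pool entry.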

// Atomic instruction stuff:

BufferOffset Assembler::as_ldrexd(Register rt, Register rt2, Register rn,
                                  Condition c) {
  MOZ_ASSERT(!(rt.code() & 1) && rt2.code() == rt.code() + 1);
  MOZ_ASSERT(rt.code() != 14 && rn.code() != 15);
  return writeInst(0x01b00f9f | (int)c | RT(rt) | RN(rn));
}

BufferOffset Assembler::as_ldrex(Register rt, Register rn, Condition c) {
  MOZ_ASSERT(rt.code() != 15 && rn.code() != 15);
  return writeInst(0x01900f9f | (int)c | RT(rt) | RN(rn));
}

BufferOffset Assembler::as_ldrexh(Register rt, Register rn, Condition c) {
  MOZ_ASSERT(rt.code() != 15 && rn.code() != 15);
  return writeInst(0x01f00f9f | (int)c | RT(rt) | RN(rn));
}

BufferOffset Assembler::as_ldrexb(Register rt, Register rn, Condition c) {
  MOZ_ASSERT(rt.code() != 15 && rn.code() != 15);
  return writeInst(0x01d00f9f | (int)c | RT(rt) | RN(rn));
}

BufferOffset Assembler::as_strexd(Register rd, Register rt, Register rt2,
                                  Register rn, Condition c) {
  MOZ_ASSERT(!(rt.code() & 1) && rt2.code() == rt.code() + 1);
  MOZ_ASSERT(rt.code() != 14 && rn.code() != 15 && rd.code() != 15);
  MOZ_ASSERT(rd != rn && rd != rt && rd != rt2);
  return writeInst(0x01a00f90 | (int)c | RD(rd) | RN(rn) | rt.code());
}

BufferOffset Assembler::as_strex(Register rd, Register rt, Register rn,
                                 Condition c) {
  MOZ_ASSERT(rd != rn && rd != rt);  // True restriction on Cortex-A7 (RPi2).
  return writeInst(0x01800f90 | (int)c | RD(rd) | RN(rn) | rt.code());
}

BufferOffset Assembler::as_strexh(Register rd, Register rt, Register rn,
                                  Condition c) {
  MOZ_ASSERT(rd != rn && rd != rt);  // True restriction on Cortex-A7 (RPi2).
  return writeInst(0x01e00f90 | (int)c | RD(rd) | RN(rn) | rt.code());
}

BufferOffset Assembler::as_strexb(Register rd, Register rt, Register rn,
                                  Condition c) {
  MOZ_ASSERT(rd != rn && rd != rt);  // True restriction on Cortex-A7 (RPi2).
  return writeInst(0x01c00f90 | (int)c | RD(rd) | RN(rn) | rt.code());
}

BufferOffset Assembler::as_clrex() { return writeInst(0xf57ff01f); }

// Memory barrier stuff:

BufferOffset Assembler::as_dmb(BarrierOption option) {
  return writeInst(0xf57ff050U | (int)option);
}
BufferOffset Assembler::as_dsb(BarrierOption option) {
  return writeInst(0xf57ff040U | (int)option);
}
BufferOffset Assembler::as_isb() {
  return writeInst(0xf57ff06fU);  // option == SY
}
BufferOffset Assembler::as_dsb_trap() {
  // DSB is "mcr 15, 0, r0, c7, c10, 4".
  // See eg https://bugs.kde.org/show_bug.cgi?id=228060.
  // ARMv7 manual, "VMSA CP15 c7 register summary".
  // Flagged as "legacy" starting with ARMv8, may be disabled on chip, see
  // ARMv8 manual E2.7.3 and G3.18.16.
  return writeInst(0xee070f9a);
}
BufferOffset Assembler::as_dmb_trap() {
  // DMB is "mcr 15, 0, r0, c7, c10, 5".
  // ARMv7 manual, "VMSA CP15 c7 register summary".
  // Flagged as "legacy" starting with ARMv8, may be disabled on chip, see
  // ARMv8 manual E2.7.3 and G3.18.16.
  return writeInst(0xee070fba);
}
BufferOffset Assembler::as_isb_trap() {
  // ISB is "mcr 15, 0, r0, c7, c5, 4".
  // ARMv7 manual, "VMSA CP15 c7 register summary".
  // Flagged as "legacy" starting with ARMv8, may be disabled on chip, see
  // ARMv8 manual E2.7.3 and G3.18.16.
  return writeInst(0xee070f94);
}

BufferOffset Assembler::as_csdb() {
  // NOP (see as_nop) on architectures where this instruction is not defined.
  //
  // https://developer.arm.com/-/media/developer/pdf/Cache_Speculation_Side-channels_22Feb18.pdf
  // CSDB A32: 1110_0011_0010_0000_1111_0000_0001_0100
  return writeInst(0xe320f000 | 0x14);
}

// Control flow stuff:

// bx can *only* branch to a register, never to an immediate.
BufferOffset Assembler::as_bx(Register r, Condition c) {
  BufferOffset ret = writeInst(((int)c) | OpBx | r.code());
  return ret;
}

void Assembler::WritePoolGuard(BufferOffset branch, Instruction* dest,
                               BufferOffset afterPool) {
  BOffImm off = afterPool.diffB<BOffImm>(branch);
  if (off.isInvalid()) MOZ_CRASH("BOffImm invalid");
  *dest = InstBImm(off, Always);
}

// Branch can branch to an immediate *or* to a register.
// Branches to immediates are pc-relative; branches to registers are absolute.
BufferOffset Assembler::as_b(BOffImm off, Condition c, Label* documentation) {
  return writeBranchInst(((int)c) | OpB | off.encode(),
                         refLabel(documentation));
}

BufferOffset Assembler::as_b(Label* l, Condition c) {
  if (l->bound()) {
    // Note only one instruction is emitted here; the NOP is overwritten.
    BufferOffset ret = allocBranchInst();
    if (oom()) return BufferOffset();

    BOffImm off = BufferOffset(l).diffB<BOffImm>(ret);
    if (off.isInvalid()) {
      m_buffer.fail_bail();
      return BufferOffset();
    }
    as_b(off, c, ret);
#ifdef JS_DISASM_ARM
    spewBranch(m_buffer.getInstOrNull(ret), refLabel(l));
#endif
    return ret;
  }

  if (oom()) return BufferOffset();

  BufferOffset ret;
  if (l->used()) {
    int32_t old = l->offset();
    // This will currently throw an assertion if we couldn't actually
    // encode the offset of the branch.
    if (!BOffImm::IsInRange(old)) {
      m_buffer.fail_bail();
      return ret;
    }
    ret = as_b(BOffImm(old), c, l);
  } else {
    BOffImm inv;
    ret = as_b(inv, c, l);
  }

  if (oom()) return BufferOffset();

  l->use(ret.getOffset());
  return ret;
}
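
// How unbound labels work in as_b/as_bl (a descriptive sketch, not extra
// machinery): each use of an unbound label stores the offset of the
// previous use in the branch's immediate field, threading a singly linked
// list through the code buffer itself. A hypothetical sequence:
//
//   Label done;
//   masm.as_b(&done, Assembler::Equal);   // imm = invalid (end of list)
//   masm.as_b(&done, Assembler::Always);  // imm = offset of the first use
//   masm.bind(&done);                     // walk the chain, patch real offsets
//
// nextLink() further below decodes one link of this chain, and bind() is
// the walker that rewrites every link into a real branch.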

BufferOffset Assembler::as_b(wasm::OldTrapDesc target, Condition c) {
  Label l;
  BufferOffset ret = as_b(&l, c);
  bindLater(&l, target);
  return ret;
}

BufferOffset Assembler::as_b(BOffImm off, Condition c, BufferOffset inst) {
  // JS_DISASM_ARM NOTE: Can't disassemble here, because numerous callers use
  // this to patch up old code. Must disassemble in the caller, where it
  // makes sense. Not many callers.
  *editSrc(inst) = InstBImm(off, c);
  return inst;
}

// blx can go to either an immediate or a register.
// When blx'ing to a register, we change processor state depending on the low
// bit of the register; when blx'ing to an immediate, we *always* change
// processor state.

BufferOffset Assembler::as_blx(Register r, Condition c) {
  return writeInst(((int)c) | OpBlx | r.code());
}

// bl can only branch to a pc-relative immediate offset.
// It cannot change the processor state.
BufferOffset Assembler::as_bl(BOffImm off, Condition c, Label* documentation) {
  return writeBranchInst(((int)c) | OpBl | off.encode(),
                         refLabel(documentation));
}

BufferOffset Assembler::as_bl(Label* l, Condition c) {
  if (l->bound()) {
    // Note only one instruction is emitted here; the NOP is overwritten.
    BufferOffset ret = allocBranchInst();
    if (oom()) return BufferOffset();

    BOffImm offset = BufferOffset(l).diffB<BOffImm>(ret);
    if (offset.isInvalid()) {
      m_buffer.fail_bail();
      return BufferOffset();
    }

    as_bl(offset, c, ret);
#ifdef JS_DISASM_ARM
    spewBranch(m_buffer.getInstOrNull(ret), refLabel(l));
#endif
    return ret;
  }

  if (oom()) return BufferOffset();

  BufferOffset ret;
  // See if the list was empty.
  if (l->used()) {
    // This will currently throw an assertion if we couldn't actually encode
    // the offset of the branch.
    int32_t old = l->offset();
    if (!BOffImm::IsInRange(old)) {
      m_buffer.fail_bail();
      return ret;
    }
    ret = as_bl(BOffImm(old), c, l);
  } else {
    BOffImm inv;
    ret = as_bl(inv, c, l);
  }

  if (oom()) return BufferOffset();

  l->use(ret.getOffset());
  return ret;
}

BufferOffset Assembler::as_bl(BOffImm off, Condition c, BufferOffset inst) {
  *editSrc(inst) = InstBLImm(off, c);
  return inst;
}

BufferOffset Assembler::as_mrs(Register r, Condition c) {
  return writeInst(0x010f0000 | int(c) | RD(r));
}

BufferOffset Assembler::as_msr(Register r, Condition c) {
  // Hardcode the 'mask' field to 0b11 for now. It is bits 18 and 19, which
  // are the two high bits of the hex digit 'c' in this constant.
  MOZ_ASSERT((r.code() & ~0xf) == 0);
  return writeInst(0x012cf000 | int(c) | r.code());
}

// VFP instructions!
enum vfp_tags { VfpTag = 0x0C000A00, VfpArith = 0x02000000 };

BufferOffset Assembler::writeVFPInst(vfp_size sz, uint32_t blob) {
  MOZ_ASSERT((sz & blob) == 0);
  MOZ_ASSERT((VfpTag & blob) == 0);
  return writeInst(VfpTag | sz | blob);
}

/* static */ void Assembler::WriteVFPInstStatic(vfp_size sz, uint32_t blob,
                                                uint32_t* dest) {
  MOZ_ASSERT((sz & blob) == 0);
  MOZ_ASSERT((VfpTag & blob) == 0);
  WriteInstStatic(VfpTag | sz | blob, dest);
}

// Unityped variants: all registers hold the same type (IEEE 754 single or
// double). Notably not included are vcvt; vmov vd, #imm; and vmov rt, vn.
BufferOffset Assembler::as_vfp_float(VFPRegister vd, VFPRegister vn,
                                     VFPRegister vm, VFPOp op, Condition c) {
  // Make sure we believe that all of our operands are the same kind.
  MOZ_ASSERT_IF(!vn.isMissing(), vd.equiv(vn));
  MOZ_ASSERT_IF(!vm.isMissing(), vd.equiv(vm));
  vfp_size sz = vd.isDouble() ? IsDouble : IsSingle;
  return writeVFPInst(sz, VD(vd) | VN(vn) | VM(vm) | op | VfpArith | c);
}

BufferOffset Assembler::as_vadd(VFPRegister vd, VFPRegister vn, VFPRegister vm,
                                Condition c) {
  return as_vfp_float(vd, vn, vm, OpvAdd, c);
}

BufferOffset Assembler::as_vdiv(VFPRegister vd, VFPRegister vn, VFPRegister vm,
                                Condition c) {
  return as_vfp_float(vd, vn, vm, OpvDiv, c);
}

BufferOffset Assembler::as_vmul(VFPRegister vd, VFPRegister vn, VFPRegister vm,
                                Condition c) {
  return as_vfp_float(vd, vn, vm, OpvMul, c);
}

BufferOffset Assembler::as_vnmul(VFPRegister vd, VFPRegister vn, VFPRegister vm,
                                 Condition c) {
  return as_vfp_float(vd, vn, vm, OpvMul, c);
}

BufferOffset Assembler::as_vnmla(VFPRegister vd, VFPRegister vn, VFPRegister vm,
                                 Condition c) {
  MOZ_CRASH("Feature NYI");
}

BufferOffset Assembler::as_vnmls(VFPRegister vd, VFPRegister vn, VFPRegister vm,
                                 Condition c) {
  MOZ_CRASH("Feature NYI");
}

BufferOffset Assembler::as_vneg(VFPRegister vd, VFPRegister vm, Condition c) {
  return as_vfp_float(vd, NoVFPRegister, vm, OpvNeg, c);
}

BufferOffset Assembler::as_vsqrt(VFPRegister vd, VFPRegister vm, Condition c) {
  return as_vfp_float(vd, NoVFPRegister, vm, OpvSqrt, c);
}

BufferOffset Assembler::as_vabs(VFPRegister vd, VFPRegister vm, Condition c) {
  return as_vfp_float(vd, NoVFPRegister, vm, OpvAbs, c);
}

BufferOffset Assembler::as_vsub(VFPRegister vd, VFPRegister vn, VFPRegister vm,
                                Condition c) {
  return as_vfp_float(vd, vn, vm, OpvSub, c);
}

BufferOffset Assembler::as_vcmp(VFPRegister vd, VFPRegister vm, Condition c) {
  return as_vfp_float(vd, NoVFPRegister, vm, OpvCmp, c);
}

BufferOffset Assembler::as_vcmpz(VFPRegister vd, Condition c) {
  return as_vfp_float(vd, NoVFPRegister, NoVFPRegister, OpvCmpz, c);
}

// Specifically, a move between two same-sized registers.
BufferOffset Assembler::as_vmov(VFPRegister vd, VFPRegister vsrc, Condition c) {
  return as_vfp_float(vd, NoVFPRegister, vsrc, OpvMov, c);
}

// Transfer between Core and VFP.

// Unlike the next function (as_vcvt), moving between core registers and VFP
// registers can't be that properly typed, since we don't want to munge the
// VFPRegister type to also include core registers. Thus, the core and VFP
// registers are passed in based on their type, and the src/dest direction is
// determined by the float2core argument.

BufferOffset Assembler::as_vxfer(Register vt1, Register vt2, VFPRegister vm,
                                 FloatToCore_ f2c, Condition c, int idx) {
  vfp_size sz = IsSingle;
  if (vm.isDouble()) {
    // Technically, this can be done with a vmov à la the ARM ARM; however,
    // that requires at least an extra bit saying whether the operation
    // should be performed on the lower or upper half of the double. Moving
    // a single to/from 2N/2N+1 isn't equivalent, since there are 32 single
    // registers and 32 double registers, so there is no way to encode the
    // last 16 double registers.
    sz = IsDouble;
    MOZ_ASSERT(idx == 0 || idx == 1);
    // If we are transferring a single half of the double, then it must be
    // moving a VFP reg to a core reg.
    MOZ_ASSERT_IF(vt2 == InvalidReg, f2c == FloatToCore);
    idx = idx << 21;
  } else {
    MOZ_ASSERT(idx == 0);
  }

  if (vt2 == InvalidReg)
    return writeVFPInst(
        sz, WordTransfer | f2c | c | RT(vt1) | maybeRN(vt2) | VN(vm) | idx);

  // We are doing a 64 bit transfer.
  return writeVFPInst(
      sz, DoubleTransfer | f2c | c | RT(vt1) | maybeRN(vt2) | VM(vm) | idx);
}

enum vcvt_destFloatness { VcvtToInteger = 1 << 18, VcvtToFloat = 0 << 18 };
enum vcvt_toZero {
  VcvtToZero = 1 << 7,   // Round toward zero, i.e. truncate.
  VcvtToFPSCR = 0 << 7   // Use whatever rounding mode the FPSCR specifies.
};
enum vcvt_Signedness {
  VcvtToSigned = 1 << 16,
  VcvtToUnsigned = 0 << 16,
  VcvtFromSigned = 1 << 7,
  VcvtFromUnsigned = 0 << 7
};
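
// A small worked example of how these flags compose (the register choice is
// an assumption): the common JS double -> signed int32 truncating conversion
// corresponds to
//
//   VcvtToInteger | VcvtToSigned | VcvtToZero
//
// which as_vcvt below selects when called with, e.g.,
//   masm.as_vcvt(VFPRegister(d0).sintOverlay(), VFPRegister(d0),
//                /* useFPSCR = */ false);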

// Our encoding actually allows just the src and the dest (and their types) to
// uniquely specify the encoding that we are going to use.
BufferOffset Assembler::as_vcvt(VFPRegister vd, VFPRegister vm, bool useFPSCR,
                                Condition c) {
  // Unlike other cases, the source and dest types cannot be the same.
  MOZ_ASSERT(!vd.equiv(vm));
  vfp_size sz = IsDouble;
  if (vd.isFloat() && vm.isFloat()) {
    // Doing a float -> float conversion.
    if (vm.isSingle()) sz = IsSingle;
    return writeVFPInst(sz, c | 0x02B700C0 | VM(vm) | VD(vd));
  }

  // At least one of the registers should be a float.
  vcvt_destFloatness destFloat;
  vcvt_Signedness opSign;
  vcvt_toZero doToZero = VcvtToFPSCR;
  MOZ_ASSERT(vd.isFloat() || vm.isFloat());
  if (vd.isSingle() || vm.isSingle()) sz = IsSingle;

  if (vd.isFloat()) {
    destFloat = VcvtToFloat;
    opSign = (vm.isSInt()) ? VcvtFromSigned : VcvtFromUnsigned;
  } else {
    destFloat = VcvtToInteger;
    opSign = (vd.isSInt()) ? VcvtToSigned : VcvtToUnsigned;
    doToZero = useFPSCR ? VcvtToFPSCR : VcvtToZero;
  }
  return writeVFPInst(
      sz, c | 0x02B80040 | VD(vd) | VM(vm) | destFloat | opSign | doToZero);
}

BufferOffset Assembler::as_vcvtFixed(VFPRegister vd, bool isSigned,
                                     uint32_t fixedPoint, bool toFixed,
                                     Condition c) {
  MOZ_ASSERT(vd.isFloat());
  uint32_t sx = 0x1;
  vfp_size sf = vd.isDouble() ? IsDouble : IsSingle;
  int32_t imm5 = fixedPoint;
  imm5 = (sx ? 32 : 16) - imm5;
  MOZ_ASSERT(imm5 >= 0);
  imm5 = imm5 >> 1 | (imm5 & 1) << 5;
  return writeVFPInst(sf, 0x02BA0040 | VD(vd) | toFixed << 18 | sx << 7 |
                              (!isSigned) << 16 | imm5 | c);
}
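
// A worked example of the imm5 encoding above (the fixedPoint value is
// chosen for illustration): with sx = 1 the operand is 32 bits wide, and
// the field stores 32 - fixedPoint with its bits split across the
// instruction:
//
//   fixedPoint = 20  =>  imm5 = 32 - 20 = 12 = 0b01100
//   encoded    = (12 >> 1) | ((12 & 1) << 5) = 6
//
// i.e. the upper four bits of imm5 land in bits 0..3 and its lowest bit in
// bit 5, matching the ARM fixed-point immediate layout for vcvt.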

// Transfer between VFP and memory.
static uint32_t EncodeVdtr(LoadStore ls, VFPRegister vd, VFPAddr addr,
                           Assembler::Condition c) {
  return ls | 0x01000000 | addr.encode() | VD(vd) | c;
}

BufferOffset Assembler::as_vdtr(
    LoadStore ls, VFPRegister vd, VFPAddr addr,
    Condition c /* vfp doesn't have a wb option */) {
  vfp_size sz = vd.isDouble() ? IsDouble : IsSingle;
  return writeVFPInst(sz, EncodeVdtr(ls, vd, addr, c));
}

/* static */ void Assembler::as_vdtr_patch(LoadStore ls, VFPRegister vd,
                                           VFPAddr addr, Condition c,
                                           uint32_t* dest) {
  vfp_size sz = vd.isDouble() ? IsDouble : IsSingle;
  WriteVFPInstStatic(sz, EncodeVdtr(ls, vd, addr, c), dest);
}

// VFP's ldm/stm work differently from the standard arm ones. You can only
// transfer a range.

BufferOffset Assembler::as_vdtm(LoadStore st, Register rn, VFPRegister vd,
                                int length,
                                /* also has update conditions */ Condition c) {
  MOZ_ASSERT(length <= 16 && length >= 0);
  vfp_size sz = vd.isDouble() ? IsDouble : IsSingle;

  if (vd.isDouble()) length *= 2;

  return writeVFPInst(sz, dtmLoadStore | RN(rn) | VD(vd) | length | dtmMode |
                              dtmUpdate | dtmCond);
}

BufferOffset Assembler::as_vimm(VFPRegister vd, VFPImm imm, Condition c) {
  MOZ_ASSERT(imm.isValid());
  vfp_size sz = vd.isDouble() ? IsDouble : IsSingle;
  return writeVFPInst(sz, c | imm.encode() | VD(vd) | 0x02B00000);
}

BufferOffset Assembler::as_vmrs(Register r, Condition c) {
  return writeInst(c | 0x0ef10a10 | RT(r));
}

BufferOffset Assembler::as_vmsr(Register r, Condition c) {
  return writeInst(c | 0x0ee10a10 | RT(r));
}

bool Assembler::nextLink(BufferOffset b, BufferOffset* next) {
  Instruction branch = *editSrc(b);
  MOZ_ASSERT(branch.is<InstBranchImm>());

  BOffImm destOff;
  branch.as<InstBranchImm>()->extractImm(&destOff);
  if (destOff.isInvalid()) return false;

  // Propagate the next link back to the caller, by constructing a new
  // BufferOffset into the space they provided.
  new (next) BufferOffset(destOff.decode());
  return true;
}

void Assembler::bind(Label* label, BufferOffset boff) {
#ifdef JS_DISASM_ARM
  spew_.spewBind(label);
#endif
  if (oom()) {
    // Ensure we always bind the label. This matches what we do on
    // x86/x64 and silences the assert in ~Label.
    label->bind(0);
    return;
  }

  if (label->used()) {
    bool more;
    // If our caller didn't give us an explicit target to bind to, then we
    // want to bind to the location of the next instruction.
    BufferOffset dest = boff.assigned() ? boff : nextOffset();
    BufferOffset b(label);
    do {
      BufferOffset next;
      more = nextLink(b, &next);
      Instruction branch = *editSrc(b);
      Condition c = branch.extractCond();
      BOffImm offset = dest.diffB<BOffImm>(b);
      if (offset.isInvalid()) {
        m_buffer.fail_bail();
        return;
      }
      if (branch.is<InstBImm>())
        as_b(offset, c, b);
      else if (branch.is<InstBLImm>())
        as_bl(offset, c, b);
      else
        MOZ_CRASH("crazy fixup!");
      b = next;
    } while (more);
  }
  label->bind(nextOffset().getOffset());
  MOZ_ASSERT(!oom());
}

void Assembler::bindLater(Label* label, wasm::OldTrapDesc target) {
  if (label->used()) {
    BufferOffset b(label);
    do {
      append(wasm::OldTrapSite(target, b.getOffset()));
    } while (nextLink(b, &b));
  }
  label->reset();
}

void Assembler::bind(RepatchLabel* label) {
  // It does not seem to be useful to record this label for
  // disassembly, as the value that is bound to the label is often
  // effectively garbage and is replaced by something else later.
  BufferOffset dest = nextOffset();
  if (label->used() && !oom()) {
    // If the label has a use, then change this use to refer to the bound
    // label.
    BufferOffset branchOff(label->offset());
    // Since this was created with a RepatchLabel, the value written in the
    // instruction stream is not branch shaped; it is PoolHintData shaped.
    Instruction* branch = editSrc(branchOff);
    PoolHintPun p;
    p.raw = branch->encode();
    Condition cond;
    if (p.phd.isValidPoolHint())
      cond = p.phd.getCond();
    else
      cond = branch->extractCond();

    BOffImm offset = dest.diffB<BOffImm>(branchOff);
    if (offset.isInvalid()) {
      m_buffer.fail_bail();
      return;
    }
    as_b(offset, cond, branchOff);
  }
  label->bind(dest.getOffset());
}

void Assembler::retarget(Label* label, Label* target) {
#ifdef JS_DISASM_ARM
  spew_.spewRetarget(label, target);
#endif
  if (label->used() && !oom()) {
    if (target->bound()) {
      bind(label, BufferOffset(target));
    } else if (target->used()) {
      // The target is not bound but used. Prepend label's branch list
      // onto target's.
      BufferOffset labelBranchOffset(label);
      BufferOffset next;

      // Find the head of the use chain for label.
      while (nextLink(labelBranchOffset, &next)) labelBranchOffset = next;

      // Then patch the head of label's use chain to the tail of target's
      // use chain, prepending the entire use chain of target.
      Instruction branch = *editSrc(labelBranchOffset);
      Condition c = branch.extractCond();
      int32_t prev = target->offset();
      target->use(label->offset());
      if (branch.is<InstBImm>())
        as_b(BOffImm(prev), c, labelBranchOffset);
      else if (branch.is<InstBLImm>())
        as_bl(BOffImm(prev), c, labelBranchOffset);
      else
        MOZ_CRASH("crazy fixup!");
    } else {
      // The target is unbound and unused. We can just take the head of
      // the list hanging off of label, and dump that into target.
      target->use(label->offset());
    }
  }
  label->reset();
}

static int stopBKPT = -1;
void Assembler::as_bkpt() {
  // This is a count of how many times a breakpoint instruction has been
  // generated. It is embedded into the instruction for debugging
  // purposes. Gdb will print "bkpt xxx" when you attempt to disassemble a
  // breakpoint with the number xxx embedded into it. If this breakpoint is
  // being hit, then you can run (in gdb):
  //  >b dbg_break
  //  >b main
  //  >commands
  //  >set stopBKPT = xxx
  //  >c
  //  >end
  // which will set a breakpoint on the function dbg_break above, set a
  // scripted breakpoint on main that sets the (otherwise unmodified)
  // stopBKPT to the number of the breakpoint so dbg_break will actually be
  // called, and finally, when you run the executable, execution will halt
  // when that breakpoint is generated.
  static int hit = 0;
  if (stopBKPT == hit) dbg_break();
  writeInst(0xe1200070 | (hit & 0xf) | ((hit & 0xfff0) << 4));
  hit++;
}

BufferOffset Assembler::as_illegal_trap() {
  // Encoding of the permanently-undefined 'udf' instruction, with the imm16
  // set to 0.
  return writeInst(0xe7f000f0);
}

void Assembler::flushBuffer() { m_buffer.flushPool(); }

void Assembler::enterNoPool(size_t maxInst) { m_buffer.enterNoPool(maxInst); }

void Assembler::leaveNoPool() { m_buffer.leaveNoPool(); }

ptrdiff_t Assembler::GetBranchOffset(const Instruction* i_) {
  MOZ_ASSERT(i_->is<InstBranchImm>());
  InstBranchImm* i = i_->as<InstBranchImm>();
  BOffImm dest;
  i->extractImm(&dest);
  return dest.decode();
}

void Assembler::RetargetNearBranch(Instruction* i, int offset, bool final) {
  Assembler::Condition c = i->extractCond();
  RetargetNearBranch(i, offset, c, final);
}

void Assembler::RetargetNearBranch(Instruction* i, int offset, Condition cond,
                                   bool final) {
  // Retargeting calls is totally unsupported!
  MOZ_ASSERT_IF(i->is<InstBranchImm>(),
                i->is<InstBImm>() || i->is<InstBLImm>());
  if (i->is<InstBLImm>())
    new (i) InstBLImm(BOffImm(offset), cond);
  else
    new (i) InstBImm(BOffImm(offset), cond);

  // Flush the cache, since an instruction was overwritten.
  if (final) AutoFlushICache::flush(uintptr_t(i), 4);
}

void Assembler::RetargetFarBranch(Instruction* i, uint8_t** slot, uint8_t* dest,
                                  Condition cond) {
  int32_t offset =
      reinterpret_cast<uint8_t*>(slot) - reinterpret_cast<uint8_t*>(i);
  if (!i->is<InstLDR>()) {
    new (i) InstLDR(Offset, pc, DTRAddr(pc, DtrOffImm(offset - 8)), cond);
    AutoFlushICache::flush(uintptr_t(i), 4);
  }
  *slot = dest;
}

struct PoolHeader : Instruction {
  struct Header {
    // The size should take into account the pool header.
    // The size is in units of Instruction (4 bytes), not bytes.
    uint32_t size : 15;
    bool isNatural : 1;
    uint32_t ONES : 16;

    Header(int size_, bool isNatural_)
        : size(size_), isNatural(isNatural_), ONES(0xffff) {}

    Header(const Instruction* i) {
      JS_STATIC_ASSERT(sizeof(Header) == sizeof(uint32_t));
      memcpy(this, i, sizeof(Header));
      MOZ_ASSERT(ONES == 0xffff);
    }

    uint32_t raw() const {
      JS_STATIC_ASSERT(sizeof(Header) == sizeof(uint32_t));
      uint32_t dest;
      memcpy(&dest, this, sizeof(Header));
      return dest;
    }
  };

  PoolHeader(int size_, bool isNatural_)
      : Instruction(Header(size_, isNatural_).raw(), true) {}

  uint32_t size() const {
    Header tmp(this);
    return tmp.size;
  }
  uint32_t isNatural() const {
    Header tmp(this);
    return tmp.isNatural;
  }

  static bool IsTHIS(const Instruction& i) {
    return (*i.raw() & 0xffff0000) == 0xffff0000;
  }
  static const PoolHeader* AsTHIS(const Instruction& i) {
    if (!IsTHIS(i)) return nullptr;
    return static_cast<const PoolHeader*>(&i);
  }
};
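
// A worked example of the header word (illustration only): a pool holding a
// single 4-byte entry spans 2 words counting this header, so
//
//   Header(2, false).raw() == 0xffff0002   // artificial pool
//   Header(2, true).raw()  == 0xffff8002   // natural pool, bit 15 set
//
// which is the 0xffffNNNN pattern IsTHIS() keys on, and matches the .word
// values shown in the case-by-case comment before InstructionIterator::next()
// below.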

void Assembler::WritePoolHeader(uint8_t* start, Pool* p, bool isNatural) {
  static_assert(sizeof(PoolHeader) == 4,
                "PoolHeader must have the correct size.");
  uint8_t* pool = start + 4;
  // Go through the usual rigmarole to get the size of the pool.
  pool += p->getPoolSize();
  uint32_t size = pool - start;
  MOZ_ASSERT((size & 3) == 0);
  size = size >> 2;
  MOZ_ASSERT(size < (1 << 15));
  PoolHeader header(size, isNatural);
  *(PoolHeader*)start = header;
}

// The size of an arbitrary 32-bit call in the instruction stream. On ARM this
// sequence is |pc = ldr pc - 4; imm32| given that we never reach the imm32.
uint32_t Assembler::PatchWrite_NearCallSize() { return sizeof(uint32_t); }

void Assembler::PatchWrite_NearCall(CodeLocationLabel start,
                                    CodeLocationLabel toCall) {
  Instruction* inst = (Instruction*)start.raw();
  // Overwrite whatever instruction used to be here with a call. Since the
  // destination is in the same function, it will be within range of the
  // 24 << 2 byte bl instruction.
  uint8_t* dest = toCall.raw();
  new (inst) InstBLImm(BOffImm(dest - (uint8_t*)inst), Always);
  // Ensure everyone sees the code that was just written into memory.
  AutoFlushICache::flush(uintptr_t(inst), 4);
}

void Assembler::PatchDataWithValueCheck(CodeLocationLabel label,
                                        PatchedImmPtr newValue,
                                        PatchedImmPtr expectedValue) {
  Instruction* ptr = reinterpret_cast<Instruction*>(label.raw());

  Register dest;
  Assembler::RelocStyle rs;

  {
    InstructionIterator iter(ptr);
    DebugOnly<const uint32_t*> val = GetPtr32Target(iter, &dest, &rs);
    MOZ_ASSERT(uint32_t((const uint32_t*)val) == uint32_t(expectedValue.value));
  }

  // Patch over actual instructions.
  {
    InstructionIterator iter(ptr);
    MacroAssembler::ma_mov_patch(Imm32(int32_t(newValue.value)), dest, Always,
                                 rs, iter);
  }

  // L_LDR won't cause any instructions to be updated.
  if (rs != L_LDR) {
    InstructionIterator iter(ptr);
    AutoFlushICache::flush(uintptr_t(iter.cur()), 4);
    AutoFlushICache::flush(uintptr_t(iter.next()), 4);
  }
}

void Assembler::PatchDataWithValueCheck(CodeLocationLabel label,
                                        ImmPtr newValue, ImmPtr expectedValue) {
  PatchDataWithValueCheck(label, PatchedImmPtr(newValue.value),
                          PatchedImmPtr(expectedValue.value));
}

// This just stomps over memory with 32 bits of raw data. Its purpose is to
// overwrite a call in JITed code with 32 bits worth of an offset. It is only
// meant to function on code that has been invalidated, so it should be
// totally safe. Since that instruction will never be executed again, an
// ICache flush should not be necessary.
void Assembler::PatchWrite_Imm32(CodeLocationLabel label, Imm32 imm) {
  // Raw is going to be the return address.
  uint32_t* raw = (uint32_t*)label.raw();
  // Overwrite the 4 bytes before the return address, which will end up being
  // the call instruction.
  *(raw - 1) = imm.value;
}

uint8_t* Assembler::NextInstruction(uint8_t* inst_, uint32_t* count) {
  if (count != nullptr) *count += sizeof(Instruction);

  InstructionIterator iter(reinterpret_cast<Instruction*>(inst_));
  return reinterpret_cast<uint8_t*>(iter.next());
}

static bool InstIsGuard(Instruction* inst, const PoolHeader** ph) {
  Assembler::Condition c = inst->extractCond();
  if (c != Assembler::Always) return false;
  if (!(inst->is<InstBXReg>() || inst->is<InstBImm>())) return false;
  // See if the next instruction is a pool header.
  *ph = (inst + 1)->as<const PoolHeader>();
  return *ph != nullptr;
}

static bool InstIsGuard(BufferInstructionIterator& iter,
                        const PoolHeader** ph) {
  Instruction* inst = iter.cur();
  Assembler::Condition c = inst->extractCond();
  if (c != Assembler::Always) return false;
  if (!(inst->is<InstBXReg>() || inst->is<InstBImm>())) return false;
  // See if the next instruction is a pool header.
  *ph = iter.peek()->as<const PoolHeader>();
  return *ph != nullptr;
}

template <class T>
static bool InstIsBNop(const T& iter) {
  // In some special situations, it is necessary to insert a NOP into the
  // instruction stream that nobody knows about. Since nobody should know
  // about it, we make sure it gets skipped when Instruction::next() is
  // called. This kind of nop is very specific: a branch to the next
  // instruction.
  const Instruction* cur = iter.cur();
  Assembler::Condition c = cur->extractCond();
  if (c != Assembler::Always) return false;
  if (!cur->is<InstBImm>()) return false;
  InstBImm* b = cur->as<InstBImm>();
  BOffImm offset;
  b->extractImm(&offset);
  return offset.decode() == 4;
}
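
// Why decode() == 4 means "branch to the next instruction" (a sketch of the
// arithmetic, assuming BOffImm's usual pc read-ahead bias): the raw imm24 is
// (target - pc) >> 2 with pc reading 8 bytes ahead, so for a branch at
// address A targeting A + 4:
//
//   raw    = (A + 4 - (A + 8)) >> 2 = -1
//   decode = (raw << 2) + 8         = 4
//
// A decoded offset of 4 therefore skips exactly one instruction slot.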

Instruction* InstructionIterator::maybeSkipAutomaticInstructions() {
  // If the current instruction was automatically-inserted, skip past it.
  const PoolHeader* ph;

  // Loop until an intentionally-placed instruction is found.
  while (true) {
    if (InstIsGuard(cur(), &ph)) {
      // Don't skip a natural guard.
      if (ph->isNatural()) return cur();
      advanceRaw(1 + ph->size());
    } else if (InstIsBNop<InstructionIterator>(*this)) {
      advanceRaw(1);
    } else {
      return cur();
    }
  }
}

Instruction* BufferInstructionIterator::maybeSkipAutomaticInstructions() {
  const PoolHeader* ph;
  // If this is a guard, and the next instruction is a header, always work
  // around the pool. If it isn't a guard, then start looking ahead.
  if (InstIsGuard(*this, &ph)) {
    // Don't skip a natural guard.
    if (ph->isNatural()) return cur();
    advance(sizeof(Instruction) * ph->size());
    return next();
  }
  if (InstIsBNop<BufferInstructionIterator>(*this)) return next();
  return cur();
}

// Cases to be handled:
// 1) no pools or branches in sight => return this+1
// 2) branch to next instruction => return this+2, because a nop needed to be
//    inserted into the stream.
// 3) this+1 is an artificial guard for a pool => return first instruction
//    after the pool
// 4) this+1 is a natural guard => return the branch
// 5) this is a branch, right before a pool => return first instruction after
//    the pool
// in assembly form:
// 1) add r0, r0, r0 <= this
//    add r1, r1, r1 <= returned value
//    add r2, r2, r2
//
// 2) add r0, r0, r0 <= this
//    b foo
//    foo:
//    add r2, r2, r2 <= returned value
//
// 3) add r0, r0, r0 <= this
//    b after_pool;
//    .word 0xffff0002  # bit 15 being 0 indicates that the branch was not
//                      # requested by the assembler; the 2 indicates that
//                      # there is 1 pool entry plus the pool header
//    0xdeadbeef
//    add r4, r4, r4 <= returned value
// 4) add r0, r0, r0 <= this
//    b after_pool  <= returned value
//    .word 0xffff8002  # bit 15 being 1 indicates that the branch was
//                      # requested by the assembler
//    0xdeadbeef
//    add r4, r4, r4
// 5) b after_pool  <= this
//    .word 0xffff8002  # bit 15 has no bearing on the returned value
//    0xdeadbeef
//    add r4, r4, r4  <= returned value

Instruction* InstructionIterator::next() {
  const PoolHeader* ph;

  // If the current instruction is followed by a pool header,
  // move past the current instruction and the pool.
  if (InstIsGuard(cur(), &ph)) {
    advanceRaw(1 + ph->size());
    return maybeSkipAutomaticInstructions();
  }

  // The next instruction is then known to not be a PoolHeader.
  advanceRaw(1);
  return maybeSkipAutomaticInstructions();
}

void Assembler::ToggleToJmp(CodeLocationLabel inst_) {
  uint32_t* ptr = (uint32_t*)inst_.raw();

  DebugOnly<Instruction*> inst = (Instruction*)inst_.raw();
  MOZ_ASSERT(inst->is<InstCMP>());

  // Zero bits 20-27, then set 24-27 to be correct for a branch.
  // 20-23 will be part of the B's immediate, and should be 0.
  *ptr = (*ptr & ~(0xff << 20)) | (0xa0 << 20);
  AutoFlushICache::flush(uintptr_t(ptr), 4);
}

void Assembler::ToggleToCmp(CodeLocationLabel inst_) {
  uint32_t* ptr = (uint32_t*)inst_.raw();

  DebugOnly<Instruction*> inst = (Instruction*)inst_.raw();
  MOZ_ASSERT(inst->is<InstBImm>());

  // Ensure that this masking operation doesn't affect the offset of the
  // branch instruction when it gets toggled back.
  MOZ_ASSERT((*ptr & (0xf << 20)) == 0);

  // Also make sure that the CMP is valid. Part of having a valid CMP is that
  // all of the bits describing the destination in most ALU instructions are
  // all unset (looks like it is encoding r0).
  MOZ_ASSERT(toRD(*inst) == r0);

  // Zero out bits 20-27, then set them to be correct for a compare.
  *ptr = (*ptr & ~(0xff << 20)) | (0x35 << 20);

  AutoFlushICache::flush(uintptr_t(ptr), 4);
}
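
// A bit-pattern view of the toggle above (cond and operand bits shown as
// x's): bits 27..20 select the instruction class, and the two functions
// simply rewrite that byte in place.
//
//   b   <imm24>  : xxxx 1010 0000 xxxx ...  => 0xa0 << 20
//   cmp rn, #imm : xxxx 0011 0101 xxxx ...  => 0x35 << 20
//
// Bits 20..23 double as the top of the branch immediate, which is why the
// code asserts they are zero: the same word can then flip back and forth
// losslessly.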

void Assembler::ToggleCall(CodeLocationLabel inst_, bool enabled) {
  InstructionIterator iter(reinterpret_cast<Instruction*>(inst_.raw()));
  MOZ_ASSERT(iter.cur()->is<InstMovW>() || iter.cur()->is<InstLDR>());

  if (iter.cur()->is<InstMovW>()) {
    // If it looks like the start of a movw/movt sequence, then make sure we
    // have all of it (and advance the iterator past the full sequence).
    iter.next();
    MOZ_ASSERT(iter.cur()->is<InstMovT>());
  }

  iter.next();
  MOZ_ASSERT(iter.cur()->is<InstNOP>() || iter.cur()->is<InstBLXReg>());

  if (enabled == iter.cur()->is<InstBLXReg>()) {
    // Nothing to do.
    return;
  }

  Instruction* inst = iter.cur();

  if (enabled)
    *inst = InstBLXReg(ScratchRegister, Always);
  else
    *inst = InstNOP();

  AutoFlushICache::flush(uintptr_t(inst), 4);
}

size_t Assembler::ToggledCallSize(uint8_t* code) {
  InstructionIterator iter(reinterpret_cast<Instruction*>(code));
  MOZ_ASSERT(iter.cur()->is<InstMovW>() || iter.cur()->is<InstLDR>());

  if (iter.cur()->is<InstMovW>()) {
    // If it looks like the start of a movw/movt sequence, then make sure we
    // have all of it (and advance the iterator past the full sequence).
    iter.next();
    MOZ_ASSERT(iter.cur()->is<InstMovT>());
  }

  iter.next();
  MOZ_ASSERT(iter.cur()->is<InstNOP>() || iter.cur()->is<InstBLXReg>());
  return uintptr_t(iter.cur()) + 4 - uintptr_t(code);
}

uint8_t* Assembler::BailoutTableStart(uint8_t* code) {
  // The iterator skips over any automatically-inserted instructions.
  InstructionIterator iter(reinterpret_cast<Instruction*>(code));
  MOZ_ASSERT(iter.cur()->is<InstBLImm>());
  return reinterpret_cast<uint8_t*>(iter.cur());
}
2670 
2671 uint32_t Assembler::NopFill = 0;
2672 
GetNopFill()2673 uint32_t Assembler::GetNopFill() {
2674   static bool isSet = false;
2675   if (!isSet) {
2676     char* fillStr = getenv("ARM_ASM_NOP_FILL");
2677     uint32_t fill;
2678     if (fillStr && sscanf(fillStr, "%u", &fill) == 1) NopFill = fill;
2679     if (NopFill > 8) MOZ_CRASH("Nop fill > 8 is not supported");
2680     isSet = true;
2681   }
2682   return NopFill;
2683 }
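
// As a usage sketch (hypothetical shell invocation), running with
//
//   ARM_ASM_NOP_FILL=2 js script.js
//
// inserts two nops at each fill point; values above 8 deliberately crash.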

uint32_t Assembler::AsmPoolMaxOffset = 1024;

uint32_t Assembler::GetPoolMaxOffset() {
  static bool isSet = false;
  if (!isSet) {
    char* poolMaxOffsetStr = getenv("ASM_POOL_MAX_OFFSET");
    uint32_t poolMaxOffset;
    if (poolMaxOffsetStr && sscanf(poolMaxOffsetStr, "%u", &poolMaxOffset) == 1)
      AsmPoolMaxOffset = poolMaxOffset;
    isSet = true;
  }
  return AsmPoolMaxOffset;
}
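
// Similarly, ASM_POOL_MAX_OFFSET=512 (a hypothetical setting) would override
// the default AsmPoolMaxOffset of 1024 that bounds how far away constant
// pools may be scheduled from the code that references them.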

SecondScratchRegisterScope::SecondScratchRegisterScope(MacroAssembler& masm)
    : AutoRegisterScope(masm, masm.getSecondScratchReg()) {}

#ifdef JS_DISASM_ARM

/* static */ void Assembler::disassembleInstruction(const Instruction* i,
                                                    DisasmBuffer& buffer) {
  disasm::NameConverter converter;
  disasm::Disassembler dasm(converter);
  uint8_t* loc = reinterpret_cast<uint8_t*>(const_cast<uint32_t*>(i->raw()));
  dasm.InstructionDecode(buffer, loc);
}

void Assembler::initDisassembler() {
  // The line is normally laid out like this:
  //
  // xxxxxxxx        ldr r, op   ; comment
  //
  // where xx...x is the instruction bit pattern.
  //
  // Labels are laid out by themselves to line up with the instructions above
  // and below:
  //
  //            nnnn:
  //
  // Branch targets are normally on the same line as the branch instruction,
  // but when they cannot be they will be on a line by themselves, indented
  // significantly:
  //
  //                     -> label

  spew_.setLabelIndent("          ");             // 10
  spew_.setTargetIndent("                    ");  // 20
}

void Assembler::finishDisassembler() { spew_.spewOrphans(); }
// Labels are named as they are encountered, by adding names to a table
// keyed on the Label's address.  This is made tricky by the memory for
// Label objects being reused, but reused Label objects are recognizable
// because they are marked as neither used nor bound.  See spew_.refLabel().
//
// In a number of cases there is no information about the target, and
// we just end up printing "patchable constant load to PC".  This is
// true especially for jumps to bailout handlers (which have no
// names).  See allocLiteralLoadEntry() and its callers.  In some cases
// (loop back edges) some information about the intended target may be
// propagated from higher levels, and if so it's printed here.

void Assembler::spew(Instruction* i) {
  if (spew_.isDisabled() || !i) return;

  DisasmBuffer buffer;
  disassembleInstruction(i, buffer);
  spew_.spew("%s", buffer.start());
}

// If a target label is known, always print that and do not attempt to
// disassemble the branch operands, as they will often be encoding
// metainformation (pointers for a chain of jump instructions), and
// not actual branch targets.

void Assembler::spewBranch(Instruction* i, const LabelDoc& target) {
  if (spew_.isDisabled() || !i) return;

  DisasmBuffer buffer;
  disassembleInstruction(i, buffer);

  char labelBuf[128];
  labelBuf[0] = 0;

  bool haveTarget = target.valid;
  if (!haveTarget)
    snprintf(labelBuf, sizeof(labelBuf), "  -> (link-time target)");

  if (InstBranchImm::IsTHIS(*i)) {
    InstBranchImm* bimm = InstBranchImm::AsTHIS(*i);
    BOffImm destOff;
    bimm->extractImm(&destOff);
    if (destOff.isInvalid() || haveTarget) {
      // The target information in the instruction is likely garbage, so remove
      // it. The target label will in any case be printed if we have it.
      //
      // The format of the instruction disassembly is [0-9a-f]{8}\s+\S+\s+.*,
      // where the \S+ string is the opcode.  Strip everything after the opcode,
      // and attach the label if we have it.
      int i;
      // Skip the spaces that follow the eight hex digits of the bit pattern.
      for (i = 8; i < buffer.length() && buffer[i] == ' '; i++)
        ;
      // Then scan past the opcode and truncate the buffer there.
      for (; i < buffer.length() && buffer[i] != ' '; i++)
        ;
      buffer[i] = 0;
      if (haveTarget) {
        snprintf(labelBuf, sizeof(labelBuf), "  -> %d%s", target.doc,
                 !target.bound ? "f" : "");
        haveTarget = false;
      }
    }
  }
  spew_.spew("%s%s", buffer.start(), labelBuf);

  if (haveTarget) spew_.spewRef(target);
}
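
// A spewed branch line therefore looks something like this (illustrative
// values; the label number and the "f" suffix for a not-yet-bound label
// come from the code above):
//
//   eb000000        bl  -> 42f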

void Assembler::spewLiteralLoad(PoolHintPun& php, bool loadToPC,
                                const Instruction* i, const LiteralDoc& doc) {
  if (spew_.isDisabled()) return;

  char litbuf[2048];
  spew_.formatLiteral(doc, litbuf, sizeof(litbuf));

  // See patchConstantPoolLoad, above.  We assemble the instruction into a
  // buffer with a zero offset, as documentation, but the offset will be
  // patched later.

  uint32_t inst;
  PoolHintData& data = php.phd;
  switch (php.phd.getLoadType()) {
    case PoolHintData::PoolDTR:
      Assembler::as_dtr_patch(IsLoad, 32, Offset, data.getReg(),
                              DTRAddr(pc, DtrOffImm(0)), data.getCond(), &inst);
      break;
    case PoolHintData::PoolBranch:
      if (data.isValidPoolHint()) {
        Assembler::as_dtr_patch(IsLoad, 32, Offset, pc,
                                DTRAddr(pc, DtrOffImm(0)), data.getCond(),
                                &inst);
      }
      break;
    case PoolHintData::PoolVDTR:
      Assembler::as_vdtr_patch(IsLoad, data.getVFPReg(),
                               VFPAddr(pc, VFPOffImm(0)), data.getCond(),
                               &inst);
      break;

    default:
      MOZ_CRASH();
  }

  DisasmBuffer buffer;
  disasm::NameConverter converter;
  disasm::Disassembler dasm(converter);
  dasm.InstructionDecode(buffer, reinterpret_cast<uint8_t*>(&inst));
  spew_.spew("%s    ; .const %s", buffer.start(), litbuf);
}
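
// The resulting spew might look like this (illustrative; the zero offset is
// the documented placeholder that gets patched later):
//
//   e59f0000        ldr r0, [pc, #0]    ; .const 0x12345678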

#endif  // JS_DISASM_ARM