1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
2 * vim: set ts=8 sts=2 et sw=2 tw=80:
3 * This Source Code Form is subject to the terms of the Mozilla Public
4 * License, v. 2.0. If a copy of the MPL was not distributed with this
5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
6
7 #include "jit/arm/Assembler-arm.h"
8
9 #include "mozilla/DebugOnly.h"
10 #include "mozilla/MathAlgorithms.h"
11 #include "mozilla/Maybe.h"
12 #include "mozilla/Sprintf.h"
13
14 #include "gc/Marking.h"
15 #include "jit/arm/disasm/Disasm-arm.h"
16 #include "jit/arm/MacroAssembler-arm.h"
17 #include "jit/AutoWritableJitCode.h"
18 #include "jit/ExecutableAllocator.h"
19 #include "jit/MacroAssembler.h"
20 #include "vm/Realm.h"
21
22 using namespace js;
23 using namespace js::jit;
24
25 using mozilla::CountLeadingZeroes32;
26 using mozilla::DebugOnly;
27
28 using LabelDoc = DisassemblerSpew::LabelDoc;
29 using LiteralDoc = DisassemblerSpew::LiteralDoc;
30
// Deliberately empty function: a stable symbol to set debugger breakpoints on.
void dbg_break() {}
32
33 // The ABIArgGenerator is used for making system ABI calls and for inter-wasm
34 // calls. The system ABI can either be SoftFp or HardFp, and inter-wasm calls
35 // are always HardFp calls. The initialization defaults to HardFp, and the ABI
36 // choice is made before any system ABI calls with the method "setUseHardFp".
// Start a fresh argument assignment. HardFp is the default; callers switch to
// SoftFp via setUseHardFp before making system ABI calls (see comment above).
ABIArgGenerator::ABIArgGenerator()
    : intRegIndex_(0),
      floatRegIndex_(0),
      stackOffset_(0),
      current_(),
      useHardFp_(true) {}
43
44 // See the "Parameter Passing" section of the "Procedure Call Standard for the
45 // ARM Architecture" documentation.
// Assign the next argument location under the SoftFp ABI: floating-point
// arguments are passed in core (integer) registers or on the stack, never in
// VFP registers. Updates the generator's register/stack cursors and returns
// the chosen location.
ABIArg ABIArgGenerator::softNext(MIRType type) {
  switch (type) {
    case MIRType::Int32:
    case MIRType::Pointer:
    case MIRType::RefOrNull:
    case MIRType::StackResults:
      // 32-bit values: next free core register, else 4 bytes of stack.
      if (intRegIndex_ == NumIntArgRegs) {
        current_ = ABIArg(stackOffset_);
        stackOffset_ += sizeof(uint32_t);
        break;
      }
      current_ = ABIArg(Register::FromCode(intRegIndex_));
      intRegIndex_++;
      break;
    case MIRType::Int64:
      // Make sure to use an even register index. Increase to next even number
      // when odd.
      intRegIndex_ = (intRegIndex_ + 1) & ~1;
      if (intRegIndex_ == NumIntArgRegs) {
        // Align the stack on 8 bytes.
        static const uint32_t align = sizeof(uint64_t) - 1;
        stackOffset_ = (stackOffset_ + align) & ~align;
        current_ = ABIArg(stackOffset_);
        stackOffset_ += sizeof(uint64_t);
        break;
      }
      current_ = ABIArg(Register::FromCode(intRegIndex_),
                        Register::FromCode(intRegIndex_ + 1));
      intRegIndex_ += 2;
      break;
    case MIRType::Float32:
      // SoftFp: a float travels exactly like an Int32.
      if (intRegIndex_ == NumIntArgRegs) {
        current_ = ABIArg(stackOffset_);
        stackOffset_ += sizeof(uint32_t);
        break;
      }
      current_ = ABIArg(Register::FromCode(intRegIndex_));
      intRegIndex_++;
      break;
    case MIRType::Double:
      // SoftFp: a double travels like an Int64 — an even-aligned core
      // register pair, or 8-byte-aligned stack space.
      intRegIndex_ = (intRegIndex_ + 1) & ~1;
      if (intRegIndex_ == NumIntArgRegs) {
        // Align the stack on 8 bytes.
        static const uint32_t align = sizeof(double) - 1;
        stackOffset_ = (stackOffset_ + align) & ~align;
        current_ = ABIArg(stackOffset_);
        stackOffset_ += sizeof(double);
        break;
      }
      current_ = ABIArg(Register::FromCode(intRegIndex_),
                        Register::FromCode(intRegIndex_ + 1));
      intRegIndex_ += 2;
      break;
    default:
      MOZ_CRASH("Unexpected argument type");
  }

  return current_;
}
107
// Assign the next argument location under the HardFp ABI: floating-point
// arguments use the VFP registers (singles in consecutive s-registers,
// doubles in even-aligned register pairs), independently of the core-register
// cursor.
ABIArg ABIArgGenerator::hardNext(MIRType type) {
  switch (type) {
    case MIRType::Int32:
    case MIRType::Pointer:
    case MIRType::RefOrNull:
    case MIRType::StackResults:
      // 32-bit integral values: next free core register, else the stack.
      if (intRegIndex_ == NumIntArgRegs) {
        current_ = ABIArg(stackOffset_);
        stackOffset_ += sizeof(uint32_t);
        break;
      }
      current_ = ABIArg(Register::FromCode(intRegIndex_));
      intRegIndex_++;
      break;
    case MIRType::Int64:
      // Make sure to use an even register index. Increase to next even number
      // when odd.
      intRegIndex_ = (intRegIndex_ + 1) & ~1;
      if (intRegIndex_ == NumIntArgRegs) {
        // Align the stack on 8 bytes.
        static const uint32_t align = sizeof(uint64_t) - 1;
        stackOffset_ = (stackOffset_ + align) & ~align;
        current_ = ABIArg(stackOffset_);
        stackOffset_ += sizeof(uint64_t);
        break;
      }
      current_ = ABIArg(Register::FromCode(intRegIndex_),
                        Register::FromCode(intRegIndex_ + 1));
      intRegIndex_ += 2;
      break;
    case MIRType::Float32:
      // Singles: next free single-precision VFP register, else the stack.
      if (floatRegIndex_ == NumFloatArgRegs) {
        current_ = ABIArg(stackOffset_);
        stackOffset_ += sizeof(uint32_t);
        break;
      }
      current_ = ABIArg(VFPRegister(floatRegIndex_, VFPRegister::Single));
      floatRegIndex_++;
      break;
    case MIRType::Double:
      // A double occupies a pair of consecutive float registers, so skip any
      // odd float register that cannot start such a pair.
      floatRegIndex_ = (floatRegIndex_ + 1) & ~1;
      if (floatRegIndex_ == NumFloatArgRegs) {
        // Align the stack on 8 bytes.
        static const uint32_t align = sizeof(double) - 1;
        stackOffset_ = (stackOffset_ + align) & ~align;
        current_ = ABIArg(stackOffset_);
        stackOffset_ += sizeof(uint64_t);
        break;
      }
      current_ = ABIArg(VFPRegister(floatRegIndex_ >> 1, VFPRegister::Double));
      floatRegIndex_ += 2;
      break;
    default:
      MOZ_CRASH("Unexpected argument type");
  }

  return current_;
}
168
next(MIRType type)169 ABIArg ABIArgGenerator::next(MIRType type) {
170 if (useHardFp_) {
171 return hardNext(type);
172 }
173 return softNext(type);
174 }
175
IsUnaligned(const wasm::MemoryAccessDesc & access)176 bool js::jit::IsUnaligned(const wasm::MemoryAccessDesc& access) {
177 if (!access.align()) {
178 return false;
179 }
180
181 if (access.type() == Scalar::Float64 && access.align() >= 4) {
182 return false;
183 }
184
185 return access.align() < access.byteSize();
186 }
187
188 // Encode a standard register when it is being used as src1, the dest, and an
189 // extra register. These should never be called with an InvalidReg.
RT(Register r)190 uint32_t js::jit::RT(Register r) {
191 MOZ_ASSERT((r.code() & ~0xf) == 0);
192 return r.code() << 12;
193 }
194
RN(Register r)195 uint32_t js::jit::RN(Register r) {
196 MOZ_ASSERT((r.code() & ~0xf) == 0);
197 return r.code() << 16;
198 }
199
RD(Register r)200 uint32_t js::jit::RD(Register r) {
201 MOZ_ASSERT((r.code() & ~0xf) == 0);
202 return r.code() << 12;
203 }
204
RM(Register r)205 uint32_t js::jit::RM(Register r) {
206 MOZ_ASSERT((r.code() & ~0xf) == 0);
207 return r.code() << 8;
208 }
209
210 // Encode a standard register when it is being used as src1, the dest, and an
211 // extra register. For these, an InvalidReg is used to indicate a optional
212 // register that has been omitted.
maybeRT(Register r)213 uint32_t js::jit::maybeRT(Register r) {
214 if (r == InvalidReg) {
215 return 0;
216 }
217
218 MOZ_ASSERT((r.code() & ~0xf) == 0);
219 return r.code() << 12;
220 }
221
maybeRN(Register r)222 uint32_t js::jit::maybeRN(Register r) {
223 if (r == InvalidReg) {
224 return 0;
225 }
226
227 MOZ_ASSERT((r.code() & ~0xf) == 0);
228 return r.code() << 16;
229 }
230
maybeRD(Register r)231 uint32_t js::jit::maybeRD(Register r) {
232 if (r == InvalidReg) {
233 return 0;
234 }
235
236 MOZ_ASSERT((r.code() & ~0xf) == 0);
237 return r.code() << 12;
238 }
239
toRD(Instruction i)240 Register js::jit::toRD(Instruction i) {
241 return Register::FromCode((i.encode() >> 12) & 0xf);
242 }
toR(Instruction i)243 Register js::jit::toR(Instruction i) {
244 return Register::FromCode(i.encode() & 0xf);
245 }
246
toRM(Instruction i)247 Register js::jit::toRM(Instruction i) {
248 return Register::FromCode((i.encode() >> 8) & 0xf);
249 }
250
toRN(Instruction i)251 Register js::jit::toRN(Instruction i) {
252 return Register::FromCode((i.encode() >> 16) & 0xf);
253 }
254
VD(VFPRegister vr)255 uint32_t js::jit::VD(VFPRegister vr) {
256 if (vr.isMissing()) {
257 return 0;
258 }
259
260 // Bits 15,14,13,12, 22.
261 VFPRegister::VFPRegIndexSplit s = vr.encode();
262 return s.bit << 22 | s.block << 12;
263 }
VN(VFPRegister vr)264 uint32_t js::jit::VN(VFPRegister vr) {
265 if (vr.isMissing()) {
266 return 0;
267 }
268
269 // Bits 19,18,17,16, 7.
270 VFPRegister::VFPRegIndexSplit s = vr.encode();
271 return s.bit << 7 | s.block << 16;
272 }
VM(VFPRegister vr)273 uint32_t js::jit::VM(VFPRegister vr) {
274 if (vr.isMissing()) {
275 return 0;
276 }
277
278 // Bits 5, 3,2,1,0.
279 VFPRegister::VFPRegIndexSplit s = vr.encode();
280 return s.bit << 5 | s.block;
281 }
282
encode()283 VFPRegister::VFPRegIndexSplit jit::VFPRegister::encode() {
284 MOZ_ASSERT(!_isInvalid);
285
286 switch (kind) {
287 case Double:
288 return VFPRegIndexSplit(code_ & 0xf, code_ >> 4);
289 case Single:
290 return VFPRegIndexSplit(code_ >> 1, code_ & 1);
291 default:
292 // VFP register treated as an integer, NOT a gpr.
293 return VFPRegIndexSplit(code_ >> 1, code_ & 1);
294 }
295 }
296
IsTHIS(const Instruction & i)297 bool InstDTR::IsTHIS(const Instruction& i) {
298 return (i.encode() & IsDTRMask) == (uint32_t)IsDTR;
299 }
300
AsTHIS(const Instruction & i)301 InstDTR* InstDTR::AsTHIS(const Instruction& i) {
302 if (IsTHIS(i)) {
303 return (InstDTR*)&i;
304 }
305 return nullptr;
306 }
307
IsTHIS(const Instruction & i)308 bool InstLDR::IsTHIS(const Instruction& i) {
309 return (i.encode() & IsDTRMask) == (uint32_t)IsDTR;
310 }
311
AsTHIS(const Instruction & i)312 InstLDR* InstLDR::AsTHIS(const Instruction& i) {
313 if (IsTHIS(i)) {
314 return (InstLDR*)&i;
315 }
316 return nullptr;
317 }
318
AsTHIS(Instruction & i)319 InstNOP* InstNOP::AsTHIS(Instruction& i) {
320 if (IsTHIS(i)) {
321 return (InstNOP*)&i;
322 }
323 return nullptr;
324 }
325
IsTHIS(const Instruction & i)326 bool InstNOP::IsTHIS(const Instruction& i) {
327 return (i.encode() & 0x0fffffff) == NopInst;
328 }
329
IsTHIS(const Instruction & i)330 bool InstBranchReg::IsTHIS(const Instruction& i) {
331 return InstBXReg::IsTHIS(i) || InstBLXReg::IsTHIS(i);
332 }
333
AsTHIS(const Instruction & i)334 InstBranchReg* InstBranchReg::AsTHIS(const Instruction& i) {
335 if (IsTHIS(i)) {
336 return (InstBranchReg*)&i;
337 }
338 return nullptr;
339 }
extractDest(Register * dest)340 void InstBranchReg::extractDest(Register* dest) { *dest = toR(*this); }
checkDest(Register dest)341 bool InstBranchReg::checkDest(Register dest) { return dest == toR(*this); }
342
IsTHIS(const Instruction & i)343 bool InstBranchImm::IsTHIS(const Instruction& i) {
344 return InstBImm::IsTHIS(i) || InstBLImm::IsTHIS(i);
345 }
346
AsTHIS(const Instruction & i)347 InstBranchImm* InstBranchImm::AsTHIS(const Instruction& i) {
348 if (IsTHIS(i)) {
349 return (InstBranchImm*)&i;
350 }
351 return nullptr;
352 }
353
extractImm(BOffImm * dest)354 void InstBranchImm::extractImm(BOffImm* dest) { *dest = BOffImm(*this); }
355
IsTHIS(const Instruction & i)356 bool InstBXReg::IsTHIS(const Instruction& i) {
357 return (i.encode() & IsBRegMask) == IsBX;
358 }
359
AsTHIS(const Instruction & i)360 InstBXReg* InstBXReg::AsTHIS(const Instruction& i) {
361 if (IsTHIS(i)) {
362 return (InstBXReg*)&i;
363 }
364 return nullptr;
365 }
366
IsTHIS(const Instruction & i)367 bool InstBLXReg::IsTHIS(const Instruction& i) {
368 return (i.encode() & IsBRegMask) == IsBLX;
369 }
AsTHIS(const Instruction & i)370 InstBLXReg* InstBLXReg::AsTHIS(const Instruction& i) {
371 if (IsTHIS(i)) {
372 return (InstBLXReg*)&i;
373 }
374 return nullptr;
375 }
376
IsTHIS(const Instruction & i)377 bool InstBImm::IsTHIS(const Instruction& i) {
378 return (i.encode() & IsBImmMask) == IsB;
379 }
AsTHIS(const Instruction & i)380 InstBImm* InstBImm::AsTHIS(const Instruction& i) {
381 if (IsTHIS(i)) {
382 return (InstBImm*)&i;
383 }
384 return nullptr;
385 }
386
IsTHIS(const Instruction & i)387 bool InstBLImm::IsTHIS(const Instruction& i) {
388 return (i.encode() & IsBImmMask) == IsBL;
389 }
AsTHIS(const Instruction & i)390 InstBLImm* InstBLImm::AsTHIS(const Instruction& i) {
391 if (IsTHIS(i)) {
392 return (InstBLImm*)&i;
393 }
394 return nullptr;
395 }
396
IsTHIS(Instruction & i)397 bool InstMovWT::IsTHIS(Instruction& i) {
398 return InstMovW::IsTHIS(i) || InstMovT::IsTHIS(i);
399 }
AsTHIS(Instruction & i)400 InstMovWT* InstMovWT::AsTHIS(Instruction& i) {
401 if (IsTHIS(i)) {
402 return (InstMovWT*)&i;
403 }
404 return nullptr;
405 }
406
extractImm(Imm16 * imm)407 void InstMovWT::extractImm(Imm16* imm) { *imm = Imm16(*this); }
checkImm(Imm16 imm)408 bool InstMovWT::checkImm(Imm16 imm) {
409 return imm.decode() == Imm16(*this).decode();
410 }
411
extractDest(Register * dest)412 void InstMovWT::extractDest(Register* dest) { *dest = toRD(*this); }
checkDest(Register dest)413 bool InstMovWT::checkDest(Register dest) { return dest == toRD(*this); }
414
IsTHIS(const Instruction & i)415 bool InstMovW::IsTHIS(const Instruction& i) {
416 return (i.encode() & IsWTMask) == IsW;
417 }
418
AsTHIS(const Instruction & i)419 InstMovW* InstMovW::AsTHIS(const Instruction& i) {
420 if (IsTHIS(i)) {
421 return (InstMovW*)&i;
422 }
423 return nullptr;
424 }
AsTHIS(const Instruction & i)425 InstMovT* InstMovT::AsTHIS(const Instruction& i) {
426 if (IsTHIS(i)) {
427 return (InstMovT*)&i;
428 }
429 return nullptr;
430 }
431
IsTHIS(const Instruction & i)432 bool InstMovT::IsTHIS(const Instruction& i) {
433 return (i.encode() & IsWTMask) == IsT;
434 }
435
AsTHIS(const Instruction & i)436 InstALU* InstALU::AsTHIS(const Instruction& i) {
437 if (IsTHIS(i)) {
438 return (InstALU*)&i;
439 }
440 return nullptr;
441 }
IsTHIS(const Instruction & i)442 bool InstALU::IsTHIS(const Instruction& i) {
443 return (i.encode() & ALUMask) == 0;
444 }
extractOp(ALUOp * ret)445 void InstALU::extractOp(ALUOp* ret) { *ret = ALUOp(encode() & (0xf << 21)); }
checkOp(ALUOp op)446 bool InstALU::checkOp(ALUOp op) {
447 ALUOp mine;
448 extractOp(&mine);
449 return mine == op;
450 }
extractDest(Register * ret)451 void InstALU::extractDest(Register* ret) { *ret = toRD(*this); }
checkDest(Register rd)452 bool InstALU::checkDest(Register rd) { return rd == toRD(*this); }
extractOp1(Register * ret)453 void InstALU::extractOp1(Register* ret) { *ret = toRN(*this); }
checkOp1(Register rn)454 bool InstALU::checkOp1(Register rn) { return rn == toRN(*this); }
extractOp2()455 Operand2 InstALU::extractOp2() { return Operand2(encode()); }
456
AsTHIS(const Instruction & i)457 InstCMP* InstCMP::AsTHIS(const Instruction& i) {
458 if (IsTHIS(i)) {
459 return (InstCMP*)&i;
460 }
461 return nullptr;
462 }
463
IsTHIS(const Instruction & i)464 bool InstCMP::IsTHIS(const Instruction& i) {
465 return InstALU::IsTHIS(i) && InstALU::AsTHIS(i)->checkDest(r0) &&
466 InstALU::AsTHIS(i)->checkOp(OpCmp);
467 }
468
AsTHIS(const Instruction & i)469 InstMOV* InstMOV::AsTHIS(const Instruction& i) {
470 if (IsTHIS(i)) {
471 return (InstMOV*)&i;
472 }
473 return nullptr;
474 }
475
IsTHIS(const Instruction & i)476 bool InstMOV::IsTHIS(const Instruction& i) {
477 return InstALU::IsTHIS(i) && InstALU::AsTHIS(i)->checkOp1(r0) &&
478 InstALU::AsTHIS(i)->checkOp(OpMov);
479 }
480
// Reinterpret this Operand2 as its register form. NOTE(review): no check that
// this actually is the register form — callers must know.
Op2Reg Operand2::toOp2Reg() const { return *(Op2Reg*)this; }
482
// Decode the split 16-bit immediate of a movw/movt: the low 12 bits come from
// the instruction's bottom, the upper bits from bit 16 upward (truncated by
// the bitfield width — see the Imm16 declaration).
Imm16::Imm16(Instruction& inst)
    : lower_(inst.encode() & 0xfff),
      upper_(inst.encode() >> 16),
      invalid_(0xfff) {}

// Build an encodable Imm16 from a raw 16-bit value; asserts round-tripping.
Imm16::Imm16(uint32_t imm)
    : lower_(imm & 0xfff), pad_(0), upper_((imm >> 12) & 0xf), invalid_(0) {
  MOZ_ASSERT(decode() == imm);
}

// Default-constructed Imm16s are marked invalid.
Imm16::Imm16() : invalid_(0xfff) {}
494
// Flush any pending pools and mark assembly complete. Must run exactly once,
// before executableCopy (which asserts isFinished).
void Assembler::finish() {
  flush();
  MOZ_ASSERT(!isFinished);
  isFinished = true;
}
500
// Flush pending pools, then append |numBytes| of pre-encoded machine code.
bool Assembler::appendRawCode(const uint8_t* code, size_t numBytes) {
  flush();
  return m_buffer.appendRawCode(code, numBytes);
}
505
// Pre-reservation hint; returns false only on prior OOM.
bool Assembler::reserve(size_t size) {
  // This buffer uses fixed-size chunks so there's no point in reserving
  // now vs. on-demand.
  return !oom();
}
511
// Transfer the assembled code into |bytes|; returns false on OOM.
bool Assembler::swapBuffer(wasm::Bytes& bytes) {
  // For now, specialize to the one use case. As long as wasm::Bytes is a
  // Vector, not a linked-list of chunks, there's not much we can do other
  // than copy.
  MOZ_ASSERT(bytes.empty());
  if (!bytes.resize(bytesNeeded())) {
    return false;
  }
  m_buffer.executableCopy(bytes.begin());
  return true;
}
523
// Copy the finished code into |buffer|; finish() must already have run.
void Assembler::executableCopy(uint8_t* buffer) {
  MOZ_ASSERT(isFinished);
  m_buffer.executableCopy(buffer);
}
528
529 class RelocationIterator {
530 CompactBufferReader reader_;
531 // Offset in bytes.
532 uint32_t offset_;
533
534 public:
RelocationIterator(CompactBufferReader & reader)535 explicit RelocationIterator(CompactBufferReader& reader) : reader_(reader) {}
536
read()537 bool read() {
538 if (!reader_.more()) {
539 return false;
540 }
541 offset_ = reader_.readUnsigned();
542 return true;
543 }
544
offset() const545 uint32_t offset() const { return offset_; }
546 };
547
// Resolve the target address of the control-flow transfer at |iter->cur()|.
// Handles three shapes: a direct b/bl #offset, a movw/movt pair feeding a
// register branch, and a pc-relative ldr from a constant pool.
template <class Iter>
const uint32_t* Assembler::GetCF32Target(Iter* iter) {
  Instruction* inst1 = iter->cur();

  if (inst1->is<InstBranchImm>()) {
    // See if we have a simple case, b #offset.
    BOffImm imm;
    InstBranchImm* jumpB = inst1->as<InstBranchImm>();
    jumpB->extractImm(&imm);
    return imm.getDest(inst1)->raw();
  }

  if (inst1->is<InstMovW>()) {
    // See if we have the complex case:
    //   movw r_temp, #imm1
    //   movt r_temp, #imm2
    //   bx r_temp
    // OR
    //   movw r_temp, #imm1
    //   movt r_temp, #imm2
    //   str pc, [sp]
    //   bx r_temp

    Imm16 targ_bot;
    Imm16 targ_top;
    Register temp;

    // Extract both the temp register and the bottom immediate.
    InstMovW* bottom = inst1->as<InstMovW>();
    bottom->extractImm(&targ_bot);
    bottom->extractDest(&temp);

    // Extract the top part of the immediate.
    Instruction* inst2 = iter->next();
    MOZ_ASSERT(inst2->is<InstMovT>());
    InstMovT* top = inst2->as<InstMovT>();
    top->extractImm(&targ_top);

    // Make sure they are being loaded into the same register.
    MOZ_ASSERT(top->checkDest(temp));

    // Make sure we're branching to the same register.
#ifdef DEBUG
    // A toggled call sometimes has a NOP instead of a branch for the third
    // instruction. No way to assert that it's valid in that situation.
    Instruction* inst3 = iter->next();
    if (!inst3->is<InstNOP>()) {
      InstBranchReg* realBranch = nullptr;
      if (inst3->is<InstBranchReg>()) {
        realBranch = inst3->as<InstBranchReg>();
      } else {
        Instruction* inst4 = iter->next();
        realBranch = inst4->as<InstBranchReg>();
      }
      MOZ_ASSERT(realBranch->checkDest(temp));
    }
#endif

    // Reassemble the 32-bit target from the two 16-bit halves.
    uint32_t* dest = (uint32_t*)(targ_bot.decode() | (targ_top.decode() << 16));
    return dest;
  }

  if (inst1->is<InstLDR>()) {
    // Constant-pool load: the target pointer sits in the pool slot.
    return *(uint32_t**)inst1->as<InstLDR>()->dest();
  }

  MOZ_CRASH("unsupported branch relocation");
}
616
GetPointer(uint8_t * instPtr)617 uintptr_t Assembler::GetPointer(uint8_t* instPtr) {
618 InstructionIterator iter((Instruction*)instPtr);
619 uintptr_t ret = (uintptr_t)GetPtr32Target(iter, nullptr, nullptr);
620 return ret;
621 }
622
// Resolve the 32-bit pointer loaded by the code at |start| — either a
// movw/movt pair or a constant-pool ldr. Optionally reports the destination
// register (|dest|) and which load style was used (|style|).
const uint32_t* Assembler::GetPtr32Target(InstructionIterator start,
                                          Register* dest, RelocStyle* style) {
  Instruction* load1 = start.cur();
  Instruction* load2 = start.next();

  if (load1->is<InstMovW>() && load2->is<InstMovT>()) {
    if (style) {
      *style = L_MOVWT;
    }

    // See if we have the complex case:
    //   movw r_temp, #imm1
    //   movt r_temp, #imm2

    Imm16 targ_bot;
    Imm16 targ_top;
    Register temp;

    // Extract both the temp register and the bottom immediate.
    InstMovW* bottom = load1->as<InstMovW>();
    bottom->extractImm(&targ_bot);
    bottom->extractDest(&temp);

    // Extract the top part of the immediate.
    InstMovT* top = load2->as<InstMovT>();
    top->extractImm(&targ_top);

    // Make sure they are being loaded into the same register.
    MOZ_ASSERT(top->checkDest(temp));

    if (dest) {
      *dest = temp;
    }

    // Reassemble the 32-bit value from the two 16-bit halves.
    uint32_t* value =
        (uint32_t*)(targ_bot.decode() | (targ_top.decode() << 16));
    return value;
  }

  if (load1->is<InstLDR>()) {
    if (style) {
      *style = L_LDR;
    }
    if (dest) {
      *dest = toRD(*load1);
    }
    // Constant-pool load: the pointer sits in the pool slot.
    return *(uint32_t**)load1->as<InstLDR>()->dest();
  }

  MOZ_CRASH("unsupported relocation");
}
674
CodeFromJump(InstructionIterator * jump)675 static JitCode* CodeFromJump(InstructionIterator* jump) {
676 uint8_t* target = (uint8_t*)Assembler::GetCF32Target(jump);
677 return JitCode::FromExecutable(target);
678 }
679
// Trace the JitCode target of every jump relocation recorded in |reader|.
// NOTE(review): the possibly-updated |child| pointer is not written back into
// the code here — this appears to assume the traced JitCode does not move;
// confirm.
void Assembler::TraceJumpRelocations(JSTracer* trc, JitCode* code,
                                     CompactBufferReader& reader) {
  RelocationIterator iter(reader);
  while (iter.read()) {
    InstructionIterator institer((Instruction*)(code->raw() + iter.offset()));
    JitCode* child = CodeFromJump(&institer);
    TraceManuallyBarrieredEdge(trc, &child, "rel32");
  }
}
689
// Trace one GC-pointer constant embedded at |iter|; if tracing moved the
// referent, make the code writable (lazily, once per code block) and patch
// the new address back into the instruction stream.
static void TraceOneDataRelocation(JSTracer* trc,
                                   mozilla::Maybe<AutoWritableJitCode>& awjc,
                                   JitCode* code, InstructionIterator iter) {
  Register dest;
  Assembler::RelocStyle rs;
  const void* prior = Assembler::GetPtr32Target(iter, &dest, &rs);
  void* ptr = const_cast<void*>(prior);

  // No barrier needed since these are constants.
  TraceManuallyBarrieredGenericPointerEdge(
      trc, reinterpret_cast<gc::Cell**>(&ptr), "jit-masm-ptr");

  if (ptr != prior) {
    // First repatch in this code block: drop write protection.
    if (awjc.isNothing()) {
      awjc.emplace(code);
    }

    MacroAssemblerARM::ma_mov_patch(Imm32(int32_t(ptr)), dest,
                                    Assembler::Always, rs, iter);
  }
}
711
/* static */
// Trace every GC-pointer constant recorded in the data relocation table.
void Assembler::TraceDataRelocations(JSTracer* trc, JitCode* code,
                                     CompactBufferReader& reader) {
  mozilla::Maybe<AutoWritableJitCode> awjc;
  while (reader.more()) {
    size_t offset = reader.readUnsigned();
    InstructionIterator iter((Instruction*)(code->raw() + offset));
    TraceOneDataRelocation(trc, awjc, code, iter);
  }
}
722
copyJumpRelocationTable(uint8_t * dest)723 void Assembler::copyJumpRelocationTable(uint8_t* dest) {
724 if (jumpRelocations_.length()) {
725 memcpy(dest, jumpRelocations_.buffer(), jumpRelocations_.length());
726 }
727 }
728
copyDataRelocationTable(uint8_t * dest)729 void Assembler::copyDataRelocationTable(uint8_t* dest) {
730 if (dataRelocations_.length()) {
731 memcpy(dest, dataRelocations_.buffer(), dataRelocations_.length());
732 }
733 }
734
processCodeLabels(uint8_t * rawCode)735 void Assembler::processCodeLabels(uint8_t* rawCode) {
736 for (const CodeLabel& label : codeLabels_) {
737 Bind(rawCode, label);
738 }
739 }
740
// Emit a placeholder word (-1) and record its position in |label|; Bind()
// later overwrites it with the label target's absolute address.
void Assembler::writeCodePointer(CodeLabel* label) {
  m_buffer.assertNoPoolAndNoNops();
  BufferOffset off = writeInst(-1);
  label->patchAt()->bind(off.getOffset());
}
746
// Resolve a code label: store the absolute address of its target into the
// code at its patch offset.
void Assembler::Bind(uint8_t* rawCode, const CodeLabel& label) {
  size_t offset = label.patchAt().offset();
  size_t target = label.target().offset();
  *reinterpret_cast<const void**>(rawCode + offset) = rawCode + target;
}
752
InvertCondition(Condition cond)753 Assembler::Condition Assembler::InvertCondition(Condition cond) {
754 const uint32_t ConditionInversionBit = 0x10000000;
755 return Condition(ConditionInversionBit ^ cond);
756 }
757
// Map a (possibly signed) comparison condition to its unsigned counterpart.
// Zero/NonZero pass through; already-unsigned conditions map to themselves.
Assembler::Condition Assembler::UnsignedCondition(Condition cond) {
  switch (cond) {
    case Zero:
    case NonZero:
      return cond;
    case LessThan:
    case Below:
      return Below;
    case LessThanOrEqual:
    case BelowOrEqual:
      return BelowOrEqual;
    case GreaterThan:
    case Above:
      return Above;
    case AboveOrEqual:
    case GreaterThanOrEqual:
      return AboveOrEqual;
    default:
      MOZ_CRASH("unexpected condition");
  }
}
779
// Strip the "or equal" component from an ordered comparison condition, e.g.
// LessThanOrEqual -> LessThan, AboveOrEqual -> Above. Only ordered
// conditions are accepted.
Assembler::Condition Assembler::ConditionWithoutEqual(Condition cond) {
  switch (cond) {
    case LessThan:
    case LessThanOrEqual:
      return LessThan;
    case Below:
    case BelowOrEqual:
      return Below;
    case GreaterThan:
    case GreaterThanOrEqual:
      return GreaterThan;
    case Above:
    case AboveOrEqual:
      return Above;
    default:
      MOZ_CRASH("unexpected condition");
  }
}
798
InvertCondition(DoubleCondition cond)799 Assembler::DoubleCondition Assembler::InvertCondition(DoubleCondition cond) {
800 const uint32_t ConditionInversionBit = 0x10000000;
801 return DoubleCondition(ConditionInversionBit ^ cond);
802 }
803
// Try to express a 32-bit constant as a pair of imm8m-encodable values (each
// an 8-bit value rotated right by an even amount) so it can be built with two
// ALU instructions. Returns a default (invalid) TwoImm8mData when no such
// decomposition exists.
Imm8::TwoImm8mData Imm8::EncodeTwoImms(uint32_t imm) {
  // In the ideal case, we are looking for a number that (in binary) looks
  // like:
  //   0b((00)*)n_1((00)*)n_2((00)*)
  //      left   n1  mid   n2 right
  // where both n_1 and n_2 fit into 8 bits.
  // Since this is being done with rotates, we also need to handle the case
  // that one of these numbers is in fact split between the left and right
  // sides, in which case the constant will look like:
  //   0bn_1a((00)*)n_2((00)*)n_1b
  //     n1a   mid  n2  rgh   n1b
  // Also remember, values are rotated by multiples of two, and left, mid or
  // right can have length zero.
  uint32_t imm1, imm2;
  int left = CountLeadingZeroes32(imm) & 0x1E;
  uint32_t no_n1 = imm & ~(0xff << (24 - left));

  // Not technically needed: this case only happens if we can encode as a
  // single imm8m. There is a perfectly reasonable encoding in this case, but
  // we shouldn't encourage people to do things like this.
  if (no_n1 == 0) {
    return TwoImm8mData();
  }

  int mid = CountLeadingZeroes32(no_n1) & 0x1E;
  uint32_t no_n2 =
      no_n1 & ~((0xff << ((24 - mid) & 0x1f)) | 0xff >> ((8 + mid) & 0x1f));

  if (no_n2 == 0) {
    // We hit the easy case, no wraparound.
    // Note: a single constant *may* look like this.
    int imm1shift = left + 8;
    int imm2shift = mid + 8;
    imm1 = (imm >> (32 - imm1shift)) & 0xff;
    if (imm2shift >= 32) {
      imm2shift = 0;
      // This assert does not always hold, in fact, this would lead to
      // some incredibly subtle bugs.
      // assert((imm & 0xff) == no_n1);
      imm2 = no_n1;
    } else {
      imm2 = ((imm >> (32 - imm2shift)) | (imm << imm2shift)) & 0xff;
      MOZ_ASSERT(((no_n1 >> (32 - imm2shift)) | (no_n1 << imm2shift)) == imm2);
    }
    MOZ_ASSERT((imm1shift & 0x1) == 0);
    MOZ_ASSERT((imm2shift & 0x1) == 0);
    return TwoImm8mData(datastore::Imm8mData(imm1, imm1shift >> 1),
                        datastore::Imm8mData(imm2, imm2shift >> 1));
  }

  // Either it wraps, or it does not fit. If we initially chopped off more
  // than 8 bits, then it won't fit.
  if (left >= 8) {
    return TwoImm8mData();
  }

  int right = 32 - (CountLeadingZeroes32(no_n2) & 30);
  // All remaining set bits *must* fit into the lower 8 bits.
  // The right == 8 case should be handled by the previous case.
  if (right > 8) {
    return TwoImm8mData();
  }

  // Make sure the initial bits that we removed for no_n1 fit into the
  // 8-(32-right) leftmost bits.
  if (((imm & (0xff << (24 - left))) << (8 - right)) != 0) {
    // BUT we may have removed more bits than we needed to for no_n1
    // 0x04104001 e.g. we can encode 0x104 with a single op, then 0x04000001
    // with a second, but we try to encode 0x0410000 and find that we need a
    // second op for 0x4000, and 0x1 cannot be included in the encoding of
    // 0x04100000.
    no_n1 = imm & ~((0xff >> (8 - right)) | (0xff << (24 + right)));
    mid = CountLeadingZeroes32(no_n1) & 30;
    no_n2 = no_n1 & ~((0xff << ((24 - mid) & 31)) | 0xff >> ((8 + mid) & 31));
    if (no_n2 != 0) {
      return TwoImm8mData();
    }
  }

  // Now assemble all of this information into a two coherent constants it is
  // a rotate right from the lower 8 bits.
  int imm1shift = 8 - right;
  imm1 = 0xff & ((imm << imm1shift) | (imm >> (32 - imm1shift)));
  MOZ_ASSERT((imm1shift & ~0x1e) == 0);
  // left + 8 + mid is the position of the leftmost bit of n_2.
  // We needed to rotate 0x000000ab right by 8 in order to get 0xab000000,
  // then shift again by the leftmost bit in order to get the constant that we
  // care about.
  int imm2shift = mid + 8;
  imm2 = ((imm >> (32 - imm2shift)) | (imm << imm2shift)) & 0xff;
  MOZ_ASSERT((imm1shift & 0x1) == 0);
  MOZ_ASSERT((imm2shift & 0x1) == 0);
  return TwoImm8mData(datastore::Imm8mData(imm1, imm1shift >> 1),
                      datastore::Imm8mData(imm2, imm2shift >> 1));
}
899
// For an ALU op whose immediate cannot be encoded directly, return the
// complementary op computing the same result with a transformed immediate
// (e.g. add x, -n == sub x, n; and x, ~n == bic x, n). |*imm| is updated in
// place; |*negDest| receives the register actually written — only OpTst
// redirects it (to |scratch|, since its Bic replacement needs a destination).
// Returns OpInvalid when no complementary op exists.
ALUOp jit::ALUNeg(ALUOp op, Register dest, Register scratch, Imm32* imm,
                  Register* negDest) {
  // Find an alternate ALUOp to get the job done, and use a different imm.
  *negDest = dest;
  switch (op) {
    case OpMov:
      *imm = Imm32(~imm->value);
      return OpMvn;
    case OpMvn:
      *imm = Imm32(~imm->value);
      return OpMov;
    case OpAnd:
      *imm = Imm32(~imm->value);
      return OpBic;
    case OpBic:
      *imm = Imm32(~imm->value);
      return OpAnd;
    case OpAdd:
      *imm = Imm32(-imm->value);
      return OpSub;
    case OpSub:
      *imm = Imm32(-imm->value);
      return OpAdd;
    case OpCmp:
      *imm = Imm32(-imm->value);
      return OpCmn;
    case OpCmn:
      *imm = Imm32(-imm->value);
      return OpCmp;
    case OpTst:
      MOZ_ASSERT(dest == InvalidReg);
      *imm = Imm32(~imm->value);
      *negDest = scratch;
      return OpBic;
    // orr has orn on thumb2 only.
    default:
      return OpInvalid;
  }
}
939
can_dbl(ALUOp op)940 bool jit::can_dbl(ALUOp op) {
941 // Some instructions can't be processed as two separate instructions such as
942 // and, and possibly add (when we're setting ccodes). There is also some
943 // hilarity with *reading* condition codes. For example, adc dest, src1,
944 // 0xfff; (add with carry) can be split up into adc dest, src1, 0xf00; add
945 // dest, dest, 0xff, since "reading" the condition code increments the
946 // result by one conditionally, that only needs to be done on one of the two
947 // instructions.
948 switch (op) {
949 case OpBic:
950 case OpAdd:
951 case OpSub:
952 case OpEor:
953 case OpOrr:
954 return true;
955 default:
956 return false;
957 }
958 }
959
condsAreSafe(ALUOp op)960 bool jit::condsAreSafe(ALUOp op) {
961 // Even when we are setting condition codes, sometimes we can get away with
962 // splitting an operation into two. For example, if our immediate is
963 // 0x00ff00ff, and the operation is eors we can split this in half, since x
964 // ^ 0x00ff0000 ^ 0x000000ff should set all of its condition codes exactly
965 // the same as x ^ 0x00ff00ff. However, if the operation were adds, we
966 // cannot split this in half. If the source on the add is 0xfff00ff0, the
967 // result sholud be 0xef10ef, but do we set the overflow bit or not?
968 // Depending on which half is performed first (0x00ff0000 or 0x000000ff) the
969 // V bit will be set differently, and *not* updating the V bit would be
970 // wrong. Theoretically, the following should work:
971 // adds r0, r1, 0x00ff0000;
972 // addsvs r0, r1, 0x000000ff;
973 // addvc r0, r1, 0x000000ff;
974 // But this is 3 instructions, and at that point, we might as well use
975 // something else.
976 switch (op) {
977 case OpBic:
978 case OpOrr:
979 case OpEor:
980 return true;
981 default:
982 return false;
983 }
984 }
985
getDestVariant(ALUOp op)986 ALUOp jit::getDestVariant(ALUOp op) {
987 // All of the compare operations are dest-less variants of a standard
988 // operation. Given the dest-less variant, return the dest-ful variant.
989 switch (op) {
990 case OpCmp:
991 return OpSub;
992 case OpCmn:
993 return OpAdd;
994 case OpTst:
995 return OpAnd;
996 case OpTeq:
997 return OpEor;
998 default:
999 return op;
1000 }
1001 }
1002
// Operand-2 helpers: a register shifted by a constant amount. The assert
// ranges follow the ARM immediate-shift encodings: LSL allows 0-31,
// LSR/ASR allow 1-32, ROR allows 1-31.

// Register with no shift applied (LSL #0).
O2RegImmShift jit::O2Reg(Register r) { return O2RegImmShift(r, LSL, 0); }

// Logical shift left by a constant.
O2RegImmShift jit::lsl(Register r, int amt) {
  MOZ_ASSERT(0 <= amt && amt <= 31);
  return O2RegImmShift(r, LSL, amt);
}

// Logical shift right by a constant.
O2RegImmShift jit::lsr(Register r, int amt) {
  MOZ_ASSERT(1 <= amt && amt <= 32);
  return O2RegImmShift(r, LSR, amt);
}

// Rotate right by a constant.
O2RegImmShift jit::ror(Register r, int amt) {
  MOZ_ASSERT(1 <= amt && amt <= 31);
  return O2RegImmShift(r, ROR, amt);
}
// Rotate left by a constant. ARM has no ROL; encode as ROR by (32 - amt).
O2RegImmShift jit::rol(Register r, int amt) {
  MOZ_ASSERT(1 <= amt && amt <= 31);
  return O2RegImmShift(r, ROR, 32 - amt);
}

// Arithmetic shift right by a constant.
O2RegImmShift jit::asr(Register r, int amt) {
  MOZ_ASSERT(1 <= amt && amt <= 32);
  return O2RegImmShift(r, ASR, amt);
}
1028
// Operand-2 helpers: a register shifted by an amount held in another
// register.

O2RegRegShift jit::lsl(Register r, Register amt) {
  return O2RegRegShift(r, LSL, amt);
}

O2RegRegShift jit::lsr(Register r, Register amt) {
  return O2RegRegShift(r, LSR, amt);
}

O2RegRegShift jit::ror(Register r, Register amt) {
  return O2RegRegShift(r, ROR, amt);
}

O2RegRegShift jit::asr(Register r, Register amt) {
  return O2RegRegShift(r, ASR, amt);
}
1044
// Shared encoder mapping the high word of a double to the 8-bit VFP
// immediate form, backed by the 256-entry table below.
static js::jit::DoubleEncoder doubleEncoder;

/* static */
const js::jit::VFPImm js::jit::VFPImm::One(0x3FF00000);

// Construct a VFP immediate from the top 32 bits of a double. If the value
// has no VFP immediate encoding, data_ is left as -1 (invalid).
js::jit::VFPImm::VFPImm(uint32_t top) {
  data_ = -1;
  datastore::Imm8VFPImmData tmp;
  if (doubleEncoder.lookup(top, &tmp)) {
    data_ = tmp.encode();
  }
}

// Extract the 24-bit branch-offset field from a branch instruction.
BOffImm::BOffImm(const Instruction& inst) : data_(inst.encode() & 0x00ffffff) {}

Instruction* BOffImm::getDest(Instruction* src) const {
  // TODO: It is probably worthwhile to verify that src is actually a branch.
  // NOTE: This does not explicitly shift the offset of the destination left by
  // 2, since it is indexing into an array of instruction sized objects.
  // The (<< 8, >> 8) pair sign-extends the 24-bit offset field; the +2
  // instructions account for the pc-plus-8 reading of pc on ARM.
  return &src[((int32_t(data_) << 8) >> 8) + 2];
}

const js::jit::DoubleEncoder::DoubleEntry js::jit::DoubleEncoder::table[256] = {
#include "jit/arm/DoubleEntryTable.tbl"
};
1070
// VFPRegister implementation

// Return the double register aliasing this one. A single/int register sN
// aliases d(N/2); a double is its own overlay. |which| must be 0 since there
// is exactly one double alias.
VFPRegister VFPRegister::doubleOverlay(unsigned int which) const {
  MOZ_ASSERT(!_isInvalid);
  MOZ_ASSERT(which == 0);
  if (kind != Double) {
    return VFPRegister(code_ >> 1, Double);
  }
  return *this;
}
// Return the |which|'th (0 or 1) single register aliasing this one.
VFPRegister VFPRegister::singleOverlay(unsigned int which) const {
  MOZ_ASSERT(!_isInvalid);
  if (kind == Double) {
    // There are no corresponding float registers for d16-d31.
    MOZ_ASSERT(code_ < 16);
    MOZ_ASSERT(which < 2);
    return VFPRegister((code_ << 1) + which, Single);
  }
  MOZ_ASSERT(which == 0);
  return VFPRegister(code_, Single);
}

static_assert(
    FloatRegisters::TotalDouble <= 16,
    "We assume that every Double register also has an Integer personality");

// Return the |which|'th signed-int personality aliasing this register.
VFPRegister VFPRegister::sintOverlay(unsigned int which) const {
  MOZ_ASSERT(!_isInvalid);
  if (kind == Double) {
    // There are no corresponding float registers for d16-d31.
    MOZ_ASSERT(code_ < 16);
    MOZ_ASSERT(which < 2);
    return VFPRegister((code_ << 1) + which, Int);
  }
  MOZ_ASSERT(which == 0);
  return VFPRegister(code_, Int);
}
// Return the |which|'th unsigned-int personality aliasing this register.
VFPRegister VFPRegister::uintOverlay(unsigned int which) const {
  MOZ_ASSERT(!_isInvalid);
  if (kind == Double) {
    // There are no corresponding float registers for d16-d31.
    MOZ_ASSERT(code_ < 16);
    MOZ_ASSERT(which < 2);
    return VFPRegister((code_ << 1) + which, UInt);
  }
  MOZ_ASSERT(which == 0);
  return VFPRegister(code_, UInt);
}
1118
// True if any underlying buffer (instruction stream or either relocation
// table) has failed to allocate.
bool Assembler::oom() const {
  return AssemblerShared::oom() || m_buffer.oom() || jumpRelocations_.oom() ||
         dataRelocations_.oom();
}

// Size of the instruction stream, in bytes. Including pools. This function
// expects all pools that need to be placed have been placed. If they haven't
// then we need to go and flush the pools :(
size_t Assembler::size() const { return m_buffer.size(); }
// Size of the relocation table, in bytes.
size_t Assembler::jumpRelocationTableBytes() const {
  return jumpRelocations_.length();
}
size_t Assembler::dataRelocationTableBytes() const {
  return dataRelocations_.length();
}

// Size of the data table, in bytes.
size_t Assembler::bytesNeeded() const {
  return size() + jumpRelocationTableBytes() + dataRelocationTableBytes();
}

// Allocate memory for a branch instruction, it will be overwritten
// subsequently and should not be disassembled.

BufferOffset Assembler::allocBranchInst() {
  // Reserve the slot with an unconditional NOP.
  return m_buffer.putInt(Always | InstNOP::NopInst);
}

// Write |x| directly to |dest|; used when patching already-emitted code.
void Assembler::WriteInstStatic(uint32_t x, uint32_t* dest) {
  MOZ_ASSERT(dest != nullptr);
  *dest = x;
}

// Pad with HLT instructions so that falling into the padding traps.
void Assembler::haltingAlign(int alignment) {
  // HLT with payload 0xBAAD
  m_buffer.align(alignment, 0xE1000070 | (0xBAA << 8) | 0xD);
}

// Pad with NOPs; safe to execute through.
void Assembler::nopAlign(int alignment) { m_buffer.align(alignment); }

BufferOffset Assembler::as_nop() { return writeInst(0xe320f000); }
1161
// Combine opcode, S bit, condition, operand 2 and (when present) the
// destination and first-source registers into a 32-bit ALU instruction.
// InvalidReg means the corresponding field is absent for this op.
static uint32_t EncodeAlu(Register dest, Register src1, Operand2 op2, ALUOp op,
                          SBit s, Assembler::Condition c) {
  return (int)op | (int)s | (int)c | op2.encode() |
         ((dest == InvalidReg) ? 0 : RD(dest)) |
         ((src1 == InvalidReg) ? 0 : RN(src1));
}

BufferOffset Assembler::as_alu(Register dest, Register src1, Operand2 op2,
                               ALUOp op, SBit s, Condition c) {
  return writeInst(EncodeAlu(dest, src1, op2, op, s, c));
}

// mov is an ALU op with no first source register.
BufferOffset Assembler::as_mov(Register dest, Operand2 op2, SBit s,
                               Condition c) {
  return as_alu(dest, InvalidReg, op2, OpMov, s, c);
}

/* static */
// Patch variant: write the encoded ALU instruction directly over |pos|.
void Assembler::as_alu_patch(Register dest, Register src1, Operand2 op2,
                             ALUOp op, SBit s, Condition c, uint32_t* pos) {
  WriteInstStatic(EncodeAlu(dest, src1, op2, op, s, c), pos);
}

/* static */
void Assembler::as_mov_patch(Register dest, Operand2 op2, SBit s, Condition c,
                             uint32_t* pos) {
  as_alu_patch(dest, InvalidReg, op2, OpMov, s, c, pos);
}

// mvn: move the bitwise complement of op2 into dest.
BufferOffset Assembler::as_mvn(Register dest, Operand2 op2, SBit s,
                               Condition c) {
  return as_alu(dest, InvalidReg, op2, OpMvn, s, c);
}
1195
// Logical operations.
BufferOffset Assembler::as_and(Register dest, Register src1, Operand2 op2,
                               SBit s, Condition c) {
  return as_alu(dest, src1, op2, OpAnd, s, c);
}
// bic: dest = src1 & ~op2 (bit clear).
BufferOffset Assembler::as_bic(Register dest, Register src1, Operand2 op2,
                               SBit s, Condition c) {
  return as_alu(dest, src1, op2, OpBic, s, c);
}
BufferOffset Assembler::as_eor(Register dest, Register src1, Operand2 op2,
                               SBit s, Condition c) {
  return as_alu(dest, src1, op2, OpEor, s, c);
}
BufferOffset Assembler::as_orr(Register dest, Register src1, Operand2 op2,
                               SBit s, Condition c) {
  return as_alu(dest, src1, op2, OpOrr, s, c);
}

// Reverse byte operations.
// rev: reverse the four bytes of a word (endianness swap).
BufferOffset Assembler::as_rev(Register dest, Register src, Condition c) {
  return writeInst((int)c | 0b0000'0110'1011'1111'0000'1111'0011'0000 |
                   RD(dest) | src.code());
}
// rev16: reverse the bytes within each halfword.
BufferOffset Assembler::as_rev16(Register dest, Register src, Condition c) {
  return writeInst((int)c | 0b0000'0110'1011'1111'0000'1111'1011'0000 |
                   RD(dest) | src.code());
}
// revsh: byte-reverse the low halfword and sign-extend the result.
BufferOffset Assembler::as_revsh(Register dest, Register src, Condition c) {
  return writeInst((int)c | 0b0000'0110'1111'1111'0000'1111'1011'0000 |
                   RD(dest) | src.code());
}
1227
// Mathematical operations.
BufferOffset Assembler::as_adc(Register dest, Register src1, Operand2 op2,
                               SBit s, Condition c) {
  return as_alu(dest, src1, op2, OpAdc, s, c);
}
BufferOffset Assembler::as_add(Register dest, Register src1, Operand2 op2,
                               SBit s, Condition c) {
  return as_alu(dest, src1, op2, OpAdd, s, c);
}
BufferOffset Assembler::as_sbc(Register dest, Register src1, Operand2 op2,
                               SBit s, Condition c) {
  return as_alu(dest, src1, op2, OpSbc, s, c);
}
BufferOffset Assembler::as_sub(Register dest, Register src1, Operand2 op2,
                               SBit s, Condition c) {
  return as_alu(dest, src1, op2, OpSub, s, c);
}
// rsb: reverse subtract, dest = op2 - src1.
BufferOffset Assembler::as_rsb(Register dest, Register src1, Operand2 op2,
                               SBit s, Condition c) {
  return as_alu(dest, src1, op2, OpRsb, s, c);
}
// rsc: reverse subtract with carry.
BufferOffset Assembler::as_rsc(Register dest, Register src1, Operand2 op2,
                               SBit s, Condition c) {
  return as_alu(dest, src1, op2, OpRsc, s, c);
}

// Test operations. These only set the condition codes, so they always pass
// SetCC and no destination register.
BufferOffset Assembler::as_cmn(Register src1, Operand2 op2, Condition c) {
  return as_alu(InvalidReg, src1, op2, OpCmn, SetCC, c);
}
BufferOffset Assembler::as_cmp(Register src1, Operand2 op2, Condition c) {
  return as_alu(InvalidReg, src1, op2, OpCmp, SetCC, c);
}
BufferOffset Assembler::as_teq(Register src1, Operand2 op2, Condition c) {
  return as_alu(InvalidReg, src1, op2, OpTeq, SetCC, c);
}
BufferOffset Assembler::as_tst(Register src1, Operand2 op2, Condition c) {
  return as_alu(InvalidReg, src1, op2, OpTst, SetCC, c);
}
1267
// Placed in the Rn field of the extend instructions below; pc (0xf) there
// selects the plain (no-addend) SXT*/UXT* forms.
static constexpr Register NoAddend{Registers::pc};

// Base encoding bits shared by the sign/zero-extension instructions.
static const int SignExtend = 0x06000070;

// Op bits selecting which extend operation to perform.
enum SignExtend {
  SxSxtb = 10 << 20,
  SxSxth = 11 << 20,
  SxUxtb = 14 << 20,
  SxUxth = 15 << 20
};

// Sign extension operations.
// |rotate| (0-3) selects a pre-rotation of the source by rotate*8 bits, per
// the ARM encoding of SXTB/SXTH/UXTB/UXTH; only its low 2 bits are used.
BufferOffset Assembler::as_sxtb(Register dest, Register src, int rotate,
                                Condition c) {
  return writeInst((int)c | SignExtend | SxSxtb | RN(NoAddend) | RD(dest) |
                   ((rotate & 3) << 10) | src.code());
}
BufferOffset Assembler::as_sxth(Register dest, Register src, int rotate,
                                Condition c) {
  return writeInst((int)c | SignExtend | SxSxth | RN(NoAddend) | RD(dest) |
                   ((rotate & 3) << 10) | src.code());
}
BufferOffset Assembler::as_uxtb(Register dest, Register src, int rotate,
                                Condition c) {
  return writeInst((int)c | SignExtend | SxUxtb | RN(NoAddend) | RD(dest) |
                   ((rotate & 3) << 10) | src.code());
}
BufferOffset Assembler::as_uxth(Register dest, Register src, int rotate,
                                Condition c) {
  return writeInst((int)c | SignExtend | SxUxth | RN(NoAddend) | RD(dest) |
                   ((rotate & 3) << 10) | src.code());
}

// movw: write a 16-bit immediate into the low half of |dest|, clearing the
// high half. Requires MOVW/MOVT support (ARMv7).
static uint32_t EncodeMovW(Register dest, Imm16 imm, Assembler::Condition c) {
  MOZ_ASSERT(HasMOVWT());
  return 0x03000000 | c | imm.encode() | RD(dest);
}

// movt: write a 16-bit immediate into the high half of |dest|, preserving
// the low half. Requires MOVW/MOVT support (ARMv7).
static uint32_t EncodeMovT(Register dest, Imm16 imm, Assembler::Condition c) {
  MOZ_ASSERT(HasMOVWT());
  return 0x03400000 | c | imm.encode() | RD(dest);
}
1310
// Not quite ALU worthy, but these are useful none the less. These also have
// the issue of being formatted completely differently from the standard ALU
// operations.
BufferOffset Assembler::as_movw(Register dest, Imm16 imm, Condition c) {
  return writeInst(EncodeMovW(dest, imm, c));
}

/* static */
// Patch variant: write the movw directly over the instruction at |pos|.
void Assembler::as_movw_patch(Register dest, Imm16 imm, Condition c,
                              Instruction* pos) {
  WriteInstStatic(EncodeMovW(dest, imm, c), (uint32_t*)pos);
}

BufferOffset Assembler::as_movt(Register dest, Imm16 imm, Condition c) {
  return writeInst(EncodeMovT(dest, imm, c));
}

/* static */
// Patch variant: write the movt directly over the instruction at |pos|.
void Assembler::as_movt_patch(Register dest, Imm16 imm, Condition c,
                              Instruction* pos) {
  WriteInstStatic(EncodeMovT(dest, imm, c), (uint32_t*)pos);
}
1333
// Tag bits common to the multiply-family encodings.
static const int mull_tag = 0x90;

// Shared encoder for the multiply family. For the long (64-bit result)
// forms, dhi/dlo are the high/low destination registers; the 32-bit forms
// pass InvalidReg for dlo.
BufferOffset Assembler::as_genmul(Register dhi, Register dlo, Register rm,
                                  Register rn, MULOp op, SBit s, Condition c) {
  return writeInst(RN(dhi) | maybeRD(dlo) | RM(rm) | rn.code() | op | s | c |
                   mull_tag);
}
// mul: dest = src1 * src2 (low 32 bits).
BufferOffset Assembler::as_mul(Register dest, Register src1, Register src2,
                               SBit s, Condition c) {
  return as_genmul(dest, InvalidReg, src1, src2, OpmMul, s, c);
}
// mla: dest = acc + src1 * src2.
BufferOffset Assembler::as_mla(Register dest, Register acc, Register src1,
                               Register src2, SBit s, Condition c) {
  return as_genmul(dest, acc, src1, src2, OpmMla, s, c);
}
// umaal: unsigned multiply-accumulate-accumulate long.
BufferOffset Assembler::as_umaal(Register destHI, Register destLO,
                                 Register src1, Register src2, Condition c) {
  return as_genmul(destHI, destLO, src1, src2, OpmUmaal, LeaveCC, c);
}
// mls: dest = acc - src1 * src2.
BufferOffset Assembler::as_mls(Register dest, Register acc, Register src1,
                               Register src2, Condition c) {
  return as_genmul(dest, acc, src1, src2, OpmMls, LeaveCC, c);
}

// umull: unsigned 32x32 -> 64-bit multiply.
BufferOffset Assembler::as_umull(Register destHI, Register destLO,
                                 Register src1, Register src2, SBit s,
                                 Condition c) {
  return as_genmul(destHI, destLO, src1, src2, OpmUmull, s, c);
}

// umlal: unsigned 64-bit multiply-accumulate.
BufferOffset Assembler::as_umlal(Register destHI, Register destLO,
                                 Register src1, Register src2, SBit s,
                                 Condition c) {
  return as_genmul(destHI, destLO, src1, src2, OpmUmlal, s, c);
}

// smull: signed 32x32 -> 64-bit multiply.
BufferOffset Assembler::as_smull(Register destHI, Register destLO,
                                 Register src1, Register src2, SBit s,
                                 Condition c) {
  return as_genmul(destHI, destLO, src1, src2, OpmSmull, s, c);
}

// smlal: signed 64-bit multiply-accumulate.
BufferOffset Assembler::as_smlal(Register destHI, Register destLO,
                                 Register src1, Register src2, SBit s,
                                 Condition c) {
  return as_genmul(destHI, destLO, src1, src2, OpmSmlal, s, c);
}

// sdiv/udiv: signed/unsigned integer division, rd = rn / rm.
BufferOffset Assembler::as_sdiv(Register rd, Register rn, Register rm,
                                Condition c) {
  return writeInst(0x0710f010 | c | RN(rd) | RM(rm) | rn.code());
}

BufferOffset Assembler::as_udiv(Register rd, Register rn, Register rm,
                                Condition c) {
  return writeInst(0x0730f010 | c | RN(rd) | RM(rm) | rn.code());
}

// clz: count leading zeroes of src into dest.
BufferOffset Assembler::as_clz(Register dest, Register src, Condition c) {
  MOZ_ASSERT(src != pc && dest != pc);
  return writeInst(RD(dest) | src.code() | c | 0x016f0f10);
}
1396
// Data transfer instructions: ldr, str, ldrb, strb. Using an int to
// differentiate between 8 bits and 32 bits is overkill, but meh.

// Build a single-register data-transfer (ldr/str/ldrb/strb) instruction.
static uint32_t EncodeDtr(LoadStore ls, int size, Index mode, Register rt,
                          DTRAddr addr, Assembler::Condition c) {
  // Writeback modes must not use rt (or pc) as the base register.
  MOZ_ASSERT(mode == Offset || (rt != addr.getBase() && pc != addr.getBase()));
  MOZ_ASSERT(size == 32 || size == 8);
  return 0x04000000 | ls | (size == 8 ? 0x00400000 : 0) | mode | c | RT(rt) |
         addr.encode();
}

BufferOffset Assembler::as_dtr(LoadStore ls, int size, Index mode, Register rt,
                               DTRAddr addr, Condition c) {
  return writeInst(EncodeDtr(ls, size, mode, rt, addr, c));
}

/* static */
// Patch variant: write the encoded instruction directly to |dest|.
void Assembler::as_dtr_patch(LoadStore ls, int size, Index mode, Register rt,
                             DTRAddr addr, Condition c, uint32_t* dest) {
  WriteInstStatic(EncodeDtr(ls, size, mode, rt, addr, c), dest);
}
1418
// A pool-hint word: metadata stored temporarily in the instruction slot of a
// load whose constant-pool entry has not yet been laid out. It records which
// pool entry the load refers to and how to rebuild the final instruction
// (see PatchConstantPoolLoad). The ONES field gives the word a 0xf
// "condition", which no real ldr or b instruction can carry.
class PoolHintData {
 public:
  enum LoadType {
    // Set 0 to bogus, since that is the value most likely to be
    // accidentally left somewhere.
    PoolBOGUS = 0,
    PoolDTR = 1,     // Integer-register load.
    PoolBranch = 2,  // Branch (load to pc).
    PoolVDTR = 3     // VFP-register load.
  };

 private:
  uint32_t index_ : 16;    // Constant-pool entry index.
  uint32_t cond_ : 4;      // Condition code, stored shifted down by 28.
  uint32_t loadType_ : 2;  // One of the LoadType values above.
  uint32_t destReg_ : 5;   // Destination register code / VFP id.
  uint32_t destType_ : 1;  // VFP loads only: 1 = double, 0 = single.
  uint32_t ONES : 4;       // Always 0xf; marks this word as a pool hint.

  static const uint32_t ExpectedOnes = 0xfu;

 public:
  // Initialize for an integer-register load.
  void init(uint32_t index, Assembler::Condition cond, LoadType lt,
            Register destReg) {
    index_ = index;
    MOZ_ASSERT(index_ == index);  // Check that the index fit in 16 bits.
    cond_ = cond >> 28;
    MOZ_ASSERT(cond_ == cond >> 28);
    loadType_ = lt;
    ONES = ExpectedOnes;
    destReg_ = destReg.code();
    destType_ = 0;
  }
  // Initialize for a VFP (single or double) load.
  void init(uint32_t index, Assembler::Condition cond, LoadType lt,
            const VFPRegister& destReg) {
    MOZ_ASSERT(destReg.isFloat());
    index_ = index;
    MOZ_ASSERT(index_ == index);
    cond_ = cond >> 28;
    MOZ_ASSERT(cond_ == cond >> 28);
    loadType_ = lt;
    ONES = ExpectedOnes;
    destReg_ = destReg.id();
    destType_ = destReg.isDouble();
  }
  Assembler::Condition getCond() const {
    return Assembler::Condition(cond_ << 28);
  }

  Register getReg() const { return Register::FromCode(destReg_); }
  VFPRegister getVFPReg() const {
    VFPRegister r = VFPRegister(
        destReg_, destType_ ? VFPRegister::Double : VFPRegister::Single);
    return r;
  }

  int32_t getIndex() const { return index_; }
  // Update the pool-entry index once the final pool layout is known.
  void setIndex(uint32_t index) {
    MOZ_ASSERT(ONES == ExpectedOnes && loadType_ != PoolBOGUS);
    index_ = index;
    MOZ_ASSERT(index_ == index);
  }

  LoadType getLoadType() const {
    // If this *was* a PoolBranch, but the branch has already been bound
    // then this isn't going to look like a real poolhintdata, but we still
    // want to lie about it so everyone knows it *used* to be a branch.
    if (ONES != ExpectedOnes) {
      return PoolHintData::PoolBranch;
    }
    return static_cast<LoadType>(loadType_);
  }

  bool isValidPoolHint() const {
    // Most instructions cannot have a condition that is 0xf. Notable
    // exceptions are blx and the entire NEON instruction set. For the
    // purposes of pool loads, and possibly patched branches, the possible
    // instructions are ldr and b, neither of which can have a condition
    // code of 0xf.
    return ONES == ExpectedOnes;
  }
};

// Pun used to move a PoolHintData in and out of a raw instruction word.
union PoolHintPun {
  PoolHintData phd;
  uint32_t raw;
};
1506
// Handles all of the other integral data transferring functions: ldrsb, ldrsh,
// ldrd, etc. The size is given in bits.
BufferOffset Assembler::as_extdtr(LoadStore ls, int size, bool IsSigned,
                                  Index mode, Register rt, EDtrAddr addr,
                                  Condition c) {
  // extra_bits1 goes into bit 20 (the load bit); extra_bits2 goes into bits
  // 5-6 and selects among the signed/halfword/doubleword encodings.
  int extra_bits2 = 0;
  int extra_bits1 = 0;
  switch (size) {
    case 8:
      // Only ldrsb comes through here; plain ldrb/strb use as_dtr.
      MOZ_ASSERT(IsSigned);
      MOZ_ASSERT(ls != IsStore);
      extra_bits1 = 0x1;
      extra_bits2 = 0x2;
      break;
    case 16:
      // 'case 32' doesn't need to be handled, it is handled by the default
      // ldr/str.
      extra_bits2 = 0x01;
      extra_bits1 = (ls == IsStore) ? 0 : 1;
      if (IsSigned) {
        MOZ_ASSERT(ls != IsStore);
        extra_bits2 |= 0x2;
      }
      break;
    case 64:
      // ldrd/strd.
      extra_bits2 = (ls == IsStore) ? 0x3 : 0x2;
      extra_bits1 = 0;
      break;
    default:
      MOZ_CRASH("unexpected size in as_extdtr");
  }
  return writeInst(extra_bits2 << 5 | extra_bits1 << 20 | 0x90 | addr.encode() |
                   RT(rt) | mode | c);
}

// Load/store multiple: |mask| is the register-list bitmask.
BufferOffset Assembler::as_dtm(LoadStore ls, Register rn, uint32_t mask,
                               DTMMode mode, DTMWriteBack wb, Condition c) {
  return writeInst(0x08000000 | RN(rn) | ls | mode | mask | c | wb);
}
1546
// Allocate a load instruction (currently always exactly one) together with
// |numPoolEntries| words of pool data, and spew the disassembly if enabled.
// The instruction slot initially holds the PoolHintPun and is later patched
// into a real load once the pool's position is known.
BufferOffset Assembler::allocLiteralLoadEntry(
    size_t numInst, unsigned numPoolEntries, PoolHintPun& php, uint8_t* data,
    const LiteralDoc& doc, ARMBuffer::PoolEntry* pe, bool loadToPC) {
  uint8_t* inst = (uint8_t*)&php.raw;

  MOZ_ASSERT(inst);
  MOZ_ASSERT(numInst == 1);  // Or fix the disassembly
  BufferOffset offs =
      m_buffer.allocEntry(numInst, numPoolEntries, inst, data, pe);
  propagateOOM(offs.assigned());
#ifdef JS_DISASM_ARM
  Instruction* instruction = m_buffer.getInstOrNull(offs);
  if (instruction) {
    spewLiteralLoad(php, loadToPC, instruction, doc);
  }
#endif
  return offs;
}

// This is also used for instructions that might be resolved into branches,
// or might not. If dest==pc then it is effectively a branch.

// Emit a pool load of a 32-bit immediate into |dest|.
BufferOffset Assembler::as_Imm32Pool(Register dest, uint32_t value,
                                     Condition c) {
  PoolHintPun php;
  php.phd.init(0, c, PoolHintData::PoolDTR, dest);
  BufferOffset offs = allocLiteralLoadEntry(
      1, 1, php, (uint8_t*)&value, LiteralDoc(value), nullptr, dest == pc);
  return offs;
}

/* static */
// Store |data| into the pool slot read by the ldr at |addr|.
void Assembler::WritePoolEntry(Instruction* addr, Condition c, uint32_t data) {
  MOZ_ASSERT(addr->is<InstLDR>());
  *addr->as<InstLDR>()->dest() = data;
  MOZ_ASSERT(addr->extractCond() == c);
}

// Emit a pool load of a double immediate (two pool words).
BufferOffset Assembler::as_FImm64Pool(VFPRegister dest, double d, Condition c) {
  MOZ_ASSERT(dest.isDouble());
  PoolHintPun php;
  php.phd.init(0, c, PoolHintData::PoolVDTR, dest);
  return allocLiteralLoadEntry(1, 2, php, (uint8_t*)&d, LiteralDoc(d));
}

BufferOffset Assembler::as_FImm32Pool(VFPRegister dest, float f, Condition c) {
  // Insert floats into the double pool as they have the same limitations on
  // immediate offset. This wastes 4 bytes padding per float. An alternative
  // would be to have a separate pool for floats.
  MOZ_ASSERT(dest.isSingle());
  PoolHintPun php;
  php.phd.init(0, c, PoolHintData::PoolVDTR, dest);
  return allocLiteralLoadEntry(1, 1, php, (uint8_t*)&f, LiteralDoc(f));
}
1602
// Pool callbacks stuff:
// Rewrite the pool-hint word at |load_| so it records |index| as its
// constant-pool slot.
void Assembler::InsertIndexIntoTag(uint8_t* load_, uint32_t index) {
  uint32_t* load = (uint32_t*)load_;
  PoolHintPun php;
  php.raw = *load;
  php.phd.setIndex(index);
  *load = php.raw;
}

// patchConstantPoolLoad takes the address of the instruction that wants to be
// patched, and the address of the start of the constant pool, and figures
// things out from there.
void Assembler::PatchConstantPoolLoad(void* loadAddr, void* constPoolAddr) {
  PoolHintData data = *(PoolHintData*)loadAddr;
  uint32_t* instAddr = (uint32_t*)loadAddr;
  // Byte distance from the hint word to the start of the pool.
  int offset = (char*)constPoolAddr - (char*)loadAddr;
  switch (data.getLoadType()) {
    case PoolHintData::PoolBOGUS:
      MOZ_CRASH("bogus load type!");
    case PoolHintData::PoolDTR:
      // pc-relative 32-bit ldr; the -8 compensates for pc reading as the
      // instruction address plus 8.
      Assembler::as_dtr_patch(
          IsLoad, 32, Offset, data.getReg(),
          DTRAddr(pc, DtrOffImm(offset + 4 * data.getIndex() - 8)),
          data.getCond(), instAddr);
      break;
    case PoolHintData::PoolBranch:
      // Either this used to be a poolBranch, and the label was already bound,
      // so it was replaced with a real branch, or this may happen in the
      // future. If this is going to happen in the future, then the actual
      // bits that are written here don't matter (except the condition code,
      // since that is always preserved across patchings) but if it does not
      // get bound later, then we want to make sure this is a load from the
      // pool entry (and the pool entry should be nullptr so it will crash).
      if (data.isValidPoolHint()) {
        Assembler::as_dtr_patch(
            IsLoad, 32, Offset, pc,
            DTRAddr(pc, DtrOffImm(offset + 4 * data.getIndex() - 8)),
            data.getCond(), instAddr);
      }
      break;
    case PoolHintData::PoolVDTR: {
      // VFP load; the offset must fit the vldr immediate range.
      VFPRegister dest = data.getVFPReg();
      int32_t imm = offset + (data.getIndex() * 4) - 8;
      MOZ_ASSERT(-1024 < imm && imm < 1024);
      Assembler::as_vdtr_patch(IsLoad, dest, VFPAddr(pc, VFPOffImm(imm)),
                               data.getCond(), instAddr);
      break;
    }
  }
}
1653
// Atomic instruction stuff:

// ldrexd: exclusive load of a doubleword into the even/odd pair rt/rt2.
BufferOffset Assembler::as_ldrexd(Register rt, Register rt2, Register rn,
                                  Condition c) {
  MOZ_ASSERT(!(rt.code() & 1) && rt2.code() == rt.code() + 1);
  MOZ_ASSERT(rt.code() != 14 && rn.code() != 15);
  return writeInst(0x01b00f9f | (int)c | RT(rt) | RN(rn));
}

// ldrex: exclusive load of a word.
BufferOffset Assembler::as_ldrex(Register rt, Register rn, Condition c) {
  MOZ_ASSERT(rt.code() != 15 && rn.code() != 15);
  return writeInst(0x01900f9f | (int)c | RT(rt) | RN(rn));
}

// ldrexh: exclusive load of a halfword.
BufferOffset Assembler::as_ldrexh(Register rt, Register rn, Condition c) {
  MOZ_ASSERT(rt.code() != 15 && rn.code() != 15);
  return writeInst(0x01f00f9f | (int)c | RT(rt) | RN(rn));
}

// ldrexb: exclusive load of a byte.
BufferOffset Assembler::as_ldrexb(Register rt, Register rn, Condition c) {
  MOZ_ASSERT(rt.code() != 15 && rn.code() != 15);
  return writeInst(0x01d00f9f | (int)c | RT(rt) | RN(rn));
}

// strexd: exclusive store of the even/odd pair rt/rt2; rd gets the
// success/failure status.
BufferOffset Assembler::as_strexd(Register rd, Register rt, Register rt2,
                                  Register rn, Condition c) {
  MOZ_ASSERT(!(rt.code() & 1) && rt2.code() == rt.code() + 1);
  MOZ_ASSERT(rt.code() != 14 && rn.code() != 15 && rd.code() != 15);
  MOZ_ASSERT(rd != rn && rd != rt && rd != rt2);
  return writeInst(0x01a00f90 | (int)c | RD(rd) | RN(rn) | rt.code());
}

// strex: exclusive store of a word; rd gets the success/failure status.
BufferOffset Assembler::as_strex(Register rd, Register rt, Register rn,
                                 Condition c) {
  MOZ_ASSERT(rd != rn && rd != rt);  // True restriction on Cortex-A7 (RPi2)
  return writeInst(0x01800f90 | (int)c | RD(rd) | RN(rn) | rt.code());
}

// strexh: exclusive store of a halfword.
BufferOffset Assembler::as_strexh(Register rd, Register rt, Register rn,
                                  Condition c) {
  MOZ_ASSERT(rd != rn && rd != rt);  // True restriction on Cortex-A7 (RPi2)
  return writeInst(0x01e00f90 | (int)c | RD(rd) | RN(rn) | rt.code());
}

// strexb: exclusive store of a byte.
BufferOffset Assembler::as_strexb(Register rd, Register rt, Register rn,
                                  Condition c) {
  MOZ_ASSERT(rd != rn && rd != rt);  // True restriction on Cortex-A7 (RPi2)
  return writeInst(0x01c00f90 | (int)c | RD(rd) | RN(rn) | rt.code());
}

// clrex: clear any outstanding exclusive-access reservation.
BufferOffset Assembler::as_clrex() { return writeInst(0xf57ff01f); }
1705
1706 // Memory barrier stuff:
1707
as_dmb(BarrierOption option)1708 BufferOffset Assembler::as_dmb(BarrierOption option) {
1709 return writeInst(0xf57ff050U | (int)option);
1710 }
as_dsb(BarrierOption option)1711 BufferOffset Assembler::as_dsb(BarrierOption option) {
1712 return writeInst(0xf57ff040U | (int)option);
1713 }
as_isb()1714 BufferOffset Assembler::as_isb() {
1715 return writeInst(0xf57ff06fU); // option == SY
1716 }
// Emit the legacy CP15 form of DSB, for pre-ARMv7 barrier support.
BufferOffset Assembler::as_dsb_trap() {
  // DSB is "mcr 15, 0, r0, c7, c10, 4".
  // See eg https://bugs.kde.org/show_bug.cgi?id=228060.
  // ARMv7 manual, "VMSA CP15 c7 register summary".
  // Flagged as "legacy" starting with ARMv8, may be disabled on chip, see
  // ARMv8 manual E2.7.3 and G3.18.16.
  return writeInst(0xee070f9a);
}
// Emit the legacy CP15 form of DMB.
BufferOffset Assembler::as_dmb_trap() {
  // DMB is "mcr 15, 0, r0, c7, c10, 5".
  // ARMv7 manual, "VMSA CP15 c7 register summary".
  // Flagged as "legacy" starting with ARMv8, may be disabled on chip, see
  // ARMv8 manual E2.7.3 and G3.18.16.
  return writeInst(0xee070fba);
}
// Emit the legacy CP15 form of ISB.
BufferOffset Assembler::as_isb_trap() {
  // ISB is "mcr 15, 0, r0, c7, c5, 4".
  // ARMv7 manual, "VMSA CP15 c7 register summary".
  // Flagged as "legacy" starting with ARMv8, may be disabled on chip, see
  // ARMv8 manual E2.7.3 and G3.18.16.
  return writeInst(0xee070f94);
}
1739
// Emit CSDB (consumption of speculative data barrier), used for Spectre
// mitigations. Encoded in the hint space, so it executes as a NOP on
// architectures where the instruction is not defined.
BufferOffset Assembler::as_csdb() {
  // NOP (see as_nop) on architectures where this instruction is not defined.
  //
  // https://developer.arm.com/-/media/developer/pdf/Cache_Speculation_Side-channels_22Feb18.pdf
  // CSDB A32: 1110_0011_0010_0000_1111_0000_0001_0100
  return writeInst(0xe320f000 | 0x14);
}
1747
1748 // Control flow stuff:
1749
1750 // bx can *only* branch to a register, never to an immediate.
as_bx(Register r,Condition c)1751 BufferOffset Assembler::as_bx(Register r, Condition c) {
1752 BufferOffset ret = writeInst(((int)c) | OpBx | r.code());
1753 return ret;
1754 }
1755
// Patch the guard branch at |branch| so that it jumps over the constant pool
// to |afterPool|. |dest| is the in-memory location of the guard instruction.
void Assembler::WritePoolGuard(BufferOffset branch, Instruction* dest,
                               BufferOffset afterPool) {
  BOffImm off = afterPool.diffB<BOffImm>(branch);
  if (off.isInvalid()) {
    // The pool is too far away to jump over; there is no way to recover.
    MOZ_CRASH("BOffImm invalid");
  }
  *dest = InstBImm(off, Always);
}
1764
1765 // Branch can branch to an immediate *or* to a register.
1766 // Branches to immediates are pc relative, branches to registers are absolute.
// Emit a pc-relative conditional branch with a known offset. |documentation|
// is only used by the disassembler spew.
BufferOffset Assembler::as_b(BOffImm off, Condition c, Label* documentation) {
  return writeBranchInst(((int)c) | OpB | off.encode(),
                         refLabel(documentation));
}
1771
// Emit a branch to a label. If the label is bound, the branch gets its final
// offset immediately; otherwise the branch is threaded onto the label's
// linked list of uses (the list links are stored in the branches' immediate
// fields) to be patched by bind().
BufferOffset Assembler::as_b(Label* l, Condition c) {
  if (l->bound()) {
    // Note only one instruction is emitted here, the NOP is overwritten.
    BufferOffset ret = allocBranchInst();
    if (oom()) {
      return BufferOffset();
    }

    BOffImm offset = BufferOffset(l).diffB<BOffImm>(ret);
    MOZ_RELEASE_ASSERT(!offset.isInvalid(),
                       "Buffer size limit should prevent this");
    as_b(offset, c, ret);
#ifdef JS_DISASM_ARM
    spewBranch(m_buffer.getInstOrNull(ret), refLabel(l));
#endif
    return ret;
  }

  if (oom()) {
    return BufferOffset();
  }

  BufferOffset ret;
  if (l->used()) {
    // Link to the previous head of the label's use chain.
    int32_t old = l->offset();
    MOZ_RELEASE_ASSERT(BOffImm::IsInRange(old),
                       "Buffer size limit should prevent this");
    ret = as_b(BOffImm(old), c, l);
  } else {
    // First use: an invalid offset terminates the chain.
    BOffImm inv;
    ret = as_b(inv, c, l);
  }

  if (oom()) {
    return BufferOffset();
  }

  // This branch becomes the new head of the use chain.
  l->use(ret.getOffset());
  return ret;
}
1812
// Overwrite the instruction at |inst| with a branch; used to patch up
// previously-emitted branches (e.g. when binding labels).
BufferOffset Assembler::as_b(BOffImm off, Condition c, BufferOffset inst) {
  // JS_DISASM_ARM NOTE: Can't disassemble here, because numerous callers use
  // this to patchup old code. Must disassemble in caller where it makes sense.
  // Not many callers.
  *editSrc(inst) = InstBImm(off, c);
  return inst;
}
1820
// blx can go to either an immediate or a register.
// When blx'ing to a register, we change processor state depending on the low
// bit of the register; when blx'ing to an immediate, we *always* change
// processor state.
1825
// Emit "blx <r>": branch-and-link to the address in r.
BufferOffset Assembler::as_blx(Register r, Condition c) {
  return writeInst(((int)c) | OpBlx | r.code());
}
1829
// bl can only branch to a pc-relative immediate offset.
// It cannot change the processor state.
// Emit a pc-relative conditional branch-and-link with a known offset.
BufferOffset Assembler::as_bl(BOffImm off, Condition c, Label* documentation) {
  return writeBranchInst(((int)c) | OpBl | off.encode(),
                         refLabel(documentation));
}
1836
// Emit a branch-and-link to a label; mirrors as_b(Label*, Condition),
// including the linked-list-of-uses handling for unbound labels.
BufferOffset Assembler::as_bl(Label* l, Condition c) {
  if (l->bound()) {
    // Note only one instruction is emitted here, the NOP is overwritten.
    BufferOffset ret = allocBranchInst();
    if (oom()) {
      return BufferOffset();
    }

    BOffImm offset = BufferOffset(l).diffB<BOffImm>(ret);
    MOZ_RELEASE_ASSERT(!offset.isInvalid(),
                       "Buffer size limit should prevent this");

    as_bl(offset, c, ret);
#ifdef JS_DISASM_ARM
    spewBranch(m_buffer.getInstOrNull(ret), refLabel(l));
#endif
    return ret;
  }

  if (oom()) {
    return BufferOffset();
  }

  BufferOffset ret;
  // See if the list was empty.
  if (l->used()) {
    int32_t old = l->offset();
    MOZ_RELEASE_ASSERT(BOffImm::IsInRange(old),
                       "Buffer size limit should prevent this");
    ret = as_bl(BOffImm(old), c, l);
  } else {
    BOffImm inv;
    ret = as_bl(inv, c, l);
  }

  if (oom()) {
    return BufferOffset();
  }

  // This branch becomes the new head of the label's use chain.
  l->use(ret.getOffset());
  return ret;
}
1879
// Overwrite the instruction at |inst| with a branch-and-link; patching
// counterpart of as_b(BOffImm, Condition, BufferOffset).
BufferOffset Assembler::as_bl(BOffImm off, Condition c, BufferOffset inst) {
  *editSrc(inst) = InstBLImm(off, c);
  return inst;
}
1884
// Emit MRS: read the status register into r.
BufferOffset Assembler::as_mrs(Register r, Condition c) {
  return writeInst(0x010f0000 | int(c) | RD(r));
}
1888
// Emit MSR: write r to the status register (flags fields only).
BufferOffset Assembler::as_msr(Register r, Condition c) {
  // Hardcode the 'mask' field to 0b11 for now. It is bits 18 and 19, which
  // are the two high bits of the 'c' in this constant.
  MOZ_ASSERT((r.code() & ~0xf) == 0);
  return writeInst(0x012cf000 | int(c) | r.code());
}
1895
1896 // VFP instructions!
// Tag bits common to the VFP (coprocessor 10/11) instruction space, plus the
// marker bit for VFP data-processing (arithmetic) operations.
enum vfp_tags { VfpTag = 0x0C000A00, VfpArith = 0x02000000 };
1898
// Write a VFP instruction: combine the size bit and the opcode blob with the
// common VFP tag. The blob must not already contain either.
BufferOffset Assembler::writeVFPInst(vfp_size sz, uint32_t blob) {
  MOZ_ASSERT((sz & blob) == 0);
  MOZ_ASSERT((VfpTag & blob) == 0);
  return writeInst(VfpTag | sz | blob);
}
1904
1905 /* static */
// Static variant of writeVFPInst that writes directly to |dest| instead of
// appending to the buffer (used when patching existing code).
void Assembler::WriteVFPInstStatic(vfp_size sz, uint32_t blob, uint32_t* dest) {
  MOZ_ASSERT((sz & blob) == 0);
  MOZ_ASSERT((VfpTag & blob) == 0);
  WriteInstStatic(VfpTag | sz | blob, dest);
}
1911
1912 // Unityped variants: all registers hold the same (ieee754 single/double)
1913 // notably not included are vcvt; vmov vd, #imm; vmov rt, vn.
// Shared emitter for unityped VFP arithmetic: all present operands must be
// the same kind (all single or all double); vn/vm may be NoVFPRegister for
// unary/zero-operand forms.
BufferOffset Assembler::as_vfp_float(VFPRegister vd, VFPRegister vn,
                                     VFPRegister vm, VFPOp op, Condition c) {
  // Make sure we believe that all of our operands are the same kind.
  MOZ_ASSERT_IF(!vn.isMissing(), vd.equiv(vn));
  MOZ_ASSERT_IF(!vm.isMissing(), vd.equiv(vm));
  vfp_size sz = vd.isDouble() ? IsDouble : IsSingle;
  return writeVFPInst(sz, VD(vd) | VN(vn) | VM(vm) | op | VfpArith | c);
}
1922
// vd = vn + vm.
BufferOffset Assembler::as_vadd(VFPRegister vd, VFPRegister vn, VFPRegister vm,
                                Condition c) {
  return as_vfp_float(vd, vn, vm, OpvAdd, c);
}
1927
// vd = vn / vm.
BufferOffset Assembler::as_vdiv(VFPRegister vd, VFPRegister vn, VFPRegister vm,
                                Condition c) {
  return as_vfp_float(vd, vn, vm, OpvDiv, c);
}
1932
// vd = vn * vm.
BufferOffset Assembler::as_vmul(VFPRegister vd, VFPRegister vn, VFPRegister vm,
                                Condition c) {
  return as_vfp_float(vd, vn, vm, OpvMul, c);
}
1937
BufferOffset Assembler::as_vnmul(VFPRegister vd, VFPRegister vn, VFPRegister vm,
                                 Condition c) {
  // NOTE(review): despite the name, this emits the same OpvMul encoding as
  // as_vmul (a plain vmul, not vnmul). Confirm whether any caller depends on
  // negated-multiply semantics before changing it.
  return as_vfp_float(vd, vn, vm, OpvMul, c);
}
1942
// vnmla (negated multiply-accumulate) -- not yet implemented.
BufferOffset Assembler::as_vnmla(VFPRegister vd, VFPRegister vn, VFPRegister vm,
                                 Condition c) {
  MOZ_CRASH("Feature NYI");
}
1947
// vnmls (negated multiply-subtract) -- not yet implemented.
BufferOffset Assembler::as_vnmls(VFPRegister vd, VFPRegister vn, VFPRegister vm,
                                 Condition c) {
  MOZ_CRASH("Feature NYI");
}
1952
// vd = -vm.
BufferOffset Assembler::as_vneg(VFPRegister vd, VFPRegister vm, Condition c) {
  return as_vfp_float(vd, NoVFPRegister, vm, OpvNeg, c);
}
1956
// vd = sqrt(vm).
BufferOffset Assembler::as_vsqrt(VFPRegister vd, VFPRegister vm, Condition c) {
  return as_vfp_float(vd, NoVFPRegister, vm, OpvSqrt, c);
}
1960
// vd = |vm|.
BufferOffset Assembler::as_vabs(VFPRegister vd, VFPRegister vm, Condition c) {
  return as_vfp_float(vd, NoVFPRegister, vm, OpvAbs, c);
}
1964
// vd = vn - vm.
BufferOffset Assembler::as_vsub(VFPRegister vd, VFPRegister vn, VFPRegister vm,
                                Condition c) {
  return as_vfp_float(vd, vn, vm, OpvSub, c);
}
1969
// Compare vd with vm, setting the VFP status flags.
BufferOffset Assembler::as_vcmp(VFPRegister vd, VFPRegister vm, Condition c) {
  return as_vfp_float(vd, NoVFPRegister, vm, OpvCmp, c);
}
1973
// Compare vd with zero, setting the VFP status flags.
BufferOffset Assembler::as_vcmpz(VFPRegister vd, Condition c) {
  return as_vfp_float(vd, NoVFPRegister, NoVFPRegister, OpvCmpz, c);
}
1977
1978 // Specifically, a move between two same sized-registers.
// Specifically, a move between two same sized-registers.
BufferOffset Assembler::as_vmov(VFPRegister vd, VFPRegister vsrc, Condition c) {
  return as_vfp_float(vd, NoVFPRegister, vsrc, OpvMov, c);
}
1982
1983 // Transfer between Core and VFP.
1984
1985 // Unlike the next function, moving between the core registers and vfp registers
1986 // can't be *that* properly typed. Namely, since I don't want to munge the type
1987 // VFPRegister to also include core registers. Thus, the core and vfp registers
1988 // are passed in based on their type, and src/dest is determined by the
1989 // float2core.
1990
// Transfer between core register(s) and a VFP register. The direction is
// given by |f2c|; |vt2| is InvalidReg for a 32-bit (single-word) transfer.
// For doubles, |idx| selects which half is moved in the single-word case.
BufferOffset Assembler::as_vxfer(Register vt1, Register vt2, VFPRegister vm,
                                 FloatToCore_ f2c, Condition c, int idx) {
  vfp_size sz = IsSingle;
  if (vm.isDouble()) {
    // Technically, this can be done with a vmov à la ARM ARM under vmov
    // however, that requires at least an extra bit saying if the operation
    // should be performed on the lower or upper half of the double. Moving
    // a single to/from 2N/2N+1 isn't equivalent, since there are 32 single
    // registers, and 32 double registers so there is no way to encode the
    // last 16 double registers.
    sz = IsDouble;
    MOZ_ASSERT(idx == 0 || idx == 1);
    // If we are transferring a single half of the double then it must be
    // moving a VFP reg to a core reg.
    MOZ_ASSERT_IF(vt2 == InvalidReg, f2c == FloatToCore);
    // Shift the half-selector into its bit position in the encoding.
    idx = idx << 21;
  } else {
    MOZ_ASSERT(idx == 0);
  }

  if (vt2 == InvalidReg) {
    // 32-bit transfer: a single core register.
    return writeVFPInst(
        sz, WordTransfer | f2c | c | RT(vt1) | maybeRN(vt2) | VN(vm) | idx);
  }

  // We are doing a 64 bit transfer.
  return writeVFPInst(
      sz, DoubleTransfer | f2c | c | RT(vt1) | maybeRN(vt2) | VM(vm) | idx);
}
2020
// Encoding helpers for vcvt: destination kind, rounding-mode selection, and
// signedness of the integer operand.
enum vcvt_destFloatness { VcvtToInteger = 1 << 18, VcvtToFloat = 0 << 18 };
enum vcvt_toZero {
  VcvtToZero =
      1 << 7,  // Use round-to-zero, i.e. truncate toward zero.
  VcvtToFPSCR = 0 << 7  // Use whatever rounding mode the fpscr specifies.
};
enum vcvt_Signedness {
  VcvtToSigned = 1 << 16,
  VcvtToUnsigned = 0 << 16,
  VcvtFromSigned = 1 << 7,
  VcvtFromUnsigned = 0 << 7
};
2033
2034 // Our encoding actually allows just the src and the dest (and their types) to
2035 // uniquely specify the encoding that we are going to use.
// Our encoding actually allows just the src and the dest (and their types) to
// uniquely specify the encoding that we are going to use.
BufferOffset Assembler::as_vcvt(VFPRegister vd, VFPRegister vm, bool useFPSCR,
                                Condition c) {
  // Unlike other cases, the source and dest types cannot be the same.
  MOZ_ASSERT(!vd.equiv(vm));
  vfp_size sz = IsDouble;
  if (vd.isFloat() && vm.isFloat()) {
    // Doing a float -> float conversion.
    if (vm.isSingle()) {
      sz = IsSingle;
    }
    return writeVFPInst(sz, c | 0x02B700C0 | VM(vm) | VD(vd));
  }

  // At least one of the registers should be a float.
  vcvt_destFloatness destFloat;
  vcvt_Signedness opSign;
  vcvt_toZero doToZero = VcvtToFPSCR;
  MOZ_ASSERT(vd.isFloat() || vm.isFloat());
  if (vd.isSingle() || vm.isSingle()) {
    sz = IsSingle;
  }

  if (vd.isFloat()) {
    // int -> float: signedness comes from the integer source.
    destFloat = VcvtToFloat;
    opSign = (vm.isSInt()) ? VcvtFromSigned : VcvtFromUnsigned;
  } else {
    // float -> int: signedness comes from the integer destination, and the
    // rounding mode is round-to-zero unless the caller asked for the FPSCR's.
    destFloat = VcvtToInteger;
    opSign = (vd.isSInt()) ? VcvtToSigned : VcvtToUnsigned;
    doToZero = useFPSCR ? VcvtToFPSCR : VcvtToZero;
  }
  return writeVFPInst(
      sz, c | 0x02B80040 | VD(vd) | VM(vm) | destFloat | opSign | doToZero);
}
2069
// Fixed-point <-> float conversion in place on vd, with |fixedPoint|
// fractional bits; |toFixed| selects the direction.
BufferOffset Assembler::as_vcvtFixed(VFPRegister vd, bool isSigned,
                                     uint32_t fixedPoint, bool toFixed,
                                     Condition c) {
  MOZ_ASSERT(vd.isFloat());
  // sx == 1: the fixed-point operand is 32 bits wide.
  uint32_t sx = 0x1;
  vfp_size sf = vd.isDouble() ? IsDouble : IsSingle;
  int32_t imm5 = fixedPoint;
  // The encoding stores (size - fracbits), split across bit 5 and bits 0-3.
  imm5 = (sx ? 32 : 16) - imm5;
  MOZ_ASSERT(imm5 >= 0);
  imm5 = imm5 >> 1 | (imm5 & 1) << 5;
  return writeVFPInst(sf, 0x02BA0040 | VD(vd) | toFixed << 18 | sx << 7 |
                              (!isSigned) << 16 | imm5 | c);
}
2083
2084 // Transfer between VFP and memory.
// Build the encoding for a VFP load/store (vldr/vstr) of vd at addr.
static uint32_t EncodeVdtr(LoadStore ls, VFPRegister vd, VFPAddr addr,
                           Assembler::Condition c) {
  return ls | 0x01000000 | addr.encode() | VD(vd) | c;
}
2089
// Emit a VFP load or store of vd at addr.
BufferOffset Assembler::as_vdtr(
    LoadStore ls, VFPRegister vd, VFPAddr addr,
    Condition c /* vfp doesn't have a wb option */) {
  vfp_size sz = vd.isDouble() ? IsDouble : IsSingle;
  return writeVFPInst(sz, EncodeVdtr(ls, vd, addr, c));
}
2096
2097 /* static */
// Patching variant of as_vdtr: write the instruction directly to |dest|.
void Assembler::as_vdtr_patch(LoadStore ls, VFPRegister vd, VFPAddr addr,
                              Condition c, uint32_t* dest) {
  vfp_size sz = vd.isDouble() ? IsDouble : IsSingle;
  WriteVFPInstStatic(sz, EncodeVdtr(ls, vd, addr, c), dest);
}
2103
2104 // VFP's ldm/stm work differently from the standard arm ones. You can only
2105 // transfer a range.
2106
// VFP load/store multiple: transfer |length| consecutive registers starting
// at vd, to/from memory addressed by rn.
BufferOffset Assembler::as_vdtm(LoadStore st, Register rn, VFPRegister vd,
                                int length,
                                /* also has update conditions */ Condition c) {
  MOZ_ASSERT(length <= 16 && length >= 0);
  vfp_size sz = vd.isDouble() ? IsDouble : IsSingle;

  // The encoded count is in words, so doubles occupy two each.
  if (vd.isDouble()) {
    length *= 2;
  }

  return writeVFPInst(sz, dtmLoadStore | RN(rn) | VD(vd) | length | dtmMode |
                              dtmUpdate | dtmCond);
}
2120
// Load vd from a possibly-unaligned address in rn, using NEON vld1.
BufferOffset Assembler::as_vldr_unaligned(VFPRegister vd, Register rn) {
  MOZ_ASSERT(HasNEON());
  if (vd.isDouble()) {
    // vld1 (multiple single elements) with align=0, size=3, numregs=1
    return writeInst(0xF42007CF | RN(rn) | VD(vd));
  }
  // vld1 (single element to single lane) with index=0, size=2
  MOZ_ASSERT(vd.isFloat());
  // Must be the low half of a double register for this encoding.
  MOZ_ASSERT((vd.code() & 1) == 0);
  return writeInst(0xF4A0080F | RN(rn) | VD(vd.asDouble()));
}
2132
// Store vd to a possibly-unaligned address in rn, using NEON vst1.
BufferOffset Assembler::as_vstr_unaligned(VFPRegister vd, Register rn) {
  MOZ_ASSERT(HasNEON());
  if (vd.isDouble()) {
    // vst1 (multiple single elements) with align=0, size=3, numregs=1
    return writeInst(0xF40007CF | RN(rn) | VD(vd));
  }
  // vst1 (single element from one lane) with index=0, size=2
  MOZ_ASSERT(vd.isFloat());
  // Must be the low half of a double register for this encoding.
  MOZ_ASSERT((vd.code() & 1) == 0);
  return writeInst(0xF480080F | RN(rn) | VD(vd.asDouble()));
}
2144
// Load an encodable VFP immediate constant into vd.
BufferOffset Assembler::as_vimm(VFPRegister vd, VFPImm imm, Condition c) {
  MOZ_ASSERT(imm.isValid());
  vfp_size sz = vd.isDouble() ? IsDouble : IsSingle;
  return writeVFPInst(sz, c | imm.encode() | VD(vd) | 0x02B00000);
}
2150
// Emit VMRS: read the VFP status/control register into r.
BufferOffset Assembler::as_vmrs(Register r, Condition c) {
  return writeInst(c | 0x0ef10a10 | RT(r));
}
2154
// Emit VMSR: write r to the VFP status/control register.
BufferOffset Assembler::as_vmsr(Register r, Condition c) {
  return writeInst(c | 0x0ee10a10 | RT(r));
}
2158
// Follow one link of a label's use chain: the branch at |b| stores the offset
// of the next use in its immediate field. Returns false at the end of the
// chain (an invalid immediate terminates it).
bool Assembler::nextLink(BufferOffset b, BufferOffset* next) {
  Instruction branch = *editSrc(b);
  MOZ_ASSERT(branch.is<InstBranchImm>());

  BOffImm destOff;
  branch.as<InstBranchImm>()->extractImm(&destOff);
  if (destOff.isInvalid()) {
    return false;
  }

  // Propagate the next link back to the caller, by constructing a new
  // BufferOffset into the space they provided.
  new (next) BufferOffset(destOff.decode());
  return true;
}
2174
// Bind |label| at |boff| (or at the next instruction if boff is unassigned),
// patching every branch on the label's use chain to the final target.
void Assembler::bind(Label* label, BufferOffset boff) {
#ifdef JS_DISASM_ARM
  spew_.spewBind(label);
#endif
  if (oom()) {
    // Ensure we always bind the label. This matches what we do on
    // x86/x64 and silences the assert in ~Label.
    label->bind(0);
    return;
  }

  if (label->used()) {
    bool more;
    // If our caller didn't give us an explicit target to bind to then we
    // want to bind to the location of the next instruction.
    BufferOffset dest = boff.assigned() ? boff : nextOffset();
    BufferOffset b(label);
    do {
      // Read the next link before patching: patching overwrites the
      // immediate field that stores it.
      BufferOffset next;
      more = nextLink(b, &next);
      Instruction branch = *editSrc(b);
      Condition c = branch.extractCond();
      BOffImm offset = dest.diffB<BOffImm>(b);
      MOZ_RELEASE_ASSERT(!offset.isInvalid(),
                         "Buffer size limit should prevent this");
      if (branch.is<InstBImm>()) {
        as_b(offset, c, b);
      } else if (branch.is<InstBLImm>()) {
        as_bl(offset, c, b);
      } else {
        MOZ_CRASH("crazy fixup!");
      }
      b = next;
    } while (more);
  }
  label->bind(nextOffset().getOffset());
  MOZ_ASSERT(!oom());
}
2213
// Redirect every use of |label| to |target|, then reset |label|. If target is
// bound this is just a bind; otherwise label's use chain is spliced onto
// target's.
void Assembler::retarget(Label* label, Label* target) {
#ifdef JS_DISASM_ARM
  spew_.spewRetarget(label, target);
#endif
  if (label->used() && !oom()) {
    if (target->bound()) {
      bind(label, BufferOffset(target));
    } else if (target->used()) {
      // The target is not bound but used. Prepend label's branch list
      // onto target's.
      BufferOffset labelBranchOffset(label);
      BufferOffset next;

      // Find the head of the use chain for label.
      while (nextLink(labelBranchOffset, &next)) {
        labelBranchOffset = next;
      }

      // Then patch the head of label's use chain to the tail of target's
      // use chain, prepending the entire use chain of target.
      Instruction branch = *editSrc(labelBranchOffset);
      Condition c = branch.extractCond();
      int32_t prev = target->offset();
      target->use(label->offset());
      if (branch.is<InstBImm>()) {
        as_b(BOffImm(prev), c, labelBranchOffset);
      } else if (branch.is<InstBLImm>()) {
        as_bl(BOffImm(prev), c, labelBranchOffset);
      } else {
        MOZ_CRASH("crazy fixup!");
      }
    } else {
      // The target is unbound and unused. We can just take the head of
      // the list hanging off of label, and dump that into target.
      target->use(label->offset());
    }
  }
  label->reset();
}
2253
// Set (e.g. from gdb) to the index of the breakpoint you want to stop on;
// -1 disables the mechanism.
static int stopBKPT = -1;
void Assembler::as_bkpt() {
  // This is a count of how many times a breakpoint instruction has been
  // generated. It is embedded into the instruction for debugging
  // purposes. Gdb will print "bkpt xxx" when you attempt to disassemble a
  // breakpoint with the number xxx embedded into it. If this breakpoint is
  // being hit, then you can run (in gdb):
  // >b dbg_break
  // >b main
  // >commands
  // >set stopBKPT = xxx
  // >c
  // >end
  // which will set a breakpoint on the function dbg_break above, set a
  // scripted breakpoint on main that will set the (otherwise unmodified)
  // value to the number of the breakpoint, so dbg_break will actually be
  // called and finally, when you run the executable, execution will halt when
  // that breakpoint is generated.
  static int hit = 0;
  if (stopBKPT == hit) {
    dbg_break();
  }
  // Embed the breakpoint index into the bkpt immediate field.
  writeInst(0xe1200070 | (hit & 0xf) | ((hit & 0xfff0) << 4));
  hit++;
}
2279
BufferOffset Assembler::as_illegal_trap() {
  // Encoding of the permanently-undefined 'udf' instruction, with the imm16
  // set to 0.
  return writeInst(0xe7f000f0);
}
2285
// Force any pending constant pool to be dumped into the instruction stream.
void Assembler::flushBuffer() { m_buffer.flushPool(); }
2287
// Begin a region of at most |maxInst| instructions with no pool insertion.
void Assembler::enterNoPool(size_t maxInst) { m_buffer.enterNoPool(maxInst); }
2289
// End the no-pool region started by enterNoPool().
void Assembler::leaveNoPool() { m_buffer.leaveNoPool(); }
2291
// Begin a region where no automatic nops may be inserted.
void Assembler::enterNoNops() { m_buffer.enterNoNops(); }
2293
// End the no-nops region started by enterNoNops().
void Assembler::leaveNoNops() { m_buffer.leaveNoNops(); }
2295
// A marker word placed at the start of a constant pool in the instruction
// stream. Recognizable because its top 16 bits are all ones, which is not a
// valid instruction encoding here.
struct PoolHeader : Instruction {
  struct Header {
    // The size should take into account the pool header.
    // The size is in units of Instruction (4 bytes), not byte.
    uint32_t size : 15;
    // Whether the guard branch before the pool was requested by the
    // assembler ("natural") rather than inserted automatically.
    uint32_t isNatural : 1;
    uint32_t ONES : 16;

    Header(int size_, bool isNatural_)
        : size(size_), isNatural(isNatural_), ONES(0xffff) {}

    // Reinterpret an in-stream instruction word as a pool header.
    explicit Header(const Instruction* i) {
      static_assert(sizeof(Header) == sizeof(uint32_t));
      memcpy(this, i, sizeof(Header));
      MOZ_ASSERT(ONES == 0xffff);
    }

    // The raw 32-bit encoding of this header.
    uint32_t raw() const {
      static_assert(sizeof(Header) == sizeof(uint32_t));
      uint32_t dest;
      memcpy(&dest, this, sizeof(Header));
      return dest;
    }
  };

  PoolHeader(int size_, bool isNatural_)
      : Instruction(Header(size_, isNatural_).raw(), true) {}

  // Pool size in 4-byte units, including this header.
  uint32_t size() const {
    Header tmp(this);
    return tmp.size;
  }
  uint32_t isNatural() const {
    Header tmp(this);
    return tmp.isNatural;
  }

  // An instruction is a pool header iff its top 16 bits are all set.
  static bool IsTHIS(const Instruction& i) {
    return (*i.raw() & 0xffff0000) == 0xffff0000;
  }
  static const PoolHeader* AsTHIS(const Instruction& i) {
    if (!IsTHIS(i)) {
      return nullptr;
    }
    return static_cast<const PoolHeader*>(&i);
  }
};
2343
// Write the PoolHeader word at |start| for pool |p|; the header records the
// pool's total size (in instructions) and whether its guard was natural.
void Assembler::WritePoolHeader(uint8_t* start, Pool* p, bool isNatural) {
  static_assert(sizeof(PoolHeader) == 4,
                "PoolHandler must have the correct size.");
  uint8_t* pool = start + 4;
  // Go through the usual rigmarole to get the size of the pool.
  pool += p->getPoolSize();
  uint32_t size = pool - start;
  MOZ_ASSERT((size & 3) == 0);
  // Convert bytes to instruction units; must fit the 15-bit size field.
  size = size >> 2;
  MOZ_ASSERT(size < (1 << 15));
  PoolHeader header(size, isNatural);
  *(PoolHeader*)start = header;
}
2357
2358 // The size of an arbitrary 32-bit call in the instruction stream. On ARM this
2359 // sequence is |pc = ldr pc - 4; imm32| given that we never reach the imm32.
// A near call is patched over exactly one 4-byte instruction (a bl).
uint32_t Assembler::PatchWrite_NearCallSize() { return sizeof(uint32_t); }
2361
// Overwrite the instruction at |start| with a bl to |toCall|.
void Assembler::PatchWrite_NearCall(CodeLocationLabel start,
                                    CodeLocationLabel toCall) {
  Instruction* inst = (Instruction*)start.raw();
  // Overwrite whatever instruction used to be here with a call. Since the
  // destination is in the same function, it will be within range of the
  // 24 << 2 byte bl instruction.
  uint8_t* dest = toCall.raw();
  new (inst) InstBLImm(BOffImm(dest - (uint8_t*)inst), Always);
}
2371
// Replace the 32-bit pointer loaded at |label| with |newValue|, asserting
// (in debug builds) that the old value was |expectedValue|.
void Assembler::PatchDataWithValueCheck(CodeLocationLabel label,
                                        PatchedImmPtr newValue,
                                        PatchedImmPtr expectedValue) {
  Instruction* ptr = reinterpret_cast<Instruction*>(label.raw());

  Register dest;
  Assembler::RelocStyle rs;

  // First decode the existing load to learn its destination register and
  // relocation style, and verify the old value.
  {
    InstructionIterator iter(ptr);
    DebugOnly<const uint32_t*> val = GetPtr32Target(iter, &dest, &rs);
    MOZ_ASSERT(uint32_t((const uint32_t*)val) == uint32_t(expectedValue.value));
  }

  // Patch over actual instructions.
  {
    InstructionIterator iter(ptr);
    MacroAssembler::ma_mov_patch(Imm32(int32_t(newValue.value)), dest, Always,
                                 rs, iter);
  }
}
2393
// ImmPtr convenience overload; forwards to the PatchedImmPtr version.
void Assembler::PatchDataWithValueCheck(CodeLocationLabel label,
                                        ImmPtr newValue, ImmPtr expectedValue) {
  PatchDataWithValueCheck(label, PatchedImmPtr(newValue.value),
                          PatchedImmPtr(expectedValue.value));
}
2399
// This just stomps over memory with 32 bits of raw data. Its purpose is to
// overwrite the call of JITed code with 32 bits worth of an offset. It is
// only meant to function on code that has been invalidated, so it should be
// totally safe. Since that instruction will never be executed again, an
// ICache flush should not be necessary.
void Assembler::PatchWrite_Imm32(CodeLocationLabel label, Imm32 imm) {
  // Raw is going to be the return address.
  uint32_t* raw = (uint32_t*)label.raw();
  // Overwrite the 4 bytes before the return address, which will end up being
  // the call instruction.
  *(raw - 1) = imm.value;
}
2412
// Return the address of the next intentionally-placed instruction after
// |inst_|, skipping pools and padding; optionally accumulate the size skipped
// into |count|.
uint8_t* Assembler::NextInstruction(uint8_t* inst_, uint32_t* count) {
  if (count != nullptr) {
    *count += sizeof(Instruction);
  }

  InstructionIterator iter(reinterpret_cast<Instruction*>(inst_));
  return reinterpret_cast<uint8_t*>(iter.next());
}
2421
InstIsGuard(Instruction * inst,const PoolHeader ** ph)2422 static bool InstIsGuard(Instruction* inst, const PoolHeader** ph) {
2423 Assembler::Condition c = inst->extractCond();
2424 if (c != Assembler::Always) {
2425 return false;
2426 }
2427 if (!(inst->is<InstBXReg>() || inst->is<InstBImm>())) {
2428 return false;
2429 }
2430 // See if the next instruction is a pool header.
2431 *ph = (inst + 1)->as<const PoolHeader>();
2432 return *ph != nullptr;
2433 }
2434
InstIsGuard(BufferInstructionIterator & iter,const PoolHeader ** ph)2435 static bool InstIsGuard(BufferInstructionIterator& iter,
2436 const PoolHeader** ph) {
2437 Instruction* inst = iter.cur();
2438 Assembler::Condition c = inst->extractCond();
2439 if (c != Assembler::Always) {
2440 return false;
2441 }
2442 if (!(inst->is<InstBXReg>() || inst->is<InstBImm>())) {
2443 return false;
2444 }
2445 // See if the next instruction is a pool header.
2446 *ph = iter.peek()->as<const PoolHeader>();
2447 return *ph != nullptr;
2448 }
2449
2450 template <class T>
InstIsBNop(const T & iter)2451 static bool InstIsBNop(const T& iter) {
2452 // In some special situations, it is necessary to insert a NOP into the
2453 // instruction stream that nobody knows about, since nobody should know
2454 // about it, make sure it gets skipped when Instruction::next() is called.
2455 // this generates a very specific nop, namely a branch to the next
2456 // instruction.
2457 const Instruction* cur = iter.cur();
2458 Assembler::Condition c = cur->extractCond();
2459 if (c != Assembler::Always) {
2460 return false;
2461 }
2462 if (!cur->is<InstBImm>()) {
2463 return false;
2464 }
2465 InstBImm* b = cur->as<InstBImm>();
2466 BOffImm offset;
2467 b->extractImm(&offset);
2468 return offset.decode() == 4;
2469 }
2470
// Skip past any automatically-inserted material (artificial pool guards and
// padding nops) so that cur() points at an intentionally-placed instruction.
Instruction* InstructionIterator::maybeSkipAutomaticInstructions() {
  // If the current instruction was automatically-inserted, skip past it.
  const PoolHeader* ph;

  // Loop until an intentionally-placed instruction is found.
  while (true) {
    if (InstIsGuard(cur(), &ph)) {
      // Don't skip a natural guard.
      if (ph->isNatural()) {
        return cur();
      }
      // Skip the guard branch plus the whole pool it protects.
      advanceRaw(1 + ph->size());
    } else if (InstIsBNop<InstructionIterator>(*this)) {
      advanceRaw(1);
    } else {
      return cur();
    }
  }
}
2490
// Buffer-iterator variant: skip an artificial pool (guard + header + entries)
// or a single padding nop, then return the instruction that follows.
Instruction* BufferInstructionIterator::maybeSkipAutomaticInstructions() {
  const PoolHeader* ph;
  // If this is a guard, and the next instruction is a header, always work
  // around the pool. If it isn't a guard, then start looking ahead.
  if (InstIsGuard(*this, &ph)) {
    // Don't skip a natural guard.
    if (ph->isNatural()) {
      return cur();
    }
    advance(sizeof(Instruction) * ph->size());
    return next();
  }
  if (InstIsBNop<BufferInstructionIterator>(*this)) {
    return next();
  }
  return cur();
}
2508
2509 // Cases to be handled:
2510 // 1) no pools or branches in sight => return this+1
2511 // 2) branch to next instruction => return this+2, because a nop needed to be
2512 // inserted into the stream.
2513 // 3) this+1 is an artificial guard for a pool => return first instruction
2514 // after the pool
2515 // 4) this+1 is a natural guard => return the branch
2516 // 5) this is a branch, right before a pool => return first instruction after
2517 // the pool
2518 // in assembly form:
2519 // 1) add r0, r0, r0 <= this
2520 // add r1, r1, r1 <= returned value
2521 // add r2, r2, r2
2522 //
2523 // 2) add r0, r0, r0 <= this
2524 // b foo
2525 // foo:
2526 // add r2, r2, r2 <= returned value
2527 //
2528 // 3) add r0, r0, r0 <= this
2529 // b after_pool;
2530 // .word 0xffff0002 # bit 15 being 0 indicates that the branch was not
2531 // # requested by the assembler
2532 // 0xdeadbeef # the 2 indicates that there is 1 pool entry, and the
2533 // # pool header
2534 // add r4, r4, r4 <= returned value
2535 // 4) add r0, r0, r0 <= this
2536 // b after_pool <= returned value
2537 // .word 0xffff8002 # bit 15 being 1 indicates that the branch was
2538 // # requested by the assembler
2539 // 0xdeadbeef
2540 // add r4, r4, r4
2541 // 5) b after_pool <= this
2542 // .word 0xffff8002 # bit 15 has no bearing on the returned value
2543 // 0xdeadbeef
2544 // add r4, r4, r4 <= returned value
2545
next()2546 Instruction* InstructionIterator::next() {
2547 const PoolHeader* ph;
2548
2549 // If the current instruction is followed by a pool header,
2550 // move past the current instruction and the pool.
2551 if (InstIsGuard(cur(), &ph)) {
2552 advanceRaw(1 + ph->size());
2553 return maybeSkipAutomaticInstructions();
2554 }
2555
2556 // The next instruction is then known to not be a PoolHeader.
2557 advanceRaw(1);
2558 return maybeSkipAutomaticInstructions();
2559 }
2560
ToggleToJmp(CodeLocationLabel inst_)2561 void Assembler::ToggleToJmp(CodeLocationLabel inst_) {
2562 uint32_t* ptr = (uint32_t*)inst_.raw();
2563
2564 DebugOnly<Instruction*> inst = (Instruction*)inst_.raw();
2565 MOZ_ASSERT(inst->is<InstCMP>());
2566
2567 // Zero bits 20-27, then set 24-27 to be correct for a branch.
2568 // 20-23 will be party of the B's immediate, and should be 0.
2569 *ptr = (*ptr & ~(0xff << 20)) | (0xa0 << 20);
2570 }
2571
ToggleToCmp(CodeLocationLabel inst_)2572 void Assembler::ToggleToCmp(CodeLocationLabel inst_) {
2573 uint32_t* ptr = (uint32_t*)inst_.raw();
2574
2575 DebugOnly<Instruction*> inst = (Instruction*)inst_.raw();
2576 MOZ_ASSERT(inst->is<InstBImm>());
2577
2578 // Ensure that this masking operation doesn't affect the offset of the
2579 // branch instruction when it gets toggled back.
2580 MOZ_ASSERT((*ptr & (0xf << 20)) == 0);
2581
2582 // Also make sure that the CMP is valid. Part of having a valid CMP is that
2583 // all of the bits describing the destination in most ALU instructions are
2584 // all unset (looks like it is encoding r0).
2585 MOZ_ASSERT(toRD(*inst) == r0);
2586
2587 // Zero out bits 20-27, then set them to be correct for a compare.
2588 *ptr = (*ptr & ~(0xff << 20)) | (0x35 << 20);
2589 }
2590
ToggleCall(CodeLocationLabel inst_,bool enabled)2591 void Assembler::ToggleCall(CodeLocationLabel inst_, bool enabled) {
2592 InstructionIterator iter(reinterpret_cast<Instruction*>(inst_.raw()));
2593 MOZ_ASSERT(iter.cur()->is<InstMovW>() || iter.cur()->is<InstLDR>());
2594
2595 if (iter.cur()->is<InstMovW>()) {
2596 // If it looks like the start of a movw/movt sequence, then make sure we
2597 // have all of it (and advance the iterator past the full sequence).
2598 iter.next();
2599 MOZ_ASSERT(iter.cur()->is<InstMovT>());
2600 }
2601
2602 iter.next();
2603 MOZ_ASSERT(iter.cur()->is<InstNOP>() || iter.cur()->is<InstBLXReg>());
2604
2605 if (enabled == iter.cur()->is<InstBLXReg>()) {
2606 // Nothing to do.
2607 return;
2608 }
2609
2610 Instruction* inst = iter.cur();
2611
2612 if (enabled) {
2613 *inst = InstBLXReg(ScratchRegister, Always);
2614 } else {
2615 *inst = InstNOP();
2616 }
2617 }
2618
ToggledCallSize(uint8_t * code)2619 size_t Assembler::ToggledCallSize(uint8_t* code) {
2620 InstructionIterator iter(reinterpret_cast<Instruction*>(code));
2621 MOZ_ASSERT(iter.cur()->is<InstMovW>() || iter.cur()->is<InstLDR>());
2622
2623 if (iter.cur()->is<InstMovW>()) {
2624 // If it looks like the start of a movw/movt sequence, then make sure we
2625 // have all of it (and advance the iterator past the full sequence).
2626 iter.next();
2627 MOZ_ASSERT(iter.cur()->is<InstMovT>());
2628 }
2629
2630 iter.next();
2631 MOZ_ASSERT(iter.cur()->is<InstNOP>() || iter.cur()->is<InstBLXReg>());
2632 return uintptr_t(iter.cur()) + 4 - uintptr_t(code);
2633 }
2634
BailoutTableStart(uint8_t * code)2635 uint8_t* Assembler::BailoutTableStart(uint8_t* code) {
2636 // The iterator skips over any automatically-inserted instructions.
2637 InstructionIterator iter(reinterpret_cast<Instruction*>(code));
2638 MOZ_ASSERT(iter.cur()->is<InstBLImm>());
2639 return reinterpret_cast<uint8_t*>(iter.cur());
2640 }
2641
// Cached nop-fill count: 0 by default, overridable via the ARM_ASM_NOP_FILL
// environment variable (parsed in GetNopFill below, which rejects values > 8).
uint32_t Assembler::NopFill = 0;
2643
GetNopFill()2644 uint32_t Assembler::GetNopFill() {
2645 static bool isSet = false;
2646 if (!isSet) {
2647 char* fillStr = getenv("ARM_ASM_NOP_FILL");
2648 uint32_t fill;
2649 if (fillStr && sscanf(fillStr, "%u", &fill) == 1) {
2650 NopFill = fill;
2651 }
2652 if (NopFill > 8) {
2653 MOZ_CRASH("Nop fill > 8 is not supported");
2654 }
2655 isSet = true;
2656 }
2657 return NopFill;
2658 }
2659
// Maximum pool offset used by the assembler: 1024 by default, overridable
// via the ASM_POOL_MAX_OFFSET environment variable (parsed in
// GetPoolMaxOffset below).
uint32_t Assembler::AsmPoolMaxOffset = 1024;
2661
GetPoolMaxOffset()2662 uint32_t Assembler::GetPoolMaxOffset() {
2663 static bool isSet = false;
2664 if (!isSet) {
2665 char* poolMaxOffsetStr = getenv("ASM_POOL_MAX_OFFSET");
2666 uint32_t poolMaxOffset;
2667 if (poolMaxOffsetStr &&
2668 sscanf(poolMaxOffsetStr, "%u", &poolMaxOffset) == 1) {
2669 AsmPoolMaxOffset = poolMaxOffset;
2670 }
2671 isSet = true;
2672 }
2673 return AsmPoolMaxOffset;
2674 }
2675
SecondScratchRegisterScope(MacroAssembler & masm)2676 SecondScratchRegisterScope::SecondScratchRegisterScope(MacroAssembler& masm)
2677 : AutoRegisterScope(masm, masm.getSecondScratchReg()) {}
2678
2679 #ifdef JS_DISASM_ARM
2680
2681 /* static */
disassembleInstruction(const Instruction * i,DisasmBuffer & buffer)2682 void Assembler::disassembleInstruction(const Instruction* i,
2683 DisasmBuffer& buffer) {
2684 disasm::NameConverter converter;
2685 disasm::Disassembler dasm(converter);
2686 uint8_t* loc = reinterpret_cast<uint8_t*>(const_cast<uint32_t*>(i->raw()));
2687 dasm.InstructionDecode(buffer, loc);
2688 }
2689
initDisassembler()2690 void Assembler::initDisassembler() {
2691 // The line is normally laid out like this:
2692 //
2693 // xxxxxxxx ldr r, op ; comment
2694 //
2695 // where xx...x is the instruction bit pattern.
2696 //
2697 // Labels are laid out by themselves to line up with the instructions above
2698 // and below:
2699 //
2700 // nnnn:
2701 //
2702 // Branch targets are normally on the same line as the branch instruction,
2703 // but when they cannot be they will be on a line by themselves, indented
2704 // significantly:
2705 //
2706 // -> label
2707
2708 spew_.setLabelIndent(" "); // 10
2709 spew_.setTargetIndent(" "); // 20
2710 }
2711
finishDisassembler()2712 void Assembler::finishDisassembler() { spew_.spewOrphans(); }
2713
2714 // Labels are named as they are encountered by adding names to a
2715 // table, using the Label address as the key. This is made tricky by
2716 // the (memory for) Label objects being reused, but reused label
2717 // objects are recognizable from being marked as not used or not
2718 // bound. See spew_.refLabel().
2719 //
2720 // In a number of cases there is no information about the target, and
2721 // we just end up printing "patchable constant load to PC". This is
2722 // true especially for jumps to bailout handlers (which have no
2723 // names). See allocLiteralLoadEntry() and its callers. In some cases
2724 // (loop back edges) some information about the intended target may be
2725 // propagated from higher levels, and if so it's printed here.
2726
spew(Instruction * i)2727 void Assembler::spew(Instruction* i) {
2728 if (spew_.isDisabled() || !i) {
2729 return;
2730 }
2731
2732 DisasmBuffer buffer;
2733 disassembleInstruction(i, buffer);
2734 spew_.spew("%s", buffer.start());
2735 }
2736
2737 // If a target label is known, always print that and do not attempt to
2738 // disassemble the branch operands, as they will often be encoding
2739 // metainformation (pointers for a chain of jump instructions), and
2740 // not actual branch targets.
2741
spewBranch(Instruction * i,const LabelDoc & target)2742 void Assembler::spewBranch(Instruction* i, const LabelDoc& target) {
2743 if (spew_.isDisabled() || !i) {
2744 return;
2745 }
2746
2747 DisasmBuffer buffer;
2748 disassembleInstruction(i, buffer);
2749
2750 char labelBuf[128];
2751 labelBuf[0] = 0;
2752
2753 bool haveTarget = target.valid;
2754 if (!haveTarget) {
2755 SprintfLiteral(labelBuf, " -> (link-time target)");
2756 }
2757
2758 if (InstBranchImm::IsTHIS(*i)) {
2759 InstBranchImm* bimm = InstBranchImm::AsTHIS(*i);
2760 BOffImm destOff;
2761 bimm->extractImm(&destOff);
2762 if (destOff.isInvalid() || haveTarget) {
2763 // The target information in the instruction is likely garbage, so remove
2764 // it. The target label will in any case be printed if we have it.
2765 //
2766 // The format of the instruction disassembly is [0-9a-f]{8}\s+\S+\s+.*,
2767 // where the \S+ string is the opcode. Strip everything after the opcode,
2768 // and attach the label if we have it.
2769 int i;
2770 for (i = 8; i < buffer.length() && buffer[i] == ' '; i++) {
2771 }
2772 for (; i < buffer.length() && buffer[i] != ' '; i++) {
2773 }
2774 buffer[i] = 0;
2775 if (haveTarget) {
2776 SprintfLiteral(labelBuf, " -> %d%s", target.doc,
2777 !target.bound ? "f" : "");
2778 haveTarget = false;
2779 }
2780 }
2781 }
2782 spew_.spew("%s%s", buffer.start(), labelBuf);
2783
2784 if (haveTarget) {
2785 spew_.spewRef(target);
2786 }
2787 }
2788
spewLiteralLoad(PoolHintPun & php,bool loadToPC,const Instruction * i,const LiteralDoc & doc)2789 void Assembler::spewLiteralLoad(PoolHintPun& php, bool loadToPC,
2790 const Instruction* i, const LiteralDoc& doc) {
2791 if (spew_.isDisabled()) {
2792 return;
2793 }
2794
2795 char litbuf[2048];
2796 spew_.formatLiteral(doc, litbuf, sizeof(litbuf));
2797
2798 // See patchConstantPoolLoad, above. We assemble the instruction into a
2799 // buffer with a zero offset, as documentation, but the offset will be
2800 // patched later.
2801
2802 uint32_t inst;
2803 PoolHintData& data = php.phd;
2804 switch (php.phd.getLoadType()) {
2805 case PoolHintData::PoolDTR:
2806 Assembler::as_dtr_patch(IsLoad, 32, Offset, data.getReg(),
2807 DTRAddr(pc, DtrOffImm(0)), data.getCond(), &inst);
2808 break;
2809 case PoolHintData::PoolBranch:
2810 if (data.isValidPoolHint()) {
2811 Assembler::as_dtr_patch(IsLoad, 32, Offset, pc,
2812 DTRAddr(pc, DtrOffImm(0)), data.getCond(),
2813 &inst);
2814 }
2815 break;
2816 case PoolHintData::PoolVDTR:
2817 Assembler::as_vdtr_patch(IsLoad, data.getVFPReg(),
2818 VFPAddr(pc, VFPOffImm(0)), data.getCond(),
2819 &inst);
2820 break;
2821
2822 default:
2823 MOZ_CRASH();
2824 }
2825
2826 DisasmBuffer buffer;
2827 disasm::NameConverter converter;
2828 disasm::Disassembler dasm(converter);
2829 dasm.InstructionDecode(buffer, reinterpret_cast<uint8_t*>(&inst));
2830 spew_.spew("%s ; .const %s", buffer.start(), litbuf);
2831 }
2832
2833 #endif // JS_DISASM_ARM
2834