/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/arm64/Assembler-arm64.h"

#include "mozilla/DebugOnly.h"
#include "mozilla/MathAlgorithms.h"

#include "jscompartment.h"
#include "jsutil.h"

#include "gc/Marking.h"

#include "jit/arm64/Architecture-arm64.h"
#include "jit/arm64/MacroAssembler-arm64.h"
#include "jit/ExecutableAllocator.h"
#include "jit/JitCompartment.h"

using namespace js;
using namespace js::jit;

using mozilla::CountLeadingZeroes32;
using mozilla::DebugOnly;

// Note this is used for inter-AsmJS calls and may pass arguments and results
// in floating point registers even if the system ABI does not.
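//
// Illustrative example (assuming the usual AArch64 argument order x0..x7 and
// d0..d7): next(MIRType_Int32) yields x0, next(MIRType_Double) yields d0, and
// a second next(MIRType_Int32) yields x1; stack slots are only handed out once
// the corresponding register class is exhausted.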

ABIArg
ABIArgGenerator::next(MIRType type)
{
    switch (type) {
      case MIRType_Int32:
      case MIRType_Pointer:
        if (intRegIndex_ == NumIntArgRegs) {
            current_ = ABIArg(stackOffset_);
            stackOffset_ += sizeof(uintptr_t);
            break;
        }
        current_ = ABIArg(Register::FromCode(intRegIndex_));
        intRegIndex_++;
        break;

      case MIRType_Float32:
      case MIRType_Double:
        if (floatRegIndex_ == NumFloatArgRegs) {
            current_ = ABIArg(stackOffset_);
            stackOffset_ += sizeof(double);
            break;
        }
        current_ = ABIArg(FloatRegister(floatRegIndex_,
                                        type == MIRType_Double ? FloatRegisters::Double
                                                               : FloatRegisters::Single));
        floatRegIndex_++;
        break;

      default:
        MOZ_CRASH("Unexpected argument type");
    }
    return current_;
}

const Register ABIArgGenerator::NonArgReturnReg0 = r8;
const Register ABIArgGenerator::NonArgReturnReg1 = r9;
const Register ABIArgGenerator::NonVolatileReg = r1;
const Register ABIArgGenerator::NonArg_VolatileReg = r13;
const Register ABIArgGenerator::NonReturn_VolatileReg0 = r2;
const Register ABIArgGenerator::NonReturn_VolatileReg1 = r3;

namespace js {
namespace jit {

void
Assembler::finish()
{
    armbuffer_.flushPool();

    // The extended jump table is part of the code buffer.
    ExtendedJumpTable_ = emitExtendedJumpTable();
    Assembler::FinalizeCode();

    // The jump relocation table starts with a fixed-width integer pointing
    // to the start of the extended jump table.
    // Space for this integer is allocated by Assembler::addJumpRelocation()
    // before writing the first entry.
    // Don't touch memory if we saw an OOM error.
    if (jumpRelocations_.length() && !oom()) {
        MOZ_ASSERT(jumpRelocations_.length() >= sizeof(uint32_t));
        *(uint32_t*)jumpRelocations_.buffer() = ExtendedJumpTable_.getOffset();
    }
}

BufferOffset
Assembler::emitExtendedJumpTable()
{
    if (!pendingJumps_.length() || oom())
        return BufferOffset();

    armbuffer_.flushPool();
    armbuffer_.align(SizeOfJumpTableEntry);

    BufferOffset tableOffset = armbuffer_.nextOffset();

    for (size_t i = 0; i < pendingJumps_.length(); i++) {
        // Each JumpTableEntry is of the form:
        //   LDR ip0 [PC, 8]
        //   BR ip0
        //   [Patchable 8-byte constant low bits]
        //   [Patchable 8-byte constant high bits]
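        //
        // That is, two 4-byte instructions followed by an 8-byte patchable
        // pointer slot (SizeOfJumpTableEntry bytes in total, with the pointer
        // at OffsetOfJumpTableEntryPointer), as the assertions below check.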
        DebugOnly<size_t> preOffset = size_t(armbuffer_.nextOffset().getOffset());

        ldr(vixl::ip0, ptrdiff_t(8 / vixl::kInstructionSize));
        br(vixl::ip0);

        DebugOnly<size_t> prePointer = size_t(armbuffer_.nextOffset().getOffset());
        MOZ_ASSERT_IF(!oom(), prePointer - preOffset == OffsetOfJumpTableEntryPointer);

        brk(0x0);
        brk(0x0);

        DebugOnly<size_t> postOffset = size_t(armbuffer_.nextOffset().getOffset());

        MOZ_ASSERT_IF(!oom(), postOffset - preOffset == SizeOfJumpTableEntry);
    }

    if (oom())
        return BufferOffset();

    return tableOffset;
}

void
Assembler::executableCopy(uint8_t* buffer)
{
    // Copy the code and all constant pools into the output buffer.
    armbuffer_.executableCopy(buffer);

    // Patch any relative jumps that target code outside the buffer.
    // The extended jump table may be used for distant jumps.
    for (size_t i = 0; i < pendingJumps_.length(); i++) {
        RelativePatch& rp = pendingJumps_[i];

        if (!rp.target) {
            // The patch target is nullptr for jumps that have been linked to
            // a label within the same code block, but may be repatched later
            // to jump to a different code block.
            continue;
        }

        Instruction* target = (Instruction*)rp.target;
        Instruction* branch = (Instruction*)(buffer + rp.offset.getOffset());
        JumpTableEntry* extendedJumpTable =
            reinterpret_cast<JumpTableEntry*>(buffer + ExtendedJumpTable_.getOffset());
        if (branch->BranchType() != vixl::UnknownBranchType) {
            if (branch->IsTargetReachable(target)) {
                branch->SetImmPCOffsetTarget(target);
            } else {
                JumpTableEntry* entry = &extendedJumpTable[i];
                branch->SetImmPCOffsetTarget(entry->getLdr());
                entry->data = target;
            }
        } else {
            // This is currently a two-instruction call. In some instances it could be
            // optimized into a single-instruction call plus a nop, but this works as-is.
        }
    }
}

BufferOffset
Assembler::immPool(ARMRegister dest, uint8_t* value, vixl::LoadLiteralOp op, ARMBuffer::PoolEntry* pe)
{
    uint32_t inst = op | Rt(dest);
    const size_t numInst = 1;
    const unsigned sizeOfPoolEntryInBytes = 4;
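    // Note: |value| is a pointer parameter, so sizeof(value) below is the size
    // of a pointer (8 bytes on AArch64). That yields two 4-byte pool entries,
    // which is exactly enough for the 64-bit literal passed by immPool64().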
    const unsigned numPoolEntries = sizeof(value) / sizeOfPoolEntryInBytes;
    return allocEntry(numInst, numPoolEntries, (uint8_t*)&inst, value, pe);
}

BufferOffset
Assembler::immPool64(ARMRegister dest, uint64_t value, ARMBuffer::PoolEntry* pe)
{
    return immPool(dest, (uint8_t*)&value, vixl::LDR_x_lit, pe);
}

BufferOffset
Assembler::immPool64Branch(RepatchLabel* label, ARMBuffer::PoolEntry* pe, Condition c)
{
    MOZ_CRASH("immPool64Branch");
}

BufferOffset
Assembler::fImmPool(ARMFPRegister dest, uint8_t* value, vixl::LoadLiteralOp op)
{
    uint32_t inst = op | Rt(dest);
    const size_t numInst = 1;
    const unsigned sizeOfPoolEntryInBits = 32;
    const unsigned numPoolEntries = dest.size() / sizeOfPoolEntryInBits;
    return allocEntry(numInst, numPoolEntries, (uint8_t*)&inst, value);
}

BufferOffset
Assembler::fImmPool64(ARMFPRegister dest, double value)
{
    return fImmPool(dest, (uint8_t*)&value, vixl::LDR_d_lit);
}
BufferOffset
Assembler::fImmPool32(ARMFPRegister dest, float value)
{
    return fImmPool(dest, (uint8_t*)&value, vixl::LDR_s_lit);
}

void
Assembler::bind(Label* label, BufferOffset targetOffset)
{
    // Nothing has seen the label yet: just mark the location.
    // If we've run out of memory, don't attempt to modify the buffer which may
    // not be there. Just mark the label as bound to the (possibly bogus)
    // targetOffset.
    if (!label->used() || oom()) {
        label->bind(targetOffset.getOffset());
        return;
    }

    // Get the most recent instruction that used the label, as stored in the label.
    // This instruction is the head of an implicit linked list of label uses.
    BufferOffset branchOffset(label);

    while (branchOffset.assigned()) {
        // Before overwriting the offset in this instruction, get the offset of
        // the next link in the implicit branch list.
        BufferOffset nextOffset = NextLink(branchOffset);

        // Linking against the actual (Instruction*) would be invalid,
        // since that Instruction could be anywhere in memory.
        // Instead, just link against the correct relative offset, assuming
        // no constant pools, which will be taken into consideration
        // during finalization.
        ptrdiff_t relativeByteOffset = targetOffset.getOffset() - branchOffset.getOffset();
        Instruction* link = getInstructionAt(branchOffset);

        // This branch may still be registered for callbacks. Stop tracking it.
        vixl::ImmBranchType branchType = link->BranchType();
        vixl::ImmBranchRangeType branchRange = Instruction::ImmBranchTypeToRange(branchType);
        if (branchRange < vixl::NumShortBranchRangeTypes) {
            BufferOffset deadline(branchOffset.getOffset() +
                                  Instruction::ImmBranchMaxForwardOffset(branchRange));
            armbuffer_.unregisterBranchDeadline(branchRange, deadline);
        }

        // Is link able to reach the label?
        if (link->IsPCRelAddressing() || link->IsTargetReachable(link + relativeByteOffset)) {
            // Write a new relative offset into the instruction.
            link->SetImmPCOffsetTarget(link + relativeByteOffset);
        } else {
            // This is a short-range branch, and it can't reach the label directly.
            // Verify that it branches to a veneer: an unconditional branch.
            MOZ_ASSERT(getInstructionAt(nextOffset)->BranchType() == vixl::UncondBranchType);
        }

        branchOffset = nextOffset;
    }

    // Bind the label, so that future uses may encode the offset immediately.
    label->bind(targetOffset.getOffset());
}

void
Assembler::bind(RepatchLabel* label)
{
    // Nothing has seen the label yet: just mark the location.
    // If we've run out of memory, don't attempt to modify the buffer which may
    // not be there. Just mark the label as bound to nextOffset().
    if (!label->used() || oom()) {
        label->bind(nextOffset().getOffset());
        return;
    }
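
    // A RepatchLabel records only a single use, so there is exactly one branch
    // to fix up: point it at the current (next) buffer offset.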
    int branchOffset = label->offset();
    Instruction* inst = getInstructionAt(BufferOffset(branchOffset));
    inst->SetImmPCOffsetTarget(inst + nextOffset().getOffset() - branchOffset);
}

void
Assembler::trace(JSTracer* trc)
{
    for (size_t i = 0; i < pendingJumps_.length(); i++) {
        RelativePatch& rp = pendingJumps_[i];
        if (rp.kind == Relocation::JITCODE) {
            JitCode* code = JitCode::FromExecutable((uint8_t*)rp.target);
            TraceManuallyBarrieredEdge(trc, &code, "masmrel32");
            MOZ_ASSERT(code == JitCode::FromExecutable((uint8_t*)rp.target));
        }
    }

    // TODO: Trace.
#if 0
    if (tmpDataRelocations_.length())
        ::TraceDataRelocations(trc, &armbuffer_, &tmpDataRelocations_);
#endif
}

void
Assembler::addJumpRelocation(BufferOffset src, Relocation::Kind reloc)
{
    // Only JITCODE relocations are patchable at runtime.
    MOZ_ASSERT(reloc == Relocation::JITCODE);

    // The jump relocation table starts with a fixed-width integer pointing
    // to the start of the extended jump table. But, we don't know the
    // actual extended jump table offset yet, so write a 0 which we'll
    // patch later in Assembler::finish().
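    //
    // Sketch of the resulting relocation stream:
    //   [fixed uint32_t: extended jump table offset, patched in finish()]
    //   [srcOffset, extendedTableIndex]   // one pair per JITCODE jump
    //   ...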
    if (!jumpRelocations_.length())
        jumpRelocations_.writeFixedUint32_t(0);

    // Each entry in the table is an (offset, extendedTableIndex) pair.
    jumpRelocations_.writeUnsigned(src.getOffset());
    jumpRelocations_.writeUnsigned(pendingJumps_.length());
}

void
Assembler::addPendingJump(BufferOffset src, ImmPtr target, Relocation::Kind reloc)
{
    MOZ_ASSERT(target.value != nullptr);

    if (reloc == Relocation::JITCODE)
        addJumpRelocation(src, reloc);

    // This jump is not patchable at runtime. Extended jump table entry requirements
    // cannot be known until finalization, so to be safe, give each jump an entry.
    // This also causes GC tracing of the target.
    enoughMemory_ &= pendingJumps_.append(RelativePatch(src, target.value, reloc));
}

size_t
Assembler::addPatchableJump(BufferOffset src, Relocation::Kind reloc)
{
    MOZ_CRASH("TODO: This is currently unused (and untested)");
    if (reloc == Relocation::JITCODE)
        addJumpRelocation(src, reloc);

    size_t extendedTableIndex = pendingJumps_.length();
    enoughMemory_ &= pendingJumps_.append(RelativePatch(src, nullptr, reloc));
    return extendedTableIndex;
}

void
PatchJump(CodeLocationJump& jump_, CodeLocationLabel label, ReprotectCode reprotect)
{
    MOZ_CRASH("PatchJump");
}

void
Assembler::PatchDataWithValueCheck(CodeLocationLabel label, PatchedImmPtr newValue,
                                   PatchedImmPtr expected)
{
    Instruction* i = (Instruction*)label.raw();
    void** pValue = i->LiteralAddress<void**>();
    MOZ_ASSERT(*pValue == expected.value);
    *pValue = newValue.value;
}

void
Assembler::PatchDataWithValueCheck(CodeLocationLabel label, ImmPtr newValue, ImmPtr expected)
{
    PatchDataWithValueCheck(label, PatchedImmPtr(newValue.value), PatchedImmPtr(expected.value));
}

void
Assembler::ToggleToJmp(CodeLocationLabel inst_)
{
    Instruction* i = (Instruction*)inst_.raw();
    MOZ_ASSERT(i->IsAddSubImmediate());

    // Refer to instruction layout in ToggleToCmp().
    int imm19 = (int)i->Bits(23, 5);
    MOZ_ASSERT(vixl::is_int19(imm19));

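    // Rewrite the cmp in place as a branch that is always taken, reusing the
    // 19-bit branch offset that ToggleToCmp() stashed in bits 5..23.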
    b(i, imm19, Always);
}

void
Assembler::ToggleToCmp(CodeLocationLabel inst_)
{
    Instruction* i = (Instruction*)inst_.raw();
    MOZ_ASSERT(i->IsCondB());

    int imm19 = i->ImmCondBranch();
    // Bit 23 is reserved, and the simulator asserts if it is set.
    // It'll be messy to decode, but we can steal bit 30 or bit 31.
    MOZ_ASSERT(vixl::is_int18(imm19));

    // 31 - 64-bit if set, 32-bit if unset. (OK!)
    // 30 - sub if set, add if unset. (OK!)
    // 29 - SetFlagsBit. Must be set.
    // 22:23 - ShiftAddSub. (OK!)
    // 10:21 - ImmAddSub. (OK!)
    // 5:9 - First source register (Rn). (OK!)
    // 0:4 - Destination Register. Must be xzr.

    // From the above, there is a safe 19-bit contiguous region from 5:23.
    Emit(i, vixl::ThirtyTwoBits | vixl::AddSubImmediateFixed | vixl::SUB | Flags(vixl::SetFlags) |
            Rd(vixl::xzr) | (imm19 << vixl::Rn_offset));
}

void
Assembler::ToggleCall(CodeLocationLabel inst_, bool enabled)
{
    const Instruction* first = reinterpret_cast<Instruction*>(inst_.raw());
    Instruction* load;
    Instruction* call;

    // There might be a constant pool at the very first instruction.
    first = first->skipPool();

    // Skip the stack pointer restore instruction.
    if (first->IsStackPtrSync())
        first = first->InstructionAtOffset(vixl::kInstructionSize)->skipPool();

    load = const_cast<Instruction*>(first);

    // The call instruction follows the load, but there may be an injected
    // constant pool.
    call = const_cast<Instruction*>(load->InstructionAtOffset(vixl::kInstructionSize)->skipPool());

    if (call->IsBLR() == enabled)
        return;

    if (call->IsBLR()) {
        // If the second instruction is blr(), then we have:
        //   ldr x17, [pc, offset]
        //   blr x17
        MOZ_ASSERT(load->IsLDR());
        // We want to transform this to:
        //   adr xzr, [pc, offset]
        //   nop
        int32_t offset = load->ImmLLiteral();
        adr(load, xzr, int32_t(offset));
        nop(call);
    } else {
        // We have:
        //   adr xzr, [pc, offset] (or ldr x17, [pc, offset])
        //   nop
        MOZ_ASSERT(load->IsADR() || load->IsLDR());
        MOZ_ASSERT(call->IsNOP());
        // Transform this to:
        //   ldr x17, [pc, offset]
        //   blr x17
        int32_t offset = (int)load->ImmPCRawOffset();
        MOZ_ASSERT(vixl::is_int19(offset));
        ldr(load, ScratchReg2_64, int32_t(offset));
        blr(call, ScratchReg2_64);
    }
}

class RelocationIterator
{
    CompactBufferReader reader_;
    uint32_t tableStart_;
    uint32_t offset_;
    uint32_t extOffset_;

  public:
    explicit RelocationIterator(CompactBufferReader& reader)
      : reader_(reader)
    {
        // The first uint32_t stores the extended table offset.
        tableStart_ = reader_.readFixedUint32_t();
    }

    bool read() {
        if (!reader_.more())
            return false;
        offset_ = reader_.readUnsigned();
        extOffset_ = reader_.readUnsigned();
        return true;
    }

    uint32_t offset() const {
        return offset_;
    }
    uint32_t extendedOffset() const {
        return extOffset_;
    }
};

static JitCode*
CodeFromJump(JitCode* code, uint8_t* jump)
{
    const Instruction* inst = (const Instruction*)jump;
    uint8_t* target;

    // We're expecting a call created by MacroAssembler::call(JitCode*).
    // It looks like:
    //
    //   ldr scratch, [pc, offset]
    //   blr scratch
    //
    // If the call has been toggled by ToggleCall(), it looks like:
    //
    //   adr xzr, [pc, offset]
    //   nop
    //
    // There might be a constant pool at the very first instruction.
    // See also ToggleCall().
    inst = inst->skipPool();

    // Skip the stack pointer restore instruction.
    if (inst->IsStackPtrSync())
        inst = inst->InstructionAtOffset(vixl::kInstructionSize)->skipPool();

    if (inst->BranchType() != vixl::UnknownBranchType) {
        // This is an immediate branch.
        target = (uint8_t*)inst->ImmPCOffsetTarget();
    } else if (inst->IsLDR()) {
        // This is an ldr+blr call that is enabled. See ToggleCall().
        mozilla::DebugOnly<const Instruction*> nextInst =
          inst->InstructionAtOffset(vixl::kInstructionSize)->skipPool();
        MOZ_ASSERT(nextInst->IsNOP() || nextInst->IsBLR());
        target = (uint8_t*)inst->Literal64();
    } else if (inst->IsADR()) {
        // This is a disabled call: adr+nop. See ToggleCall().
        mozilla::DebugOnly<const Instruction*> nextInst =
          inst->InstructionAtOffset(vixl::kInstructionSize)->skipPool();
        MOZ_ASSERT(nextInst->IsNOP());
        ptrdiff_t offset = inst->ImmPCRawOffset() << vixl::kLiteralEntrySizeLog2;
        // This is what Literal64 would do with the corresponding ldr.
        memcpy(&target, inst + offset, sizeof(target));
    } else {
        MOZ_CRASH("Unrecognized jump instruction.");
    }

    // If the jump is within the code buffer, it uses the extended jump table.
    if (target >= code->raw() && target < code->raw() + code->instructionsSize()) {
        MOZ_ASSERT(target + Assembler::SizeOfJumpTableEntry <= code->raw() + code->instructionsSize());

        uint8_t** patchablePtr = (uint8_t**)(target + Assembler::OffsetOfJumpTableEntryPointer);
        target = *patchablePtr;
    }

    return JitCode::FromExecutable(target);
}

void
Assembler::TraceJumpRelocations(JSTracer* trc, JitCode* code, CompactBufferReader& reader)
{
    RelocationIterator iter(reader);
    while (iter.read()) {
        JitCode* child = CodeFromJump(code, code->raw() + iter.offset());
        TraceManuallyBarrieredEdge(trc, &child, "rel32");
        MOZ_ASSERT(child == CodeFromJump(code, code->raw() + iter.offset()));
    }
}

static void
TraceDataRelocations(JSTracer* trc, uint8_t* buffer, CompactBufferReader& reader)
{
    while (reader.more()) {
        size_t offset = reader.readUnsigned();
        Instruction* load = (Instruction*)&buffer[offset];

        // The only valid traceable operation is a 64-bit load to an ARMRegister.
        // Refer to movePatchablePtr() for generation.
        MOZ_ASSERT(load->Mask(vixl::LoadLiteralMask) == vixl::LDR_x_lit);

        uintptr_t* literalAddr = load->LiteralAddress<uintptr_t*>();
        uintptr_t literal = *literalAddr;

        // All pointers on AArch64 will have the top bits cleared.
        // If those bits are not cleared, this must be a Value.
        if (literal >> JSVAL_TAG_SHIFT) {
            jsval_layout layout;
            layout.asBits = literal;
            Value v = IMPL_TO_JSVAL(layout);
            TraceManuallyBarrieredEdge(trc, &v, "ion-masm-value");
            *literalAddr = JSVAL_TO_IMPL(v).asBits;

            // TODO: When we can, flush caches here if a pointer was moved.
            continue;
        }

        // No barriers needed since the pointers are constants.
        TraceManuallyBarrieredGenericPointerEdge(trc, reinterpret_cast<gc::Cell**>(literalAddr),
                                                 "ion-masm-ptr");

        // TODO: Flush caches at end?
    }
}

void
Assembler::TraceDataRelocations(JSTracer* trc, JitCode* code, CompactBufferReader& reader)
{
    ::TraceDataRelocations(trc, code->raw(), reader);
}

void
Assembler::FixupNurseryObjects(JSContext* cx, JitCode* code, CompactBufferReader& reader,
                               const ObjectVector& nurseryObjects)
{

    MOZ_ASSERT(!nurseryObjects.empty());

    uint8_t* buffer = code->raw();
    bool hasNurseryPointers = false;

    while (reader.more()) {
        size_t offset = reader.readUnsigned();
        Instruction* ins = (Instruction*)&buffer[offset];

        uintptr_t* literalAddr = ins->LiteralAddress<uintptr_t*>();
        uintptr_t literal = *literalAddr;

        if (literal >> JSVAL_TAG_SHIFT)
            continue; // This is a Value.

        if (!(literal & 0x1))
            continue;

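        // Nursery object references are encoded as ((index << 1) | 1): decode
        // the index into |nurseryObjects| and overwrite the literal with the
        // object's current address.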
        uint32_t index = literal >> 1;
        JSObject* obj = nurseryObjects[index];
        *literalAddr = uintptr_t(obj);

        // Either all objects are still in the nursery, or all objects are tenured.
        MOZ_ASSERT_IF(hasNurseryPointers, IsInsideNursery(obj));

        if (!hasNurseryPointers && IsInsideNursery(obj))
            hasNurseryPointers = true;
    }

    if (hasNurseryPointers)
        cx->runtime()->gc.storeBuffer.putWholeCell(code);
}

void
Assembler::PatchInstructionImmediate(uint8_t* code, PatchedImmPtr imm)
{
    MOZ_CRASH("PatchInstructionImmediate()");
}

void
Assembler::UpdateBoundsCheck(uint32_t heapSize, Instruction* inst)
{
    int32_t mask = ~(heapSize - 1);
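    // For example, a 64 KiB heap (heapSize = 0x10000) gives mask = 0xffff0000,
    // which is then re-encoded into the N/imm_s/imm_r fields of the patched
    // logical-immediate instruction below.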
    unsigned n, imm_s, imm_r;
    if (!IsImmLogical(mask, 32, &n, &imm_s, &imm_r))
        MOZ_CRASH("Could not encode immediate!?");

    inst->SetImmR(imm_r);
    inst->SetImmS(imm_s);
    inst->SetBitN(n);
}

void
Assembler::retarget(Label* label, Label* target)
{
    if (label->used()) {
        if (target->bound()) {
            bind(label, BufferOffset(target));
        } else if (target->used()) {
            // The target is not bound but used. Prepend label's branch list
            // onto target's.
            BufferOffset labelBranchOffset(label);

            // Find the tail (oldest use) of label's use chain.
            BufferOffset next = NextLink(labelBranchOffset);
            while (next.assigned()) {
                labelBranchOffset = next;
                next = NextLink(next);
            }

            // Then link the tail of label's use chain to the head of target's
            // use chain, prepending label's entire use chain onto target's.
            SetNextLink(labelBranchOffset, BufferOffset(target));
            target->use(label->offset());
        } else {
            // The target is unbound and unused. We can just take the head of
            // the list hanging off of label, and dump that into target.
            DebugOnly<uint32_t> prev = target->use(label->offset());
            MOZ_ASSERT((int32_t)prev == Label::INVALID_OFFSET);
        }
    }
    label->reset();
}

} // namespace jit
} // namespace js