/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/mips64/Assembler-mips64.h"

#include "mozilla/DebugOnly.h"

using mozilla::DebugOnly;

using namespace js;
using namespace js::jit;

ABIArgGenerator::ABIArgGenerator()
  : usedArgSlots_(0),
    firstArgFloat(false),
    current_()
{}

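// Note on the n64 ABI (a sketch, not normative): the first eight arguments
// are passed in registers, integers in a0-a7 and floats in f12-f19, and every
// argument consumes one argument slot regardless of type. That is why the
// single usedArgSlots_ counter below indexes both register files and the
// stack.
//
// Typical use (illustrative only):
//   ABIArgGenerator gen;
//   ABIArg arg0 = gen.next(MIRType_Pointer);  // lands in a0
//   ABIArg arg1 = gen.next(MIRType_Double);   // lands in f13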
ABIArg
ABIArgGenerator::next(MIRType type)
{
    switch (type) {
      case MIRType_Int32:
      case MIRType_Pointer: {
        Register destReg;
        if (GetIntArgReg(usedArgSlots_, &destReg))
            current_ = ABIArg(destReg);
        else
            current_ = ABIArg(GetArgStackDisp(usedArgSlots_));
        usedArgSlots_++;
        break;
      }
      case MIRType_Float32:
      case MIRType_Double: {
        FloatRegister destFReg;
        FloatRegister::ContentType contentType;
        if (!usedArgSlots_)
            firstArgFloat = true;
        contentType = (type == MIRType_Double) ?
            FloatRegisters::Double : FloatRegisters::Single;
        if (GetFloatArgReg(usedArgSlots_, &destFReg))
            current_ = ABIArg(FloatRegister(destFReg.id(), contentType));
        else
            current_ = ABIArg(GetArgStackDisp(usedArgSlots_));
        usedArgSlots_++;
        break;
      }
      default:
        MOZ_CRASH("Unexpected argument type");
    }
    return current_;
}

const Register ABIArgGenerator::NonArgReturnReg0 = t0;
const Register ABIArgGenerator::NonArgReturnReg1 = t1;
const Register ABIArgGenerator::NonArg_VolatileReg = v0;
const Register ABIArgGenerator::NonReturn_VolatileReg0 = a0;
const Register ABIArgGenerator::NonReturn_VolatileReg1 = a1;

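// Helpers that encode a floating point register id into the rt, rd, or sa
// field of an instruction word.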
uint32_t
js::jit::RT(FloatRegister r)
{
    MOZ_ASSERT(r.id() < FloatRegisters::TotalPhys);
    return r.id() << RTShift;
}

uint32_t
js::jit::RD(FloatRegister r)
{
    MOZ_ASSERT(r.id() < FloatRegisters::TotalPhys);
    return r.id() << RDShift;
}

uint32_t
js::jit::SA(FloatRegister r)
{
    MOZ_ASSERT(r.id() < FloatRegisters::TotalPhys);
    return r.id() << SAShift;
}

// Used to patch jumps created by MacroAssemblerMIPS64Compat::jumpWithPatch.
void
jit::PatchJump(CodeLocationJump& jump_, CodeLocationLabel label, ReprotectCode reprotect)
{
    Instruction* inst = (Instruction*)jump_.raw();

    // Six instructions are used to load the 64-bit immediate.
    MaybeAutoWritableJitCode awjc(inst, 6 * sizeof(uint32_t), reprotect);
    Assembler::UpdateLoad64Value(inst, (uint64_t)label.raw());

    AutoFlushICache::flush(uintptr_t(inst), 6 * sizeof(uint32_t));
}

// For more information about backedges, see the comment in
// MacroAssemblerMIPS64Compat::backedgeJump().
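//
// Rough layout of the patchable backedge block (a sketch inferred from the
// offsets used below, not an authoritative listing):
//   [0]      beq ...        ; short branch when the target is in range
//   [1..4]   64-bit load    ; long jump to the loop header
//   [5]      jr
//   [6..9]   64-bit load    ; long jump to the interrupt check
//   [10]     jr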
void
jit::PatchBackedge(CodeLocationJump& jump, CodeLocationLabel label,
                   JitRuntime::BackedgeTarget target)
{
    uintptr_t sourceAddr = (uintptr_t)jump.raw();
    uintptr_t targetAddr = (uintptr_t)label.raw();
    InstImm* branch = (InstImm*)jump.raw();

    MOZ_ASSERT(branch->extractOpcode() == (uint32_t(op_beq) >> OpcodeShift));

    if (BOffImm16::IsInRange(targetAddr - sourceAddr)) {
        branch->setBOffImm16(BOffImm16(targetAddr - sourceAddr));
    } else {
        if (target == JitRuntime::BackedgeLoopHeader) {
            Instruction* inst = &branch[1];
            Assembler::UpdateLoad64Value(inst, targetAddr);
            // Jump to the first ori; the lui will be executed in the delay slot.
            branch->setBOffImm16(BOffImm16(2 * sizeof(uint32_t)));
        } else {
            Instruction* inst = &branch[6];
            Assembler::UpdateLoad64Value(inst, targetAddr);
            // Jump to the first ori of the interrupt loop.
            branch->setBOffImm16(BOffImm16(6 * sizeof(uint32_t)));
        }
    }
}

void
Assembler::executableCopy(uint8_t* buffer)
{
    MOZ_ASSERT(isFinished);
    m_buffer.executableCopy(buffer);

    // Patch all long jumps during code copy.
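    // Each recorded load currently holds an offset relative to the start of
    // the buffer; rebase it to the buffer's final address.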
    for (size_t i = 0; i < longJumps_.length(); i++) {
        Instruction* inst = (Instruction*) ((uintptr_t)buffer + longJumps_[i]);

        uint64_t value = Assembler::ExtractLoad64Value(inst);
        Assembler::UpdateLoad64Value(inst, (uint64_t)buffer + value);
    }

    AutoFlushICache::setRange(uintptr_t(buffer), m_buffer.size());
}

uintptr_t
Assembler::GetPointer(uint8_t* instPtr)
{
    Instruction* inst = (Instruction*)instPtr;
    return Assembler::ExtractLoad64Value(inst);
}

static JitCode*
CodeFromJump(Instruction* jump)
{
    uint8_t* target = (uint8_t*)Assembler::ExtractLoad64Value(jump);
    return JitCode::FromExecutable(target);
}

void
Assembler::TraceJumpRelocations(JSTracer* trc, JitCode* code, CompactBufferReader& reader)
{
    while (reader.more()) {
        JitCode* child = CodeFromJump((Instruction*)(code->raw() + reader.readUnsigned()));
        TraceManuallyBarrieredEdge(trc, &child, "rel32");
    }
}

static void
TraceOneDataRelocation(JSTracer* trc, Instruction* inst)
{
    void* ptr = (void*)Assembler::ExtractLoad64Value(inst);
    void* prior = ptr;

    // All pointers on MIPS64 will have the top bits cleared. If those bits
    // are not cleared, this must be a Value.
    uintptr_t word = reinterpret_cast<uintptr_t>(ptr);
    if (word >> JSVAL_TAG_SHIFT) {
        jsval_layout layout;
        layout.asBits = word;
        Value v = IMPL_TO_JSVAL(layout);
        TraceManuallyBarrieredEdge(trc, &v, "ion-masm-value");
        ptr = (void*)JSVAL_TO_IMPL(v).asBits;
    } else {
        // No barrier needed since these are constants.
        TraceManuallyBarrieredGenericPointerEdge(trc, reinterpret_cast<gc::Cell**>(&ptr),
                                                     "ion-masm-ptr");
    }

    if (ptr != prior) {
        Assembler::UpdateLoad64Value(inst, uint64_t(ptr));
        AutoFlushICache::flush(uintptr_t(inst), 6 * sizeof(uint32_t));
    }
}

static void
TraceDataRelocations(JSTracer* trc, uint8_t* buffer, CompactBufferReader& reader)
{
    while (reader.more()) {
        size_t offset = reader.readUnsigned();
        Instruction* inst = (Instruction*)(buffer + offset);
        TraceOneDataRelocation(trc, inst);
    }
}

static void
TraceDataRelocations(JSTracer* trc, MIPSBuffer* buffer, CompactBufferReader& reader)
{
    while (reader.more()) {
        BufferOffset bo(reader.readUnsigned());
        MIPSBuffer::AssemblerBufferInstIterator iter(bo, buffer);
        TraceOneDataRelocation(trc, iter.cur());
    }
}

void
Assembler::TraceDataRelocations(JSTracer* trc, JitCode* code, CompactBufferReader& reader)
{
    ::TraceDataRelocations(trc, code->raw(), reader);
}

void
Assembler::trace(JSTracer* trc)
{
    for (size_t i = 0; i < jumps_.length(); i++) {
        RelativePatch& rp = jumps_[i];
        if (rp.kind == Relocation::JITCODE) {
            JitCode* code = JitCode::FromExecutable((uint8_t*)rp.target);
            TraceManuallyBarrieredEdge(trc, &code, "masmrel32");
            MOZ_ASSERT(code == JitCode::FromExecutable((uint8_t*)rp.target));
        }
    }
    if (dataRelocations_.length()) {
        CompactBufferReader reader(dataRelocations_);
        ::TraceDataRelocations(trc, &m_buffer, reader);
    }
}

void
Assembler::Bind(uint8_t* rawCode, CodeOffset* label, const void* address)
{
    if (label->bound()) {
        intptr_t offset = label->offset();
        Instruction* inst = (Instruction*) (rawCode + offset);
        Assembler::UpdateLoad64Value(inst, (uint64_t)address);
    }
}

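// bind() finalizes a reserved branch block: depending on the original
// instruction and the distance to the target, the block is rewritten as a
// short branch (skipping or nop-filling the unused slots) or as a patchable
// 64-bit load of the target followed by jr/jalr.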
void
Assembler::bind(InstImm* inst, uintptr_t branch, uintptr_t target)
{
    int64_t offset = target - branch;
    InstImm inst_bgezal = InstImm(op_regimm, zero, rt_bgezal, BOffImm16(0));
    InstImm inst_beq = InstImm(op_beq, zero, zero, BOffImm16(0));

    // If the encoded offset is 4, the jump must be short.
    if (BOffImm16(inst[0]).decode() == 4) {
        MOZ_ASSERT(BOffImm16::IsInRange(offset));
        inst[0].setBOffImm16(BOffImm16(offset));
        inst[1].makeNop();
        return;
    }

    // Generate the long jump for calls because the return address has to be
    // the address after the reserved block.
    if (inst[0].encode() == inst_bgezal.encode()) {
        addLongJump(BufferOffset(branch));
        Assembler::WriteLoad64Instructions(inst, ScratchRegister, target);
        inst[4] = InstReg(op_special, ScratchRegister, zero, ra, ff_jalr).encode();
        // There is 1 nop after this.
        return;
    }

    if (BOffImm16::IsInRange(offset)) {
#ifdef _MIPS_ARCH_LOONGSON3A
        // Not skipping the trailing nops improves performance
        // on the Loongson3 platform.
        bool skipNops = false;
#else
        bool skipNops = (inst[0].encode() != inst_bgezal.encode() &&
                         inst[0].encode() != inst_beq.encode());
#endif

        inst[0].setBOffImm16(BOffImm16(offset));
        inst[1].makeNop();

        if (skipNops) {
            inst[2] = InstImm(op_regimm, zero, rt_bgez, BOffImm16(5 * sizeof(uint32_t))).encode();
            // There are 4 nops after this.
        }
        return;
    }

    if (inst[0].encode() == inst_beq.encode()) {
        // Handle long unconditional jump.
        addLongJump(BufferOffset(branch));
        Assembler::WriteLoad64Instructions(inst, ScratchRegister, target);
        inst[4] = InstReg(op_special, ScratchRegister, zero, zero, ff_jr).encode();
        // There is 1 nop after this.
    } else {
        // Handle long conditional jump.
        inst[0] = invertBranch(inst[0], BOffImm16(7 * sizeof(uint32_t)));
        // No need for a "nop" here because we can clobber scratch.
        addLongJump(BufferOffset(branch + sizeof(uint32_t)));
        Assembler::WriteLoad64Instructions(&inst[1], ScratchRegister, target);
        inst[5] = InstReg(op_special, ScratchRegister, zero, zero, ff_jr).encode();
        // There is 1 nop after this.
    }
}

void
Assembler::bind(RepatchLabel* label)
{
    BufferOffset dest = nextOffset();
    if (label->used() && !oom()) {
        // If the label has a use, then change this use to refer to
        // the bound label.
        BufferOffset b(label->offset());
        InstImm* inst1 = (InstImm*)editSrc(b);

        // If the first instruction is a branch, then this is a loop backedge.
        if (inst1->extractOpcode() == ((uint32_t)op_beq >> OpcodeShift)) {
            // Backedges are short jumps when bound, but can become long
            // when patched.
            uint64_t offset = dest.getOffset() - label->offset();
            MOZ_ASSERT(BOffImm16::IsInRange(offset));
            inst1->setBOffImm16(BOffImm16(offset));
        } else {
            Assembler::UpdateLoad64Value(inst1, dest.getOffset());
        }
    }
    label->bind(dest.getOffset());
}

uint32_t
Assembler::PatchWrite_NearCallSize()
{
    // Loading an address needs four instructions, plus a jump with its delay
    // slot.
    return (4 + 2) * sizeof(uint32_t);
}

void
Assembler::PatchWrite_NearCall(CodeLocationLabel start, CodeLocationLabel toCall)
{
    Instruction* inst = (Instruction*) start.raw();
    uint8_t* dest = toCall.raw();

    // Overwrite whatever instruction used to be here with a call.
    // Always use a long jump, for two reasons:
    // - The jump has to be the same size because of PatchWrite_NearCallSize.
    // - The return address has to be at the end of the replaced block.
    // A short jump wouldn't be more efficient.
    Assembler::WriteLoad64Instructions(inst, ScratchRegister, (uint64_t)dest);
    inst[4] = InstReg(op_special, ScratchRegister, zero, ra, ff_jalr);
    inst[5] = InstNOP();

    // Ensure everyone sees the code that was just written into memory.
    AutoFlushICache::flush(uintptr_t(inst), PatchWrite_NearCallSize());
}

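// A patchable 64-bit load is one of two sequences (a sketch of the layout as
// it is read back below). The long form uses six instruction slots and
// carries four 16-bit immediates in slots 0, 1, 3 and 5; slots 2 and 4 are
// shifts that are not inspected when the value is read back. The short form,
// written by WriteLoad64Instructions below, uses four slots, carries three
// immediates, and is recognized here by the ff_dsrl32 encoding in its third
// slot; it therefore encodes a sign-extended 48-bit value, which is why the
// extraction sign-extends from bit 47.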
uint64_t
Assembler::ExtractLoad64Value(Instruction* inst0)
{
    InstImm* i0 = (InstImm*) inst0;
    InstImm* i1 = (InstImm*) i0->next();
    InstReg* i2 = (InstReg*) i1->next();
    InstImm* i3 = (InstImm*) i2->next();
    InstImm* i5 = (InstImm*) i3->next()->next();

    MOZ_ASSERT(i0->extractOpcode() == ((uint32_t)op_lui >> OpcodeShift));
    MOZ_ASSERT(i1->extractOpcode() == ((uint32_t)op_ori >> OpcodeShift));
    MOZ_ASSERT(i3->extractOpcode() == ((uint32_t)op_ori >> OpcodeShift));

    if ((i2->extractOpcode() == ((uint32_t)op_special >> OpcodeShift)) &&
        (i2->extractFunctionField() == ff_dsrl32))
    {
        uint64_t value = (uint64_t(i0->extractImm16Value()) << 32) |
                         (uint64_t(i1->extractImm16Value()) << 16) |
                         uint64_t(i3->extractImm16Value());
        return uint64_t((int64_t(value) << 16) >> 16);
    }

    MOZ_ASSERT(i5->extractOpcode() == ((uint32_t)op_ori >> OpcodeShift));
    uint64_t value = (uint64_t(i0->extractImm16Value()) << 48) |
                     (uint64_t(i1->extractImm16Value()) << 32) |
                     (uint64_t(i3->extractImm16Value()) << 16) |
                     uint64_t(i5->extractImm16Value());
    return value;
}

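// Imm16::Upper and Imm16::Lower select the high and low halfword of a 32-bit
// immediate, so the halfwords written below mirror the extraction above.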
void
Assembler::UpdateLoad64Value(Instruction* inst0, uint64_t value)
{
    InstImm* i0 = (InstImm*) inst0;
    InstImm* i1 = (InstImm*) i0->next();
    InstReg* i2 = (InstReg*) i1->next();
    InstImm* i3 = (InstImm*) i2->next();
    InstImm* i5 = (InstImm*) i3->next()->next();

    MOZ_ASSERT(i0->extractOpcode() == ((uint32_t)op_lui >> OpcodeShift));
    MOZ_ASSERT(i1->extractOpcode() == ((uint32_t)op_ori >> OpcodeShift));
    MOZ_ASSERT(i3->extractOpcode() == ((uint32_t)op_ori >> OpcodeShift));

    if ((i2->extractOpcode() == ((uint32_t)op_special >> OpcodeShift)) &&
        (i2->extractFunctionField() == ff_dsrl32))
    {
        i0->setImm16(Imm16::Lower(Imm32(value >> 32)));
        i1->setImm16(Imm16::Upper(Imm32(value)));
        i3->setImm16(Imm16::Lower(Imm32(value)));
        return;
    }

    MOZ_ASSERT(i5->extractOpcode() == ((uint32_t)op_ori >> OpcodeShift));

    i0->setImm16(Imm16::Upper(Imm32(value >> 32)));
    i1->setImm16(Imm16::Lower(Imm32(value >> 32)));
    i3->setImm16(Imm16::Upper(Imm32(value)));
    i5->setImm16(Imm16::Lower(Imm32(value)));
}

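// Illustration of the short sequence written below (a sketch for the
// hypothetical value 0x0000123456789abc): inst0 carries 0x1234, inst1 carries
// 0x5678 and inst3 carries 0x9abc; inst2 rotates the first two halfwords up
// into bits 47:16 so that the final ori can fill bits 15:0.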
void
Assembler::WriteLoad64Instructions(Instruction* inst0, Register reg, uint64_t value)
{
    Instruction* inst1 = inst0->next();
    Instruction* inst2 = inst1->next();
    Instruction* inst3 = inst2->next();

    *inst0 = InstImm(op_lui, zero, reg, Imm16::Lower(Imm32(value >> 32)));
    *inst1 = InstImm(op_ori, reg, reg, Imm16::Upper(Imm32(value)));
    *inst2 = InstReg(op_special, rs_one, reg, reg, 48 - 32, ff_dsrl32);
    *inst3 = InstImm(op_ori, reg, reg, Imm16::Lower(Imm32(value)));
}

void
Assembler::PatchDataWithValueCheck(CodeLocationLabel label, ImmPtr newValue,
                                   ImmPtr expectedValue)
{
    PatchDataWithValueCheck(label, PatchedImmPtr(newValue.value),
                            PatchedImmPtr(expectedValue.value));
}

void
Assembler::PatchDataWithValueCheck(CodeLocationLabel label, PatchedImmPtr newValue,
                                   PatchedImmPtr expectedValue)
{
    Instruction* inst = (Instruction*) label.raw();

    // Extract the old value.
    DebugOnly<uint64_t> value = Assembler::ExtractLoad64Value(inst);
    MOZ_ASSERT(value == uint64_t(expectedValue.value));

    // Replace it with the new value.
    Assembler::UpdateLoad64Value(inst, uint64_t(newValue.value));

    AutoFlushICache::flush(uintptr_t(inst), 6 * sizeof(uint32_t));
}

void
Assembler::PatchInstructionImmediate(uint8_t* code, PatchedImmPtr imm)
{
    InstImm* inst = (InstImm*)code;
    Assembler::UpdateLoad64Value(inst, (uint64_t)imm.value);
}

uint64_t
Assembler::ExtractInstructionImmediate(uint8_t* code)
{
    InstImm* inst = (InstImm*)code;
    return Assembler::ExtractLoad64Value(inst);
}

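// Enable or disable a toggled call site: the instruction slot after the
// patchable load of the callee is swapped between jalr (call enabled) and a
// nop (call disabled).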
void
Assembler::ToggleCall(CodeLocationLabel inst_, bool enabled)
{
    Instruction* inst = (Instruction*)inst_.raw();
    InstImm* i0 = (InstImm*) inst;
    InstImm* i1 = (InstImm*) i0->next();
    InstImm* i3 = (InstImm*) i1->next()->next();
    Instruction* i4 = (Instruction*) i3->next();

    MOZ_ASSERT(i0->extractOpcode() == ((uint32_t)op_lui >> OpcodeShift));
    MOZ_ASSERT(i1->extractOpcode() == ((uint32_t)op_ori >> OpcodeShift));
    MOZ_ASSERT(i3->extractOpcode() == ((uint32_t)op_ori >> OpcodeShift));

    if (enabled) {
        MOZ_ASSERT(i4->extractOpcode() != ((uint32_t)op_lui >> OpcodeShift));
        InstReg jalr = InstReg(op_special, ScratchRegister, zero, ra, ff_jalr);
        *i4 = jalr;
    } else {
        InstNOP nop;
        *i4 = nop;
    }

    AutoFlushICache::flush(uintptr_t(i4), sizeof(uint32_t));
}

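// The heap size is embedded in the bounds check as a patchable 64-bit
// immediate, so updating it amounts to patching that load in place.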
void
Assembler::UpdateBoundsCheck(uint64_t heapSize, Instruction* inst)
{
    // Replace with the new value.
    Assembler::UpdateLoad64Value(inst, heapSize);
}