/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/mips64/Assembler-mips64.h"

#include "mozilla/DebugOnly.h"
#include "mozilla/Maybe.h"

#include "jit/AutoWritableJitCode.h"

using mozilla::DebugOnly;

using namespace js;
using namespace js::jit;

ABIArgGenerator::ABIArgGenerator()
    : regIndex_(0), stackOffset_(0), current_() {}

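// A sketch of the n64 argument convention as implemented here: integer and
// floating-point arguments share a single register index (hence the
// static_assert below that NumIntArgRegs == NumFloatArgRegs), so e.g. a
// double passed as the second argument consumes both $f13 and $a1. Once the
// registers are exhausted, each argument takes a full 8-byte stack slot.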
ABIArg ABIArgGenerator::next(MIRType type) {
  static_assert(NumIntArgRegs == NumFloatArgRegs);
  if (regIndex_ == NumIntArgRegs) {
    if (type != MIRType::Simd128) {
      current_ = ABIArg(stackOffset_);
      stackOffset_ += sizeof(uint64_t);
    } else {
      // The MIPS platform does not support SIMD yet.
      MOZ_CRASH("Unexpected argument type");
    }
    return current_;
  }
  switch (type) {
    case MIRType::Int32:
    case MIRType::Int64:
    case MIRType::Pointer:
    case MIRType::RefOrNull:
    case MIRType::StackResults: {
      Register destReg;
      GetIntArgReg(regIndex_++, &destReg);
      current_ = ABIArg(destReg);
      break;
    }
    case MIRType::Float32:
    case MIRType::Double: {
      FloatRegister::ContentType contentType;
      contentType = (type == MIRType::Double) ? FloatRegisters::Double
                                              : FloatRegisters::Single;
      FloatRegister destFReg;
      GetFloatArgReg(regIndex_++, &destFReg);
      current_ = ABIArg(FloatRegister(destFReg.id(), contentType));
      break;
    }
    default:
      MOZ_CRASH("Unexpected argument type");
  }
  return current_;
}

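// Helpers that place a floating-point register id into the corresponding
// bit field (rt/rd/rz/sa) of an instruction word; the encoding is valid for
// physical registers only, which the asserts below check.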
uint32_t js::jit::RT(FloatRegister r) {
  MOZ_ASSERT(r.id() < FloatRegisters::TotalPhys);
  return r.id() << RTShift;
}

uint32_t js::jit::RD(FloatRegister r) {
  MOZ_ASSERT(r.id() < FloatRegisters::TotalPhys);
  return r.id() << RDShift;
}

uint32_t js::jit::RZ(FloatRegister r) {
  MOZ_ASSERT(r.id() < FloatRegisters::TotalPhys);
  return r.id() << RZShift;
}

uint32_t js::jit::SA(FloatRegister r) {
  MOZ_ASSERT(r.id() < FloatRegisters::TotalPhys);
  return r.id() << SAShift;
}

void Assembler::executableCopy(uint8_t* buffer) {
  MOZ_ASSERT(isFinished);
  m_buffer.executableCopy(buffer);
}

uintptr_t Assembler::GetPointer(uint8_t* instPtr) {
  Instruction* inst = (Instruction*)instPtr;
  return Assembler::ExtractLoad64Value(inst);
}

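// A patchable jump materializes its 64-bit target with a load sequence, so
// the target JitCode can be recovered by decoding that load (see
// ExtractLoad64Value below).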
static JitCode* CodeFromJump(Instruction* jump) {
  uint8_t* target = (uint8_t*)Assembler::ExtractLoad64Value(jump);
  return JitCode::FromExecutable(target);
}

void Assembler::TraceJumpRelocations(JSTracer* trc, JitCode* code,
                                     CompactBufferReader& reader) {
  while (reader.more()) {
    JitCode* child =
        CodeFromJump((Instruction*)(code->raw() + reader.readUnsigned()));
    TraceManuallyBarrieredEdge(trc, &child, "rel32");
  }
}

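// Note that the AutoWritableJitCode is created lazily, only once a traced
// pointer has actually moved, so tracing that updates nothing never has to
// reprotect the code.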
static void TraceOneDataRelocation(JSTracer* trc,
                                   mozilla::Maybe<AutoWritableJitCode>& awjc,
                                   JitCode* code, Instruction* inst) {
  void* ptr = (void*)Assembler::ExtractLoad64Value(inst);
  void* prior = ptr;

  // Data relocations can be for Values or for raw pointers. If a Value is
  // zero-tagged, we can trace it as if it were a raw pointer. If a Value
  // is not zero-tagged, we have to interpret it as a Value to ensure that the
  // tag bits are masked off to recover the actual pointer.
  uintptr_t word = reinterpret_cast<uintptr_t>(ptr);
  if (word >> JSVAL_TAG_SHIFT) {
    // This relocation is a Value with a non-zero tag.
    Value v = Value::fromRawBits(word);
    TraceManuallyBarrieredEdge(trc, &v, "jit-masm-value");
    ptr = (void*)v.bitsAsPunboxPointer();
  } else {
    // This relocation is a raw pointer or a Value with a zero tag.
    // No barrier needed since these are constants.
    TraceManuallyBarrieredGenericPointerEdge(
        trc, reinterpret_cast<gc::Cell**>(&ptr), "jit-masm-ptr");
  }

  if (ptr != prior) {
    if (awjc.isNothing()) {
      awjc.emplace(code);
    }
    Assembler::UpdateLoad64Value(inst, uint64_t(ptr));
  }
}

/* static */
void Assembler::TraceDataRelocations(JSTracer* trc, JitCode* code,
                                     CompactBufferReader& reader) {
  mozilla::Maybe<AutoWritableJitCode> awjc;
  while (reader.more()) {
    size_t offset = reader.readUnsigned();
    Instruction* inst = (Instruction*)(code->raw() + offset);
    TraceOneDataRelocation(trc, awjc, code, inst);
  }
}

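// Patch a bound CodeLabel: a RawPointer label is a plain data word, while
// MoveImmediate and JumpImmediate labels point at a patchable 64-bit load
// whose immediate is rewritten in place.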
void Assembler::Bind(uint8_t* rawCode, const CodeLabel& label) {
  if (label.patchAt().bound()) {
    auto mode = label.linkMode();
    intptr_t offset = label.patchAt().offset();
    intptr_t target = label.target().offset();

    if (mode == CodeLabel::RawPointer) {
      *reinterpret_cast<const void**>(rawCode + offset) = rawCode + target;
    } else {
      MOZ_ASSERT(mode == CodeLabel::MoveImmediate ||
                 mode == CodeLabel::JumpImmediate);
      Instruction* inst = (Instruction*)(rawCode + offset);
      Assembler::UpdateLoad64Value(inst, (uint64_t)(rawCode + target));
    }
  }
}

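// Patch the branch at `branch` to reach `target`. Branches are emitted into
// a fixed-size reserved block, so binding only rewrites instructions in
// place. Roughly, as reconstructed from the cases below:
//
//   inst[0]   the branch itself (or the first instruction of the
//             long-jump address load)
//   inst[1]   delay-slot nop
//   inst[2..] room for the 4-instruction address load plus jr/jalr and a
//             trailing nop, used when the target is out of BOffImm16 range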
void Assembler::bind(InstImm* inst, uintptr_t branch, uintptr_t target) {
  int64_t offset = target - branch;
  InstImm inst_bgezal = InstImm(op_regimm, zero, rt_bgezal, BOffImm16(0));
  InstImm inst_beq = InstImm(op_beq, zero, zero, BOffImm16(0));

  // If the encoded offset is 4, the jump must be short.
  if (BOffImm16(inst[0]).decode() == 4) {
    MOZ_ASSERT(BOffImm16::IsInRange(offset));
    inst[0].setBOffImm16(BOffImm16(offset));
    inst[1].makeNop();
    return;
  }

  // Generate the long jump for calls because the return address has to be
  // the address after the reserved block.
  if (inst[0].encode() == inst_bgezal.encode()) {
    addLongJump(BufferOffset(branch), BufferOffset(target));
    Assembler::WriteLoad64Instructions(inst, ScratchRegister,
                                       LabelBase::INVALID_OFFSET);
    inst[4] = InstReg(op_special, ScratchRegister, zero, ra, ff_jalr).encode();
    // There is 1 nop after this.
    return;
  }

  if (BOffImm16::IsInRange(offset)) {
    // Keeping the trailing nops improves performance on the Loongson3
    // platform, so only skip them elsewhere.
    bool skipNops =
        !isLoongson() && (inst[0].encode() != inst_bgezal.encode() &&
                          inst[0].encode() != inst_beq.encode());

    inst[0].setBOffImm16(BOffImm16(offset));
    inst[1].makeNop();

    if (skipNops) {
      inst[2] =
          InstImm(op_regimm, zero, rt_bgez, BOffImm16(5 * sizeof(uint32_t)))
              .encode();
      // There are 4 nops after this.
    }
    return;
  }

  if (inst[0].encode() == inst_beq.encode()) {
    // Handle long unconditional jump.
    addLongJump(BufferOffset(branch), BufferOffset(target));
    Assembler::WriteLoad64Instructions(inst, ScratchRegister,
                                       LabelBase::INVALID_OFFSET);
#ifdef MIPSR6
    inst[4] =
        InstReg(op_special, ScratchRegister, zero, zero, ff_jalr).encode();
#else
    inst[4] = InstReg(op_special, ScratchRegister, zero, zero, ff_jr).encode();
#endif
    // There is 1 nop after this.
  } else {
    // Handle long conditional jump.
    inst[0] = invertBranch(inst[0], BOffImm16(7 * sizeof(uint32_t)));
    // No need for a "nop" here because we can clobber scratch.
    addLongJump(BufferOffset(branch + sizeof(uint32_t)), BufferOffset(target));
    Assembler::WriteLoad64Instructions(&inst[1], ScratchRegister,
                                       LabelBase::INVALID_OFFSET);
#ifdef MIPSR6
    inst[5] =
        InstReg(op_special, ScratchRegister, zero, zero, ff_jalr).encode();
#else
    inst[5] = InstReg(op_special, ScratchRegister, zero, zero, ff_jr).encode();
#endif
    // There is 1 nop after this.
  }
}

void Assembler::processCodeLabels(uint8_t* rawCode) {
  for (const CodeLabel& label : codeLabels_) {
    Bind(rawCode, label);
  }
}

uint32_t Assembler::PatchWrite_NearCallSize() {
  // Loading an address needs 4 instructions, plus a jump with a delay slot.
  return (4 + 2) * sizeof(uint32_t);
}

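// The patched call site is the fixed-size, six-instruction sequence written
// below (matching PatchWrite_NearCallSize above): a 4-instruction address
// load into ScratchRegister, a jalr through it, and a delay-slot nop, so
// the return address lands at the end of the replaced block.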
void Assembler::PatchWrite_NearCall(CodeLocationLabel start,
                                    CodeLocationLabel toCall) {
  Instruction* inst = (Instruction*)start.raw();
  uint8_t* dest = toCall.raw();

  // Overwrite whatever instruction used to be here with a call.
  // Always use a long jump, for two reasons:
  // - The jump has to be the same size because of PatchWrite_NearCallSize.
  // - The return address has to be at the end of the replaced block.
  // A short jump wouldn't be any more efficient.
  Assembler::WriteLoad64Instructions(inst, ScratchRegister, (uint64_t)dest);
  inst[4] = InstReg(op_special, ScratchRegister, zero, ra, ff_jalr);
  inst[5] = InstNOP();
}

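// Decode the immediate encoded by a patchable load sequence. Two shapes are
// recognized: the 4-instruction form written by WriteLoad64Instructions
// (lui/ori/rotate/ori), which packs a sign-extended 48-bit value into three
// 16-bit immediates, and the 6-instruction form, which spreads a full
// 64-bit value across four 16-bit immediates (i0, i1, i3 and i5; the
// intervening instructions are shifts).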
uint64_t Assembler::ExtractLoad64Value(Instruction* inst0) {
  InstImm* i0 = (InstImm*)inst0;
  InstImm* i1 = (InstImm*)i0->next();
  InstReg* i2 = (InstReg*)i1->next();
  InstImm* i3 = (InstImm*)i2->next();
  InstImm* i5 = (InstImm*)i3->next()->next();

  MOZ_ASSERT(i0->extractOpcode() == ((uint32_t)op_lui >> OpcodeShift));
  MOZ_ASSERT(i1->extractOpcode() == ((uint32_t)op_ori >> OpcodeShift));
  MOZ_ASSERT(i3->extractOpcode() == ((uint32_t)op_ori >> OpcodeShift));

  if ((i2->extractOpcode() == ((uint32_t)op_special >> OpcodeShift)) &&
      (i2->extractFunctionField() == ff_dsrl32)) {
    uint64_t value = (uint64_t(i0->extractImm16Value()) << 32) |
                     (uint64_t(i1->extractImm16Value()) << 16) |
                     uint64_t(i3->extractImm16Value());
    return uint64_t((int64_t(value) << 16) >> 16);
  }

  MOZ_ASSERT(i5->extractOpcode() == ((uint32_t)op_ori >> OpcodeShift));
  uint64_t value = (uint64_t(i0->extractImm16Value()) << 48) |
                   (uint64_t(i1->extractImm16Value()) << 32) |
                   (uint64_t(i3->extractImm16Value()) << 16) |
                   uint64_t(i5->extractImm16Value());
  return value;
}

void Assembler::UpdateLoad64Value(Instruction* inst0, uint64_t value) {
  InstImm* i0 = (InstImm*)inst0;
  InstImm* i1 = (InstImm*)i0->next();
  InstReg* i2 = (InstReg*)i1->next();
  InstImm* i3 = (InstImm*)i2->next();
  InstImm* i5 = (InstImm*)i3->next()->next();

  MOZ_ASSERT(i0->extractOpcode() == ((uint32_t)op_lui >> OpcodeShift));
  MOZ_ASSERT(i1->extractOpcode() == ((uint32_t)op_ori >> OpcodeShift));
  MOZ_ASSERT(i3->extractOpcode() == ((uint32_t)op_ori >> OpcodeShift));

  if ((i2->extractOpcode() == ((uint32_t)op_special >> OpcodeShift)) &&
      (i2->extractFunctionField() == ff_dsrl32)) {
    i0->setImm16(Imm16::Lower(Imm32(value >> 32)));
    i1->setImm16(Imm16::Upper(Imm32(value)));
    i3->setImm16(Imm16::Lower(Imm32(value)));
    return;
  }

  MOZ_ASSERT(i5->extractOpcode() == ((uint32_t)op_ori >> OpcodeShift));

  i0->setImm16(Imm16::Upper(Imm32(value >> 32)));
  i1->setImm16(Imm16::Lower(Imm32(value >> 32)));
  i3->setImm16(Imm16::Upper(Imm32(value)));
  i5->setImm16(Imm16::Lower(Imm32(value)));
}

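// Write the 4-instruction patchable sequence that loads a 48-bit
// (sign-extended) value. Note that inst2 encodes ff_dsrl32 with rs_one,
// which the architecture decodes as drotr32: rotating right by 48 bits
// (i.e. left by 16) moves the two halves loaded so far into their final
// positions before the last ori fills in the low 16 bits.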
void Assembler::WriteLoad64Instructions(Instruction* inst0, Register reg,
                                        uint64_t value) {
  Instruction* inst1 = inst0->next();
  Instruction* inst2 = inst1->next();
  Instruction* inst3 = inst2->next();

  // lui: reg = bits[47:32] << 16.
  *inst0 = InstImm(op_lui, zero, reg, Imm16::Lower(Imm32(value >> 32)));
  // ori: reg |= bits[31:16].
  *inst1 = InstImm(op_ori, reg, reg, Imm16::Upper(Imm32(value)));
  // Rotate both halves into their final positions.
  *inst2 = InstReg(op_special, rs_one, reg, reg, 48 - 32, ff_dsrl32);
  // ori: reg |= bits[15:0].
  *inst3 = InstImm(op_ori, reg, reg, Imm16::Lower(Imm32(value)));
}

void Assembler::PatchDataWithValueCheck(CodeLocationLabel label,
                                        ImmPtr newValue, ImmPtr expectedValue) {
  PatchDataWithValueCheck(label, PatchedImmPtr(newValue.value),
                          PatchedImmPtr(expectedValue.value));
}

void Assembler::PatchDataWithValueCheck(CodeLocationLabel label,
                                        PatchedImmPtr newValue,
                                        PatchedImmPtr expectedValue) {
  Instruction* inst = (Instruction*)label.raw();

  // Extract the old value to verify that the patch site is what we expect.
  DebugOnly<uint64_t> value = Assembler::ExtractLoad64Value(inst);
  MOZ_ASSERT(value == uint64_t(expectedValue.value));

  // Replace it with the new value.
  Assembler::UpdateLoad64Value(inst, uint64_t(newValue.value));
}

uint64_t Assembler::ExtractInstructionImmediate(uint8_t* code) {
  InstImm* inst = (InstImm*)code;
  return Assembler::ExtractLoad64Value(inst);
}

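// Toggle a patchable call site: the 4-instruction address load stays in
// place and only the following instruction is swapped between jalr (call
// enabled) and nop (call disabled).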
void Assembler::ToggleCall(CodeLocationLabel inst_, bool enabled) {
  Instruction* inst = (Instruction*)inst_.raw();
  InstImm* i0 = (InstImm*)inst;
  InstImm* i1 = (InstImm*)i0->next();
  InstImm* i3 = (InstImm*)i1->next()->next();
  Instruction* i4 = (Instruction*)i3->next();

  MOZ_ASSERT(i0->extractOpcode() == ((uint32_t)op_lui >> OpcodeShift));
  MOZ_ASSERT(i1->extractOpcode() == ((uint32_t)op_ori >> OpcodeShift));
  MOZ_ASSERT(i3->extractOpcode() == ((uint32_t)op_ori >> OpcodeShift));

  if (enabled) {
    MOZ_ASSERT(i4->extractOpcode() != ((uint32_t)op_lui >> OpcodeShift));
    InstReg jalr = InstReg(op_special, ScratchRegister, zero, ra, ff_jalr);
    *i4 = jalr;
  } else {
    InstNOP nop;
    *i4 = nop;
  }
}