1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
2 * vim: set ts=8 sts=2 et sw=2 tw=80:
3 * This Source Code Form is subject to the terms of the Mozilla Public
4 * License, v. 2.0. If a copy of the MPL was not distributed with this
5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
6
7 #include "jit/mips32/Assembler-mips32.h"
8
9 #include "mozilla/DebugOnly.h"
10 #include "mozilla/Maybe.h"
11
12 using mozilla::DebugOnly;
13
14 using namespace js;
15 using namespace js::jit;
16
// Resets the generator to the start of the o32 argument sequence: no
// argument slots consumed yet, no leading floating-point argument seen,
// and floats still eligible for the FPU argument registers.
ABIArgGenerator::ABIArgGenerator()
    : usedArgSlots_(0),
      firstArgFloatSize_(0),
      useGPRForFloats_(false),
      current_() {}
22
// Assigns the next argument location (register or caller-reserved stack
// offset) for an argument of |type|, following the MIPS o32 calling
// convention: the first four 32-bit argument slots map to a0-a3, 64-bit
// values occupy aligned register pairs (a0/a1, a2/a3) or 8-byte-aligned
// stack slots, and leading floating-point arguments use f12 and f14.
ABIArg ABIArgGenerator::next(MIRType type) {
  Register destReg;
  switch (type) {
    case MIRType::Int32:
    case MIRType::Pointer:
    case MIRType::RefOrNull:
    case MIRType::StackResults:
      // Word-sized arguments consume one slot: a register while any of
      // a0-a3 remain, otherwise the matching stack slot.
      if (GetIntArgReg(usedArgSlots_, &destReg)) {
        current_ = ABIArg(destReg);
      } else {
        current_ = ABIArg(usedArgSlots_ * sizeof(intptr_t));
      }
      usedArgSlots_++;
      break;
    case MIRType::Int64:
      if (!usedArgSlots_) {
        // First argument: the aligned register pair a0/a1.
        current_ = ABIArg(a0, a1);
        usedArgSlots_ = 2;
      } else if (usedArgSlots_ <= 2) {
        // Skip ahead to the next aligned pair, a2/a3.
        current_ = ABIArg(a2, a3);
        usedArgSlots_ = 4;
      } else {
        // Registers exhausted: round up to an 8-byte-aligned stack slot
        // and consume two slots.
        if (usedArgSlots_ < NumIntArgRegs) {
          usedArgSlots_ = NumIntArgRegs;
        }
        usedArgSlots_ += usedArgSlots_ % 2;
        current_ = ABIArg(usedArgSlots_ * sizeof(intptr_t));
        usedArgSlots_ += 2;
      }
      break;
    case MIRType::Float32:
      if (!usedArgSlots_) {
        // A leading float goes in f12 and occupies a single slot.
        current_ = ABIArg(f12.asSingle());
        firstArgFloatSize_ = 1;
      } else if (usedArgSlots_ == firstArgFloatSize_) {
        // An immediately-following FP argument goes in f14.
        current_ = ABIArg(f14.asSingle());
      } else if (useGPRForFloats_ && GetIntArgReg(usedArgSlots_, &destReg)) {
        // When float-in-GPR passing was requested, hand out a0-a3.
        current_ = ABIArg(destReg);
      } else {
        // Otherwise floats that follow integer arguments go on the stack.
        if (usedArgSlots_ < NumIntArgRegs) {
          usedArgSlots_ = NumIntArgRegs;
        }
        current_ = ABIArg(usedArgSlots_ * sizeof(intptr_t));
      }
      usedArgSlots_++;
      break;
    case MIRType::Double:
      if (!usedArgSlots_) {
        // A leading double goes in f12 and reserves slots 0-1.
        current_ = ABIArg(f12);
        usedArgSlots_ = 2;
        firstArgFloatSize_ = 2;
      } else if (usedArgSlots_ == firstArgFloatSize_) {
        // An immediately-following FP argument goes in f14 (slots 2-3).
        current_ = ABIArg(f14);
        usedArgSlots_ = 4;
      } else if (useGPRForFloats_ && usedArgSlots_ <= 2) {
        // Float-in-GPR mode: a double fits in the aligned pair a2/a3.
        current_ = ABIArg(a2, a3);
        usedArgSlots_ = 4;
      } else {
        // Stack: round up to an 8-byte-aligned slot and consume two.
        if (usedArgSlots_ < NumIntArgRegs) {
          usedArgSlots_ = NumIntArgRegs;
        }
        usedArgSlots_ += usedArgSlots_ % 2;
        current_ = ABIArg(usedArgSlots_ * sizeof(intptr_t));
        usedArgSlots_ += 2;
      }
      break;
    default:
      MOZ_CRASH("Unexpected argument type");
  }
  return current_;
}
94
RT(FloatRegister r)95 uint32_t js::jit::RT(FloatRegister r) {
96 MOZ_ASSERT(r.id() < FloatRegisters::RegisterIdLimit);
97 return r.id() << RTShift;
98 }
99
RD(FloatRegister r)100 uint32_t js::jit::RD(FloatRegister r) {
101 MOZ_ASSERT(r.id() < FloatRegisters::RegisterIdLimit);
102 return r.id() << RDShift;
103 }
104
RZ(FloatRegister r)105 uint32_t js::jit::RZ(FloatRegister r) {
106 MOZ_ASSERT(r.id() < FloatRegisters::RegisterIdLimit);
107 return r.id() << RZShift;
108 }
109
SA(FloatRegister r)110 uint32_t js::jit::SA(FloatRegister r) {
111 MOZ_ASSERT(r.id() < FloatRegisters::RegisterIdLimit);
112 return r.id() << SAShift;
113 }
114
// Copies the finished instruction stream into the executable buffer.
void Assembler::executableCopy(uint8_t* buffer) {
  // Copying before assembly is finished would miss pending fixups.
  MOZ_ASSERT(isFinished);
  m_buffer.executableCopy(buffer);
}
119
GetPointer(uint8_t * instPtr)120 uintptr_t Assembler::GetPointer(uint8_t* instPtr) {
121 Instruction* inst = (Instruction*)instPtr;
122 return Assembler::ExtractLuiOriValue(inst, inst->next());
123 }
124
CodeFromJump(Instruction * jump)125 static JitCode* CodeFromJump(Instruction* jump) {
126 uint8_t* target = (uint8_t*)Assembler::ExtractLuiOriValue(jump, jump->next());
127 return JitCode::FromExecutable(target);
128 }
129
TraceJumpRelocations(JSTracer * trc,JitCode * code,CompactBufferReader & reader)130 void Assembler::TraceJumpRelocations(JSTracer* trc, JitCode* code,
131 CompactBufferReader& reader) {
132 while (reader.more()) {
133 JitCode* child =
134 CodeFromJump((Instruction*)(code->raw() + reader.readUnsigned()));
135 TraceManuallyBarrieredEdge(trc, &child, "rel32");
136 }
137 }
138
// Traces one GC pointer embedded in the lui/ori pair at |inst|. If tracing
// moved the referent, the instruction pair is patched with the new address,
// making the code writable first (lazily, shared across relocations via
// |awjc|).
static void TraceOneDataRelocation(JSTracer* trc,
                                   mozilla::Maybe<AutoWritableJitCode>& awjc,
                                   JitCode* code, Instruction* inst) {
  void* ptr = (void*)Assembler::ExtractLuiOriValue(inst, inst->next());
  void* prior = ptr;

  // No barrier needed since these are constants.
  TraceManuallyBarrieredGenericPointerEdge(
      trc, reinterpret_cast<gc::Cell**>(&ptr), "jit-masm-ptr");
  if (ptr != prior) {
    // Make the code writable only once, the first time a patch is needed.
    if (awjc.isNothing()) {
      awjc.emplace(code);
    }
    AssemblerMIPSShared::UpdateLuiOriValue(inst, inst->next(), uint32_t(ptr));
  }
}
155
156 /* static */
TraceDataRelocations(JSTracer * trc,JitCode * code,CompactBufferReader & reader)157 void Assembler::TraceDataRelocations(JSTracer* trc, JitCode* code,
158 CompactBufferReader& reader) {
159 mozilla::Maybe<AutoWritableJitCode> awjc;
160 while (reader.more()) {
161 size_t offset = reader.readUnsigned();
162 Instruction* inst = (Instruction*)(code->raw() + offset);
163 TraceOneDataRelocation(trc, awjc, code, inst);
164 }
165 }
166
UnsignedCondition(Condition cond)167 Assembler::Condition Assembler::UnsignedCondition(Condition cond) {
168 switch (cond) {
169 case Zero:
170 case NonZero:
171 return cond;
172 case LessThan:
173 case Below:
174 return Below;
175 case LessThanOrEqual:
176 case BelowOrEqual:
177 return BelowOrEqual;
178 case GreaterThan:
179 case Above:
180 return Above;
181 case AboveOrEqual:
182 case GreaterThanOrEqual:
183 return AboveOrEqual;
184 default:
185 MOZ_CRASH("unexpected condition");
186 }
187 }
188
ConditionWithoutEqual(Condition cond)189 Assembler::Condition Assembler::ConditionWithoutEqual(Condition cond) {
190 switch (cond) {
191 case LessThan:
192 case LessThanOrEqual:
193 return LessThan;
194 case Below:
195 case BelowOrEqual:
196 return Below;
197 case GreaterThan:
198 case GreaterThanOrEqual:
199 return GreaterThan;
200 case Above:
201 case AboveOrEqual:
202 return Above;
203 default:
204 MOZ_CRASH("unexpected condition");
205 }
206 }
207
// Patches the code at a bound CodeLabel so it refers to its final target
// address inside |rawCode|. Unbound labels are left untouched.
void Assembler::Bind(uint8_t* rawCode, const CodeLabel& label) {
  if (label.patchAt().bound()) {
    auto mode = label.linkMode();
    intptr_t offset = label.patchAt().offset();
    intptr_t target = label.target().offset();

    if (mode == CodeLabel::RawPointer) {
      // The patch site holds the absolute address as raw data.
      *reinterpret_cast<const void**>(rawCode + offset) = rawCode + target;
    } else {
      // The patch site is a lui/ori pair that materializes the address.
      MOZ_ASSERT(mode == CodeLabel::MoveImmediate ||
                 mode == CodeLabel::JumpImmediate);
      Instruction* inst = (Instruction*)(rawCode + offset);
      AssemblerMIPSShared::UpdateLuiOriValue(inst, inst->next(),
                                             (uint32_t)(rawCode + target));
    }
  }
}
225
// Binds the branch at buffer offset |branch| to |target|, rewriting the
// reserved instruction block at |inst| into the shortest sequence that
// reaches the target: a short PC-relative branch when the offset fits in
// 16 bits, otherwise a lui/ori + jr/jalr long jump recorded for later
// absolute patching via addLongJump.
void Assembler::bind(InstImm* inst, uintptr_t branch, uintptr_t target) {
  int32_t offset = target - branch;
  InstImm inst_bgezal = InstImm(op_regimm, zero, rt_bgezal, BOffImm16(0));
  InstImm inst_beq = InstImm(op_beq, zero, zero, BOffImm16(0));

  // If encoded offset is 4, then the jump must be short
  if (BOffImm16(inst[0]).decode() == 4) {
    MOZ_ASSERT(BOffImm16::IsInRange(offset));
    inst[0].setBOffImm16(BOffImm16(offset));
    inst[1].makeNop();  // Clear the delay slot.
    return;
  }

  // Generate the long jump for calls because return address has to be the
  // address after the reserved block.
  if (inst[0].encode() == inst_bgezal.encode()) {
    addLongJump(BufferOffset(branch), BufferOffset(target));
    // Address is filled in later when the long jump is patched.
    Assembler::WriteLuiOriInstructions(inst, &inst[1], ScratchRegister,
                                       LabelBase::INVALID_OFFSET);
    inst[2] = InstReg(op_special, ScratchRegister, zero, ra, ff_jalr).encode();
    // There is 1 nop after this.
    return;
  }

  if (BOffImm16::IsInRange(offset)) {
    // Anything that is neither a call nor an unconditional branch is a
    // conditional branch with extra reserved slots to skip.
    bool conditional = (inst[0].encode() != inst_bgezal.encode() &&
                        inst[0].encode() != inst_beq.encode());

    inst[0].setBOffImm16(BOffImm16(offset));
    inst[1].makeNop();

    // Skip the trailing nops in conditional branches.
    if (conditional) {
      inst[2] = InstImm(op_regimm, zero, rt_bgez, BOffImm16(3 * sizeof(void*)))
                    .encode();
      // There are 2 nops after this
    }
    return;
  }

  if (inst[0].encode() == inst_beq.encode()) {
    // Handle long unconditional jump.
    addLongJump(BufferOffset(branch), BufferOffset(target));
    Assembler::WriteLuiOriInstructions(inst, &inst[1], ScratchRegister,
                                       LabelBase::INVALID_OFFSET);
    inst[2] = InstReg(op_special, ScratchRegister, zero, zero, ff_jr).encode();
    // There is 1 nop after this.
  } else {
    // Handle long conditional jump: invert the condition so the inverted
    // branch skips the long-jump sequence that follows it.
    inst[0] = invertBranch(inst[0], BOffImm16(5 * sizeof(void*)));
    // No need for a "nop" here because we can clobber scratch.
    addLongJump(BufferOffset(branch + sizeof(void*)), BufferOffset(target));
    Assembler::WriteLuiOriInstructions(&inst[1], &inst[2], ScratchRegister,
                                       LabelBase::INVALID_OFFSET);
    inst[3] = InstReg(op_special, ScratchRegister, zero, zero, ff_jr).encode();
    // There is 1 nop after this.
  }
}
284
processCodeLabels(uint8_t * rawCode)285 void Assembler::processCodeLabels(uint8_t* rawCode) {
286 for (const CodeLabel& label : codeLabels_) {
287 Bind(rawCode, label);
288 }
289 }
290
PatchWrite_NearCallSize()291 uint32_t Assembler::PatchWrite_NearCallSize() { return 4 * sizeof(uint32_t); }
292
// Rewrites the PatchWrite_NearCallSize() bytes at |start| into a call to
// |toCall|.
void Assembler::PatchWrite_NearCall(CodeLocationLabel start,
                                    CodeLocationLabel toCall) {
  Instruction* inst = (Instruction*)start.raw();
  uint8_t* dest = toCall.raw();

  // Overwrite whatever instruction used to be here with a call.
  // Always use long jump for two reasons:
  // - Jump has to be the same size because of PatchWrite_NearCallSize.
  // - Return address has to be at the end of replaced block.
  // Short jump wouldn't be more efficient.
  Assembler::WriteLuiOriInstructions(inst, &inst[1], ScratchRegister,
                                     (uint32_t)dest);
  inst[2] = InstReg(op_special, ScratchRegister, zero, ra, ff_jalr);
  inst[3] = InstNOP();  // Branch delay slot.
}
308
ExtractLuiOriValue(Instruction * inst0,Instruction * inst1)309 uint32_t Assembler::ExtractLuiOriValue(Instruction* inst0, Instruction* inst1) {
310 InstImm* i0 = (InstImm*)inst0;
311 InstImm* i1 = (InstImm*)inst1;
312 MOZ_ASSERT(i0->extractOpcode() == ((uint32_t)op_lui >> OpcodeShift));
313 MOZ_ASSERT(i1->extractOpcode() == ((uint32_t)op_ori >> OpcodeShift));
314
315 uint32_t value = i0->extractImm16Value() << 16;
316 value = value | i1->extractImm16Value();
317 return value;
318 }
319
WriteLuiOriInstructions(Instruction * inst0,Instruction * inst1,Register reg,uint32_t value)320 void Assembler::WriteLuiOriInstructions(Instruction* inst0, Instruction* inst1,
321 Register reg, uint32_t value) {
322 *inst0 = InstImm(op_lui, zero, reg, Imm16::Upper(Imm32(value)));
323 *inst1 = InstImm(op_ori, reg, reg, Imm16::Lower(Imm32(value)));
324 }
325
PatchDataWithValueCheck(CodeLocationLabel label,ImmPtr newValue,ImmPtr expectedValue)326 void Assembler::PatchDataWithValueCheck(CodeLocationLabel label,
327 ImmPtr newValue, ImmPtr expectedValue) {
328 PatchDataWithValueCheck(label, PatchedImmPtr(newValue.value),
329 PatchedImmPtr(expectedValue.value));
330 }
331
// Replaces the immediate held in the lui/ori pair at |label| with
// |newValue|, asserting (debug-only) that the pair currently holds
// |expectedValue|.
void Assembler::PatchDataWithValueCheck(CodeLocationLabel label,
                                        PatchedImmPtr newValue,
                                        PatchedImmPtr expectedValue) {
  Instruction* inst = (Instruction*)label.raw();

  // Extract old Value
  DebugOnly<uint32_t> value = Assembler::ExtractLuiOriValue(&inst[0], &inst[1]);
  MOZ_ASSERT(value == uint32_t(expectedValue.value));

  // Replace with new value
  AssemblerMIPSShared::UpdateLuiOriValue(inst, inst->next(),
                                         uint32_t(newValue.value));
}
345
ExtractInstructionImmediate(uint8_t * code)346 uint32_t Assembler::ExtractInstructionImmediate(uint8_t* code) {
347 InstImm* inst = (InstImm*)code;
348 return Assembler::ExtractLuiOriValue(inst, inst->next());
349 }
350
ToggleCall(CodeLocationLabel inst_,bool enabled)351 void Assembler::ToggleCall(CodeLocationLabel inst_, bool enabled) {
352 Instruction* inst = (Instruction*)inst_.raw();
353 InstImm* i0 = (InstImm*)inst;
354 InstImm* i1 = (InstImm*)i0->next();
355 Instruction* i2 = (Instruction*)i1->next();
356
357 MOZ_ASSERT(i0->extractOpcode() == ((uint32_t)op_lui >> OpcodeShift));
358 MOZ_ASSERT(i1->extractOpcode() == ((uint32_t)op_ori >> OpcodeShift));
359
360 if (enabled) {
361 InstReg jalr = InstReg(op_special, ScratchRegister, zero, ra, ff_jalr);
362 *i2 = jalr;
363 } else {
364 InstNOP nop;
365 *i2 = nop;
366 }
367 }
368