/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/x86-shared/MacroAssembler-x86-shared.h"

#include "jit/JitFrames.h"
#include "jit/MacroAssembler.h"

#include "jit/MacroAssembler-inl.h"

using namespace js;
using namespace js::jit;

17 // Note: this function clobbers the input register.
18 void
clampDoubleToUint8(FloatRegister input,Register output)19 MacroAssembler::clampDoubleToUint8(FloatRegister input, Register output)
20 {
21 ScratchDoubleScope scratch(*this);
22 MOZ_ASSERT(input != scratch);
23 Label positive, done;
24
25 // <= 0 or NaN --> 0
26 zeroDouble(scratch);
27 branchDouble(DoubleGreaterThan, input, scratch, &positive);
28 {
29 move32(Imm32(0), output);
30 jump(&done);
31 }
32
33 bind(&positive);
34
35 // Add 0.5 and truncate.
36 loadConstantDouble(0.5, scratch);
37 addDouble(scratch, input);
38
39 Label outOfRange;
40
41 // Truncate to int32 and ensure the result <= 255. This relies on the
42 // processor setting output to a value > 255 for doubles outside the int32
43 // range (for instance 0x80000000).
44 vcvttsd2si(input, output);
45 branch32(Assembler::Above, output, Imm32(255), &outOfRange);
46 {
47 // Check if we had a tie.
48 convertInt32ToDouble(output, scratch);
49 branchDouble(DoubleNotEqual, input, scratch, &done);
50
51 // It was a tie. Mask out the ones bit to get an even value.
52 // See also js_TypedArray_uint8_clamp_double.
53 and32(Imm32(~1), output);
54 jump(&done);
55 }
56
57 // > 255 --> 255
58 bind(&outOfRange);
59 {
60 move32(Imm32(255), output);
61 }
62
63 bind(&done);
64 }
65
66 void
alignFrameForICArguments(AfterICSaveLive & aic)67 MacroAssembler::alignFrameForICArguments(AfterICSaveLive& aic)
68 {
69 // Exists for MIPS compatibility.
70 }
71
72 void
restoreFrameAlignmentForICArguments(AfterICSaveLive & aic)73 MacroAssembler::restoreFrameAlignmentForICArguments(AfterICSaveLive& aic)
74 {
75 // Exists for MIPS compatibility.
76 }
77
78 bool
buildOOLFakeExitFrame(void * fakeReturnAddr)79 MacroAssemblerX86Shared::buildOOLFakeExitFrame(void* fakeReturnAddr)
80 {
81 uint32_t descriptor = MakeFrameDescriptor(asMasm().framePushed(), JitFrame_IonJS);
82 asMasm().Push(Imm32(descriptor));
83 asMasm().Push(ImmPtr(fakeReturnAddr));
84 return true;
85 }
86
87 void
branchNegativeZero(FloatRegister reg,Register scratch,Label * label,bool maybeNonZero)88 MacroAssemblerX86Shared::branchNegativeZero(FloatRegister reg,
89 Register scratch,
90 Label* label,
91 bool maybeNonZero)
92 {
93 // Determines whether the low double contained in the XMM register reg
94 // is equal to -0.0.
95
96 #if defined(JS_CODEGEN_X86)
97 Label nonZero;
98
99 // if not already compared to zero
100 if (maybeNonZero) {
101 ScratchDoubleScope scratchDouble(asMasm());
102
103 // Compare to zero. Lets through {0, -0}.
104 zeroDouble(scratchDouble);
105
106 // If reg is non-zero, jump to nonZero.
107 branchDouble(DoubleNotEqual, reg, scratchDouble, &nonZero);
108 }
109 // Input register is either zero or negative zero. Retrieve sign of input.
110 vmovmskpd(reg, scratch);
111
112 // If reg is 1 or 3, input is negative zero.
113 // If reg is 0 or 2, input is a normal zero.
114 branchTest32(NonZero, scratch, Imm32(1), label);
115
116 bind(&nonZero);
117 #elif defined(JS_CODEGEN_X64)
118 vmovq(reg, scratch);
119 cmpq(Imm32(1), scratch);
120 j(Overflow, label);
121 #endif
122 }
123
124 void
branchNegativeZeroFloat32(FloatRegister reg,Register scratch,Label * label)125 MacroAssemblerX86Shared::branchNegativeZeroFloat32(FloatRegister reg,
126 Register scratch,
127 Label* label)
128 {
129 vmovd(reg, scratch);
130 cmp32(scratch, Imm32(1));
131 j(Overflow, label);
132 }
133
134 MacroAssembler&
asMasm()135 MacroAssemblerX86Shared::asMasm()
136 {
137 return *static_cast<MacroAssembler*>(this);
138 }
139
140 const MacroAssembler&
asMasm() const141 MacroAssemblerX86Shared::asMasm() const
142 {
143 return *static_cast<const MacroAssembler*>(this);
144 }
145
146 template<typename T>
147 void
compareExchangeToTypedIntArray(Scalar::Type arrayType,const T & mem,Register oldval,Register newval,Register temp,AnyRegister output)148 MacroAssemblerX86Shared::compareExchangeToTypedIntArray(Scalar::Type arrayType, const T& mem,
149 Register oldval, Register newval,
150 Register temp, AnyRegister output)
151 {
152 switch (arrayType) {
153 case Scalar::Int8:
154 compareExchange8SignExtend(mem, oldval, newval, output.gpr());
155 break;
156 case Scalar::Uint8:
157 compareExchange8ZeroExtend(mem, oldval, newval, output.gpr());
158 break;
159 case Scalar::Int16:
160 compareExchange16SignExtend(mem, oldval, newval, output.gpr());
161 break;
162 case Scalar::Uint16:
163 compareExchange16ZeroExtend(mem, oldval, newval, output.gpr());
164 break;
165 case Scalar::Int32:
166 compareExchange32(mem, oldval, newval, output.gpr());
167 break;
168 case Scalar::Uint32:
169 // At the moment, the code in MCallOptimize.cpp requires the output
170 // type to be double for uint32 arrays. See bug 1077305.
171 MOZ_ASSERT(output.isFloat());
172 compareExchange32(mem, oldval, newval, temp);
173 asMasm().convertUInt32ToDouble(temp, output.fpu());
174 break;
175 default:
176 MOZ_CRASH("Invalid typed array type");
177 }
178 }
179
180 template void
181 MacroAssemblerX86Shared::compareExchangeToTypedIntArray(Scalar::Type arrayType, const Address& mem,
182 Register oldval, Register newval, Register temp,
183 AnyRegister output);
184 template void
185 MacroAssemblerX86Shared::compareExchangeToTypedIntArray(Scalar::Type arrayType, const BaseIndex& mem,
186 Register oldval, Register newval, Register temp,
187 AnyRegister output);
188
189 template<typename T>
190 void
atomicExchangeToTypedIntArray(Scalar::Type arrayType,const T & mem,Register value,Register temp,AnyRegister output)191 MacroAssemblerX86Shared::atomicExchangeToTypedIntArray(Scalar::Type arrayType, const T& mem,
192 Register value, Register temp, AnyRegister output)
193 {
194 switch (arrayType) {
195 case Scalar::Int8:
196 atomicExchange8SignExtend(mem, value, output.gpr());
197 break;
198 case Scalar::Uint8:
199 atomicExchange8ZeroExtend(mem, value, output.gpr());
200 break;
201 case Scalar::Int16:
202 atomicExchange16SignExtend(mem, value, output.gpr());
203 break;
204 case Scalar::Uint16:
205 atomicExchange16ZeroExtend(mem, value, output.gpr());
206 break;
207 case Scalar::Int32:
208 atomicExchange32(mem, value, output.gpr());
209 break;
210 case Scalar::Uint32:
211 // At the moment, the code in MCallOptimize.cpp requires the output
212 // type to be double for uint32 arrays. See bug 1077305.
213 MOZ_ASSERT(output.isFloat());
214 atomicExchange32(mem, value, temp);
215 asMasm().convertUInt32ToDouble(temp, output.fpu());
216 break;
217 default:
218 MOZ_CRASH("Invalid typed array type");
219 }
220 }
221
222 template void
223 MacroAssemblerX86Shared::atomicExchangeToTypedIntArray(Scalar::Type arrayType, const Address& mem,
224 Register value, Register temp, AnyRegister output);
225 template void
226 MacroAssemblerX86Shared::atomicExchangeToTypedIntArray(Scalar::Type arrayType, const BaseIndex& mem,
227 Register value, Register temp, AnyRegister output);
228
229 template<class T, class Map>
230 T*
231 MacroAssemblerX86Shared::getConstant(const typename T::Pod& value, Map& map,
232 Vector<T, 0, SystemAllocPolicy>& vec)
233 {
234 typedef typename Map::AddPtr AddPtr;
235 if (!map.initialized()) {
236 enoughMemory_ &= map.init();
237 if (!enoughMemory_)
238 return nullptr;
239 }
240 size_t index;
241 if (AddPtr p = map.lookupForAdd(value)) {
242 index = p->value();
243 } else {
244 index = vec.length();
245 enoughMemory_ &= vec.append(T(value));
246 if (!enoughMemory_)
247 return nullptr;
248 enoughMemory_ &= map.add(p, value, index);
249 if (!enoughMemory_)
250 return nullptr;
251 }
252 return &vec[index];
253 }
254
255 MacroAssemblerX86Shared::Float*
getFloat(float f)256 MacroAssemblerX86Shared::getFloat(float f)
257 {
258 return getConstant<Float, FloatMap>(f, floatMap_, floats_);
259 }
260
261 MacroAssemblerX86Shared::Double*
getDouble(double d)262 MacroAssemblerX86Shared::getDouble(double d)
263 {
264 return getConstant<Double, DoubleMap>(d, doubleMap_, doubles_);
265 }
266
267 MacroAssemblerX86Shared::SimdData*
getSimdData(const SimdConstant & v)268 MacroAssemblerX86Shared::getSimdData(const SimdConstant& v)
269 {
270 return getConstant<SimdData, SimdMap>(v, simdMap_, simds_);
271 }
272
273 template<class T, class Map>
274 static bool
MergeConstants(size_t delta,const Vector<T,0,SystemAllocPolicy> & other,Map & map,Vector<T,0,SystemAllocPolicy> & vec)275 MergeConstants(size_t delta, const Vector<T, 0, SystemAllocPolicy>& other,
276 Map& map, Vector<T, 0, SystemAllocPolicy>& vec)
277 {
278 typedef typename Map::AddPtr AddPtr;
279 if (!map.initialized() && !map.init())
280 return false;
281
282 for (const T& c : other) {
283 size_t index;
284 if (AddPtr p = map.lookupForAdd(c.value)) {
285 index = p->value();
286 } else {
287 index = vec.length();
288 if (!vec.append(T(c.value)) || !map.add(p, c.value, index))
289 return false;
290 }
291 MacroAssemblerX86Shared::UsesVector& uses = vec[index].uses;
292 for (CodeOffset use : c.uses) {
293 use.offsetBy(delta);
294 if (!uses.append(use))
295 return false;
296 }
297 }
298
299 return true;
300 }
301
302 bool
asmMergeWith(const MacroAssemblerX86Shared & other)303 MacroAssemblerX86Shared::asmMergeWith(const MacroAssemblerX86Shared& other)
304 {
305 size_t sizeBefore = masm.size();
306 if (!Assembler::asmMergeWith(other))
307 return false;
308 if (!MergeConstants<Double, DoubleMap>(sizeBefore, other.doubles_, doubleMap_, doubles_))
309 return false;
310 if (!MergeConstants<Float, FloatMap>(sizeBefore, other.floats_, floatMap_, floats_))
311 return false;
312 if (!MergeConstants<SimdData, SimdMap>(sizeBefore, other.simds_, simdMap_, simds_))
313 return false;
314 return true;
315 }
316
//{{{ check_macroassembler_style
// ===============================================================
// Stack manipulation functions.

321 void
PushRegsInMask(LiveRegisterSet set)322 MacroAssembler::PushRegsInMask(LiveRegisterSet set)
323 {
324 FloatRegisterSet fpuSet(set.fpus().reduceSetForPush());
325 unsigned numFpu = fpuSet.size();
326 int32_t diffF = fpuSet.getPushSizeInBytes();
327 int32_t diffG = set.gprs().size() * sizeof(intptr_t);
328
329 // On x86, always use push to push the integer registers, as it's fast
330 // on modern hardware and it's a small instruction.
331 for (GeneralRegisterBackwardIterator iter(set.gprs()); iter.more(); iter++) {
332 diffG -= sizeof(intptr_t);
333 Push(*iter);
334 }
335 MOZ_ASSERT(diffG == 0);
336
337 reserveStack(diffF);
338 for (FloatRegisterBackwardIterator iter(fpuSet); iter.more(); iter++) {
339 FloatRegister reg = *iter;
340 diffF -= reg.size();
341 numFpu -= 1;
342 Address spillAddress(StackPointer, diffF);
343 if (reg.isDouble())
344 storeDouble(reg, spillAddress);
345 else if (reg.isSingle())
346 storeFloat32(reg, spillAddress);
347 else if (reg.isSimd128())
348 storeUnalignedFloat32x4(reg, spillAddress);
349 else
350 MOZ_CRASH("Unknown register type.");
351 }
352 MOZ_ASSERT(numFpu == 0);
353 // x64 padding to keep the stack aligned on uintptr_t. Keep in sync with
354 // GetPushBytesInSize.
355 diffF -= diffF % sizeof(uintptr_t);
356 MOZ_ASSERT(diffF == 0);
357 }
358
359 void
PopRegsInMaskIgnore(LiveRegisterSet set,LiveRegisterSet ignore)360 MacroAssembler::PopRegsInMaskIgnore(LiveRegisterSet set, LiveRegisterSet ignore)
361 {
362 FloatRegisterSet fpuSet(set.fpus().reduceSetForPush());
363 unsigned numFpu = fpuSet.size();
364 int32_t diffG = set.gprs().size() * sizeof(intptr_t);
365 int32_t diffF = fpuSet.getPushSizeInBytes();
366 const int32_t reservedG = diffG;
367 const int32_t reservedF = diffF;
368
369 for (FloatRegisterBackwardIterator iter(fpuSet); iter.more(); iter++) {
370 FloatRegister reg = *iter;
371 diffF -= reg.size();
372 numFpu -= 1;
373 if (ignore.has(reg))
374 continue;
375
376 Address spillAddress(StackPointer, diffF);
377 if (reg.isDouble())
378 loadDouble(spillAddress, reg);
379 else if (reg.isSingle())
380 loadFloat32(spillAddress, reg);
381 else if (reg.isSimd128())
382 loadUnalignedFloat32x4(spillAddress, reg);
383 else
384 MOZ_CRASH("Unknown register type.");
385 }
386 freeStack(reservedF);
387 MOZ_ASSERT(numFpu == 0);
388 // x64 padding to keep the stack aligned on uintptr_t. Keep in sync with
389 // GetPushBytesInSize.
390 diffF -= diffF % sizeof(uintptr_t);
391 MOZ_ASSERT(diffF == 0);
392
393 // On x86, use pop to pop the integer registers, if we're not going to
394 // ignore any slots, as it's fast on modern hardware and it's a small
395 // instruction.
396 if (ignore.emptyGeneral()) {
397 for (GeneralRegisterForwardIterator iter(set.gprs()); iter.more(); iter++) {
398 diffG -= sizeof(intptr_t);
399 Pop(*iter);
400 }
401 } else {
402 for (GeneralRegisterBackwardIterator iter(set.gprs()); iter.more(); iter++) {
403 diffG -= sizeof(intptr_t);
404 if (!ignore.has(*iter))
405 loadPtr(Address(StackPointer, diffG), *iter);
406 }
407 freeStack(reservedG);
408 }
409 MOZ_ASSERT(diffG == 0);
410 }
411
412 void
Push(const Operand op)413 MacroAssembler::Push(const Operand op)
414 {
415 push(op);
416 adjustFrame(sizeof(intptr_t));
417 }
418
419 void
Push(Register reg)420 MacroAssembler::Push(Register reg)
421 {
422 push(reg);
423 adjustFrame(sizeof(intptr_t));
424 }
425
426 void
Push(const Imm32 imm)427 MacroAssembler::Push(const Imm32 imm)
428 {
429 push(imm);
430 adjustFrame(sizeof(intptr_t));
431 }
432
433 void
Push(const ImmWord imm)434 MacroAssembler::Push(const ImmWord imm)
435 {
436 push(imm);
437 adjustFrame(sizeof(intptr_t));
438 }
439
440 void
Push(const ImmPtr imm)441 MacroAssembler::Push(const ImmPtr imm)
442 {
443 Push(ImmWord(uintptr_t(imm.value)));
444 }
445
446 void
Push(const ImmGCPtr ptr)447 MacroAssembler::Push(const ImmGCPtr ptr)
448 {
449 push(ptr);
450 adjustFrame(sizeof(intptr_t));
451 }
452
453 void
Push(FloatRegister t)454 MacroAssembler::Push(FloatRegister t)
455 {
456 push(t);
457 adjustFrame(sizeof(double));
458 }
459
460 void
Pop(const Operand op)461 MacroAssembler::Pop(const Operand op)
462 {
463 pop(op);
464 implicitPop(sizeof(intptr_t));
465 }
466
467 void
Pop(Register reg)468 MacroAssembler::Pop(Register reg)
469 {
470 pop(reg);
471 implicitPop(sizeof(intptr_t));
472 }
473
474 void
Pop(FloatRegister reg)475 MacroAssembler::Pop(FloatRegister reg)
476 {
477 pop(reg);
478 implicitPop(sizeof(double));
479 }
480
481 void
Pop(const ValueOperand & val)482 MacroAssembler::Pop(const ValueOperand& val)
483 {
484 popValue(val);
485 implicitPop(sizeof(Value));
486 }
487
// ===============================================================
// Simple call functions.

491 CodeOffset
call(Register reg)492 MacroAssembler::call(Register reg)
493 {
494 return Assembler::call(reg);
495 }
496
497 CodeOffset
call(Label * label)498 MacroAssembler::call(Label* label)
499 {
500 return Assembler::call(label);
501 }
502
503 void
call(const Address & addr)504 MacroAssembler::call(const Address& addr)
505 {
506 Assembler::call(Operand(addr.base, addr.offset));
507 }
508
509 void
call(wasm::SymbolicAddress target)510 MacroAssembler::call(wasm::SymbolicAddress target)
511 {
512 mov(target, eax);
513 Assembler::call(eax);
514 }
515
516 void
call(ImmWord target)517 MacroAssembler::call(ImmWord target)
518 {
519 mov(target, eax);
520 Assembler::call(eax);
521 }
522
523 void
call(ImmPtr target)524 MacroAssembler::call(ImmPtr target)
525 {
526 call(ImmWord(uintptr_t(target.value)));
527 }
528
529 void
call(JitCode * target)530 MacroAssembler::call(JitCode* target)
531 {
532 Assembler::call(target);
533 }
534
535 CodeOffset
callWithPatch()536 MacroAssembler::callWithPatch()
537 {
538 return Assembler::callWithPatch();
539 }
540 void
patchCall(uint32_t callerOffset,uint32_t calleeOffset)541 MacroAssembler::patchCall(uint32_t callerOffset, uint32_t calleeOffset)
542 {
543 Assembler::patchCall(callerOffset, calleeOffset);
544 }
545
546 void
callAndPushReturnAddress(Register reg)547 MacroAssembler::callAndPushReturnAddress(Register reg)
548 {
549 call(reg);
550 }
551
552 void
callAndPushReturnAddress(Label * label)553 MacroAssembler::callAndPushReturnAddress(Label* label)
554 {
555 call(label);
556 }
557
// ===============================================================
// Jit Frames.

561 uint32_t
pushFakeReturnAddress(Register scratch)562 MacroAssembler::pushFakeReturnAddress(Register scratch)
563 {
564 CodeLabel cl;
565
566 mov(cl.patchAt(), scratch);
567 Push(scratch);
568 use(cl.target());
569 uint32_t retAddr = currentOffset();
570
571 addCodeLabel(cl);
572 return retAddr;
573 }
574
//}}} check_macroassembler_style