1 /*
2  * Copyright (C) 2008 Apple Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
17  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24  */
25 
26 #ifndef MacroAssemblerX86Common_h
27 #define MacroAssemblerX86Common_h
28 
29 #if ENABLE(ASSEMBLER)
30 
31 #include "X86Assembler.h"
32 #include "AbstractMacroAssembler.h"
33 
34 namespace JSC {
35 
class MacroAssemblerX86Common : public AbstractMacroAssembler<X86Assembler> {
protected:
#if CPU(X86_64)
    // r11 is reserved as a scratch register for the macro assembler's internal
    // use on x86-64 (see e.g. loadDouble(const void*, FPRegisterID)).
    static const X86Registers::RegisterID scratchRegister = X86Registers::r11;
#endif

    // Flag bits OR'ed into DoubleCondition values alongside the raw x86
    // condition code; the COMPILE_ASSERT below the enum checks that they
    // cannot collide with any condition code actually used.
    static const int DoubleConditionBitInvert = 0x10;
    static const int DoubleConditionBitSpecial = 0x20;
    static const int DoubleConditionBits = DoubleConditionBitInvert | DoubleConditionBitSpecial;

public:
    typedef X86Assembler::FPRegisterID FPRegisterID;
    typedef X86Assembler::XMMRegisterID XMMRegisterID;
49 
isCompactPtrAlignedAddressOffset(ptrdiff_t value)50     static bool isCompactPtrAlignedAddressOffset(ptrdiff_t value)
51     {
52         return value >= -128 && value <= 127;
53     }
54 
    // Integer comparison conditions, mapped one-to-one onto x86 condition
    // codes. Above/Below etc. are the unsigned comparisons; Greater/Less the
    // signed ones.
    enum RelationalCondition {
        Equal = X86Assembler::ConditionE,
        NotEqual = X86Assembler::ConditionNE,
        Above = X86Assembler::ConditionA,
        AboveOrEqual = X86Assembler::ConditionAE,
        Below = X86Assembler::ConditionB,
        BelowOrEqual = X86Assembler::ConditionBE,
        GreaterThan = X86Assembler::ConditionG,
        GreaterThanOrEqual = X86Assembler::ConditionGE,
        LessThan = X86Assembler::ConditionL,
        LessThanOrEqual = X86Assembler::ConditionLE
    };
67 
    // Conditions on the flags produced by an arithmetic/logical operation.
    enum ResultCondition {
        Overflow = X86Assembler::ConditionO,
        Signed = X86Assembler::ConditionS,
        Zero = X86Assembler::ConditionE,
        NonZero = X86Assembler::ConditionNE
    };
74 
    // Double-precision comparison conditions. Each value is a raw x86
    // condition code, optionally tagged with DoubleConditionBitInvert and/or
    // DoubleConditionBitSpecial (defined above). NOTE(review): the tag
    // semantics (invert = compare operands in swapped order, special = extra
    // NaN fix-up) are inferred from the names — confirm against the double
    // compare/branch emitters that consume these values.
    enum DoubleCondition {
        // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
        DoubleEqual = X86Assembler::ConditionE | DoubleConditionBitSpecial,
        DoubleNotEqual = X86Assembler::ConditionNE,
        DoubleGreaterThan = X86Assembler::ConditionA,
        DoubleGreaterThanOrEqual = X86Assembler::ConditionAE,
        DoubleLessThan = X86Assembler::ConditionA | DoubleConditionBitInvert,
        DoubleLessThanOrEqual = X86Assembler::ConditionAE | DoubleConditionBitInvert,
        // If either operand is NaN, these conditions always evaluate to true.
        DoubleEqualOrUnordered = X86Assembler::ConditionE,
        DoubleNotEqualOrUnordered = X86Assembler::ConditionNE | DoubleConditionBitSpecial,
        DoubleGreaterThanOrUnordered = X86Assembler::ConditionB | DoubleConditionBitInvert,
        DoubleGreaterThanOrEqualOrUnordered = X86Assembler::ConditionBE | DoubleConditionBitInvert,
        DoubleLessThanOrUnordered = X86Assembler::ConditionB,
        DoubleLessThanOrEqualOrUnordered = X86Assembler::ConditionBE,
    };
    // Sanity check: the DoubleCondition tag bits must not overlap any of the
    // condition codes used in the enum above.
    COMPILE_ASSERT(
        !((X86Assembler::ConditionE | X86Assembler::ConditionNE | X86Assembler::ConditionA | X86Assembler::ConditionAE | X86Assembler::ConditionB | X86Assembler::ConditionBE) & DoubleConditionBits),
        DoubleConditionBits_should_not_interfere_with_X86Assembler_Condition_codes);
94 
    // esp is the stack pointer on both x86 and x86-64.
    static const RegisterID stackPointerRegister = X86Registers::esp;

#if ENABLE(JIT_CONSTANT_BLINDING)
    // Constant-blinding heuristic: values with fewer than 24 significant bits
    // are considered harmless and are not blinded.
    static bool shouldBlindForSpecificArch(uint32_t value) { return value >= 0x00ffffff; }
#if CPU(X86_64)
    static bool shouldBlindForSpecificArch(uint64_t value) { return value >= 0x00ffffff; }
#if OS(DARWIN) // On 64-bit systems other than DARWIN uint64_t and uintptr_t are the same type so overload is prohibited.
    static bool shouldBlindForSpecificArch(uintptr_t value) { return value >= 0x00ffffff; }
#endif
#endif
#endif
106 
    // Integer arithmetic operations:
    //
    // Operations are typically two operand - operation(source, srcDst)
    // For many operations the source may be a TrustedImm32, the srcDst operand
    // may often be a memory location (explicitly described using an Address
    // object).
113 
    // add32 — 32-bit addition in register/immediate/memory forms.

    void add32(RegisterID src, RegisterID dest)
    {
        m_assembler.addl_rr(src, dest);
    }

    void add32(TrustedImm32 imm, Address address)
    {
        m_assembler.addl_im(imm.m_value, address.offset, address.base);
    }

    void add32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.addl_ir(imm.m_value, dest);
    }

    void add32(Address src, RegisterID dest)
    {
        m_assembler.addl_mr(src.offset, src.base, dest);
    }

    void add32(RegisterID src, Address dest)
    {
        m_assembler.addl_rm(src, dest.offset, dest.base);
    }

    // dest = src + imm, computed with lea so dest need not equal src.
    // Note that lea, unlike the add forms above, does not update the flags.
    void add32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        m_assembler.leal_mr(imm.m_value, src, dest);
    }
143 
    // dest &= src (32-bit bitwise AND).
    void and32(RegisterID src, RegisterID dest)
    {
        m_assembler.andl_rr(src, dest);
    }
148 
    // dest = a + b, computed via lea so dest may alias either input.
    void add32(RegisterID a, RegisterID b, RegisterID dest)
    {
        x86Lea32(BaseIndex(a, b, TimesOne), dest);
    }
153 
x86Lea32(BaseIndex index,RegisterID dest)154     void x86Lea32(BaseIndex index, RegisterID dest)
155     {
156         if (!index.scale && !index.offset) {
157             if (index.base == dest) {
158                 add32(index.index, dest);
159                 return;
160             }
161             if (index.index == dest) {
162                 add32(index.base, dest);
163                 return;
164             }
165         }
166         m_assembler.leal_mr(index.offset, index.base, index.index, index.scale, dest);
167     }
168 
    // and32 — immediate/memory forms of 32-bit AND.

    void and32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.andl_ir(imm.m_value, dest);
    }

    void and32(RegisterID src, Address dest)
    {
        m_assembler.andl_rm(src, dest.offset, dest.base);
    }

    void and32(Address src, RegisterID dest)
    {
        m_assembler.andl_mr(src.offset, src.base, dest);
    }

    void and32(TrustedImm32 imm, Address address)
    {
        m_assembler.andl_im(imm.m_value, address.offset, address.base);
    }
188 
and32(RegisterID op1,RegisterID op2,RegisterID dest)189     void and32(RegisterID op1, RegisterID op2, RegisterID dest)
190     {
191         if (op1 == op2)
192             zeroExtend32ToPtr(op1, dest);
193         else if (op1 == dest)
194             and32(op2, dest);
195         else {
196             move(op2, dest);
197             and32(op1, dest);
198         }
199     }
200 
    // dest = src & imm.
    void and32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        move(src, dest);
        and32(imm, dest);
    }
206 
    // lshift32 — 32-bit left shift. x86 can only shift by a variable amount
    // held in ecx (via cl), hence the register swapping below.

    void lshift32(RegisterID shift_amount, RegisterID dest)
    {
        ASSERT(shift_amount != dest);

        if (shift_amount == X86Registers::ecx)
            m_assembler.shll_CLr(dest);
        else {
            // On x86 we can only shift by ecx; if asked to shift by another register we'll
            // need to rejig the shift amount into ecx first, and restore the registers afterwards.
            // If dest is ecx, then shift the swapped register!
            swap(shift_amount, X86Registers::ecx);
            m_assembler.shll_CLr(dest == X86Registers::ecx ? shift_amount : dest);
            swap(shift_amount, X86Registers::ecx);
        }
    }

    // dest = src << shift_amount.
    void lshift32(RegisterID src, RegisterID shift_amount, RegisterID dest)
    {
        ASSERT(shift_amount != dest);

        if (src != dest)
            move(src, dest);
        lshift32(shift_amount, dest);
    }

    // dest <<= imm (immediate shift; no ecx dance required).
    void lshift32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.shll_i8r(imm.m_value, dest);
    }

    // dest = src << imm.
    void lshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        if (src != dest)
            move(src, dest);
        lshift32(imm, dest);
    }
243 
    // dest *= src (32-bit signed multiply, imul).
    void mul32(RegisterID src, RegisterID dest)
    {
        m_assembler.imull_rr(src, dest);
    }
248 
mul32(RegisterID op1,RegisterID op2,RegisterID dest)249     void mul32(RegisterID op1, RegisterID op2, RegisterID dest)
250     {
251         if (op2 == dest) {
252             mul32(op1, dest);
253         } else {
254             move(op1, dest);
255             mul32(op2, dest);
256         }
257     }
258 
    // dest *= memory operand.
    void mul32(Address src, RegisterID dest)
    {
        m_assembler.imull_mr(src.offset, src.base, dest);
    }

    // dest = src * imm (three-operand imul encoding).
    void mul32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        m_assembler.imull_i32r(src, imm.m_value, dest);
    }
268 
    // Two's-complement negation, in place.
    void neg32(RegisterID srcDest)
    {
        m_assembler.negl_r(srcDest);
    }

    void neg32(Address srcDest)
    {
        m_assembler.negl_m(srcDest.offset, srcDest.base);
    }
278 
    // or32 — 32-bit bitwise OR in register/immediate/memory forms.

    void or32(RegisterID src, RegisterID dest)
    {
        m_assembler.orl_rr(src, dest);
    }

    void or32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.orl_ir(imm.m_value, dest);
    }

    void or32(RegisterID src, Address dest)
    {
        m_assembler.orl_rm(src, dest.offset, dest.base);
    }

    void or32(Address src, RegisterID dest)
    {
        m_assembler.orl_mr(src.offset, src.base, dest);
    }

    void or32(TrustedImm32 imm, Address address)
    {
        m_assembler.orl_im(imm.m_value, address.offset, address.base);
    }
303 
or32(RegisterID op1,RegisterID op2,RegisterID dest)304     void or32(RegisterID op1, RegisterID op2, RegisterID dest)
305     {
306         if (op1 == op2)
307             zeroExtend32ToPtr(op1, dest);
308         else if (op1 == dest)
309             or32(op2, dest);
310         else {
311             move(op2, dest);
312             or32(op1, dest);
313         }
314     }
315 
    // dest = src | imm.
    void or32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        move(src, dest);
        or32(imm, dest);
    }
321 
    // rshift32 — arithmetic (sign-preserving, sar) 32-bit right shift.
    // Variable shift amounts must live in ecx, hence the swapping below.

    void rshift32(RegisterID shift_amount, RegisterID dest)
    {
        ASSERT(shift_amount != dest);

        if (shift_amount == X86Registers::ecx)
            m_assembler.sarl_CLr(dest);
        else {
            // On x86 we can only shift by ecx; if asked to shift by another register we'll
            // need to rejig the shift amount into ecx first, and restore the registers afterwards.
            // If dest is ecx, then shift the swapped register!
            swap(shift_amount, X86Registers::ecx);
            m_assembler.sarl_CLr(dest == X86Registers::ecx ? shift_amount : dest);
            swap(shift_amount, X86Registers::ecx);
        }
    }

    // dest = src >> shift_amount (arithmetic).
    void rshift32(RegisterID src, RegisterID shift_amount, RegisterID dest)
    {
        ASSERT(shift_amount != dest);

        if (src != dest)
            move(src, dest);
        rshift32(shift_amount, dest);
    }

    // dest >>= imm (arithmetic, immediate shift).
    void rshift32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.sarl_i8r(imm.m_value, dest);
    }

    // dest = src >> imm (arithmetic).
    void rshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        if (src != dest)
            move(src, dest);
        rshift32(imm, dest);
    }
358 
    // urshift32 — logical (zero-filling, shr) 32-bit right shift.
    // Variable shift amounts must live in ecx, hence the swapping below.

    void urshift32(RegisterID shift_amount, RegisterID dest)
    {
        ASSERT(shift_amount != dest);

        if (shift_amount == X86Registers::ecx)
            m_assembler.shrl_CLr(dest);
        else {
            // On x86 we can only shift by ecx; if asked to shift by another register we'll
            // need to rejig the shift amount into ecx first, and restore the registers afterwards.
            // If dest is ecx, then shift the swapped register!
            swap(shift_amount, X86Registers::ecx);
            m_assembler.shrl_CLr(dest == X86Registers::ecx ? shift_amount : dest);
            swap(shift_amount, X86Registers::ecx);
        }
    }

    // dest = src >>> shift_amount (logical).
    void urshift32(RegisterID src, RegisterID shift_amount, RegisterID dest)
    {
        ASSERT(shift_amount != dest);

        if (src != dest)
            move(src, dest);
        urshift32(shift_amount, dest);
    }

    // dest >>>= imm (logical, immediate shift).
    void urshift32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.shrl_i8r(imm.m_value, dest);
    }

    // dest = src >>> imm (logical).
    void urshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        if (src != dest)
            move(src, dest);
        urshift32(imm, dest);
    }
395 
    // sub32 — 32-bit subtraction in register/immediate/memory forms.

    void sub32(RegisterID src, RegisterID dest)
    {
        m_assembler.subl_rr(src, dest);
    }

    void sub32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.subl_ir(imm.m_value, dest);
    }

    void sub32(TrustedImm32 imm, Address address)
    {
        m_assembler.subl_im(imm.m_value, address.offset, address.base);
    }

    void sub32(Address src, RegisterID dest)
    {
        m_assembler.subl_mr(src.offset, src.base, dest);
    }

    void sub32(RegisterID src, Address dest)
    {
        m_assembler.subl_rm(src, dest.offset, dest.base);
    }
420 
    // dest ^= src (32-bit XOR).
    void xor32(RegisterID src, RegisterID dest)
    {
        m_assembler.xorl_rr(src, dest);
    }

    void xor32(TrustedImm32 imm, Address dest)
    {
        // XOR with all-ones is a bitwise NOT; 'not' has a shorter encoding.
        if (imm.m_value == -1)
            m_assembler.notl_m(dest.offset, dest.base);
        else
            m_assembler.xorl_im(imm.m_value, dest.offset, dest.base);
    }
433 
xor32(TrustedImm32 imm,RegisterID dest)434     void xor32(TrustedImm32 imm, RegisterID dest)
435     {
436         if (imm.m_value == -1)
437         m_assembler.notl_r(dest);
438         else
439         m_assembler.xorl_ir(imm.m_value, dest);
440     }
441 
    // xor32 — memory forms.

    void xor32(RegisterID src, Address dest)
    {
        m_assembler.xorl_rm(src, dest.offset, dest.base);
    }

    void xor32(Address src, RegisterID dest)
    {
        m_assembler.xorl_mr(src.offset, src.base, dest);
    }
451 
xor32(RegisterID op1,RegisterID op2,RegisterID dest)452     void xor32(RegisterID op1, RegisterID op2, RegisterID dest)
453     {
454         if (op1 == op2)
455             move(TrustedImm32(0), dest);
456         else if (op1 == dest)
457             xor32(op2, dest);
458         else {
459             move(op2, dest);
460             xor32(op1, dest);
461         }
462     }
463 
    // dest = src ^ imm.
    void xor32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        move(src, dest);
        xor32(imm, dest);
    }
469 
sqrtDouble(FPRegisterID src,FPRegisterID dst)470     void sqrtDouble(FPRegisterID src, FPRegisterID dst)
471     {
472         m_assembler.sqrtsd_rr(src, dst);
473     }
474 
    // dst = fabs(src). dst is first loaded with -0.0 (a mask with only the
    // sign bit set); andnpd computes ~dst & src, clearing src's sign bit.
    // The mask in dst is consumed destructively, so src must differ from dst.
    void absDouble(FPRegisterID src, FPRegisterID dst)
    {
        ASSERT(src != dst);
        static const double negativeZeroConstant = -0.0;
        loadDouble(&negativeZeroConstant, dst);
        m_assembler.andnpd_rr(src, dst);
    }

    // dst = -src, by XOR-ing with -0.0 to flip the sign bit. As above, the
    // mask in dst is consumed, so src must differ from dst.
    void negateDouble(FPRegisterID src, FPRegisterID dst)
    {
        ASSERT(src != dst);
        static const double negativeZeroConstant = -0.0;
        loadDouble(&negativeZeroConstant, dst);
        m_assembler.xorpd_rr(src, dst);
    }
490 
491 
    // Memory access operations:
    //
    // Loads are of the form load(address, destination) and stores of the form
    // store(source, address).  The source for a store may be a TrustedImm32.  Address
    // operand objects to loads and stores will be implicitly constructed if a
    // register is passed.
498 
    // 32-bit loads. x86 supports unaligned accesses natively, so the
    // *Unaligned variants simply forward to the plain loads.

    void load32(ImplicitAddress address, RegisterID dest)
    {
        m_assembler.movl_mr(address.offset, address.base, dest);
    }

    void load32(BaseIndex address, RegisterID dest)
    {
        m_assembler.movl_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
    {
        load32(address, dest);
    }

    void load16Unaligned(ImplicitAddress address, RegisterID dest)
    {
        load16(address, dest);
    }

    void load16Unaligned(BaseIndex address, RegisterID dest)
    {
        load16(address, dest);
    }
523 
    // Loads whose displacement is meant to be rewritten later by the patching
    // machinery: padBeforePatch() plus the fixed-size disp32/disp8 encodings
    // keep the offset field at a known location.

    DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
    {
        padBeforePatch();
        m_assembler.movl_mr_disp32(address.offset, address.base, dest);
        return DataLabel32(this);
    }

    // Compact form: the displacement is encoded as a single signed byte.
    DataLabelCompact load32WithCompactAddressOffsetPatch(Address address, RegisterID dest)
    {
        padBeforePatch();
        m_assembler.movl_mr_disp8(address.offset, address.base, dest);
        return DataLabelCompact(this);
    }

    // Rewrite the displacement of a previously emitted compact load; the new
    // value must fit the one-byte displacement (asserted below).
    static void repatchCompact(CodeLocationDataLabelCompact dataLabelCompact, int32_t value)
    {
        ASSERT(isCompactPtrAlignedAddressOffset(value));
        AssemblerType_T::repatchCompact(dataLabelCompact.dataLocation(), value);
    }

    // NOTE(review): identical to load32WithCompactAddressOffsetPatch above —
    // presumably a separate entry point required by the portable
    // macro-assembler interface; confirm against callers.
    DataLabelCompact loadCompactWithAddressOffsetPatch(Address address, RegisterID dest)
    {
        padBeforePatch();
        m_assembler.movl_mr_disp8(address.offset, address.base, dest);
        return DataLabelCompact(this);
    }
550 
    // Sub-word loads. The movz* forms zero-extend and the movs* forms
    // sign-extend the loaded byte/halfword into the full 32-bit destination.

    void load8(BaseIndex address, RegisterID dest)
    {
        m_assembler.movzbl_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    void load8(ImplicitAddress address, RegisterID dest)
    {
        m_assembler.movzbl_mr(address.offset, address.base, dest);
    }

    void load8Signed(BaseIndex address, RegisterID dest)
    {
        m_assembler.movsbl_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    void load8Signed(ImplicitAddress address, RegisterID dest)
    {
        m_assembler.movsbl_mr(address.offset, address.base, dest);
    }

    void load16(BaseIndex address, RegisterID dest)
    {
        m_assembler.movzwl_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    void load16(ImplicitAddress address, RegisterID dest)
    {
        m_assembler.movzwl_mr(address.offset, address.base, dest);
    }

    void load16(Address address, RegisterID dest)
    {
        m_assembler.movzwl_mr(address.offset, address.base, dest);
    }

    void load16Signed(BaseIndex address, RegisterID dest)
    {
        m_assembler.movswl_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    void load16Signed(Address address, RegisterID dest)
    {
        m_assembler.movswl_mr(address.offset, address.base, dest);
    }
595 
    // 32-bit stores (including a patchable-displacement form) and immediate
    // byte stores.

    DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
    {
        padBeforePatch();
        m_assembler.movl_rm_disp32(src, address.offset, address.base);
        return DataLabel32(this);
    }

    void store32(RegisterID src, ImplicitAddress address)
    {
        m_assembler.movl_rm(src, address.offset, address.base);
    }

    void store32(RegisterID src, BaseIndex address)
    {
        m_assembler.movl_rm(src, address.offset, address.base, address.index, address.scale);
    }

    void store32(TrustedImm32 imm, ImplicitAddress address)
    {
        m_assembler.movl_i32m(imm.m_value, address.offset, address.base);
    }

    void store32(TrustedImm32 imm, BaseIndex address)
    {
        m_assembler.movl_i32m(imm.m_value, address.offset, address.base, address.index, address.scale);
    }

    // Byte stores of immediates; the value must fit the signed 8-bit
    // immediate accepted by movb_i8m.
    void store8(TrustedImm32 imm, Address address)
    {
        ASSERT(-128 <= imm.m_value && imm.m_value < 128);
        m_assembler.movb_i8m(imm.m_value, address.offset, address.base);
    }

    void store8(TrustedImm32 imm, BaseIndex address)
    {
        ASSERT(-128 <= imm.m_value && imm.m_value < 128);
        m_assembler.movb_i8m(imm.m_value, address.offset, address.base, address.index, address.scale);
    }
634 
    // Store the low byte of src to memory. On 32-bit x86 only eax..ebx/ecx/edx
    // have byte sub-registers, so other sources are first swapped into a
    // temporary register that is guaranteed byte-addressable and not used by
    // the address operand.
    void store8(RegisterID src, BaseIndex address)
    {
#if CPU(X86)
        // On 32-bit x86 we can only store from the first 4 registers;
        // esp..edi are mapped to the 'h' registers!
        if (src >= 4) {
            // Pick a temporary register that does not participate in the address.
            RegisterID temp;
            if (address.base != X86Registers::eax && address.index != X86Registers::eax)
                temp = X86Registers::eax;
            else if (address.base != X86Registers::ebx && address.index != X86Registers::ebx)
                temp = X86Registers::ebx;
            else {
                ASSERT(address.base != X86Registers::ecx && address.index != X86Registers::ecx);
                temp = X86Registers::ecx;
            }

            // Swap to the temporary register to perform the store.
            swap(src, temp);
            m_assembler.movb_rm(temp, address.offset, address.base, address.index, address.scale);
            swap(src, temp);
            return;
        }
#endif
        m_assembler.movb_rm(src, address.offset, address.base, address.index, address.scale);
    }
661 
    // Store the low 16 bits of src to memory. Uses the same temporary-register
    // dance as store8 on 32-bit x86.
    void store16(RegisterID src, BaseIndex address)
    {
#if CPU(X86)
        // On 32-bit x86 we can only store from the first 4 registers;
        // esp..edi are mapped to the 'h' registers!
        if (src >= 4) {
            // Pick a temporary register that does not participate in the address.
            RegisterID temp;
            if (address.base != X86Registers::eax && address.index != X86Registers::eax)
                temp = X86Registers::eax;
            else if (address.base != X86Registers::ebx && address.index != X86Registers::ebx)
                temp = X86Registers::ebx;
            else {
                ASSERT(address.base != X86Registers::ecx && address.index != X86Registers::ecx);
                temp = X86Registers::ecx;
            }

            // Swap to the temporary register to perform the store.
            swap(src, temp);
            m_assembler.movw_rm(temp, address.offset, address.base, address.index, address.scale);
            swap(src, temp);
            return;
        }
#endif
        m_assembler.movw_rm(src, address.offset, address.base, address.index, address.scale);
    }
688 
689 
    // Floating-point operations:
    //
    // Presently only supports SSE, not x87 floating point.
693 
    // Register-to-register double move; elided entirely when src == dest.
    void moveDouble(FPRegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        if (src != dest)
            m_assembler.movsd_rr(src, dest);
    }
700 
    // Load a double from an absolute address. On x86 the address can be
    // encoded directly in the instruction; on x86-64 it is first materialized
    // into scratchRegister and loaded indirectly.
    void loadDouble(const void* address, FPRegisterID dest)
    {
#if CPU(X86)
        ASSERT(isSSE2Present());
        m_assembler.movsd_mr(address, dest);
#else
        move(TrustedImmPtr(address), scratchRegister);
        loadDouble(scratchRegister, dest);
#endif
    }

    void loadDouble(ImplicitAddress address, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.movsd_mr(address.offset, address.base, dest);
    }

    void loadDouble(BaseIndex address, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.movsd_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    // Single-precision load (movss).
    void loadFloat(BaseIndex address, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.movss_mr(address.offset, address.base, address.index, address.scale, dest);
    }
728 
    // Double/float stores and float<->double conversions (all SSE2).

    void storeDouble(FPRegisterID src, ImplicitAddress address)
    {
        ASSERT(isSSE2Present());
        m_assembler.movsd_rm(src, address.offset, address.base);
    }

    void storeDouble(FPRegisterID src, BaseIndex address)
    {
        ASSERT(isSSE2Present());
        m_assembler.movsd_rm(src, address.offset, address.base, address.index, address.scale);
    }

    void storeFloat(FPRegisterID src, BaseIndex address)
    {
        ASSERT(isSSE2Present());
        m_assembler.movss_rm(src, address.offset, address.base, address.index, address.scale);
    }

    // dst = (float)src — narrowing conversion (cvtsd2ss).
    void convertDoubleToFloat(FPRegisterID src, FPRegisterID dst)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvtsd2ss_rr(src, dst);
    }

    // dst = (double)src — widening conversion (cvtss2sd).
    void convertFloatToDouble(FPRegisterID src, FPRegisterID dst)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvtss2sd_rr(src, dst);
    }
758 
    // dest += src (addsd).
    void addDouble(FPRegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.addsd_rr(src, dest);
    }
764 
addDouble(FPRegisterID op1,FPRegisterID op2,FPRegisterID dest)765     void addDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
766     {
767         ASSERT(isSSE2Present());
768         if (op1 == dest)
769             addDouble(op2, dest);
770         else {
771             moveDouble(op2, dest);
772             addDouble(op1, dest);
773         }
774     }
775 
    // dest += memory operand.
    void addDouble(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.addsd_mr(src.offset, src.base, dest);
    }
781 
    // dest /= src (divsd).
    void divDouble(FPRegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.divsd_rr(src, dest);
    }

    // dest = op1 / op2. Division does not commute, so op2 may alias dest only
    // if op1 does as well.
    void divDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        // B := A / B is invalid.
        ASSERT(op1 == dest || op2 != dest);

        moveDouble(op1, dest); // no-op when op1 == dest
        divDouble(op2, dest);
    }

    void divDouble(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.divsd_mr(src.offset, src.base, dest);
    }
802 
    // dest -= src (subsd).
    void subDouble(FPRegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.subsd_rr(src, dest);
    }

    // dest := op1 - op2. Subtraction is not commutative, so op2 may not alias
    // dest unless op1 does as well.
    void subDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        // B := A - B is invalid.
        ASSERT(op1 == dest || op2 != dest);

        moveDouble(op1, dest);
        subDouble(op2, dest);
    }

    // dest -= *(base + offset) (subsd from memory).
    void subDouble(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.subsd_mr(src.offset, src.base, dest);
    }
823 
    // dest *= src (mulsd).
    void mulDouble(FPRegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.mulsd_rr(src, dest);
    }
829 
mulDouble(FPRegisterID op1,FPRegisterID op2,FPRegisterID dest)830     void mulDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
831     {
832         ASSERT(isSSE2Present());
833         if (op1 == dest)
834             mulDouble(op2, dest);
835         else {
836             moveDouble(op2, dest);
837             mulDouble(op1, dest);
838         }
839     }
840 
    // dest *= *(base + offset) (mulsd from memory).
    void mulDouble(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.mulsd_mr(src.offset, src.base, dest);
    }
846 
    // Convert the signed 32-bit integer in 'src' to a double in 'dest' (cvtsi2sd).
    void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvtsi2sd_rr(src, dest);
    }

    // Convert a signed 32-bit integer loaded from memory to a double in 'dest'.
    void convertInt32ToDouble(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvtsi2sd_mr(src.offset, src.base, dest);
    }
858 
    // Compare two doubles with ucomisd and return a Jump taken when 'cond' holds.
    Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
    {
        ASSERT(isSSE2Present());

        // Conditions carrying the invert bit are implemented by comparing the
        // operands in the opposite order.
        if (cond & DoubleConditionBitInvert)
            m_assembler.ucomisd_rr(left, right);
        else
            m_assembler.ucomisd_rr(right, left);

        // ucomisd reports an unordered comparison (a NaN operand) via the parity
        // flag, so the "special" equality conditions need extra branches: ZF alone
        // would treat NaN as equal to anything.
        if (cond == DoubleEqual) {
            if (left == right)
                return Jump(m_assembler.jnp()); // Same register: equal unless it is NaN.
            Jump isUnordered(m_assembler.jp());
            Jump result = Jump(m_assembler.je());
            isUnordered.link(this); // Unordered falls through, i.e. branch not taken.
            return result;
        } else if (cond == DoubleNotEqualOrUnordered) {
            if (left == right)
                return Jump(m_assembler.jp()); // Only NaN compares unequal to itself.
            Jump isUnordered(m_assembler.jp());
            Jump isEqual(m_assembler.je());
            isUnordered.link(this);
            Jump result = jump(); // Unordered or not-equal: take the branch.
            isEqual.link(this);
            return result;
        }

        ASSERT(!(cond & DoubleConditionBitSpecial));
        return Jump(m_assembler.jCC(static_cast<X86Assembler::Condition>(cond & ~DoubleConditionBits)));
    }
889 
    // Truncates 'src' to an integer, and places the resulting 'dest'.
    // If the result is not representable as a 32 bit value, branch.
    // May also branch for some values that are representable in 32 bits
    // (specifically, in this case, INT_MIN).
    enum BranchTruncateType { BranchIfTruncateFailed, BranchIfTruncateSuccessful };
    Jump branchTruncateDoubleToInt32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed)
    {
        ASSERT(isSSE2Present());
        // cvttsd2si yields 0x80000000 when the value cannot be represented,
        // so compare against that sentinel to detect failure.
        m_assembler.cvttsd2si_rr(src, dest);
        return branch32(branchType ? NotEqual : Equal, dest, TrustedImm32(0x80000000));
    }

    Jump branchTruncateDoubleToUint32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed)
    {
        ASSERT(isSSE2Present());
        // A negative signed result (including the 0x80000000 failure sentinel)
        // means the value did not fit as an unsigned 32-bit integer.
        m_assembler.cvttsd2si_rr(src, dest);
        return branch32(branchType ? GreaterThanOrEqual : LessThan, dest, TrustedImm32(0));
    }
908 
    // Truncate 'src' toward zero into 'dest' (cvttsd2si) with no failure check.
    void truncateDoubleToInt32(FPRegisterID src, RegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvttsd2si_rr(src, dest);
    }

#if CPU(X86_64)
    // Truncate using the 64-bit conversion (cvttsd2siq) so that values in the
    // full uint32 range convert without overflowing a signed 32-bit result.
    void truncateDoubleToUint32(FPRegisterID src, RegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvttsd2siq_rr(src, dest);
    }
#endif
922 
    // Convert 'src' to an integer, and places the resulting 'dest'.
    // If the result is not representable as a 32 bit value, branch.
    // May also branch for some values that are representable in 32 bits
    // (specifically, in this case, 0).
    void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID fpTemp)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvttsd2si_rr(src, dest);

        // If the result is zero, it might have been -0.0, and the double comparison won't catch this!
        failureCases.append(branchTest32(Zero, dest));

        // Convert the integer result back to float & compare to the original value - if not equal or unordered (NaN) then jump.
        convertInt32ToDouble(dest, fpTemp);
        m_assembler.ucomisd_rr(fpTemp, src);
        failureCases.append(m_assembler.jp());  // Unordered: 'src' was NaN.
        failureCases.append(m_assembler.jne()); // Round-trip changed the value: truncation lost information.
    }
941 
    // Branch if 'reg' holds a non-zero double; 'scratch' is zeroed (xorpd) and
    // used as the comparison operand.
    Jump branchDoubleNonZero(FPRegisterID reg, FPRegisterID scratch)
    {
        ASSERT(isSSE2Present());
        m_assembler.xorpd_rr(scratch, scratch);
        return branchDouble(DoubleNotEqual, reg, scratch);
    }

    // Branch if 'reg' is zero or NaN; 'scratch' is zeroed and compared against.
    Jump branchDoubleZeroOrNaN(FPRegisterID reg, FPRegisterID scratch)
    {
        ASSERT(isSSE2Present());
        m_assembler.xorpd_rr(scratch, scratch);
        return branchDouble(DoubleEqualOrUnordered, reg, scratch);
    }
955 
    // Shift the 64-bit lanes of 'reg' left by an immediate bit count (psllq).
    void lshiftPacked(TrustedImm32 imm, XMMRegisterID reg)
    {
        ASSERT(isSSE2Present());
        m_assembler.psllq_i8r(imm.m_value, reg);
    }

    // Logical right shift of the 64-bit lanes of 'reg' (psrlq).
    void rshiftPacked(TrustedImm32 imm, XMMRegisterID reg)
    {
        ASSERT(isSSE2Present());
        m_assembler.psrlq_i8r(imm.m_value, reg);
    }

    // dst |= src, bitwise across the whole XMM register (por).
    void orPacked(XMMRegisterID src, XMMRegisterID dst)
    {
        ASSERT(isSSE2Present());
        m_assembler.por_rr(src, dst);
    }

    // Move a 32-bit GPR value into the low lane of an XMM register (movd).
    void moveInt32ToPacked(RegisterID src, XMMRegisterID dst)
    {
        ASSERT(isSSE2Present());
        m_assembler.movd_rr(src, dst);
    }

    // Move the low 32 bits of an XMM register into a GPR (movd).
    void movePackedToInt32(XMMRegisterID src, RegisterID dst)
    {
        ASSERT(isSSE2Present());
        m_assembler.movd_rr(src, dst);
    }
985 
    // Stack manipulation operations:
    //
    // The ABI is assumed to provide a stack abstraction to memory,
    // containing machine word sized units of data.  Push and pop
    // operations add and remove a single register sized unit of data
    // to or from the stack.  Peek and poke operations read or write
    // values on the stack, without moving the current stack position.

    // Pop the top of the stack into 'dest'.
    void pop(RegisterID dest)
    {
        m_assembler.pop_r(dest);
    }

    // Push the contents of 'src' onto the stack.
    void push(RegisterID src)
    {
        m_assembler.push_r(src);
    }

    // Push the value loaded from 'address' onto the stack.
    void push(Address address)
    {
        m_assembler.push_m(address.offset, address.base);
    }

    // Push a 32-bit immediate onto the stack.
    void push(TrustedImm32 imm)
    {
        m_assembler.push_i32(imm.m_value);
    }
1013 
1014 
1015     // Register move operations:
1016     //
1017     // Move values in registers.
1018 
move(TrustedImm32 imm,RegisterID dest)1019     void move(TrustedImm32 imm, RegisterID dest)
1020     {
1021         // Note: on 64-bit the TrustedImm32 value is zero extended into the register, it
1022         // may be useful to have a separate version that sign extends the value?
1023         if (!imm.m_value)
1024             m_assembler.xorl_rr(dest, dest);
1025         else
1026             m_assembler.movl_i32r(imm.m_value, dest);
1027     }
1028 
#if CPU(X86_64)
    // Full 64-bit register-to-register move; a no-op when src == dest.
    void move(RegisterID src, RegisterID dest)
    {
        // Note: on 64-bit this is a full register move; perhaps it would be
        // useful to have separate move32 & movePtr, with move32 zero extending?
        if (src != dest)
            m_assembler.movq_rr(src, dest);
    }

    // Load a pointer-sized immediate into 'dest'.
    void move(TrustedImmPtr imm, RegisterID dest)
    {
        m_assembler.movq_i64r(imm.asIntptr(), dest);
    }

    // Load a 64-bit immediate into 'dest'.
    void move(TrustedImm64 imm, RegisterID dest)
    {
        m_assembler.movq_i64r(imm.m_value, dest);
    }

    // Exchange the contents of two registers; a no-op when they are the same.
    void swap(RegisterID reg1, RegisterID reg2)
    {
        if (reg1 != reg2)
            m_assembler.xchgq_rr(reg1, reg2);
    }

    // Sign-extend the 32-bit value in 'src' to a 64-bit value in 'dest' (movsxd).
    void signExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        m_assembler.movsxd_rr(src, dest);
    }

    // Zero-extend via a 32-bit move, which clears the upper 32 bits on x86-64.
    void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        m_assembler.movl_rr(src, dest);
    }
#else
    // 32-bit register-to-register move; a no-op when src == dest.
    void move(RegisterID src, RegisterID dest)
    {
        if (src != dest)
            m_assembler.movl_rr(src, dest);
    }

    // Load a pointer-sized (32-bit) immediate into 'dest'.
    void move(TrustedImmPtr imm, RegisterID dest)
    {
        m_assembler.movl_i32r(imm.asIntptr(), dest);
    }

    // Exchange the contents of two registers; a no-op when they are the same.
    void swap(RegisterID reg1, RegisterID reg2)
    {
        if (reg1 != reg2)
            m_assembler.xchgl_rr(reg1, reg2);
    }

    // On 32-bit, pointer width equals 32 bits, so extension is a plain move.
    void signExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        move(src, dest);
    }

    void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        move(src, dest);
    }
#endif
1091 
1092 
    // Forwards / external control flow operations:
    //
    // This set of jump and conditional branch operations return a Jump
    // object which may linked at a later point, allow forwards jump,
    // or jumps that will require external linkage (after the code has been
    // relocated).
    //
    // For branches, signed <, >, <= and >= are denoted as l, g, le, and ge
    // respectively, for unsigned comparisons the names b, a, be, and ae are
    // used (representing the names 'below' and 'above').
    //
    // Operands to the comparison are provided in the expected order, e.g.
    // jle32(reg1, TrustedImm32(5)) will branch if the value held in reg1, when
    // treated as a signed 32bit value, is less than or equal to 5.
    //
    // jz and jnz test whether the first operand is equal to zero, and take
    // an optional second operand of a mask under which to perform the test.

public:
    // Compare the byte at 'left' with the immediate and branch on 'cond'.
    Jump branch8(RelationalCondition cond, Address left, TrustedImm32 right)
    {
        m_assembler.cmpb_im(right.m_value, left.offset, left.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }
1117 
    // Compare two registers and branch on 'cond'.
    Jump branch32(RelationalCondition cond, RegisterID left, RegisterID right)
    {
        m_assembler.cmpl_rr(right, left);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32(RelationalCondition cond, RegisterID left, TrustedImm32 right)
    {
        // An (in)equality comparison against zero can use the shorter test encoding.
        if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
            m_assembler.testl_rr(left, left);
        else
            m_assembler.cmpl_ir(right.m_value, left);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    // Compare a register against a value in memory.
    Jump branch32(RelationalCondition cond, RegisterID left, Address right)
    {
        m_assembler.cmpl_mr(right.offset, right.base, left);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    // Compare a value in memory against a register.
    Jump branch32(RelationalCondition cond, Address left, RegisterID right)
    {
        m_assembler.cmpl_rm(right, left.offset, left.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    // Compare a value in memory against an immediate.
    Jump branch32(RelationalCondition cond, Address left, TrustedImm32 right)
    {
        m_assembler.cmpl_im(right.m_value, left.offset, left.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    // Compare a base-index addressed value against an immediate.
    Jump branch32(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
    {
        m_assembler.cmpl_im(right.m_value, left.offset, left.base, left.index, left.scale);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    // x86 loads handle unaligned accesses, so this is just branch32.
    Jump branch32WithUnalignedHalfWords(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
    {
        return branch32(cond, left, right);
    }
1161 
    // Branch on the flags set by (reg & mask).
    Jump branchTest32(ResultCondition cond, RegisterID reg, RegisterID mask)
    {
        m_assembler.testl_rr(reg, mask);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    // Branch on (reg & mask); with the default all-ones mask this tests reg itself.
    Jump branchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
    {
        // if we are only interested in the low seven bits, this can be tested with a testb
        if (mask.m_value == -1)
            m_assembler.testl_rr(reg, reg);
        else
            m_assembler.testl_i32r(mask.m_value, reg);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    // Branch on (*address & mask).
    Jump branchTest32(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
    {
        // An all-ones mask just needs the value's own flags; cmp with 0 avoids a
        // test immediate.
        if (mask.m_value == -1)
            m_assembler.cmpl_im(0, address.offset, address.base);
        else
            m_assembler.testl_i32m(mask.m_value, address.offset, address.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    // Branch on (*address & mask), base-index addressing.
    Jump branchTest32(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
    {
        if (mask.m_value == -1)
            m_assembler.cmpl_im(0, address.offset, address.base, address.index, address.scale);
        else
            m_assembler.testl_i32m(mask.m_value, address.offset, address.base, address.index, address.scale);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    // Branch on (byte at address & mask).
    Jump branchTest8(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
    {
        // Byte in TrustedImm32 is not well defined, so be a little permissive here, but don't accept nonsense values.
        ASSERT(mask.m_value >= -128 && mask.m_value <= 255);
        if (mask.m_value == -1)
            m_assembler.cmpb_im(0, address.offset, address.base);
        else
            m_assembler.testb_im(mask.m_value, address.offset, address.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    // Branch on (byte at address & mask), base-index addressing.
    Jump branchTest8(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
    {
        // Byte in TrustedImm32 is not well defined, so be a little permissive here, but don't accept nonsense values.
        ASSERT(mask.m_value >= -128 && mask.m_value <= 255);
        if (mask.m_value == -1)
            m_assembler.cmpb_im(0, address.offset, address.base, address.index, address.scale);
        else
            m_assembler.testb_im(mask.m_value, address.offset, address.base, address.index, address.scale);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }
1217 
branch8(RelationalCondition cond,BaseIndex left,TrustedImm32 right)1218     Jump branch8(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
1219     {
1220         ASSERT(!(right.m_value & 0xFFFFFF00));
1221 
1222         m_assembler.cmpb_im(right.m_value, left.offset, left.base, left.index, left.scale);
1223         return Jump(m_assembler.jCC(x86Condition(cond)));
1224     }
1225 
    // Emit an unconditional jump to be linked later.
    Jump jump()
    {
        return Jump(m_assembler.jmp());
    }

    // Indirect jump to the address held in 'target'.
    void jump(RegisterID target)
    {
        m_assembler.jmp_r(target);
    }

    // Address is a memory location containing the address to jump to
    void jump(Address address)
    {
        m_assembler.jmp_m(address.offset, address.base);
    }
1241 
1242 
    // Arithmetic control flow operations:
    //
    // This set of conditional branch operations branch based
    // on the result of an arithmetic operation.  The operation
    // is performed as normal, storing the result.
    //
    // * jz operations branch if the result is zero.
    // * jo operations branch if the (signed) arithmetic
    //   operation caused an overflow to occur.

    // dest += src; branch on the flags of the addition.
    Jump branchAdd32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        add32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    // dest += imm; branch on the flags of the addition.
    Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        add32(imm, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    // *dest += src (immediate); branch on the flags of the addition.
    Jump branchAdd32(ResultCondition cond, TrustedImm32 src, Address dest)
    {
        add32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    // *dest += src (register); branch on the flags of the addition.
    Jump branchAdd32(ResultCondition cond, RegisterID src, Address dest)
    {
        add32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    // dest += *src; branch on the flags of the addition.
    Jump branchAdd32(ResultCondition cond, Address src, RegisterID dest)
    {
        add32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    // dest := src1 + src2; addition is commutative, so either source may alias dest.
    Jump branchAdd32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
    {
        if (src1 == dest)
            return branchAdd32(cond, src2, dest);
        move(src2, dest);
        return branchAdd32(cond, src1, dest);
    }

    // dest := src + imm; branch on the flags of the addition.
    Jump branchAdd32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        move(src, dest);
        return branchAdd32(cond, imm, dest);
    }
1296 
    // dest *= src; branch on 'cond'. For non-overflow conditions the result is
    // re-tested so the branch sees flags derived from the product itself.
    Jump branchMul32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        mul32(src, dest);
        if (cond != Overflow)
            m_assembler.testl_rr(dest, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    // dest *= *src; branch on 'cond'.
    Jump branchMul32(ResultCondition cond, Address src, RegisterID dest)
    {
        mul32(src, dest);
        if (cond != Overflow)
            m_assembler.testl_rr(dest, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    // dest := src * imm; branch on 'cond'.
    Jump branchMul32(ResultCondition cond, TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        mul32(imm, src, dest);
        if (cond != Overflow)
            m_assembler.testl_rr(dest, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    // dest := src1 * src2; multiplication is commutative, so either source may alias dest.
    Jump branchMul32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
    {
        if (src1 == dest)
            return branchMul32(cond, src2, dest);
        move(src2, dest);
        return branchMul32(cond, src1, dest);
    }
1328 
    // dest -= src; branch on the flags of the subtraction.
    Jump branchSub32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        sub32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    // dest -= imm; branch on the flags of the subtraction.
    Jump branchSub32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        sub32(imm, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    // *dest -= imm; branch on the flags of the subtraction.
    Jump branchSub32(ResultCondition cond, TrustedImm32 imm, Address dest)
    {
        sub32(imm, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    // *dest -= src; branch on the flags of the subtraction.
    Jump branchSub32(ResultCondition cond, RegisterID src, Address dest)
    {
        sub32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    // dest -= *src; branch on the flags of the subtraction.
    Jump branchSub32(ResultCondition cond, Address src, RegisterID dest)
    {
        sub32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    // dest := src1 - src2; subtraction is not commutative, so src2 may not alias
    // dest unless src1 does as well.
    Jump branchSub32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
    {
        // B := A - B is invalid.
        ASSERT(src1 == dest || src2 != dest);

        move(src1, dest);
        return branchSub32(cond, src2, dest);
    }

    // dest := src1 - src2 (immediate); branch on the flags of the subtraction.
    Jump branchSub32(ResultCondition cond, RegisterID src1, TrustedImm32 src2, RegisterID dest)
    {
        move(src1, dest);
        return branchSub32(cond, src2, dest);
    }
1373 
    // srcDest := -srcDest; branch on the flags of the negation.
    Jump branchNeg32(ResultCondition cond, RegisterID srcDest)
    {
        neg32(srcDest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    // dest |= src; branch on the flags of the or.
    Jump branchOr32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        or32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }
1385 
1386 
    // Miscellaneous operations:

    // Emit an int3 software breakpoint.
    void breakpoint()
    {
        m_assembler.int3();
    }

    // Emit a near call whose target will be linked later.
    Call nearCall()
    {
        return Call(m_assembler.call(), Call::LinkableNear);
    }

    // Indirect call through 'target'.
    Call call(RegisterID target)
    {
        return Call(m_assembler.call(target), Call::None);
    }

    // Indirect call through a function pointer stored at 'address'.
    void call(Address address)
    {
        m_assembler.call_m(address.offset, address.base);
    }

    // Return from the current call frame.
    void ret()
    {
        m_assembler.ret();
    }
1413 
    // dest := (byte at 'left' <cond> right) ? 1 : 0.
    void compare8(RelationalCondition cond, Address left, TrustedImm32 right, RegisterID dest)
    {
        m_assembler.cmpb_im(right.m_value, left.offset, left.base);
        set32(x86Condition(cond), dest);
    }

    // dest := (left <cond> right) ? 1 : 0.
    void compare32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
    {
        m_assembler.cmpl_rr(right, left);
        set32(x86Condition(cond), dest);
    }

    // dest := (left <cond> imm) ? 1 : 0.
    void compare32(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
    {
        // An (in)equality comparison against zero can use the shorter test encoding.
        if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
            m_assembler.testl_rr(left, left);
        else
            m_assembler.cmpl_ir(right.m_value, left);
        set32(x86Condition(cond), dest);
    }
1434 
    // FIXME:
    // The mask should be optional... perhaps the argument order should be
    // dest-src, operations always have a dest? ... possibly not true, considering
    // asm ops like test, or pseudo ops like pop().

    // dest := ((byte at address & mask) <cond>) ? 1 : 0.
    void test8(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
    {
        // An all-ones mask just needs the value's own flags.
        if (mask.m_value == -1)
            m_assembler.cmpb_im(0, address.offset, address.base);
        else
            m_assembler.testb_im(mask.m_value, address.offset, address.base);
        set32(x86Condition(cond), dest);
    }

    // dest := ((*address & mask) <cond>) ? 1 : 0.
    void test32(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
    {
        if (mask.m_value == -1)
            m_assembler.cmpl_im(0, address.offset, address.base);
        else
            m_assembler.testl_i32m(mask.m_value, address.offset, address.base);
        set32(x86Condition(cond), dest);
    }
1457 
    // Invert a relational condition, e.g. == becomes !=, < becomes >=, etc.
    // Relies on x86 condition codes pairing each condition with its negation at
    // (cond ^ 1).
    static RelationalCondition invert(RelationalCondition cond)
    {
        return static_cast<RelationalCondition>(cond ^ 1);
    }

    // Emit a single-byte no-op.
    void nop()
    {
        m_assembler.nop();
    }

    // Patch already-emitted code at 'instructionStart' with a jump to 'destination'.
    static void replaceWithJump(CodeLocationLabel instructionStart, CodeLocationLabel destination)
    {
        X86Assembler::replaceWithJump(instructionStart.executableAddress(), destination.executableAddress());
    }

    // Upper bound on the byte length of a jump emitted by replaceWithJump.
    static ptrdiff_t maxJumpReplacementSize()
    {
        return X86Assembler::maxJumpReplacementSize();
    }
1478 
protected:
    // Relational conditions map directly onto x86 condition codes.
    X86Assembler::Condition x86Condition(RelationalCondition cond)
    {
        return static_cast<X86Assembler::Condition>(cond);
    }

    // Result conditions likewise map directly onto x86 condition codes.
    X86Assembler::Condition x86Condition(ResultCondition cond)
    {
        return static_cast<X86Assembler::Condition>(cond);
    }

    // Materialize condition 'cond' as 0 or 1 in 'dest' (setCC + zero-extend).
    void set32(X86Assembler::Condition cond, RegisterID dest)
    {
#if CPU(X86)
        // On 32-bit x86 we can only set the first 4 registers;
        // esp..edi are mapped to the 'h' registers!
        if (dest >= 4) {
            // Swap the target into eax, set the condition there, then swap back.
            m_assembler.xchgl_rr(dest, X86Registers::eax);
            m_assembler.setCC_r(cond, X86Registers::eax);
            m_assembler.movzbl_rr(X86Registers::eax, X86Registers::eax);
            m_assembler.xchgl_rr(dest, X86Registers::eax);
            return;
        }
#endif
        m_assembler.setCC_r(cond, dest);
        m_assembler.movzbl_rr(dest, dest);
    }
1506 
private:
    // Only MacroAssemblerX86 should be using the following method; SSE2 is always available on
    // x86_64, and clients & subclasses of MacroAssembler should be using 'supportsFloatingPoint()'.
    friend class MacroAssemblerX86;

#if CPU(X86)
#if OS(MAC_OS_X)

    // All X86 Macs are guaranteed to support at least SSE2,
    static bool isSSE2Present()
    {
        return true;
    }

#else // OS(MAC_OS_X)

    // Cached result of the runtime SSE2 probe below.
    enum SSE2CheckState {
        NotCheckedSSE2,
        HasSSE2,
        NoSSE2
    };

    // Detect SSE2 support via cpuid (function 1, SSE2 reported in EDX bit 26),
    // caching the result in s_sse2CheckState so the probe only runs once.
    static bool isSSE2Present()
    {
        if (s_sse2CheckState == NotCheckedSSE2) {
            // Default the flags value to zero; if the compiler is
            // not MSVC or GCC we will read this as SSE2 not present.
            int flags = 0;
#if COMPILER(MSVC)
            _asm {
                mov eax, 1 // cpuid function 1 gives us the standard feature set
                cpuid;
                mov flags, edx;
            }
#elif COMPILER(GCC)
            // ebx is saved and restored around cpuid, which clobbers it (it may
            // be reserved, e.g. as the PIC base register).
            asm (
                 "movl $0x1, %%eax;"
                 "pushl %%ebx;"
                 "cpuid;"
                 "popl %%ebx;"
                 "movl %%edx, %0;"
                 : "=g" (flags)
                 :
                 : "%eax", "%ecx", "%edx"
                 );
#endif
            static const int SSE2FeatureBit = 1 << 26;
            s_sse2CheckState = (flags & SSE2FeatureBit) ? HasSSE2 : NoSSE2;
        }
        // Only check once.
        ASSERT(s_sse2CheckState != NotCheckedSSE2);

        return s_sse2CheckState == HasSSE2;
    }

    static SSE2CheckState s_sse2CheckState;

#endif // OS(MAC_OS_X)
#elif !defined(NDEBUG) // CPU(X86)

    // On x86-64 we should never be checking for SSE2 in a non-debug build,
    // but non debug add this method to keep the asserts above happy.
    static bool isSSE2Present()
    {
        return true;
    }

#endif
1575 };
1576 
1577 } // namespace JSC
1578 
1579 #endif // ENABLE(ASSEMBLER)
1580 
1581 #endif // MacroAssemblerX86Common_h
1582