/*
 * Copyright (C) 2008 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef MacroAssemblerX86Common_h
#define MacroAssemblerX86Common_h

#if ENABLE(ASSEMBLER)

#include "X86Assembler.h"
#include "AbstractMacroAssembler.h"

namespace JSC {

class MacroAssemblerX86Common : public AbstractMacroAssembler<X86Assembler> {
    static const int DoubleConditionBitInvert = 0x10;
    static const int DoubleConditionBitSpecial = 0x20;
    static const int DoubleConditionBits = DoubleConditionBitInvert | DoubleConditionBitSpecial;

public:
    typedef X86Assembler::FPRegisterID FPRegisterID;

    enum RelationalCondition {
        Equal = X86Assembler::ConditionE,
        NotEqual = X86Assembler::ConditionNE,
        Above = X86Assembler::ConditionA,
        AboveOrEqual = X86Assembler::ConditionAE,
        Below = X86Assembler::ConditionB,
        BelowOrEqual = X86Assembler::ConditionBE,
        GreaterThan = X86Assembler::ConditionG,
        GreaterThanOrEqual = X86Assembler::ConditionGE,
        LessThan = X86Assembler::ConditionL,
        LessThanOrEqual = X86Assembler::ConditionLE
    };

    enum ResultCondition {
        Overflow = X86Assembler::ConditionO,
        Signed = X86Assembler::ConditionS,
        Zero = X86Assembler::ConditionE,
        NonZero = X86Assembler::ConditionNE
    };

    enum DoubleCondition {
        // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
        DoubleEqual = X86Assembler::ConditionE | DoubleConditionBitSpecial,
        DoubleNotEqual = X86Assembler::ConditionNE,
        DoubleGreaterThan = X86Assembler::ConditionA,
        DoubleGreaterThanOrEqual = X86Assembler::ConditionAE,
        DoubleLessThan = X86Assembler::ConditionA | DoubleConditionBitInvert,
        DoubleLessThanOrEqual = X86Assembler::ConditionAE | DoubleConditionBitInvert,
        // If either operand is NaN, these conditions always evaluate to true.
        DoubleEqualOrUnordered = X86Assembler::ConditionE,
        DoubleNotEqualOrUnordered = X86Assembler::ConditionNE | DoubleConditionBitSpecial,
        DoubleGreaterThanOrUnordered = X86Assembler::ConditionB | DoubleConditionBitInvert,
        DoubleGreaterThanOrEqualOrUnordered = X86Assembler::ConditionBE | DoubleConditionBitInvert,
        DoubleLessThanOrUnordered = X86Assembler::ConditionB,
        DoubleLessThanOrEqualOrUnordered = X86Assembler::ConditionBE,
    };
    COMPILE_ASSERT(
        !((X86Assembler::ConditionE | X86Assembler::ConditionNE | X86Assembler::ConditionA | X86Assembler::ConditionAE | X86Assembler::ConditionB | X86Assembler::ConditionBE) & DoubleConditionBits),
        DoubleConditionBits_should_not_interfere_with_X86Assembler_Condition_codes);

    static const RegisterID stackPointerRegister = X86Registers::esp;

    // Integer arithmetic operations:
    //
    // Operations are typically two operand - operation(source, srcDst).
    // For many operations the source may be a TrustedImm32, and the srcDst
    // operand may often be a memory location (explicitly described using an
    // Address object).

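    // Example (illustrative sketch only, not part of the original header; it
    // assumes a concrete MacroAssembler instance named 'masm'): emit
    // 'eax += 5', then subtract eax from a stack slot at [ebp + 8].
    //
    //     masm.add32(TrustedImm32(5), X86Registers::eax);
    //     masm.sub32(X86Registers::eax, Address(X86Registers::ebp, 8));
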
    void add32(RegisterID src, RegisterID dest)
    {
        m_assembler.addl_rr(src, dest);
    }

    void add32(TrustedImm32 imm, Address address)
    {
        m_assembler.addl_im(imm.m_value, address.offset, address.base);
    }

    void add32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.addl_ir(imm.m_value, dest);
    }

    void add32(Address src, RegisterID dest)
    {
        m_assembler.addl_mr(src.offset, src.base, dest);
    }

    void add32(RegisterID src, Address dest)
    {
        m_assembler.addl_rm(src, dest.offset, dest.base);
    }

    void and32(RegisterID src, RegisterID dest)
    {
        m_assembler.andl_rr(src, dest);
    }

    void and32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.andl_ir(imm.m_value, dest);
    }

    void and32(RegisterID src, Address dest)
    {
        m_assembler.andl_rm(src, dest.offset, dest.base);
    }

    void and32(Address src, RegisterID dest)
    {
        m_assembler.andl_mr(src.offset, src.base, dest);
    }

    void and32(TrustedImm32 imm, Address address)
    {
        m_assembler.andl_im(imm.m_value, address.offset, address.base);
    }

    void and32(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        if (op1 == op2)
            zeroExtend32ToPtr(op1, dest);
        else if (op1 == dest)
            and32(op2, dest);
        else {
            move(op2, dest);
            and32(op1, dest);
        }
    }

    void and32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        move(src, dest);
        and32(imm, dest);
    }

    void lshift32(RegisterID shift_amount, RegisterID dest)
    {
        ASSERT(shift_amount != dest);

        if (shift_amount == X86Registers::ecx)
            m_assembler.shll_CLr(dest);
        else {
            // On x86 we can only shift by ecx; if asked to shift by another register we'll
            // need to rejig the shift amount into ecx first, and restore the registers afterwards.
            // If dest is ecx, then shift the swapped register!
            swap(shift_amount, X86Registers::ecx);
            m_assembler.shll_CLr(dest == X86Registers::ecx ? shift_amount : dest);
            swap(shift_amount, X86Registers::ecx);
        }
    }

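    // Illustrative note (not in the original header): because of the swap above,
    // callers may pass any shift-amount register; the hardware still masks the
    // count to the low 5 bits for 32-bit shifts. For example, with a hypothetical
    // 'masm' instance:
    //
    //     masm.lshift32(X86Registers::edx, X86Registers::eax); // eax <<= (edx & 31)
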
    void lshift32(RegisterID src, RegisterID shift_amount, RegisterID dest)
    {
        ASSERT(shift_amount != dest);

        if (src != dest)
            move(src, dest);
        lshift32(shift_amount, dest);
    }

    void lshift32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.shll_i8r(imm.m_value, dest);
    }

    void lshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        if (src != dest)
            move(src, dest);
        lshift32(imm, dest);
    }

    void mul32(RegisterID src, RegisterID dest)
    {
        m_assembler.imull_rr(src, dest);
    }

    void mul32(Address src, RegisterID dest)
    {
        m_assembler.imull_mr(src.offset, src.base, dest);
    }

    void mul32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        m_assembler.imull_i32r(src, imm.m_value, dest);
    }

    void neg32(RegisterID srcDest)
    {
        m_assembler.negl_r(srcDest);
    }

    void neg32(Address srcDest)
    {
        m_assembler.negl_m(srcDest.offset, srcDest.base);
    }

    void not32(RegisterID srcDest)
    {
        m_assembler.notl_r(srcDest);
    }

    void not32(Address srcDest)
    {
        m_assembler.notl_m(srcDest.offset, srcDest.base);
    }

    void or32(RegisterID src, RegisterID dest)
    {
        m_assembler.orl_rr(src, dest);
    }

    void or32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.orl_ir(imm.m_value, dest);
    }

    void or32(RegisterID src, Address dest)
    {
        m_assembler.orl_rm(src, dest.offset, dest.base);
    }

    void or32(Address src, RegisterID dest)
    {
        m_assembler.orl_mr(src.offset, src.base, dest);
    }

    void or32(TrustedImm32 imm, Address address)
    {
        m_assembler.orl_im(imm.m_value, address.offset, address.base);
    }

    void or32(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        if (op1 == op2)
            zeroExtend32ToPtr(op1, dest);
        else if (op1 == dest)
            or32(op2, dest);
        else {
            move(op2, dest);
            or32(op1, dest);
        }
    }

    void or32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        move(src, dest);
        or32(imm, dest);
    }

    void rshift32(RegisterID shift_amount, RegisterID dest)
    {
        ASSERT(shift_amount != dest);

        if (shift_amount == X86Registers::ecx)
            m_assembler.sarl_CLr(dest);
        else {
            // On x86 we can only shift by ecx; if asked to shift by another register we'll
            // need to rejig the shift amount into ecx first, and restore the registers afterwards.
            // If dest is ecx, then shift the swapped register!
            swap(shift_amount, X86Registers::ecx);
            m_assembler.sarl_CLr(dest == X86Registers::ecx ? shift_amount : dest);
            swap(shift_amount, X86Registers::ecx);
        }
    }

    void rshift32(RegisterID src, RegisterID shift_amount, RegisterID dest)
    {
        ASSERT(shift_amount != dest);

        if (src != dest)
            move(src, dest);
        rshift32(shift_amount, dest);
    }

    void rshift32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.sarl_i8r(imm.m_value, dest);
    }

    void rshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        if (src != dest)
            move(src, dest);
        rshift32(imm, dest);
    }

    void urshift32(RegisterID shift_amount, RegisterID dest)
    {
        ASSERT(shift_amount != dest);

        if (shift_amount == X86Registers::ecx)
            m_assembler.shrl_CLr(dest);
        else {
            // On x86 we can only shift by ecx; if asked to shift by another register we'll
            // need to rejig the shift amount into ecx first, and restore the registers afterwards.
            // If dest is ecx, then shift the swapped register!
            swap(shift_amount, X86Registers::ecx);
            m_assembler.shrl_CLr(dest == X86Registers::ecx ? shift_amount : dest);
            swap(shift_amount, X86Registers::ecx);
        }
    }

    void urshift32(RegisterID src, RegisterID shift_amount, RegisterID dest)
    {
        ASSERT(shift_amount != dest);

        if (src != dest)
            move(src, dest);
        urshift32(shift_amount, dest);
    }

    void urshift32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.shrl_i8r(imm.m_value, dest);
    }

    void urshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        if (src != dest)
            move(src, dest);
        urshift32(imm, dest);
    }

    void sub32(RegisterID src, RegisterID dest)
    {
        m_assembler.subl_rr(src, dest);
    }

    void sub32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.subl_ir(imm.m_value, dest);
    }

    void sub32(TrustedImm32 imm, Address address)
    {
        m_assembler.subl_im(imm.m_value, address.offset, address.base);
    }

    void sub32(Address src, RegisterID dest)
    {
        m_assembler.subl_mr(src.offset, src.base, dest);
    }

    void sub32(RegisterID src, Address dest)
    {
        m_assembler.subl_rm(src, dest.offset, dest.base);
    }


    void xor32(RegisterID src, RegisterID dest)
    {
        m_assembler.xorl_rr(src, dest);
    }

    void xor32(TrustedImm32 imm, Address dest)
    {
        m_assembler.xorl_im(imm.m_value, dest.offset, dest.base);
    }

    void xor32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.xorl_ir(imm.m_value, dest);
    }

    void xor32(RegisterID src, Address dest)
    {
        m_assembler.xorl_rm(src, dest.offset, dest.base);
    }

    void xor32(Address src, RegisterID dest)
    {
        m_assembler.xorl_mr(src.offset, src.base, dest);
    }

    void xor32(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        if (op1 == op2)
            move(TrustedImm32(0), dest);
        else if (op1 == dest)
            xor32(op2, dest);
        else {
            move(op2, dest);
            xor32(op1, dest);
        }
    }

    void xor32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        move(src, dest);
        xor32(imm, dest);
    }

    void sqrtDouble(FPRegisterID src, FPRegisterID dst)
    {
        m_assembler.sqrtsd_rr(src, dst);
    }

    // Memory access operations:
    //
    // Loads are of the form load(address, destination) and stores of the form
    // store(source, address).  The source for a store may be a TrustedImm32.  Address
    // operand objects to loads and stores will be implicitly constructed if a
    // register is passed.

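    // Example (illustrative sketch only, assuming a MacroAssembler instance
    // 'masm'): load a 32-bit field from [ebp + 16] into eax, then store it to a
    // base + scaled-index address (edx scaled by 4).
    //
    //     masm.load32(Address(X86Registers::ebp, 16), X86Registers::eax);
    //     masm.store32(X86Registers::eax, BaseIndex(X86Registers::ebp, X86Registers::edx, TimesFour));
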
    void load32(ImplicitAddress address, RegisterID dest)
    {
        m_assembler.movl_mr(address.offset, address.base, dest);
    }

    void load32(BaseIndex address, RegisterID dest)
    {
        m_assembler.movl_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
    {
        load32(address, dest);
    }

    DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
    {
        m_assembler.movl_mr_disp32(address.offset, address.base, dest);
        return DataLabel32(this);
    }

    void load16(BaseIndex address, RegisterID dest)
    {
        m_assembler.movzwl_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    void load16(Address address, RegisterID dest)
    {
        m_assembler.movzwl_mr(address.offset, address.base, dest);
    }

    DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
    {
        m_assembler.movl_rm_disp32(src, address.offset, address.base);
        return DataLabel32(this);
    }

    void store32(RegisterID src, ImplicitAddress address)
    {
        m_assembler.movl_rm(src, address.offset, address.base);
    }

    void store32(RegisterID src, BaseIndex address)
    {
        m_assembler.movl_rm(src, address.offset, address.base, address.index, address.scale);
    }

    void store32(TrustedImm32 imm, ImplicitAddress address)
    {
        m_assembler.movl_i32m(imm.m_value, address.offset, address.base);
    }


    // Floating-point operations:
    //
    // Presently only supports SSE, not x87 floating point.

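    // Example (illustrative sketch only, assuming 'masm' and that the SSE2 check
    // below succeeds): load a double from [ebp + 24], add xmm1 to it, and store
    // the result back to the same slot.
    //
    //     masm.loadDouble(Address(X86Registers::ebp, 24), X86Registers::xmm0);
    //     masm.addDouble(X86Registers::xmm1, X86Registers::xmm0);
    //     masm.storeDouble(X86Registers::xmm0, Address(X86Registers::ebp, 24));
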
    void moveDouble(FPRegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        if (src != dest)
            m_assembler.movsd_rr(src, dest);
    }

    void loadDouble(ImplicitAddress address, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.movsd_mr(address.offset, address.base, dest);
    }

    void storeDouble(FPRegisterID src, ImplicitAddress address)
    {
        ASSERT(isSSE2Present());
        m_assembler.movsd_rm(src, address.offset, address.base);
    }

    void addDouble(FPRegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.addsd_rr(src, dest);
    }

    void addDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        if (op1 == dest)
            addDouble(op2, dest);
        else {
            moveDouble(op2, dest);
            addDouble(op1, dest);
        }
    }

    void addDouble(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.addsd_mr(src.offset, src.base, dest);
    }

    void divDouble(FPRegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.divsd_rr(src, dest);
    }

    void divDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        // B := A / B is invalid.
        ASSERT(op1 == dest || op2 != dest);

        moveDouble(op1, dest);
        divDouble(op2, dest);
    }

    void divDouble(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.divsd_mr(src.offset, src.base, dest);
    }

    void subDouble(FPRegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.subsd_rr(src, dest);
    }

    void subDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        // B := A - B is invalid.
        ASSERT(op1 == dest || op2 != dest);

        moveDouble(op1, dest);
        subDouble(op2, dest);
    }

    void subDouble(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.subsd_mr(src.offset, src.base, dest);
    }

    void mulDouble(FPRegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.mulsd_rr(src, dest);
    }

    void mulDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        if (op1 == dest)
            mulDouble(op2, dest);
        else {
            moveDouble(op2, dest);
            mulDouble(op1, dest);
        }
    }

    void mulDouble(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.mulsd_mr(src.offset, src.base, dest);
    }

    void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvtsi2sd_rr(src, dest);
    }

    void convertInt32ToDouble(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvtsi2sd_mr(src.offset, src.base, dest);
    }

    Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
    {
        ASSERT(isSSE2Present());

        if (cond & DoubleConditionBitInvert)
            m_assembler.ucomisd_rr(left, right);
        else
            m_assembler.ucomisd_rr(right, left);

        if (cond == DoubleEqual) {
            Jump isUnordered(m_assembler.jp());
            Jump result = Jump(m_assembler.je());
            isUnordered.link(this);
            return result;
        } else if (cond == DoubleNotEqualOrUnordered) {
            Jump isUnordered(m_assembler.jp());
            Jump isEqual(m_assembler.je());
            isUnordered.link(this);
            Jump result = jump();
            isEqual.link(this);
            return result;
        }

        ASSERT(!(cond & DoubleConditionBitSpecial));
        return Jump(m_assembler.jCC(static_cast<X86Assembler::Condition>(cond & ~DoubleConditionBits)));
    }

    // Truncates 'src' to an integer, and places the result in 'dest'.
    // If the result is not representable as a 32 bit value, branch.
    // May also branch for some values that are representable in 32 bits
    // (specifically, in this case, INT_MIN).
    enum BranchTruncateType { BranchIfTruncateFailed, BranchIfTruncateSuccessful };
    Jump branchTruncateDoubleToInt32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvttsd2si_rr(src, dest);
        return branch32(branchType ? NotEqual : Equal, dest, TrustedImm32(0x80000000));
    }

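    // Illustrative usage sketch (not in the original header, 'masm' assumed):
    // cvttsd2si produces 0x80000000 when the double cannot be represented, so the
    // default BranchIfTruncateFailed mode branches to a slow path on failure.
    //
    //     Jump truncateFailed = masm.branchTruncateDoubleToInt32(X86Registers::xmm0, X86Registers::eax);
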
    // Converts 'src' to an integer, and places the result in 'dest'.
    // If the result is not representable as a 32 bit value, branch.
    // May also branch for some values that are representable in 32 bits
    // (specifically, in this case, 0).
    void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID fpTemp)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvttsd2si_rr(src, dest);

        // If the result is zero, it might have been -0.0, and the double comparison won't catch this!
        failureCases.append(branchTest32(Zero, dest));

        // Convert the integer result back to float & compare to the original value - if not equal or unordered (NaN) then jump.
        convertInt32ToDouble(dest, fpTemp);
        m_assembler.ucomisd_rr(fpTemp, src);
        failureCases.append(m_assembler.jp());
        failureCases.append(m_assembler.jne());
    }

    Jump branchDoubleNonZero(FPRegisterID reg, FPRegisterID scratch)
    {
        ASSERT(isSSE2Present());
        m_assembler.xorpd_rr(scratch, scratch);
        return branchDouble(DoubleNotEqual, reg, scratch);
    }

    Jump branchDoubleZeroOrNaN(FPRegisterID reg, FPRegisterID scratch)
    {
        ASSERT(isSSE2Present());
        m_assembler.xorpd_rr(scratch, scratch);
        return branchDouble(DoubleEqualOrUnordered, reg, scratch);
    }

    // Stack manipulation operations:
    //
    // The ABI is assumed to provide a stack abstraction to memory,
    // containing machine word sized units of data.  Push and pop
    // operations add and remove a single register sized unit of data
    // to or from the stack.  Peek and poke operations read or write
    // values on the stack, without moving the current stack position.

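    // Example (illustrative only, assuming 'masm'): preserve a register across a
    // sequence that may clobber it.
    //
    //     masm.push(X86Registers::esi);
    //     // ... emit code that clobbers esi ...
    //     masm.pop(X86Registers::esi);
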
    void pop(RegisterID dest)
    {
        m_assembler.pop_r(dest);
    }

    void push(RegisterID src)
    {
        m_assembler.push_r(src);
    }

    void push(Address address)
    {
        m_assembler.push_m(address.offset, address.base);
    }

    void push(TrustedImm32 imm)
    {
        m_assembler.push_i32(imm.m_value);
    }


    // Register move operations:
    //
    // Move values in registers.

    void move(TrustedImm32 imm, RegisterID dest)
    {
        // Note: on 64-bit the TrustedImm32 value is zero extended into the register;
        // it may be useful to have a separate version that sign extends the value?
        if (!imm.m_value)
            m_assembler.xorl_rr(dest, dest);
        else
            m_assembler.movl_i32r(imm.m_value, dest);
    }

#if CPU(X86_64)
    void move(RegisterID src, RegisterID dest)
    {
        // Note: on 64-bit this is a full register move; perhaps it would be
        // useful to have separate move32 & movePtr, with move32 zero extending?
        if (src != dest)
            m_assembler.movq_rr(src, dest);
    }

    void move(TrustedImmPtr imm, RegisterID dest)
    {
        m_assembler.movq_i64r(imm.asIntptr(), dest);
    }

    void swap(RegisterID reg1, RegisterID reg2)
    {
        if (reg1 != reg2)
            m_assembler.xchgq_rr(reg1, reg2);
    }

    void signExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        m_assembler.movsxd_rr(src, dest);
    }

    void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        m_assembler.movl_rr(src, dest);
    }
#else
    void move(RegisterID src, RegisterID dest)
    {
        if (src != dest)
            m_assembler.movl_rr(src, dest);
    }

    void move(TrustedImmPtr imm, RegisterID dest)
    {
        m_assembler.movl_i32r(imm.asIntptr(), dest);
    }

    void swap(RegisterID reg1, RegisterID reg2)
    {
        if (reg1 != reg2)
            m_assembler.xchgl_rr(reg1, reg2);
    }

    void signExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        move(src, dest);
    }

    void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        move(src, dest);
    }
#endif


    // Forwards / external control flow operations:
    //
    // This set of jump and conditional branch operations return a Jump
    // object which may be linked at a later point, allowing forwards jumps,
    // or jumps that will require external linkage (after the code has been
    // relocated).
    //
    // For branches, signed <, >, <= and >= are denoted as l, g, le, and ge
    // respectively; for unsigned comparisons the names b, a, be, and ae are
    // used (representing the names 'below' and 'above').
    //
    // Operands to the comparison are provided in the expected order, e.g.
    // jle32(reg1, TrustedImm32(5)) will branch if the value held in reg1, when
    // treated as a signed 32-bit value, is less than or equal to 5.
    //
    // jz and jnz test whether the first operand is equal to zero, and take
    // an optional second operand of a mask under which to perform the test.

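    // Example (illustrative only, assuming 'masm'): the same comparison written
    // against the current interface, with the resulting Jump linked later.
    //
    //     Jump isSmall = masm.branch32(LessThanOrEqual, X86Registers::eax, TrustedImm32(5));
    //     // ... emit the fall-through path ...
    //     isSmall.link(&masm);
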
public:
    Jump branch8(RelationalCondition cond, Address left, TrustedImm32 right)
    {
        m_assembler.cmpb_im(right.m_value, left.offset, left.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32(RelationalCondition cond, RegisterID left, RegisterID right)
    {
        m_assembler.cmpl_rr(right, left);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32(RelationalCondition cond, RegisterID left, TrustedImm32 right)
    {
        if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
            m_assembler.testl_rr(left, left);
        else
            m_assembler.cmpl_ir(right.m_value, left);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32(RelationalCondition cond, TrustedImm32 left, RegisterID right)
    {
        if (((cond == Equal) || (cond == NotEqual)) && !left.m_value)
            m_assembler.testl_rr(right, right);
        else
            m_assembler.cmpl_ir(left.m_value, right);
        return Jump(m_assembler.jCC(x86Condition(commute(cond))));
    }

    Jump branch32(RelationalCondition cond, RegisterID left, Address right)
    {
        m_assembler.cmpl_mr(right.offset, right.base, left);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32(RelationalCondition cond, Address left, RegisterID right)
    {
        m_assembler.cmpl_rm(right, left.offset, left.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32(RelationalCondition cond, Address left, TrustedImm32 right)
    {
        m_assembler.cmpl_im(right.m_value, left.offset, left.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
    {
        m_assembler.cmpl_im(right.m_value, left.offset, left.base, left.index, left.scale);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32WithUnalignedHalfWords(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
    {
        return branch32(cond, left, right);
    }

    Jump branch16(RelationalCondition cond, BaseIndex left, RegisterID right)
    {
        m_assembler.cmpw_rm(right, left.offset, left.base, left.index, left.scale);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch16(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
    {
        ASSERT(!(right.m_value & 0xFFFF0000));

        m_assembler.cmpw_im(right.m_value, left.offset, left.base, left.index, left.scale);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTest32(ResultCondition cond, RegisterID reg, RegisterID mask)
    {
        m_assembler.testl_rr(reg, mask);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
    {
        // if we are only interested in the low seven bits, this can be tested with a testb
        if (mask.m_value == -1)
            m_assembler.testl_rr(reg, reg);
        else if ((mask.m_value & ~0x7f) == 0)
            m_assembler.testb_i8r(mask.m_value, reg);
        else
            m_assembler.testl_i32r(mask.m_value, reg);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTest32(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
    {
        if (mask.m_value == -1)
            m_assembler.cmpl_im(0, address.offset, address.base);
        else
            m_assembler.testl_i32m(mask.m_value, address.offset, address.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTest32(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
    {
        if (mask.m_value == -1)
            m_assembler.cmpl_im(0, address.offset, address.base, address.index, address.scale);
        else
            m_assembler.testl_i32m(mask.m_value, address.offset, address.base, address.index, address.scale);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTest8(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
    {
        // Byte in TrustedImm32 is not well defined, so be a little permissive here, but don't accept nonsense values.
        ASSERT(mask.m_value >= -128 && mask.m_value <= 255);
        if (mask.m_value == -1)
            m_assembler.testb_rr(reg, reg);
        else
            m_assembler.testb_i8r(mask.m_value, reg);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTest8(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
    {
        // Byte in TrustedImm32 is not well defined, so be a little permissive here, but don't accept nonsense values.
        ASSERT(mask.m_value >= -128 && mask.m_value <= 255);
        if (mask.m_value == -1)
            m_assembler.cmpb_im(0, address.offset, address.base);
        else
            m_assembler.testb_im(mask.m_value, address.offset, address.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTest8(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
    {
        // Byte in TrustedImm32 is not well defined, so be a little permissive here, but don't accept nonsense values.
        ASSERT(mask.m_value >= -128 && mask.m_value <= 255);
        if (mask.m_value == -1)
            m_assembler.cmpb_im(0, address.offset, address.base, address.index, address.scale);
        else
            m_assembler.testb_im(mask.m_value, address.offset, address.base, address.index, address.scale);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump jump()
    {
        return Jump(m_assembler.jmp());
    }

    void jump(RegisterID target)
    {
        m_assembler.jmp_r(target);
    }

    // Address is a memory location containing the address to jump to
    void jump(Address address)
    {
        m_assembler.jmp_m(address.offset, address.base);
    }


    // Arithmetic control flow operations:
    //
    // This set of conditional branch operations branch based
    // on the result of an arithmetic operation.  The operation
    // is performed as normal, storing the result.
    //
    // * jz operations branch if the result is zero.
    // * jo operations branch if the (signed) arithmetic
    //   operation caused an overflow to occur.

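    // Example (illustrative only, assuming 'masm'): increment eax and branch to
    // a slow path if the signed addition overflows.
    //
    //     Jump overflowed = masm.branchAdd32(Overflow, TrustedImm32(1), X86Registers::eax);
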
    Jump branchAdd32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        add32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        add32(imm, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchAdd32(ResultCondition cond, TrustedImm32 src, Address dest)
    {
        add32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchAdd32(ResultCondition cond, RegisterID src, Address dest)
    {
        add32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchAdd32(ResultCondition cond, Address src, RegisterID dest)
    {
        add32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchAdd32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
    {
        if (src1 == dest)
            return branchAdd32(cond, src2, dest);
        move(src2, dest);
        return branchAdd32(cond, src1, dest);
    }

    Jump branchAdd32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        move(src, dest);
        return branchAdd32(cond, imm, dest);
    }

    Jump branchMul32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        mul32(src, dest);
        if (cond != Overflow)
            m_assembler.testl_rr(dest, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchMul32(ResultCondition cond, Address src, RegisterID dest)
    {
        mul32(src, dest);
        if (cond != Overflow)
            m_assembler.testl_rr(dest, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchMul32(ResultCondition cond, TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        mul32(imm, src, dest);
        if (cond != Overflow)
            m_assembler.testl_rr(dest, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchMul32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
    {
        if (src1 == dest)
            return branchMul32(cond, src2, dest);
        move(src2, dest);
        return branchMul32(cond, src1, dest);
    }

    Jump branchSub32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        sub32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSub32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        sub32(imm, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSub32(ResultCondition cond, TrustedImm32 imm, Address dest)
    {
        sub32(imm, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSub32(ResultCondition cond, RegisterID src, Address dest)
    {
        sub32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSub32(ResultCondition cond, Address src, RegisterID dest)
    {
        sub32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSub32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
    {
        // B := A - B is invalid.
        ASSERT(src1 == dest || src2 != dest);

        move(src1, dest);
        return branchSub32(cond, src2, dest);
    }

    Jump branchSub32(ResultCondition cond, RegisterID src1, TrustedImm32 src2, RegisterID dest)
    {
        move(src1, dest);
        return branchSub32(cond, src2, dest);
    }

    Jump branchNeg32(ResultCondition cond, RegisterID srcDest)
    {
        neg32(srcDest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchOr32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        or32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }


    // Miscellaneous operations:

    void breakpoint()
    {
        m_assembler.int3();
    }

    Call nearCall()
    {
        return Call(m_assembler.call(), Call::LinkableNear);
    }

    Call call(RegisterID target)
    {
        return Call(m_assembler.call(target), Call::None);
    }

    void call(Address address)
    {
        m_assembler.call_m(address.offset, address.base);
    }

    void ret()
    {
        m_assembler.ret();
    }

    void compare32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
    {
        m_assembler.cmpl_rr(right, left);
        m_assembler.setCC_r(x86Condition(cond), dest);
        m_assembler.movzbl_rr(dest, dest);
    }

    void compare32(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
    {
        if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
            m_assembler.testl_rr(left, left);
        else
            m_assembler.cmpl_ir(right.m_value, left);
        m_assembler.setCC_r(x86Condition(cond), dest);
        m_assembler.movzbl_rr(dest, dest);
    }

    // FIXME:
    // The mask should be optional... perhaps the argument order should be
    // dest-src, operations always have a dest? ... possibly not true, considering
    // asm ops like test, or pseudo ops like pop().

    void test8(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
    {
        if (mask.m_value == -1)
            m_assembler.cmpb_im(0, address.offset, address.base);
        else
            m_assembler.testb_im(mask.m_value, address.offset, address.base);
        m_assembler.setCC_r(x86Condition(cond), dest);
        m_assembler.movzbl_rr(dest, dest);
    }

    void test32(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
    {
        if (mask.m_value == -1)
            m_assembler.cmpl_im(0, address.offset, address.base);
        else
            m_assembler.testl_i32m(mask.m_value, address.offset, address.base);
        m_assembler.setCC_r(x86Condition(cond), dest);
        m_assembler.movzbl_rr(dest, dest);
    }

    // Invert a relational condition, e.g. == becomes !=, < becomes >=, etc.
    static RelationalCondition invert(RelationalCondition cond)
    {
        return static_cast<RelationalCondition>(cond ^ 1);
    }

    // Commute a relational condition; returns a new condition that will produce
    // the same results given the same inputs but with their positions exchanged.
    static RelationalCondition commute(RelationalCondition cond)
    {
        // Equality is commutative!
        if (cond == Equal || cond == NotEqual)
            return cond;

        // Based on the values of x86 condition codes, remap > with < and >= with <=
        if (cond >= LessThan) {
            ASSERT(cond == LessThan || cond == LessThanOrEqual || cond == GreaterThan || cond == GreaterThanOrEqual);
            return static_cast<RelationalCondition>(X86Assembler::ConditionL + X86Assembler::ConditionG - cond);
        }

        // As above, for unsigned conditions.
        ASSERT(cond == Below || cond == BelowOrEqual || cond == Above || cond == AboveOrEqual);
        return static_cast<RelationalCondition>(X86Assembler::ConditionB + X86Assembler::ConditionA - cond);
    }

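    // Illustrative note (not in the original header): commute(LessThan) yields
    // GreaterThan, which is how branch32(RelationalCondition, TrustedImm32, RegisterID)
    // above can compare the register against the immediate and still test the
    // caller's intended "immediate < register" relation.
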
protected:
    X86Assembler::Condition x86Condition(RelationalCondition cond)
    {
        return static_cast<X86Assembler::Condition>(cond);
    }

    X86Assembler::Condition x86Condition(ResultCondition cond)
    {
        return static_cast<X86Assembler::Condition>(cond);
    }

private:
    // Only MacroAssemblerX86 should be using the following method; SSE2 is always available on
    // x86_64, and clients & subclasses of MacroAssembler should be using 'supportsFloatingPoint()'.
    friend class MacroAssemblerX86;

#if CPU(X86)
#if OS(MAC_OS_X)

    // All X86 Macs are guaranteed to support at least SSE2.
    static bool isSSE2Present()
    {
        return true;
    }

#else // OS(MAC_OS_X)

    enum SSE2CheckState {
        NotCheckedSSE2,
        HasSSE2,
        NoSSE2
    };

    static bool isSSE2Present()
    {
        if (s_sse2CheckState == NotCheckedSSE2) {
            // Default the flags value to zero; if the compiler is
            // not MSVC or GCC we will read this as SSE2 not present.
            int flags = 0;
#if COMPILER(MSVC)
            _asm {
                mov eax, 1 // cpuid function 1 gives us the standard feature set
                cpuid;
                mov flags, edx;
            }
#elif COMPILER(GCC)
            asm (
                 "movl $0x1, %%eax;"
                 "pushl %%ebx;"
                 "cpuid;"
                 "popl %%ebx;"
                 "movl %%edx, %0;"
                 : "=g" (flags)
                 :
                 : "%eax", "%ecx", "%edx"
                 );
#endif
            static const int SSE2FeatureBit = 1 << 26;
            s_sse2CheckState = (flags & SSE2FeatureBit) ? HasSSE2 : NoSSE2;
        }
        // Only check once.
        ASSERT(s_sse2CheckState != NotCheckedSSE2);

        return s_sse2CheckState == HasSSE2;
    }

    static SSE2CheckState s_sse2CheckState;

#endif // OS(MAC_OS_X)
#elif !defined(NDEBUG) // CPU(X86)

    // On x86-64 we should never be checking for SSE2 in a non-debug build,
    // but debug builds still need this method defined to keep the asserts above happy.
    static bool isSSE2Present()
    {
        return true;
    }

#endif
};

} // namespace JSC

#endif // ENABLE(ASSEMBLER)

#endif // MacroAssemblerX86Common_h