/*
 * Copyright (C) 2008 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef MacroAssemblerX86_64_h
#define MacroAssemblerX86_64_h

#if ENABLE(ASSEMBLER) && CPU(X86_64)

#include "MacroAssemblerX86Common.h"

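// Distance in bytes between the patchable pointer load emitted by moveWithPatch() and
// the label of the indirect call/jump through r11 that follows it (the call/jmp-through-r11
// encoding is 3 bytes). linkCall() and repatchCall() step back by this offset from the
// call's label to find the 64-bit pointer to patch.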
#define REPTACH_OFFSET_CALL_R11 3

namespace JSC {

class MacroAssemblerX86_64 : public MacroAssemblerX86Common {
protected:
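    // r11 is used as a scratch register to materialize 64-bit immediates and absolute
    // addresses that cannot be encoded directly in x86-64 instructions; most of the
    // operations below may clobber it.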
    static const X86Registers::RegisterID scratchRegister = X86Registers::r11;

public:
    static const Scale ScalePtr = TimesEight;

    using MacroAssemblerX86Common::add32;
    using MacroAssemblerX86Common::and32;
    using MacroAssemblerX86Common::or32;
    using MacroAssemblerX86Common::sub32;
    using MacroAssemblerX86Common::load32;
    using MacroAssemblerX86Common::store32;
    using MacroAssemblerX86Common::call;
    using MacroAssemblerX86Common::addDouble;
    using MacroAssemblerX86Common::loadDouble;
    using MacroAssemblerX86Common::convertInt32ToDouble;

    void add32(TrustedImm32 imm, AbsoluteAddress address)
    {
        move(TrustedImmPtr(address.m_ptr), scratchRegister);
        add32(imm, Address(scratchRegister));
    }

    void and32(TrustedImm32 imm, AbsoluteAddress address)
    {
        move(TrustedImmPtr(address.m_ptr), scratchRegister);
        and32(imm, Address(scratchRegister));
    }

    void or32(TrustedImm32 imm, AbsoluteAddress address)
    {
        move(TrustedImmPtr(address.m_ptr), scratchRegister);
        or32(imm, Address(scratchRegister));
    }

    void sub32(TrustedImm32 imm, AbsoluteAddress address)
    {
        move(TrustedImmPtr(address.m_ptr), scratchRegister);
        sub32(imm, Address(scratchRegister));
    }

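    // movl_mEAX loads from a full 64-bit absolute address but only into eax, so loads
    // to other destinations are routed through eax and then swapped into place.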
    void load32(void* address, RegisterID dest)
    {
        if (dest == X86Registers::eax)
            m_assembler.movl_mEAX(address);
        else {
            move(X86Registers::eax, dest);
            m_assembler.movl_mEAX(address);
            swap(X86Registers::eax, dest);
        }
    }

    void loadDouble(const void* address, FPRegisterID dest)
    {
        move(TrustedImmPtr(address), scratchRegister);
        loadDouble(scratchRegister, dest);
    }

    void addDouble(AbsoluteAddress address, FPRegisterID dest)
    {
        move(TrustedImmPtr(address.m_ptr), scratchRegister);
        m_assembler.addsd_mr(0, scratchRegister, dest);
    }

    void convertInt32ToDouble(TrustedImm32 imm, FPRegisterID dest)
    {
        move(imm, scratchRegister);
        m_assembler.cvtsi2sd_rr(scratchRegister, dest);
    }

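    // movl_EAXm stores eax to a 64-bit absolute address, so eax is saved in the scratch
    // register around the store and restored afterwards.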
    void store32(TrustedImm32 imm, void* address)
    {
        move(X86Registers::eax, scratchRegister);
        move(imm, X86Registers::eax);
        m_assembler.movl_EAXm(address);
        move(scratchRegister, X86Registers::eax);
    }

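    // A linkable call is emitted as a patchable 64-bit move of the target into r11
    // followed by an indirect call, so LinkBuffer/RepatchBuffer can rewrite the target
    // later (see linkCall()/repatchCall() below).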
    Call call()
    {
        DataLabelPtr label = moveWithPatch(TrustedImmPtr(0), scratchRegister);
        Call result = Call(m_assembler.call(scratchRegister), Call::Linkable);
        ASSERT_UNUSED(label, differenceBetween(label, result) == REPTACH_OFFSET_CALL_R11);
        return result;
    }

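    // Tail calls use the same patchable pointer move + indirect jump through r11 layout
    // as call(), so they can be linked and repatched in the same way.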
    Call tailRecursiveCall()
    {
        DataLabelPtr label = moveWithPatch(TrustedImmPtr(0), scratchRegister);
        Jump newJump = Jump(m_assembler.jmp_r(scratchRegister));
        ASSERT_UNUSED(label, differenceBetween(label, newJump) == REPTACH_OFFSET_CALL_R11);
        return Call::fromTailJump(newJump);
    }

    Call makeTailRecursiveCall(Jump oldJump)
    {
        oldJump.link(this);
        DataLabelPtr label = moveWithPatch(TrustedImmPtr(0), scratchRegister);
        Jump newJump = Jump(m_assembler.jmp_r(scratchRegister));
        ASSERT_UNUSED(label, differenceBetween(label, newJump) == REPTACH_OFFSET_CALL_R11);
        return Call::fromTailJump(newJump);
    }


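    // Pointer-width (64-bit) arithmetic, logic, memory, and branch operations; the
    // 32-bit variants are inherited from MacroAssemblerX86Common.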
    void addPtr(RegisterID src, RegisterID dest)
    {
        m_assembler.addq_rr(src, dest);
    }

    void addPtr(TrustedImm32 imm, RegisterID srcDest)
    {
        m_assembler.addq_ir(imm.m_value, srcDest);
    }

    void addPtr(TrustedImmPtr imm, RegisterID dest)
    {
        move(imm, scratchRegister);
        m_assembler.addq_rr(scratchRegister, dest);
    }

    void addPtr(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        m_assembler.leaq_mr(imm.m_value, src, dest);
    }

    void addPtr(TrustedImm32 imm, Address address)
    {
        m_assembler.addq_im(imm.m_value, address.offset, address.base);
    }

    void addPtr(TrustedImm32 imm, AbsoluteAddress address)
    {
        move(TrustedImmPtr(address.m_ptr), scratchRegister);
        addPtr(imm, Address(scratchRegister));
    }

    void andPtr(RegisterID src, RegisterID dest)
    {
        m_assembler.andq_rr(src, dest);
    }

    void andPtr(TrustedImm32 imm, RegisterID srcDest)
    {
        m_assembler.andq_ir(imm.m_value, srcDest);
    }

    void orPtr(RegisterID src, RegisterID dest)
    {
        m_assembler.orq_rr(src, dest);
    }

    void orPtr(TrustedImmPtr imm, RegisterID dest)
    {
        move(imm, scratchRegister);
        m_assembler.orq_rr(scratchRegister, dest);
    }

    void orPtr(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.orq_ir(imm.m_value, dest);
    }

    void orPtr(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        if (op1 == op2)
            move(op1, dest);
        else if (op1 == dest)
            orPtr(op2, dest);
        else {
            move(op2, dest);
            orPtr(op1, dest);
        }
    }

    void orPtr(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        move(src, dest);
        orPtr(imm, dest);
    }

    void subPtr(RegisterID src, RegisterID dest)
    {
        m_assembler.subq_rr(src, dest);
    }

    void subPtr(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.subq_ir(imm.m_value, dest);
    }

    void subPtr(TrustedImmPtr imm, RegisterID dest)
    {
        move(imm, scratchRegister);
        m_assembler.subq_rr(scratchRegister, dest);
    }

    void xorPtr(RegisterID src, RegisterID dest)
    {
        m_assembler.xorq_rr(src, dest);
    }

    void xorPtr(TrustedImm32 imm, RegisterID srcDest)
    {
        m_assembler.xorq_ir(imm.m_value, srcDest);
    }


    void loadPtr(ImplicitAddress address, RegisterID dest)
    {
        m_assembler.movq_mr(address.offset, address.base, dest);
    }

    void loadPtr(BaseIndex address, RegisterID dest)
    {
        m_assembler.movq_mr(address.offset, address.base, address.index, address.scale, dest);
    }

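    // Same eax-routing trick as load32()/store32() above: movq_mEAX/movq_EAXm address a
    // full 64-bit absolute location but operate only on rax.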
    void loadPtr(const void* address, RegisterID dest)
    {
        if (dest == X86Registers::eax)
            m_assembler.movq_mEAX(address);
        else {
            move(X86Registers::eax, dest);
            m_assembler.movq_mEAX(address);
            swap(X86Registers::eax, dest);
        }
    }

    DataLabel32 loadPtrWithAddressOffsetPatch(Address address, RegisterID dest)
    {
        m_assembler.movq_mr_disp32(address.offset, address.base, dest);
        return DataLabel32(this);
    }

    void storePtr(RegisterID src, ImplicitAddress address)
    {
        m_assembler.movq_rm(src, address.offset, address.base);
    }

    void storePtr(RegisterID src, BaseIndex address)
    {
        m_assembler.movq_rm(src, address.offset, address.base, address.index, address.scale);
    }

    void storePtr(RegisterID src, void* address)
    {
        if (src == X86Registers::eax)
            m_assembler.movq_EAXm(address);
        else {
            swap(X86Registers::eax, src);
            m_assembler.movq_EAXm(address);
            swap(X86Registers::eax, src);
        }
    }

    void storePtr(TrustedImmPtr imm, ImplicitAddress address)
    {
        move(imm, scratchRegister);
        storePtr(scratchRegister, address);
    }

    DataLabel32 storePtrWithAddressOffsetPatch(RegisterID src, Address address)
    {
        m_assembler.movq_rm_disp32(src, address.offset, address.base);
        return DataLabel32(this);
    }

    void movePtrToDouble(RegisterID src, FPRegisterID dest)
    {
        m_assembler.movq_rr(src, dest);
    }

    void moveDoubleToPtr(FPRegisterID src, RegisterID dest)
    {
        m_assembler.movq_rr(src, dest);
    }

    void comparePtr(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
    {
        if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
            m_assembler.testq_rr(left, left);
        else
            m_assembler.cmpq_ir(right.m_value, left);
        m_assembler.setCC_r(x86Condition(cond), dest);
        m_assembler.movzbl_rr(dest, dest);
    }

    Jump branchPtr(RelationalCondition cond, RegisterID left, RegisterID right)
    {
        m_assembler.cmpq_rr(right, left);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchPtr(RelationalCondition cond, RegisterID left, TrustedImmPtr right)
    {
        move(right, scratchRegister);
        return branchPtr(cond, left, scratchRegister);
    }

    Jump branchPtr(RelationalCondition cond, RegisterID left, Address right)
    {
        m_assembler.cmpq_mr(right.offset, right.base, left);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchPtr(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
    {
        move(TrustedImmPtr(left.m_ptr), scratchRegister);
        return branchPtr(cond, Address(scratchRegister), right);
    }

    Jump branchPtr(RelationalCondition cond, Address left, RegisterID right)
    {
        m_assembler.cmpq_rm(right, left.offset, left.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchPtr(RelationalCondition cond, Address left, TrustedImmPtr right)
    {
        move(right, scratchRegister);
        return branchPtr(cond, left, scratchRegister);
    }

    Jump branchTestPtr(ResultCondition cond, RegisterID reg, RegisterID mask)
    {
        m_assembler.testq_rr(reg, mask);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTestPtr(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
    {
        // if we are only interested in the low seven bits, this can be tested with a testb
        if (mask.m_value == -1)
            m_assembler.testq_rr(reg, reg);
        else if ((mask.m_value & ~0x7f) == 0)
            m_assembler.testb_i8r(mask.m_value, reg);
        else
            m_assembler.testq_i32r(mask.m_value, reg);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTestPtr(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
    {
        loadPtr(address.m_ptr, scratchRegister);
        return branchTestPtr(cond, scratchRegister, mask);
    }

    Jump branchTestPtr(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
    {
        if (mask.m_value == -1)
            m_assembler.cmpq_im(0, address.offset, address.base);
        else
            m_assembler.testq_i32m(mask.m_value, address.offset, address.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTestPtr(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
    {
        if (mask.m_value == -1)
            m_assembler.cmpq_im(0, address.offset, address.base, address.index, address.scale);
        else
            m_assembler.testq_i32m(mask.m_value, address.offset, address.base, address.index, address.scale);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }


    Jump branchAddPtr(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        addPtr(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSubPtr(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        subPtr(imm, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

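    // Emits a movq with a full 64-bit immediate so the pointer can later be repatched
    // in place via the returned DataLabelPtr.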
    DataLabelPtr moveWithPatch(TrustedImmPtr initialValue, RegisterID dest)
    {
        m_assembler.movq_i64r(initialValue.asIntptr(), dest);
        return DataLabelPtr(this);
    }

    Jump branchPtrWithPatch(RelationalCondition cond, RegisterID left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
    {
        dataLabel = moveWithPatch(initialRightValue, scratchRegister);
        return branchPtr(cond, left, scratchRegister);
    }

    Jump branchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
    {
        dataLabel = moveWithPatch(initialRightValue, scratchRegister);
        return branchPtr(cond, left, scratchRegister);
    }

    DataLabelPtr storePtrWithPatch(TrustedImmPtr initialValue, ImplicitAddress address)
    {
        DataLabelPtr label = moveWithPatch(initialValue, scratchRegister);
        storePtr(scratchRegister, address);
        return label;
    }

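    // ExtendedAddress carries a 64-bit offset that cannot be encoded as a displacement,
    // so it is materialized in the scratch register and combined with the base register
    // through a BaseIndex access.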
    using MacroAssemblerX86Common::branchTest8;
    Jump branchTest8(ResultCondition cond, ExtendedAddress address, TrustedImm32 mask = TrustedImm32(-1))
    {
        TrustedImmPtr addr(reinterpret_cast<void*>(address.offset));
        MacroAssemblerX86Common::move(addr, scratchRegister);
        return MacroAssemblerX86Common::branchTest8(cond, BaseIndex(scratchRegister, address.base, TimesOne), mask);
    }

    bool supportsFloatingPoint() const { return true; }
    // See comment on MacroAssemblerARMv7::supportsFloatingPointTruncate()
    bool supportsFloatingPointTruncate() const { return true; }
    bool supportsFloatingPointSqrt() const { return true; }

private:
    friend class LinkBuffer;
    friend class RepatchBuffer;

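    // Far (default) calls are linked and repatched by rewriting the 64-bit pointer that
    // moveWithPatch() loaded into r11, located REPTACH_OFFSET_CALL_R11 bytes before the
    // call's label; near calls are linked as ordinary rel32 calls.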
    static void linkCall(void* code, Call call, FunctionPtr function)
    {
        if (!call.isFlagSet(Call::Near))
            X86Assembler::linkPointer(code, call.m_jmp.labelAtOffset(-REPTACH_OFFSET_CALL_R11), function.value());
        else
            X86Assembler::linkCall(code, call.m_jmp, function.value());
    }

    static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
    {
        X86Assembler::repatchPointer(call.dataLabelPtrAtOffset(-REPTACH_OFFSET_CALL_R11).dataLocation(), destination.executableAddress());
    }

    static void repatchCall(CodeLocationCall call, FunctionPtr destination)
    {
        X86Assembler::repatchPointer(call.dataLabelPtrAtOffset(-REPTACH_OFFSET_CALL_R11).dataLocation(), destination.executableAddress());
    }

};

} // namespace JSC

#endif // ENABLE(ASSEMBLER) && CPU(X86_64)

#endif // MacroAssemblerX86_64_h