/*
 * Copyright (C) 2008 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 */ 25 26 #ifndef MacroAssemblerX86_h 27 #define MacroAssemblerX86_h 28 29 #if ENABLE(ASSEMBLER) && CPU(X86) 30 31 #include "MacroAssemblerX86Common.h" 32 33 namespace JSC { 34 35 class MacroAssemblerX86 : public MacroAssemblerX86Common { 36 public: 37 static const Scale ScalePtr = TimesFour; 38 static const int PointerSize = 4; 39 40 using MacroAssemblerX86Common::add32; 41 using MacroAssemblerX86Common::and32; 42 using MacroAssemblerX86Common::branchAdd32; 43 using MacroAssemblerX86Common::branchSub32; 44 using MacroAssemblerX86Common::sub32; 45 using MacroAssemblerX86Common::or32; 46 using MacroAssemblerX86Common::load32; 47 using MacroAssemblerX86Common::store32; 48 using MacroAssemblerX86Common::store8; 49 using MacroAssemblerX86Common::branch32; 50 using MacroAssemblerX86Common::call; 51 using MacroAssemblerX86Common::jump; 52 using MacroAssemblerX86Common::addDouble; 53 using MacroAssemblerX86Common::loadDouble; 54 using MacroAssemblerX86Common::storeDouble; 55 using MacroAssemblerX86Common::convertInt32ToDouble; 56 using MacroAssemblerX86Common::branchTest8; 57 add32(TrustedImm32 imm,RegisterID src,RegisterID dest)58 void add32(TrustedImm32 imm, RegisterID src, RegisterID dest) 59 { 60 m_assembler.leal_mr(imm.m_value, src, dest); 61 } 62 add32(TrustedImm32 imm,AbsoluteAddress address)63 void add32(TrustedImm32 imm, AbsoluteAddress address) 64 { 65 m_assembler.addl_im(imm.m_value, address.m_ptr); 66 } 67 add32(AbsoluteAddress address,RegisterID dest)68 void add32(AbsoluteAddress address, RegisterID dest) 69 { 70 m_assembler.addl_mr(address.m_ptr, dest); 71 } 72 add64(TrustedImm32 imm,AbsoluteAddress address)73 void add64(TrustedImm32 imm, AbsoluteAddress address) 74 { 75 m_assembler.addl_im(imm.m_value, address.m_ptr); 76 m_assembler.adcl_im(imm.m_value >> 31, reinterpret_cast<const char*>(address.m_ptr) + sizeof(int32_t)); 77 } 78 getEffectiveAddress(BaseIndex address,RegisterID dest)79 void getEffectiveAddress(BaseIndex address, RegisterID dest) 80 { 81 
return x86Lea32(address, dest); 82 } 83 and32(TrustedImm32 imm,AbsoluteAddress address)84 void and32(TrustedImm32 imm, AbsoluteAddress address) 85 { 86 m_assembler.andl_im(imm.m_value, address.m_ptr); 87 } 88 or32(TrustedImm32 imm,AbsoluteAddress address)89 void or32(TrustedImm32 imm, AbsoluteAddress address) 90 { 91 m_assembler.orl_im(imm.m_value, address.m_ptr); 92 } 93 or32(RegisterID reg,AbsoluteAddress address)94 void or32(RegisterID reg, AbsoluteAddress address) 95 { 96 m_assembler.orl_rm(reg, address.m_ptr); 97 } 98 sub32(TrustedImm32 imm,AbsoluteAddress address)99 void sub32(TrustedImm32 imm, AbsoluteAddress address) 100 { 101 m_assembler.subl_im(imm.m_value, address.m_ptr); 102 } 103 load32(const void * address,RegisterID dest)104 void load32(const void* address, RegisterID dest) 105 { 106 m_assembler.movl_mr(address, dest); 107 } 108 convertibleLoadPtr(Address address,RegisterID dest)109 ConvertibleLoadLabel convertibleLoadPtr(Address address, RegisterID dest) 110 { 111 ConvertibleLoadLabel result = ConvertibleLoadLabel(this); 112 m_assembler.movl_mr(address.offset, address.base, dest); 113 return result; 114 } 115 addDouble(AbsoluteAddress address,FPRegisterID dest)116 void addDouble(AbsoluteAddress address, FPRegisterID dest) 117 { 118 m_assembler.addsd_mr(address.m_ptr, dest); 119 } 120 storeDouble(FPRegisterID src,const void * address)121 void storeDouble(FPRegisterID src, const void* address) 122 { 123 ASSERT(isSSE2Present()); 124 m_assembler.movsd_rm(src, address); 125 } 126 convertInt32ToDouble(AbsoluteAddress src,FPRegisterID dest)127 void convertInt32ToDouble(AbsoluteAddress src, FPRegisterID dest) 128 { 129 m_assembler.cvtsi2sd_mr(src.m_ptr, dest); 130 } 131 convertUInt32ToDouble(RegisterID src,FPRegisterID dest,RegisterID scratch)132 void convertUInt32ToDouble(RegisterID src, FPRegisterID dest, RegisterID scratch) 133 { 134 Jump intRange = branch32(GreaterThanOrEqual, src, TrustedImm32(0)); 135 and32(TrustedImm32(INT_MAX), src, scratch); 136 
convertInt32ToDouble(scratch, dest); 137 static const double magic = double(INT_MAX) + 1; 138 addDouble(AbsoluteAddress(&magic), dest); 139 Jump done = jump(); 140 intRange.link(this); 141 convertInt32ToDouble(src, dest); 142 done.link(this); 143 } 144 store32(TrustedImm32 imm,void * address)145 void store32(TrustedImm32 imm, void* address) 146 { 147 m_assembler.movl_i32m(imm.m_value, address); 148 } 149 store32(RegisterID src,void * address)150 void store32(RegisterID src, void* address) 151 { 152 m_assembler.movl_rm(src, address); 153 } 154 store8(TrustedImm32 imm,void * address)155 void store8(TrustedImm32 imm, void* address) 156 { 157 ASSERT(-128 <= imm.m_value && imm.m_value < 128); 158 m_assembler.movb_i8m(imm.m_value, address); 159 } 160 161 // Possibly clobbers src. moveDoubleToInts(FPRegisterID src,RegisterID dest1,RegisterID dest2)162 void moveDoubleToInts(FPRegisterID src, RegisterID dest1, RegisterID dest2) 163 { 164 movePackedToInt32(src, dest1); 165 rshiftPacked(TrustedImm32(32), src); 166 movePackedToInt32(src, dest2); 167 } 168 moveIntsToDouble(RegisterID src1,RegisterID src2,FPRegisterID dest,FPRegisterID scratch)169 void moveIntsToDouble(RegisterID src1, RegisterID src2, FPRegisterID dest, FPRegisterID scratch) 170 { 171 moveInt32ToPacked(src1, dest); 172 moveInt32ToPacked(src2, scratch); 173 lshiftPacked(TrustedImm32(32), scratch); 174 orPacked(scratch, dest); 175 } 176 branchAdd32(ResultCondition cond,TrustedImm32 imm,AbsoluteAddress dest)177 Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, AbsoluteAddress dest) 178 { 179 m_assembler.addl_im(imm.m_value, dest.m_ptr); 180 return Jump(m_assembler.jCC(x86Condition(cond))); 181 } 182 branchSub32(ResultCondition cond,TrustedImm32 imm,AbsoluteAddress dest)183 Jump branchSub32(ResultCondition cond, TrustedImm32 imm, AbsoluteAddress dest) 184 { 185 m_assembler.subl_im(imm.m_value, dest.m_ptr); 186 return Jump(m_assembler.jCC(x86Condition(cond))); 187 } 188 branch32(RelationalCondition 
cond,AbsoluteAddress left,RegisterID right)189 Jump branch32(RelationalCondition cond, AbsoluteAddress left, RegisterID right) 190 { 191 m_assembler.cmpl_rm(right, left.m_ptr); 192 return Jump(m_assembler.jCC(x86Condition(cond))); 193 } 194 branch32(RelationalCondition cond,AbsoluteAddress left,TrustedImm32 right)195 Jump branch32(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right) 196 { 197 m_assembler.cmpl_im(right.m_value, left.m_ptr); 198 return Jump(m_assembler.jCC(x86Condition(cond))); 199 } 200 call()201 Call call() 202 { 203 return Call(m_assembler.call(), Call::Linkable); 204 } 205 callToRetrieveIP()206 void callToRetrieveIP() 207 { 208 m_assembler.call(); 209 } 210 211 // Address is a memory location containing the address to jump to jump(AbsoluteAddress address)212 void jump(AbsoluteAddress address) 213 { 214 m_assembler.jmp_m(address.m_ptr); 215 } 216 tailRecursiveCall()217 Call tailRecursiveCall() 218 { 219 return Call::fromTailJump(jump()); 220 } 221 makeTailRecursiveCall(Jump oldJump)222 Call makeTailRecursiveCall(Jump oldJump) 223 { 224 return Call::fromTailJump(oldJump); 225 } 226 227 moveWithPatch(TrustedImmPtr initialValue,RegisterID dest)228 DataLabelPtr moveWithPatch(TrustedImmPtr initialValue, RegisterID dest) 229 { 230 padBeforePatch(); 231 m_assembler.movl_i32r(initialValue.asIntptr(), dest); 232 return DataLabelPtr(this); 233 } 234 235 Jump branchTest8(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1)) 236 { 237 ASSERT(mask.m_value >= -128 && mask.m_value <= 255); 238 if (mask.m_value == -1) 239 m_assembler.cmpb_im(0, address.m_ptr); 240 else 241 m_assembler.testb_im(mask.m_value, address.m_ptr); 242 return Jump(m_assembler.jCC(x86Condition(cond))); 243 } 244 245 Jump branchPtrWithPatch(RelationalCondition cond, RegisterID left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0)) 246 { 247 padBeforePatch(); 248 
m_assembler.cmpl_ir_force32(initialRightValue.asIntptr(), left); 249 dataLabel = DataLabelPtr(this); 250 return Jump(m_assembler.jCC(x86Condition(cond))); 251 } 252 253 Jump branchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0)) 254 { 255 padBeforePatch(); 256 m_assembler.cmpl_im_force32(initialRightValue.asIntptr(), left.offset, left.base); 257 dataLabel = DataLabelPtr(this); 258 return Jump(m_assembler.jCC(x86Condition(cond))); 259 } 260 storePtrWithPatch(TrustedImmPtr initialValue,ImplicitAddress address)261 DataLabelPtr storePtrWithPatch(TrustedImmPtr initialValue, ImplicitAddress address) 262 { 263 padBeforePatch(); 264 m_assembler.movl_i32m(initialValue.asIntptr(), address.offset, address.base); 265 return DataLabelPtr(this); 266 } 267 supportsFloatingPoint()268 static bool supportsFloatingPoint() { return isSSE2Present(); } 269 // See comment on MacroAssemblerARMv7::supportsFloatingPointTruncate() supportsFloatingPointTruncate()270 static bool supportsFloatingPointTruncate() { return isSSE2Present(); } supportsFloatingPointSqrt()271 static bool supportsFloatingPointSqrt() { return isSSE2Present(); } supportsFloatingPointAbs()272 static bool supportsFloatingPointAbs() { return isSSE2Present(); } 273 readCallTarget(CodeLocationCall call)274 static FunctionPtr readCallTarget(CodeLocationCall call) 275 { 276 intptr_t offset = reinterpret_cast<int32_t*>(call.dataLocation())[-1]; 277 return FunctionPtr(reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(call.dataLocation()) + offset)); 278 } 279 canJumpReplacePatchableBranchPtrWithPatch()280 static bool canJumpReplacePatchableBranchPtrWithPatch() { return true; } 281 startOfBranchPtrWithPatchOnRegister(CodeLocationDataLabelPtr label)282 static CodeLocationLabel startOfBranchPtrWithPatchOnRegister(CodeLocationDataLabelPtr label) 283 { 284 const int opcodeBytes = 1; 285 const int modRMBytes = 1; 286 const int immediateBytes = 4; 287 
const int totalBytes = opcodeBytes + modRMBytes + immediateBytes; 288 ASSERT(totalBytes >= maxJumpReplacementSize()); 289 return label.labelAtOffset(-totalBytes); 290 } 291 startOfPatchableBranchPtrWithPatchOnAddress(CodeLocationDataLabelPtr label)292 static CodeLocationLabel startOfPatchableBranchPtrWithPatchOnAddress(CodeLocationDataLabelPtr label) 293 { 294 const int opcodeBytes = 1; 295 const int modRMBytes = 1; 296 const int offsetBytes = 0; 297 const int immediateBytes = 4; 298 const int totalBytes = opcodeBytes + modRMBytes + offsetBytes + immediateBytes; 299 ASSERT(totalBytes >= maxJumpReplacementSize()); 300 return label.labelAtOffset(-totalBytes); 301 } 302 revertJumpReplacementToBranchPtrWithPatch(CodeLocationLabel instructionStart,RegisterID reg,void * initialValue)303 static void revertJumpReplacementToBranchPtrWithPatch(CodeLocationLabel instructionStart, RegisterID reg, void* initialValue) 304 { 305 X86Assembler::revertJumpTo_cmpl_ir_force32(instructionStart.executableAddress(), reinterpret_cast<intptr_t>(initialValue), reg); 306 } 307 revertJumpReplacementToPatchableBranchPtrWithPatch(CodeLocationLabel instructionStart,Address address,void * initialValue)308 static void revertJumpReplacementToPatchableBranchPtrWithPatch(CodeLocationLabel instructionStart, Address address, void* initialValue) 309 { 310 ASSERT(!address.offset); 311 X86Assembler::revertJumpTo_cmpl_im_force32(instructionStart.executableAddress(), reinterpret_cast<intptr_t>(initialValue), 0, address.base); 312 } 313 314 private: 315 template <typename, template <typename> class> friend class LinkBufferBase; 316 friend class RepatchBuffer; 317 linkCall(void * code,Call call,FunctionPtr function)318 static void linkCall(void* code, Call call, FunctionPtr function) 319 { 320 X86Assembler::linkCall(code, call.m_label, function.value()); 321 } 322 repatchCall(CodeLocationCall call,CodeLocationLabel destination)323 static void repatchCall(CodeLocationCall call, CodeLocationLabel destination) 
324 { 325 X86Assembler::relinkCall(call.dataLocation(), destination.executableAddress()); 326 } 327 repatchCall(CodeLocationCall call,FunctionPtr destination)328 static void repatchCall(CodeLocationCall call, FunctionPtr destination) 329 { 330 X86Assembler::relinkCall(call.dataLocation(), destination.executableAddress()); 331 } 332 }; 333 334 } // namespace JSC 335 336 #endif // ENABLE(ASSEMBLER) 337 338 #endif // MacroAssemblerX86_h 339