/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef jit_arm64_MacroAssembler_arm64_h
#define jit_arm64_MacroAssembler_arm64_h

#include "jit/arm64/Assembler-arm64.h"
#include "jit/arm64/vixl/Debugger-vixl.h"
#include "jit/arm64/vixl/MacroAssembler-vixl.h"

#include "jit/AtomicOp.h"
#include "jit/JitFrames.h"
#include "jit/MoveResolver.h"

namespace js {
namespace jit {

// Import VIXL operands directly into the jit namespace for shared code.
using vixl::Operand;
using vixl::MemOperand;

// A JSValue tag pre-shifted into its in-word position (the high bits of a
// 64-bit boxed Value), usable directly as an Orr/logical immediate operand.
struct ImmShiftedTag : public ImmWord
{
    // Wrap an already-shifted tag.
    ImmShiftedTag(JSValueShiftedTag shtag)
      : ImmWord((uintptr_t)shtag)
    { }

    // Convert an unshifted JSValueType to its shifted-tag bit pattern.
    ImmShiftedTag(JSValueType type)
      : ImmWord(uintptr_t(JSValueShiftedTag(JSVAL_TYPE_TO_SHIFTED_TAG(type))))
    { }
};

// An unshifted JSValue tag as a 32-bit immediate (for tag comparisons after
// the tag has been extracted to the low bits).
struct ImmTag : public Imm32
{
    ImmTag(JSValueTag tag)
      : Imm32(tag)
    { }
};

// Compatibility layer adapting the VIXL MacroAssembler interface to the
// platform-independent js::jit MacroAssembler API expected by shared code.
class MacroAssemblerCompat : public vixl::MacroAssembler
{
  public:
    typedef vixl::Condition Condition;

  private:
    // Perform a downcast. Should be removed by Bug 996602.
    js::jit::MacroAssembler& asMasm();
    const js::jit::MacroAssembler& asMasm() const;

  public:
    // Restrict to only VIXL-internal functions.
    vixl::MacroAssembler& asVIXL();
    const MacroAssembler& asVIXL() const;

  protected:
    // Set to false if an allocation inside the assembler fails; checked by oom().
    bool enoughMemory_;
    // Byte count of data pushed onto the frame since the last reset; kept in
    // sync by the capital-P Push/Pop helpers via adjustFrame().
    uint32_t framePushed_;

    MacroAssemblerCompat()
      : vixl::MacroAssembler(),
        enoughMemory_(true),
        framePushed_(0)
    { }

  protected:
    MoveResolver moveResolver_;

  public:
    // True if any allocation in the underlying buffer or in this layer failed.
    bool oom() const {
        return Assembler::oom() || !enoughMemory_;
    }
    // Convert a js::jit Address to a VIXL base+offset MemOperand.
    static MemOperand toMemOperand(Address& a) {
        return MemOperand(ARMRegister(a.base, 64), a.offset);
    }
    // Emit a load/store for a base+index*scale+offset address. VIXL addressing
    // can encode base + (index << scale) directly only when there is no offset
    // and the scale matches the access size; otherwise materialize the
    // effective address in a scratch register first.
    void doBaseIndex(const vixl::CPURegister& rt, const BaseIndex& addr, vixl::LoadStoreOp op) {
        const ARMRegister base = ARMRegister(addr.base, 64);
        const ARMRegister index = ARMRegister(addr.index, 64);
        const unsigned scale = addr.scale;

        if (!addr.offset && (!scale || scale == static_cast<unsigned>(CalcLSDataSize(op)))) {
            LoadStoreMacro(rt, MemOperand(base, index, vixl::LSL, scale), op);
            return;
        }

        vixl::UseScratchRegisterScope temps(this);
        ARMRegister scratch64 = temps.AcquireX();
        MOZ_ASSERT(!scratch64.Is(rt));
        MOZ_ASSERT(!scratch64.Is(base));
        MOZ_ASSERT(!scratch64.Is(index));

        Add(scratch64, base, Operand(index, vixl::LSL, scale));
        LoadStoreMacro(rt, MemOperand(scratch64, addr.offset), op);
    }

    // Capital-P Push: pushes AND tracks the stack depth in framePushed_.
    void Push(ARMRegister reg) {
        push(reg);
        adjustFrame(reg.size() / 8);
    }
    void Push(Register reg) {
        vixl::MacroAssembler::Push(ARMRegister(reg, 64));
        adjustFrame(8);
    }
    void Push(Imm32 imm) {
        push(imm);
        adjustFrame(8);
    }
    void Push(FloatRegister f) {
        push(ARMFPRegister(f, 64));
        adjustFrame(8);
    }
    void Push(ImmPtr imm) {
        push(imm);
        adjustFrame(sizeof(void*));
    }

    // Lowercase push: raw push, does NOT update framePushed_.
    void push(FloatRegister f) {
        vixl::MacroAssembler::Push(ARMFPRegister(f, 64));
    }
    void push(ARMFPRegister f) {
        vixl::MacroAssembler::Push(f);
    }
    // Immediate pushes: zero is pushed straight from xzr; any other value is
    // materialized in a scratch register first.
    void push(Imm32 imm) {
        if (imm.value == 0) {
            vixl::MacroAssembler::Push(vixl::xzr);
        } else {
            vixl::UseScratchRegisterScope temps(this);
            const ARMRegister scratch64 = temps.AcquireX();
            move32(imm, scratch64.asUnsized());
            vixl::MacroAssembler::Push(scratch64);
        }
    }
    void push(ImmWord imm) {
        if (imm.value == 0) {
            vixl::MacroAssembler::Push(vixl::xzr);
        } else {
            vixl::UseScratchRegisterScope temps(this);
            const ARMRegister scratch64 = temps.AcquireX();
            Mov(scratch64, imm.value);
            vixl::MacroAssembler::Push(scratch64);
        }
    }
    void push(ImmPtr imm) {
        if (imm.value == nullptr) {
            vixl::MacroAssembler::Push(vixl::xzr);
        } else {
            vixl::UseScratchRegisterScope temps(this);
            const ARMRegister scratch64 = temps.AcquireX();
            movePtr(imm, scratch64.asUnsized());
            vixl::MacroAssembler::Push(scratch64);
        }
    }
    // GC pointers go through movePtr(ImmGCPtr, ...) so the embedded pointer
    // gets a data relocation and can be traced/patched.
    void push(ImmGCPtr imm) {
        if (imm.value == nullptr) {
            vixl::MacroAssembler::Push(vixl::xzr);
        } else {
            vixl::UseScratchRegisterScope temps(this);
            const ARMRegister scratch64 = temps.AcquireX();
            movePtr(imm, scratch64.asUnsized());
            vixl::MacroAssembler::Push(scratch64);
        }
    }
    void push(ARMRegister reg) {
        vixl::MacroAssembler::Push(reg);
    }
    // Push the 64-bit value loaded from memory at |a|.
    void push(Address a) {
        vixl::UseScratchRegisterScope temps(this);
        const ARMRegister scratch64 = temps.AcquireX();
        MOZ_ASSERT(a.base != scratch64.asUnsized());
        loadPtr(a, scratch64.asUnsized());
        vixl::MacroAssembler::Push(scratch64);
    }

    // Push registers.
push(Register reg)175 void push(Register reg) { 176 vixl::MacroAssembler::Push(ARMRegister(reg, 64)); 177 } push(Register r0,Register r1)178 void push(Register r0, Register r1) { 179 vixl::MacroAssembler::Push(ARMRegister(r0, 64), ARMRegister(r1, 64)); 180 } push(Register r0,Register r1,Register r2)181 void push(Register r0, Register r1, Register r2) { 182 vixl::MacroAssembler::Push(ARMRegister(r0, 64), ARMRegister(r1, 64), ARMRegister(r2, 64)); 183 } push(Register r0,Register r1,Register r2,Register r3)184 void push(Register r0, Register r1, Register r2, Register r3) { 185 vixl::MacroAssembler::Push(ARMRegister(r0, 64), ARMRegister(r1, 64), 186 ARMRegister(r2, 64), ARMRegister(r3, 64)); 187 } push(ARMFPRegister r0,ARMFPRegister r1,ARMFPRegister r2,ARMFPRegister r3)188 void push(ARMFPRegister r0, ARMFPRegister r1, ARMFPRegister r2, ARMFPRegister r3) { 189 vixl::MacroAssembler::Push(r0, r1, r2, r3); 190 } 191 192 // Pop registers. pop(Register reg)193 void pop(Register reg) { 194 vixl::MacroAssembler::Pop(ARMRegister(reg, 64)); 195 } pop(Register r0,Register r1)196 void pop(Register r0, Register r1) { 197 vixl::MacroAssembler::Pop(ARMRegister(r0, 64), ARMRegister(r1, 64)); 198 } pop(Register r0,Register r1,Register r2)199 void pop(Register r0, Register r1, Register r2) { 200 vixl::MacroAssembler::Pop(ARMRegister(r0, 64), ARMRegister(r1, 64), ARMRegister(r2, 64)); 201 } pop(Register r0,Register r1,Register r2,Register r3)202 void pop(Register r0, Register r1, Register r2, Register r3) { 203 vixl::MacroAssembler::Pop(ARMRegister(r0, 64), ARMRegister(r1, 64), 204 ARMRegister(r2, 64), ARMRegister(r3, 64)); 205 } pop(ARMFPRegister r0,ARMFPRegister r1,ARMFPRegister r2,ARMFPRegister r3)206 void pop(ARMFPRegister r0, ARMFPRegister r1, ARMFPRegister r2, ARMFPRegister r3) { 207 vixl::MacroAssembler::Pop(r0, r1, r2, r3); 208 } 209 pop(const ValueOperand & v)210 void pop(const ValueOperand& v) { 211 pop(v.valueReg()); 212 } pop(const FloatRegister & f)213 void pop(const 
FloatRegister& f) { 214 vixl::MacroAssembler::Pop(ARMRegister(f.code(), 64)); 215 } 216 implicitPop(uint32_t args)217 void implicitPop(uint32_t args) { 218 MOZ_ASSERT(args % sizeof(intptr_t) == 0); 219 adjustFrame(-args); 220 } Pop(ARMRegister r)221 void Pop(ARMRegister r) { 222 vixl::MacroAssembler::Pop(r); 223 adjustFrame(- r.size() / 8); 224 } 225 // FIXME: This is the same on every arch. 226 // FIXME: If we can share framePushed_, we can share this. 227 // FIXME: Or just make it at the highest level. PushWithPatch(ImmWord word)228 CodeOffset PushWithPatch(ImmWord word) { 229 framePushed_ += sizeof(word.value); 230 return pushWithPatch(word); 231 } PushWithPatch(ImmPtr ptr)232 CodeOffset PushWithPatch(ImmPtr ptr) { 233 return PushWithPatch(ImmWord(uintptr_t(ptr.value))); 234 } 235 framePushed()236 uint32_t framePushed() const { 237 return framePushed_; 238 } adjustFrame(int32_t diff)239 void adjustFrame(int32_t diff) { 240 setFramePushed(framePushed_ + diff); 241 } 242 setFramePushed(uint32_t framePushed)243 void setFramePushed(uint32_t framePushed) { 244 framePushed_ = framePushed; 245 } 246 freeStack(Register amount)247 void freeStack(Register amount) { 248 vixl::MacroAssembler::Drop(Operand(ARMRegister(amount, 64))); 249 } 250 251 // Update sp with the value of the current active stack pointer, if necessary. 
    // Copy the active pseudo stack pointer into the real sp (needed before
    // any operation that implicitly uses sp, e.g. calls/traps).
    void syncStackPtr() {
        if (!GetStackPointer64().Is(vixl::sp))
            Mov(vixl::sp, GetStackPointer64());
    }
    // Initialize the active pseudo stack pointer from the real sp.
    void initStackPtr() {
        if (!GetStackPointer64().Is(vixl::sp))
            Mov(GetStackPointer64(), vixl::sp);
    }

    // Store a boxed Value. On arm64 a Value is a single 64-bit word.
    void storeValue(ValueOperand val, const Address& dest) {
        storePtr(val.valueReg(), dest);
    }

    // Box |reg| with |type|'s tag in a scratch register, then store it.
    template <typename T>
    void storeValue(JSValueType type, Register reg, const T& dest) {
        vixl::UseScratchRegisterScope temps(this);
        const Register scratch = temps.AcquireX().asUnsized();
        MOZ_ASSERT(scratch != reg);
        tagValue(type, reg, ValueOperand(scratch));
        storeValue(ValueOperand(scratch), dest);
    }
    // Materialize a constant Value in a scratch register, then store it.
    template <typename T>
    void storeValue(const Value& val, const T& dest) {
        vixl::UseScratchRegisterScope temps(this);
        const Register scratch = temps.AcquireX().asUnsized();
        moveValue(val, ValueOperand(scratch));
        storeValue(ValueOperand(scratch), dest);
    }
    void storeValue(ValueOperand val, BaseIndex dest) {
        storePtr(val.valueReg(), dest);
    }

    // Store a possibly-unboxed value into a slot of type |slotType|:
    // doubles are stored raw; int32/boolean into a same-typed slot are stored
    // as raw 32-bit payloads; everything else is stored as a boxed Value.
    template <typename T>
    void storeUnboxedValue(ConstantOrRegister value, MIRType valueType, const T& dest, MIRType slotType) {
        if (valueType == MIRType_Double) {
            storeDouble(value.reg().typedReg().fpu(), dest);
            return;
        }

        // For known integers and booleans, we can just store the unboxed value if
        // the slot has the same type.
        if ((valueType == MIRType_Int32 || valueType == MIRType_Boolean) && slotType == valueType) {
            if (value.constant()) {
                Value val = value.value();
                if (valueType == MIRType_Int32)
                    store32(Imm32(val.toInt32()), dest);
                else
                    store32(Imm32(val.toBoolean() ? 1 : 0), dest);
            } else {
                store32(value.reg().typedReg().gpr(), dest);
            }
            return;
        }

        if (value.constant())
            storeValue(value.value(), dest);
        else
            storeValue(ValueTypeFromMIRType(valueType), value.reg().typedReg().gpr(), dest);

    }

    // Load a full 64-bit boxed Value.
    void loadValue(Address src, Register val) {
        Ldr(ARMRegister(val, 64), MemOperand(src));
    }
    void loadValue(Address src, ValueOperand val) {
        Ldr(ARMRegister(val.valueReg(), 64), MemOperand(src));
    }
    void loadValue(const BaseIndex& src, ValueOperand val) {
        doBaseIndex(ARMRegister(val.valueReg(), 64), src, vixl::LDR_x);
    }
    // Box |payload| by OR-ing in the shifted type tag. Assumes the payload's
    // tag bits are clear (true for pointers and zero/sign-extended payloads).
    void tagValue(JSValueType type, Register payload, ValueOperand dest) {
        // This could be cleverer, but the first attempt had bugs.
        Orr(ARMRegister(dest.valueReg(), 64), ARMRegister(payload, 64), Operand(ImmShiftedTag(type).value));
    }
    // Raw push/pop of a boxed Value (no framePushed_ tracking).
    void pushValue(ValueOperand val) {
        vixl::MacroAssembler::Push(ARMRegister(val.valueReg(), 64));
    }
    void popValue(ValueOperand val) {
        vixl::MacroAssembler::Pop(ARMRegister(val.valueReg(), 64));
    }
    // Push a constant Value. Markable (GC-pointer-bearing) values use a
    // patchable pool load plus a data relocation so the GC can trace them.
    void pushValue(const Value& val) {
        vixl::UseScratchRegisterScope temps(this);
        const Register scratch = temps.AcquireX().asUnsized();
        jsval_layout jv = JSVAL_TO_IMPL(val);
        if (val.isMarkable()) {
            BufferOffset load = movePatchablePtr(ImmPtr((void*)jv.asBits), scratch);
            writeDataRelocation(val, load);
            push(scratch);
        } else {
            moveValue(val, scratch);
            push(scratch);
        }
    }
    // Box |reg| with |type| into a scratch register and push it.
    void pushValue(JSValueType type, Register reg) {
        vixl::UseScratchRegisterScope temps(this);
        const Register scratch = temps.AcquireX().asUnsized();
        MOZ_ASSERT(scratch != reg);
        tagValue(type, reg, ValueOperand(scratch));
        push(scratch);
    }
    // Load the Value at |addr| and push it.
    void pushValue(const Address& addr) {
        vixl::UseScratchRegisterScope temps(this);
        const Register scratch = temps.AcquireX().asUnsized();
        MOZ_ASSERT(scratch != addr.base);
        loadValue(addr, scratch);
        push(scratch);
    }
    // Store only the payload bits of |value| into |address|, writing |nbytes|.
    // The 8-byte case strips the tag via unboxNonDouble first.
    // NOTE(review): the 4-byte case uses storePtr (a 64-bit store) rather than
    // store32 — confirm callers guarantee 8 writable bytes here.
    template <typename T>
    void storeUnboxedPayload(ValueOperand value, T address, size_t nbytes) {
        switch (nbytes) {
          case 8: {
            vixl::UseScratchRegisterScope temps(this);
            const Register scratch = temps.AcquireX().asUnsized();
            unboxNonDouble(value, scratch);
            storePtr(scratch, address);
            return;
          }
          case 4:
            storePtr(value.valueReg(), address);
            return;
          case 1:
            store8(value.valueReg(), address);
            return;
          default: MOZ_CRASH("Bad payload width");
        }
    }
    // Materialize a constant Value in |dest|. Markable values go through a
    // patchable pool load with a data relocation so the GC can trace/update
    // the embedded pointer.
    void moveValue(const Value& val, Register dest) {
        if (val.isMarkable()) {
            BufferOffset load = movePatchablePtr(ImmPtr((void*)val.asRawBits()), dest);
            writeDataRelocation(val, load);
        } else {
            movePtr(ImmWord(val.asRawBits()), dest);
        }
    }
    void moveValue(const Value& src, const ValueOperand& dest) {
        moveValue(src, dest.valueReg());
    }
    void moveValue(const ValueOperand& src, const ValueOperand& dest) {
        if (src.valueReg() != dest.valueReg())
            movePtr(src.valueReg(), dest.valueReg());
    }

    // Push a patchable 64-bit immediate; returns the offset of the pool load
    // so the value can be patched later.
    CodeOffset pushWithPatch(ImmWord imm) {
        vixl::UseScratchRegisterScope temps(this);
        const Register scratch = temps.AcquireX().asUnsized();
        CodeOffset label = movWithPatch(imm, scratch);
        push(scratch);
        return label;
    }

    // Load a patchable 64-bit immediate from the constant pool into |dest|.
    CodeOffset movWithPatch(ImmWord imm, Register dest) {
        BufferOffset off = immPool64(ARMRegister(dest, 64), imm.value);
        return CodeOffset(off.getOffset());
    }
    CodeOffset movWithPatch(ImmPtr imm, Register dest) {
        BufferOffset off = immPool64(ARMRegister(dest, 64), uint64_t(imm.value));
        return CodeOffset(off.getOffset());
    }

    // Box |src| with |type|'s shifted tag into |dest| (payload tag bits must
    // be clear, as in tagValue).
    void boxValue(JSValueType type, Register src, Register dest) {
        Orr(ARMRegister(dest, 64), ARMRegister(src, 64), Operand(ImmShiftedTag(type).value));
    }
    // Extract the tag bits of a boxed Value into the low bits of |dest|.
    void splitTag(Register src, Register dest) {
        ubfx(ARMRegister(dest, 64), ARMRegister(src, 64), JSVAL_TAG_SHIFT, (64 - JSVAL_TAG_SHIFT));
    }
    Register extractTag(const Address& address, Register scratch) {
        loadPtr(address, scratch);
        splitTag(scratch, scratch);
        return scratch;
    }
    Register extractTag(const ValueOperand& value, Register scratch) {
        splitTag(value.valueReg(), scratch);
        return scratch;
    }
    Register extractObject(const Address& address, Register scratch) {
        loadPtr(address, scratch);
        unboxObject(scratch, scratch);
        return scratch;
    }
    Register extractObject(const ValueOperand& value, Register scratch) {
        unboxObject(value, scratch);
        return scratch;
    }
    Register extractInt32(const ValueOperand& value, Register scratch) {
        unboxInt32(value, scratch);
        return scratch;
    }
    Register extractBoolean(const ValueOperand& value, Register scratch) {
        unboxBoolean(value, scratch);
        return scratch;
    }

    // If source is a double, load into dest.
    // If source is int32, convert to double and store in dest.
    // Else, branch to failure.
    void ensureDouble(const ValueOperand& source, FloatRegister dest, Label* failure) {
        Label isDouble, done;

        // TODO: splitTagForTest really should not leak a scratch register.
        Register tag = splitTagForTest(source);
        {
            // Keep the tag's scratch register reserved while we branch on it.
            vixl::UseScratchRegisterScope temps(this);
            temps.Exclude(ARMRegister(tag, 64));

            branchTestDouble(Assembler::Equal, tag, &isDouble);
            branchTestInt32(Assembler::NotEqual, tag, failure);
        }

        convertInt32ToDouble(source.valueReg(), dest);
        jump(&done);

        bind(&isDouble);
        unboxDouble(source, dest);

        bind(&done);
    }

    // Set |dest| to 1 if |cond| holds, else 0.
    void emitSet(Condition cond, Register dest) {
        Cset(ARMRegister(dest, 64), cond);
    }

    template <typename T1, typename T2>
    void cmpPtrSet(Condition cond, T1 lhs, T2 rhs, Register dest) {
        cmpPtr(lhs, rhs);
        emitSet(cond, dest);
    }

    template <typename T1, typename T2>
    void cmp32Set(Condition cond, T1 lhs, T2 rhs, Register dest) {
        cmp32(lhs, rhs);
        emitSet(cond, dest);
    }

    // Type-test a Value and materialize the result as 0/1 in |dest|.
    void testNullSet(Condition cond, const ValueOperand& value, Register dest) {
        cond = testNull(cond, value);
        emitSet(cond, dest);
    }
    void testObjectSet(Condition cond, const ValueOperand& value, Register dest) {
        cond = testObject(cond, value);
        emitSet(cond, dest);
    }
    void testUndefinedSet(Condition cond, const ValueOperand& value, Register dest) {
        cond = testUndefined(cond, value);
        emitSet(cond, dest);
    }

    // Booleans are stored as 0/1 in the low byte; zero-extend it.
    void convertBoolToInt32(Register source, Register dest) {
        Uxtb(ARMRegister(dest, 64), ARMRegister(source, 64));
    }

    void convertInt32ToDouble(Register src, FloatRegister dest) {
        Scvtf(ARMFPRegister(dest, 64), ARMRegister(src, 32)); // Uses FPCR rounding mode.
    }
    void convertInt32ToDouble(const Address& src, FloatRegister dest) {
        vixl::UseScratchRegisterScope temps(this);
        const Register scratch = temps.AcquireX().asUnsized();
        MOZ_ASSERT(scratch != src.base);
        load32(src, scratch);
        convertInt32ToDouble(scratch, dest);
    }
    void convertInt32ToDouble(const BaseIndex& src, FloatRegister dest) {
        vixl::UseScratchRegisterScope temps(this);
        const Register scratch = temps.AcquireX().asUnsized();
        MOZ_ASSERT(scratch != src.base);
        MOZ_ASSERT(scratch != src.index);
        load32(src, scratch);
        convertInt32ToDouble(scratch, dest);
    }

    void convertInt32ToFloat32(Register src, FloatRegister dest) {
        Scvtf(ARMFPRegister(dest, 32), ARMRegister(src, 32)); // Uses FPCR rounding mode.
    }
    void convertInt32ToFloat32(const Address& src, FloatRegister dest) {
        vixl::UseScratchRegisterScope temps(this);
        const Register scratch = temps.AcquireX().asUnsized();
        MOZ_ASSERT(scratch != src.base);
        load32(src, scratch);
        convertInt32ToFloat32(scratch, dest);
    }

    void convertUInt32ToDouble(Register src, FloatRegister dest) {
        Ucvtf(ARMFPRegister(dest, 64), ARMRegister(src, 32)); // Uses FPCR rounding mode.
    }
    void convertUInt32ToDouble(const Address& src, FloatRegister dest) {
        vixl::UseScratchRegisterScope temps(this);
        const Register scratch = temps.AcquireX().asUnsized();
        MOZ_ASSERT(scratch != src.base);
        load32(src, scratch);
        convertUInt32ToDouble(scratch, dest);
    }

    void convertUInt32ToFloat32(Register src, FloatRegister dest) {
        Ucvtf(ARMFPRegister(dest, 32), ARMRegister(src, 32)); // Uses FPCR rounding mode.
    }
    void convertUInt32ToFloat32(const Address& src, FloatRegister dest) {
        vixl::UseScratchRegisterScope temps(this);
        const Register scratch = temps.AcquireX().asUnsized();
        MOZ_ASSERT(scratch != src.base);
        load32(src, scratch);
        convertUInt32ToFloat32(scratch, dest);
    }

    void convertFloat32ToDouble(FloatRegister src, FloatRegister dest) {
        Fcvt(ARMFPRegister(dest, 64), ARMFPRegister(src, 32));
    }
    void convertDoubleToFloat32(FloatRegister src, FloatRegister dest) {
        Fcvt(ARMFPRegister(dest, 32), ARMFPRegister(src, 64));
    }

    // Truncate a double to an int32, branching to |fail| if the (saturated)
    // 64-bit result is outside int32 range. The Add/Cmn pair detects the
    // saturation boundary values; the final And keeps only the low 32 bits.
    void branchTruncateDouble(FloatRegister src, Register dest, Label* fail) {
        vixl::UseScratchRegisterScope temps(this);
        const ARMRegister scratch64 = temps.AcquireX();

        // An out of range integer will be saturated to the destination size.
        ARMFPRegister src64(src, 64);
        ARMRegister dest64(dest, 64);

        MOZ_ASSERT(!scratch64.Is(dest64));

        //breakpoint();
        Fcvtzs(dest64, src64);
        Add(scratch64, dest64, Operand(0x7fffffffffffffff));
        Cmn(scratch64, 3);
        B(fail, Assembler::Above);
        And(dest64, dest64, Operand(0xffffffff));
    }
    // Convert a double to int32, failing if the conversion is inexact
    // (round-trip compare) or, optionally, if the input is -0.0.
    void convertDoubleToInt32(FloatRegister src, Register dest, Label* fail,
                              bool negativeZeroCheck = true)
    {
        vixl::UseScratchRegisterScope temps(this);
        const ARMFPRegister scratch64 = temps.AcquireD();

        ARMFPRegister fsrc(src, 64);
        ARMRegister dest32(dest, 32);
        ARMRegister dest64(dest, 64);

        MOZ_ASSERT(!scratch64.Is(fsrc));

        Fcvtzs(dest32, fsrc); // Convert, rounding toward zero.
        Scvtf(scratch64, dest32); // Convert back, using FPCR rounding mode.
        Fcmp(scratch64, fsrc);
        B(fail, Assembler::NotEqual);

        if (negativeZeroCheck) {
            Label nonzero;
            Cbnz(dest32, &nonzero);
            // Result is zero: inspect the raw double bits; nonzero bits mean
            // the input was -0.0.
            Fmov(dest64, fsrc);
            Cbnz(dest64, fail);
            bind(&nonzero);
        }
    }
    // Float32 counterpart of convertDoubleToInt32.
    void convertFloat32ToInt32(FloatRegister src, Register dest, Label* fail,
                               bool negativeZeroCheck = true)
    {
        vixl::UseScratchRegisterScope temps(this);
        const ARMFPRegister scratch32 = temps.AcquireS();

        ARMFPRegister fsrc(src, 32);
        ARMRegister dest32(dest, 32);
        ARMRegister dest64(dest, 64);

        MOZ_ASSERT(!scratch32.Is(fsrc));

        Fcvtzs(dest64, fsrc); // Convert, rounding toward zero.
        Scvtf(scratch32, dest32); // Convert back, using FPCR rounding mode.
        Fcmp(scratch32, fsrc);
        B(fail, Assembler::NotEqual);

        if (negativeZeroCheck) {
            Label nonzero;
            Cbnz(dest32, &nonzero);
            // Raw float bits nonzero with a zero result means -0.0.
            Fmov(dest32, fsrc);
            Cbnz(dest32, fail);
            bind(&nonzero);
        }
        And(dest64, dest64, Operand(0xffffffff));
    }

    // Float32 counterpart of branchTruncateDouble.
    void branchTruncateFloat32(FloatRegister src, Register dest, Label* fail) {
        vixl::UseScratchRegisterScope temps(this);
        const ARMRegister scratch64 = temps.AcquireX();

        ARMFPRegister src32(src, 32);
        ARMRegister dest64(dest, 64);

        MOZ_ASSERT(!scratch64.Is(dest64));

        Fcvtzs(dest64, src32);
        Add(scratch64, dest64, Operand(0x7fffffffffffffff));
        Cmn(scratch64, 3);
        B(fail, Assembler::Above);
        And(dest64, dest64, Operand(0xffffffff));
    }
    // floor() to int32, bailing on NaN (unordered sets V), out-of-int32-range
    // results, and -0.0.
    void floor(FloatRegister input, Register output, Label* bail) {
        Label handleZero;
        //Label handleNeg;
        Label fin;
        ARMFPRegister iDbl(input, 64);
        ARMRegister o64(output, 64);
        ARMRegister o32(output, 32);
        Fcmp(iDbl, 0.0);
        B(Assembler::Equal, &handleZero);
        //B(Assembler::Signed, &handleNeg);
        // NaN is always a bail condition, just bail directly.
        B(Assembler::Overflow, bail);
        Fcvtms(o64, iDbl);
        // Bail if the result does not fit in int32 (64-bit value != its own
        // sign-extended low word).
        Cmp(o64, Operand(o64, vixl::SXTW));
        B(NotEqual, bail);
        // Clear the high 32 bits.
        Mov(o32, o32);
        B(&fin);

        bind(&handleZero);
        // Move the top word of the double into the output reg, if it is non-zero,
        // then the original value was -0.0.
        Fmov(o64, iDbl);
        Cbnz(o64, bail);
        bind(&fin);
    }

    // Float32 counterpart of floor().
    void floorf(FloatRegister input, Register output, Label* bail) {
        Label handleZero;
        //Label handleNeg;
        Label fin;
        ARMFPRegister iFlt(input, 32);
        ARMRegister o64(output, 64);
        ARMRegister o32(output, 32);
        Fcmp(iFlt, 0.0);
        B(Assembler::Equal, &handleZero);
        //B(Assembler::Signed, &handleNeg);
        // NaN is always a bail condition, just bail directly.
        B(Assembler::Overflow, bail);
        Fcvtms(o64, iFlt);
        Cmp(o64, Operand(o64, vixl::SXTW));
        B(NotEqual, bail);
        Mov(o32, o32);
        B(&fin);

        bind(&handleZero);
        // Move the top word of the double into the output reg, if it is non-zero,
        // then the original value was -0.0.
        Fmov(o32, iFlt);
        Cbnz(o32, bail);
        bind(&fin);
    }

    // ceil() to int32; a zero result requires checking the sign of the input
    // (ceil of a small negative is -0.0, which must bail).
    void ceil(FloatRegister input, Register output, Label* bail) {
        Label handleZero;
        Label fin;
        ARMFPRegister iDbl(input, 64);
        ARMRegister o64(output, 64);
        ARMRegister o32(output, 32);
        Fcmp(iDbl, 0.0);
        B(Assembler::Overflow, bail);
        Fcvtps(o64, iDbl);
        Cmp(o64, Operand(o64, vixl::SXTW));
        B(NotEqual, bail);
        Cbz(o64, &handleZero);
        Mov(o32, o32);
        B(&fin);

        bind(&handleZero);
        vixl::UseScratchRegisterScope temps(this);
        const ARMRegister scratch = temps.AcquireX();
        // Nonzero raw bits with a zero result mean the input was negative
        // (result -0.0): bail.
        Fmov(scratch, iDbl);
        Cbnz(scratch, bail);
        bind(&fin);
    }

    // Float32 counterpart of ceil().
    void ceilf(FloatRegister input, Register output, Label* bail) {
        Label handleZero;
        Label fin;
        ARMFPRegister iFlt(input, 32);
        ARMRegister o64(output, 64);
        ARMRegister o32(output, 32);
        Fcmp(iFlt, 0.0);

        // NaN is always a bail condition, just bail directly.
        B(Assembler::Overflow, bail);
        Fcvtps(o64, iFlt);
        Cmp(o64, Operand(o64, vixl::SXTW));
        B(NotEqual, bail);
        Cbz(o64, &handleZero);
        Mov(o32, o32);
        B(&fin);

        bind(&handleZero);
        // Move the top word of the double into the output reg, if it is non-zero,
        // then the original value was -0.0.
        Fmov(o32, iFlt);
        Cbnz(o32, bail);
        bind(&fin);
    }

    // Unconditional jumps.
    void jump(Label* label) {
        B(label);
    }
    void jump(JitCode* code) {
        branch(code);
    }
    void jump(RepatchLabel* label) {
        MOZ_CRASH("jump (repatchlabel)");
    }
    void jump(Register reg) {
        Br(ARMRegister(reg, 64));
    }
    void jump(const Address& addr) {
        loadPtr(addr, ip0);
        Br(vixl::ip0);
    }

    void align(int alignment) {
        armbuffer_.align(alignment);
    }

    void haltingAlign(int alignment) {
        // TODO: Implement a proper halting align.
        // ARM doesn't have one either.
        armbuffer_.align(alignment);
    }

    void movePtr(Register src, Register dest) {
        Mov(ARMRegister(dest, 64), ARMRegister(src, 64));
    }
    void movePtr(ImmWord imm, Register dest) {
        Mov(ARMRegister(dest, 64), int64_t(imm.value));
    }
    void movePtr(ImmPtr imm, Register dest) {
        Mov(ARMRegister(dest, 64), int64_t(imm.value));
    }
    // Load a placeholder from the pool and record an absolute link so the
    // symbolic address can be resolved at link time.
    void movePtr(wasm::SymbolicAddress imm, Register dest) {
        BufferOffset off = movePatchablePtr(ImmWord(0xffffffffffffffffULL), dest);
        append(AsmJSAbsoluteLink(CodeOffset(off.getOffset()), imm));
    }
    // GC pointers get a data relocation so they can be traced/patched.
    void movePtr(ImmGCPtr imm, Register dest) {
        BufferOffset load = movePatchablePtr(ImmPtr(imm.value), dest);
        writeDataRelocation(imm, load);
    }
    void move64(Register64 src, Register64 dest) {
        movePtr(src.reg, dest.reg);
    }

    // mov() aliases for movePtr().
    void mov(ImmWord imm, Register dest) {
        movePtr(imm, dest);
    }
    void mov(ImmPtr imm, Register dest) {
        movePtr(imm, dest);
    }
    void mov(wasm::SymbolicAddress imm, Register dest) {
        movePtr(imm, dest);
    }
    void mov(Register src, Register dest) {
        movePtr(src, dest);
    }

    // 32-bit moves (write the W view; high 32 bits of dest are zeroed).
    void move32(Imm32 imm, Register dest) {
        Mov(ARMRegister(dest, 32), (int64_t)imm.value);
    }
    void move32(Register src, Register dest) {
        Mov(ARMRegister(dest, 32), ARMRegister(src, 32));
    }

    // Move a pointer using a literal pool, so that the pointer
    // may be easily patched or traced.
    // Returns the BufferOffset of the load instruction emitted.
814 BufferOffset movePatchablePtr(ImmWord ptr, Register dest); 815 BufferOffset movePatchablePtr(ImmPtr ptr, Register dest); 816 neg32(Register reg)817 void neg32(Register reg) { 818 Negs(ARMRegister(reg, 32), Operand(ARMRegister(reg, 32))); 819 } 820 loadPtr(wasm::SymbolicAddress address,Register dest)821 void loadPtr(wasm::SymbolicAddress address, Register dest) { 822 vixl::UseScratchRegisterScope temps(this); 823 const ARMRegister scratch = temps.AcquireX(); 824 movePtr(address, scratch.asUnsized()); 825 Ldr(ARMRegister(dest, 64), MemOperand(scratch)); 826 } loadPtr(AbsoluteAddress address,Register dest)827 void loadPtr(AbsoluteAddress address, Register dest) { 828 vixl::UseScratchRegisterScope temps(this); 829 const ARMRegister scratch = temps.AcquireX(); 830 movePtr(ImmWord((uintptr_t)address.addr), scratch.asUnsized()); 831 Ldr(ARMRegister(dest, 64), MemOperand(scratch)); 832 } loadPtr(const Address & address,Register dest)833 void loadPtr(const Address& address, Register dest) { 834 Ldr(ARMRegister(dest, 64), MemOperand(address)); 835 } loadPtr(const BaseIndex & src,Register dest)836 void loadPtr(const BaseIndex& src, Register dest) { 837 Register base = src.base; 838 uint32_t scale = Imm32::ShiftOf(src.scale).value; 839 ARMRegister dest64(dest, 64); 840 ARMRegister index64(src.index, 64); 841 842 if (src.offset) { 843 vixl::UseScratchRegisterScope temps(this); 844 const ARMRegister scratch = temps.AcquireX(); 845 MOZ_ASSERT(!scratch.Is(ARMRegister(base, 64))); 846 MOZ_ASSERT(!scratch.Is(dest64)); 847 MOZ_ASSERT(!scratch.Is(index64)); 848 849 Add(scratch, ARMRegister(base, 64), Operand(int64_t(src.offset))); 850 Ldr(dest64, MemOperand(scratch, index64, vixl::LSL, scale)); 851 return; 852 } 853 854 Ldr(dest64, MemOperand(ARMRegister(base, 64), index64, vixl::LSL, scale)); 855 } 856 void loadPrivate(const Address& src, Register dest); 857 store8(Register src,const Address & address)858 void store8(Register src, const Address& address) { 859 
        // (Body of store8(Register, Address), opened just above.)
        Strb(ARMRegister(src, 32), MemOperand(ARMRegister(address.base, 64), address.offset));
    }
    // Immediate stores materialize the value in a scratch register first.
    void store8(Imm32 imm, const Address& address) {
        vixl::UseScratchRegisterScope temps(this);
        const ARMRegister scratch32 = temps.AcquireW();
        MOZ_ASSERT(scratch32.asUnsized() != address.base);
        move32(imm, scratch32.asUnsized());
        Strb(scratch32, MemOperand(ARMRegister(address.base, 64), address.offset));
    }
    void store8(Register src, const BaseIndex& address) {
        doBaseIndex(ARMRegister(src, 32), address, vixl::STRB_w);
    }
    void store8(Imm32 imm, const BaseIndex& address) {
        vixl::UseScratchRegisterScope temps(this);
        const ARMRegister scratch32 = temps.AcquireW();
        MOZ_ASSERT(scratch32.asUnsized() != address.base);
        MOZ_ASSERT(scratch32.asUnsized() != address.index);
        Mov(scratch32, Operand(imm.value));
        doBaseIndex(scratch32, address, vixl::STRB_w);
    }

    // 16-bit stores, same pattern as the 8-bit forms above.
    void store16(Register src, const Address& address) {
        Strh(ARMRegister(src, 32), MemOperand(ARMRegister(address.base, 64), address.offset));
    }
    void store16(Imm32 imm, const Address& address) {
        vixl::UseScratchRegisterScope temps(this);
        const ARMRegister scratch32 = temps.AcquireW();
        MOZ_ASSERT(scratch32.asUnsized() != address.base);
        move32(imm, scratch32.asUnsized());
        Strh(scratch32, MemOperand(ARMRegister(address.base, 64), address.offset));
    }
    void store16(Register src, const BaseIndex& address) {
        doBaseIndex(ARMRegister(src, 32), address, vixl::STRH_w);
    }
    void store16(Imm32 imm, const BaseIndex& address) {
        vixl::UseScratchRegisterScope temps(this);
        const ARMRegister scratch32 = temps.AcquireW();
        MOZ_ASSERT(scratch32.asUnsized() != address.base);
        MOZ_ASSERT(scratch32.asUnsized() != address.index);
        Mov(scratch32, Operand(imm.value));
        doBaseIndex(scratch32, address, vixl::STRH_w);
    }

    // Pointer-width (64-bit) stores.
    void storePtr(ImmWord imm, const Address& address) {
        vixl::UseScratchRegisterScope temps(this);
        const Register scratch = temps.AcquireX().asUnsized();
        MOZ_ASSERT(scratch != address.base);
        movePtr(imm, scratch);
        storePtr(scratch, address);
    }
    void storePtr(ImmPtr imm, const Address& address) {
        vixl::UseScratchRegisterScope temps(this);
        const ARMRegister scratch64 = temps.AcquireX();
        MOZ_ASSERT(scratch64.asUnsized() != address.base);
        Mov(scratch64, uint64_t(imm.value));
        Str(scratch64, MemOperand(ARMRegister(address.base, 64), address.offset));
    }
    // GC pointers route through movePtr() so the data relocation is recorded.
    void storePtr(ImmGCPtr imm, const Address& address) {
        vixl::UseScratchRegisterScope temps(this);
        const Register scratch = temps.AcquireX().asUnsized();
        MOZ_ASSERT(scratch != address.base);
        movePtr(imm, scratch);
        storePtr(scratch, address);
    }
    void storePtr(Register src, const Address& address) {
        Str(ARMRegister(src, 64), MemOperand(ARMRegister(address.base, 64), address.offset));
    }

    void storePtr(ImmWord imm, const BaseIndex& address) {
        vixl::UseScratchRegisterScope temps(this);
        const ARMRegister scratch64 = temps.AcquireX();
        MOZ_ASSERT(scratch64.asUnsized() != address.base);
        MOZ_ASSERT(scratch64.asUnsized() != address.index);
        Mov(scratch64, Operand(imm.value));
        doBaseIndex(scratch64, address, vixl::STR_x);
    }
    void storePtr(ImmGCPtr imm, const BaseIndex& address) {
        vixl::UseScratchRegisterScope temps(this);
        const Register scratch = temps.AcquireX().asUnsized();
        MOZ_ASSERT(scratch != address.base);
        MOZ_ASSERT(scratch != address.index);
        movePtr(imm, scratch);
        doBaseIndex(ARMRegister(scratch, 64), address, vixl::STR_x);
    }
    void storePtr(Register src, const BaseIndex& address) {
        doBaseIndex(ARMRegister(src, 64), address, vixl::STR_x);
    }

    void storePtr(Register src, AbsoluteAddress address) {
        vixl::UseScratchRegisterScope temps(this);
        const ARMRegister scratch64 = temps.AcquireX();
        Mov(scratch64, uint64_t(address.addr));
        Str(ARMRegister(src, 64), MemOperand(scratch64));
    }

    // 32-bit stores.
    void store32(Register src, AbsoluteAddress address) {
        vixl::UseScratchRegisterScope temps(this);
        const ARMRegister scratch64 = temps.AcquireX();
        Mov(scratch64, uint64_t(address.addr));
        Str(ARMRegister(src, 32), MemOperand(scratch64));
    }
    void store32(Imm32 imm, const Address& address) {
        vixl::UseScratchRegisterScope temps(this);
        const ARMRegister scratch32 = temps.AcquireW();
        MOZ_ASSERT(scratch32.asUnsized() != address.base);
        Mov(scratch32, uint64_t(imm.value));
        Str(scratch32, MemOperand(ARMRegister(address.base, 64), address.offset));
    }
    void store32(Register r, const Address& address) {
        Str(ARMRegister(r, 32), MemOperand(ARMRegister(address.base, 64), address.offset));
    }
    void store32(Imm32 imm, const BaseIndex& address) {
        vixl::UseScratchRegisterScope temps(this);
        const ARMRegister scratch32 = temps.AcquireW();
        MOZ_ASSERT(scratch32.asUnsized() != address.base);
        MOZ_ASSERT(scratch32.asUnsized() != address.index);
        Mov(scratch32, imm.value);
        doBaseIndex(scratch32, address, vixl::STR_w);
    }
    void store32(Register r, const BaseIndex& address) {
        doBaseIndex(ARMRegister(r, 32), address, vixl::STR_w);
    }

    // As store32(Imm32, Address), but guarantees ScratchReg2 is not used,
    // for callers that have ScratchReg2 live across this store.
    void store32_NoSecondScratch(Imm32 imm, const Address& address) {
        vixl::UseScratchRegisterScope temps(this);
        temps.Exclude(ARMRegister(ScratchReg2, 32)); // Disallow ScratchReg2.
        const ARMRegister scratch32 = temps.AcquireW();

        MOZ_ASSERT(scratch32.asUnsized() != address.base);
        Mov(scratch32, uint64_t(imm.value));
        Str(scratch32, MemOperand(ARMRegister(address.base, 64), address.offset));
    }

    void store64(Register64 src, Address address) {
        storePtr(src.reg, address);
    }

    // SIMD. None of these are implemented on ARM64 yet; each stub crashes
    // deliberately if reached.
    void loadInt32x1(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
    void loadInt32x1(const BaseIndex& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
    void loadInt32x2(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
    void loadInt32x2(const BaseIndex& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
    void loadInt32x3(const Address& src, FloatRegister dest) { MOZ_CRASH("NYI"); }
    void loadInt32x3(const BaseIndex& src, FloatRegister dest) { MOZ_CRASH("NYI"); }
    void storeInt32x1(FloatRegister src, const Address& dest) { MOZ_CRASH("NYI"); }
    void storeInt32x1(FloatRegister src, const BaseIndex& dest) { MOZ_CRASH("NYI"); }
    void storeInt32x2(FloatRegister src, const Address& dest) { MOZ_CRASH("NYI"); }
    void storeInt32x2(FloatRegister src, const BaseIndex& dest) { MOZ_CRASH("NYI"); }
    void storeInt32x3(FloatRegister src, const Address& dest) { MOZ_CRASH("NYI"); }
    void storeInt32x3(FloatRegister src, const BaseIndex& dest) { MOZ_CRASH("NYI"); }
    void loadAlignedInt32x4(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
    void loadAlignedInt32x4(const BaseIndex& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
    void storeAlignedInt32x4(FloatRegister src, const Address& addr) { MOZ_CRASH("NYI"); }
    void storeAlignedInt32x4(FloatRegister src, const BaseIndex& addr) { MOZ_CRASH("NYI"); }
    void loadUnalignedInt32x4(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
    void loadUnalignedInt32x4(const BaseIndex& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
    void storeUnalignedInt32x4(FloatRegister dest, const Address& addr) { MOZ_CRASH("NYI"); }
    void storeUnalignedInt32x4(FloatRegister dest, const BaseIndex& addr) { MOZ_CRASH("NYI"); }

    void loadFloat32x3(const Address& src, FloatRegister dest) { MOZ_CRASH("NYI"); }
    void loadFloat32x3(const BaseIndex& src, FloatRegister dest) { MOZ_CRASH("NYI"); }
    void storeFloat32x3(FloatRegister src, const Address& dest) { MOZ_CRASH("NYI"); }
    void storeFloat32x3(FloatRegister src, const BaseIndex& dest) { MOZ_CRASH("NYI"); }
    void loadAlignedFloat32x4(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
    void loadAlignedFloat32x4(const BaseIndex& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
    void storeAlignedFloat32x4(FloatRegister src, const Address& addr) { MOZ_CRASH("NYI"); }
    void storeAlignedFloat32x4(FloatRegister src, const BaseIndex& addr) { MOZ_CRASH("NYI"); }
    void loadUnalignedFloat32x4(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
    void loadUnalignedFloat32x4(const BaseIndex& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
    void storeUnalignedFloat32x4(FloatRegister dest, const Address& addr) { MOZ_CRASH("NYI"); }
    void storeUnalignedFloat32x4(FloatRegister dest, const BaseIndex& addr) { MOZ_CRASH("NYI"); }

    // StackPointer manipulation.
1032 template <typename T> addToStackPtr(T t)1033 void addToStackPtr(T t) { addPtr(t, getStackPointer()); } 1034 template <typename T> addStackPtrTo(T t)1035 void addStackPtrTo(T t) { addPtr(getStackPointer(), t); } 1036 1037 template <typename T> subFromStackPtr(T t)1038 void subFromStackPtr(T t) { subPtr(t, getStackPointer()); syncStackPtr(); } 1039 template <typename T> subStackPtrFrom(T t)1040 void subStackPtrFrom(T t) { subPtr(getStackPointer(), t); } 1041 1042 template <typename T> void andToStackPtr(T t); 1043 template <typename T> void andStackPtrTo(T t); 1044 1045 template <typename T> moveToStackPtr(T t)1046 void moveToStackPtr(T t) { movePtr(t, getStackPointer()); syncStackPtr(); } 1047 template <typename T> moveStackPtrTo(T t)1048 void moveStackPtrTo(T t) { movePtr(getStackPointer(), t); } 1049 1050 template <typename T> loadStackPtr(T t)1051 void loadStackPtr(T t) { loadPtr(t, getStackPointer()); syncStackPtr(); } 1052 template <typename T> storeStackPtr(T t)1053 void storeStackPtr(T t) { storePtr(getStackPointer(), t); } 1054 1055 // StackPointer testing functions. 
    // Branch/compare against the (pseudo) stack pointer.
    template <typename T>
    void branchTestStackPtr(Condition cond, T t, Label* label) {
        branchTestPtr(cond, getStackPointer(), t, label);
    }
    template <typename T>
    void branchStackPtr(Condition cond, T rhs, Label* label) {
        branchPtr(cond, getStackPointer(), rhs, label);
    }
    template <typename T>
    void branchStackPtrRhs(Condition cond, T lhs, Label* label) {
        branchPtr(cond, lhs, getStackPointer(), label);
    }

    // Flag-setting tests (Tst = AND, discarding the result).
    void testPtr(Register lhs, Register rhs) {
        Tst(ARMRegister(lhs, 64), Operand(ARMRegister(rhs, 64)));
    }
    void test32(Register lhs, Register rhs) {
        Tst(ARMRegister(lhs, 32), Operand(ARMRegister(rhs, 32)));
    }
    void test32(const Address& addr, Imm32 imm) {
        vixl::UseScratchRegisterScope temps(this);
        const ARMRegister scratch32 = temps.AcquireW();
        MOZ_ASSERT(scratch32.asUnsized() != addr.base);
        load32(addr, scratch32.asUnsized());
        Tst(scratch32, Operand(imm.value));
    }
    void test32(Register lhs, Imm32 rhs) {
        Tst(ARMRegister(lhs, 32), Operand(rhs.value));
    }

    // 32-bit compares.
    void cmp32(Register lhs, Imm32 rhs) {
        Cmp(ARMRegister(lhs, 32), Operand(rhs.value));
    }
    void cmp32(Register a, Register b) {
        Cmp(ARMRegister(a, 32), Operand(ARMRegister(b, 32)));
    }
    void cmp32(const Operand& lhs, Imm32 rhs) {
        vixl::UseScratchRegisterScope temps(this);
        const ARMRegister scratch32 = temps.AcquireW();
        Mov(scratch32, lhs);
        Cmp(scratch32, Operand(rhs.value));
    }
    void cmp32(const Operand& lhs, Register rhs) {
        vixl::UseScratchRegisterScope temps(this);
        const ARMRegister scratch32 = temps.AcquireW();
        Mov(scratch32, lhs);
        Cmp(scratch32, Operand(ARMRegister(rhs, 32)));
    }

    // Pointer-width (64-bit) compares.
    void cmpPtr(Register lhs, Imm32 rhs) {
        Cmp(ARMRegister(lhs, 64), Operand(rhs.value));
    }
    void cmpPtr(Register lhs, ImmWord rhs) {
        Cmp(ARMRegister(lhs, 64), Operand(rhs.value));
    }
    void cmpPtr(Register lhs, ImmPtr rhs) {
        Cmp(ARMRegister(lhs, 64), Operand(uint64_t(rhs.value)));
    }
    void cmpPtr(Register lhs, Register rhs) {
        Cmp(ARMRegister(lhs, 64), ARMRegister(rhs, 64));
    }
    // GC pointer rhs must go through movePtr() for relocation.
    void cmpPtr(Register lhs, ImmGCPtr rhs) {
        vixl::UseScratchRegisterScope temps(this);
        const Register scratch = temps.AcquireX().asUnsized();
        MOZ_ASSERT(scratch != lhs);
        movePtr(rhs, scratch);
        cmpPtr(lhs, scratch);
    }

    // Memory lhs: load into a scratch register, then compare.
    void cmpPtr(const Address& lhs, Register rhs) {
        vixl::UseScratchRegisterScope temps(this);
        const ARMRegister scratch64 = temps.AcquireX();
        MOZ_ASSERT(scratch64.asUnsized() != lhs.base);
        MOZ_ASSERT(scratch64.asUnsized() != rhs);
        Ldr(scratch64, MemOperand(ARMRegister(lhs.base, 64), lhs.offset));
        Cmp(scratch64, Operand(ARMRegister(rhs, 64)));
    }
    void cmpPtr(const Address& lhs, ImmWord rhs) {
        vixl::UseScratchRegisterScope temps(this);
        const ARMRegister scratch64 = temps.AcquireX();
        MOZ_ASSERT(scratch64.asUnsized() != lhs.base);
        Ldr(scratch64, MemOperand(ARMRegister(lhs.base, 64), lhs.offset));
        Cmp(scratch64, Operand(rhs.value));
    }
    void cmpPtr(const Address& lhs, ImmPtr rhs) {
        vixl::UseScratchRegisterScope temps(this);
        const ARMRegister scratch64 = temps.AcquireX();
        MOZ_ASSERT(scratch64.asUnsized() != lhs.base);
        Ldr(scratch64, MemOperand(ARMRegister(lhs.base, 64), lhs.offset));
        Cmp(scratch64, Operand(uint64_t(rhs.value)));
    }
    void cmpPtr(const Address& lhs, ImmGCPtr rhs) {
        vixl::UseScratchRegisterScope temps(this);
        const Register scratch = temps.AcquireX().asUnsized();
        MOZ_ASSERT(scratch != lhs.base);
        loadPtr(lhs, scratch);
        cmpPtr(scratch, rhs);
    }

    // Double loads.
    void loadDouble(const Address& src, FloatRegister dest) {
        Ldr(ARMFPRegister(dest, 64), MemOperand(src));
    }
    void loadDouble(const BaseIndex& src, FloatRegister dest) {
        ARMRegister base(src.base, 64);
        ARMRegister index(src.index, 64);

        // Zero offset: use the scaled-index addressing mode directly.
        if (src.offset == 0) {
            Ldr(ARMFPRegister(dest, 64), MemOperand(base, index, vixl::LSL, unsigned(src.scale)));
            return;
        }

        // Otherwise fold base + (index << scale) into a scratch register.
        vixl::UseScratchRegisterScope temps(this);
        const ARMRegister scratch64 = temps.AcquireX();
        MOZ_ASSERT(scratch64.asUnsized() != src.base);
        MOZ_ASSERT(scratch64.asUnsized() != src.index);

        Add(scratch64, base, Operand(index, vixl::LSL, unsigned(src.scale)));
        Ldr(ARMFPRegister(dest, 64), MemOperand(scratch64, src.offset));
    }

    // Load a single and widen it to double in |dest|.
    void loadFloatAsDouble(const Address& addr, FloatRegister dest) {
        Ldr(ARMFPRegister(dest, 32), MemOperand(ARMRegister(addr.base,64), addr.offset));
        fcvt(ARMFPRegister(dest, 64), ARMFPRegister(dest, 32));
    }
    void loadFloatAsDouble(const BaseIndex& src, FloatRegister dest) {
        ARMRegister base(src.base, 64);
        ARMRegister index(src.index, 64);
        if (src.offset == 0) {
            Ldr(ARMFPRegister(dest, 32), MemOperand(base, index, vixl::LSL, unsigned(src.scale)));
        } else {
            vixl::UseScratchRegisterScope temps(this);
            const ARMRegister scratch64 = temps.AcquireX();
            MOZ_ASSERT(scratch64.asUnsized() != src.base);
            MOZ_ASSERT(scratch64.asUnsized() != src.index);

            Add(scratch64, base, Operand(index, vixl::LSL, unsigned(src.scale)));
            Ldr(ARMFPRegister(dest, 32), MemOperand(scratch64, src.offset));
        }
        // Widen the loaded single to a double.
        fcvt(ARMFPRegister(dest, 64), ARMFPRegister(dest, 32));
    }

    // Single-precision loads.
    void loadFloat32(const Address& addr, FloatRegister dest) {
        Ldr(ARMFPRegister(dest, 32), MemOperand(ARMRegister(addr.base,64), addr.offset));
    }
    void loadFloat32(const BaseIndex& src, FloatRegister dest) {
        ARMRegister base(src.base, 64);
        ARMRegister index(src.index, 64);
        if (src.offset == 0) {
            Ldr(ARMFPRegister(dest, 32), MemOperand(base, index, vixl::LSL, unsigned(src.scale)));
        } else {
            vixl::UseScratchRegisterScope temps(this);
            const ARMRegister scratch64 = temps.AcquireX();
            MOZ_ASSERT(scratch64.asUnsized() != src.base);
            MOZ_ASSERT(scratch64.asUnsized() != src.index);

            Add(scratch64, base, Operand(index, vixl::LSL, unsigned(src.scale)));
            Ldr(ARMFPRegister(dest, 32), MemOperand(scratch64, src.offset));
        }
    }

    // Floating-point stores.
    void storeDouble(FloatRegister src, const Address& dest) {
        Str(ARMFPRegister(src, 64), MemOperand(ARMRegister(dest.base, 64), dest.offset));
    }
    void storeDouble(FloatRegister src, const BaseIndex& dest) {
        doBaseIndex(ARMFPRegister(src, 64), dest, vixl::STR_d);
    }

    void storeFloat32(FloatRegister src, Address addr) {
        Str(ARMFPRegister(src, 32), MemOperand(ARMRegister(addr.base, 64), addr.offset));
    }
    void storeFloat32(FloatRegister src, BaseIndex addr) {
        doBaseIndex(ARMFPRegister(src, 32), addr, vixl::STR_s);
    }

    // Floating-point register moves and arithmetic (dest = dest op src).
    void moveDouble(FloatRegister src, FloatRegister dest) {
        fmov(ARMFPRegister(dest, 64), ARMFPRegister(src, 64));
    }
    // Zeroing via fmov from the zero register.
    void zeroDouble(FloatRegister reg) {
        fmov(ARMFPRegister(reg, 64), vixl::xzr);
    }
    void zeroFloat32(FloatRegister reg) {
        fmov(ARMFPRegister(reg, 32), vixl::wzr);
    }
    void negateDouble(FloatRegister reg) {
        fneg(ARMFPRegister(reg, 64), ARMFPRegister(reg, 64));
    }
    void negateFloat(FloatRegister reg) {
        fneg(ARMFPRegister(reg, 32), ARMFPRegister(reg, 32));
    }
    void addDouble(FloatRegister src, FloatRegister dest) {
        fadd(ARMFPRegister(dest, 64), ARMFPRegister(dest, 64), ARMFPRegister(src, 64));
    }
    void subDouble(FloatRegister src, FloatRegister dest) {
        fsub(ARMFPRegister(dest, 64), ARMFPRegister(dest, 64), ARMFPRegister(src, 64));
    }
    void mulDouble(FloatRegister src, FloatRegister dest) {
        fmul(ARMFPRegister(dest, 64), ARMFPRegister(dest, 64), ARMFPRegister(src, 64));
    }
    void divDouble(FloatRegister src, FloatRegister dest) {
        fdiv(ARMFPRegister(dest, 64), ARMFPRegister(dest, 64), ARMFPRegister(src, 64));
    }

    void moveFloat32(FloatRegister src, FloatRegister dest) {
        fmov(ARMFPRegister(dest, 32), ARMFPRegister(src, 32));
    }
    // Not implemented on ARM64; crashes if reached.
    void moveFloatAsDouble(Register src, FloatRegister dest) {
        MOZ_CRASH("moveFloatAsDouble");
    }

    // Extract the JS Value tag into |dest|. The memory forms load the boxed
    // Value first, then shift its tag down in place.
    void splitTag(const ValueOperand& operand, Register dest) {
        splitTag(operand.valueReg(), dest);
    }
    void splitTag(const Address& operand, Register dest) {
        loadPtr(operand, dest);
        splitTag(dest, dest);
    }
    void splitTag(const BaseIndex& operand, Register dest) {
        loadPtr(operand, dest);
        splitTag(dest, dest);
    }

    // Extracts the tag of a value and places it in ScratchReg.
    // NOTE(review): the scratch register is released when |temps| goes out of
    // scope, yet it is returned to the caller — see the FIXME below.
    Register splitTagForTest(const ValueOperand& value) {
        vixl::UseScratchRegisterScope temps(this);
        const ARMRegister scratch64 = temps.AcquireX();
        MOZ_ASSERT(scratch64.asUnsized() != value.valueReg());
        Lsr(scratch64, ARMRegister(value.valueReg(), 64), JSVAL_TAG_SHIFT);
        return scratch64.asUnsized(); // FIXME: Surely we can make a better interface.
    }
    // Not implemented on ARM64; crashes if reached.
    void cmpTag(const ValueOperand& operand, ImmTag tag) {
        MOZ_CRASH("cmpTag");
    }

    // 32/64-bit loads.
    void load32(const Address& address, Register dest) {
        Ldr(ARMRegister(dest, 32), MemOperand(ARMRegister(address.base, 64), address.offset));
    }
    void load32(const BaseIndex& src, Register dest) {
        doBaseIndex(ARMRegister(dest, 32), src, vixl::LDR_w);
    }
    void load32(AbsoluteAddress address, Register dest) {
        vixl::UseScratchRegisterScope temps(this);
        const ARMRegister scratch64 = temps.AcquireX();
        movePtr(ImmWord((uintptr_t)address.addr), scratch64.asUnsized());
        // Lower-case |ldr|: the raw instruction form (zero-offset access
        // needs no macro expansion).
        ldr(ARMRegister(dest, 32), MemOperand(scratch64));
    }
    void load64(const Address& address, Register64 dest) {
        loadPtr(address, dest.reg);
    }

    // Sub-word loads with sign/zero extension into a W register.
    void load8SignExtend(const Address& address, Register dest) {
        Ldrsb(ARMRegister(dest, 32), MemOperand(ARMRegister(address.base, 64), address.offset));
    }
    void load8SignExtend(const BaseIndex& src, Register dest) {
        doBaseIndex(ARMRegister(dest, 32), src, vixl::LDRSB_w);
    }

    void load8ZeroExtend(const Address& address, Register dest) {
        Ldrb(ARMRegister(dest, 32), MemOperand(ARMRegister(address.base, 64), address.offset));
    }
    void load8ZeroExtend(const BaseIndex& src, Register dest) {
        doBaseIndex(ARMRegister(dest, 32), src, vixl::LDRB_w);
    }

    void load16SignExtend(const Address& address, Register dest) {
        Ldrsh(ARMRegister(dest, 32),
              MemOperand(ARMRegister(address.base, 64), address.offset));
    }
    void load16SignExtend(const BaseIndex& src, Register dest) {
        doBaseIndex(ARMRegister(dest, 32), src, vixl::LDRSH_w);
    }

    void load16ZeroExtend(const Address& address, Register dest) {
        Ldrh(ARMRegister(dest, 32), MemOperand(ARMRegister(address.base, 64), address.offset));
    }
    void load16ZeroExtend(const BaseIndex& src, Register dest) {
        doBaseIndex(ARMRegister(dest, 32), src, vixl::LDRH_w);
    }

    // 32-bit adds (Add does not set flags).
    void add32(Register src, Register dest) {
        Add(ARMRegister(dest, 32), ARMRegister(dest, 32), Operand(ARMRegister(src, 32)));
    }
    void add32(Imm32 imm, Register dest) {
        Add(ARMRegister(dest, 32), ARMRegister(dest, 32), Operand(imm.value));
    }
    // Read-modify-write add on a memory operand.
    void add32(Imm32 imm, const Address& dest) {
        vixl::UseScratchRegisterScope temps(this);
        const ARMRegister scratch32 = temps.AcquireW();
        MOZ_ASSERT(scratch32.asUnsized() != dest.base);

        Ldr(scratch32, MemOperand(ARMRegister(dest.base, 64), dest.offset));
        Add(scratch32, scratch32, Operand(imm.value));
        Str(scratch32, MemOperand(ARMRegister(dest.base, 64), dest.offset));
    }

    // Flag-setting variants (Adds/Subs update the condition flags).
    void adds32(Register src, Register dest) {
        Adds(ARMRegister(dest, 32), ARMRegister(dest, 32), Operand(ARMRegister(src, 32)));
    }
    void adds32(Imm32 imm, Register dest) {
        Adds(ARMRegister(dest, 32), ARMRegister(dest, 32), Operand(imm.value));
    }
    void adds32(Imm32 imm, const Address& dest) {
        vixl::UseScratchRegisterScope temps(this);
        const ARMRegister scratch32 = temps.AcquireW();
        MOZ_ASSERT(scratch32.asUnsized() != dest.base);

        Ldr(scratch32, MemOperand(ARMRegister(dest.base, 64), dest.offset));
        Adds(scratch32, scratch32, Operand(imm.value));
        Str(scratch32, MemOperand(ARMRegister(dest.base, 64), dest.offset));
    }
    void add64(Imm32 imm, Register64 dest) {
        Add(ARMRegister(dest.reg, 64), ARMRegister(dest.reg, 64), Operand(imm.value));
    }

    void subs32(Imm32 imm, Register dest) {
        Subs(ARMRegister(dest, 32), ARMRegister(dest, 32), Operand(imm.value));
    }
    void subs32(Register src, Register dest) {
        Subs(ARMRegister(dest, 32), ARMRegister(dest, 32), Operand(ARMRegister(src, 32)));
    }

    // Pointer-width adds.
    void addPtr(Register src, Register dest) {
        Add(ARMRegister(dest, 64), ARMRegister(dest, 64), Operand(ARMRegister(src, 64)));
    }
    void addPtr(Register src1, Register src2, Register dest) {
        Add(ARMRegister(dest, 64), ARMRegister(src1, 64), Operand(ARMRegister(src2, 64)));
    }

    void addPtr(Imm32 imm, Register dest) {
        Add(ARMRegister(dest, 64), ARMRegister(dest, 64), Operand(imm.value));
    }
    void addPtr(Imm32 imm, Register src, Register dest) {
        Add(ARMRegister(dest, 64), ARMRegister(src, 64), Operand(imm.value));
    }

    // Read-modify-write add on a memory operand.
    void addPtr(Imm32 imm, const Address& dest) {
        vixl::UseScratchRegisterScope temps(this);
        const ARMRegister scratch64 = temps.AcquireX();
        MOZ_ASSERT(scratch64.asUnsized() != dest.base);

        Ldr(scratch64, MemOperand(ARMRegister(dest.base, 64), dest.offset));
        Add(scratch64, scratch64, Operand(imm.value));
        Str(scratch64, MemOperand(ARMRegister(dest.base, 64), dest.offset));
    }
    void addPtr(ImmWord imm, Register dest) {
        Add(ARMRegister(dest, 64), ARMRegister(dest, 64), Operand(imm.value));
    }
    void addPtr(ImmPtr imm, Register dest) {
        Add(ARMRegister(dest, 64), ARMRegister(dest, 64), Operand(uint64_t(imm.value)));
    }
    void addPtr(const Address& src, Register dest) {
        vixl::UseScratchRegisterScope temps(this);
        const ARMRegister scratch64 = temps.AcquireX();
        MOZ_ASSERT(scratch64.asUnsized() != src.base);

        Ldr(scratch64, MemOperand(ARMRegister(src.base, 64), src.offset));
        Add(ARMRegister(dest, 64), ARMRegister(dest, 64), Operand(scratch64));
    }

    // Pointer-width subtracts.
    void subPtr(Imm32 imm, Register dest) {
        Sub(ARMRegister(dest, 64), ARMRegister(dest, 64), Operand(imm.value));
    }
    void subPtr(Register src, Register dest) {
        Sub(ARMRegister(dest, 64), ARMRegister(dest, 64), Operand(ARMRegister(src, 64)));
    }
    void subPtr(const Address& addr, Register dest) {
        vixl::UseScratchRegisterScope temps(this);
        const ARMRegister scratch64 = temps.AcquireX();
        MOZ_ASSERT(scratch64.asUnsized() != addr.base);

        Ldr(scratch64, MemOperand(ARMRegister(addr.base, 64), addr.offset));
        Sub(ARMRegister(dest, 64), ARMRegister(dest, 64), Operand(scratch64));
    }
    // Read-modify-write subtract on a memory operand.
    void subPtr(Register src, const Address& dest) {
        vixl::UseScratchRegisterScope temps(this);
        const ARMRegister scratch64 = temps.AcquireX();
        MOZ_ASSERT(scratch64.asUnsized() != dest.base);

        Ldr(scratch64, MemOperand(ARMRegister(dest.base, 64), dest.offset));
        Sub(scratch64, scratch64, Operand(ARMRegister(src, 64)));
        Str(scratch64, MemOperand(ARMRegister(dest.base, 64), dest.offset));
    }
    // 32-bit multiply: dest = src1 * src2 (low 32 bits).
    // If onOver is non-null, branch there on signed 32-bit overflow.
    // If onZero is non-null, branch there when the 32-bit product is zero.
    void mul32(Register src1, Register src2, Register dest, Label* onOver, Label* onZero) {
        // Compute the full 64-bit product so overflow can be detected.
        Smull(ARMRegister(dest, 64), ARMRegister(src1, 32), ARMRegister(src2, 32));
        if (onOver) {
            // Overflow iff the 64-bit product differs from the sign-extended
            // low 32 bits.
            Cmp(ARMRegister(dest, 64), Operand(ARMRegister(dest, 32), vixl::SXTW));
            B(onOver, NotEqual);
        }
        if (onZero)
            Cbz(ARMRegister(dest, 32), onZero);

        // Clear upper 32 bits.
        Mov(ARMRegister(dest, 32), ARMRegister(dest, 32));
    }

    // Pop the return address and return through it.
    void ret() {
        pop(lr);
        abiret();
    }

    // Return and pop n bytes of stack.
    void retn(Imm32 n) {
        // ip0 <- [sp]; sp += n; ret ip0
        Ldr(vixl::ip0, MemOperand(GetStackPointer64(), ptrdiff_t(n.value), vixl::PostIndex));
        syncStackPtr(); // SP is always used to transmit the stack between calls.
        Ret(vixl::ip0);
    }

    // Conditional branch to a label.
    void j(Condition cond, Label* dest) {
        B(dest, cond);
    }

    void branch(Condition cond, Label* label) {
        B(label, cond);
    }
    // Unconditional branch to JIT code; the target is recorded as a pending
    // jump and the placeholder branch is patched later.
    void branch(JitCode* target) {
        syncStackPtr();
        addPendingJump(nextOffset(), ImmPtr(target->raw()), Relocation::JITCODE);
        b(-1); // The jump target will be patched by executableCopy().
    }

    void branch32(Condition cond, const Operand& lhs, Register rhs, Label* label) {
        // since rhs is an operand, do the compare backwards
        Cmp(ARMRegister(rhs, 32), lhs);
        B(label, Assembler::InvertCmpCondition(cond));
    }
    void branch32(Condition cond, const Operand& lhs, Imm32 rhs, Label* label) {
        ARMRegister l = lhs.reg();
        Cmp(l, Operand(rhs.value));
        B(label, cond);
    }
    void branch32(Condition cond, Register lhs, Register rhs, Label* label) {
        cmp32(lhs, rhs);
        B(label, cond);
    }
    void branch32(Condition cond, Register lhs, Imm32 imm, Label* label) {
        cmp32(lhs, imm);
        B(label, cond);
    }
    // Memory-operand forms load the lhs through a scratch register first.
    void branch32(Condition cond, const Address& lhs, Register rhs, Label* label) {
        vixl::UseScratchRegisterScope temps(this);
        const Register scratch = temps.AcquireX().asUnsized();
        MOZ_ASSERT(scratch != lhs.base);
        MOZ_ASSERT(scratch != rhs);
        load32(lhs, scratch);
        branch32(cond, scratch, rhs, label);
    }
    void branch32(Condition cond, const Address& lhs, Imm32 imm, Label* label) {
        vixl::UseScratchRegisterScope temps(this);
        const Register scratch = temps.AcquireX().asUnsized();
        MOZ_ASSERT(scratch != lhs.base);
        load32(lhs, scratch);
        branch32(cond, scratch, imm, label);
    }
    void branch32(Condition cond, AbsoluteAddress lhs, Register rhs, Label* label) {
        vixl::UseScratchRegisterScope temps(this);
        const Register scratch = temps.AcquireX().asUnsized();
        movePtr(ImmPtr(lhs.addr), scratch);
        branch32(cond, Address(scratch, 0), rhs, label);
    }
    void branch32(Condition cond, AbsoluteAddress lhs, Imm32 rhs, Label* label) {
        vixl::UseScratchRegisterScope temps(this);
        const Register scratch = temps.AcquireX().asUnsized();
        movePtr(ImmPtr(lhs.addr), scratch);
        branch32(cond, Address(scratch, 0), rhs, label);
    }
    void branch32(Condition cond, wasm::SymbolicAddress lhs, Imm32 rhs, Label* label) {
        vixl::UseScratchRegisterScope temps(this);
        const Register scratch = temps.AcquireX().asUnsized();
        movePtr(lhs, scratch);
        branch32(cond, Address(scratch, 0), rhs, label);
    }
    void branch32(Condition cond, BaseIndex lhs, Imm32 rhs, Label* label) {
        vixl::UseScratchRegisterScope temps(this);
        const ARMRegister scratch32 = temps.AcquireW();
        MOZ_ASSERT(scratch32.asUnsized() != lhs.base);
        MOZ_ASSERT(scratch32.asUnsized() != lhs.index);
        doBaseIndex(scratch32, lhs, vixl::LDR_w);
        branch32(cond, scratch32.asUnsized(), rhs, label);
    }

    void branchTest32(Condition cond, Register lhs, Register rhs, Label* label) {
        MOZ_ASSERT(cond == Zero || cond == NonZero || cond == Signed || cond == NotSigned);
        // x86 prefers |test foo, foo| to |cmp foo, #0|.
        // Convert the former to the latter for ARM.
        if (lhs == rhs && (cond == Zero || cond == NonZero))
            cmp32(lhs, Imm32(0));
        else
            test32(lhs, rhs);
        B(label, cond);
    }
    void branchTest32(Condition cond, Register lhs, Imm32 imm, Label* label) {
        MOZ_ASSERT(cond == Zero || cond == NonZero || cond == Signed || cond == NotSigned);
        test32(lhs, imm);
        B(label, cond);
    }
    void branchTest32(Condition cond, const Address& address, Imm32 imm, Label* label) {
        vixl::UseScratchRegisterScope temps(this);
        const Register scratch = temps.AcquireX().asUnsized();
        MOZ_ASSERT(scratch != address.base);
        load32(address, scratch);
        branchTest32(cond, scratch, imm, label);
    }
    void branchTest32(Condition cond, AbsoluteAddress address, Imm32 imm, Label* label) {
        vixl::UseScratchRegisterScope temps(this);
        const Register scratch = temps.AcquireX().asUnsized();
        loadPtr(address, scratch);
        branchTest32(cond, scratch, imm, label);
    }

    // Emit a patchable jump: a pool-loaded target address plus a placeholder
    // branch. Returns the offsets needed to patch both later.
    CodeOffsetJump jumpWithPatch(RepatchLabel* label, Condition cond = Always,
                                 Label* documentation = nullptr)
    {
        ARMBuffer::PoolEntry pe;
        BufferOffset load_bo;
        BufferOffset branch_bo;

        // Does not overwrite condition codes from the caller.
        {
            vixl::UseScratchRegisterScope temps(this);
            const ARMRegister scratch64 = temps.AcquireX();
            load_bo = immPool64(scratch64, (uint64_t)label, &pe);
        }

        MOZ_ASSERT(!label->bound());
        if (cond != Always) {
            // Skip the placeholder branch when the condition does not hold.
            Label notTaken;
            B(&notTaken, Assembler::InvertCondition(cond));
            branch_bo = b(-1);
            bind(&notTaken);
        } else {
            nop();
            branch_bo = b(-1);
        }
        label->use(branch_bo.getOffset());
        return CodeOffsetJump(load_bo.getOffset(), pe.index());
    }
    CodeOffsetJump backedgeJump(RepatchLabel* label, Label* documentation = nullptr) {
        return jumpWithPatch(label, Always, documentation);
    }
    template <typename T>
    CodeOffsetJump branchPtrWithPatch(Condition cond, Register reg, T ptr, RepatchLabel* label) {
        cmpPtr(reg, ptr);
        return jumpWithPatch(label, cond);
    }
    template <typename T>
    CodeOffsetJump branchPtrWithPatch(Condition cond, Address addr, T ptr, RepatchLabel* label) {
        // The scratch register is unused after the condition codes are set.
        {
            vixl::UseScratchRegisterScope temps(this);
            const Register scratch = temps.AcquireX().asUnsized();
            MOZ_ASSERT(scratch != addr.base);
            loadPtr(addr, scratch);
            cmpPtr(scratch, ptr);
        }
        return jumpWithPatch(label, cond);
    }

    void branchPtr(Condition cond, wasm::SymbolicAddress lhs, Register rhs, Label* label) {
        vixl::UseScratchRegisterScope temps(this);
        const Register scratch = temps.AcquireX().asUnsized();
        MOZ_ASSERT(scratch != rhs);
        loadPtr(lhs, scratch);
        branchPtr(cond, scratch, rhs, label);
    }
    void branchPtr(Condition cond, Address lhs, ImmWord ptr, Label* label) {
        vixl::UseScratchRegisterScope temps(this);
        const Register scratch = temps.AcquireX().asUnsized();
        MOZ_ASSERT(scratch != lhs.base);
        loadPtr(lhs, scratch);
        branchPtr(cond, scratch, ptr, label);
    }
    void branchPtr(Condition cond, Address lhs, ImmPtr ptr, Label* label) {
        vixl::UseScratchRegisterScope temps(this);
        const Register scratch = temps.AcquireX().asUnsized();
        MOZ_ASSERT(scratch != lhs.base);
        loadPtr(lhs, scratch);
        branchPtr(cond, scratch, ptr, label);
    }
    void branchPtr(Condition cond, Address lhs, Register ptr, Label* label) {
        vixl::UseScratchRegisterScope temps(this);
        const Register scratch = temps.AcquireX().asUnsized();
        MOZ_ASSERT(scratch != lhs.base);
        MOZ_ASSERT(scratch != ptr);
        loadPtr(lhs, scratch);
        branchPtr(cond, scratch, ptr, label);
    }
    void branchPtr(Condition cond, Register lhs, Imm32 imm, Label* label) {
        cmpPtr(lhs, imm);
        B(label, cond);
    }
cond,Register lhs,ImmWord ptr,Label * label)1640 void branchPtr(Condition cond, Register lhs, ImmWord ptr, Label* label) { 1641 cmpPtr(lhs, ptr); 1642 B(label, cond); 1643 } branchPtr(Condition cond,Register lhs,ImmPtr rhs,Label * label)1644 void branchPtr(Condition cond, Register lhs, ImmPtr rhs, Label* label) { 1645 cmpPtr(lhs, rhs); 1646 B(label, cond); 1647 } branchPtr(Condition cond,Register lhs,ImmGCPtr ptr,Label * label)1648 void branchPtr(Condition cond, Register lhs, ImmGCPtr ptr, Label* label) { 1649 vixl::UseScratchRegisterScope temps(this); 1650 const Register scratch = temps.AcquireX().asUnsized(); 1651 MOZ_ASSERT(scratch != lhs); 1652 movePtr(ptr, scratch); 1653 branchPtr(cond, lhs, scratch, label); 1654 } branchPtr(Condition cond,Address lhs,ImmGCPtr ptr,Label * label)1655 void branchPtr(Condition cond, Address lhs, ImmGCPtr ptr, Label* label) { 1656 vixl::UseScratchRegisterScope temps(this); 1657 const ARMRegister scratch1_64 = temps.AcquireX(); 1658 const ARMRegister scratch2_64 = temps.AcquireX(); 1659 MOZ_ASSERT(scratch1_64.asUnsized() != lhs.base); 1660 MOZ_ASSERT(scratch2_64.asUnsized() != lhs.base); 1661 1662 movePtr(ptr, scratch1_64.asUnsized()); 1663 loadPtr(lhs, scratch2_64.asUnsized()); 1664 cmp(scratch2_64, scratch1_64); 1665 B(cond, label); 1666 1667 } branchPtr(Condition cond,Register lhs,Register rhs,Label * label)1668 void branchPtr(Condition cond, Register lhs, Register rhs, Label* label) { 1669 Cmp(ARMRegister(lhs, 64), ARMRegister(rhs, 64)); 1670 B(label, cond); 1671 } branchPtr(Condition cond,AbsoluteAddress lhs,Register rhs,Label * label)1672 void branchPtr(Condition cond, AbsoluteAddress lhs, Register rhs, Label* label) { 1673 vixl::UseScratchRegisterScope temps(this); 1674 const Register scratch = temps.AcquireX().asUnsized(); 1675 MOZ_ASSERT(scratch != rhs); 1676 loadPtr(lhs, scratch); 1677 branchPtr(cond, scratch, rhs, label); 1678 } branchPtr(Condition cond,AbsoluteAddress lhs,ImmWord ptr,Label * label)1679 void 
branchPtr(Condition cond, AbsoluteAddress lhs, ImmWord ptr, Label* label) { 1680 vixl::UseScratchRegisterScope temps(this); 1681 const Register scratch = temps.AcquireX().asUnsized(); 1682 loadPtr(lhs, scratch); 1683 branchPtr(cond, scratch, ptr, label); 1684 } 1685 branch64(Condition cond,const Address & lhs,Imm64 val,Label * label)1686 void branch64(Condition cond, const Address& lhs, Imm64 val, Label* label) { 1687 MOZ_ASSERT(cond == Assembler::NotEqual, 1688 "other condition codes not supported"); 1689 1690 branchPtr(cond, lhs, ImmWord(val.value), label); 1691 } 1692 branch64(Condition cond,const Address & lhs,const Address & rhs,Register scratch,Label * label)1693 void branch64(Condition cond, const Address& lhs, const Address& rhs, Register scratch, 1694 Label* label) 1695 { 1696 MOZ_ASSERT(cond == Assembler::NotEqual, 1697 "other condition codes not supported"); 1698 MOZ_ASSERT(lhs.base != scratch); 1699 MOZ_ASSERT(rhs.base != scratch); 1700 1701 loadPtr(rhs, scratch); 1702 branchPtr(cond, lhs, scratch, label); 1703 } 1704 branchTestPtr(Condition cond,Register lhs,Register rhs,Label * label)1705 void branchTestPtr(Condition cond, Register lhs, Register rhs, Label* label) { 1706 Tst(ARMRegister(lhs, 64), Operand(ARMRegister(rhs, 64))); 1707 B(label, cond); 1708 } branchTestPtr(Condition cond,Register lhs,Imm32 imm,Label * label)1709 void branchTestPtr(Condition cond, Register lhs, Imm32 imm, Label* label) { 1710 Tst(ARMRegister(lhs, 64), Operand(imm.value)); 1711 B(label, cond); 1712 } branchTestPtr(Condition cond,const Address & lhs,Imm32 imm,Label * label)1713 void branchTestPtr(Condition cond, const Address& lhs, Imm32 imm, Label* label) { 1714 vixl::UseScratchRegisterScope temps(this); 1715 const Register scratch = temps.AcquireX().asUnsized(); 1716 MOZ_ASSERT(scratch != lhs.base); 1717 loadPtr(lhs, scratch); 1718 branchTestPtr(cond, scratch, imm, label); 1719 } branchPrivatePtr(Condition cond,const Address & lhs,ImmPtr ptr,Label * label)1720 void 
branchPrivatePtr(Condition cond, const Address& lhs, ImmPtr ptr, Label* label) { 1721 branchPtr(cond, lhs, ptr, label); 1722 } 1723 branchPrivatePtr(Condition cond,const Address & lhs,Register ptr,Label * label)1724 void branchPrivatePtr(Condition cond, const Address& lhs, Register ptr, Label* label) { 1725 branchPtr(cond, lhs, ptr, label); 1726 } 1727 branchPrivatePtr(Condition cond,Register lhs,ImmWord ptr,Label * label)1728 void branchPrivatePtr(Condition cond, Register lhs, ImmWord ptr, Label* label) { 1729 branchPtr(cond, lhs, ptr, label); 1730 } 1731 decBranchPtr(Condition cond,Register lhs,Imm32 imm,Label * label)1732 void decBranchPtr(Condition cond, Register lhs, Imm32 imm, Label* label) { 1733 Subs(ARMRegister(lhs, 64), ARMRegister(lhs, 64), Operand(imm.value)); 1734 B(cond, label); 1735 } 1736 branchTestUndefined(Condition cond,Register tag,Label * label)1737 void branchTestUndefined(Condition cond, Register tag, Label* label) { 1738 Condition c = testUndefined(cond, tag); 1739 B(label, c); 1740 } branchTestInt32(Condition cond,Register tag,Label * label)1741 void branchTestInt32(Condition cond, Register tag, Label* label) { 1742 Condition c = testInt32(cond, tag); 1743 B(label, c); 1744 } branchTestDouble(Condition cond,Register tag,Label * label)1745 void branchTestDouble(Condition cond, Register tag, Label* label) { 1746 Condition c = testDouble(cond, tag); 1747 B(label, c); 1748 } branchTestBoolean(Condition cond,Register tag,Label * label)1749 void branchTestBoolean(Condition cond, Register tag, Label* label) { 1750 Condition c = testBoolean(cond, tag); 1751 B(label, c); 1752 } branchTestNull(Condition cond,Register tag,Label * label)1753 void branchTestNull(Condition cond, Register tag, Label* label) { 1754 Condition c = testNull(cond, tag); 1755 B(label, c); 1756 } branchTestString(Condition cond,Register tag,Label * label)1757 void branchTestString(Condition cond, Register tag, Label* label) { 1758 Condition c = testString(cond, tag); 1759 
B(label, c); 1760 } branchTestSymbol(Condition cond,Register tag,Label * label)1761 void branchTestSymbol(Condition cond, Register tag, Label* label) { 1762 Condition c = testSymbol(cond, tag); 1763 B(label, c); 1764 } branchTestObject(Condition cond,Register tag,Label * label)1765 void branchTestObject(Condition cond, Register tag, Label* label) { 1766 Condition c = testObject(cond, tag); 1767 B(label, c); 1768 } branchTestNumber(Condition cond,Register tag,Label * label)1769 void branchTestNumber(Condition cond, Register tag, Label* label) { 1770 Condition c = testNumber(cond, tag); 1771 B(label, c); 1772 } 1773 branchTestUndefined(Condition cond,const Address & address,Label * label)1774 void branchTestUndefined(Condition cond, const Address& address, Label* label) { 1775 Condition c = testUndefined(cond, address); 1776 B(label, c); 1777 } branchTestInt32(Condition cond,const Address & address,Label * label)1778 void branchTestInt32(Condition cond, const Address& address, Label* label) { 1779 Condition c = testInt32(cond, address); 1780 B(label, c); 1781 } branchTestDouble(Condition cond,const Address & address,Label * label)1782 void branchTestDouble(Condition cond, const Address& address, Label* label) { 1783 Condition c = testDouble(cond, address); 1784 B(label, c); 1785 } branchTestBoolean(Condition cond,const Address & address,Label * label)1786 void branchTestBoolean(Condition cond, const Address& address, Label* label) { 1787 Condition c = testDouble(cond, address); 1788 B(label, c); 1789 } branchTestNull(Condition cond,const Address & address,Label * label)1790 void branchTestNull(Condition cond, const Address& address, Label* label) { 1791 Condition c = testNull(cond, address); 1792 B(label, c); 1793 } branchTestString(Condition cond,const Address & address,Label * label)1794 void branchTestString(Condition cond, const Address& address, Label* label) { 1795 Condition c = testString(cond, address); 1796 B(label, c); 1797 } branchTestSymbol(Condition 
cond,const Address & address,Label * label)1798 void branchTestSymbol(Condition cond, const Address& address, Label* label) { 1799 Condition c = testSymbol(cond, address); 1800 B(label, c); 1801 } branchTestObject(Condition cond,const Address & address,Label * label)1802 void branchTestObject(Condition cond, const Address& address, Label* label) { 1803 Condition c = testObject(cond, address); 1804 B(label, c); 1805 } branchTestNumber(Condition cond,const Address & address,Label * label)1806 void branchTestNumber(Condition cond, const Address& address, Label* label) { 1807 Condition c = testNumber(cond, address); 1808 B(label, c); 1809 } 1810 1811 // Perform a type-test on a full Value loaded into a register. 1812 // Clobbers the ScratchReg. branchTestUndefined(Condition cond,const ValueOperand & src,Label * label)1813 void branchTestUndefined(Condition cond, const ValueOperand& src, Label* label) { 1814 Condition c = testUndefined(cond, src); 1815 B(label, c); 1816 } branchTestInt32(Condition cond,const ValueOperand & src,Label * label)1817 void branchTestInt32(Condition cond, const ValueOperand& src, Label* label) { 1818 Condition c = testInt32(cond, src); 1819 B(label, c); 1820 } branchTestBoolean(Condition cond,const ValueOperand & src,Label * label)1821 void branchTestBoolean(Condition cond, const ValueOperand& src, Label* label) { 1822 Condition c = testBoolean(cond, src); 1823 B(label, c); 1824 } branchTestDouble(Condition cond,const ValueOperand & src,Label * label)1825 void branchTestDouble(Condition cond, const ValueOperand& src, Label* label) { 1826 Condition c = testDouble(cond, src); 1827 B(label, c); 1828 } branchTestNull(Condition cond,const ValueOperand & src,Label * label)1829 void branchTestNull(Condition cond, const ValueOperand& src, Label* label) { 1830 Condition c = testNull(cond, src); 1831 B(label, c); 1832 } branchTestString(Condition cond,const ValueOperand & src,Label * label)1833 void branchTestString(Condition cond, const ValueOperand& 
src, Label* label) { 1834 Condition c = testString(cond, src); 1835 B(label, c); 1836 } branchTestSymbol(Condition cond,const ValueOperand & src,Label * label)1837 void branchTestSymbol(Condition cond, const ValueOperand& src, Label* label) { 1838 Condition c = testSymbol(cond, src); 1839 B(label, c); 1840 } branchTestObject(Condition cond,const ValueOperand & src,Label * label)1841 void branchTestObject(Condition cond, const ValueOperand& src, Label* label) { 1842 Condition c = testObject(cond, src); 1843 B(label, c); 1844 } branchTestNumber(Condition cond,const ValueOperand & src,Label * label)1845 void branchTestNumber(Condition cond, const ValueOperand& src, Label* label) { 1846 Condition c = testNumber(cond, src); 1847 B(label, c); 1848 } 1849 1850 // Perform a type-test on a Value addressed by BaseIndex. 1851 // Clobbers the ScratchReg. branchTestUndefined(Condition cond,const BaseIndex & address,Label * label)1852 void branchTestUndefined(Condition cond, const BaseIndex& address, Label* label) { 1853 Condition c = testUndefined(cond, address); 1854 B(label, c); 1855 } branchTestInt32(Condition cond,const BaseIndex & address,Label * label)1856 void branchTestInt32(Condition cond, const BaseIndex& address, Label* label) { 1857 Condition c = testInt32(cond, address); 1858 B(label, c); 1859 } branchTestBoolean(Condition cond,const BaseIndex & address,Label * label)1860 void branchTestBoolean(Condition cond, const BaseIndex& address, Label* label) { 1861 Condition c = testBoolean(cond, address); 1862 B(label, c); 1863 } branchTestDouble(Condition cond,const BaseIndex & address,Label * label)1864 void branchTestDouble(Condition cond, const BaseIndex& address, Label* label) { 1865 Condition c = testDouble(cond, address); 1866 B(label, c); 1867 } branchTestNull(Condition cond,const BaseIndex & address,Label * label)1868 void branchTestNull(Condition cond, const BaseIndex& address, Label* label) { 1869 Condition c = testNull(cond, address); 1870 B(label, c); 1871 } 
    void branchTestString(Condition cond, const BaseIndex& address, Label* label) {
        Condition c = testString(cond, address);
        B(label, c);
    }
    void branchTestSymbol(Condition cond, const BaseIndex& address, Label* label) {
        Condition c = testSymbol(cond, address);
        B(label, c);
    }
    void branchTestObject(Condition cond, const BaseIndex& address, Label* label) {
        Condition c = testObject(cond, address);
        B(label, c);
    }

    // Generic type tests usable with Register, Address, BaseIndex or
    // ValueOperand sources (whatever the matching test* overload accepts).
    template <typename T>
    void branchTestGCThing(Condition cond, const T& src, Label* label) {
        Condition c = testGCThing(cond, src);
        B(label, c);
    }
    template <typename T>
    void branchTestPrimitive(Condition cond, const T& t, Label* label) {
        Condition c = testPrimitive(cond, t);
        B(label, c);
    }
    template <typename T>
    void branchTestMagic(Condition cond, const T& t, Label* label) {
        Condition c = testMagic(cond, t);
        B(label, c);
    }
    // Branch if val is (or is not) the magic value with payload |why|.
    void branchTestMagicValue(Condition cond, const ValueOperand& val, JSWhyMagic why, Label* label) {
        MOZ_ASSERT(cond == Equal || cond == NotEqual);
        branchTestValue(cond, val, MagicValue(why), label);
    }
    // Compare a boxed Value register against a constant Value; the constant is
    // materialized into a scratch register first.
    void branchTestValue(Condition cond, const ValueOperand& value, const Value& v, Label* label) {
        vixl::UseScratchRegisterScope temps(this);
        const ARMRegister scratch64 = temps.AcquireX();
        MOZ_ASSERT(scratch64.asUnsized() != value.valueReg());
        moveValue(v, ValueOperand(scratch64.asUnsized()));
        Cmp(ARMRegister(value.valueReg(), 64), scratch64);
        B(label, cond);
    }
    // Compare a Value in memory against a boxed Value register.
    void branchTestValue(Condition cond, const Address& valaddr, const ValueOperand& value,
                         Label* label)
    {
        vixl::UseScratchRegisterScope temps(this);
        const ARMRegister scratch64 = temps.AcquireX();
        MOZ_ASSERT(scratch64.asUnsized() != valaddr.base);
        MOZ_ASSERT(scratch64.asUnsized() != value.valueReg());
        loadValue(valaddr, scratch64.asUnsized());
        Cmp(ARMRegister(value.valueReg(), 64), Operand(scratch64));
        B(label, cond);
    }
    // On ARM64 a Register64 is a single register; temp is unused.
    void branchTest64(Condition cond, Register64 lhs, Register64 rhs, Register temp, Label* label) {
        branchTestPtr(cond, lhs.reg, rhs.reg, label);
    }

    // Sets the FP condition flags from a 64-bit compare; cond itself is unused
    // here (the caller branches on it).
    void compareDouble(DoubleCondition cond, FloatRegister lhs, FloatRegister rhs) {
        Fcmp(ARMFPRegister(lhs, 64), ARMFPRegister(rhs, 64));
    }
    // Double compare-and-branch, with explicit handling of the unordered (NaN)
    // cases that a single condition code cannot express.
    void branchDouble(DoubleCondition cond, FloatRegister lhs, FloatRegister rhs, Label* label) {
        compareDouble(cond, lhs, rhs);
        switch (cond) {
          case DoubleNotEqual: {
            Label unordered;
            // not equal *and* ordered
            branch(Overflow, &unordered);
            branch(NotEqual, label);
            bind(&unordered);
            break;
          }
          case DoubleEqualOrUnordered:
            // Taken when equal or when the comparison was unordered (NaN).
            branch(Overflow, label);
            branch(Equal, label);
            break;
          default:
            branch(Condition(cond), label);
        }
    }

    // 32-bit (float) variants of the two functions above.
    void compareFloat(DoubleCondition cond, FloatRegister lhs, FloatRegister rhs) {
        Fcmp(ARMFPRegister(lhs, 32), ARMFPRegister(rhs, 32));
    }
    void branchFloat(DoubleCondition cond, FloatRegister lhs, FloatRegister rhs, Label* label) {
        compareFloat(cond, lhs, rhs);
        switch (cond) {
          case DoubleNotEqual: {
            Label unordered;
            // not equal *and* ordered
            branch(Overflow, &unordered);
            branch(NotEqual, label);
            bind(&unordered);
            break;
          }
          case DoubleEqualOrUnordered:
            branch(Overflow, label);
            branch(Equal, label);
            break;
          default:
            branch(Condition(cond), label);
        }
    }

    // Not implemented on ARM64.
    void branchNegativeZero(FloatRegister reg, Register scratch, Label* label) {
        MOZ_CRASH("branchNegativeZero");
    }
    void branchNegativeZeroFloat32(FloatRegister reg, Register scratch, Label* label) {
        MOZ_CRASH("branchNegativeZeroFloat32");
    }

    // A double's bits are its boxed form on ARM64: move them straight across.
    void boxDouble(FloatRegister src, const ValueOperand& dest) {
        Fmov(ARMRegister(dest.valueReg(), 64), ARMFPRegister(src, 64));
    }
    void boxNonDouble(JSValueType type, Register src, const ValueOperand& dest) {
        boxValue(type, src, dest.valueReg());
    }

    // Note that the |dest| register here may be ScratchReg, so we shouldn't use it.
    // Unbox an int32 payload: the low 32 bits of the boxed Value.
    void unboxInt32(const ValueOperand& src, Register dest) {
        move32(src.valueReg(), dest);
    }
    void unboxInt32(const Address& src, Register dest) {
        load32(src, dest);
    }
    // Unbox a double: its bits are the Value itself on ARM64.
    void unboxDouble(const Address& src, FloatRegister dest) {
        loadDouble(src, dest);
    }
    void unboxDouble(const ValueOperand& src, FloatRegister dest) {
        Fmov(ARMFPRegister(dest, 64), ARMRegister(src.valueReg(), 64));
    }

    // Not implemented on ARM64.
    void unboxArgObjMagic(const ValueOperand& src, Register dest) {
        MOZ_CRASH("unboxArgObjMagic");
    }
    void unboxArgObjMagic(const Address& src, Register dest) {
        MOZ_CRASH("unboxArgObjMagic");
    }

    // Unbox a boolean payload: the low 32 bits of the boxed Value.
    void unboxBoolean(const ValueOperand& src, Register dest) {
        move32(src.valueReg(), dest);
    }
    void unboxBoolean(const Address& src, Register dest) {
        load32(src, dest);
    }

    // Unbox a magic payload: the low 32 bits of the boxed Value.
    void unboxMagic(const ValueOperand& src, Register dest) {
        move32(src.valueReg(), dest);
    }
    // Unbox any non-double value into dest. Prefer unboxInt32 or unboxBoolean
    // instead if the source type is known.
    void unboxNonDouble(const ValueOperand& src, Register dest) {
        unboxNonDouble(src.valueReg(), dest);
    }
    void unboxNonDouble(Address src, Register dest) {
        loadPtr(src, dest);
        unboxNonDouble(dest, dest);
    }

    // Strip the tag: keep only the payload bits below JSVAL_TAG_SHIFT.
    void unboxNonDouble(Register src, Register dest) {
        And(ARMRegister(dest, 64), ARMRegister(src, 64), Operand((1ULL << JSVAL_TAG_SHIFT) - 1ULL));
    }

    // Extract a private pointer payload: bits [1, JSVAL_TAG_SHIFT) of the
    // boxed Value (the stored pointer is shifted right by one; see boxing).
    void unboxPrivate(const ValueOperand& src, Register dest) {
        ubfx(ARMRegister(dest, 64), ARMRegister(src.valueReg(), 64), 1, JSVAL_TAG_SHIFT - 1);
    }

    // Flip a boolean payload in place by toggling its low bit.
    void notBoolean(const ValueOperand& val) {
        ARMRegister r(val.valueReg(), 64);
        eor(r, r, Operand(1));
    }
    void unboxObject(const ValueOperand& src, Register dest) {
        unboxNonDouble(src.valueReg(), dest);
    }