/*
 * Copyright (c) 2009, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

package org.graalvm.compiler.core.amd64;

import static jdk.vm.ci.code.ValueUtil.asRegister;
import static jdk.vm.ci.code.ValueUtil.isAllocatableValue;
import static jdk.vm.ci.code.ValueUtil.isRegister;
import static org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic.CMP;
import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize.DWORD;
import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize.PD;
import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize.PS;
import static org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize.QWORD;
import static org.graalvm.compiler.core.common.GraalOptions.GeneratePIC;
import static org.graalvm.compiler.lir.LIRValueUtil.asConstant;
import static org.graalvm.compiler.lir.LIRValueUtil.asConstantValue;
import static org.graalvm.compiler.lir.LIRValueUtil.asJavaConstant;
import static org.graalvm.compiler.lir.LIRValueUtil.isConstantValue;
import static org.graalvm.compiler.lir.LIRValueUtil.isIntConstant;
import static org.graalvm.compiler.lir.LIRValueUtil.isJavaConstant;

import java.util.Optional;

import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic;
import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64MIOp;
import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp;
import org.graalvm.compiler.asm.amd64.AMD64Assembler.ConditionFlag;
import org.graalvm.compiler.asm.amd64.AMD64Assembler.SSEOp;
import org.graalvm.compiler.asm.amd64.AMD64Assembler.VexRMOp;
import org.graalvm.compiler.asm.amd64.AMD64BaseAssembler.OperandSize;
import org.graalvm.compiler.asm.amd64.AVXKind;
import org.graalvm.compiler.asm.amd64.AVXKind.AVXSize;
import org.graalvm.compiler.core.common.LIRKind;
import org.graalvm.compiler.core.common.NumUtil;
import org.graalvm.compiler.core.common.calc.Condition;
import org.graalvm.compiler.core.common.spi.ForeignCallLinkage;
import org.graalvm.compiler.core.common.spi.LIRKindTool;
import org.graalvm.compiler.debug.GraalError;
import org.graalvm.compiler.lir.ConstantValue;
import org.graalvm.compiler.lir.LIRFrameState;
import org.graalvm.compiler.lir.LIRInstruction;
import org.graalvm.compiler.lir.LIRValueUtil;
import org.graalvm.compiler.lir.LabelRef;
import org.graalvm.compiler.lir.StandardOp.JumpOp;
import org.graalvm.compiler.lir.StandardOp.ZapRegistersOp;
import org.graalvm.compiler.lir.SwitchStrategy;
import org.graalvm.compiler.lir.Variable;
import org.graalvm.compiler.lir.amd64.AMD64AddressValue;
import org.graalvm.compiler.lir.amd64.AMD64ArithmeticLIRGeneratorTool;
import org.graalvm.compiler.lir.amd64.AMD64ArrayCompareToOp;
import org.graalvm.compiler.lir.amd64.AMD64ArrayEqualsOp;
import org.graalvm.compiler.lir.amd64.AMD64ArrayIndexOfOp;
import org.graalvm.compiler.lir.amd64.AMD64Binary;
import org.graalvm.compiler.lir.amd64.AMD64BinaryConsumer;
import org.graalvm.compiler.lir.amd64.AMD64ByteSwapOp;
import org.graalvm.compiler.lir.amd64.AMD64Call;
import org.graalvm.compiler.lir.amd64.AMD64ControlFlow;
import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.BranchOp;
import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.CondMoveOp;
import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.CondSetOp;
import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.FloatBranchOp;
import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.FloatCondMoveOp;
import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.FloatCondSetOp;
import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.HashTableSwitchOp;
import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.ReturnOp;
import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.StrategySwitchOp;
import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.TableSwitchOp;
import org.graalvm.compiler.lir.amd64.AMD64LFenceOp;
import org.graalvm.compiler.lir.amd64.AMD64Move;
import org.graalvm.compiler.lir.amd64.AMD64Move.CompareAndSwapOp;
import org.graalvm.compiler.lir.amd64.AMD64Move.MembarOp;
import org.graalvm.compiler.lir.amd64.AMD64Move.StackLeaOp;
import org.graalvm.compiler.lir.amd64.AMD64PauseOp;
import org.graalvm.compiler.lir.amd64.AMD64StringLatin1InflateOp;
import org.graalvm.compiler.lir.amd64.AMD64StringUTF16CompressOp;
import org.graalvm.compiler.lir.amd64.AMD64ZapRegistersOp;
import org.graalvm.compiler.lir.amd64.AMD64ZapStackOp;
import org.graalvm.compiler.lir.amd64.AMD64ZeroMemoryOp;
import org.graalvm.compiler.lir.amd64.vector.AMD64VectorCompareOp;
import org.graalvm.compiler.lir.gen.LIRGenerationResult;
import org.graalvm.compiler.lir.gen.LIRGenerator;
import org.graalvm.compiler.lir.hashing.Hasher;
import org.graalvm.compiler.phases.util.Providers;

import jdk.vm.ci.amd64.AMD64;
import jdk.vm.ci.amd64.AMD64Kind;
import jdk.vm.ci.code.CallingConvention;
import jdk.vm.ci.code.Register;
import jdk.vm.ci.code.RegisterValue;
import jdk.vm.ci.code.StackSlot;
import jdk.vm.ci.meta.AllocatableValue;
import jdk.vm.ci.meta.JavaConstant;
import jdk.vm.ci.meta.JavaKind;
import jdk.vm.ci.meta.PlatformKind;
import jdk.vm.ci.meta.VMConstant;
import jdk.vm.ci.meta.Value;
import jdk.vm.ci.meta.ValueKind;
/**
 * This class implements the AMD64 specific portion of the LIR generator.
 */
public abstract class AMD64LIRGenerator extends LIRGenerator {

    public AMD64LIRGenerator(LIRKindTool lirKindTool, AMD64ArithmeticLIRGenerator arithmeticLIRGen, MoveFactory moveFactory, Providers providers, LIRGenerationResult lirGenRes) {
        super(lirKindTool, arithmeticLIRGen, moveFactory, providers, lirGenRes);
    }

    /**
     * Checks whether the supplied constant can be used without loading it into a register for
     * store operations, i.e., on the right hand side of a memory access.
     *
     * @param c The constant to check.
     * @return True if the constant can be used directly, false if the constant needs to be in a
     *         register.
     */
    protected static final boolean canStoreConstant(JavaConstant c) {
        // there is no immediate move of 64-bit constants on Intel
        switch (c.getJavaKind()) {
            case Long:
                return NumUtil.isInt(c.asLong());
            case Double:
                return false;
            case Object:
                return c.isNull();
            default:
                return true;
        }
    }

    @Override
    protected JavaConstant zapValueForKind(PlatformKind kind) {
        long dead = 0xDEADDEADDEADDEADL;
        switch ((AMD64Kind) kind) {
            case BYTE:
                return JavaConstant.forByte((byte) dead);
            case WORD:
                return JavaConstant.forShort((short) dead);
            case DWORD:
                return JavaConstant.forInt((int) dead);
            case QWORD:
                return JavaConstant.forLong(dead);
            case SINGLE:
                return JavaConstant.forFloat(Float.intBitsToFloat((int) dead));
            default:
                // we don't support vector types, so just zap with double for all of them
                return JavaConstant.forDouble(Double.longBitsToDouble(dead));
        }
    }

    public AMD64AddressValue asAddressValue(Value address) {
        if (address instanceof AMD64AddressValue) {
            return (AMD64AddressValue) address;
        } else {
            if (address instanceof JavaConstant) {
                long displacement = ((JavaConstant) address).asLong();
                if (NumUtil.isInt(displacement)) {
                    return new AMD64AddressValue(address.getValueKind(), Value.ILLEGAL, (int) displacement);
                }
            }
            return new AMD64AddressValue(address.getValueKind(), asAllocatable(address), 0);
        }
    }

    @Override
    public Variable emitAddress(AllocatableValue stackslot) {
        Variable result = newVariable(LIRKind.value(target().arch.getWordKind()));
        append(new StackLeaOp(result, stackslot));
        return result;
    }
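    // Note: the zap pattern simply truncates 0xDEADDEADDEADDEADL to the
    // kind's width ((short) 0xDEAD for WORD, (int) 0xDEADDEAD for DWORD, and
    // so on), so zapped values stay recognizable in a debugger at any size.
    // asAddressValue, in turn, folds a constant address into a plain
    // displacement only when it fits in 32 bits: AMD64 addressing modes have
    // no 64-bit displacement form, so wider addresses must go through a
    // register.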
    /**
     * The AMD64 backend only uses DWORD and QWORD values in registers because of a performance
     * penalty when accessing WORD or BYTE registers. This function converts small integer kinds to
     * DWORD.
     */
    @Override
    public <K extends ValueKind<K>> K toRegisterKind(K kind) {
        switch ((AMD64Kind) kind.getPlatformKind()) {
            case BYTE:
            case WORD:
                return kind.changeType(AMD64Kind.DWORD);
            default:
                return kind;
        }
    }

    private AllocatableValue asAllocatable(Value value, ValueKind<?> kind) {
        if (value.getValueKind().equals(kind)) {
            return asAllocatable(value);
        } else if (isRegister(value)) {
            return asRegister(value).asValue(kind);
        } else if (isConstantValue(value)) {
            return emitLoadConstant(kind, asConstant(value));
        } else {
            Variable variable = newVariable(kind);
            emitMove(variable, value);
            return variable;
        }
    }

    private Value emitCompareAndSwap(boolean isLogic, LIRKind accessKind, Value address, Value expectedValue, Value newValue, Value trueValue, Value falseValue) {
        ValueKind<?> kind = newValue.getValueKind();
        assert kind.equals(expectedValue.getValueKind());

        AMD64AddressValue addressValue = asAddressValue(address);
        LIRKind integralAccessKind = accessKind;
        Value reinterpretedExpectedValue = expectedValue;
        Value reinterpretedNewValue = newValue;
        boolean isXmm = ((AMD64Kind) accessKind.getPlatformKind()).isXMM();
        if (isXmm) {
            if (accessKind.getPlatformKind().equals(AMD64Kind.SINGLE)) {
                integralAccessKind = LIRKind.fromJavaKind(target().arch, JavaKind.Int);
            } else {
                integralAccessKind = LIRKind.fromJavaKind(target().arch, JavaKind.Long);
            }
            reinterpretedExpectedValue = arithmeticLIRGen.emitReinterpret(integralAccessKind, expectedValue);
            reinterpretedNewValue = arithmeticLIRGen.emitReinterpret(integralAccessKind, newValue);
        }
        AMD64Kind memKind = (AMD64Kind) integralAccessKind.getPlatformKind();
        RegisterValue aRes = AMD64.rax.asValue(integralAccessKind);
        AllocatableValue allocatableNewValue = asAllocatable(reinterpretedNewValue, integralAccessKind);
        emitMove(aRes, reinterpretedExpectedValue);
        append(new CompareAndSwapOp(memKind, aRes, addressValue, aRes, allocatableNewValue));

        if (isLogic) {
            assert trueValue.getValueKind().equals(falseValue.getValueKind());
            Variable result = newVariable(trueValue.getValueKind());
            append(new CondMoveOp(result, Condition.EQ, asAllocatable(trueValue), falseValue));
            return result;
        } else {
            if (isXmm) {
                return arithmeticLIRGen.emitReinterpret(accessKind, aRes);
            } else {
                Variable result = newVariable(kind);
                emitMove(result, aRes);
                return result;
            }
        }
    }

    @Override
    public Variable emitLogicCompareAndSwap(LIRKind accessKind, Value address, Value expectedValue, Value newValue, Value trueValue, Value falseValue) {
        return (Variable) emitCompareAndSwap(true, accessKind, address, expectedValue, newValue, trueValue, falseValue);
    }

    @Override
    public Value emitValueCompareAndSwap(LIRKind accessKind, Value address, Value expectedValue, Value newValue) {
        return emitCompareAndSwap(false, accessKind, address, expectedValue, newValue, null, null);
    }
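    // Note: the expected value is pinned to RAX because AMD64's CMPXCHG
    // instruction implicitly compares RAX against the memory operand and, on
    // equality, stores the new value; on inequality it loads the current
    // memory contents into RAX. XMM kinds are reinterpreted as integers
    // first, since CMPXCHG only operates on general-purpose registers.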
    public void emitCompareAndSwapBranch(ValueKind<?> kind, AMD64AddressValue address, Value expectedValue, Value newValue, Condition condition, LabelRef trueLabel, LabelRef falseLabel,
                    double trueLabelProbability) {
        assert kind.getPlatformKind().getSizeInBytes() <= expectedValue.getValueKind().getPlatformKind().getSizeInBytes();
        assert kind.getPlatformKind().getSizeInBytes() <= newValue.getValueKind().getPlatformKind().getSizeInBytes();
        assert condition == Condition.EQ || condition == Condition.NE;
        AMD64Kind memKind = (AMD64Kind) kind.getPlatformKind();
        RegisterValue raxValue = AMD64.rax.asValue(kind);
        emitMove(raxValue, expectedValue);
        append(new CompareAndSwapOp(memKind, raxValue, address, raxValue, asAllocatable(newValue)));
        append(new BranchOp(condition, trueLabel, falseLabel, trueLabelProbability));
    }

    @Override
    public Value emitAtomicReadAndAdd(Value address, ValueKind<?> kind, Value delta) {
        Variable result = newVariable(kind);
        AMD64AddressValue addressValue = asAddressValue(address);
        append(new AMD64Move.AtomicReadAndAddOp((AMD64Kind) kind.getPlatformKind(), result, addressValue, asAllocatable(delta)));
        return result;
    }

    @Override
    public Value emitAtomicReadAndWrite(Value address, ValueKind<?> kind, Value newValue) {
        Variable result = newVariable(kind);
        AMD64AddressValue addressValue = asAddressValue(address);
        append(new AMD64Move.AtomicReadAndWriteOp((AMD64Kind) kind.getPlatformKind(), result, addressValue, asAllocatable(newValue)));
        return result;
    }

    @Override
    public void emitNullCheck(Value address, LIRFrameState state) {
        append(new AMD64Move.NullCheckOp(asAddressValue(address), state));
    }

    @Override
    public void emitJump(LabelRef label) {
        assert label != null;
        append(new JumpOp(label));
    }

    @Override
    public void emitCompareBranch(PlatformKind cmpKind, Value left, Value right, Condition cond, boolean unorderedIsTrue, LabelRef trueLabel, LabelRef falseLabel, double trueLabelProbability) {
        Condition finalCondition = emitCompare(cmpKind, left, right, cond);
        if (cmpKind == AMD64Kind.SINGLE || cmpKind == AMD64Kind.DOUBLE) {
            append(new FloatBranchOp(finalCondition, unorderedIsTrue, trueLabel, falseLabel, trueLabelProbability));
        } else {
            append(new BranchOp(finalCondition, trueLabel, falseLabel, trueLabelProbability));
        }
    }

    public void emitCompareBranchMemory(AMD64Kind cmpKind, Value left, AMD64AddressValue right, LIRFrameState state, Condition cond, boolean unorderedIsTrue, LabelRef trueLabel, LabelRef falseLabel,
                    double trueLabelProbability) {
        boolean mirrored = emitCompareMemory(cmpKind, left, right, state);
        Condition finalCondition = mirrored ? cond.mirror() : cond;
        if (cmpKind.isXMM()) {
            append(new FloatBranchOp(finalCondition, unorderedIsTrue, trueLabel, falseLabel, trueLabelProbability));
        } else {
            append(new BranchOp(finalCondition, trueLabel, falseLabel, trueLabelProbability));
        }
    }
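    // Note: emitCompareMemory (below) can only encode the memory operand on
    // one fixed side of the CMP instruction, so it may have to swap the
    // operands and reports this through its return value; the branch then
    // compensates with cond.mirror(), e.g. "x < [mem]" becomes "[mem] > x".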
    @Override
    public void emitOverflowCheckBranch(LabelRef overflow, LabelRef noOverflow, LIRKind cmpLIRKind, double overflowProbability) {
        append(new BranchOp(ConditionFlag.Overflow, overflow, noOverflow, overflowProbability));
    }

    @Override
    public void emitIntegerTestBranch(Value left, Value right, LabelRef trueDestination, LabelRef falseDestination, double trueDestinationProbability) {
        emitIntegerTest(left, right);
        append(new BranchOp(Condition.EQ, trueDestination, falseDestination, trueDestinationProbability));
    }

    @Override
    public Variable emitConditionalMove(PlatformKind cmpKind, Value left, Value right, Condition cond, boolean unorderedIsTrue, Value trueValue, Value falseValue) {
        boolean isFloatComparison = cmpKind == AMD64Kind.SINGLE || cmpKind == AMD64Kind.DOUBLE;

        Condition finalCondition = cond;
        Value finalTrueValue = trueValue;
        Value finalFalseValue = falseValue;
        if (isFloatComparison) {
            // eliminate the parity check in case of a float comparison
            Value finalLeft = left;
            Value finalRight = right;
            if (unorderedIsTrue != AMD64ControlFlow.trueOnUnordered(finalCondition)) {
                if (unorderedIsTrue == AMD64ControlFlow.trueOnUnordered(finalCondition.mirror())) {
                    finalCondition = finalCondition.mirror();
                    finalLeft = right;
                    finalRight = left;
                } else if (finalCondition != Condition.EQ && finalCondition != Condition.NE) {
                    // negating EQ and NE does not make any sense as we would need to negate
                    // unorderedIsTrue as well (otherwise, we would no longer fulfill the Java
                    // NaN semantics)
                    assert unorderedIsTrue == AMD64ControlFlow.trueOnUnordered(finalCondition.negate());
                    finalCondition = finalCondition.negate();
                    finalTrueValue = falseValue;
                    finalFalseValue = trueValue;
                }
            }
            emitRawCompare(cmpKind, finalLeft, finalRight);
        } else {
            finalCondition = emitCompare(cmpKind, left, right, cond);
        }

        boolean isParityCheckNecessary = isFloatComparison && unorderedIsTrue != AMD64ControlFlow.trueOnUnordered(finalCondition);
        Variable result = newVariable(finalTrueValue.getValueKind());
        if (!isParityCheckNecessary && isIntConstant(finalTrueValue, 1) && isIntConstant(finalFalseValue, 0)) {
            if (isFloatComparison) {
                append(new FloatCondSetOp(result, finalCondition));
            } else {
                append(new CondSetOp(result, finalCondition));
            }
        } else if (!isParityCheckNecessary && isIntConstant(finalTrueValue, 0) && isIntConstant(finalFalseValue, 1)) {
            if (isFloatComparison) {
                if (unorderedIsTrue == AMD64ControlFlow.trueOnUnordered(finalCondition.negate())) {
                    append(new FloatCondSetOp(result, finalCondition.negate()));
                } else {
                    append(new FloatCondSetOp(result, finalCondition));
                    Variable negatedResult = newVariable(result.getValueKind());
                    append(new AMD64Binary.ConstOp(AMD64BinaryArithmetic.XOR, OperandSize.get(result.getPlatformKind()), negatedResult, result, 1));
                    result = negatedResult;
                }
            } else {
                append(new CondSetOp(result, finalCondition.negate()));
            }
        } else if (isFloatComparison) {
            append(new FloatCondMoveOp(result, finalCondition, unorderedIsTrue, load(finalTrueValue), load(finalFalseValue)));
        } else {
            append(new CondMoveOp(result, finalCondition, load(finalTrueValue), loadNonConst(finalFalseValue)));
        }
        return result;
    }
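    // Note: UCOMIS sets the parity flag when a comparison is unordered, i.e.
    // when either operand is NaN. The mirroring and negation above try to
    // pick a condition whose behavior on unordered inputs already matches
    // unorderedIsTrue, so the separate parity check (and with it an extra
    // jump or conditional move) can be omitted.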
    @Override
    public Variable emitIntegerTestMove(Value left, Value right, Value trueValue, Value falseValue) {
        emitIntegerTest(left, right);
        Variable result = newVariable(trueValue.getValueKind());
        append(new CondMoveOp(result, Condition.EQ, load(trueValue), loadNonConst(falseValue)));
        return result;
    }

    protected static AVXSize getRegisterSize(Value a) {
        AMD64Kind kind = (AMD64Kind) a.getPlatformKind();
        if (kind.isXMM()) {
            return AVXKind.getRegisterSize(kind);
        } else {
            return AVXSize.XMM;
        }
    }

    private void emitIntegerTest(Value a, Value b) {
        if (a.getPlatformKind().getVectorLength() > 1) {
            append(new AMD64VectorCompareOp(VexRMOp.VPTEST, getRegisterSize(a), asAllocatable(a), asAllocatable(b)));
        } else {
            assert ((AMD64Kind) a.getPlatformKind()).isInteger();
            OperandSize size = a.getPlatformKind() == AMD64Kind.QWORD ? QWORD : DWORD;
            if (isJavaConstant(b) && NumUtil.is32bit(asJavaConstant(b).asLong())) {
                append(new AMD64BinaryConsumer.ConstOp(AMD64MIOp.TEST, size, asAllocatable(a), (int) asJavaConstant(b).asLong()));
            } else if (isJavaConstant(a) && NumUtil.is32bit(asJavaConstant(a).asLong())) {
                append(new AMD64BinaryConsumer.ConstOp(AMD64MIOp.TEST, size, asAllocatable(b), (int) asJavaConstant(a).asLong()));
            } else if (isAllocatableValue(b)) {
                append(new AMD64BinaryConsumer.Op(AMD64RMOp.TEST, size, asAllocatable(b), asAllocatable(a)));
            } else {
                append(new AMD64BinaryConsumer.Op(AMD64RMOp.TEST, size, asAllocatable(a), asAllocatable(b)));
            }
        }
    }
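    // Note: TEST computes "a & b" for its flag effects only, setting ZF when
    // the result is zero; the callers above therefore pair it with
    // Condition.EQ to branch or move when the two bit sets do not intersect.
    // A constant operand is folded only if it fits TEST's 32-bit immediate
    // form.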
    /**
     * This method emits the compare against memory instruction, and may reorder the operands. It
     * returns true if it did so.
     *
     * @param b the right operand of the comparison
     * @return true if the left and right operands were switched, false otherwise
     */
    private boolean emitCompareMemory(AMD64Kind cmpKind, Value a, AMD64AddressValue b, LIRFrameState state) {
        OperandSize size;
        switch (cmpKind) {
            case BYTE:
                size = OperandSize.BYTE;
                break;
            case WORD:
                size = OperandSize.WORD;
                break;
            case DWORD:
                size = OperandSize.DWORD;
                break;
            case QWORD:
                size = OperandSize.QWORD;
                break;
            case SINGLE:
                append(new AMD64BinaryConsumer.MemoryRMOp(SSEOp.UCOMIS, PS, asAllocatable(a), b, state));
                return false;
            case DOUBLE:
                append(new AMD64BinaryConsumer.MemoryRMOp(SSEOp.UCOMIS, PD, asAllocatable(a), b, state));
                return false;
            default:
                throw GraalError.shouldNotReachHere("unexpected kind: " + cmpKind);
        }

        if (isConstantValue(a)) {
            return emitCompareMemoryConOp(size, asConstantValue(a), b, state);
        } else {
            return emitCompareRegMemoryOp(size, asAllocatable(a), b, state);
        }
    }

    protected boolean emitCompareMemoryConOp(OperandSize size, ConstantValue a, AMD64AddressValue b, LIRFrameState state) {
        if (JavaConstant.isNull(a.getConstant())) {
            append(new AMD64BinaryConsumer.MemoryConstOp(CMP, size, b, 0, state));
            return true;
        } else if (a.getConstant() instanceof VMConstant && size == DWORD && target().inlineObjects) {
            VMConstant vc = (VMConstant) a.getConstant();
            append(new AMD64BinaryConsumer.MemoryVMConstOp(CMP.getMIOpcode(size, false), b, vc, state));
            return true;
        } else {
            if (a.getConstant() instanceof JavaConstant && a.getJavaConstant().getJavaKind() != JavaKind.Object) {
                long value = a.getJavaConstant().asLong();
                if (NumUtil.is32bit(value)) {
                    append(new AMD64BinaryConsumer.MemoryConstOp(CMP, size, b, (int) value, state));
                    return true;
                }
            }
            return emitCompareRegMemoryOp(size, asAllocatable(a), b, state);
        }
    }

    private boolean emitCompareRegMemoryOp(OperandSize size, AllocatableValue a, AMD64AddressValue b, LIRFrameState state) {
        AMD64RMOp op = CMP.getRMOpcode(size);
        append(new AMD64BinaryConsumer.MemoryRMOp(op, size, a, b, state));
        return false;
    }
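    // Note: CMP accepts at most a 32-bit (sign-extended) immediate, which is
    // why emitCompareMemoryConOp only folds a constant when
    // NumUtil.is32bit(value) holds and otherwise falls back to the
    // register-memory form via emitCompareRegMemoryOp.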
    /**
     * This method emits the compare instruction, and may reorder the operands to place a variable
     * on the left hand side.
     *
     * @param a the left operand of the comparison
     * @param b the right operand of the comparison
     * @param cond the condition of the comparison
     * @return the comparison condition, mirrored if the left and right operands were switched
     */
    private Condition emitCompare(PlatformKind cmpKind, Value a, Value b, Condition cond) {
        if (LIRValueUtil.isVariable(b)) {
            emitRawCompare(cmpKind, b, a);
            return cond.mirror();
        } else {
            emitRawCompare(cmpKind, a, b);
            return cond;
        }
    }

    private void emitRawCompare(PlatformKind cmpKind, Value left, Value right) {
        ((AMD64ArithmeticLIRGeneratorTool) arithmeticLIRGen).emitCompareOp((AMD64Kind) cmpKind, load(left), loadNonConst(right));
    }

    @Override
    public void emitMembar(int barriers) {
        int necessaryBarriers = target().arch.requiredBarriers(barriers);
        if (target().isMP && necessaryBarriers != 0) {
            append(new MembarOp(necessaryBarriers));
        }
    }

    public abstract void emitCCall(long address, CallingConvention nativeCallingConvention, Value[] args, int numberOfFloatingPointArguments);

    @Override
    protected void emitForeignCallOp(ForeignCallLinkage linkage, Value result, Value[] arguments, Value[] temps, LIRFrameState info) {
        long maxOffset = linkage.getMaxCallTargetOffset();
        if (maxOffset != (int) maxOffset && !GeneratePIC.getValue(getResult().getLIR().getOptions())) {
            append(new AMD64Call.DirectFarForeignCallOp(linkage, result, arguments, temps, info));
        } else {
            append(new AMD64Call.DirectNearForeignCallOp(linkage, result, arguments, temps, info));
        }
    }

    @Override
    public Variable emitByteSwap(Value input) {
        Variable result = newVariable(LIRKind.combine(input));
        append(new AMD64ByteSwapOp(result, input));
        return result;
    }

    @Override
    public Variable emitArrayCompareTo(JavaKind kind1, JavaKind kind2, Value array1, Value array2, Value length1, Value length2) {
        LIRKind resultKind = LIRKind.value(AMD64Kind.DWORD);
        RegisterValue raxRes = AMD64.rax.asValue(resultKind);
        RegisterValue cnt1 = AMD64.rcx.asValue(length1.getValueKind());
        RegisterValue cnt2 = AMD64.rdx.asValue(length2.getValueKind());
        emitMove(cnt1, length1);
        emitMove(cnt2, length2);
        append(new AMD64ArrayCompareToOp(this, kind1, kind2, raxRes, array1, array2, cnt1, cnt2));
        Variable result = newVariable(resultKind);
        emitMove(result, raxRes);
        return result;
    }

    @Override
    public Variable emitArrayEquals(JavaKind kind, Value array1, Value array2, Value length, boolean directPointers) {
        Variable result = newVariable(LIRKind.value(AMD64Kind.DWORD));
        append(new AMD64ArrayEqualsOp(this, kind, kind, result, array1, array2, length, directPointers, getMaxVectorSize()));
        return result;
    }
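    // Note: emitArrayCompareTo pins its operands to fixed registers (result
    // in RAX, lengths in RCX and RDX) because the code emitted by
    // AMD64ArrayCompareToOp expects that layout; the result is then copied
    // into a fresh Variable so the register allocator is not constrained at
    // the use site.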
    @Override
    public Variable emitArrayEquals(JavaKind kind1, JavaKind kind2, Value array1, Value array2, Value length, boolean directPointers) {
        Variable result = newVariable(LIRKind.value(AMD64Kind.DWORD));
        append(new AMD64ArrayEqualsOp(this, kind1, kind2, result, array1, array2, length, directPointers, getMaxVectorSize()));
        return result;
    }

    /**
     * Return the maximum size of vector registers used in SSE/AVX instructions.
     */
    protected int getMaxVectorSize() {
        // default for "unlimited"
        return -1;
    }

    @Override
    public Variable emitArrayIndexOf(JavaKind arrayKind, JavaKind valueKind, boolean findTwoConsecutive, Value arrayPointer, Value arrayLength, Value fromIndex, Value... searchValues) {
        Variable result = newVariable(LIRKind.value(AMD64Kind.DWORD));
        append(new AMD64ArrayIndexOfOp(arrayKind, valueKind, findTwoConsecutive, getMaxVectorSize(), this, result,
                        asAllocatable(arrayPointer), asAllocatable(arrayLength), asAllocatable(fromIndex), searchValues));
        return result;
    }

    @Override
    public void emitStringLatin1Inflate(Value src, Value dst, Value len) {
        RegisterValue rsrc = AMD64.rsi.asValue(src.getValueKind());
        RegisterValue rdst = AMD64.rdi.asValue(dst.getValueKind());
        RegisterValue rlen = AMD64.rdx.asValue(len.getValueKind());

        emitMove(rsrc, src);
        emitMove(rdst, dst);
        emitMove(rlen, len);

        append(new AMD64StringLatin1InflateOp(this, rsrc, rdst, rlen));
    }

    @Override
    public Variable emitStringUTF16Compress(Value src, Value dst, Value len) {
        RegisterValue rsrc = AMD64.rsi.asValue(src.getValueKind());
        RegisterValue rdst = AMD64.rdi.asValue(dst.getValueKind());
        RegisterValue rlen = AMD64.rdx.asValue(len.getValueKind());

        emitMove(rsrc, src);
        emitMove(rdst, dst);
        emitMove(rlen, len);

        LIRKind reskind = LIRKind.value(AMD64Kind.DWORD);
        RegisterValue rres = AMD64.rax.asValue(reskind);

        append(new AMD64StringUTF16CompressOp(this, rres, rsrc, rdst, rlen));

        Variable res = newVariable(reskind);
        emitMove(res, rres);
        return res;
    }

    @Override
    public void emitReturn(JavaKind kind, Value input) {
        AllocatableValue operand = Value.ILLEGAL;
        if (input != null) {
            operand = resultOperandFor(kind, input.getValueKind());
            emitMove(operand, input);
        }
        append(new ReturnOp(operand));
    }

    protected StrategySwitchOp createStrategySwitchOp(SwitchStrategy strategy, LabelRef[] keyTargets, LabelRef defaultTarget, Variable key, AllocatableValue temp) {
        return new StrategySwitchOp(strategy, keyTargets, defaultTarget, key, temp);
    }

    @Override
    public void emitStrategySwitch(SwitchStrategy strategy, Variable key, LabelRef[] keyTargets, LabelRef defaultTarget) {
        // a temp is needed for loading object constants
        boolean needsTemp = !LIRKind.isValue(key);
        append(createStrategySwitchOp(strategy, keyTargets, defaultTarget, key, needsTemp ? newVariable(key.getValueKind()) : Value.ILLEGAL));
    }
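    // Note: a strategy switch over object keys needs the scratch Variable
    // because object constants cannot be encoded as immediate operands of a
    // compare and must be loaded first; primitive keys (LIRKind.isValue)
    // pass Value.ILLEGAL instead.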
    @Override
    protected void emitTableSwitch(int lowKey, LabelRef defaultTarget, LabelRef[] targets, Value key) {
        append(new TableSwitchOp(lowKey, defaultTarget, targets, key, newVariable(LIRKind.value(target().arch.getWordKind())), newVariable(key.getValueKind())));
    }

    @Override
    protected Optional<Hasher> hasherFor(JavaConstant[] keyConstants, double minDensity) {
        return Hasher.forKeys(keyConstants, minDensity);
    }

    @Override
    protected void emitHashTableSwitch(Hasher hasher, JavaConstant[] keys, LabelRef defaultTarget, LabelRef[] targets, Value value) {
        Value index = hasher.hash(value, arithmeticLIRGen);
        Variable scratch = newVariable(LIRKind.value(target().arch.getWordKind()));
        Variable entryScratch = newVariable(LIRKind.value(target().arch.getWordKind()));
        append(new HashTableSwitchOp(keys, defaultTarget, targets, value, index, scratch, entryScratch));
    }

    @Override
    public void emitPause() {
        append(new AMD64PauseOp());
    }

    @Override
    public ZapRegistersOp createZapRegisters(Register[] zappedRegisters, JavaConstant[] zapValues) {
        return new AMD64ZapRegistersOp(zappedRegisters, zapValues);
    }

    @Override
    public LIRInstruction createZapArgumentSpace(StackSlot[] zappedStack, JavaConstant[] zapValues) {
        return new AMD64ZapStackOp(zappedStack, zapValues);
    }

    @Override
    public void emitSpeculationFence() {
        append(new AMD64LFenceOp());
    }

    @Override
    public void emitZeroMemory(Value address, Value length, boolean isAligned) {
        RegisterValue lengthReg = AMD64.rcx.asValue(length.getValueKind());
        emitMove(lengthReg, length);
        append(new AMD64ZeroMemoryOp(asAddressValue(address), lengthReg));
    }
}
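// Usage note: concrete backends subclass this generator, supplying their own
// LIRKindTool, arithmetic generator, MoveFactory, Providers and
// LIRGenerationResult through the constructor, and implement the remaining
// platform hooks such as the abstract emitCCall defined above.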