/openbsd/gnu/llvm/llvm/include/llvm/Support/
MathExtras.h
    375  unsigned &MaskLen) {                  // in isShiftedMask_32(), argument
    379    MaskLen = llvm::popcount(Value);    // in isShiftedMask_32()
    388  unsigned &MaskLen) {                  // in isShiftedMask_64(), argument
    392    MaskLen = llvm::popcount(Value);    // in isShiftedMask_64()
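Taken together, these hits are the reporting overloads: each tests whether Value is a single contiguous run of ones and, if so, returns the run's position and length through the reference parameters. A minimal self-contained sketch of the same semantics, using C++20 <bit> in place of the llvm:: helpers (the (Value - 1) | Value mask test is LLVM's usual trick; only the MaskLen = popcount line is confirmed by the hits themselves):

    #include <bit>
    #include <cassert>
    #include <cstdint>

    // Value is a "shifted mask" iff its ones form one contiguous run.
    // Filling the run down to bit 0 ((Value - 1) | Value) yields a low mask M,
    // and M is a low mask iff (M + 1) & M == 0 (all-ones wraps to 0 and passes).
    bool isShiftedMask_32(uint32_t Value, unsigned &MaskIdx, unsigned &MaskLen) {
      const uint32_t Filled = (Value - 1) | Value;
      if (Value == 0 || ((Filled + 1) & Filled) != 0)
        return false;
      MaskIdx = std::countr_zero(Value); // index of the lowest set bit
      MaskLen = std::popcount(Value);    // length of the run of ones
      return true;
    }

    int main() {
      unsigned Idx, Len;
      assert(isShiftedMask_32(0x0FF0u, Idx, Len) && Idx == 4 && Len == 8);
      assert(!isShiftedMask_32(0x0F0Fu, Idx, Len)); // two runs: rejected
    }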
/openbsd/gnu/llvm/llvm/lib/Target/LoongArch/
LoongArchISelLowering.cpp
    1406  unsigned MaskIdx, MaskLen;                                     // in performSRLCombine(), local
    1413  !isShiftedMask_64(CN->getZExtValue(), MaskIdx, MaskLen))       // in performSRLCombine()
    1421  if (MaskIdx <= Shamt && Shamt <= MaskIdx + MaskLen - 1)        // in performSRLCombine()
    1424  DAG.getConstant(MaskIdx + MaskLen - 1, DL, GRLenVT),           // in performSRLCombine()
    1565  unsigned MaskIdx, MaskLen;                                     // in performORCombine(), local
    1568  isShiftedMask_64(CNMask->getZExtValue(), MaskIdx, MaskLen) &&  // in performORCombine()
    1570  CNShamt->getZExtValue() + MaskLen <= ValBits) {                // in performORCombine()
    1577  DAG.getConstant(Shamt + MaskLen - 1, DL, GRLenVT),             // in performORCombine()
    1592  isShiftedMask_64(CNMask->getZExtValue(), MaskIdx, MaskLen) &&  // in performORCombine()
    1601  DAG.getConstant(MaskIdx + MaskLen - 1, DL, GRLenVT),           // in performORCombine()
    [all …]
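The performSRLCombine hits read as a bitfield extract in disguise: when the AND mask is a shifted mask and the shift amount falls inside the run (line 1421), (srl (and X, Mask), Shamt) yields exactly bits [MaskIdx + MaskLen - 1 : Shamt] of X, the msb operand built at line 1424. A self-checking model of that equivalence; bstrpick below is a hypothetical C++ stand-in for LoongArch's BSTRPICK bitfield-pick instruction, not LLVM code:

    #include <cassert>
    #include <cstdint>

    // Model of BSTRPICK rd, rj, msb, lsb: extract Src bits [msb:lsb] to bit 0.
    uint64_t bstrpick(uint64_t Src, unsigned Msb, unsigned Lsb) {
      return (Src << (63 - Msb)) >> (63 - Msb + Lsb);
    }

    int main() {
      // MaskIdx <= Shamt <= MaskIdx + MaskLen - 1, as the combine requires.
      const unsigned MaskIdx = 8, MaskLen = 16, Shamt = 12;
      const uint64_t Mask = ((UINT64_C(1) << MaskLen) - 1) << MaskIdx;
      for (uint64_t X : {UINT64_C(0x123456789ABCDEF0), ~UINT64_C(0)})
        assert(((X & Mask) >> Shamt) == bstrpick(X, MaskIdx + MaskLen - 1, Shamt));
    }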
/openbsd/gnu/llvm/llvm/include/llvm/ADT/
APInt.h
    510  bool isShiftedMask(unsigned &MaskIdx, unsigned &MaskLen) const {  // in isShiftedMask(), argument
    512    return isShiftedMask_64(U.VAL, MaskIdx, MaskLen);               // in isShiftedMask()
    518    MaskLen = Ones;                                                 // in isShiftedMask()
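Line 512 is the single-word fast path deferring to the MathExtras helper; the MaskLen = Ones at line 518 belongs to the multi-word path, where contiguity is decided by counting: the ones form one run exactly when leading zeros, ones, and trailing zeros partition the bit width. A plain uint64_t illustration of that counting argument (a model of the idea, not the APInt implementation):

    #include <bit>
    #include <cstdint>

    // If LeadZ + Ones + TrailZ == width, no zero can sit between two ones,
    // so the ones are one contiguous run starting at TrailZ.
    bool isShiftedMaskByCounting(uint64_t V, unsigned &MaskIdx, unsigned &MaskLen) {
      const unsigned Ones = std::popcount(V);
      if (Ones == 0 ||
          Ones + std::countl_zero(V) + std::countr_zero(V) != 64)
        return false;
      MaskIdx = std::countr_zero(V);
      MaskLen = Ones; // the "MaskLen = Ones" seen at line 518
      return true;
    }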
/openbsd/gnu/llvm/llvm/lib/Target/X86/
X86InstCombineIntrinsic.cpp
    999   unsigned MaskIdx, MaskLen;                                // in instCombineIntrinsic(), local
    1000  if (MaskC->getValue().isShiftedMask(MaskIdx, MaskLen)) {  // in instCombineIntrinsic()
    1043  unsigned MaskIdx, MaskLen;                                // in instCombineIntrinsic(), local
    1044  if (MaskC->getValue().isShiftedMask(MaskIdx, MaskLen)) {  // in instCombineIntrinsic()
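Both hits guard the same kind of rewrite in the BMI intrinsic combines; assuming these are the pext/pdep folds (the snippets alone show only the mask probe), the point is that a contiguous mask strips PEXT and PDEP of their bit-scattering power: extract becomes shift-right plus AND, deposit becomes shift-left plus AND, both expressible as plain IR. A self-checking demonstration of those identities (needs a BMI2 host, compile with -mbmi2):

    #include <cassert>
    #include <cstdint>
    #include <immintrin.h> // _pext_u32 / _pdep_u32

    int main() {
      const unsigned MaskIdx = 4, MaskLen = 8;
      const uint32_t Mask = ((1u << MaskLen) - 1) << MaskIdx; // ones at [11:4]
      for (uint32_t X : {0xDEADBEEFu, 0x12345678u, ~0u}) {
        // Contiguous mask: no bits are reordered, only shifted and masked.
        assert(_pext_u32(X, Mask) == ((X >> MaskIdx) & ((1u << MaskLen) - 1)));
        assert(_pdep_u32(X, Mask) == ((X << MaskIdx) & Mask));
      }
    }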
/openbsd/gnu/llvm/llvm/lib/Target/Hexagon/
HexagonISelDAGToDAGHVX.cpp
    1086  unsigned MaskLen = SM.Mask.size();               // in getOutputSegmentMap(), local
    1087  assert(MaskLen % SegLen == 0);                   // in getOutputSegmentMap()
    1088  SmallVector<unsigned, 4> Map(MaskLen / SegLen);  // in getOutputSegmentMap()
/openbsd/gnu/llvm/llvm/lib/Target/AArch64/
AArch64ISelDAGToDAG.cpp
    675  unsigned LowZBits, MaskLen;                                       // in SelectShiftedRegisterFromAnd(), local
    676  if (!AndMask.isShiftedMask(LowZBits, MaskLen))                    // in SelectShiftedRegisterFromAnd()
    686  if (LowZBits <= ShiftAmtC || (BitWidth != LowZBits + MaskLen))    // in SelectShiftedRegisterFromAnd()
    701  if (LHSOpcode == ISD::SRA && (BitWidth != (LowZBits + MaskLen)))  // in SelectShiftedRegisterFromAnd()
    705  if (LHSOpcode == ISD::SRL && (BitWidth > (NewShiftC + MaskLen)))  // in SelectShiftedRegisterFromAnd()
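The checks at 686/701/705 line the mask up against the shift so the AND can be absorbed into a shifted-register operand. For the SHL case, the enabling identity (a hedged reconstruction from the LowZBits > ShiftAmtC and BitWidth == LowZBits + MaskLen conditions) is that a left shift followed by a high-ones mask collapses to a single re-shifted value:

    #include <cassert>
    #include <cstdint>

    int main() {
      const unsigned ShiftAmtC = 3, LowZBits = 7;     // needs LowZBits > ShiftAmtC
      const unsigned NewShiftC = LowZBits - ShiftAmtC;
      const uint64_t Mask = ~UINT64_C(0) << LowZBits; // MaskLen == 64 - LowZBits
      for (uint64_t X : {UINT64_C(0x0123456789ABCDEF), ~UINT64_C(0)})
        // (X << c) & high-ones == (X >> (LowZBits - c)) << LowZBits,
        // i.e. one pre-shifted register used with LSL #LowZBits.
        assert(((X << ShiftAmtC) & Mask) == ((X >> NewShiftC) << LowZBits));
    }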
AArch64ISelLowering.cpp
    14971  unsigned MaskIdx, MaskLen;                                       // in isDesirableToCommuteXorWithShift(), local
    14972  if (XorC->getAPIntValue().isShiftedMask(MaskIdx, MaskLen)) {     // in isDesirableToCommuteXorWithShift()
    14976  return MaskIdx == ShiftAmt && MaskLen == (BitWidth - ShiftAmt);  // in isDesirableToCommuteXorWithShift()
    14977  return MaskIdx == 0 && MaskLen == (BitWidth - ShiftAmt);         // in isDesirableToCommuteXorWithShift()
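The two returns state when XOR commutes exactly through the shift: for shl the mask's ones must start at ShiftAmt and run to the top bit (so the xor is a shifted NOT of the surviving bits), for srl they must start at bit 0. The shl direction as a self-checking identity:

    #include <cassert>
    #include <cstdint>

    int main() {
      const unsigned ShiftAmt = 5;
      const uint32_t Mask = ~0u << ShiftAmt; // MaskIdx == ShiftAmt, MaskLen == 32 - ShiftAmt
      for (uint32_t X : {0u, 0xDEADBEEFu, ~0u})
        // XOR after the shift equals shifting a full NOT: the commuted form.
        assert(((X << ShiftAmt) ^ Mask) == (~X << ShiftAmt));
    }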
/openbsd/gnu/llvm/llvm/lib/Target/AMDGPU/
AMDGPUISelLowering.cpp
    3280  unsigned MaskIdx, MaskLen;                                     // in performSrlCombine(), local
    3281  if (Mask->getAPIntValue().isShiftedMask(MaskIdx, MaskLen) &&   // in performSrlCombine()
/openbsd/gnu/llvm/llvm/lib/Target/ARM/
ARMISelLowering.cpp
    13698  unsigned MaskIdx, MaskLen;                                       // in isDesirableToCommuteXorWithShift(), local
    13699  if (XorC->getAPIntValue().isShiftedMask(MaskIdx, MaskLen)) {     // in isDesirableToCommuteXorWithShift()
    13703  return MaskIdx == ShiftAmt && MaskLen == (BitWidth - ShiftAmt);  // in isDesirableToCommuteXorWithShift()
    13704  return MaskIdx == 0 && MaskLen == (BitWidth - ShiftAmt);         // in isDesirableToCommuteXorWithShift()