/openbsd/gnu/llvm/llvm/lib/Target/M68k/
  M68kInstrShiftRotate.td
    15: /// SHL [~] ASR [~] LSR [~] SWAP [ ]
    94: defm LSR : MxSROp<"lsr", srl, MxRODI_R, MxROOP_LS>;

/openbsd/gnu/llvm/llvm/lib/Target/AArch64/MCTargetDesc/
  AArch64AddressingModes.h
    36:  LSR,  (enumerator)
    57:  case AArch64_AM::LSR: return "lsr";  (in getShiftExtendName())
    78:  case 1: return AArch64_AM::LSR;  (in getShiftType())
    106: case AArch64_AM::LSR: STEnc = 1; break;  (in getShifterImm())
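
The getShifterImm() hits above pack a shift type together with a 6-bit
shift amount into a single immediate (STEnc = 1 tags LSR). A minimal
C++ sketch of that scheme; the exact bit positions here are an
assumption, not copied from the header:

    #include <cassert>

    // Assumed layout: a 2-bit type tag (LSL=0, LSR=1, ASR=2, ROR=3)
    // stored above a 6-bit shift amount.
    enum ShiftType { LSL = 0, LSR = 1, ASR = 2, ROR = 3 };

    inline unsigned getShifterImm(ShiftType ST, unsigned Imm) {
      assert(Imm < 64 && "shift amount must fit in 6 bits");
      return (static_cast<unsigned>(ST) << 6) | Imm;
    }

    inline ShiftType getShiftType(unsigned Packed) {
      return static_cast<ShiftType>(Packed >> 6);
    }

    inline unsigned getShiftValue(unsigned Packed) {
      return Packed & 0x3f;
    }

Round-tripping getShiftType(getShifterImm(LSR, 56)) == LSR mirrors the
AArch64AsmPrinter.cpp hits further down, which emit
getShifterImm(AArch64_AM::LSR, 56).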

/openbsd/gnu/llvm/llvm/lib/Target/AVR/
  AVRISelLowering.h
    43:  LSR, ///< Logical shift right.  (enumerator)
  AVRISelLowering.cpp
    254: NODE(LSR);  (in getTargetNodeName())
    386: Opc8 = AVRISD::LSR;  (in LowerShifts())
  AVRInstrInfo.td
    59:  def AVRlsr : SDNode<"AVRISD::LSR", SDTIntUnaryOp>;
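
AVRlsr is declared with SDTIntUnaryOp because the AVR lsr instruction
takes no shift-amount operand: it shifts right by exactly one bit, and
LowerShifts() expands wider shifts into repeated single-bit steps. A
hedged C++ model of the semantics being expanded (the loop stands in
for the chain of AVRISD::LSR nodes the backend actually builds):

    #include <cstdint>

    // Illustrative only: a logical shift right by n on AVR is n
    // applications of the one-bit `lsr` instruction.
    uint8_t lsr_by_n(uint8_t value, unsigned n) {
      while (n--)
        value >>= 1; // one AVR `lsr` per step
      return value;
    }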

/openbsd/gnu/llvm/lldb/source/Plugins/Process/Utility/
  ARMUtils.h
    125: static inline uint32_t LSR(const uint32_t value, const uint32_t amount,  (in LSR() function)
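
This helper models the ARM pseudocode LSR, which is stricter than the
C++ >> operator: a shift amount of zero returns the value unchanged,
and amounts of 32 or more clear every bit, whereas shifting a uint32_t
by 32 or more in C++ is undefined behaviour. A sketch of those
semantics (the real helper also reports failure through an extra
out-parameter, elided here):

    #include <cstdint>

    static inline uint32_t LSR(uint32_t value, uint32_t amount) {
      if (amount == 0)
        return value; // LSR by 0 is the identity
      if (amount >= 32)
        return 0;     // every bit shifted out
      return value >> amount;
    }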

/openbsd/gnu/llvm/llvm/lib/ExecutionEngine/Orc/
  OrcV2CBindings.cpp
    298: LLVMOrcLookupStateRef LSR = ::wrap(OrcV2CAPIHelper::extractLookupState(LS));  (in tryToGenerate(), local)
    318: auto Err = unwrap(TryToGenerate(::wrap(this), Ctx, &LSR, CLookupKind,  (in tryToGenerate())
    323: OrcV2CAPIHelper::resetLookupState(LS, ::unwrap(LSR));  (in tryToGenerate())
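
Here LSR is not a shift at all: it is an LLVMOrcLookupStateRef, an
opaque C handle that carries the C++ lookup state across a C callback
boundary via wrap()/unwrap(). A minimal sketch of that idiom, with
illustrative names rather than the bindings' actual types:

    // The C API sees only an opaque pointer...
    typedef struct OpaqueLookupState *LookupStateRef;

    // ...which the C++ side produces and consumes by casting.
    struct LookupState { /* real C++ state */ };

    inline LookupStateRef wrap(LookupState *P) {
      return reinterpret_cast<LookupStateRef>(P);
    }
    inline LookupState *unwrap(LookupStateRef P) {
      return reinterpret_cast<LookupState *>(P);
    }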

/openbsd/gnu/llvm/llvm/lib/Target/ARM/
  README.txt
    304: 3) Enhance LSR to generate more opportunities for indexed ops.
    406: More LSR enhancements possible:
    408: 1. Teach LSR about pre- and post- indexed ops to allow iv increment be merged
  ARMScheduleM7.td
    338: def : InstRW<[WriteALUsi], (instregex "(t|t2)(LSL|LSR|ASR|ROR)")>;
  ARMInstrThumb.td
    1179: // LSR immediate
    1190: // LSR register
  ARMScheduleR52.td
    330: (instregex "ASRr", "RORS?r", "LSR", "LSL")>;
  ARMScheduleA57.td
    218: // (ASR, LSL, LSR, ROR, RRX)=MOVsi, MVN

/openbsd/gnu/llvm/llvm/lib/Target/AArch64/
  AArch64SchedPredicates.td
    49: def CheckShiftLSR : CheckImmOperand_s<3, "AArch64_AM::LSR">;
  AArch64AsmPrinter.cpp
    517: .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSR, 56)),  (in emitHwasanMemaccessSymbols())
    607: .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSR, 56)),  (in emitHwasanMemaccessSymbols())
  AArch64SchedNeoverseN2.td
    1602: (instregex "^(ASR|LSL|LSR)_WIDE_ZPmZ_[BHS]$",
    1603:            "^(ASR|LSL|LSR)_WIDE_ZZZ_[BHS]$",
    1604:            "^(ASR|LSL|LSR)_ZPmI_[BHSD]$",
    1605:            "^(ASR|LSL|LSR)_ZPmZ_[BHSD]$",
    1606:            "^(ASR|LSL|LSR)_ZZI_[BHSD]$",
  AArch64ISelDAGToDAG.cpp
    591:  return AArch64_AM::LSR;  (in getShiftTypeForNode())
    2524: } else if (AArch64_AM::getShiftType(ShiftTypeAndValue) == AArch64_AM::LSR) {  (in getUsefulBitsFromOrWithShiftedReg())
    3059: EncodedShiftImm = AArch64_AM::getShifterImm(AArch64_AM::LSR, SrlImm);  (in isWorthFoldingIntoOrrWithShift())
    3159: AArch64_AM::getShifterImm(AArch64_AM::LSR, SrlImm), DL, VT)};  (in tryOrrWithShift())
    3322: SDNode *LSR = CurDAG->getMachineNode(  (in tryBitfieldInsertOpFromOr(), local)
    3331: SDValue Ops[] = {Dst, SDValue(LSR, 0),  (in tryBitfieldInsertOpFromOr())
  AArch64SchedAmpere1.td
    990: (instregex "(ASR|LSL|LSR|ROR)V(W|X)r")>;
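
The tryOrrWithShift() and tryBitfieldInsertOpFromOr() hits in
AArch64ISelDAGToDAG.cpp above fold an OR of a masked destination with
a right-shifted source into a single ORR-with-LSR or bitfield-insert
instruction. A hedged C++ illustration of the kind of pattern being
matched (constants chosen arbitrarily):

    #include <cstdint>

    // Take bits [15:8] of src and insert them into bits [7:0] of dst.
    // Written naively this is a shift, two masks, and an OR; the DAG
    // combiner can select it as one BFXIL or an ORR with `lsr #8`.
    uint64_t insert_low_byte(uint64_t dst, uint64_t src) {
      return (dst & ~0xffULL) | ((src >> 8) & 0xffULL);
    }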

/openbsd/gnu/llvm/llvm/lib/Target/AArch64/Utils/
  AArch64BaseInfo.h
    611: LSR,  (enumerator)

/openbsd/gnu/llvm/llvm/lib/Target/Hexagon/
  HexagonConstPropagation.cpp
    2708: LatticeCell LSR;  (in evaluateHexCondMove(), local)
    2709: if (!evaluate(R, LR, LSR))  (in evaluateHexCondMove())
    2711: RC.meet(LSR);  (in evaluateHexCondMove())

/openbsd/gnu/usr.bin/binutils-2.17/cpu/
  mt.cpu
    276: LSL LSR ASR - - - - -
    829: (dni lsr "LSR DstReg, SrcReg1, SrcReg2"
  cris.cpu
    1676: "AND" "OR" "ASR" "LSR")
    3769: ; LSR.m Rs,Rd [ Rd | 011111mm | Rs ]

/openbsd/gnu/llvm/llvm/lib/Target/AArch64/AsmParser/
  AArch64AsmParser.cpp
    1461: return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||  (in isShifter())
    1556: return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||  (in isArithmeticShifter())
    1567: return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||  (in isLogicalShifter())
    3565: .Case("lsr", AArch64_AM::LSR)  (in tryParseOptionalShiftExtend())
    3588: if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||  (in tryParseOptionalShiftExtend())
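
tryParseOptionalShiftExtend() maps the textual shift mnemonic in
operands such as `add x0, x1, x2, lsr #4` onto the AArch64_AM
enumerators. A rough stand-alone model of that mapping; the real code
uses an llvm::StringSwitch chain (.Case("lsr", AArch64_AM::LSR), as in
the hit above), and the names below are simplified:

    #include <string>

    enum ShiftExtendType { LSL, LSR, ASR, ROR, InvalidShiftExtend };

    // Lower-cased mnemonic -> shift type.
    ShiftExtendType parseShiftName(const std::string &Name) {
      if (Name == "lsl") return LSL;
      if (Name == "lsr") return LSR;
      if (Name == "asr") return ASR;
      if (Name == "ror") return ROR;
      return InvalidShiftExtend;
    }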

/openbsd/gnu/llvm/llvm/lib/Target/X86/
  README.txt
    181: The following tests perform worse with LSR:
    771: 1. LSR should rewrite the first cmp with induction variable %ecx.

/openbsd/gnu/llvm/llvm/lib/Target/ARC/
  ARCInstrInfo.td
    303: defm LSR : ArcBinaryEXT5Inst<0b000001, "lsr">;

/openbsd/gnu/llvm/llvm/lib/Transforms/Scalar/
  LoopStrengthReduce.cpp
    6579: const LSRInstance &LSR) {  (in GetInductionVariable(), argument)
    6592: for (const WeakVH &IV : LSR.getScalarEvolutionIVs()) {  (in GetInductionVariable())
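
In this file, and in the ARM and X86 README.txt hits above, LSR stands
for Loop Strength Reduction rather than any shift: the pass rewrites
expensive induction-variable expressions into cheap per-iteration
increments. A hand-written before/after sketch of the classic
transformation:

    // Before: the induction variable is multiplied every iteration.
    void scale_before(int *a, int n) {
      for (int i = 0; i < n; ++i)
        a[i] = i * 7;
    }

    // After strength reduction: a running sum replaces the multiply.
    void scale_after(int *a, int n) {
      int v = 0;
      for (int i = 0; i < n; ++i, v += 7)
        a[i] = v;
    }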