/openbsd/gnu/llvm/llvm/lib/Target/RISCV/
  RISCVGatherScatterLowering.cpp
    166  Value *Stride;  in matchStridedStart() local
    216  assert(Stride != nullptr);  in matchStridedRecurrence()
    313  if (isa<ConstantInt>(Stride) && cast<ConstantInt>(Stride)->isOne())  in matchStridedRecurrence()
    314  Stride = SplatOp;  in matchStridedRecurrence()
    316  Stride = Builder.CreateMul(Stride, SplatOp, "stride");  in matchStridedRecurrence()
    326  Stride = Builder.CreateShl(Stride, SplatOp, "stride");  in matchStridedRecurrence()
    388  assert(Stride);  in determineBaseAndStride()
    403  Stride = Builder.CreateMul(Stride, ConstantInt::get(IntPtrTy, TypeScale));  in determineBaseAndStride()
    443  Stride = Builder.CreateMul(Stride, ConstantInt::get(IntPtrTy, TypeScale));  in determineBaseAndStride()
    465  Value *BasePtr, *Stride;  in tryCreateStridedLoadStore() local
    [all …]

/openbsd/gnu/llvm/llvm/include/llvm/Analysis/
  VectorUtils.h
    551  llvm::SmallVector<int, 16> createStrideMask(unsigned Start, unsigned Stride,
    629  InterleaveGroup(InstTy *Instr, int32_t Stride, Align Alignment)  in InterleaveGroup() argument
    631  Factor = std::abs(Stride);  in InterleaveGroup()
    634  Reverse = Stride < 0;  in InterleaveGroup()
    860  StrideDescriptor(int64_t Stride, const SCEV *Scev, uint64_t Size,  in StrideDescriptor()
    862  : Stride(Stride), Scev(Scev), Size(Size), Alignment(Alignment) {}  in StrideDescriptor()
    865  int64_t Stride = 0;  member
    885  createInterleaveGroup(Instruction *Instr, int Stride, Align Alignment) {  in createInterleaveGroup() argument
    889  new InterleaveGroup<Instruction>(Instr, Stride, Alignment);  in createInterleaveGroup()
    910  static bool isStrided(int Stride);
    [all …]
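
A minimal standalone sketch of the mask construction these hits point at: the createStrideMask() hit in VectorUtils.cpp later in this listing pushes Start + i * Stride, and InterleaveGroup derives Factor = |Stride| and Reverse = Stride < 0. makeStrideMask below is a hypothetical stand-in written for illustration, not LLVM's own helper.

    #include <iostream>
    #include <vector>

    // Build a shuffle mask that gathers one member of an interleaved group:
    // lane Start, then every Stride-th lane after it, VF times.
    static std::vector<int> makeStrideMask(unsigned Start, unsigned Stride,
                                           unsigned VF) {
      std::vector<int> Mask;
      for (unsigned i = 0; i < VF; ++i)
        Mask.push_back(Start + i * Stride); // select every Stride-th lane
      return Mask;
    }

    int main() {
      // With interleave factor 3 and VF = 4, member 1 of the group is
      // gathered by lanes 1, 4, 7, 10.
      for (int Idx : makeStrideMask(/*Start=*/1, /*Stride=*/3, /*VF=*/4))
        std::cout << Idx << ' ';
      std::cout << '\n'; // prints: 1 4 7 10
    }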

  LoopCacheAnalysis.h
    114  bool isConsecutive(const Loop &L, const SCEV *&Stride, unsigned CLS) const;

/openbsd/gnu/llvm/llvm/lib/Analysis/
  LoopCacheAnalysis.cpp
    291  const SCEV *Stride = nullptr;  in computeRefCost() local
    292  if (isConsecutive(L, Stride, CLS)) {  in computeRefCost()
    295  assert(Stride != nullptr &&  in computeRefCost()
    297  Type *WiderType = SE.getWiderType(Stride->getType(), TripCount->getType());  in computeRefCost()
    299  Stride = SE.getNoopOrAnyExtend(Stride, WiderType);  in computeRefCost()
    301  const SCEV *Numerator = SE.getMulExpr(Stride, TripCount);  in computeRefCost()
    462  bool IndexedReference::isConsecutive(const Loop &L, const SCEV *&Stride,  in isConsecutive() argument
    488  Stride = SE.getMulExpr(SE.getNoopOrSignExtend(Coeff, WiderType),  in isConsecutive()
    490  const SCEV *CacheLineSize = SE.getConstant(Stride->getType(), CLS);  in isConsecutive()
    492  Stride = SE.isKnownNegative(Stride) ? SE.getNegativeSCEV(Stride) : Stride;  in isConsecutive()
    [all …]
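
A rough reading of the computeRefCost() hits: for a consecutive reference the code forms Numerator = Stride * TripCount, and the cache line size CLS appears alongside the stride in isConsecutive(). A plausible model is therefore "about one cache miss every CLS / Stride iterations". The ceiling division and the helper name below are assumptions made for this sketch, not the pass's exact arithmetic, which works on SCEV expressions.

    #include <cstdint>
    #include <iostream>

    // Approximate reference cost of a consecutive access pattern.
    static uint64_t approxRefCost(uint64_t StrideBytes, uint64_t TripCount,
                                  uint64_t CLS) {
      return (StrideBytes * TripCount + CLS - 1) / CLS; // ~ceil(Stride*TC / CLS)
    }

    int main() {
      // 4-byte elements touched consecutively for 1024 iterations with
      // 64-byte cache lines: about 64 distinct lines, hence about 64 misses.
      std::cout << approxRefCost(4, 1024, 64) << '\n'; // prints 64
    }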

  VectorUtils.cpp
    270  Value *Stride = U->getValue();  in getStrideFromPointer() local
    271  if (!Lp->isLoopInvariant(Stride))  in getStrideFromPointer()
    277  Stride = getUniqueCastUse(Stride, Lp, StripedOffRecurrenceCast);  in getStrideFromPointer()
    279  return Stride;  in getStrideFromPointer()
    980  Mask.push_back(Start + i * Stride);  in createStrideMask()
    1139  bool InterleavedAccessInfo::isStrided(int Stride) {  in isStrided() argument
    1140  unsigned Factor = std::abs(Stride);  in isStrided()
    1177  int64_t Stride =  in collectConstStrideAccesses() local
    1264  if (isStrided(DesB.Stride) &&  in analyzeInterleaving()
    1327  if (!isStrided(DesA.Stride) || !isStrided(DesB.Stride))  in analyzeInterleaving()
    [all …]

  LoopAccessAnalysis.cpp
    1454  int64_t Stride = StepVal / Size;  in getPtrStride() local
    1462  if (!IsNoWrapAddRec && Stride != 1 && Stride != -1 &&  in getPtrStride()
    1477  return Stride;  in getPtrStride()
    1761  const uint64_t ByteStride = Stride * TypeByteSize;  in isSafeDependenceDistance()
    1827  return ScaledDist % Stride;  in areStridedAccessesIndependent()
    1889  uint64_t Stride = std::abs(StrideAPtr);  in isDependent() local
    1893  Stride, TypeByteSize))  in isDependent()
    2565  if (!Stride)  in collectStridedAccess()
    2585  const SCEV *StrideExpr = PSE->getSCEV(Stride);  in collectStridedAccess()
    2614  SymbolicStrides[Ptr] = Stride;  in collectStridedAccess()
    [all …]
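
A sketch of the unit-stride test implied by the getPtrStride() hits: the per-iteration pointer step in bytes divided by the element size gives the stride in elements (line 1454: Stride = StepVal / Size), and a stride of +1 or -1 means consecutive forward or backward access. This is a standalone illustration only; the real code derives StepVal from SCEV and also checks wrapping, and the helper name is hypothetical.

    #include <cstdint>
    #include <iostream>
    #include <optional>

    // Stride in elements, or nullopt if the step is not a whole number of them.
    static std::optional<int64_t> strideInElements(int64_t StepBytes,
                                                   int64_t EltSize) {
      if (EltSize == 0 || StepBytes % EltSize != 0)
        return std::nullopt;
      return StepBytes / EltSize;
    }

    int main() {
      // A pointer advancing 8 bytes per iteration over 8-byte elements.
      if (auto S = strideInElements(8, 8))
        std::cout << "stride = " << *S
                  << ((*S == 1 || *S == -1) ? " (consecutive)" : "") << '\n';
    }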

  ScalarEvolution.cpp
    12690  return getZero(Stride->getType());  in computeMaxBECountForLT()
    12695  if (IsSigned && isKnownNegative(Stride))  in computeMaxBECountForLT()
    12704  IsSigned ? getSignedRangeMin(Stride) : getUnsignedRangeMin(Stride);  in computeMaxBECountForLT()
    12882  if (!isKnownNonZero(Stride)) {  in howManyLessThans()
    12910  Stride = getUMaxExpr(Stride, getOne(Stride->getType()));  in howManyLessThans()
    12913  } else if (!Stride->isOne() && !NoWrap) {  in howManyLessThans()
    13109  if (Start == Stride || Start == getMinusSCEV(Stride, One)) {  in howManyLessThans()
    13125  getUDivExpr(getAddExpr(Delta, getMinusSCEV(Stride, One)), Stride);  in howManyLessThans()
    13184  if (!isKnownPositive(Stride))  in howManyGreaterThans()
    13191  if (!Stride->isOne() && !NoWrap)  in howManyGreaterThans()
    [all …]
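
The howManyLessThans() hit at line 13125 builds udiv(Delta + (Stride - 1), Stride), which is the ceiling division ceil(Delta / Stride): the number of strided steps needed to cover a distance of Delta. Below is a plain-integer sketch of that formula; exactly what Delta is, and the wrap-flag preconditions, are handled by the surrounding SCEV code and are not reproduced here.

    #include <cstdint>
    #include <iostream>

    // ceil(Delta / Stride) without floating point, assuming no overflow.
    static uint64_t stepsToCover(uint64_t Delta, uint64_t Stride) {
      return (Delta + (Stride - 1)) / Stride;
    }

    int main() {
      std::cout << stepsToCover(10, 3) << '\n'; // 4: after 3 steps only 9 is covered
      std::cout << stepsToCover(9, 3) << '\n';  // 3: the distance divides evenly
    }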

/openbsd/gnu/llvm/llvm/include/llvm/Object/
  Minidump.h
    111  MemoryInfoIterator(ArrayRef<uint8_t> Storage, size_t Stride)  in MemoryInfoIterator() argument
    112  : Storage(Storage), Stride(Stride) {  in MemoryInfoIterator()
    113  assert(Storage.size() % Stride == 0);  in MemoryInfoIterator()
    126  Storage = Storage.drop_front(Stride);
    132  size_t Stride;  variable
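
A standalone sketch of the pattern MemoryInfoIterator uses: a flat byte buffer holds fixed-size records, Stride is the on-disk record size, and advancing simply drops Stride bytes from the front (the line-126 hit). The Record layout and class name below are invented for the example; the real iterator yields minidump MemoryInfo entries.

    #include <cassert>
    #include <cstdint>
    #include <cstring>
    #include <iostream>
    #include <vector>

    struct Record { uint32_t Base; uint32_t Size; };

    class RecordIterator {
      const uint8_t *Data;
      size_t Remaining, Stride;

    public:
      RecordIterator(const std::vector<uint8_t> &Storage, size_t Stride)
          : Data(Storage.data()), Remaining(Storage.size()), Stride(Stride) {
        assert(Remaining % Stride == 0 && "buffer must hold whole records");
      }
      bool done() const { return Remaining == 0; }
      Record current() const {
        Record R;
        std::memcpy(&R, Data, sizeof(R)); // on-disk records may be larger than Record
        return R;
      }
      void next() { Data += Stride; Remaining -= Stride; } // drop_front(Stride)
    };

    int main() {
      std::vector<uint8_t> Buf(2 * 12, 0); // two 12-byte records
      uint32_t First[2] = {0x1000, 0x200}; // Base and Size of the first record
      std::memcpy(Buf.data(), First, sizeof(First));
      for (RecordIterator It(Buf, /*Stride=*/12); !It.done(); It.next())
        std::cout << std::hex << It.current().Base << '\n'; // 1000 then 0
    }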

/openbsd/gnu/llvm/llvm/lib/Target/X86/
  X86LowerAMXType.cpp
    244  Value *Stride = Builder.getInt64(64);  in combineLoadBitcast() local
    272  Value *Stride = Builder.getInt64(64);  in combineBitcastStore() local
    297  Value *I8Ptr, *Stride;  in transformBitcast() local
    303  Stride = Builder.getInt64(64);  in transformBitcast()
    474  Value *Stride = Builder.getInt64(64);  in createTileStore() local
    499  Value *Stride = Builder.getInt64(64);  in replaceWithTileLoad() local
    500  std::array<Value *, 4> Args = {Row, Col, Ptr, Stride};  in replaceWithTileLoad()
    937  Value *Stride = Builder.getInt64(64);  in combineCastStore() local
    963  Value *Stride = Builder.getInt64(64);  in combineLoadCast() local
    1125  Value *I8Ptr, *Stride;  in transformAMXCast() local
    [all …]

  X86InterleavedAccess.cpp
    293  unsigned VecElems, unsigned Stride,  in reorderSubVector() argument
    297  for (unsigned i = 0; i < Stride; i++)  in reorderSubVector()
    305  for (unsigned i = 0; i < (VecElems / 16) * Stride; i += 2) {  in reorderSubVector()
    306  genShuffleBland(VT, VPShuf, OptimizeShuf, (i / Stride) * 16,  in reorderSubVector()
    307  (i + 1) / Stride * 16);  in reorderSubVector()
    309  Vec[i % Stride], Vec[(i + 1) % Stride], OptimizeShuf);  in reorderSubVector()
    314  std::copy(Temp, Temp + Stride, TransposedMatrix.begin());  in reorderSubVector()
    317  for (unsigned i = 0; i < Stride; i++)  in reorderSubVector()
    440  static void createShuffleStride(MVT VT, int Stride,  in createShuffleStride() argument
    447  Mask.push_back((i * Stride) % LaneSize + LaneSize * Lane);  in createShuffleStride()

  X86LowerAMXIntrinsics.cpp
    79  Value *Ptr, Value *Stride, Value *Tile);
    151  Value *Col, Value *Ptr, Value *Stride, Value *Tile) {  in createTileLoadStoreLoops() argument
    184  Value *CurrentRowZExt = B.CreateZExt(CurrentRow, Stride->getType());  in createTileLoadStoreLoops()
    185  Value *CurrentColZExt = B.CreateZExt(CurrentCol, Stride->getType());  in createTileLoadStoreLoops()
    187  B.CreateAdd(B.CreateMul(CurrentRowZExt, Stride), CurrentColZExt);  in createTileLoadStoreLoops()
    517  Value *M, *N, *Ptr, *Stride, *Tile;  in lowerTileLoadStore() local
    521  m_Value(M), m_Value(N), m_Value(Ptr), m_Value(Stride)));  in lowerTileLoadStore()
    525  m_Value(Stride), m_Value(Tile)));  in lowerTileLoadStore()
    531  Value *StrideDWord = PreBuilder.CreateLShr(Stride, PreBuilder.getInt64(2));  in lowerTileLoadStore()
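
A sketch of the addressing the createTileLoadStoreLoops() hits build in IR: inside a row/column loop nest, the byte offset of element (row, col) is row * Stride + col, where Stride is the row pitch of the memory operand. This is a hypothetical scalar version for illustration; the real pass emits IR loops over 2D AMX tiles.

    #include <cstdint>
    #include <iostream>
    #include <vector>

    // Copy a Rows x Cols tile out of a larger buffer whose rows are
    // Stride bytes apart.
    static void copyTile(const uint8_t *Src, uint8_t *Dst, unsigned Rows,
                         unsigned Cols, uint64_t Stride) {
      for (unsigned Row = 0; Row < Rows; ++Row)
        for (unsigned Col = 0; Col < Cols; ++Col)
          Dst[Row * Cols + Col] = Src[Row * Stride + Col]; // row-major with pitch
    }

    int main() {
      std::vector<uint8_t> Src(4 * 64), Dst(4 * 16);
      for (size_t i = 0; i < Src.size(); ++i)
        Src[i] = uint8_t(i);
      copyTile(Src.data(), Dst.data(), /*Rows=*/4, /*Cols=*/16, /*Stride=*/64);
      std::cout << int(Dst[17]) << '\n'; // row 1, col 1 -> Src[64 + 1] = 65
    }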

/openbsd/gnu/llvm/llvm/include/llvm/IR/
  MatrixBuilder.h
    67  Value *Stride, bool IsVolatile, unsigned Rows,
    71  Value *Ops[] = {DataPtr, Stride, B.getInt1(IsVolatile), B.getInt32(Rows),
    73  Type *OverloadedTypes[] = {RetType, Stride->getType()};
    90  Value *Stride, bool IsVolatile,
    94  Stride, B.getInt1(IsVolatile),
    96  Type *OverloadedTypes[] = {Matrix->getType(), Stride->getType()};

/openbsd/gnu/llvm/llvm/lib/Transforms/Scalar/
  StraightLineStrengthReduce.cpp
    144  : CandidateKind(CT), Base(B), Index(Idx), Stride(S), Ins(I) {}  in Candidate()
    155  Value *Stride = nullptr;  member
    281  Basis.Base == C.Base && Basis.Stride == C.Stride &&  in isBasisFor()
    293  static bool isAddFoldable(const SCEV *Base, ConstantInt *Index, Value *Stride,  in isAddFoldable() argument
    305  return isAddFoldable(C.Base, C.Index, C.Stride, TTI);  in isFoldable()
    607  return C.Stride;  in emitBump()
    610  return Builder.CreateNeg(C.Stride);  in emitBump()
    616  Value *ExtendedStride = Builder.CreateSExtOrTrunc(C.Stride, DeltaType);  in emitBump()
    635  C.Stride == Basis.Stride);  in rewriteCandidateWithBasis()
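
An illustration of the rewrite suggested by the isBasisFor()/emitBump() hits: candidates have the shape Base + Index * Stride, a basis must share Base and Stride, and the later candidate can then be computed from the basis plus a cheap "bump" instead of a fresh multiply. The integer-only example below shows only the arithmetic, not the pass itself; all names are made up.

    #include <cstdint>
    #include <iostream>

    int main() {
      int64_t Base = 1000, Stride = 8;
      int64_t BasisIndex = 3, CandIndex = 5;

      int64_t Basis = Base + BasisIndex * Stride;       // already computed: 1024
      int64_t Bump = (CandIndex - BasisIndex) * Stride; // 2 * 8 = 16
      int64_t Rewritten = Basis + Bump;                 // 1040, no second multiply

      std::cout << Rewritten << " == " << Base + CandIndex * Stride << '\n';
    }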

  LowerMatrixIntrinsics.cpp
    171  Value *computeVectorAddr(Value *BasePtr, Value *VecIdx, Value *Stride,  in computeVectorAddr() argument
    175  assert((!isa<ConstantInt>(Stride) ||  in computeVectorAddr()
    176  cast<ConstantInt>(Stride)->getZExtValue() >= NumElements) &&  in computeVectorAddr()
    181  Value *VecStart = Builder.CreateMul(VecIdx, Stride, "vec.start");  in computeVectorAddr()
    1087  if (auto *ConstStride = dyn_cast<ConstantInt>(Stride)) {  in getAlignForIndex()
    1107  Stride, Shape.getStride(), EltTy, Builder);  in loadMatrix()
    1109  VecTy, GEP, getAlignForIndex(I, Stride, EltTy, MAlign),  in loadMatrix()
    1160  Value *Stride = Inst->getArgOperand(1);  in LowerColumnMajorLoad() local
    1161  LowerLoad(Inst, Ptr, Inst->getParamAlign(0), Stride,  in LowerColumnMajorLoad()
    1230  Value *Stride = Inst->getArgOperand(2);  in LowerColumnMajorStore() local
    [all …]

  LoopIdiomRecognize.cpp
    549  APInt Stride = getStoreStride(StoreEv);  in isLegalStore() local
    551  if (StoreSize != Stride && StoreSize != -Stride)  in isLegalStore()
    778  APInt Stride = getStoreStride(StoreEv);  in processLoopStores() local
    782  if (StoreSize != Stride && StoreSize != -Stride)  in processLoopStores()
    785  bool IsNegStride = StoreSize == -Stride;  in processLoopStores()
    938  APInt Stride = ConstStride->getAPInt();  in processLoopMemSet() local
    939  if (SizeInBytes != Stride && SizeInBytes != -Stride)  in processLoopMemSet()
    942  IsNegStride = SizeInBytes == -Stride;  in processLoopMemSet()
    1349  APInt Stride = getStoreStride(StoreEv);  in processLoopStoreOfLoopLoad()  local
    1356  bool IsNegStride = StoreSize == -Stride;  in processLoopStoreOfLoopLoad()

/openbsd/gnu/llvm/llvm/include/llvm/ADT/
  SparseSet.h
    206  const unsigned Stride = std::numeric_limits<SparseT>::max() + 1u;
    207  for (unsigned i = Sparse[Idx], e = size(); i < e; i += Stride) {
    213  if (!Stride)
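
A sketch of the probing scheme behind the SparseSet and SparseMultiSet hits: the sparse array stores only the low bits (SparseT) of an element's position in the dense vector, so lookup probes dense positions congruent to that value modulo Stride = max(SparseT) + 1. (The `if (!Stride)` hits apparently cover the case where SparseT is as wide as unsigned and max()+1 wraps to 0, so only one probe is made.) The class below is a simplified stand-in written for illustration, with the element equal to its key.

    #include <cstdint>
    #include <iostream>
    #include <limits>
    #include <vector>

    struct TinySparseSet {
      using SparseT = uint8_t;
      std::vector<SparseT> Sparse;  // indexed by key; low 8 bits of dense position
      std::vector<unsigned> Dense;  // the elements themselves

      explicit TinySparseSet(unsigned Universe) : Sparse(Universe, 0) {}

      void insert(unsigned Key) {
        Sparse[Key] = SparseT(Dense.size()); // truncated dense index
        Dense.push_back(Key);
      }

      // Mirrors the findIndex() loop above: probe i, i + 256, i + 512, ...
      int findIndex(unsigned Key) const {
        const unsigned Stride = unsigned(std::numeric_limits<SparseT>::max()) + 1u;
        for (unsigned i = Sparse[Key], e = unsigned(Dense.size()); i < e; i += Stride)
          if (Dense[i] == Key)
            return int(i);
        return -1;
      }
    };

    int main() {
      TinySparseSet S(/*Universe=*/512);
      for (unsigned K = 0; K < 300; ++K)
        S.insert(K);
      // Key 259 sits at dense index 259, but Sparse[259] only records 3;
      // the probe sequence 3, 259 still finds it.
      std::cout << S.findIndex(259) << '\n'; // prints 259
      std::cout << S.findIndex(400) << '\n'; // prints -1 (never inserted)
    }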

  SparseMultiSet.h
    356  const unsigned Stride = std::numeric_limits<SparseT>::max() + 1u;  in findIndex() local
    357  for (unsigned i = Sparse[Idx], e = Dense.size(); i < e; i += Stride) {  in findIndex()
    364  if (!Stride)  in findIndex()

/openbsd/gnu/llvm/llvm/lib/IR/
  LLVMContextImpl.h
    367  Metadata *Stride;
    370  Metadata *Stride)
    372  Stride(Stride) {}
    396  BoundsEqual(Stride, RHS->getRawStride());
    403  LowerBound, UpperBound, Stride);
    404  return hash_combine(CountNode, LowerBound, UpperBound, Stride);
    412  Metadata *Stride;
    415  Metadata *Stride)
    417  Stride(Stride) {}
    426  (Stride == RHS->getRawStride());
    [all …]

/openbsd/gnu/llvm/compiler-rt/lib/sanitizer_common/tests/
  sanitizer_vector_test.cpp
    29  TEST(Vector, Stride) {  in TEST() argument

/openbsd/gnu/llvm/compiler-rt/lib/scudo/standalone/tests/
  vector_test.cpp
    25  TEST(ScudoVectorTest, Stride) {  in TEST() argument

/openbsd/gnu/llvm/llvm/lib/CodeGen/
  MachineRegisterInfo.cpp
    339  int Stride = 1;  in moveOperands() local
    341  Stride = -1;  in moveOperands()
    370  Dst += Stride;  in moveOperands()
    371  Src += Stride;  in moveOperands()
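
The moveOperands() hits pick Stride = +1 or -1 and then advance both Dst and Src by it each step: when the two ranges overlap, copying in the right direction keeps source elements from being overwritten before they are read, the same trick memmove uses. A minimal illustration with ints standing in for MachineOperands; moveRange is a hypothetical helper, not the MRI code.

    #include <iostream>
    #include <vector>

    static void moveRange(int *Dst, int *Src, unsigned NumOps) {
      int Stride = 1;
      if (Dst > Src) { // destination is ahead of the source: copy backwards
        Stride = -1;
        Dst += NumOps - 1;
        Src += NumOps - 1;
      }
      for (unsigned i = 0; i != NumOps; ++i) {
        *Dst = *Src;
        Dst += Stride;
        Src += Stride;
      }
    }

    int main() {
      std::vector<int> Buf = {1, 2, 3, 4, 5, 0, 0};
      moveRange(&Buf[2], &Buf[0], 5); // shift the first five values up by two
      for (int V : Buf)
        std::cout << V << ' '; // prints: 1 2 1 2 3 4 5
      std::cout << '\n';
    }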

/openbsd/gnu/llvm/clang/include/clang/AST/
  ExprOpenMP.h
    64  OMPArraySectionExpr(Expr *Base, Expr *LowerBound, Expr *Length, Expr *Stride,  in OMPArraySectionExpr() argument
    74  SubExprs[STRIDE] = Stride;  in OMPArraySectionExpr()

/openbsd/gnu/llvm/llvm/lib/Transforms/Vectorize/
  LoopVectorizationLegality.cpp
    458  int Stride = getPtrStride(PSE, AccessTy, Ptr, TheLoop, Strides,  in isConsecutivePtr() local
    460  if (Stride == 1 || Stride == -1)  in isConsecutivePtr()
    461  return Stride;  in isConsecutivePtr()

/openbsd/gnu/llvm/llvm/include/llvm/CodeGen/
  SelectionDAG.h
    1485  SDValue Offset, SDValue Stride, SDValue Mask,
    1493  SDValue Chain, SDValue Ptr, SDValue Offset, SDValue Stride, SDValue Mask,
    1500  return getStridedLoadVP(AM, ExtType, VT, DL, Chain, Ptr, Offset, Stride,
    1507  SDValue Offset, SDValue Stride, SDValue Mask,
    1511  SDValue Stride, SDValue Mask, SDValue EVL,
    1518  SDValue Stride, SDValue Mask, SDValue EVL,
    1522  SDValue Chain, SDValue Ptr, SDValue Stride, SDValue Mask,
    1527  SDValue Chain, SDValue Ptr, SDValue Stride,
    1534  SDValue Ptr, SDValue Offset, SDValue Stride,
    1540  SDValue Ptr, SDValue Stride, SDValue Mask,
    [all …]

/openbsd/gnu/llvm/llvm/lib/Target/AArch64/MCTargetDesc/
  AArch64InstPrinter.cpp
    1487  static unsigned getNextVectorRegister(unsigned Reg, unsigned Stride = 1) {  in getNextVectorRegister() argument
    1488  while (Stride--) {  in getNextVectorRegister()
    1655  unsigned Stride = 1;  in printVectorList() local
    1657  Stride = 8;  in printVectorList()
    1659  Stride = 4;  in printVectorList()
    1681  NumRegs > 1 && Stride == 1 &&  in printVectorList()
    1696  ++i, Reg = getNextVectorRegister(Reg, Stride)) {  in printVectorList()