/openbsd/gnu/llvm/llvm/lib/Target/AMDGPU/
AMDGPULowerKernelAttributes.cpp
  136  if (LoadSize == 4)  in processUse()
  140  if (LoadSize == 4)  in processUse()
  144  if (LoadSize == 4)  in processUse()
  148  if (LoadSize == 2)  in processUse()
  152  if (LoadSize == 2)  in processUse()
  156  if (LoadSize == 2)  in processUse()
  160  if (LoadSize == 2)  in processUse()
  164  if (LoadSize == 2)  in processUse()
  168  if (LoadSize == 2)  in processUse()
  177  if (LoadSize == 2)  in processUse()
  [all …]
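The processUse() hits branch on LoadSize 4 versus 2 because the pass pattern-matches loads of individual fields out of the HSA kernel dispatch packet, whose grid_size_{x,y,z} members are 32-bit while the workgroup_size_{x,y,z} members are 16-bit. The sketch below is a hypothetical classifier illustrating that correspondence; the offsets follow the public hsa_kernel_dispatch_packet_t layout, and classifyDispatchLoad is an invented name, not the pass's API.

    // Classify a (byte offset, load size) pair within an HSA kernel
    // dispatch packet. Offsets are from the public packet layout;
    // this is an illustrative sketch, not code from the pass.
    #include <cstdint>
    #include <cstdio>

    const char *classifyDispatchLoad(uint64_t Offset, unsigned LoadSize) {
      if (LoadSize == 4) {        // 32-bit grid_size_{x,y,z} fields
        switch (Offset) {
        case 12: return "grid_size_x";
        case 16: return "grid_size_y";
        case 20: return "grid_size_z";
        }
      } else if (LoadSize == 2) { // 16-bit workgroup_size_{x,y,z} fields
        switch (Offset) {
        case 4: return "workgroup_size_x";
        case 6: return "workgroup_size_y";
        case 8: return "workgroup_size_z";
        }
      }
      return "unknown";
    }

    int main() {
      std::printf("%s\n", classifyDispatchLoad(16, 4)); // grid_size_y
      std::printf("%s\n", classifyDispatchLoad(6, 2));  // workgroup_size_y
    }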
AMDGPUTargetTransformInfo.h
  122  unsigned getLoadVectorFactor(unsigned VF, unsigned LoadSize,
AMDGPUTargetTransformInfo.cpp
  335  unsigned GCNTTIImpl::getLoadVectorFactor(unsigned VF, unsigned LoadSize,  in getLoadVectorFactor() argument
  338  unsigned VecRegBitWidth = VF * LoadSize;  in getLoadVectorFactor()
  341  return 128 / LoadSize;  in getLoadVectorFactor()
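The visible lines suggest GCNTTIImpl::getLoadVectorFactor() clamps the vectorization factor so that VF * LoadSize (LoadSize in bits) still fits a 128-bit vector register. A minimal sketch of that arithmetic, assuming the guard condition not shown in the hits simply tests the 128-bit bound:

    // If the requested factor overflows a 128-bit register, return how
    // many LoadSize-bit elements fit in 128 bits instead. The guard is
    // an assumption reconstructed from the visible lines.
    #include <cassert>

    unsigned getLoadVectorFactorSketch(unsigned VF, unsigned LoadSize) {
      unsigned VecRegBitWidth = VF * LoadSize;
      if (VecRegBitWidth > 128)
        return 128 / LoadSize;
      return VF;
    }

    int main() {
      assert(getLoadVectorFactorSketch(8, 32) == 4); // 256 bits: clamp to 4
      assert(getLoadVectorFactorSketch(4, 32) == 4); // 128 bits: keep VF
    }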
AMDGPURegisterBankInfo.cpp
  1055  unsigned LoadSize = LoadTy.getSizeInBits();  in applyMappingLoad() local
  1063  if (LoadSize != 32 && LoadSize != 96)  in applyMappingLoad()
  1072  if (LoadSize == 32 &&  in applyMappingLoad()
  1081  if (LoadSize == 32) {  in applyMappingLoad()
  1127  if (LoadSize <= MaxNonSmrdLoadSize)  in applyMappingLoad()
  1136  assert(LoadSize % MaxNonSmrdLoadSize == 0);  in applyMappingLoad()
  1354  unsigned LoadSize = Ty.getSizeInBits();  in applyMappingSBufferLoad() local
  1356  if (LoadSize == 256 || LoadSize == 512) {  in applyMappingSBufferLoad()
  1357  NumLoads = LoadSize / 128;  in applyMappingSBufferLoad()
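Lines 1354-1357 show applyMappingSBufferLoad() breaking 256- and 512-bit loads into 128-bit pieces. A sketch of just that split count; numSBufferLoads is a hypothetical stand-in name:

    // Wide scalar-buffer loads are issued as NumLoads 128-bit loads.
    #include <cassert>

    unsigned numSBufferLoads(unsigned LoadSize) { // size in bits
      if (LoadSize == 256 || LoadSize == 512)
        return LoadSize / 128; // two or four 128-bit pieces
      return 1;                // small enough for a single load
    }

    int main() {
      assert(numSBufferLoads(512) == 4);
      assert(numSBufferLoads(96) == 1);
    }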
SIInstrInfo.cpp
  509  const unsigned LoadSize = NumBytes / NumLoads;  in shouldClusterMemOps() local
  510  const unsigned NumDWORDs = ((LoadSize + 3) / 4) * NumLoads;  in shouldClusterMemOps()
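Lines 509-510 derive the average size of the clustered loads and round each one up to whole 4-byte dwords before the cluster limit is applied. A self-contained sketch of that arithmetic (the limit comparison that consumes NumDWORDs is outside the visible hits):

    #include <cassert>

    unsigned clusteredDWORDs(unsigned NumBytes, unsigned NumLoads) {
      const unsigned LoadSize = NumBytes / NumLoads; // average bytes/load
      return ((LoadSize + 3) / 4) * NumLoads;        // round up to dwords
    }

    int main() {
      assert(clusteredDWORDs(12, 2) == 4); // two 6-byte loads: 2 dwords each
      assert(clusteredDWORDs(8, 2) == 2);  // two 4-byte loads: 1 dword each
    }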
/openbsd/gnu/llvm/llvm/lib/Transforms/Utils/
VNCoercion.cpp
  189  uint64_t LoadSize = DL.getTypeSizeInBits(LoadTy).getFixedValue();  in analyzeLoadFromClobberingWrite() local
  191  if ((WriteSizeInBits & 7) | (LoadSize & 7))  in analyzeLoadFromClobberingWrite()
  194  LoadSize /= 8;  in analyzeLoadFromClobberingWrite()
  430  ShiftAmt = (StoreSize - LoadSize - Offset) * 8;  in getStoreValueForLoadHelper()
  435  if (LoadSize != StoreSize)  in getStoreValueForLoadHelper()
  470  if (Offset + LoadSize > SrcValStoreSize) {  in getLoadValueForLoad()
  475  unsigned NewLoadSize = Offset + LoadSize;  in getLoadValueForLoad()
  515  if (Offset + LoadSize > SrcValStoreSize)  in getConstantLoadValueForLoad()
  535  if (LoadSize != 1)  in getMemInstValueForLoad()
  541  for (unsigned NumBytesSet = 1; NumBytesSet != LoadSize;) {  in getMemInstValueForLoad()
  [all …]
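Line 430 is the byte-extraction shift GVN uses when a load is fully covered by an earlier store: on a big-endian target the wanted bytes sit (StoreSize - LoadSize - Offset) * 8 bits up from the bottom. The sketch below pairs it with the usual little-endian counterpart (Offset * 8), which is an assumption here since that branch is not among the hits; extractLoadedBits is a hypothetical helper working on plain integers.

    #include <cassert>
    #include <cstdint>

    uint64_t extractLoadedBits(uint64_t StoredVal, unsigned StoreSize,
                               unsigned LoadSize, unsigned Offset,
                               bool LittleEndian) { // sizes in bytes
      unsigned ShiftAmt = LittleEndian
                              ? Offset * 8
                              : (StoreSize - LoadSize - Offset) * 8;
      uint64_t Mask =
          (LoadSize == 8) ? ~0ULL : ((1ULL << (LoadSize * 8)) - 1);
      return (StoredVal >> ShiftAmt) & Mask; // keep LoadSize bytes
    }

    int main() {
      // Byte 1 of a 4-byte little-endian store of 0x44332211 is 0x22.
      assert(extractLoadedBits(0x44332211, 4, 1, 1, true) == 0x22);
      // Same byte under big-endian layout: shift (4-1-1)*8 = 16 bits.
      assert(extractLoadedBits(0x11223344, 4, 1, 1, false) == 0x22);
    }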
/openbsd/gnu/llvm/llvm/lib/CodeGen/
ExpandMemCmp.cpp
  91  LoadEntry(unsigned LoadSize, uint64_t Offset)  in LoadEntry()
  92  : LoadSize(LoadSize), Offset(Offset) {  in LoadEntry()
  96  unsigned LoadSize;  member
  150  const unsigned LoadSize = LoadSizes.front();  in computeGreedyLoadSequence() local
  151  const uint64_t NumLoadsForThisSize = Size / LoadSize;  in computeGreedyLoadSequence()
  161  LoadSequence.push_back({LoadSize, Offset});  in computeGreedyLoadSequence()
  162  Offset += LoadSize;  in computeGreedyLoadSequence()
  164  if (LoadSize > 1)  in computeGreedyLoadSequence()
  166  Size = Size % LoadSize;  in computeGreedyLoadSequence()
  477  if (CurLoadEntry.LoadSize == 1) {  in emitLoadCompareBlock()
  [all …]
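The computeGreedyLoadSequence() hits spell out how memcmp expansion plans its loads: take the largest allowed load size, cover as many bytes as fit, keep the remainder, then fall through to the next smaller size. A self-contained sketch of that loop with simplified types:

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    struct LoadEntry {
      unsigned LoadSize; // bytes compared by this step
      uint64_t Offset;   // byte offset within the buffers
    };

    std::vector<LoadEntry> greedyLoadSequence(uint64_t Size,
                                              std::vector<unsigned> Sizes) {
      std::vector<LoadEntry> Seq;
      uint64_t Offset = 0;
      while (Size && !Sizes.empty()) {
        const unsigned LoadSize = Sizes.front();
        for (uint64_t I = 0, E = Size / LoadSize; I != E; ++I) {
          Seq.push_back({LoadSize, Offset}); // one more load of this size
          Offset += LoadSize;
        }
        Size %= LoadSize;           // leftover bytes for smaller loads
        Sizes.erase(Sizes.begin());
      }
      return Seq;
    }

    int main() {
      // memcmp of 15 bytes with {8,4,2,1}-byte loads: 8 + 4 + 2 + 1.
      for (const LoadEntry &E : greedyLoadSequence(15, {8, 4, 2, 1}))
        std::printf("load %u bytes at offset %llu\n", E.LoadSize,
                    (unsigned long long)E.Offset);
    }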
StackSlotColoring.cpp
  440  unsigned LoadSize = 0;  in RemoveDeadStores() local
  442  if (!(LoadReg = TII->isLoadFromStackSlot(*I, FirstSS, LoadSize)))  in RemoveDeadStores()
  453  LoadSize != StoreSize)  in RemoveDeadStores()
/openbsd/gnu/llvm/llvm/lib/Analysis/
Loads.cpp
  342  const uint64_t LoadSize = Size.getZExtValue();  in isSafeToLoadUnconditionally() local
  392  LoadSize <= DL.getTypeStoreSize(AccessedTy))  in isSafeToLoadUnconditionally()
  396  LoadSize <= DL.getTypeStoreSize(AccessedTy))  in isSafeToLoadUnconditionally()
  512  TypeSize LoadSize = DL.getTypeSizeInBits(AccessTy);  in getAvailableLoadStore() local
  513  if (TypeSize::isKnownLE(LoadSize, StoreSize))  in getAvailableLoadStore()
  542  uint64_t LoadSize = LoadTypeSize.getFixedValue();  in getAvailableLoadStore() local
  543  if ((Len->getValue() * 8).ult(LoadSize))  in getAvailableLoadStore()
  546  APInt Splat = LoadSize >= 8 ? APInt::getSplat(LoadSize, Val->getValue())  in getAvailableLoadStore()
  547  : Val->getValue().trunc(LoadSize);  in getAvailableLoadStore()
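Lines 542-547 cover forwarding a load from a memset: the single fill byte is splatted across the load width (LoadSize here is in bits), or truncated for sub-byte loads. A sketch of the same construction using uint64_t in place of APInt, so widths are capped at 64 bits:

    #include <cassert>
    #include <cstdint>

    uint64_t splatMemsetValue(uint8_t Val, unsigned LoadSizeInBits) {
      assert(LoadSizeInBits && LoadSizeInBits <= 64);
      if (LoadSizeInBits < 8)
        return Val & ((1u << LoadSizeInBits) - 1); // truncate, e.g. i1
      uint64_t Splat = 0;
      for (unsigned Bits = 0; Bits < LoadSizeInBits; Bits += 8)
        Splat |= (uint64_t)Val << Bits; // repeat the fill byte
      return Splat;
    }

    int main() {
      assert(splatMemsetValue(0xAB, 32) == 0xABABABABu);
      assert(splatMemsetValue(0x01, 1) == 0x1);
    }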
TargetTransformInfo.cpp
  1134  unsigned LoadSize,  in getLoadVectorFactor() argument
  1137  return TTIImpl->getLoadVectorFactor(VF, LoadSize, ChainSizeInBytes, VecTy);  in getLoadVectorFactor()
/openbsd/gnu/llvm/llvm/lib/Transforms/AggressiveInstCombine/
AggressiveInstCombine.cpp
  615  uint64_t LoadSize = 0;  member
  696  Loc = Loc.getWithNewSize(LOps.LoadSize);  in foldLoadsRecursive()
  735  LoadSize1 = LOps.LoadSize;  in foldLoadsRecursive()
  737  LoadSize2 = LOps.LoadSize;  in foldLoadsRecursive()
  755  LOps.LoadSize = LoadSize1 + LoadSize2;  in foldLoadsRecursive()
  783  IntegerType *WiderType = IntegerType::get(I.getContext(), LOps.LoadSize);  in foldConsecutiveLoads()
  791  Allowed = TTI.allowsMisalignedMemoryAccesses(I.getContext(), LOps.LoadSize,  in foldConsecutiveLoads()
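Line 755 shows the core of the fold: when two loads are proven adjacent, their widths simply add, and a single load of an IntegerType of LOps.LoadSize bits replaces them (subject to the misaligned-access check on line 791). The sketch below shows the equivalent value-level merge on plain integers, little-endian byte order assumed; mergeAdjacentLoads is an illustrative name:

    #include <cassert>
    #include <cstdint>

    uint64_t mergeAdjacentLoads(uint64_t Lo, unsigned LoadSize1, // bits
                                uint64_t Hi, unsigned LoadSize2) {
      const unsigned LoadSize = LoadSize1 + LoadSize2; // merged width
      assert(LoadSize <= 64);
      (void)LoadSize;
      return Lo | (Hi << LoadSize1); // second load lands above the first
    }

    int main() {
      // Two adjacent 16-bit loads 0x2211 and 0x4433 == one 32-bit load.
      assert(mergeAdjacentLoads(0x2211, 16, 0x4433, 16) == 0x44332211u);
    }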
/openbsd/gnu/llvm/llvm/lib/Target/AArch64/
AArch64LoadStoreOptimizer.cpp
  625  int LoadSize = TII->getMemScale(LoadInst);  in isLdOffsetInRangeOfSt() local
  634  : AArch64InstrInfo::getLdStOffsetOp(LoadInst).getImm() * LoadSize;  in isLdOffsetInRangeOfSt()
  636  (UnscaledLdOffset + LoadSize <= (UnscaledStOffset + StoreSize));  in isLdOffsetInRangeOfSt()
  1073  int LoadSize = TII->getMemScale(*LoadI);  in promoteLoadFromStore() local
  1085  if (LoadSize == StoreSize && (LoadSize == 4 || LoadSize == 8)) {  in promoteLoadFromStore()
  1088  if (StRt == LdRt && LoadSize == 8) {  in promoteLoadFromStore()
  1118  assert(LoadSize <= StoreSize && "Invalid load size");  in promoteLoadFromStore()
  1122  : AArch64InstrInfo::getLdStOffsetOp(*LoadI).getImm() * LoadSize;  in promoteLoadFromStore()
  1127  int Width = LoadSize * 8;  in promoteLoadFromStore()
  1134  (UnscaledLdOffset + LoadSize) <= UnscaledStOffset + StoreSize) &&  in promoteLoadFromStore()
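Lines 1122-1134 promote a load into a bitfield extract of a prior store's register, which is only legal when the load's byte range sits entirely inside the store's. A sketch of that containment test on unscaled (byte) offsets, mirroring the asserted condition:

    #include <cassert>

    bool loadCoveredByStore(int LdOffset, int LoadSize, // bytes
                            int StOffset, int StoreSize) {
      return LdOffset >= StOffset &&
             LdOffset + LoadSize <= StOffset + StoreSize;
    }

    int main() {
      assert(loadCoveredByStore(/*ld*/ 4, 4, /*st*/ 0, 8));  // inside
      assert(!loadCoveredByStore(/*ld*/ 6, 4, /*st*/ 0, 8)); // runs past
    }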
/openbsd/gnu/llvm/llvm/lib/Target/PowerPC/

PPCHazardRecognizers.h
  94  bool isLoadOfStoredAddress(uint64_t LoadSize, int64_t LoadOffset,
PPCHazardRecognizers.cpp
  298  isLoadOfStoredAddress(uint64_t LoadSize, int64_t LoadOffset,  in isLoadOfStoredAddress() argument
  313  if (int64_t(LoadOffset+LoadSize) > StoreOffset[i]) return true;  in isLoadOfStoredAddress()
/openbsd/gnu/llvm/llvm/lib/Target/X86/
X86AvoidStoreForwardingBlocks.cpp
  619  static bool isBlockingStore(int64_t LoadDispImm, unsigned LoadSize,  in isBlockingStore() argument
  622  (StoreDispImm <= LoadDispImm + (LoadSize - StoreSize)));  in isBlockingStore()
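isBlockingStore() is the inverse of the AArch64 containment test above: a narrower store whose bytes land wholly inside a wider load's footprint defeats hardware store-to-load forwarding. A sketch matching the visible signature; it assumes LoadSize >= StoreSize, which the pass's wider-load context implies:

    #include <cassert>
    #include <cstdint>

    bool isBlockingStoreSketch(int64_t LoadDispImm, unsigned LoadSize,
                               int64_t StoreDispImm, unsigned StoreSize) {
      // Assumes LoadSize >= StoreSize; otherwise the unsigned
      // subtraction below would wrap.
      return StoreDispImm >= LoadDispImm &&
             StoreDispImm <= LoadDispImm + (LoadSize - StoreSize);
    }

    int main() {
      assert(isBlockingStoreSketch(0, 16, 4, 4));   // store inside load
      assert(!isBlockingStoreSketch(0, 16, 16, 4)); // store past the load
    }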
/openbsd/gnu/llvm/llvm/include/llvm/Analysis/

TargetTransformInfo.h
  1451  unsigned getLoadVectorFactor(unsigned VF, unsigned LoadSize,
  1890  virtual unsigned getLoadVectorFactor(unsigned VF, unsigned LoadSize,
  2539  unsigned getLoadVectorFactor(unsigned VF, unsigned LoadSize,  in getLoadVectorFactor() argument
  2542  return Impl.getLoadVectorFactor(VF, LoadSize, ChainSizeInBytes, VecTy);  in getLoadVectorFactor()

TargetTransformInfoImpl.h
  817  unsigned getLoadVectorFactor(unsigned VF, unsigned LoadSize,  in getLoadVectorFactor() argument
/openbsd/gnu/llvm/llvm/lib/Transforms/Scalar/
LoopIdiomRecognize.cpp
  1300  int64_t LoadSize =  in loadAndStoreMayFormMemmove() local
  1302  if (BP1 != BP2 || LoadSize != int64_t(StoreSize))  in loadAndStoreMayFormMemmove()
  1305  (IsNegStride && LoadOff + LoadSize > StoreOff))  in loadAndStoreMayFormMemmove()
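The hits show two preconditions for turning the loop into a memmove: the accesses share a base pointer with equal sizes, and they must not overlap in the direction the loop advances (line 1305 is the negative-stride failure case, so success requires LoadOff + LoadSize <= StoreOff). The sketch below states both direction checks; the positive-stride condition is an assumption mirroring the visible negative-stride one:

    #include <cassert>
    #include <cstdint>

    bool mayFormMemmoveSketch(int64_t LoadOff, int64_t StoreOff,
                              int64_t AccessSize, bool IsNegStride) {
      if (IsNegStride)
        return LoadOff + AccessSize <= StoreOff; // load ends below store
      return LoadOff >= StoreOff + AccessSize;   // load starts above store
    }

    int main() {
      assert(mayFormMemmoveSketch(/*ld*/ 8, /*st*/ 0, 8, /*neg*/ false));
      assert(!mayFormMemmoveSketch(/*ld*/ 4, /*st*/ 0, 8, /*neg*/ false));
    }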
SROA.cpp
  1334  APInt LoadSize =  in isSafePHIToSpeculate() local
  1358  if (isSafeToLoadUnconditionally(InVal, MaxAlign, LoadSize, DL, TI))  in isSafePHIToSpeculate()
/openbsd/gnu/llvm/clang/lib/CodeGen/

CGBuiltin.cpp
  451  CharUnits LoadSize = CGF.getContext().getTypeSizeInChars(ElTy);  in EmitISOVolatileLoad() local
  453  llvm::IntegerType::get(CGF.getLLVMContext(), LoadSize.getQuantity() * 8);  in EmitISOVolatileLoad()
  455  llvm::LoadInst *Load = CGF.Builder.CreateAlignedLoad(ITy, Ptr, LoadSize);  in EmitISOVolatileLoad()