//===----------- VectorUtils.cpp - Vectorizer utility functions -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines vectorizer utilities.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/VectorUtils.h"
#include "llvm/ADT/EquivalenceClasses.h"
#include "llvm/Analysis/DemandedBits.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/CommandLine.h"

#define DEBUG_TYPE "vectorutils"

using namespace llvm;
using namespace llvm::PatternMatch;

/// Maximum factor for an interleaved memory access.
static cl::opt<unsigned> MaxInterleaveGroupFactor(
    "max-interleave-group-factor", cl::Hidden,
    cl::desc("Maximum factor for an interleaved access group (default = 8)"),
    cl::init(8));

/// Return true if all of the intrinsic's arguments and return type are scalars
/// for the scalar form of the intrinsic, and vectors for the vector form of the
/// intrinsic (except operands that are marked as always being scalar by
/// hasVectorInstrinsicScalarOpd).
bool llvm::isTriviallyVectorizable(Intrinsic::ID ID) {
  switch (ID) {
  case Intrinsic::abs:   // Begin integer bit-manipulation.
  case Intrinsic::bswap:
  case Intrinsic::bitreverse:
  case Intrinsic::ctpop:
  case Intrinsic::ctlz:
  case Intrinsic::cttz:
  case Intrinsic::fshl:
  case Intrinsic::fshr:
  case Intrinsic::smax:
  case Intrinsic::smin:
  case Intrinsic::umax:
  case Intrinsic::umin:
  case Intrinsic::sadd_sat:
  case Intrinsic::ssub_sat:
  case Intrinsic::uadd_sat:
  case Intrinsic::usub_sat:
  case Intrinsic::smul_fix:
  case Intrinsic::smul_fix_sat:
  case Intrinsic::umul_fix:
  case Intrinsic::umul_fix_sat:
  case Intrinsic::sqrt: // Begin floating-point.
  case Intrinsic::sin:
  case Intrinsic::cos:
  case Intrinsic::exp:
  case Intrinsic::exp2:
  case Intrinsic::log:
  case Intrinsic::log10:
  case Intrinsic::log2:
  case Intrinsic::fabs:
  case Intrinsic::minnum:
  case Intrinsic::maxnum:
  case Intrinsic::minimum:
  case Intrinsic::maximum:
  case Intrinsic::copysign:
  case Intrinsic::floor:
  case Intrinsic::ceil:
  case Intrinsic::trunc:
  case Intrinsic::rint:
  case Intrinsic::nearbyint:
  case Intrinsic::round:
  case Intrinsic::roundeven:
  case Intrinsic::pow:
  case Intrinsic::fma:
  case Intrinsic::fmuladd:
  case Intrinsic::powi:
  case Intrinsic::canonicalize:
    return true;
  default:
    return false;
  }
}

/// Identifies if the vector form of the intrinsic has a scalar operand.
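/// For example, the exponent operand (index 1) of llvm.powi remains a scalar
/// i32 even when the intrinsic itself is vectorized.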
bool llvm::hasVectorInstrinsicScalarOpd(Intrinsic::ID ID,
                                        unsigned ScalarOpdIdx) {
  switch (ID) {
  case Intrinsic::abs:
  case Intrinsic::ctlz:
  case Intrinsic::cttz:
  case Intrinsic::powi:
    return (ScalarOpdIdx == 1);
  case Intrinsic::smul_fix:
  case Intrinsic::smul_fix_sat:
  case Intrinsic::umul_fix:
  case Intrinsic::umul_fix_sat:
    return (ScalarOpdIdx == 2);
  default:
    return false;
  }
}

bool llvm::hasVectorInstrinsicOverloadedScalarOpd(Intrinsic::ID ID,
                                                  unsigned ScalarOpdIdx) {
  switch (ID) {
  case Intrinsic::powi:
    return (ScalarOpdIdx == 1);
  default:
    return false;
  }
}

/// Returns the intrinsic ID for the given call.
/// For the input call instruction it finds the corresponding intrinsic and
/// returns its ID; if no such intrinsic is found, it returns not_intrinsic.
Intrinsic::ID llvm::getVectorIntrinsicIDForCall(const CallInst *CI,
                                                const TargetLibraryInfo *TLI) {
  Intrinsic::ID ID = getIntrinsicForCallSite(*CI, TLI);
  if (ID == Intrinsic::not_intrinsic)
    return Intrinsic::not_intrinsic;

  if (isTriviallyVectorizable(ID) || ID == Intrinsic::lifetime_start ||
      ID == Intrinsic::lifetime_end || ID == Intrinsic::assume ||
      ID == Intrinsic::experimental_noalias_scope_decl ||
      ID == Intrinsic::sideeffect || ID == Intrinsic::pseudoprobe)
    return ID;
  return Intrinsic::not_intrinsic;
}

/// Find the operand of the GEP that should be checked for consecutive
/// stores. This ignores trailing indices that have no effect on the final
/// pointer.
unsigned llvm::getGEPInductionOperand(const GetElementPtrInst *Gep) {
  const DataLayout &DL = Gep->getModule()->getDataLayout();
  unsigned LastOperand = Gep->getNumOperands() - 1;
  TypeSize GEPAllocSize = DL.getTypeAllocSize(Gep->getResultElementType());

  // Walk backwards and try to peel off zeros.
  while (LastOperand > 1 && match(Gep->getOperand(LastOperand), m_Zero())) {
    // Find the type we're currently indexing into.
    gep_type_iterator GEPTI = gep_type_begin(Gep);
    std::advance(GEPTI, LastOperand - 2);

    // If it's a type with the same allocation size as the result of the GEP we
    // can peel off the zero index.
    if (DL.getTypeAllocSize(GEPTI.getIndexedType()) != GEPAllocSize)
      break;
    --LastOperand;
  }

  return LastOperand;
}

/// If the argument is a GEP, then returns the operand identified by
/// getGEPInductionOperand. However, if there is some other non-loop-invariant
/// operand, it returns that instead.
Value *llvm::stripGetElementPtr(Value *Ptr, ScalarEvolution *SE, Loop *Lp) {
  GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr);
  if (!GEP)
    return Ptr;

  unsigned InductionOperand = getGEPInductionOperand(GEP);

  // Check that all of the gep indices are uniform except for our induction
  // operand.
  for (unsigned i = 0, e = GEP->getNumOperands(); i != e; ++i)
    if (i != InductionOperand &&
        !SE->isLoopInvariant(SE->getSCEV(GEP->getOperand(i)), Lp))
      return Ptr;
  return GEP->getOperand(InductionOperand);
}

/// If a value has only one user that is a CastInst, return it.
Value *llvm::getUniqueCastUse(Value *Ptr, Loop *Lp, Type *Ty) {
  Value *UniqueCast = nullptr;
  for (User *U : Ptr->users()) {
    CastInst *CI = dyn_cast<CastInst>(U);
    if (CI && CI->getType() == Ty) {
      if (!UniqueCast)
        UniqueCast = CI;
      else
        return nullptr;
    }
  }
  return UniqueCast;
}

/// Get the stride of a pointer access in a loop. Looks for symbolic
/// strides "a[i*stride]". Returns the symbolic stride, or null otherwise.
Value *llvm::getStrideFromPointer(Value *Ptr, ScalarEvolution *SE, Loop *Lp) {
  auto *PtrTy = dyn_cast<PointerType>(Ptr->getType());
  if (!PtrTy || PtrTy->isAggregateType())
    return nullptr;

  // Try to strip off a GEP instruction to make the pointer (actually the index
  // at this point) easier to analyze. If OrigPtr is equal to Ptr we are
  // analyzing the pointer; otherwise, we are analyzing the index.
  Value *OrigPtr = Ptr;

  // The size of the pointer access.
  int64_t PtrAccessSize = 1;

  Ptr = stripGetElementPtr(Ptr, SE, Lp);
  const SCEV *V = SE->getSCEV(Ptr);

  if (Ptr != OrigPtr)
    // Strip off casts.
    while (const SCEVIntegralCastExpr *C = dyn_cast<SCEVIntegralCastExpr>(V))
      V = C->getOperand();

  const SCEVAddRecExpr *S = dyn_cast<SCEVAddRecExpr>(V);
  if (!S)
    return nullptr;

  V = S->getStepRecurrence(*SE);
  if (!V)
    return nullptr;

  // Strip off the size of access multiplication if we are still analyzing the
  // pointer.
  if (OrigPtr == Ptr) {
    if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(V)) {
      if (M->getOperand(0)->getSCEVType() != scConstant)
        return nullptr;

      const APInt &APStepVal = cast<SCEVConstant>(M->getOperand(0))->getAPInt();

      // Huge step value - give up.
      if (APStepVal.getBitWidth() > 64)
        return nullptr;

      int64_t StepVal = APStepVal.getSExtValue();
      if (PtrAccessSize != StepVal)
        return nullptr;
      V = M->getOperand(1);
    }
  }

  // Strip off casts.
  Type *StripedOffRecurrenceCast = nullptr;
  if (const SCEVIntegralCastExpr *C = dyn_cast<SCEVIntegralCastExpr>(V)) {
    StripedOffRecurrenceCast = C->getType();
    V = C->getOperand();
  }

  // Look for the loop invariant symbolic value.
  const SCEVUnknown *U = dyn_cast<SCEVUnknown>(V);
  if (!U)
    return nullptr;

  Value *Stride = U->getValue();
  if (!Lp->isLoopInvariant(Stride))
    return nullptr;

  // If we have stripped off the recurrence cast we have to make sure that we
  // return the value that is used in this loop so that we can replace it later.
  if (StripedOffRecurrenceCast)
    Stride = getUniqueCastUse(Stride, Lp, StripedOffRecurrenceCast);

  return Stride;
}

/// Given a vector and an element number, see if the scalar value is
/// already around as a register, for example if it were inserted then extracted
/// from the vector.
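///
/// For example (illustrative), given
///   %v = insertelement <4 x float> %w, float %x, i32 2
/// findScalarElement(%v, 2) returns the scalar %x without touching the vector.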
Value *llvm::findScalarElement(Value *V, unsigned EltNo) {
  assert(V->getType()->isVectorTy() && "Not looking at a vector?");
  VectorType *VTy = cast<VectorType>(V->getType());
  // For fixed-length vector, return undef for out of range access.
  if (auto *FVTy = dyn_cast<FixedVectorType>(VTy)) {
    unsigned Width = FVTy->getNumElements();
    if (EltNo >= Width)
      return UndefValue::get(FVTy->getElementType());
  }

  if (Constant *C = dyn_cast<Constant>(V))
    return C->getAggregateElement(EltNo);

  if (InsertElementInst *III = dyn_cast<InsertElementInst>(V)) {
    // If this is an insert to a variable element, we don't know what it is.
    if (!isa<ConstantInt>(III->getOperand(2)))
      return nullptr;
    unsigned IIElt = cast<ConstantInt>(III->getOperand(2))->getZExtValue();

    // If this is an insert to the element we are looking for, return the
    // inserted value.
    if (EltNo == IIElt)
      return III->getOperand(1);

    // Guard against infinite loop on malformed, unreachable IR.
    if (III == III->getOperand(0))
      return nullptr;

    // Otherwise, the insertelement doesn't modify the value, recurse on its
    // vector input.
    return findScalarElement(III->getOperand(0), EltNo);
  }

  ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(V);
  // Restrict the following transformation to fixed-length vector.
  if (SVI && isa<FixedVectorType>(SVI->getType())) {
    unsigned LHSWidth =
        cast<FixedVectorType>(SVI->getOperand(0)->getType())->getNumElements();
    int InEl = SVI->getMaskValue(EltNo);
    if (InEl < 0)
      return UndefValue::get(VTy->getElementType());
    if (InEl < (int)LHSWidth)
      return findScalarElement(SVI->getOperand(0), InEl);
    return findScalarElement(SVI->getOperand(1), InEl - LHSWidth);
  }

  // Extract a value from a vector add operation with a constant zero.
  // TODO: Use getBinOpIdentity() to generalize this.
  Value *Val; Constant *C;
  if (match(V, m_Add(m_Value(Val), m_Constant(C))))
    if (Constant *Elt = C->getAggregateElement(EltNo))
      if (Elt->isNullValue())
        return findScalarElement(Val, EltNo);

  // If the vector is a splat then we can trivially find the scalar element.
  if (isa<ScalableVectorType>(VTy))
    if (Value *Splat = getSplatValue(V))
      if (EltNo < VTy->getElementCount().getKnownMinValue())
        return Splat;

  // Otherwise, we don't know.
  return nullptr;
}

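/// For example (illustrative), the mask <3, -1, 3, 3> (-1 denoting an
/// undefined element) has splat index 3, whereas <0, 1, 0, 1> is not a splat
/// and yields -1.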
int llvm::getSplatIndex(ArrayRef<int> Mask) {
  int SplatIndex = -1;
  for (int M : Mask) {
    // Ignore invalid (undefined) mask elements.
    if (M < 0)
      continue;

    // There can be only 1 non-negative mask element value if this is a splat.
    if (SplatIndex != -1 && SplatIndex != M)
      return -1;

    // Initialize the splat index to the 1st non-negative mask element.
    SplatIndex = M;
  }
  assert((SplatIndex == -1 || SplatIndex >= 0) && "Negative index?");
  return SplatIndex;
}

/// Get splat value if the input is a splat vector or return nullptr.
/// This function is not fully general. It checks only 2 cases:
/// the input value is (1) a splat constant vector or (2) a sequence
/// of instructions that broadcasts a scalar at element 0.
Value *llvm::getSplatValue(const Value *V) {
  if (isa<VectorType>(V->getType()))
    if (auto *C = dyn_cast<Constant>(V))
      return C->getSplatValue();

  // shuf (inselt ?, Splat, 0), ?, <0, undef, 0, ...>
  Value *Splat;
  if (match(V,
            m_Shuffle(m_InsertElt(m_Value(), m_Value(Splat), m_ZeroInt()),
                      m_Value(), m_ZeroMask())))
    return Splat;

  return nullptr;
}

bool llvm::isSplatValue(const Value *V, int Index, unsigned Depth) {
  assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");

  if (isa<VectorType>(V->getType())) {
    if (isa<UndefValue>(V))
      return true;
    // FIXME: We can allow undefs, but if Index was specified, we may want to
    //        check that the constant is defined at that index.
    if (auto *C = dyn_cast<Constant>(V))
      return C->getSplatValue() != nullptr;
  }

  if (auto *Shuf = dyn_cast<ShuffleVectorInst>(V)) {
    // FIXME: We can safely allow undefs here. If Index was specified, we will
    //        check that the mask elt is defined at the required index.
    if (!is_splat(Shuf->getShuffleMask()))
      return false;

    // Match any index.
    if (Index == -1)
      return true;

    // Match a specific element. The mask should be defined at and match the
    // specified index.
    return Shuf->getMaskValue(Index) == Index;
  }

  // The remaining tests are all recursive, so bail out if we hit the limit.
  if (Depth++ == MaxAnalysisRecursionDepth)
    return false;

  // If both operands of a binop are splats, the result is a splat.
  Value *X, *Y, *Z;
  if (match(V, m_BinOp(m_Value(X), m_Value(Y))))
    return isSplatValue(X, Index, Depth) && isSplatValue(Y, Index, Depth);

  // If all operands of a select are splats, the result is a splat.
  if (match(V, m_Select(m_Value(X), m_Value(Y), m_Value(Z))))
    return isSplatValue(X, Index, Depth) && isSplatValue(Y, Index, Depth) &&
           isSplatValue(Z, Index, Depth);

  // TODO: Add support for unary ops (fneg), casts, intrinsics (overflow ops).

  return false;
}

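// Illustrative example: with Scale = 2, the mask <1, -1, 0> is narrowed to
// <2, 3, -1, -1, 0, 1> (each defined element e expands to Scale consecutive
// elements starting at Scale * e).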
void llvm::narrowShuffleMaskElts(int Scale, ArrayRef<int> Mask,
                                 SmallVectorImpl<int> &ScaledMask) {
  assert(Scale > 0 && "Unexpected scaling factor");

  // Fast-path: if no scaling, then it is just a copy.
  if (Scale == 1) {
    ScaledMask.assign(Mask.begin(), Mask.end());
    return;
  }

  ScaledMask.clear();
  for (int MaskElt : Mask) {
    if (MaskElt >= 0) {
      assert(((uint64_t)Scale * MaskElt + (Scale - 1)) <= INT32_MAX &&
             "Overflowed 32-bits");
    }
    for (int SliceElt = 0; SliceElt != Scale; ++SliceElt)
      ScaledMask.push_back(MaskElt < 0 ? MaskElt : Scale * MaskElt + SliceElt);
  }
}

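// Illustrative example: with Scale = 2, the mask <2, 3, 0, 1> widens to
// <1, 0>, while <0, 1, 1, 0> cannot be widened (the second slice does not
// start at a multiple of Scale) and the function returns false.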
bool llvm::widenShuffleMaskElts(int Scale, ArrayRef<int> Mask,
                                SmallVectorImpl<int> &ScaledMask) {
  assert(Scale > 0 && "Unexpected scaling factor");

  // Fast-path: if no scaling, then it is just a copy.
  if (Scale == 1) {
    ScaledMask.assign(Mask.begin(), Mask.end());
    return true;
  }

  // We must map the original elements down evenly to a type with fewer elements.
  int NumElts = Mask.size();
  if (NumElts % Scale != 0)
    return false;

  ScaledMask.clear();
  ScaledMask.reserve(NumElts / Scale);

  // Step through the input mask by splitting into Scale-sized slices.
  do {
    ArrayRef<int> MaskSlice = Mask.take_front(Scale);
    assert((int)MaskSlice.size() == Scale && "Expected Scale-sized slice.");

    // The first element of the slice determines how we evaluate this slice.
    int SliceFront = MaskSlice.front();
    if (SliceFront < 0) {
      // Negative values (undef or other "sentinel" values) must be equal across
      // the entire slice.
      if (!is_splat(MaskSlice))
        return false;
      ScaledMask.push_back(SliceFront);
    } else {
      // A positive mask element must be cleanly divisible.
      if (SliceFront % Scale != 0)
        return false;
      // Elements of the slice must be consecutive.
      for (int i = 1; i < Scale; ++i)
        if (MaskSlice[i] != SliceFront + i)
          return false;
      ScaledMask.push_back(SliceFront / Scale);
    }
    Mask = Mask.drop_front(Scale);
  } while (!Mask.empty());

  assert((int)ScaledMask.size() * Scale == NumElts && "Unexpected scaled mask");

  // All elements of the original mask can be scaled down to map to the elements
  // of a mask with wider elements.
  return true;
}

MapVector<Instruction *, uint64_t>
llvm::computeMinimumValueSizes(ArrayRef<BasicBlock *> Blocks, DemandedBits &DB,
                               const TargetTransformInfo *TTI) {

  // DemandedBits will give us every value's live-out bits. But we want
  // to ensure no extra casts would need to be inserted, so every DAG
  // of connected values must have the same minimum bitwidth.
  EquivalenceClasses<Value *> ECs;
  SmallVector<Value *, 16> Worklist;
  SmallPtrSet<Value *, 4> Roots;
  SmallPtrSet<Value *, 16> Visited;
  DenseMap<Value *, uint64_t> DBits;
  SmallPtrSet<Instruction *, 4> InstructionSet;
  MapVector<Instruction *, uint64_t> MinBWs;

  // Determine the roots. We work bottom-up, from truncs or icmps.
  bool SeenExtFromIllegalType = false;
  for (auto *BB : Blocks)
    for (auto &I : *BB) {
      InstructionSet.insert(&I);

      if (TTI && (isa<ZExtInst>(&I) || isa<SExtInst>(&I)) &&
          !TTI->isTypeLegal(I.getOperand(0)->getType()))
        SeenExtFromIllegalType = true;

      // Only deal with non-vector integers up to 64-bits wide.
      if ((isa<TruncInst>(&I) || isa<ICmpInst>(&I)) &&
          !I.getType()->isVectorTy() &&
          I.getOperand(0)->getType()->getScalarSizeInBits() <= 64) {
        // Don't make work for ourselves. If we know the loaded type is legal,
        // don't add it to the worklist.
        if (TTI && isa<TruncInst>(&I) && TTI->isTypeLegal(I.getType()))
          continue;

        Worklist.push_back(&I);
        Roots.insert(&I);
      }
    }
  // Early exit.
  if (Worklist.empty() || (TTI && !SeenExtFromIllegalType))
    return MinBWs;

  // Now proceed breadth-first, unioning values together.
  while (!Worklist.empty()) {
    Value *Val = Worklist.pop_back_val();
    Value *Leader = ECs.getOrInsertLeaderValue(Val);

    if (Visited.count(Val))
      continue;
    Visited.insert(Val);

    // Non-instructions terminate a chain successfully.
    if (!isa<Instruction>(Val))
      continue;
    Instruction *I = cast<Instruction>(Val);

    // If we encounter a type that is larger than 64 bits, we can't represent
    // it so bail out.
    if (DB.getDemandedBits(I).getBitWidth() > 64)
      return MapVector<Instruction *, uint64_t>();

    uint64_t V = DB.getDemandedBits(I).getZExtValue();
    DBits[Leader] |= V;
    DBits[I] = V;

    // Casts, loads and instructions outside of our range terminate a chain
    // successfully.
    if (isa<SExtInst>(I) || isa<ZExtInst>(I) || isa<LoadInst>(I) ||
        !InstructionSet.count(I))
      continue;

    // Unsafe casts terminate a chain unsuccessfully. We can't do anything
    // useful with bitcasts, ptrtoints or inttoptrs and it'd be unsafe to
    // transform anything that relies on them.
    if (isa<BitCastInst>(I) || isa<PtrToIntInst>(I) || isa<IntToPtrInst>(I) ||
        !I->getType()->isIntegerTy()) {
      DBits[Leader] |= ~0ULL;
      continue;
    }

    // We don't modify the types of PHIs. Reductions will already have been
    // truncated if possible, and inductions' sizes will have been chosen by
    // indvars.
    if (isa<PHINode>(I))
      continue;

    if (DBits[Leader] == ~0ULL)
      // All bits demanded, no point continuing.
      continue;

    for (Value *O : cast<User>(I)->operands()) {
      ECs.unionSets(Leader, O);
      Worklist.push_back(O);
    }
  }

  // Now we've discovered all values, walk them to see if there are
  // any users we didn't see. If there are, we can't optimize that
  // chain.
  for (auto &I : DBits)
    for (auto *U : I.first->users())
      if (U->getType()->isIntegerTy() && DBits.count(U) == 0)
        DBits[ECs.getOrInsertLeaderValue(I.first)] |= ~0ULL;

  for (auto I = ECs.begin(), E = ECs.end(); I != E; ++I) {
    uint64_t LeaderDemandedBits = 0;
    for (Value *M : llvm::make_range(ECs.member_begin(I), ECs.member_end()))
      LeaderDemandedBits |= DBits[M];

    uint64_t MinBW = (sizeof(LeaderDemandedBits) * 8) -
                     llvm::countLeadingZeros(LeaderDemandedBits);
    // Round up to a power of 2
    if (!isPowerOf2_64((uint64_t)MinBW))
      MinBW = NextPowerOf2(MinBW);

    // We don't modify the types of PHIs. Reductions will already have been
    // truncated if possible, and inductions' sizes will have been chosen by
    // indvars.
    // If we are required to shrink a PHI, abandon this entire equivalence class.
    bool Abort = false;
    for (Value *M : llvm::make_range(ECs.member_begin(I), ECs.member_end()))
      if (isa<PHINode>(M) && MinBW < M->getType()->getScalarSizeInBits()) {
        Abort = true;
        break;
      }
    if (Abort)
      continue;

    for (Value *M : llvm::make_range(ECs.member_begin(I), ECs.member_end())) {
      if (!isa<Instruction>(M))
        continue;
      Type *Ty = M->getType();
      if (Roots.count(M))
        Ty = cast<Instruction>(M)->getOperand(0)->getType();
      if (MinBW < Ty->getScalarSizeInBits())
        MinBWs[cast<Instruction>(M)] = MinBW;
    }
  }

  return MinBWs;
}

/// Add all access groups in @p AccGroups to @p List.
template <typename ListT>
static void addToAccessGroupList(ListT &List, MDNode *AccGroups) {
  // Interpret an access group as a list containing itself.
  if (AccGroups->getNumOperands() == 0) {
    assert(isValidAsAccessGroup(AccGroups) && "Node must be an access group");
    List.insert(AccGroups);
    return;
  }

  for (auto &AccGroupListOp : AccGroups->operands()) {
    auto *Item = cast<MDNode>(AccGroupListOp.get());
    assert(isValidAsAccessGroup(Item) && "List item must be an access group");
    List.insert(Item);
  }
}

MDNode *llvm::uniteAccessGroups(MDNode *AccGroups1, MDNode *AccGroups2) {
  if (!AccGroups1)
    return AccGroups2;
  if (!AccGroups2)
    return AccGroups1;
  if (AccGroups1 == AccGroups2)
    return AccGroups1;

  SmallSetVector<Metadata *, 4> Union;
  addToAccessGroupList(Union, AccGroups1);
  addToAccessGroupList(Union, AccGroups2);

  if (Union.size() == 0)
    return nullptr;
  if (Union.size() == 1)
    return cast<MDNode>(Union.front());

  LLVMContext &Ctx = AccGroups1->getContext();
  return MDNode::get(Ctx, Union.getArrayRef());
}

MDNode *llvm::intersectAccessGroups(const Instruction *Inst1,
                                    const Instruction *Inst2) {
  bool MayAccessMem1 = Inst1->mayReadOrWriteMemory();
  bool MayAccessMem2 = Inst2->mayReadOrWriteMemory();

  if (!MayAccessMem1 && !MayAccessMem2)
    return nullptr;
  if (!MayAccessMem1)
    return Inst2->getMetadata(LLVMContext::MD_access_group);
  if (!MayAccessMem2)
    return Inst1->getMetadata(LLVMContext::MD_access_group);

  MDNode *MD1 = Inst1->getMetadata(LLVMContext::MD_access_group);
  MDNode *MD2 = Inst2->getMetadata(LLVMContext::MD_access_group);
  if (!MD1 || !MD2)
    return nullptr;
  if (MD1 == MD2)
    return MD1;

  // Use set for scalable 'contains' check.
  SmallPtrSet<Metadata *, 4> AccGroupSet2;
  addToAccessGroupList(AccGroupSet2, MD2);

  SmallVector<Metadata *, 4> Intersection;
  if (MD1->getNumOperands() == 0) {
    assert(isValidAsAccessGroup(MD1) && "Node must be an access group");
    if (AccGroupSet2.count(MD1))
      Intersection.push_back(MD1);
  } else {
    for (const MDOperand &Node : MD1->operands()) {
      auto *Item = cast<MDNode>(Node.get());
      assert(isValidAsAccessGroup(Item) && "List item must be an access group");
      if (AccGroupSet2.count(Item))
        Intersection.push_back(Item);
    }
  }

  if (Intersection.size() == 0)
    return nullptr;
  if (Intersection.size() == 1)
    return cast<MDNode>(Intersection.front());

  LLVMContext &Ctx = Inst1->getContext();
  return MDNode::get(Ctx, Intersection);
}

/// \returns \p I after propagating metadata from \p VL.
Instruction *llvm::propagateMetadata(Instruction *Inst, ArrayRef<Value *> VL) {
  if (VL.empty())
    return Inst;
  Instruction *I0 = cast<Instruction>(VL[0]);
  SmallVector<std::pair<unsigned, MDNode *>, 4> Metadata;
  I0->getAllMetadataOtherThanDebugLoc(Metadata);

  for (auto Kind : {LLVMContext::MD_tbaa, LLVMContext::MD_alias_scope,
                    LLVMContext::MD_noalias, LLVMContext::MD_fpmath,
                    LLVMContext::MD_nontemporal, LLVMContext::MD_invariant_load,
                    LLVMContext::MD_access_group}) {
    MDNode *MD = I0->getMetadata(Kind);

    for (int J = 1, E = VL.size(); MD && J != E; ++J) {
      const Instruction *IJ = cast<Instruction>(VL[J]);
      MDNode *IMD = IJ->getMetadata(Kind);
      switch (Kind) {
      case LLVMContext::MD_tbaa:
        MD = MDNode::getMostGenericTBAA(MD, IMD);
        break;
      case LLVMContext::MD_alias_scope:
        MD = MDNode::getMostGenericAliasScope(MD, IMD);
        break;
      case LLVMContext::MD_fpmath:
        MD = MDNode::getMostGenericFPMath(MD, IMD);
        break;
      case LLVMContext::MD_noalias:
      case LLVMContext::MD_nontemporal:
      case LLVMContext::MD_invariant_load:
        MD = MDNode::intersect(MD, IMD);
        break;
      case LLVMContext::MD_access_group:
        MD = intersectAccessGroups(Inst, IJ);
        break;
      default:
        llvm_unreachable("unhandled metadata");
      }
    }

    Inst->setMetadata(Kind, MD);
  }

  return Inst;
}

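// Illustrative example: for an interleave group with factor 3 whose member at
// index 1 is missing, and VF = 2, the gap mask produced below is
// <i1 true, i1 false, i1 true, i1 true, i1 false, i1 true>.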
Constant *
llvm::createBitMaskForGaps(IRBuilderBase &Builder, unsigned VF,
                           const InterleaveGroup<Instruction> &Group) {
  // All 1's means mask is not needed.
  if (Group.getNumMembers() == Group.getFactor())
    return nullptr;

  // TODO: support reversed access.
  assert(!Group.isReverse() && "Reversed group not supported.");

  SmallVector<Constant *, 16> Mask;
  for (unsigned i = 0; i < VF; i++)
    for (unsigned j = 0; j < Group.getFactor(); ++j) {
      unsigned HasMember = Group.getMember(j) ? 1 : 0;
      Mask.push_back(Builder.getInt1(HasMember));
    }

  return ConstantVector::get(Mask);
}

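// Illustrative example: createReplicatedMask(3, 4) produces
// <0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3>.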
llvm::SmallVector<int, 16>
llvm::createReplicatedMask(unsigned ReplicationFactor, unsigned VF) {
  SmallVector<int, 16> MaskVec;
  for (unsigned i = 0; i < VF; i++)
    for (unsigned j = 0; j < ReplicationFactor; j++)
      MaskVec.push_back(i);

  return MaskVec;
}

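// Illustrative example: createInterleaveMask(4, 2) produces
// <0, 4, 1, 5, 2, 6, 3, 7>.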
llvm::SmallVector<int, 16> llvm::createInterleaveMask(unsigned VF,
                                                      unsigned NumVecs) {
  SmallVector<int, 16> Mask;
  for (unsigned i = 0; i < VF; i++)
    for (unsigned j = 0; j < NumVecs; j++)
      Mask.push_back(j * VF + i);

  return Mask;
}

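// Illustrative example: createStrideMask(0, 2, 4) produces <0, 2, 4, 6>.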
llvm::SmallVector<int, 16>
llvm::createStrideMask(unsigned Start, unsigned Stride, unsigned VF) {
  SmallVector<int, 16> Mask;
  for (unsigned i = 0; i < VF; i++)
    Mask.push_back(Start + i * Stride);

  return Mask;
}

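// Illustrative example: createSequentialMask(0, 4, 4) produces
// <0, 1, 2, 3, -1, -1, -1, -1>, where -1 denotes an undef mask element.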
llvm::SmallVector<int, 16> llvm::createSequentialMask(unsigned Start,
                                                      unsigned NumInts,
                                                      unsigned NumUndefs) {
  SmallVector<int, 16> Mask;
  for (unsigned i = 0; i < NumInts; i++)
    Mask.push_back(Start + i);

  for (unsigned i = 0; i < NumUndefs; i++)
    Mask.push_back(-1);

  return Mask;
}

/// A helper function for concatenating vectors. This function concatenates two
/// vectors having the same element type. If the second vector has fewer
/// elements than the first, it is padded with undefs.
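///
/// For example (illustrative), concatenating <4 x i32> %a with <2 x i32> %b
/// first widens %b to <4 x i32> with two trailing undefs and then emits a
/// shuffle selecting elements <0, 1, 2, 3, 4, 5>, yielding a <6 x i32> result.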
static Value *concatenateTwoVectors(IRBuilderBase &Builder, Value *V1,
                                    Value *V2) {
  VectorType *VecTy1 = dyn_cast<VectorType>(V1->getType());
  VectorType *VecTy2 = dyn_cast<VectorType>(V2->getType());
  assert(VecTy1 && VecTy2 &&
         VecTy1->getScalarType() == VecTy2->getScalarType() &&
         "Expect two vectors with the same element type");

  unsigned NumElts1 = cast<FixedVectorType>(VecTy1)->getNumElements();
  unsigned NumElts2 = cast<FixedVectorType>(VecTy2)->getNumElements();
  assert(NumElts1 >= NumElts2 &&
         "Unexpected: the first vector has fewer elements");

  if (NumElts1 > NumElts2) {
    // Extend with UNDEFs.
    V2 = Builder.CreateShuffleVector(
        V2, createSequentialMask(0, NumElts2, NumElts1 - NumElts2));
  }

  return Builder.CreateShuffleVector(
      V1, V2, createSequentialMask(0, NumElts1 + NumElts2, 0));
}

Value *llvm::concatenateVectors(IRBuilderBase &Builder,
                                ArrayRef<Value *> Vecs) {
  unsigned NumVecs = Vecs.size();
  assert(NumVecs > 1 && "Should be at least two vectors");

  SmallVector<Value *, 8> ResList;
  ResList.append(Vecs.begin(), Vecs.end());
  do {
    SmallVector<Value *, 8> TmpList;
    for (unsigned i = 0; i < NumVecs - 1; i += 2) {
      Value *V0 = ResList[i], *V1 = ResList[i + 1];
      assert((V0->getType() == V1->getType() || i == NumVecs - 2) &&
             "Only the last vector may have a different type");

      TmpList.push_back(concatenateTwoVectors(Builder, V0, V1));
    }

    // Push the last vector if the total number of vectors is odd.
    if (NumVecs % 2 != 0)
      TmpList.push_back(ResList[NumVecs - 1]);

    ResList = TmpList;
    NumVecs = ResList.size();
  } while (NumVecs > 1);

  return ResList[0];
}

bool llvm::maskIsAllZeroOrUndef(Value *Mask) {
  assert(isa<VectorType>(Mask->getType()) &&
         isa<IntegerType>(Mask->getType()->getScalarType()) &&
         cast<IntegerType>(Mask->getType()->getScalarType())->getBitWidth() ==
             1 &&
         "Mask must be a vector of i1");

  auto *ConstMask = dyn_cast<Constant>(Mask);
  if (!ConstMask)
    return false;
  if (ConstMask->isNullValue() || isa<UndefValue>(ConstMask))
    return true;
  if (isa<ScalableVectorType>(ConstMask->getType()))
    return false;
  for (unsigned
           I = 0,
           E = cast<FixedVectorType>(ConstMask->getType())->getNumElements();
       I != E; ++I) {
    if (auto *MaskElt = ConstMask->getAggregateElement(I))
      if (MaskElt->isNullValue() || isa<UndefValue>(MaskElt))
        continue;
    return false;
  }
  return true;
}

bool llvm::maskIsAllOneOrUndef(Value *Mask) {
  assert(isa<VectorType>(Mask->getType()) &&
         isa<IntegerType>(Mask->getType()->getScalarType()) &&
         cast<IntegerType>(Mask->getType()->getScalarType())->getBitWidth() ==
             1 &&
         "Mask must be a vector of i1");

  auto *ConstMask = dyn_cast<Constant>(Mask);
  if (!ConstMask)
    return false;
  if (ConstMask->isAllOnesValue() || isa<UndefValue>(ConstMask))
    return true;
  if (isa<ScalableVectorType>(ConstMask->getType()))
    return false;
  for (unsigned
           I = 0,
           E = cast<FixedVectorType>(ConstMask->getType())->getNumElements();
       I != E; ++I) {
    if (auto *MaskElt = ConstMask->getAggregateElement(I))
      if (MaskElt->isAllOnesValue() || isa<UndefValue>(MaskElt))
        continue;
    return false;
  }
  return true;
}

/// TODO: This is a lot like known bits, but for
/// vectors. Is there something we can common this with?
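///
/// For example (illustrative), for the constant mask
/// <i1 true, i1 false, i1 true, i1 true> the returned APInt has bits
/// {0, 2, 3} set; a non-constant mask conservatively demands all lanes.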
APInt llvm::possiblyDemandedEltsInMask(Value *Mask) {
  assert(isa<FixedVectorType>(Mask->getType()) &&
         isa<IntegerType>(Mask->getType()->getScalarType()) &&
         cast<IntegerType>(Mask->getType()->getScalarType())->getBitWidth() ==
             1 &&
         "Mask must be a fixed width vector of i1");

  const unsigned VWidth =
      cast<FixedVectorType>(Mask->getType())->getNumElements();
  APInt DemandedElts = APInt::getAllOnes(VWidth);
  if (auto *CV = dyn_cast<ConstantVector>(Mask))
    for (unsigned i = 0; i < VWidth; i++)
      if (CV->getAggregateElement(i)->isNullValue())
        DemandedElts.clearBit(i);
  return DemandedElts;
}

bool InterleavedAccessInfo::isStrided(int Stride) {
  unsigned Factor = std::abs(Stride);
  return Factor >= 2 && Factor <= MaxInterleaveGroupFactor;
}

void InterleavedAccessInfo::collectConstStrideAccesses(
    MapVector<Instruction *, StrideDescriptor> &AccessStrideInfo,
    const ValueToValueMap &Strides) {
  auto &DL = TheLoop->getHeader()->getModule()->getDataLayout();

  // Since it's desired that the load/store instructions be maintained in
  // "program order" for the interleaved access analysis, we have to visit the
  // blocks in the loop in reverse postorder (i.e., in a topological order).
  // Such an ordering will ensure that any load/store that may be executed
  // before a second load/store will precede the second load/store in
  // AccessStrideInfo.
  LoopBlocksDFS DFS(TheLoop);
  DFS.perform(LI);
  for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO()))
    for (auto &I : *BB) {
      Value *Ptr = getLoadStorePointerOperand(&I);
      if (!Ptr)
        continue;
      Type *ElementTy = getLoadStoreType(&I);

      // We don't check wrapping here because we don't know yet if Ptr will be
      // part of a full group or a group with gaps. Checking wrapping for all
      // pointers (even those that end up in groups with no gaps) will be overly
      // conservative. For full groups, wrapping should be ok since if we would
      // wrap around the address space we would do a memory access at nullptr
      // even without the transformation. The wrapping checks are therefore
      // deferred until after we've formed the interleaved groups.
      int64_t Stride = getPtrStride(PSE, ElementTy, Ptr, TheLoop, Strides,
                                    /*Assume=*/true, /*ShouldCheckWrap=*/false);

      const SCEV *Scev = replaceSymbolicStrideSCEV(PSE, Strides, Ptr);
      uint64_t Size = DL.getTypeAllocSize(ElementTy);
      AccessStrideInfo[&I] = StrideDescriptor(Stride, Scev, Size,
                                              getLoadStoreAlignment(&I));
    }
}

// Analyze interleaved accesses and collect them into interleaved load and
// store groups.
//
// When generating code for an interleaved load group, we effectively hoist all
// loads in the group to the location of the first load in program order. When
// generating code for an interleaved store group, we sink all stores to the
// location of the last store. This code motion can change the order of load
// and store instructions and may break dependences.
//
// The code generation strategy mentioned above ensures that we won't violate
// any write-after-read (WAR) dependences.
//
// E.g., for the WAR dependence:  a = A[i];      // (1)
//                                A[i] = b;      // (2)
//
// The store group of (2) is always inserted at or below (2), and the load
// group of (1) is always inserted at or above (1). Thus, the instructions will
// never be reordered. All other dependences are checked to ensure the
// correctness of the instruction reordering.
//
// The algorithm visits all memory accesses in the loop in bottom-up program
// order. Program order is established by traversing the blocks in the loop in
// reverse postorder when collecting the accesses.
//
// We visit the memory accesses in bottom-up order because it can simplify the
// construction of store groups in the presence of write-after-write (WAW)
// dependences.
//
// E.g., for the WAW dependence:  A[i] = a;      // (1)
//                                A[i] = b;      // (2)
//                                A[i + 1] = c;  // (3)
//
// We will first create a store group with (3) and (2). (1) can't be added to
// this group because it and (2) are dependent. However, (1) can be grouped
// with other accesses that may precede it in program order. Note that a
// bottom-up order does not imply that WAW dependences should not be checked.
void InterleavedAccessInfo::analyzeInterleaving(
    bool EnablePredicatedInterleavedMemAccesses) {
  LLVM_DEBUG(dbgs() << "LV: Analyzing interleaved accesses...\n");
  const ValueToValueMap &Strides = LAI->getSymbolicStrides();

  // Holds all accesses with a constant stride.
  MapVector<Instruction *, StrideDescriptor> AccessStrideInfo;
  collectConstStrideAccesses(AccessStrideInfo, Strides);

  if (AccessStrideInfo.empty())
    return;

  // Collect the dependences in the loop.
  collectDependences();

  // Holds all interleaved store groups temporarily.
  SmallSetVector<InterleaveGroup<Instruction> *, 4> StoreGroups;
  // Holds all interleaved load groups temporarily.
  SmallSetVector<InterleaveGroup<Instruction> *, 4> LoadGroups;

  // Search in bottom-up program order for pairs of accesses (A and B) that can
  // form interleaved load or store groups. In the algorithm below, access A
  // precedes access B in program order. We initialize a group for B in the
  // outer loop of the algorithm, and then in the inner loop, we attempt to
  // insert each A into B's group if:
  //
  //  1. A and B have the same stride,
  //  2. A and B have the same memory object size, and
  //  3. A belongs in B's group according to its distance from B.
  //
  // Special care is taken to ensure group formation will not break any
  // dependences.
  for (auto BI = AccessStrideInfo.rbegin(), E = AccessStrideInfo.rend();
       BI != E; ++BI) {
    Instruction *B = BI->first;
    StrideDescriptor DesB = BI->second;

    // Initialize a group for B if it has an allowable stride. Even if we don't
    // create a group for B, we continue with the bottom-up algorithm to ensure
    // we don't break any of B's dependences.
    InterleaveGroup<Instruction> *Group = nullptr;
    if (isStrided(DesB.Stride) &&
        (!isPredicated(B->getParent()) || EnablePredicatedInterleavedMemAccesses)) {
      Group = getInterleaveGroup(B);
      if (!Group) {
        LLVM_DEBUG(dbgs() << "LV: Creating an interleave group with:" << *B
                          << '\n');
        Group = createInterleaveGroup(B, DesB.Stride, DesB.Alignment);
      }
      if (B->mayWriteToMemory())
        StoreGroups.insert(Group);
      else
        LoadGroups.insert(Group);
    }

    for (auto AI = std::next(BI); AI != E; ++AI) {
      Instruction *A = AI->first;
      StrideDescriptor DesA = AI->second;

      // Our code motion strategy implies that we can't have dependences
      // between accesses in an interleaved group and other accesses located
      // between the first and last member of the group. Note that this also
      // means that a group can't have more than one member at a given offset.
      // The accesses in a group can have dependences with other accesses, but
      // we must ensure we don't extend the boundaries of the group such that
      // we encompass those dependent accesses.
      //
      // For example, assume we have the sequence of accesses shown below in a
      // stride-2 loop:
      //
      //  (1, 2) is a group | A[i]   = a; // (1)
      //                    | A[i-1] = b; // (2) |
      //                      A[i-3] = c; // (3)
      //                      A[i]   = d; // (4) | (2, 4) is not a group
      //
      // Because accesses (2) and (3) are dependent, we can group (2) with (1)
      // but not with (4). If we did, the dependent access (3) would be within
      // the boundaries of the (2, 4) group.
      if (!canReorderMemAccessesForInterleavedGroups(&*AI, &*BI)) {
        // If a dependence exists and A is already in a group, we know that A
        // must be a store since A precedes B and WAR dependences are allowed.
        // Thus, A would be sunk below B. We release A's group to prevent this
        // illegal code motion. A will then be free to form another group with
        // instructions that precede it.
        if (isInterleaved(A)) {
          InterleaveGroup<Instruction> *StoreGroup = getInterleaveGroup(A);

          LLVM_DEBUG(dbgs() << "LV: Invalidated store group due to "
                               "dependence between " << *A << " and " << *B << '\n');

          StoreGroups.remove(StoreGroup);
          releaseGroup(StoreGroup);
        }

        // If a dependence exists and A is not already in a group (or it was
        // and we just released it), B might be hoisted above A (if B is a
        // load) or another store might be sunk below A (if B is a store). In
        // either case, we can't add additional instructions to B's group. B
        // will only form a group with instructions that it precedes.
        break;
      }

      // At this point, we've checked for illegal code motion. If either A or B
      // isn't strided, there's nothing left to do.
      if (!isStrided(DesA.Stride) || !isStrided(DesB.Stride))
        continue;

      // Ignore A if it's already in a group or isn't the same kind of memory
      // operation as B.
      // Note that mayReadFromMemory() isn't mutually exclusive with
      // mayWriteToMemory() in the case of atomic loads. We shouldn't see those
      // here; canVectorizeMemory() should have returned false - except when we
      // asked for optimization remarks.
      if (isInterleaved(A) ||
          (A->mayReadFromMemory() != B->mayReadFromMemory()) ||
          (A->mayWriteToMemory() != B->mayWriteToMemory()))
        continue;

      // Check rules 1 and 2. Ignore A if its stride or size is different from
      // that of B.
      if (DesA.Stride != DesB.Stride || DesA.Size != DesB.Size)
        continue;

      // Ignore A if the memory object of A and B don't belong to the same
      // address space
      if (getLoadStoreAddressSpace(A) != getLoadStoreAddressSpace(B))
        continue;

      // Calculate the distance from A to B.
      const SCEVConstant *DistToB = dyn_cast<SCEVConstant>(
          PSE.getSE()->getMinusSCEV(DesA.Scev, DesB.Scev));
      if (!DistToB)
        continue;
      int64_t DistanceToB = DistToB->getAPInt().getSExtValue();

      // Check rule 3. Ignore A if its distance to B is not a multiple of the
      // size.
      if (DistanceToB % static_cast<int64_t>(DesB.Size))
        continue;

      // All members of a predicated interleave-group must have the same predicate,
      // and currently must reside in the same BB.
      BasicBlock *BlockA = A->getParent();
      BasicBlock *BlockB = B->getParent();
      if ((isPredicated(BlockA) || isPredicated(BlockB)) &&
          (!EnablePredicatedInterleavedMemAccesses || BlockA != BlockB))
        continue;

      // The index of A is the index of B plus A's distance to B in multiples
      // of the size.
      int IndexA =
          Group->getIndex(B) + DistanceToB / static_cast<int64_t>(DesB.Size);

      // Try to insert A into B's group.
      if (Group->insertMember(A, IndexA, DesA.Alignment)) {
        LLVM_DEBUG(dbgs() << "LV: Inserted:" << *A << '\n'
                          << "    into the interleave group with" << *B
                          << '\n');
        InterleaveGroupMap[A] = Group;

        // Set the first load in program order as the insert position.
        if (A->mayReadFromMemory())
          Group->setInsertPos(A);
      }
    } // Iteration over A accesses.
  }   // Iteration over B accesses.

  auto InvalidateGroupIfMemberMayWrap = [&](InterleaveGroup<Instruction> *Group,
                                            int Index,
                                            std::string FirstOrLast) -> bool {
    Instruction *Member = Group->getMember(Index);
    assert(Member && "Group member does not exist");
    Value *MemberPtr = getLoadStorePointerOperand(Member);
    Type *AccessTy = getLoadStoreType(Member);
    if (getPtrStride(PSE, AccessTy, MemberPtr, TheLoop, Strides,
                     /*Assume=*/false, /*ShouldCheckWrap=*/true))
      return false;
    LLVM_DEBUG(dbgs() << "LV: Invalidate candidate interleaved group due to "
                      << FirstOrLast
                      << " group member potentially pointer-wrapping.\n");
    releaseGroup(Group);
    return true;
  };

  // Remove interleaved groups with gaps whose memory
  // accesses may wrap around. We have to revisit the getPtrStride analysis,
  // this time with ShouldCheckWrap=true, since collectConstStrideAccesses does
  // not check wrapping (see documentation there).
  // FORNOW we use Assume=false;
  // TODO: Change to Assume=true but making sure we don't exceed the threshold
  // of runtime SCEV assumptions checks (thereby potentially failing to
  // vectorize altogether).
  // Additional optional optimizations:
  // TODO: If we are peeling the loop and we know that the first pointer doesn't
  // wrap then we can deduce that all pointers in the group don't wrap.
  // This means that we can forcefully peel the loop in order to only have to
  // check the first pointer for no-wrap. When we'll change to use Assume=true
  // we'll only need at most one runtime check per interleaved group.
  for (auto *Group : LoadGroups) {
    // Case 1: A full group. We can skip the checks; for full groups, if the wide
    // load would wrap around the address space we would do a memory access at
    // nullptr even without the transformation.
    if (Group->getNumMembers() == Group->getFactor())
      continue;

    // Case 2: If first and last members of the group don't wrap this implies
    // that all the pointers in the group don't wrap.
    // So we check only group member 0 (which is always guaranteed to exist),
    // and group member Factor - 1; If the latter doesn't exist we rely on
    // peeling (if it is a non-reversed access -- see Case 3).
    if (InvalidateGroupIfMemberMayWrap(Group, 0, std::string("first")))
      continue;
    if (Group->getMember(Group->getFactor() - 1))
      InvalidateGroupIfMemberMayWrap(Group, Group->getFactor() - 1,
                                     std::string("last"));
    else {
      // Case 3: A non-reversed interleaved load group with gaps: We need
      // to execute at least one scalar epilogue iteration. This will ensure
      // we don't speculatively access memory out-of-bounds. We only need
      // to look for a member at index factor - 1, since every group must have
      // a member at index zero.
      if (Group->isReverse()) {
        LLVM_DEBUG(
            dbgs() << "LV: Invalidate candidate interleaved group due to "
                      "a reverse access with gaps.\n");
        releaseGroup(Group);
        continue;
      }
      LLVM_DEBUG(
          dbgs() << "LV: Interleaved group requires epilogue iteration.\n");
      RequiresScalarEpilogue = true;
    }
  }

  for (auto *Group : StoreGroups) {
    // Case 1: A full group. We can skip the checks; for full groups, if the wide
    // store would wrap around the address space we would do a memory access at
    // nullptr even without the transformation.
    if (Group->getNumMembers() == Group->getFactor())
      continue;

    // Interleave-store-group with gaps is implemented using masked wide store.
    // Remove interleaved store groups with gaps if
    // masked-interleaved-accesses are not enabled by the target.
    if (!EnablePredicatedInterleavedMemAccesses) {
      LLVM_DEBUG(
          dbgs() << "LV: Invalidate candidate interleaved store group due "
                    "to gaps.\n");
      releaseGroup(Group);
      continue;
    }

    // Case 2: If first and last members of the group don't wrap this implies
    // that all the pointers in the group don't wrap.
    // So we check only group member 0 (which is always guaranteed to exist),
    // and the last group member. Case 3 (scalar epilog) is not relevant for
    // stores with gaps, which are implemented with masked-store (rather than
    // speculative access, as in loads).
    if (InvalidateGroupIfMemberMayWrap(Group, 0, std::string("first")))
      continue;
    for (int Index = Group->getFactor() - 1; Index > 0; Index--)
      if (Group->getMember(Index)) {
        InvalidateGroupIfMemberMayWrap(Group, Index, std::string("last"));
        break;
      }
  }
}

void InterleavedAccessInfo::invalidateGroupsRequiringScalarEpilogue() {
  // If no group had triggered the requirement to create an epilogue loop,
  // there is nothing to do.
  if (!requiresScalarEpilogue())
    return;

  bool ReleasedGroup = false;
  // Release groups requiring scalar epilogues. Note that this also removes them
  // from InterleaveGroups.
  for (auto *Group : make_early_inc_range(InterleaveGroups)) {
    if (!Group->requiresScalarEpilogue())
      continue;
    LLVM_DEBUG(
        dbgs()
        << "LV: Invalidate candidate interleaved group due to gaps that "
           "require a scalar epilogue (not allowed under optsize) and cannot "
           "be masked (not enabled). \n");
    releaseGroup(Group);
    ReleasedGroup = true;
  }
  assert(ReleasedGroup && "At least one group must be invalidated, as a "
                          "scalar epilogue was required");
  (void)ReleasedGroup;
  RequiresScalarEpilogue = false;
}

template <typename InstT>
void InterleaveGroup<InstT>::addMetadata(InstT *NewInst) const {
  llvm_unreachable("addMetadata can only be used for Instruction");
}

namespace llvm {
template <>
void InterleaveGroup<Instruction>::addMetadata(Instruction *NewInst) const {
  SmallVector<Value *, 4> VL;
  std::transform(Members.begin(), Members.end(), std::back_inserter(VL),
                 [](std::pair<int, Instruction *> p) { return p.second; });
  propagateMetadata(NewInst, VL);
}
}

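// Illustrative example (assuming the LLVM ISA token prints as "_LLVM_"):
// mangleTLIVectorName("vsinf", "sinf", 1, ElementCount::getFixed(4)) produces
// "_ZGV_LLVM_N4v_sinf(vsinf)".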
std::string VFABI::mangleTLIVectorName(StringRef VectorName,
                                       StringRef ScalarName, unsigned numArgs,
                                       ElementCount VF) {
  SmallString<256> Buffer;
  llvm::raw_svector_ostream Out(Buffer);
  Out << "_ZGV" << VFABI::_LLVM_ << "N";
  if (VF.isScalable())
    Out << 'x';
  else
    Out << VF.getFixedValue();
  for (unsigned I = 0; I < numArgs; ++I)
    Out << "v";
  Out << "_" << ScalarName << "(" << VectorName << ")";
  return std::string(Out.str());
}

void VFABI::getVectorVariantNames(
    const CallInst &CI, SmallVectorImpl<std::string> &VariantMappings) {
  const StringRef S = CI.getFnAttr(VFABI::MappingsAttrName).getValueAsString();
  if (S.empty())
    return;

  SmallVector<StringRef, 8> ListAttr;
  S.split(ListAttr, ",");

  for (auto &S : SetVector<StringRef>(ListAttr.begin(), ListAttr.end())) {
#ifndef NDEBUG
    LLVM_DEBUG(dbgs() << "VFABI: adding mapping '" << S << "'\n");
    Optional<VFInfo> Info = VFABI::tryDemangleForVFABI(S, *(CI.getModule()));
    assert(Info.hasValue() && "Invalid name for a VFABI variant.");
    assert(CI.getModule()->getFunction(Info.getValue().VectorName) &&
           "Vector function is missing.");
#endif
    VariantMappings.push_back(std::string(S));
  }
}

bool VFShape::hasValidParameterList() const {
  for (unsigned Pos = 0, NumParams = Parameters.size(); Pos < NumParams;
       ++Pos) {
    assert(Parameters[Pos].ParamPos == Pos && "Broken parameter list.");

    switch (Parameters[Pos].ParamKind) {
    default: // Nothing to check.
      break;
    case VFParamKind::OMP_Linear:
    case VFParamKind::OMP_LinearRef:
    case VFParamKind::OMP_LinearVal:
    case VFParamKind::OMP_LinearUVal:
      // Compile time linear steps must be non-zero.
      if (Parameters[Pos].LinearStepOrPos == 0)
        return false;
      break;
    case VFParamKind::OMP_LinearPos:
    case VFParamKind::OMP_LinearRefPos:
    case VFParamKind::OMP_LinearValPos:
    case VFParamKind::OMP_LinearUValPos:
      // The runtime linear step must be referring to some other
      // parameters in the signature.
      if (Parameters[Pos].LinearStepOrPos >= int(NumParams))
        return false;
      // The linear step parameter must be marked as uniform.
      if (Parameters[Parameters[Pos].LinearStepOrPos].ParamKind !=
          VFParamKind::OMP_Uniform)
        return false;
      // The linear step parameter can't point at itself.
      if (Parameters[Pos].LinearStepOrPos == int(Pos))
        return false;
      break;
    case VFParamKind::GlobalPredicate:
      // The global predicate must be unique. It can be placed anywhere in the
      // signature.
      for (unsigned NextPos = Pos + 1; NextPos < NumParams; ++NextPos)
        if (Parameters[NextPos].ParamKind == VFParamKind::GlobalPredicate)
          return false;
      break;
    }
  }
  return true;
}