//===----------- VectorUtils.cpp - Vectorizer utility functions -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines vectorizer utilities.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/VectorUtils.h"
#include "llvm/ADT/EquivalenceClasses.h"
#include "llvm/Analysis/DemandedBits.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/CommandLine.h"

#define DEBUG_TYPE "vectorutils"

using namespace llvm;
using namespace llvm::PatternMatch;

/// Maximum factor for an interleaved memory access.
static cl::opt<unsigned> MaxInterleaveGroupFactor(
    "max-interleave-group-factor", cl::Hidden,
    cl::desc("Maximum factor for an interleaved access group (default = 8)"),
    cl::init(8));

/// Return true if all of the intrinsic's arguments and return type are scalars
/// for the scalar form of the intrinsic, and vectors for the vector form of the
/// intrinsic (except operands that are marked as always being scalar by
/// hasVectorInstrinsicScalarOpd).
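///
/// Illustrative example (not part of the original comment): for
/// Intrinsic::ctlz, the scalar form is `i32 @llvm.ctlz.i32(i32 %x, i1 %zu)`
/// and the vector form is `<4 x i32> @llvm.ctlz.v4i32(<4 x i32> %x, i1 %zu)`;
/// every value operand and the result widen together, so the intrinsic is
/// trivially vectorizable.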
bool llvm::isTriviallyVectorizable(Intrinsic::ID ID) {
  switch (ID) {
  case Intrinsic::bswap: // Begin integer bit-manipulation.
  case Intrinsic::bitreverse:
  case Intrinsic::ctpop:
  case Intrinsic::ctlz:
  case Intrinsic::cttz:
  case Intrinsic::fshl:
  case Intrinsic::fshr:
  case Intrinsic::sadd_sat:
  case Intrinsic::ssub_sat:
  case Intrinsic::uadd_sat:
  case Intrinsic::usub_sat:
  case Intrinsic::smul_fix:
  case Intrinsic::smul_fix_sat:
  case Intrinsic::umul_fix:
  case Intrinsic::umul_fix_sat:
  case Intrinsic::sqrt: // Begin floating-point.
  case Intrinsic::sin:
  case Intrinsic::cos:
  case Intrinsic::exp:
  case Intrinsic::exp2:
  case Intrinsic::log:
  case Intrinsic::log10:
  case Intrinsic::log2:
  case Intrinsic::fabs:
  case Intrinsic::minnum:
  case Intrinsic::maxnum:
  case Intrinsic::minimum:
  case Intrinsic::maximum:
  case Intrinsic::copysign:
  case Intrinsic::floor:
  case Intrinsic::ceil:
  case Intrinsic::trunc:
  case Intrinsic::rint:
  case Intrinsic::nearbyint:
  case Intrinsic::round:
  case Intrinsic::roundeven:
  case Intrinsic::pow:
  case Intrinsic::fma:
  case Intrinsic::fmuladd:
  case Intrinsic::powi:
  case Intrinsic::canonicalize:
    return true;
  default:
    return false;
  }
}

/// Identifies if the vector form of the intrinsic has a scalar operand.
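///
/// Illustrative example (not part of the original comment): the second
/// operand of ctlz/cttz (the is-zero-undef flag) and of powi (the integer
/// exponent) stays scalar in the vector form, as does the scale operand
/// (index 2) of the fixed-point multiply intrinsics.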
bool llvm::hasVectorInstrinsicScalarOpd(Intrinsic::ID ID,
                                        unsigned ScalarOpdIdx) {
  switch (ID) {
  case Intrinsic::ctlz:
  case Intrinsic::cttz:
  case Intrinsic::powi:
    return (ScalarOpdIdx == 1);
  case Intrinsic::smul_fix:
  case Intrinsic::smul_fix_sat:
  case Intrinsic::umul_fix:
  case Intrinsic::umul_fix_sat:
    return (ScalarOpdIdx == 2);
  default:
    return false;
  }
}

/// Returns the intrinsic ID for a call.
/// For the input call instruction it finds the mapping intrinsic and returns
/// its ID; if it does not find one, it returns not_intrinsic.
Intrinsic::ID llvm::getVectorIntrinsicIDForCall(const CallInst *CI,
                                                const TargetLibraryInfo *TLI) {
  Intrinsic::ID ID = getIntrinsicForCallSite(*CI, TLI);
  if (ID == Intrinsic::not_intrinsic)
    return Intrinsic::not_intrinsic;

  if (isTriviallyVectorizable(ID) || ID == Intrinsic::lifetime_start ||
      ID == Intrinsic::lifetime_end || ID == Intrinsic::assume ||
      ID == Intrinsic::sideeffect)
    return ID;
  return Intrinsic::not_intrinsic;
}

/// Find the operand of the GEP that should be checked for consecutive
/// stores. This ignores trailing indices that have no effect on the final
/// pointer.
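///
/// Illustrative example (a sketch, not from the original comment): in
///   %gep = getelementptr inbounds [1 x i32], [1 x i32]* %A, i64 %i, i64 0
/// the trailing zero indexes a type whose allocation size matches the i32
/// result element, so it is peeled off and the induction operand is 1 (%i).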
unsigned llvm::getGEPInductionOperand(const GetElementPtrInst *Gep) {
  const DataLayout &DL = Gep->getModule()->getDataLayout();
  unsigned LastOperand = Gep->getNumOperands() - 1;
  unsigned GEPAllocSize = DL.getTypeAllocSize(Gep->getResultElementType());

  // Walk backwards and try to peel off zeros.
  while (LastOperand > 1 && match(Gep->getOperand(LastOperand), m_Zero())) {
    // Find the type we're currently indexing into.
    gep_type_iterator GEPTI = gep_type_begin(Gep);
    std::advance(GEPTI, LastOperand - 2);

    // If it's a type with the same allocation size as the result of the GEP we
    // can peel off the zero index.
    if (DL.getTypeAllocSize(GEPTI.getIndexedType()) != GEPAllocSize)
      break;
    --LastOperand;
  }

  return LastOperand;
}

/// If the argument is a GEP, then returns the operand identified by
/// getGEPInductionOperand. However, if there is some other non-loop-invariant
/// operand, it returns that instead.
Value *llvm::stripGetElementPtr(Value *Ptr, ScalarEvolution *SE, Loop *Lp) {
  GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr);
  if (!GEP)
    return Ptr;

  unsigned InductionOperand = getGEPInductionOperand(GEP);

  // Check that all of the gep indices are uniform except for our induction
  // operand.
  for (unsigned i = 0, e = GEP->getNumOperands(); i != e; ++i)
    if (i != InductionOperand &&
        !SE->isLoopInvariant(SE->getSCEV(GEP->getOperand(i)), Lp))
      return Ptr;
  return GEP->getOperand(InductionOperand);
}

/// If a value has only one user that is a CastInst, return it.
Value *llvm::getUniqueCastUse(Value *Ptr, Loop *Lp, Type *Ty) {
  Value *UniqueCast = nullptr;
  for (User *U : Ptr->users()) {
    CastInst *CI = dyn_cast<CastInst>(U);
    if (CI && CI->getType() == Ty) {
      if (!UniqueCast)
        UniqueCast = CI;
      else
        return nullptr;
    }
  }
  return UniqueCast;
}

/// Get the stride of a pointer access in a loop. Looks for symbolic
/// strides "a[i*stride]". Returns the symbolic stride, or null otherwise.
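///
/// Illustrative example (a sketch, not from the original comment): for
///   %mul = mul i64 %i, %stride
///   %gep = getelementptr i32, i32* %A, i64 %mul
/// the GEP index has the SCEV {0,+,%stride}, and the returned symbolic stride
/// is %stride, provided %stride is invariant in the loop.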
Value *llvm::getStrideFromPointer(Value *Ptr, ScalarEvolution *SE, Loop *Lp) {
  auto *PtrTy = dyn_cast<PointerType>(Ptr->getType());
  if (!PtrTy || PtrTy->isAggregateType())
    return nullptr;

  // Try to remove a gep instruction to make the pointer (actually the index at
  // this point) easier to analyze. If OrigPtr is equal to Ptr we are analyzing
  // the pointer, otherwise we are analyzing the index.
  Value *OrigPtr = Ptr;

  // The size of the pointer access.
  int64_t PtrAccessSize = 1;

  Ptr = stripGetElementPtr(Ptr, SE, Lp);
  const SCEV *V = SE->getSCEV(Ptr);

  if (Ptr != OrigPtr)
    // Strip off casts.
    while (const SCEVCastExpr *C = dyn_cast<SCEVCastExpr>(V))
      V = C->getOperand();

  const SCEVAddRecExpr *S = dyn_cast<SCEVAddRecExpr>(V);
  if (!S)
    return nullptr;

  V = S->getStepRecurrence(*SE);
  if (!V)
    return nullptr;

  // Strip off the size of access multiplication if we are still analyzing the
  // pointer.
  if (OrigPtr == Ptr) {
    if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(V)) {
      if (M->getOperand(0)->getSCEVType() != scConstant)
        return nullptr;

      const APInt &APStepVal = cast<SCEVConstant>(M->getOperand(0))->getAPInt();

      // Huge step value - give up.
      if (APStepVal.getBitWidth() > 64)
        return nullptr;

      int64_t StepVal = APStepVal.getSExtValue();
      if (PtrAccessSize != StepVal)
        return nullptr;
      V = M->getOperand(1);
    }
  }

  // Strip off casts.
  Type *StripedOffRecurrenceCast = nullptr;
  if (const SCEVCastExpr *C = dyn_cast<SCEVCastExpr>(V)) {
    StripedOffRecurrenceCast = C->getType();
    V = C->getOperand();
  }

  // Look for the loop invariant symbolic value.
  const SCEVUnknown *U = dyn_cast<SCEVUnknown>(V);
  if (!U)
    return nullptr;

  Value *Stride = U->getValue();
  if (!Lp->isLoopInvariant(Stride))
    return nullptr;

  // If we have stripped off the recurrence cast we have to make sure that we
  // return the value that is used in this loop so that we can replace it later.
  if (StripedOffRecurrenceCast)
    Stride = getUniqueCastUse(Stride, Lp, StripedOffRecurrenceCast);

  return Stride;
}

/// Given a vector and an element number, see if the scalar value is
/// already around as a register, for example if it were inserted then extracted
/// from the vector.
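///
/// Illustrative example: for
///   %v = insertelement <4 x i32> %w, i32 %x, i32 1
/// findScalarElement(%v, 1) returns %x, and findScalarElement(%v, 0) recurses
/// into %w.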
Value *llvm::findScalarElement(Value *V, unsigned EltNo) {
  assert(V->getType()->isVectorTy() && "Not looking at a vector?");
  VectorType *VTy = cast<VectorType>(V->getType());
  // For fixed-length vector, return undef for out of range access.
  if (auto *FVTy = dyn_cast<FixedVectorType>(VTy)) {
    unsigned Width = FVTy->getNumElements();
    if (EltNo >= Width)
      return UndefValue::get(FVTy->getElementType());
  }

  if (Constant *C = dyn_cast<Constant>(V))
    return C->getAggregateElement(EltNo);

  if (InsertElementInst *III = dyn_cast<InsertElementInst>(V)) {
    // If this is an insert to a variable element, we don't know what it is.
    if (!isa<ConstantInt>(III->getOperand(2)))
      return nullptr;
    unsigned IIElt = cast<ConstantInt>(III->getOperand(2))->getZExtValue();

    // If this is an insert to the element we are looking for, return the
    // inserted value.
    if (EltNo == IIElt)
      return III->getOperand(1);

    // Otherwise, the insertelement doesn't modify the value, recurse on its
    // vector input.
    return findScalarElement(III->getOperand(0), EltNo);
  }

  ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(V);
  // Restrict the following transformation to fixed-length vector.
  if (SVI && isa<FixedVectorType>(SVI->getType())) {
    unsigned LHSWidth =
        cast<FixedVectorType>(SVI->getOperand(0)->getType())->getNumElements();
    int InEl = SVI->getMaskValue(EltNo);
    if (InEl < 0)
      return UndefValue::get(VTy->getElementType());
    if (InEl < (int)LHSWidth)
      return findScalarElement(SVI->getOperand(0), InEl);
    return findScalarElement(SVI->getOperand(1), InEl - LHSWidth);
  }

  // Extract a value from a vector add operation with a constant zero.
  // TODO: Use getBinOpIdentity() to generalize this.
  Value *Val; Constant *C;
  if (match(V, m_Add(m_Value(Val), m_Constant(C))))
    if (Constant *Elt = C->getAggregateElement(EltNo))
      if (Elt->isNullValue())
        return findScalarElement(Val, EltNo);

  // Otherwise, we don't know.
  return nullptr;
}

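/// Illustrative example: the mask <3, undef, 3, 3> has splat index 3, while
/// <0, 1, 0, 1> (or an all-undef mask) yields -1.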
int llvm::getSplatIndex(ArrayRef<int> Mask) {
  int SplatIndex = -1;
  for (int M : Mask) {
    // Ignore invalid (undefined) mask elements.
    if (M < 0)
      continue;

    // There can be only 1 non-negative mask element value if this is a splat.
    if (SplatIndex != -1 && SplatIndex != M)
      return -1;

    // Initialize the splat index to the 1st non-negative mask element.
    SplatIndex = M;
  }
  assert((SplatIndex == -1 || SplatIndex >= 0) && "Negative index?");
  return SplatIndex;
}

/// Get splat value if the input is a splat vector or return nullptr.
/// This function is not fully general. It checks only 2 cases:
/// the input value is (1) a splat constant vector or (2) a sequence
/// of instructions that broadcasts a scalar at element 0.
const llvm::Value *llvm::getSplatValue(const Value *V) {
  if (isa<VectorType>(V->getType()))
    if (auto *C = dyn_cast<Constant>(V))
      return C->getSplatValue();

  // shuf (inselt ?, Splat, 0), ?, <0, undef, 0, ...>
  Value *Splat;
  if (match(V,
            m_Shuffle(m_InsertElt(m_Value(), m_Value(Splat), m_ZeroInt()),
                      m_Value(), m_ZeroMask())))
    return Splat;

  return nullptr;
}

// This setting is based on its counterpart in value tracking, but it could be
// adjusted if needed.
const unsigned MaxDepth = 6;

bool llvm::isSplatValue(const Value *V, int Index, unsigned Depth) {
  assert(Depth <= MaxDepth && "Limit Search Depth");

  if (isa<VectorType>(V->getType())) {
    if (isa<UndefValue>(V))
      return true;
    // FIXME: We can allow undefs, but if Index was specified, we may want to
    // check that the constant is defined at that index.
    if (auto *C = dyn_cast<Constant>(V))
      return C->getSplatValue() != nullptr;
  }

  if (auto *Shuf = dyn_cast<ShuffleVectorInst>(V)) {
    // FIXME: We can safely allow undefs here. If Index was specified, we will
    // check that the mask elt is defined at the required index.
    if (!is_splat(Shuf->getShuffleMask()))
      return false;

    // Match any index.
    if (Index == -1)
      return true;

    // Match a specific element. The mask should be defined at and match the
    // specified index.
    return Shuf->getMaskValue(Index) == Index;
  }

  // The remaining tests are all recursive, so bail out if we hit the limit.
  if (Depth++ == MaxDepth)
    return false;

  // If both operands of a binop are splats, the result is a splat.
  Value *X, *Y, *Z;
  if (match(V, m_BinOp(m_Value(X), m_Value(Y))))
    return isSplatValue(X, Index, Depth) && isSplatValue(Y, Index, Depth);

  // If all operands of a select are splats, the result is a splat.
  if (match(V, m_Select(m_Value(X), m_Value(Y), m_Value(Z))))
    return isSplatValue(X, Index, Depth) && isSplatValue(Y, Index, Depth) &&
           isSplatValue(Z, Index, Depth);

  // TODO: Add support for unary ops (fneg), casts, intrinsics (overflow ops).

  return false;
}

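/// Illustrative example: narrowing the mask <0, 2, undef> by Scale = 2
/// produces <0, 1, 4, 5, undef, undef>, i.e. each wide element is expanded
/// into Scale consecutive narrow elements.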
void llvm::narrowShuffleMaskElts(int Scale, ArrayRef<int> Mask,
                                 SmallVectorImpl<int> &ScaledMask) {
  assert(Scale > 0 && "Unexpected scaling factor");

  // Fast-path: if no scaling, then it is just a copy.
  if (Scale == 1) {
    ScaledMask.assign(Mask.begin(), Mask.end());
    return;
  }

  ScaledMask.clear();
  for (int MaskElt : Mask) {
    if (MaskElt >= 0) {
      assert(((uint64_t)Scale * MaskElt + (Scale - 1)) <=
                 std::numeric_limits<int32_t>::max() &&
             "Overflowed 32-bits");
    }
    for (int SliceElt = 0; SliceElt != Scale; ++SliceElt)
      ScaledMask.push_back(MaskElt < 0 ? MaskElt : Scale * MaskElt + SliceElt);
  }
}

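/// Illustrative example: widening <0, 1, 4, 5> by Scale = 2 succeeds and
/// yields <0, 2>, whereas <0, 2, 4, 5> fails because the first slice is not a
/// run of consecutive elements starting at a multiple of Scale.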
bool llvm::widenShuffleMaskElts(int Scale, ArrayRef<int> Mask,
                                SmallVectorImpl<int> &ScaledMask) {
  assert(Scale > 0 && "Unexpected scaling factor");

  // Fast-path: if no scaling, then it is just a copy.
  if (Scale == 1) {
    ScaledMask.assign(Mask.begin(), Mask.end());
    return true;
  }

  // We must map the original elements down evenly to a type with fewer
  // elements.
  int NumElts = Mask.size();
  if (NumElts % Scale != 0)
    return false;

  ScaledMask.clear();
  ScaledMask.reserve(NumElts / Scale);

  // Step through the input mask by splitting into Scale-sized slices.
  do {
    ArrayRef<int> MaskSlice = Mask.take_front(Scale);
    assert((int)MaskSlice.size() == Scale && "Expected Scale-sized slice.");

    // The first element of the slice determines how we evaluate this slice.
    int SliceFront = MaskSlice.front();
    if (SliceFront < 0) {
      // Negative values (undef or other "sentinel" values) must be equal
      // across the entire slice.
      if (!is_splat(MaskSlice))
        return false;
      ScaledMask.push_back(SliceFront);
    } else {
      // A positive mask element must be cleanly divisible.
      if (SliceFront % Scale != 0)
        return false;
      // Elements of the slice must be consecutive.
      for (int i = 1; i < Scale; ++i)
        if (MaskSlice[i] != SliceFront + i)
          return false;
      ScaledMask.push_back(SliceFront / Scale);
    }
    Mask = Mask.drop_front(Scale);
  } while (!Mask.empty());

  assert((int)ScaledMask.size() * Scale == NumElts && "Unexpected scaled mask");

  // All elements of the original mask can be scaled down to map to the
  // elements of a mask with wider elements.
  return true;
}

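/// Illustrative example: if a chain of non-vector integer instructions feeding
/// a `trunc i32 %x to i8` only demands its low 8 bits end-to-end, every
/// instruction in that equivalence class is reported with a minimum bitwidth
/// of 8 (rounded up to a power of two).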
MapVector<Instruction *, uint64_t>
llvm::computeMinimumValueSizes(ArrayRef<BasicBlock *> Blocks, DemandedBits &DB,
                               const TargetTransformInfo *TTI) {

  // DemandedBits will give us every value's live-out bits. But we want
  // to ensure no extra casts would need to be inserted, so every DAG
  // of connected values must have the same minimum bitwidth.
  EquivalenceClasses<Value *> ECs;
  SmallVector<Value *, 16> Worklist;
  SmallPtrSet<Value *, 4> Roots;
  SmallPtrSet<Value *, 16> Visited;
  DenseMap<Value *, uint64_t> DBits;
  SmallPtrSet<Instruction *, 4> InstructionSet;
  MapVector<Instruction *, uint64_t> MinBWs;

  // Determine the roots. We work bottom-up, from truncs or icmps.
  bool SeenExtFromIllegalType = false;
  for (auto *BB : Blocks)
    for (auto &I : *BB) {
      InstructionSet.insert(&I);

      if (TTI && (isa<ZExtInst>(&I) || isa<SExtInst>(&I)) &&
          !TTI->isTypeLegal(I.getOperand(0)->getType()))
        SeenExtFromIllegalType = true;

      // Only deal with non-vector integers up to 64-bits wide.
      if ((isa<TruncInst>(&I) || isa<ICmpInst>(&I)) &&
          !I.getType()->isVectorTy() &&
          I.getOperand(0)->getType()->getScalarSizeInBits() <= 64) {
        // Don't make work for ourselves. If we know the loaded type is legal,
        // don't add it to the worklist.
        if (TTI && isa<TruncInst>(&I) && TTI->isTypeLegal(I.getType()))
          continue;

        Worklist.push_back(&I);
        Roots.insert(&I);
      }
    }
  // Early exit.
  if (Worklist.empty() || (TTI && !SeenExtFromIllegalType))
    return MinBWs;

  // Now proceed breadth-first, unioning values together.
  while (!Worklist.empty()) {
    Value *Val = Worklist.pop_back_val();
    Value *Leader = ECs.getOrInsertLeaderValue(Val);

    if (Visited.count(Val))
      continue;
    Visited.insert(Val);

    // Non-instructions terminate a chain successfully.
    if (!isa<Instruction>(Val))
      continue;
    Instruction *I = cast<Instruction>(Val);

    // If we encounter a type that is larger than 64 bits, we can't represent
    // it so bail out.
    if (DB.getDemandedBits(I).getBitWidth() > 64)
      return MapVector<Instruction *, uint64_t>();

    uint64_t V = DB.getDemandedBits(I).getZExtValue();
    DBits[Leader] |= V;
    DBits[I] = V;

    // Casts, loads and instructions outside of our range terminate a chain
    // successfully.
    if (isa<SExtInst>(I) || isa<ZExtInst>(I) || isa<LoadInst>(I) ||
        !InstructionSet.count(I))
      continue;

    // Unsafe casts terminate a chain unsuccessfully. We can't do anything
    // useful with bitcasts, ptrtoints or inttoptrs and it'd be unsafe to
    // transform anything that relies on them.
    if (isa<BitCastInst>(I) || isa<PtrToIntInst>(I) || isa<IntToPtrInst>(I) ||
        !I->getType()->isIntegerTy()) {
      DBits[Leader] |= ~0ULL;
      continue;
    }

    // We don't modify the types of PHIs. Reductions will already have been
    // truncated if possible, and inductions' sizes will have been chosen by
    // indvars.
    if (isa<PHINode>(I))
      continue;

    if (DBits[Leader] == ~0ULL)
      // All bits demanded, no point continuing.
      continue;

    for (Value *O : cast<User>(I)->operands()) {
      ECs.unionSets(Leader, O);
      Worklist.push_back(O);
    }
  }

  // Now we've discovered all values, walk them to see if there are
  // any users we didn't see. If there are, we can't optimize that
  // chain.
  for (auto &I : DBits)
    for (auto *U : I.first->users())
      if (U->getType()->isIntegerTy() && DBits.count(U) == 0)
        DBits[ECs.getOrInsertLeaderValue(I.first)] |= ~0ULL;

  for (auto I = ECs.begin(), E = ECs.end(); I != E; ++I) {
    uint64_t LeaderDemandedBits = 0;
    for (auto MI = ECs.member_begin(I), ME = ECs.member_end(); MI != ME; ++MI)
      LeaderDemandedBits |= DBits[*MI];

    uint64_t MinBW = (sizeof(LeaderDemandedBits) * 8) -
                     llvm::countLeadingZeros(LeaderDemandedBits);
    // Round up to a power of 2
    if (!isPowerOf2_64((uint64_t)MinBW))
      MinBW = NextPowerOf2(MinBW);

    // We don't modify the types of PHIs. Reductions will already have been
    // truncated if possible, and inductions' sizes will have been chosen by
    // indvars.
    // If we are required to shrink a PHI, abandon this entire equivalence
    // class.
    bool Abort = false;
    for (auto MI = ECs.member_begin(I), ME = ECs.member_end(); MI != ME; ++MI)
      if (isa<PHINode>(*MI) && MinBW < (*MI)->getType()->getScalarSizeInBits()) {
        Abort = true;
        break;
      }
    if (Abort)
      continue;

    for (auto MI = ECs.member_begin(I), ME = ECs.member_end(); MI != ME; ++MI) {
      if (!isa<Instruction>(*MI))
        continue;
      Type *Ty = (*MI)->getType();
      if (Roots.count(*MI))
        Ty = cast<Instruction>(*MI)->getOperand(0)->getType();
      if (MinBW < Ty->getScalarSizeInBits())
        MinBWs[cast<Instruction>(*MI)] = MinBW;
    }
  }

  return MinBWs;
}

/// Add all access groups in @p AccGroups to @p List.
template <typename ListT>
static void addToAccessGroupList(ListT &List, MDNode *AccGroups) {
  // Interpret an access group as a list containing itself.
  if (AccGroups->getNumOperands() == 0) {
    assert(isValidAsAccessGroup(AccGroups) && "Node must be an access group");
    List.insert(AccGroups);
    return;
  }

  for (auto &AccGroupListOp : AccGroups->operands()) {
    auto *Item = cast<MDNode>(AccGroupListOp.get());
    assert(isValidAsAccessGroup(Item) && "List item must be an access group");
    List.insert(Item);
  }
}

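/// Illustrative example: an !llvm.access.group attachment is either a single
/// distinct (operand-less) MDNode or a list of such nodes, so uniting !0 with
/// !{!1, !2} produces the list !{!0, !1, !2}, and uniting a group with itself
/// returns the group unchanged.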
MDNode *llvm::uniteAccessGroups(MDNode *AccGroups1, MDNode *AccGroups2) {
  if (!AccGroups1)
    return AccGroups2;
  if (!AccGroups2)
    return AccGroups1;
  if (AccGroups1 == AccGroups2)
    return AccGroups1;

  SmallSetVector<Metadata *, 4> Union;
  addToAccessGroupList(Union, AccGroups1);
  addToAccessGroupList(Union, AccGroups2);

  if (Union.size() == 0)
    return nullptr;
  if (Union.size() == 1)
    return cast<MDNode>(Union.front());

  LLVMContext &Ctx = AccGroups1->getContext();
  return MDNode::get(Ctx, Union.getArrayRef());
}

MDNode *llvm::intersectAccessGroups(const Instruction *Inst1,
                                    const Instruction *Inst2) {
  bool MayAccessMem1 = Inst1->mayReadOrWriteMemory();
  bool MayAccessMem2 = Inst2->mayReadOrWriteMemory();

  if (!MayAccessMem1 && !MayAccessMem2)
    return nullptr;
  if (!MayAccessMem1)
    return Inst2->getMetadata(LLVMContext::MD_access_group);
  if (!MayAccessMem2)
    return Inst1->getMetadata(LLVMContext::MD_access_group);

  MDNode *MD1 = Inst1->getMetadata(LLVMContext::MD_access_group);
  MDNode *MD2 = Inst2->getMetadata(LLVMContext::MD_access_group);
  if (!MD1 || !MD2)
    return nullptr;
  if (MD1 == MD2)
    return MD1;

  // Use set for scalable 'contains' check.
  SmallPtrSet<Metadata *, 4> AccGroupSet2;
  addToAccessGroupList(AccGroupSet2, MD2);

  SmallVector<Metadata *, 4> Intersection;
  if (MD1->getNumOperands() == 0) {
    assert(isValidAsAccessGroup(MD1) && "Node must be an access group");
    if (AccGroupSet2.count(MD1))
      Intersection.push_back(MD1);
  } else {
    for (const MDOperand &Node : MD1->operands()) {
      auto *Item = cast<MDNode>(Node.get());
      assert(isValidAsAccessGroup(Item) && "List item must be an access group");
      if (AccGroupSet2.count(Item))
        Intersection.push_back(Item);
    }
  }

  if (Intersection.size() == 0)
    return nullptr;
  if (Intersection.size() == 1)
    return cast<MDNode>(Intersection.front());

  LLVMContext &Ctx = Inst1->getContext();
  return MDNode::get(Ctx, Intersection);
}

/// \returns \p I after propagating metadata from \p VL.
Instruction *llvm::propagateMetadata(Instruction *Inst, ArrayRef<Value *> VL) {
  Instruction *I0 = cast<Instruction>(VL[0]);
  SmallVector<std::pair<unsigned, MDNode *>, 4> Metadata;
  I0->getAllMetadataOtherThanDebugLoc(Metadata);

  for (auto Kind : {LLVMContext::MD_tbaa, LLVMContext::MD_alias_scope,
                    LLVMContext::MD_noalias, LLVMContext::MD_fpmath,
                    LLVMContext::MD_nontemporal, LLVMContext::MD_invariant_load,
                    LLVMContext::MD_access_group}) {
    MDNode *MD = I0->getMetadata(Kind);

    for (int J = 1, E = VL.size(); MD && J != E; ++J) {
      const Instruction *IJ = cast<Instruction>(VL[J]);
      MDNode *IMD = IJ->getMetadata(Kind);
      switch (Kind) {
      case LLVMContext::MD_tbaa:
        MD = MDNode::getMostGenericTBAA(MD, IMD);
        break;
      case LLVMContext::MD_alias_scope:
        MD = MDNode::getMostGenericAliasScope(MD, IMD);
        break;
      case LLVMContext::MD_fpmath:
        MD = MDNode::getMostGenericFPMath(MD, IMD);
        break;
      case LLVMContext::MD_noalias:
      case LLVMContext::MD_nontemporal:
      case LLVMContext::MD_invariant_load:
        MD = MDNode::intersect(MD, IMD);
        break;
      case LLVMContext::MD_access_group:
        MD = intersectAccessGroups(Inst, IJ);
        break;
      default:
        llvm_unreachable("unhandled metadata");
      }
    }

    Inst->setMetadata(Kind, MD);
  }

  return Inst;
}

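/// Illustrative example: for VF = 2 and a factor-3 group that only has members
/// at indices 0 and 2, the returned gap mask is
/// <i1 1, i1 0, i1 1, i1 1, i1 0, i1 1>.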
Constant *
llvm::createBitMaskForGaps(IRBuilderBase &Builder, unsigned VF,
                           const InterleaveGroup<Instruction> &Group) {
  // All 1's means mask is not needed.
  if (Group.getNumMembers() == Group.getFactor())
    return nullptr;

  // TODO: support reversed access.
  assert(!Group.isReverse() && "Reversed group not supported.");

  SmallVector<Constant *, 16> Mask;
  for (unsigned i = 0; i < VF; i++)
    for (unsigned j = 0; j < Group.getFactor(); ++j) {
      unsigned HasMember = Group.getMember(j) ? 1 : 0;
      Mask.push_back(Builder.getInt1(HasMember));
    }

  return ConstantVector::get(Mask);
}

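/// Illustrative example: createReplicatedMask(3, 2) returns <0, 0, 0, 1, 1, 1>,
/// replicating each of the VF = 2 lanes ReplicationFactor = 3 times.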
llvm::SmallVector<int, 16>
llvm::createReplicatedMask(unsigned ReplicationFactor, unsigned VF) {
  SmallVector<int, 16> MaskVec;
  for (unsigned i = 0; i < VF; i++)
    for (unsigned j = 0; j < ReplicationFactor; j++)
      MaskVec.push_back(i);

  return MaskVec;
}

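/// Illustrative example: createInterleaveMask(4, 2) returns
/// <0, 4, 1, 5, 2, 6, 3, 7>, interleaving the lanes of two vectors of VF = 4.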
llvm::SmallVector<int, 16> llvm::createInterleaveMask(unsigned VF,
                                                      unsigned NumVecs) {
  SmallVector<int, 16> Mask;
  for (unsigned i = 0; i < VF; i++)
    for (unsigned j = 0; j < NumVecs; j++)
      Mask.push_back(j * VF + i);

  return Mask;
}

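/// Illustrative example: createStrideMask(0, 2, 4) returns <0, 2, 4, 6>,
/// selecting every second element starting at index 0.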
llvm::SmallVector<int, 16>
llvm::createStrideMask(unsigned Start, unsigned Stride, unsigned VF) {
  SmallVector<int, 16> Mask;
  for (unsigned i = 0; i < VF; i++)
    Mask.push_back(Start + i * Stride);

  return Mask;
}

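/// Illustrative example: createSequentialMask(0, 4, 2) returns
/// <0, 1, 2, 3, undef, undef>, with the trailing undefs encoded as -1.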
llvm::SmallVector<int, 16> llvm::createSequentialMask(unsigned Start,
                                                      unsigned NumInts,
                                                      unsigned NumUndefs) {
  SmallVector<int, 16> Mask;
  for (unsigned i = 0; i < NumInts; i++)
    Mask.push_back(Start + i);

  for (unsigned i = 0; i < NumUndefs; i++)
    Mask.push_back(-1);

  return Mask;
}

/// A helper function for concatenating vectors. This function concatenates two
/// vectors having the same element type. If the second vector has fewer
/// elements than the first, it is padded with undefs.
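///
/// Illustrative example: concatenating a <2 x i32> with a <1 x i32> first
/// widens the second operand to <2 x i32> with an undef tail, then shuffles
/// the pair with the mask <0, 1, 2> to produce a <3 x i32>.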
static Value *concatenateTwoVectors(IRBuilderBase &Builder, Value *V1,
                                    Value *V2) {
  VectorType *VecTy1 = dyn_cast<VectorType>(V1->getType());
  VectorType *VecTy2 = dyn_cast<VectorType>(V2->getType());
  assert(VecTy1 && VecTy2 &&
         VecTy1->getScalarType() == VecTy2->getScalarType() &&
         "Expect two vectors with the same element type");

  unsigned NumElts1 = VecTy1->getNumElements();
  unsigned NumElts2 = VecTy2->getNumElements();
  assert(NumElts1 >= NumElts2 &&
         "Unexpected: the first vector has fewer elements");

  if (NumElts1 > NumElts2) {
    // Extend with UNDEFs.
    V2 = Builder.CreateShuffleVector(
        V2, UndefValue::get(VecTy2),
        createSequentialMask(0, NumElts2, NumElts1 - NumElts2));
  }

  return Builder.CreateShuffleVector(
      V1, V2, createSequentialMask(0, NumElts1 + NumElts2, 0));
}

Value *llvm::concatenateVectors(IRBuilderBase &Builder,
                                ArrayRef<Value *> Vecs) {
  unsigned NumVecs = Vecs.size();
  assert(NumVecs > 1 && "Should be at least two vectors");

  SmallVector<Value *, 8> ResList;
  ResList.append(Vecs.begin(), Vecs.end());
  do {
    SmallVector<Value *, 8> TmpList;
    for (unsigned i = 0; i < NumVecs - 1; i += 2) {
      Value *V0 = ResList[i], *V1 = ResList[i + 1];
      assert((V0->getType() == V1->getType() || i == NumVecs - 2) &&
             "Only the last vector may have a different type");

      TmpList.push_back(concatenateTwoVectors(Builder, V0, V1));
    }

    // Push the last vector if the total number of vectors is odd.
    if (NumVecs % 2 != 0)
      TmpList.push_back(ResList[NumVecs - 1]);

    ResList = TmpList;
    NumVecs = ResList.size();
  } while (NumVecs > 1);

  return ResList[0];
}

bool llvm::maskIsAllZeroOrUndef(Value *Mask) {
  auto *ConstMask = dyn_cast<Constant>(Mask);
  if (!ConstMask)
    return false;
  if (ConstMask->isNullValue() || isa<UndefValue>(ConstMask))
    return true;
  for (unsigned I = 0,
                E = cast<VectorType>(ConstMask->getType())->getNumElements();
       I != E; ++I) {
    if (auto *MaskElt = ConstMask->getAggregateElement(I))
      if (MaskElt->isNullValue() || isa<UndefValue>(MaskElt))
        continue;
    return false;
  }
  return true;
}

bool llvm::maskIsAllOneOrUndef(Value *Mask) {
  auto *ConstMask = dyn_cast<Constant>(Mask);
  if (!ConstMask)
    return false;
  if (ConstMask->isAllOnesValue() || isa<UndefValue>(ConstMask))
    return true;
  for (unsigned I = 0,
                E = cast<VectorType>(ConstMask->getType())->getNumElements();
       I != E; ++I) {
    if (auto *MaskElt = ConstMask->getAggregateElement(I))
      if (MaskElt->isAllOnesValue() || isa<UndefValue>(MaskElt))
        continue;
    return false;
  }
  return true;
}

/// TODO: This is a lot like known bits, but for
/// vectors. Is there something we can common this with?
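///
/// Illustrative example: for the constant mask <i1 1, i1 0, i1 undef, i1 1>
/// the result is 0b1101 -- only elements known to be zero are cleared, so the
/// undef lane stays conservatively demanded.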
APInt llvm::possiblyDemandedEltsInMask(Value *Mask) {

  const unsigned VWidth = cast<VectorType>(Mask->getType())->getNumElements();
  APInt DemandedElts = APInt::getAllOnesValue(VWidth);
  if (auto *CV = dyn_cast<ConstantVector>(Mask))
    for (unsigned i = 0; i < VWidth; i++)
      if (CV->getAggregateElement(i)->isNullValue())
        DemandedElts.clearBit(i);
  return DemandedElts;
}

bool InterleavedAccessInfo::isStrided(int Stride) {
  unsigned Factor = std::abs(Stride);
  return Factor >= 2 && Factor <= MaxInterleaveGroupFactor;
}

void InterleavedAccessInfo::collectConstStrideAccesses(
    MapVector<Instruction *, StrideDescriptor> &AccessStrideInfo,
    const ValueToValueMap &Strides) {
  auto &DL = TheLoop->getHeader()->getModule()->getDataLayout();

  // Since it's desired that the load/store instructions be maintained in
  // "program order" for the interleaved access analysis, we have to visit the
  // blocks in the loop in reverse postorder (i.e., in a topological order).
  // Such an ordering will ensure that any load/store that may be executed
  // before a second load/store will precede the second load/store in
  // AccessStrideInfo.
  LoopBlocksDFS DFS(TheLoop);
  DFS.perform(LI);
  for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO()))
    for (auto &I : *BB) {
      auto *LI = dyn_cast<LoadInst>(&I);
      auto *SI = dyn_cast<StoreInst>(&I);
      if (!LI && !SI)
        continue;

      Value *Ptr = getLoadStorePointerOperand(&I);
      // We don't check wrapping here because we don't know yet if Ptr will be
      // part of a full group or a group with gaps. Checking wrapping for all
      // pointers (even those that end up in groups with no gaps) will be overly
      // conservative. For full groups, wrapping should be ok since if we would
      // wrap around the address space we would do a memory access at nullptr
      // even without the transformation. The wrapping checks are therefore
      // deferred until after we've formed the interleaved groups.
      int64_t Stride = getPtrStride(PSE, Ptr, TheLoop, Strides,
                                    /*Assume=*/true, /*ShouldCheckWrap=*/false);

      const SCEV *Scev = replaceSymbolicStrideSCEV(PSE, Strides, Ptr);
      PointerType *PtrTy = cast<PointerType>(Ptr->getType());
      uint64_t Size = DL.getTypeAllocSize(PtrTy->getElementType());
      AccessStrideInfo[&I] = StrideDescriptor(Stride, Scev, Size,
                                              getLoadStoreAlignment(&I));
    }
}

// Analyze interleaved accesses and collect them into interleaved load and
// store groups.
//
// When generating code for an interleaved load group, we effectively hoist all
// loads in the group to the location of the first load in program order. When
// generating code for an interleaved store group, we sink all stores to the
// location of the last store. This code motion can change the order of load
// and store instructions and may break dependences.
//
// The code generation strategy mentioned above ensures that we won't violate
// any write-after-read (WAR) dependences.
//
// E.g., for the WAR dependence:  a = A[i];      // (1)
//                                A[i] = b;      // (2)
//
// The store group of (2) is always inserted at or below (2), and the load
// group of (1) is always inserted at or above (1). Thus, the instructions will
// never be reordered. All other dependences are checked to ensure the
// correctness of the instruction reordering.
//
// The algorithm visits all memory accesses in the loop in bottom-up program
// order. Program order is established by traversing the blocks in the loop in
// reverse postorder when collecting the accesses.
//
// We visit the memory accesses in bottom-up order because it can simplify the
// construction of store groups in the presence of write-after-write (WAW)
// dependences.
//
// E.g., for the WAW dependence:  A[i] = a;      // (1)
//                                A[i] = b;      // (2)
//                                A[i + 1] = c;  // (3)
//
// We will first create a store group with (3) and (2). (1) can't be added to
// this group because it and (2) are dependent. However, (1) can be grouped
// with other accesses that may precede it in program order. Note that a
// bottom-up order does not imply that WAW dependences should not be checked.
void InterleavedAccessInfo::analyzeInterleaving(
    bool EnablePredicatedInterleavedMemAccesses) {
  LLVM_DEBUG(dbgs() << "LV: Analyzing interleaved accesses...\n");
  const ValueToValueMap &Strides = LAI->getSymbolicStrides();

  // Holds all accesses with a constant stride.
  MapVector<Instruction *, StrideDescriptor> AccessStrideInfo;
  collectConstStrideAccesses(AccessStrideInfo, Strides);

  if (AccessStrideInfo.empty())
    return;

  // Collect the dependences in the loop.
  collectDependences();

  // Holds all interleaved store groups temporarily.
  SmallSetVector<InterleaveGroup<Instruction> *, 4> StoreGroups;
  // Holds all interleaved load groups temporarily.
  SmallSetVector<InterleaveGroup<Instruction> *, 4> LoadGroups;

  // Search in bottom-up program order for pairs of accesses (A and B) that can
  // form interleaved load or store groups. In the algorithm below, access A
  // precedes access B in program order. We initialize a group for B in the
  // outer loop of the algorithm, and then in the inner loop, we attempt to
  // insert each A into B's group if:
  //
  //  1. A and B have the same stride,
  //  2. A and B have the same memory object size, and
  //  3. A belongs in B's group according to its distance from B.
  //
  // Special care is taken to ensure group formation will not break any
  // dependences.
  for (auto BI = AccessStrideInfo.rbegin(), E = AccessStrideInfo.rend();
       BI != E; ++BI) {
    Instruction *B = BI->first;
    StrideDescriptor DesB = BI->second;

    // Initialize a group for B if it has an allowable stride. Even if we don't
    // create a group for B, we continue with the bottom-up algorithm to ensure
    // we don't break any of B's dependences.
    InterleaveGroup<Instruction> *Group = nullptr;
    if (isStrided(DesB.Stride) &&
        (!isPredicated(B->getParent()) ||
         EnablePredicatedInterleavedMemAccesses)) {
      Group = getInterleaveGroup(B);
      if (!Group) {
        LLVM_DEBUG(dbgs() << "LV: Creating an interleave group with:" << *B
                          << '\n');
        Group = createInterleaveGroup(B, DesB.Stride, DesB.Alignment);
      }
      if (B->mayWriteToMemory())
        StoreGroups.insert(Group);
      else
        LoadGroups.insert(Group);
    }

    for (auto AI = std::next(BI); AI != E; ++AI) {
      Instruction *A = AI->first;
      StrideDescriptor DesA = AI->second;

      // Our code motion strategy implies that we can't have dependences
      // between accesses in an interleaved group and other accesses located
      // between the first and last member of the group. Note that this also
      // means that a group can't have more than one member at a given offset.
      // The accesses in a group can have dependences with other accesses, but
      // we must ensure we don't extend the boundaries of the group such that
      // we encompass those dependent accesses.
      //
      // For example, assume we have the sequence of accesses shown below in a
      // stride-2 loop:
      //
      //  (1, 2) is a group | A[i]   = a;  // (1)
      //                    | A[i-1] = b;  // (2) |
      //                      A[i-3] = c;  // (3)
      //                      A[i]   = d;  // (4) | (2, 4) is not a group
      //
      // Because accesses (2) and (3) are dependent, we can group (2) with (1)
      // but not with (4). If we did, the dependent access (3) would be within
      // the boundaries of the (2, 4) group.
      if (!canReorderMemAccessesForInterleavedGroups(&*AI, &*BI)) {
        // If a dependence exists and A is already in a group, we know that A
        // must be a store since A precedes B and WAR dependences are allowed.
        // Thus, A would be sunk below B. We release A's group to prevent this
        // illegal code motion. A will then be free to form another group with
        // instructions that precede it.
        if (isInterleaved(A)) {
          InterleaveGroup<Instruction> *StoreGroup = getInterleaveGroup(A);

          LLVM_DEBUG(dbgs() << "LV: Invalidated store group due to "
                               "dependence between " << *A << " and " << *B
                            << '\n');

          StoreGroups.remove(StoreGroup);
          releaseGroup(StoreGroup);
        }

        // If a dependence exists and A is not already in a group (or it was
        // and we just released it), B might be hoisted above A (if B is a
        // load) or another store might be sunk below A (if B is a store). In
        // either case, we can't add additional instructions to B's group. B
        // will only form a group with instructions that it precedes.
        break;
      }

      // At this point, we've checked for illegal code motion. If either A or B
      // isn't strided, there's nothing left to do.
      if (!isStrided(DesA.Stride) || !isStrided(DesB.Stride))
        continue;

      // Ignore A if it's already in a group or isn't the same kind of memory
      // operation as B.
      // Note that mayReadFromMemory() isn't mutually exclusive to
      // mayWriteToMemory in the case of atomic loads. We shouldn't see those
      // here, canVectorizeMemory() should have returned false - except for the
      // case we asked for optimization remarks.
      if (isInterleaved(A) ||
          (A->mayReadFromMemory() != B->mayReadFromMemory()) ||
          (A->mayWriteToMemory() != B->mayWriteToMemory()))
        continue;

      // Check rules 1 and 2. Ignore A if its stride or size is different from
      // that of B.
      if (DesA.Stride != DesB.Stride || DesA.Size != DesB.Size)
        continue;

      // Ignore A if the memory objects of A and B don't belong to the same
      // address space.
      if (getLoadStoreAddressSpace(A) != getLoadStoreAddressSpace(B))
        continue;

      // Calculate the distance from A to B.
      const SCEVConstant *DistToB = dyn_cast<SCEVConstant>(
          PSE.getSE()->getMinusSCEV(DesA.Scev, DesB.Scev));
      if (!DistToB)
        continue;
      int64_t DistanceToB = DistToB->getAPInt().getSExtValue();

      // Check rule 3. Ignore A if its distance to B is not a multiple of the
      // size.
      if (DistanceToB % static_cast<int64_t>(DesB.Size))
        continue;

      // All members of a predicated interleave-group must have the same
      // predicate, and currently must reside in the same BB.
      BasicBlock *BlockA = A->getParent();
      BasicBlock *BlockB = B->getParent();
      if ((isPredicated(BlockA) || isPredicated(BlockB)) &&
          (!EnablePredicatedInterleavedMemAccesses || BlockA != BlockB))
        continue;

      // The index of A is the index of B plus A's distance to B in multiples
      // of the size.
      int IndexA =
          Group->getIndex(B) + DistanceToB / static_cast<int64_t>(DesB.Size);

      // Try to insert A into B's group.
      if (Group->insertMember(A, IndexA, DesA.Alignment)) {
        LLVM_DEBUG(dbgs() << "LV: Inserted:" << *A << '\n'
                          << "    into the interleave group with" << *B
                          << '\n');
        InterleaveGroupMap[A] = Group;

        // Set the first load in program order as the insert position.
        if (A->mayReadFromMemory())
          Group->setInsertPos(A);
      }
    } // Iteration over A accesses.
  }   // Iteration over B accesses.

  // Remove interleaved store groups with gaps.
  for (auto *Group : StoreGroups)
    if (Group->getNumMembers() != Group->getFactor()) {
      LLVM_DEBUG(
          dbgs() << "LV: Invalidate candidate interleaved store group due "
                    "to gaps.\n");
      releaseGroup(Group);
    }
  // Remove interleaved groups with gaps (currently only loads) whose memory
  // accesses may wrap around. We have to revisit the getPtrStride analysis,
  // this time with ShouldCheckWrap=true, since collectConstStrideAccesses does
  // not check wrapping (see documentation there).
  // FORNOW we use Assume=false;
  // TODO: Change to Assume=true but making sure we don't exceed the threshold
  // of runtime SCEV assumptions checks (thereby potentially failing to
  // vectorize altogether).
  // Additional optional optimizations:
  // TODO: If we are peeling the loop and we know that the first pointer doesn't
  // wrap then we can deduce that all pointers in the group don't wrap.
  // This means that we can forcefully peel the loop in order to only have to
  // check the first pointer for no-wrap. When we'll change to use Assume=true
  // we'll only need at most one runtime check per interleaved group.
  for (auto *Group : LoadGroups) {
    // Case 1: A full group. Can skip the checks; For full groups, if the wide
    // load would wrap around the address space we would do a memory access at
    // nullptr even without the transformation.
    if (Group->getNumMembers() == Group->getFactor())
      continue;

    // Case 2: If first and last members of the group don't wrap this implies
    // that all the pointers in the group don't wrap.
    // So we check only group member 0 (which is always guaranteed to exist),
    // and group member Factor - 1; If the latter doesn't exist we rely on
    // peeling (if it is a non-reversed access -- see Case 3).
    Value *FirstMemberPtr = getLoadStorePointerOperand(Group->getMember(0));
    if (!getPtrStride(PSE, FirstMemberPtr, TheLoop, Strides, /*Assume=*/false,
                      /*ShouldCheckWrap=*/true)) {
      LLVM_DEBUG(
          dbgs() << "LV: Invalidate candidate interleaved group due to "
                    "first group member potentially pointer-wrapping.\n");
      releaseGroup(Group);
      continue;
    }
    Instruction *LastMember = Group->getMember(Group->getFactor() - 1);
    if (LastMember) {
      Value *LastMemberPtr = getLoadStorePointerOperand(LastMember);
      if (!getPtrStride(PSE, LastMemberPtr, TheLoop, Strides, /*Assume=*/false,
                        /*ShouldCheckWrap=*/true)) {
        LLVM_DEBUG(
            dbgs() << "LV: Invalidate candidate interleaved group due to "
                      "last group member potentially pointer-wrapping.\n");
        releaseGroup(Group);
      }
    } else {
      // Case 3: A non-reversed interleaved load group with gaps: We need
      // to execute at least one scalar epilogue iteration. This will ensure
      // we don't speculatively access memory out-of-bounds. We only need
      // to look for a member at index factor - 1, since every group must have
      // a member at index zero.
      if (Group->isReverse()) {
        LLVM_DEBUG(
            dbgs() << "LV: Invalidate candidate interleaved group due to "
                      "a reverse access with gaps.\n");
        releaseGroup(Group);
        continue;
      }
      LLVM_DEBUG(
          dbgs() << "LV: Interleaved group requires epilogue iteration.\n");
      RequiresScalarEpilogue = true;
    }
  }
}

void InterleavedAccessInfo::invalidateGroupsRequiringScalarEpilogue() {
  // If no group had triggered the requirement to create an epilogue loop,
  // there is nothing to do.
  if (!requiresScalarEpilogue())
    return;

  bool ReleasedGroup = false;
  // Release groups requiring scalar epilogues. Note that this also removes them
  // from InterleaveGroups.
  for (auto *Group : make_early_inc_range(InterleaveGroups)) {
    if (!Group->requiresScalarEpilogue())
      continue;
    LLVM_DEBUG(
        dbgs()
        << "LV: Invalidate candidate interleaved group due to gaps that "
           "require a scalar epilogue (not allowed under optsize) and cannot "
           "be masked (not enabled). \n");
    releaseGroup(Group);
    ReleasedGroup = true;
  }
  assert(ReleasedGroup && "At least one group must be invalidated, as a "
                          "scalar epilogue was required");
  (void)ReleasedGroup;
  RequiresScalarEpilogue = false;
}

template <typename InstT>
void InterleaveGroup<InstT>::addMetadata(InstT *NewInst) const {
  llvm_unreachable("addMetadata can only be used for Instruction");
}

namespace llvm {
template <>
void InterleaveGroup<Instruction>::addMetadata(Instruction *NewInst) const {
  SmallVector<Value *, 4> VL;
  std::transform(Members.begin(), Members.end(), std::back_inserter(VL),
                 [](std::pair<int, Instruction *> p) { return p.second; });
  propagateMetadata(NewInst, VL);
}
}

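/// Illustrative example: mangleTLIVectorName("vsinf", "sinf", 1, 4) yields
/// "_ZGV_LLVM_N4v_sinf(vsinf)" -- the "_LLVM_" ISA token, an unmasked ("N")
/// VF of 4, one vector ("v") parameter, and the scalar-to-vector name pair.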
std::string VFABI::mangleTLIVectorName(StringRef VectorName,
                                       StringRef ScalarName, unsigned numArgs,
                                       unsigned VF) {
  SmallString<256> Buffer;
  llvm::raw_svector_ostream Out(Buffer);
  Out << "_ZGV" << VFABI::_LLVM_ << "N" << VF;
  for (unsigned I = 0; I < numArgs; ++I)
    Out << "v";
  Out << "_" << ScalarName << "(" << VectorName << ")";
  return std::string(Out.str());
}

void VFABI::getVectorVariantNames(
    const CallInst &CI, SmallVectorImpl<std::string> &VariantMappings) {
  const StringRef S =
      CI.getAttribute(AttributeList::FunctionIndex, VFABI::MappingsAttrName)
          .getValueAsString();
  if (S.empty())
    return;

  SmallVector<StringRef, 8> ListAttr;
  S.split(ListAttr, ",");

  for (auto &S : SetVector<StringRef>(ListAttr.begin(), ListAttr.end())) {
#ifndef NDEBUG
    LLVM_DEBUG(dbgs() << "VFABI: adding mapping '" << S << "'\n");
    Optional<VFInfo> Info = VFABI::tryDemangleForVFABI(S, *(CI.getModule()));
    assert(Info.hasValue() && "Invalid name for a VFABI variant.");
    assert(CI.getModule()->getFunction(Info.getValue().VectorName) &&
           "Vector function is missing.");
#endif
    VariantMappings.push_back(std::string(S));
  }
}

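/// Illustrative example: a shape in which parameter 1 is OMP_LinearPos with
/// LinearStepOrPos = 0 is valid only if parameter 0 is marked OMP_Uniform,
/// and at most one parameter may be the GlobalPredicate.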
bool VFShape::hasValidParameterList() const {
  for (unsigned Pos = 0, NumParams = Parameters.size(); Pos < NumParams;
       ++Pos) {
    assert(Parameters[Pos].ParamPos == Pos && "Broken parameter list.");

    switch (Parameters[Pos].ParamKind) {
    default: // Nothing to check.
      break;
    case VFParamKind::OMP_Linear:
    case VFParamKind::OMP_LinearRef:
    case VFParamKind::OMP_LinearVal:
    case VFParamKind::OMP_LinearUVal:
      // Compile time linear steps must be non-zero.
      if (Parameters[Pos].LinearStepOrPos == 0)
        return false;
      break;
    case VFParamKind::OMP_LinearPos:
    case VFParamKind::OMP_LinearRefPos:
    case VFParamKind::OMP_LinearValPos:
    case VFParamKind::OMP_LinearUValPos:
      // The runtime linear step must refer to some other
      // parameter in the signature.
      if (Parameters[Pos].LinearStepOrPos >= int(NumParams))
        return false;
      // The linear step parameter must be marked as uniform.
      if (Parameters[Parameters[Pos].LinearStepOrPos].ParamKind !=
          VFParamKind::OMP_Uniform)
        return false;
      // The linear step parameter can't point at itself.
      if (Parameters[Pos].LinearStepOrPos == int(Pos))
        return false;
      break;
    case VFParamKind::GlobalPredicate:
      // The global predicate must be unique. It can be placed anywhere in the
      // signature.
      for (unsigned NextPos = Pos + 1; NextPos < NumParams; ++NextPos)
        if (Parameters[NextPos].ParamKind == VFParamKind::GlobalPredicate)
          return false;
      break;
    }
  }
  return true;
}