//===----------- VectorUtils.cpp - Vectorizer utility functions -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines vectorizer utilities.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/VectorUtils.h"
#include "llvm/ADT/EquivalenceClasses.h"
#include "llvm/Analysis/DemandedBits.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/CommandLine.h"

#define DEBUG_TYPE "vectorutils"

using namespace llvm;
using namespace llvm::PatternMatch;

/// Maximum factor for an interleaved memory access.
static cl::opt<unsigned> MaxInterleaveGroupFactor(
    "max-interleave-group-factor", cl::Hidden,
    cl::desc("Maximum factor for an interleaved access group (default = 8)"),
    cl::init(8));

/// Return true if all of the intrinsic's arguments and return type are scalars
/// for the scalar form of the intrinsic, and vectors for the vector form of the
/// intrinsic (except operands that are marked as always being scalar by
/// isVectorIntrinsicWithScalarOpAtArg).
bool llvm::isTriviallyVectorizable(Intrinsic::ID ID) {
  switch (ID) {
  case Intrinsic::abs: // Begin integer bit-manipulation.
  case Intrinsic::bswap:
  case Intrinsic::bitreverse:
  case Intrinsic::ctpop:
  case Intrinsic::ctlz:
  case Intrinsic::cttz:
  case Intrinsic::fshl:
  case Intrinsic::fshr:
  case Intrinsic::smax:
  case Intrinsic::smin:
  case Intrinsic::umax:
  case Intrinsic::umin:
  case Intrinsic::sadd_sat:
  case Intrinsic::ssub_sat:
  case Intrinsic::uadd_sat:
  case Intrinsic::usub_sat:
  case Intrinsic::smul_fix:
  case Intrinsic::smul_fix_sat:
  case Intrinsic::umul_fix:
  case Intrinsic::umul_fix_sat:
  case Intrinsic::sqrt: // Begin floating-point.
  case Intrinsic::sin:
  case Intrinsic::cos:
  case Intrinsic::exp:
  case Intrinsic::exp2:
  case Intrinsic::log:
  case Intrinsic::log10:
  case Intrinsic::log2:
  case Intrinsic::fabs:
  case Intrinsic::minnum:
  case Intrinsic::maxnum:
  case Intrinsic::minimum:
  case Intrinsic::maximum:
  case Intrinsic::copysign:
  case Intrinsic::floor:
  case Intrinsic::ceil:
  case Intrinsic::trunc:
  case Intrinsic::rint:
  case Intrinsic::nearbyint:
  case Intrinsic::round:
  case Intrinsic::roundeven:
  case Intrinsic::pow:
  case Intrinsic::fma:
  case Intrinsic::fmuladd:
  case Intrinsic::powi:
  case Intrinsic::canonicalize:
  case Intrinsic::fptosi_sat:
  case Intrinsic::fptoui_sat:
    return true;
  default:
    return false;
  }
}

/// Identifies if the vector form of the intrinsic has a scalar operand.
bool llvm::isVectorIntrinsicWithScalarOpAtArg(Intrinsic::ID ID,
                                              unsigned ScalarOpdIdx) {
  switch (ID) {
  case Intrinsic::abs:
  case Intrinsic::ctlz:
  case Intrinsic::cttz:
  case Intrinsic::powi:
    return (ScalarOpdIdx == 1);
  case Intrinsic::smul_fix:
  case Intrinsic::smul_fix_sat:
  case Intrinsic::umul_fix:
  case Intrinsic::umul_fix_sat:
    return (ScalarOpdIdx == 2);
  default:
    return false;
  }
}
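
// For example, in the vector form of a fixed-point multiply such as
//   %r = call <4 x i32> @llvm.smul.fix.v4i32(<4 x i32> %a, <4 x i32> %b, i32 3)
// operand 2 (the scale) stays scalar, so
// isVectorIntrinsicWithScalarOpAtArg(Intrinsic::smul_fix, 2) returns true.
// The same holds for the abs/ctlz/cttz flag and the powi exponent at index 1.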

bool llvm::isVectorIntrinsicWithOverloadTypeAtArg(Intrinsic::ID ID,
                                                  unsigned OpdIdx) {
  switch (ID) {
  case Intrinsic::fptosi_sat:
  case Intrinsic::fptoui_sat:
    return OpdIdx == 0;
  case Intrinsic::powi:
    return OpdIdx == 1;
  default:
    return false;
  }
}
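
// For example, @llvm.powi.v4f32.i32 is overloaded both on its result type and
// on the type of its integer exponent (operand 1), which is why powi reports
// OpdIdx == 1 above, while fptosi_sat/fptoui_sat are overloaded on their
// source operand (OpdIdx == 0) as well as the result.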

/// Returns intrinsic ID for call.
/// For the given call instruction it finds the corresponding intrinsic and
/// returns its ID; if no mapping is found, it returns not_intrinsic.
Intrinsic::ID llvm::getVectorIntrinsicIDForCall(const CallInst *CI,
                                                const TargetLibraryInfo *TLI) {
  Intrinsic::ID ID = getIntrinsicForCallSite(*CI, TLI);
  if (ID == Intrinsic::not_intrinsic)
    return Intrinsic::not_intrinsic;

  if (isTriviallyVectorizable(ID) || ID == Intrinsic::lifetime_start ||
      ID == Intrinsic::lifetime_end || ID == Intrinsic::assume ||
      ID == Intrinsic::experimental_noalias_scope_decl ||
      ID == Intrinsic::sideeffect || ID == Intrinsic::pseudoprobe)
    return ID;
  return Intrinsic::not_intrinsic;
}

/// Find the operand of the GEP that should be checked for consecutive
/// stores. This ignores trailing indices that have no effect on the final
/// pointer.
unsigned llvm::getGEPInductionOperand(const GetElementPtrInst *Gep) {
  const DataLayout &DL = Gep->getModule()->getDataLayout();
  unsigned LastOperand = Gep->getNumOperands() - 1;
  TypeSize GEPAllocSize = DL.getTypeAllocSize(Gep->getResultElementType());

  // Walk backwards and try to peel off zeros.
  while (LastOperand > 1 && match(Gep->getOperand(LastOperand), m_Zero())) {
    // Find the type we're currently indexing into.
    gep_type_iterator GEPTI = gep_type_begin(Gep);
    std::advance(GEPTI, LastOperand - 2);

    // If it's a type with the same allocation size as the result of the GEP we
    // can peel off the zero index.
    if (DL.getTypeAllocSize(GEPTI.getIndexedType()) != GEPAllocSize)
      break;
    --LastOperand;
  }

  return LastOperand;
}
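
// For example, for %p = getelementptr { i32 }, ptr %base, i64 %i, i32 0, the
// trailing zero index is peeled off (the struct { i32 } and the i32 result
// have the same alloc size), so the returned induction operand is 1, i.e. %i.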

/// If the argument is a GEP, then returns the operand identified by
/// getGEPInductionOperand. However, if there is some other non-loop-invariant
/// operand, it returns that instead.
Value *llvm::stripGetElementPtr(Value *Ptr, ScalarEvolution *SE, Loop *Lp) {
  GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr);
  if (!GEP)
    return Ptr;

  unsigned InductionOperand = getGEPInductionOperand(GEP);

  // Check that all of the gep indices are uniform except for our induction
  // operand.
  for (unsigned i = 0, e = GEP->getNumOperands(); i != e; ++i)
    if (i != InductionOperand &&
        !SE->isLoopInvariant(SE->getSCEV(GEP->getOperand(i)), Lp))
      return Ptr;
  return GEP->getOperand(InductionOperand);
}

/// If a value has only one user that is a CastInst, return it.
Value *llvm::getUniqueCastUse(Value *Ptr, Loop *Lp, Type *Ty) {
  Value *UniqueCast = nullptr;
  for (User *U : Ptr->users()) {
    CastInst *CI = dyn_cast<CastInst>(U);
    if (CI && CI->getType() == Ty) {
      if (!UniqueCast)
        UniqueCast = CI;
      else
        return nullptr;
    }
  }
  return UniqueCast;
}

/// Get the stride of a pointer access in a loop. Looks for symbolic
/// strides "a[i*stride]". Returns the symbolic stride, or null otherwise.
Value *llvm::getStrideFromPointer(Value *Ptr, ScalarEvolution *SE, Loop *Lp) {
  auto *PtrTy = dyn_cast<PointerType>(Ptr->getType());
  if (!PtrTy || PtrTy->isAggregateType())
    return nullptr;

  // Try to remove a gep instruction to make the pointer (actually the index at
  // this point) easier to analyze. If OrigPtr is equal to Ptr we are analyzing
  // the pointer; otherwise, we are analyzing the index.
  Value *OrigPtr = Ptr;

  // The size of the pointer access.
  int64_t PtrAccessSize = 1;

  Ptr = stripGetElementPtr(Ptr, SE, Lp);
  const SCEV *V = SE->getSCEV(Ptr);

  if (Ptr != OrigPtr)
    // Strip off casts.
    while (const SCEVIntegralCastExpr *C = dyn_cast<SCEVIntegralCastExpr>(V))
      V = C->getOperand();

  const SCEVAddRecExpr *S = dyn_cast<SCEVAddRecExpr>(V);
  if (!S)
    return nullptr;

  V = S->getStepRecurrence(*SE);
  if (!V)
    return nullptr;

  // Strip off the size of access multiplication if we are still analyzing the
  // pointer.
  if (OrigPtr == Ptr) {
    if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(V)) {
      if (M->getOperand(0)->getSCEVType() != scConstant)
        return nullptr;

      const APInt &APStepVal = cast<SCEVConstant>(M->getOperand(0))->getAPInt();

      // Huge step value - give up.
      if (APStepVal.getBitWidth() > 64)
        return nullptr;

      int64_t StepVal = APStepVal.getSExtValue();
      if (PtrAccessSize != StepVal)
        return nullptr;
      V = M->getOperand(1);
    }
  }

  // Strip off casts.
  Type *StripedOffRecurrenceCast = nullptr;
  if (const SCEVIntegralCastExpr *C = dyn_cast<SCEVIntegralCastExpr>(V)) {
    StripedOffRecurrenceCast = C->getType();
    V = C->getOperand();
  }

  // Look for the loop invariant symbolic value.
  const SCEVUnknown *U = dyn_cast<SCEVUnknown>(V);
  if (!U)
    return nullptr;

  Value *Stride = U->getValue();
  if (!Lp->isLoopInvariant(Stride))
    return nullptr;

  // If we have stripped off the recurrence cast we have to make sure that we
  // return the value that is used in this loop so that we can replace it later.
  if (StripedOffRecurrenceCast)
    Stride = getUniqueCastUse(Stride, Lp, StripedOffRecurrenceCast);

  return Stride;
}
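
// For example, for an access A[i * Stride] the pointer is first reduced by
// stripGetElementPtr to the index expression, whose SCEV is the recurrence
// {0,+,%Stride}; the step is then the SCEVUnknown %Stride, which is returned
// provided it is invariant in Lp.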

/// Given a vector and an element number, see if the scalar value is
/// already around as a register, for example if it were inserted then extracted
/// from the vector.
Value *llvm::findScalarElement(Value *V, unsigned EltNo) {
  assert(V->getType()->isVectorTy() && "Not looking at a vector?");
  VectorType *VTy = cast<VectorType>(V->getType());
  // For a fixed-length vector, return undef for an out-of-range access.
  if (auto *FVTy = dyn_cast<FixedVectorType>(VTy)) {
    unsigned Width = FVTy->getNumElements();
    if (EltNo >= Width)
      return UndefValue::get(FVTy->getElementType());
  }

  if (Constant *C = dyn_cast<Constant>(V))
    return C->getAggregateElement(EltNo);

  if (InsertElementInst *III = dyn_cast<InsertElementInst>(V)) {
    // If this is an insert to a variable element, we don't know what it is.
    if (!isa<ConstantInt>(III->getOperand(2)))
      return nullptr;
    unsigned IIElt = cast<ConstantInt>(III->getOperand(2))->getZExtValue();

    // If this is an insert to the element we are looking for, return the
    // inserted value.
    if (EltNo == IIElt)
      return III->getOperand(1);

    // Guard against infinite loop on malformed, unreachable IR.
    if (III == III->getOperand(0))
      return nullptr;

    // Otherwise, the insertelement doesn't modify the value, recurse on its
    // vector input.
    return findScalarElement(III->getOperand(0), EltNo);
  }

  ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(V);
  // Restrict the following transformation to fixed-length vectors.
  if (SVI && isa<FixedVectorType>(SVI->getType())) {
    unsigned LHSWidth =
        cast<FixedVectorType>(SVI->getOperand(0)->getType())->getNumElements();
    int InEl = SVI->getMaskValue(EltNo);
    if (InEl < 0)
      return UndefValue::get(VTy->getElementType());
    if (InEl < (int)LHSWidth)
      return findScalarElement(SVI->getOperand(0), InEl);
    return findScalarElement(SVI->getOperand(1), InEl - LHSWidth);
  }

  // Extract a value from a vector add operation with a constant zero.
  // TODO: Use getBinOpIdentity() to generalize this.
  Value *Val; Constant *C;
  if (match(V, m_Add(m_Value(Val), m_Constant(C))))
    if (Constant *Elt = C->getAggregateElement(EltNo))
      if (Elt->isNullValue())
        return findScalarElement(Val, EltNo);

  // If the vector is a splat then we can trivially find the scalar element.
  if (isa<ScalableVectorType>(VTy))
    if (Value *Splat = getSplatValue(V))
      if (EltNo < VTy->getElementCount().getKnownMinValue())
        return Splat;

  // Otherwise, we don't know.
  return nullptr;
}
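
// For example, given
//   %v = insertelement <4 x float> %w, float %x, i32 2
// findScalarElement(%v, 2) returns %x, while findScalarElement(%v, 0)
// recurses into the vector input %w.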

int llvm::getSplatIndex(ArrayRef<int> Mask) {
  int SplatIndex = -1;
  for (int M : Mask) {
    // Ignore invalid (undefined) mask elements.
    if (M < 0)
      continue;

    // There can be only 1 non-negative mask element value if this is a splat.
    if (SplatIndex != -1 && SplatIndex != M)
      return -1;

    // Initialize the splat index to the 1st non-negative mask element.
    SplatIndex = M;
  }
  assert((SplatIndex == -1 || SplatIndex >= 0) && "Negative index?");
  return SplatIndex;
}
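
// For example, getSplatIndex({2, -1, 2, 2}) == 2 (undef elements are
// ignored), whereas a mask mixing two distinct non-negative values such as
// {0, 1, 0, 1} yields -1.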

/// Get splat value if the input is a splat vector or return nullptr.
/// This function is not fully general. It checks only 2 cases:
/// the input value is (1) a splat constant vector or (2) a sequence
/// of instructions that broadcasts a scalar at element 0.
Value *llvm::getSplatValue(const Value *V) {
  if (isa<VectorType>(V->getType()))
    if (auto *C = dyn_cast<Constant>(V))
      return C->getSplatValue();

  // shuf (inselt ?, Splat, 0), ?, <0, undef, 0, ...>
  Value *Splat;
  if (match(V,
            m_Shuffle(m_InsertElt(m_Value(), m_Value(Splat), m_ZeroInt()),
                      m_Value(), m_ZeroMask())))
    return Splat;

  return nullptr;
}
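
// For example, the canonical broadcast pattern
//   %ins = insertelement <4 x i32> poison, i32 %s, i64 0
//   %splat = shufflevector <4 x i32> %ins, <4 x i32> poison,
//                          <4 x i32> zeroinitializer
// is matched above, returning the scalar %s.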

bool llvm::isSplatValue(const Value *V, int Index, unsigned Depth) {
  assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");

  if (isa<VectorType>(V->getType())) {
    if (isa<UndefValue>(V))
      return true;
    // FIXME: We can allow undefs, but if Index was specified, we may want to
    //        check that the constant is defined at that index.
    if (auto *C = dyn_cast<Constant>(V))
      return C->getSplatValue() != nullptr;
  }

  if (auto *Shuf = dyn_cast<ShuffleVectorInst>(V)) {
    // FIXME: We can safely allow undefs here. If Index was specified, we will
    //        check that the mask elt is defined at the required index.
    if (!all_equal(Shuf->getShuffleMask()))
      return false;

    // Match any index.
    if (Index == -1)
      return true;

    // Match a specific element. The mask should be defined at and match the
    // specified index.
    return Shuf->getMaskValue(Index) == Index;
  }

  // The remaining tests are all recursive, so bail out if we hit the limit.
  if (Depth++ == MaxAnalysisRecursionDepth)
    return false;

  // If both operands of a binop are splats, the result is a splat.
  Value *X, *Y, *Z;
  if (match(V, m_BinOp(m_Value(X), m_Value(Y))))
    return isSplatValue(X, Index, Depth) && isSplatValue(Y, Index, Depth);

  // If all operands of a select are splats, the result is a splat.
  if (match(V, m_Select(m_Value(X), m_Value(Y), m_Value(Z))))
    return isSplatValue(X, Index, Depth) && isSplatValue(Y, Index, Depth) &&
           isSplatValue(Z, Index, Depth);

  // TODO: Add support for unary ops (fneg), casts, intrinsics (overflow ops).

  return false;
}
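
// For example, an add of two broadcasts of %a and %b is itself recognized as
// a splat by the binop case above, without materializing the splat value.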

bool llvm::getShuffleDemandedElts(int SrcWidth, ArrayRef<int> Mask,
                                  const APInt &DemandedElts, APInt &DemandedLHS,
                                  APInt &DemandedRHS, bool AllowUndefElts) {
  DemandedLHS = DemandedRHS = APInt::getZero(SrcWidth);

  // Early out if we don't demand any elements.
  if (DemandedElts.isZero())
    return true;

  // Simple case of a shuffle with zeroinitializer.
  if (all_of(Mask, [](int Elt) { return Elt == 0; })) {
    DemandedLHS.setBit(0);
    return true;
  }

  for (unsigned I = 0, E = Mask.size(); I != E; ++I) {
    int M = Mask[I];
    assert((-1 <= M) && (M < (SrcWidth * 2)) &&
           "Invalid shuffle mask constant");

    if (!DemandedElts[I] || (AllowUndefElts && (M < 0)))
      continue;

    // For undef elements, we don't know anything about the common state of
    // the shuffle result.
    if (M < 0)
      return false;

    if (M < SrcWidth)
      DemandedLHS.setBit(M);
    else
      DemandedRHS.setBit(M - SrcWidth);
  }

  return true;
}
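
// For example, for SrcWidth = 4, Mask = <0, 5, 2, 7> and all result elements
// demanded, DemandedLHS ends up as 0b0101 (source elements 0 and 2) and
// DemandedRHS as 0b1010 (elements 1 and 3 of the second source).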

void llvm::narrowShuffleMaskElts(int Scale, ArrayRef<int> Mask,
                                 SmallVectorImpl<int> &ScaledMask) {
  assert(Scale > 0 && "Unexpected scaling factor");

  // Fast-path: if no scaling, then it is just a copy.
  if (Scale == 1) {
    ScaledMask.assign(Mask.begin(), Mask.end());
    return;
  }

  ScaledMask.clear();
  for (int MaskElt : Mask) {
    if (MaskElt >= 0) {
      assert(((uint64_t)Scale * MaskElt + (Scale - 1)) <= INT32_MAX &&
             "Overflowed 32-bits");
    }
    for (int SliceElt = 0; SliceElt != Scale; ++SliceElt)
      ScaledMask.push_back(MaskElt < 0 ? MaskElt : Scale * MaskElt + SliceElt);
  }
}
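
// For example, narrowShuffleMaskElts(2, <0, 2>, ScaledMask) produces
// <0, 1, 4, 5>: each wide element i expands to narrow elements 2*i and
// 2*i+1, while negative (undef/sentinel) elements are replicated unchanged.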

bool llvm::widenShuffleMaskElts(int Scale, ArrayRef<int> Mask,
                                SmallVectorImpl<int> &ScaledMask) {
  assert(Scale > 0 && "Unexpected scaling factor");

  // Fast-path: if no scaling, then it is just a copy.
  if (Scale == 1) {
    ScaledMask.assign(Mask.begin(), Mask.end());
    return true;
  }

  // We must map the original elements down evenly to a type with fewer
  // elements.
  int NumElts = Mask.size();
  if (NumElts % Scale != 0)
    return false;

  ScaledMask.clear();
  ScaledMask.reserve(NumElts / Scale);

  // Step through the input mask by splitting into Scale-sized slices.
  do {
    ArrayRef<int> MaskSlice = Mask.take_front(Scale);
    assert((int)MaskSlice.size() == Scale && "Expected Scale-sized slice.");

    // The first element of the slice determines how we evaluate this slice.
    int SliceFront = MaskSlice.front();
    if (SliceFront < 0) {
      // Negative values (undef or other "sentinel" values) must be equal across
      // the entire slice.
      if (!all_equal(MaskSlice))
        return false;
      ScaledMask.push_back(SliceFront);
    } else {
      // A positive mask element must be cleanly divisible.
      if (SliceFront % Scale != 0)
        return false;
      // Elements of the slice must be consecutive.
      for (int i = 1; i < Scale; ++i)
        if (MaskSlice[i] != SliceFront + i)
          return false;
      ScaledMask.push_back(SliceFront / Scale);
    }
    Mask = Mask.drop_front(Scale);
  } while (!Mask.empty());

  assert((int)ScaledMask.size() * Scale == NumElts && "Unexpected scaled mask");

  // All elements of the original mask can be scaled down to map to the elements
  // of a mask with wider elements.
  return true;
}
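
// For example, widenShuffleMaskElts(2, <0, 1, 4, 5>, ScaledMask) succeeds
// with <0, 2>, while <0, 1, 5, 4> fails because its second slice is neither
// aligned to the scale nor consecutive.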

void llvm::getShuffleMaskWithWidestElts(ArrayRef<int> Mask,
                                        SmallVectorImpl<int> &ScaledMask) {
  std::array<SmallVector<int, 16>, 2> TmpMasks;
  SmallVectorImpl<int> *Output = &TmpMasks[0], *Tmp = &TmpMasks[1];
  ArrayRef<int> InputMask = Mask;
  for (unsigned Scale = 2; Scale <= InputMask.size(); ++Scale) {
    while (widenShuffleMaskElts(Scale, InputMask, *Output)) {
      InputMask = *Output;
      std::swap(Output, Tmp);
    }
  }
  ScaledMask.assign(InputMask.begin(), InputMask.end());
}

void llvm::processShuffleMasks(
    ArrayRef<int> Mask, unsigned NumOfSrcRegs, unsigned NumOfDestRegs,
    unsigned NumOfUsedRegs, function_ref<void()> NoInputAction,
    function_ref<void(ArrayRef<int>, unsigned, unsigned)> SingleInputAction,
    function_ref<void(ArrayRef<int>, unsigned, unsigned)> ManyInputsAction) {
  SmallVector<SmallVector<SmallVector<int>>> Res(NumOfDestRegs);
  // Try to perform better estimation of the permutation.
  // 1. Split the source/destination vectors into real registers.
  // 2. Do the mask analysis to identify which real registers are
  //    permuted.
  int Sz = Mask.size();
  unsigned SzDest = Sz / NumOfDestRegs;
  unsigned SzSrc = Sz / NumOfSrcRegs;
  for (unsigned I = 0; I < NumOfDestRegs; ++I) {
    auto &RegMasks = Res[I];
    RegMasks.assign(NumOfSrcRegs, {});
    // Check which source registers feed the values in this dest
    // register.
    for (unsigned K = 0; K < SzDest; ++K) {
      int Idx = I * SzDest + K;
      if (Idx == Sz)
        break;
      if (Mask[Idx] >= Sz || Mask[Idx] == UndefMaskElem)
        continue;
      int SrcRegIdx = Mask[Idx] / SzSrc;
      // Add a cost of PermuteTwoSrc for each new source register permute,
      // if we have more than one source register.
      if (RegMasks[SrcRegIdx].empty())
        RegMasks[SrcRegIdx].assign(SzDest, UndefMaskElem);
      RegMasks[SrcRegIdx][K] = Mask[Idx] % SzSrc;
    }
  }
  // Process split mask.
  for (unsigned I = 0; I < NumOfUsedRegs; ++I) {
    auto &Dest = Res[I];
    int NumSrcRegs =
        count_if(Dest, [](ArrayRef<int> Mask) { return !Mask.empty(); });
    switch (NumSrcRegs) {
    case 0:
      // No input vectors were used!
      NoInputAction();
      break;
    case 1: {
      // Find the only non-empty source register mask.
      auto *It =
          find_if(Dest, [](ArrayRef<int> Mask) { return !Mask.empty(); });
      unsigned SrcReg = std::distance(Dest.begin(), It);
      SingleInputAction(*It, SrcReg, I);
      break;
    }
    default: {
      // The first mask is a permutation of a single register. Since we have
      // two or more input registers to shuffle, we merge the masks for the
      // first 2 registers and generate a shuffle of 2 registers rather than
      // the reordering of the first register followed by a shuffle with the
      // second register. Next, generate the shuffles of the resulting
      // register + the remaining registers from the list.
      auto &&CombineMasks = [](MutableArrayRef<int> FirstMask,
                               ArrayRef<int> SecondMask) {
        for (int Idx = 0, VF = FirstMask.size(); Idx < VF; ++Idx) {
          if (SecondMask[Idx] != UndefMaskElem) {
            assert(FirstMask[Idx] == UndefMaskElem &&
                   "Expected undefined mask element.");
            FirstMask[Idx] = SecondMask[Idx] + VF;
          }
        }
      };
      auto &&NormalizeMask = [](MutableArrayRef<int> Mask) {
        for (int Idx = 0, VF = Mask.size(); Idx < VF; ++Idx) {
          if (Mask[Idx] != UndefMaskElem)
            Mask[Idx] = Idx;
        }
      };
      int SecondIdx;
      do {
        int FirstIdx = -1;
        SecondIdx = -1;
        MutableArrayRef<int> FirstMask, SecondMask;
        for (unsigned I = 0; I < NumOfDestRegs; ++I) {
          SmallVectorImpl<int> &RegMask = Dest[I];
          if (RegMask.empty())
            continue;

          if (FirstIdx == SecondIdx) {
            FirstIdx = I;
            FirstMask = RegMask;
            continue;
          }
          SecondIdx = I;
          SecondMask = RegMask;
          CombineMasks(FirstMask, SecondMask);
          ManyInputsAction(FirstMask, FirstIdx, SecondIdx);
          NormalizeMask(FirstMask);
          RegMask.clear();
          SecondMask = FirstMask;
          SecondIdx = FirstIdx;
        }
        if (FirstIdx != SecondIdx && SecondIdx >= 0) {
          CombineMasks(SecondMask, FirstMask);
          ManyInputsAction(SecondMask, SecondIdx, FirstIdx);
          Dest[FirstIdx].clear();
          NormalizeMask(SecondMask);
        }
      } while (SecondIdx >= 0);
      break;
    }
    }
  }
}
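
// For example, with NumOfSrcRegs == NumOfDestRegs == 2 and a mask whose first
// destination register reads only from source register 1, the loop above
// invokes SingleInputAction with that sub-mask; a destination register fed by
// both sources instead goes through ManyInputsAction.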

MapVector<Instruction *, uint64_t>
llvm::computeMinimumValueSizes(ArrayRef<BasicBlock *> Blocks, DemandedBits &DB,
                               const TargetTransformInfo *TTI) {

  // DemandedBits will give us every value's live-out bits. But we want
  // to ensure no extra casts would need to be inserted, so every DAG
  // of connected values must have the same minimum bitwidth.
  EquivalenceClasses<Value *> ECs;
  SmallVector<Value *, 16> Worklist;
  SmallPtrSet<Value *, 4> Roots;
  SmallPtrSet<Value *, 16> Visited;
  DenseMap<Value *, uint64_t> DBits;
  SmallPtrSet<Instruction *, 4> InstructionSet;
  MapVector<Instruction *, uint64_t> MinBWs;

  // Determine the roots. We work bottom-up, from truncs or icmps.
  bool SeenExtFromIllegalType = false;
  for (auto *BB : Blocks)
    for (auto &I : *BB) {
      InstructionSet.insert(&I);

      if (TTI && (isa<ZExtInst>(&I) || isa<SExtInst>(&I)) &&
          !TTI->isTypeLegal(I.getOperand(0)->getType()))
        SeenExtFromIllegalType = true;

      // Only deal with non-vector integers up to 64 bits wide.
      if ((isa<TruncInst>(&I) || isa<ICmpInst>(&I)) &&
          !I.getType()->isVectorTy() &&
          I.getOperand(0)->getType()->getScalarSizeInBits() <= 64) {
        // Don't make work for ourselves. If we know the loaded type is legal,
        // don't add it to the worklist.
        if (TTI && isa<TruncInst>(&I) && TTI->isTypeLegal(I.getType()))
          continue;

        Worklist.push_back(&I);
        Roots.insert(&I);
      }
    }
  // Early exit.
  if (Worklist.empty() || (TTI && !SeenExtFromIllegalType))
    return MinBWs;

  // Now proceed breadth-first, unioning values together.
  while (!Worklist.empty()) {
    Value *Val = Worklist.pop_back_val();
    Value *Leader = ECs.getOrInsertLeaderValue(Val);

    if (!Visited.insert(Val).second)
      continue;

    // Non-instructions terminate a chain successfully.
    if (!isa<Instruction>(Val))
      continue;
    Instruction *I = cast<Instruction>(Val);

    // If we encounter a type that is larger than 64 bits, we can't represent
    // it so bail out.
    if (DB.getDemandedBits(I).getBitWidth() > 64)
      return MapVector<Instruction *, uint64_t>();

    uint64_t V = DB.getDemandedBits(I).getZExtValue();
    DBits[Leader] |= V;
    DBits[I] = V;

    // Casts, loads and instructions outside of our range terminate a chain
    // successfully.
    if (isa<SExtInst>(I) || isa<ZExtInst>(I) || isa<LoadInst>(I) ||
        !InstructionSet.count(I))
      continue;

    // Unsafe casts terminate a chain unsuccessfully. We can't do anything
    // useful with bitcasts, ptrtoints or inttoptrs and it'd be unsafe to
    // transform anything that relies on them.
    if (isa<BitCastInst>(I) || isa<PtrToIntInst>(I) || isa<IntToPtrInst>(I) ||
        !I->getType()->isIntegerTy()) {
      DBits[Leader] |= ~0ULL;
      continue;
    }

    // We don't modify the types of PHIs. Reductions will already have been
    // truncated if possible, and inductions' sizes will have been chosen by
    // indvars.
    if (isa<PHINode>(I))
      continue;

    if (DBits[Leader] == ~0ULL)
      // All bits demanded, no point continuing.
      continue;

    for (Value *O : cast<User>(I)->operands()) {
      ECs.unionSets(Leader, O);
      Worklist.push_back(O);
    }
  }

  // Now we've discovered all values, walk them to see if there are
  // any users we didn't see. If there are, we can't optimize that
  // chain.
  for (auto &I : DBits)
    for (auto *U : I.first->users())
      if (U->getType()->isIntegerTy() && DBits.count(U) == 0)
        DBits[ECs.getOrInsertLeaderValue(I.first)] |= ~0ULL;

  for (auto I = ECs.begin(), E = ECs.end(); I != E; ++I) {
    uint64_t LeaderDemandedBits = 0;
    for (Value *M : llvm::make_range(ECs.member_begin(I), ECs.member_end()))
      LeaderDemandedBits |= DBits[M];

    uint64_t MinBW = (sizeof(LeaderDemandedBits) * 8) -
                     llvm::countLeadingZeros(LeaderDemandedBits);
    // Round up to a power of 2.
    if (!isPowerOf2_64((uint64_t)MinBW))
      MinBW = NextPowerOf2(MinBW);

    // We don't modify the types of PHIs. Reductions will already have been
    // truncated if possible, and inductions' sizes will have been chosen by
    // indvars.
    // If we are required to shrink a PHI, abandon this entire equivalence
    // class.
    bool Abort = false;
    for (Value *M : llvm::make_range(ECs.member_begin(I), ECs.member_end()))
      if (isa<PHINode>(M) && MinBW < M->getType()->getScalarSizeInBits()) {
        Abort = true;
        break;
      }
    if (Abort)
      continue;

    for (Value *M : llvm::make_range(ECs.member_begin(I), ECs.member_end())) {
      if (!isa<Instruction>(M))
        continue;
      Type *Ty = M->getType();
      if (Roots.count(M))
        Ty = cast<Instruction>(M)->getOperand(0)->getType();
      if (MinBW < Ty->getScalarSizeInBits())
        MinBWs[cast<Instruction>(M)] = MinBW;
    }
  }

  return MinBWs;
}
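
// For example, in a chain "%e = zext i8 %x to i32; %a = add i32 %e, 1;
// %t = trunc i32 %a to i8" the demanded bits of every member are confined to
// the low 8 bits, so each instruction in the equivalence class ends up with
// MinBWs[I] = 8.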

/// Add all access groups in @p AccGroups to @p List.
template <typename ListT>
static void addToAccessGroupList(ListT &List, MDNode *AccGroups) {
  // Interpret an access group as a list containing itself.
  if (AccGroups->getNumOperands() == 0) {
    assert(isValidAsAccessGroup(AccGroups) && "Node must be an access group");
    List.insert(AccGroups);
    return;
  }

  for (const auto &AccGroupListOp : AccGroups->operands()) {
    auto *Item = cast<MDNode>(AccGroupListOp.get());
    assert(isValidAsAccessGroup(Item) && "List item must be an access group");
    List.insert(Item);
  }
}

MDNode *llvm::uniteAccessGroups(MDNode *AccGroups1, MDNode *AccGroups2) {
  if (!AccGroups1)
    return AccGroups2;
  if (!AccGroups2)
    return AccGroups1;
  if (AccGroups1 == AccGroups2)
    return AccGroups1;

  SmallSetVector<Metadata *, 4> Union;
  addToAccessGroupList(Union, AccGroups1);
  addToAccessGroupList(Union, AccGroups2);

  if (Union.size() == 0)
    return nullptr;
  if (Union.size() == 1)
    return cast<MDNode>(Union.front());

  LLVMContext &Ctx = AccGroups1->getContext();
  return MDNode::get(Ctx, Union.getArrayRef());
}

MDNode *llvm::intersectAccessGroups(const Instruction *Inst1,
                                    const Instruction *Inst2) {
  bool MayAccessMem1 = Inst1->mayReadOrWriteMemory();
  bool MayAccessMem2 = Inst2->mayReadOrWriteMemory();

  if (!MayAccessMem1 && !MayAccessMem2)
    return nullptr;
  if (!MayAccessMem1)
    return Inst2->getMetadata(LLVMContext::MD_access_group);
  if (!MayAccessMem2)
    return Inst1->getMetadata(LLVMContext::MD_access_group);

  MDNode *MD1 = Inst1->getMetadata(LLVMContext::MD_access_group);
  MDNode *MD2 = Inst2->getMetadata(LLVMContext::MD_access_group);
  if (!MD1 || !MD2)
    return nullptr;
  if (MD1 == MD2)
    return MD1;

  // Use set for scalable 'contains' check.
  SmallPtrSet<Metadata *, 4> AccGroupSet2;
  addToAccessGroupList(AccGroupSet2, MD2);

  SmallVector<Metadata *, 4> Intersection;
  if (MD1->getNumOperands() == 0) {
    assert(isValidAsAccessGroup(MD1) && "Node must be an access group");
    if (AccGroupSet2.count(MD1))
      Intersection.push_back(MD1);
  } else {
    for (const MDOperand &Node : MD1->operands()) {
      auto *Item = cast<MDNode>(Node.get());
      assert(isValidAsAccessGroup(Item) && "List item must be an access group");
      if (AccGroupSet2.count(Item))
        Intersection.push_back(Item);
    }
  }

  if (Intersection.size() == 0)
    return nullptr;
  if (Intersection.size() == 1)
    return cast<MDNode>(Intersection.front());

  LLVMContext &Ctx = Inst1->getContext();
  return MDNode::get(Ctx, Intersection);
}

/// \returns \p Inst after propagating metadata from \p VL.
Instruction *llvm::propagateMetadata(Instruction *Inst, ArrayRef<Value *> VL) {
  if (VL.empty())
    return Inst;
  Instruction *I0 = cast<Instruction>(VL[0]);
  SmallVector<std::pair<unsigned, MDNode *>, 4> Metadata;
  I0->getAllMetadataOtherThanDebugLoc(Metadata);

  for (auto Kind : {LLVMContext::MD_tbaa, LLVMContext::MD_alias_scope,
                    LLVMContext::MD_noalias, LLVMContext::MD_fpmath,
                    LLVMContext::MD_nontemporal, LLVMContext::MD_invariant_load,
                    LLVMContext::MD_access_group}) {
    MDNode *MD = I0->getMetadata(Kind);

    for (int J = 1, E = VL.size(); MD && J != E; ++J) {
      const Instruction *IJ = cast<Instruction>(VL[J]);
      MDNode *IMD = IJ->getMetadata(Kind);
      switch (Kind) {
      case LLVMContext::MD_tbaa:
        MD = MDNode::getMostGenericTBAA(MD, IMD);
        break;
      case LLVMContext::MD_alias_scope:
        MD = MDNode::getMostGenericAliasScope(MD, IMD);
        break;
      case LLVMContext::MD_fpmath:
        MD = MDNode::getMostGenericFPMath(MD, IMD);
        break;
      case LLVMContext::MD_noalias:
      case LLVMContext::MD_nontemporal:
      case LLVMContext::MD_invariant_load:
        MD = MDNode::intersect(MD, IMD);
        break;
      case LLVMContext::MD_access_group:
        MD = intersectAccessGroups(Inst, IJ);
        break;
      default:
        llvm_unreachable("unhandled metadata");
      }
    }

    Inst->setMetadata(Kind, MD);
  }

  return Inst;
}

Constant *
llvm::createBitMaskForGaps(IRBuilderBase &Builder, unsigned VF,
                           const InterleaveGroup<Instruction> &Group) {
  // All 1's means mask is not needed.
  if (Group.getNumMembers() == Group.getFactor())
    return nullptr;

  // TODO: support reversed access.
  assert(!Group.isReverse() && "Reversed group not supported.");

  SmallVector<Constant *, 16> Mask;
  for (unsigned i = 0; i < VF; i++)
    for (unsigned j = 0; j < Group.getFactor(); ++j) {
      unsigned HasMember = Group.getMember(j) ? 1 : 0;
      Mask.push_back(Builder.getInt1(HasMember));
    }

  return ConstantVector::get(Mask);
}
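
// For example, for VF = 2 and a factor-4 group with members only at indices 0
// and 2, the returned mask is <1, 0, 1, 0, 1, 0, 1, 0>.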

llvm::SmallVector<int, 16>
llvm::createReplicatedMask(unsigned ReplicationFactor, unsigned VF) {
  SmallVector<int, 16> MaskVec;
  for (unsigned i = 0; i < VF; i++)
    for (unsigned j = 0; j < ReplicationFactor; j++)
      MaskVec.push_back(i);

  return MaskVec;
}
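
// For example, createReplicatedMask(3, 2) returns <0, 0, 0, 1, 1, 1>: each of
// the VF = 2 source elements is replicated 3 times.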

llvm::SmallVector<int, 16> llvm::createInterleaveMask(unsigned VF,
                                                      unsigned NumVecs) {
  SmallVector<int, 16> Mask;
  for (unsigned i = 0; i < VF; i++)
    for (unsigned j = 0; j < NumVecs; j++)
      Mask.push_back(j * VF + i);

  return Mask;
}
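
// For example, createInterleaveMask(4, 2) returns <0, 4, 1, 5, 2, 6, 3, 7>,
// interleaving the lanes of two 4-element vectors.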

llvm::SmallVector<int, 16>
llvm::createStrideMask(unsigned Start, unsigned Stride, unsigned VF) {
  SmallVector<int, 16> Mask;
  for (unsigned i = 0; i < VF; i++)
    Mask.push_back(Start + i * Stride);

  return Mask;
}
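
// For example, createStrideMask(0, 2, 4) returns <0, 2, 4, 6>, selecting
// every other element starting at index 0.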

llvm::SmallVector<int, 16> llvm::createSequentialMask(unsigned Start,
                                                      unsigned NumInts,
                                                      unsigned NumUndefs) {
  SmallVector<int, 16> Mask;
  for (unsigned i = 0; i < NumInts; i++)
    Mask.push_back(Start + i);

  for (unsigned i = 0; i < NumUndefs; i++)
    Mask.push_back(-1);

  return Mask;
}
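
// For example, createSequentialMask(0, 4, 2) returns <0, 1, 2, 3, -1, -1>,
// i.e. four sequential elements followed by two undefs.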

llvm::SmallVector<int, 16> llvm::createUnaryMask(ArrayRef<int> Mask,
                                                 unsigned NumElts) {
  // Avoid casts in the loop and make sure we have a reasonable number.
  int NumEltsSigned = NumElts;
  assert(NumEltsSigned > 0 && "Expected a positive element count");

  // If the mask chooses an element from operand 1, reduce it to choose from the
  // corresponding element of operand 0. Undef mask elements are unchanged.
  SmallVector<int, 16> UnaryMask;
  for (int MaskElt : Mask) {
    assert((MaskElt < NumEltsSigned * 2) && "Expected valid shuffle mask");
    int UnaryElt = MaskElt >= NumEltsSigned ? MaskElt - NumEltsSigned : MaskElt;
    UnaryMask.push_back(UnaryElt);
  }
  return UnaryMask;
}
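
// For example, for NumElts = 4 the two-operand mask <0, 5, 2, 7> becomes the
// unary mask <0, 1, 2, 3>.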

/// A helper function for concatenating vectors. This function concatenates two
/// vectors having the same element type. If the second vector has fewer
/// elements than the first, it is padded with undefs.
static Value *concatenateTwoVectors(IRBuilderBase &Builder, Value *V1,
                                    Value *V2) {
  VectorType *VecTy1 = dyn_cast<VectorType>(V1->getType());
  VectorType *VecTy2 = dyn_cast<VectorType>(V2->getType());
  assert(VecTy1 && VecTy2 &&
         VecTy1->getScalarType() == VecTy2->getScalarType() &&
         "Expect two vectors with the same element type");

  unsigned NumElts1 = cast<FixedVectorType>(VecTy1)->getNumElements();
  unsigned NumElts2 = cast<FixedVectorType>(VecTy2)->getNumElements();
  assert(NumElts1 >= NumElts2 &&
         "Unexpected: the first vector has fewer elements");

  if (NumElts1 > NumElts2) {
    // Extend with UNDEFs.
    V2 = Builder.CreateShuffleVector(
        V2, createSequentialMask(0, NumElts2, NumElts1 - NumElts2));
  }

  return Builder.CreateShuffleVector(
      V1, V2, createSequentialMask(0, NumElts1 + NumElts2, 0));
}

Value *llvm::concatenateVectors(IRBuilderBase &Builder,
                                ArrayRef<Value *> Vecs) {
  unsigned NumVecs = Vecs.size();
  assert(NumVecs > 1 && "Should be at least two vectors");

  SmallVector<Value *, 8> ResList;
  ResList.append(Vecs.begin(), Vecs.end());
  do {
    SmallVector<Value *, 8> TmpList;
    for (unsigned i = 0; i < NumVecs - 1; i += 2) {
      Value *V0 = ResList[i], *V1 = ResList[i + 1];
      assert((V0->getType() == V1->getType() || i == NumVecs - 2) &&
             "Only the last vector may have a different type");

      TmpList.push_back(concatenateTwoVectors(Builder, V0, V1));
    }

    // Push the last vector if the total number of vectors is odd.
    if (NumVecs % 2 != 0)
      TmpList.push_back(ResList[NumVecs - 1]);

    ResList = TmpList;
    NumVecs = ResList.size();
  } while (NumVecs > 1);

  return ResList[0];
}
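
// For example, concatenating three <2 x i32> vectors first pairs the leading
// two into a <4 x i32>, carries the odd vector over, and then (after padding
// it with two undefs) produces the final <6 x i32>.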

bool llvm::maskIsAllZeroOrUndef(Value *Mask) {
  assert(isa<VectorType>(Mask->getType()) &&
         isa<IntegerType>(Mask->getType()->getScalarType()) &&
         cast<IntegerType>(Mask->getType()->getScalarType())->getBitWidth() ==
             1 &&
         "Mask must be a vector of i1");

  auto *ConstMask = dyn_cast<Constant>(Mask);
  if (!ConstMask)
    return false;
  if (ConstMask->isNullValue() || isa<UndefValue>(ConstMask))
    return true;
  if (isa<ScalableVectorType>(ConstMask->getType()))
    return false;
  for (unsigned
           I = 0,
           E = cast<FixedVectorType>(ConstMask->getType())->getNumElements();
       I != E; ++I) {
    if (auto *MaskElt = ConstMask->getAggregateElement(I))
      if (MaskElt->isNullValue() || isa<UndefValue>(MaskElt))
        continue;
    return false;
  }
  return true;
}

bool llvm::maskIsAllOneOrUndef(Value *Mask) {
  assert(isa<VectorType>(Mask->getType()) &&
         isa<IntegerType>(Mask->getType()->getScalarType()) &&
         cast<IntegerType>(Mask->getType()->getScalarType())->getBitWidth() ==
             1 &&
         "Mask must be a vector of i1");

  auto *ConstMask = dyn_cast<Constant>(Mask);
  if (!ConstMask)
    return false;
  if (ConstMask->isAllOnesValue() || isa<UndefValue>(ConstMask))
    return true;
  if (isa<ScalableVectorType>(ConstMask->getType()))
    return false;
  for (unsigned
           I = 0,
           E = cast<FixedVectorType>(ConstMask->getType())->getNumElements();
       I != E; ++I) {
    if (auto *MaskElt = ConstMask->getAggregateElement(I))
      if (MaskElt->isAllOnesValue() || isa<UndefValue>(MaskElt))
        continue;
    return false;
  }
  return true;
}

/// TODO: This is a lot like known bits, but for
/// vectors. Is there something we can common this with?
APInt llvm::possiblyDemandedEltsInMask(Value *Mask) {
  assert(isa<FixedVectorType>(Mask->getType()) &&
         isa<IntegerType>(Mask->getType()->getScalarType()) &&
         cast<IntegerType>(Mask->getType()->getScalarType())->getBitWidth() ==
             1 &&
         "Mask must be a fixed width vector of i1");

  const unsigned VWidth =
      cast<FixedVectorType>(Mask->getType())->getNumElements();
  APInt DemandedElts = APInt::getAllOnes(VWidth);
  if (auto *CV = dyn_cast<ConstantVector>(Mask))
    for (unsigned i = 0; i < VWidth; i++)
      if (CV->getAggregateElement(i)->isNullValue())
        DemandedElts.clearBit(i);
  return DemandedElts;
}
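
// For example, for the constant mask <i1 1, i1 0, i1 undef, i1 1> the result
// is 0b1101: only the known-zero lane is cleared, since an undef lane may
// still be demanded.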

bool InterleavedAccessInfo::isStrided(int Stride) {
  unsigned Factor = std::abs(Stride);
  return Factor >= 2 && Factor <= MaxInterleaveGroupFactor;
}

void InterleavedAccessInfo::collectConstStrideAccesses(
    MapVector<Instruction *, StrideDescriptor> &AccessStrideInfo,
    const ValueToValueMap &Strides) {
  auto &DL = TheLoop->getHeader()->getModule()->getDataLayout();

  // Since it's desired that the load/store instructions be maintained in
  // "program order" for the interleaved access analysis, we have to visit the
  // blocks in the loop in reverse postorder (i.e., in a topological order).
  // Such an ordering will ensure that any load/store that may be executed
  // before a second load/store will precede the second load/store in
  // AccessStrideInfo.
  LoopBlocksDFS DFS(TheLoop);
  DFS.perform(LI);
  for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO()))
    for (auto &I : *BB) {
      Value *Ptr = getLoadStorePointerOperand(&I);
      if (!Ptr)
        continue;
      Type *ElementTy = getLoadStoreType(&I);

      // Currently, codegen doesn't support cases where the type size doesn't
      // match the alloc size. Skip them for now.
      uint64_t Size = DL.getTypeAllocSize(ElementTy);
      if (Size * 8 != DL.getTypeSizeInBits(ElementTy))
        continue;

      // We don't check wrapping here because we don't know yet if Ptr will be
      // part of a full group or a group with gaps. Checking wrapping for all
      // pointers (even those that end up in groups with no gaps) will be overly
      // conservative. For full groups, wrapping should be ok since if we would
      // wrap around the address space we would do a memory access at nullptr
      // even without the transformation. The wrapping checks are therefore
      // deferred until after we've formed the interleaved groups.
      int64_t Stride =
          getPtrStride(PSE, ElementTy, Ptr, TheLoop, Strides,
                       /*Assume=*/true, /*ShouldCheckWrap=*/false).value_or(0);

      const SCEV *Scev = replaceSymbolicStrideSCEV(PSE, Strides, Ptr);
      AccessStrideInfo[&I] = StrideDescriptor(Stride, Scev, Size,
                                              getLoadStoreAlignment(&I));
    }
}

// Analyze interleaved accesses and collect them into interleaved load and
// store groups.
//
// When generating code for an interleaved load group, we effectively hoist all
// loads in the group to the location of the first load in program order. When
// generating code for an interleaved store group, we sink all stores to the
// location of the last store. This code motion can change the order of load
// and store instructions and may break dependences.
//
// The code generation strategy mentioned above ensures that we won't violate
// any write-after-read (WAR) dependences.
//
// E.g., for the WAR dependence:  a = A[i];      // (1)
//                                A[i] = b;      // (2)
//
// The store group of (2) is always inserted at or below (2), and the load
// group of (1) is always inserted at or above (1). Thus, the instructions will
// never be reordered. All other dependences are checked to ensure the
// correctness of the instruction reordering.
//
// The algorithm visits all memory accesses in the loop in bottom-up program
// order. Program order is established by traversing the blocks in the loop in
// reverse postorder when collecting the accesses.
//
// We visit the memory accesses in bottom-up order because it can simplify the
// construction of store groups in the presence of write-after-write (WAW)
// dependences.
//
// E.g., for the WAW dependence:  A[i] = a;      // (1)
//                                A[i] = b;      // (2)
//                                A[i + 1] = c;  // (3)
//
// We will first create a store group with (3) and (2). (1) can't be added to
// this group because it and (2) are dependent. However, (1) can be grouped
// with other accesses that may precede it in program order. Note that a
// bottom-up order does not imply that WAW dependences should not be checked.
void InterleavedAccessInfo::analyzeInterleaving(
    bool EnablePredicatedInterleavedMemAccesses) {
  LLVM_DEBUG(dbgs() << "LV: Analyzing interleaved accesses...\n");
  const ValueToValueMap &Strides = LAI->getSymbolicStrides();

  // Holds all accesses with a constant stride.
  MapVector<Instruction *, StrideDescriptor> AccessStrideInfo;
  collectConstStrideAccesses(AccessStrideInfo, Strides);

  if (AccessStrideInfo.empty())
    return;

  // Collect the dependences in the loop.
  collectDependences();

  // Holds all interleaved store groups temporarily.
  SmallSetVector<InterleaveGroup<Instruction> *, 4> StoreGroups;
  // Holds all interleaved load groups temporarily.
  SmallSetVector<InterleaveGroup<Instruction> *, 4> LoadGroups;

  // Search in bottom-up program order for pairs of accesses (A and B) that can
  // form interleaved load or store groups. In the algorithm below, access A
  // precedes access B in program order. We initialize a group for B in the
  // outer loop of the algorithm, and then in the inner loop, we attempt to
  // insert each A into B's group if:
  //
  //  1. A and B have the same stride,
  //  2. A and B have the same memory object size, and
  //  3. A belongs in B's group according to its distance from B.
  //
  // Special care is taken to ensure group formation will not break any
  // dependences.
  for (auto BI = AccessStrideInfo.rbegin(), E = AccessStrideInfo.rend();
       BI != E; ++BI) {
    Instruction *B = BI->first;
    StrideDescriptor DesB = BI->second;

    // Initialize a group for B if it has an allowable stride. Even if we don't
    // create a group for B, we continue with the bottom-up algorithm to ensure
    // we don't break any of B's dependences.
    InterleaveGroup<Instruction> *Group = nullptr;
    if (isStrided(DesB.Stride) &&
        (!isPredicated(B->getParent()) ||
         EnablePredicatedInterleavedMemAccesses)) {
      Group = getInterleaveGroup(B);
      if (!Group) {
        LLVM_DEBUG(dbgs() << "LV: Creating an interleave group with:" << *B
                          << '\n');
        Group = createInterleaveGroup(B, DesB.Stride, DesB.Alignment);
      }
      if (B->mayWriteToMemory())
        StoreGroups.insert(Group);
      else
        LoadGroups.insert(Group);
    }

    for (auto AI = std::next(BI); AI != E; ++AI) {
      Instruction *A = AI->first;
      StrideDescriptor DesA = AI->second;

      // Our code motion strategy implies that we can't have dependences
      // between accesses in an interleaved group and other accesses located
      // between the first and last member of the group. Note that this also
      // means that a group can't have more than one member at a given offset.
      // The accesses in a group can have dependences with other accesses, but
      // we must ensure we don't extend the boundaries of the group such that
      // we encompass those dependent accesses.
      //
      // For example, assume we have the sequence of accesses shown below in a
      // stride-2 loop:
      //
      //  (1, 2) is a group | A[i]   = a; // (1)
      //                    | A[i-1] = b; // (2) |
      //                      A[i-3] = c; // (3)
      //                      A[i]   = d; // (4) | (2, 4) is not a group
      //
      // Because accesses (2) and (3) are dependent, we can group (2) with (1)
      // but not with (4). If we did, the dependent access (3) would be within
      // the boundaries of the (2, 4) group.
      if (!canReorderMemAccessesForInterleavedGroups(&*AI, &*BI)) {
        // If a dependence exists and A is already in a group, we know that A
        // must be a store since A precedes B and WAR dependences are allowed.
        // Thus, A would be sunk below B. We release A's group to prevent this
        // illegal code motion. A will then be free to form another group with
        // instructions that precede it.
        if (isInterleaved(A)) {
          InterleaveGroup<Instruction> *StoreGroup = getInterleaveGroup(A);

          LLVM_DEBUG(dbgs() << "LV: Invalidated store group due to "
                               "dependence between "
                            << *A << " and " << *B << '\n');

          StoreGroups.remove(StoreGroup);
          releaseGroup(StoreGroup);
        }

        // If a dependence exists and A is not already in a group (or it was
        // and we just released it), B might be hoisted above A (if B is a
        // load) or another store might be sunk below A (if B is a store). In
        // either case, we can't add additional instructions to B's group. B
        // will only form a group with instructions that it precedes.
        break;
      }

      // At this point, we've checked for illegal code motion. If either A or B
      // isn't strided, there's nothing left to do.
      if (!isStrided(DesA.Stride) || !isStrided(DesB.Stride))
        continue;

      // Ignore A if it's already in a group or isn't the same kind of memory
      // operation as B.
      // Note that mayReadFromMemory() isn't mutually exclusive to
      // mayWriteToMemory in the case of atomic loads. We shouldn't see those
      // here, canVectorizeMemory() should have returned false - except for the
      // case we asked for optimization remarks.
      if (isInterleaved(A) ||
          (A->mayReadFromMemory() != B->mayReadFromMemory()) ||
          (A->mayWriteToMemory() != B->mayWriteToMemory()))
        continue;

      // Check rules 1 and 2. Ignore A if its stride or size is different from
      // that of B.
      if (DesA.Stride != DesB.Stride || DesA.Size != DesB.Size)
        continue;

      // Ignore A if the memory objects of A and B don't belong to the same
      // address space.
      if (getLoadStoreAddressSpace(A) != getLoadStoreAddressSpace(B))
        continue;

      // Calculate the distance from A to B.
      const SCEVConstant *DistToB = dyn_cast<SCEVConstant>(
          PSE.getSE()->getMinusSCEV(DesA.Scev, DesB.Scev));
      if (!DistToB)
        continue;
      int64_t DistanceToB = DistToB->getAPInt().getSExtValue();

      // Check rule 3. Ignore A if its distance to B is not a multiple of the
      // size.
      if (DistanceToB % static_cast<int64_t>(DesB.Size))
        continue;

      // All members of a predicated interleave-group must have the same
      // predicate, and currently must reside in the same BB.
      BasicBlock *BlockA = A->getParent();
      BasicBlock *BlockB = B->getParent();
      if ((isPredicated(BlockA) || isPredicated(BlockB)) &&
          (!EnablePredicatedInterleavedMemAccesses || BlockA != BlockB))
        continue;

      // The index of A is the index of B plus A's distance to B in multiples
      // of the size.
      int IndexA =
          Group->getIndex(B) + DistanceToB / static_cast<int64_t>(DesB.Size);

      // Try to insert A into B's group.
      if (Group->insertMember(A, IndexA, DesA.Alignment)) {
        LLVM_DEBUG(dbgs() << "LV: Inserted:" << *A << '\n'
                          << "    into the interleave group with" << *B
                          << '\n');
        InterleaveGroupMap[A] = Group;

        // Set the first load in program order as the insert position.
        if (A->mayReadFromMemory())
          Group->setInsertPos(A);
      }
    } // Iteration over A accesses.
  }   // Iteration over B accesses.

  auto InvalidateGroupIfMemberMayWrap = [&](InterleaveGroup<Instruction> *Group,
                                            int Index,
                                            std::string FirstOrLast) -> bool {
    Instruction *Member = Group->getMember(Index);
    assert(Member && "Group member does not exist");
    Value *MemberPtr = getLoadStorePointerOperand(Member);
    Type *AccessTy = getLoadStoreType(Member);
    if (getPtrStride(PSE, AccessTy, MemberPtr, TheLoop, Strides,
                     /*Assume=*/false, /*ShouldCheckWrap=*/true).value_or(0))
      return false;
    LLVM_DEBUG(dbgs() << "LV: Invalidate candidate interleaved group due to "
                      << FirstOrLast
                      << " group member potentially pointer-wrapping.\n");
    releaseGroup(Group);
    return true;
  };

  // Remove interleaved groups with gaps whose memory
  // accesses may wrap around. We have to revisit the getPtrStride analysis,
  // this time with ShouldCheckWrap=true, since collectConstStrideAccesses does
  // not check wrapping (see documentation there).
  // FORNOW we use Assume=false;
  // TODO: Change to Assume=true but making sure we don't exceed the threshold
  // of runtime SCEV assumptions checks (thereby potentially failing to
  // vectorize altogether).
  // Additional optional optimizations:
  // TODO: If we are peeling the loop and we know that the first pointer doesn't
  // wrap then we can deduce that all pointers in the group don't wrap.
  // This means that we can forcefully peel the loop in order to only have to
  // check the first pointer for no-wrap. When we'll change to use Assume=true
  // we'll only need at most one runtime check per interleaved group.
  for (auto *Group : LoadGroups) {
    // Case 1: A full group. We can skip the checks; for full groups, if the
    // wide load would wrap around the address space we would do a memory
    // access at nullptr even without the transformation.
    if (Group->getNumMembers() == Group->getFactor())
      continue;

    // Case 2: If the first and last members of the group don't wrap, this
    // implies that all the pointers in the group don't wrap.
    // So we check only group member 0 (which is always guaranteed to exist),
    // and group member Factor - 1; if the latter doesn't exist we rely on
    // peeling (if it is a non-reversed access -- see Case 3).
    if (InvalidateGroupIfMemberMayWrap(Group, 0, std::string("first")))
      continue;
    if (Group->getMember(Group->getFactor() - 1))
      InvalidateGroupIfMemberMayWrap(Group, Group->getFactor() - 1,
                                     std::string("last"));
    else {
      // Case 3: A non-reversed interleaved load group with gaps: We need
      // to execute at least one scalar epilogue iteration. This will ensure
      // we don't speculatively access memory out-of-bounds. We only need
      // to look for a member at index factor - 1, since every group must have
      // a member at index zero.
      if (Group->isReverse()) {
        LLVM_DEBUG(
            dbgs() << "LV: Invalidate candidate interleaved group due to "
                      "a reverse access with gaps.\n");
        releaseGroup(Group);
        continue;
      }
      LLVM_DEBUG(
          dbgs() << "LV: Interleaved group requires epilogue iteration.\n");
      RequiresScalarEpilogue = true;
    }
  }

  for (auto *Group : StoreGroups) {
    // Case 1: A full group. We can skip the checks; for full groups, if the
    // wide store would wrap around the address space we would do a memory
    // access at nullptr even without the transformation.
    if (Group->getNumMembers() == Group->getFactor())
      continue;

    // Interleave-store-group with gaps is implemented using masked wide store.
    // Remove interleaved store groups with gaps if
    // masked-interleaved-accesses are not enabled by the target.
    if (!EnablePredicatedInterleavedMemAccesses) {
      LLVM_DEBUG(
          dbgs() << "LV: Invalidate candidate interleaved store group due "
                    "to gaps.\n");
      releaseGroup(Group);
      continue;
    }

    // Case 2: If the first and last members of the group don't wrap, this
    // implies that all the pointers in the group don't wrap.
    // So we check only group member 0 (which is always guaranteed to exist),
    // and the last group member. Case 3 (scalar epilog) is not relevant for
    // stores with gaps, which are implemented with masked-store (rather than
    // speculative access, as in loads).
    if (InvalidateGroupIfMemberMayWrap(Group, 0, std::string("first")))
      continue;
    for (int Index = Group->getFactor() - 1; Index > 0; Index--)
      if (Group->getMember(Index)) {
        InvalidateGroupIfMemberMayWrap(Group, Index, std::string("last"));
        break;
      }
  }
}

void InterleavedAccessInfo::invalidateGroupsRequiringScalarEpilogue() {
  // If no group had triggered the requirement to create an epilogue loop,
  // there is nothing to do.
  if (!requiresScalarEpilogue())
    return;

  bool ReleasedGroup = false;
  // Release groups requiring scalar epilogues. Note that this also removes them
  // from InterleaveGroups.
  for (auto *Group : make_early_inc_range(InterleaveGroups)) {
    if (!Group->requiresScalarEpilogue())
      continue;
    LLVM_DEBUG(
        dbgs()
        << "LV: Invalidate candidate interleaved group due to gaps that "
           "require a scalar epilogue (not allowed under optsize) and cannot "
           "be masked (not enabled).\n");
    releaseGroup(Group);
    ReleasedGroup = true;
  }
  assert(ReleasedGroup && "At least one group must be invalidated, as a "
                          "scalar epilogue was required");
  (void)ReleasedGroup;
  RequiresScalarEpilogue = false;
}

template <typename InstT>
void InterleaveGroup<InstT>::addMetadata(InstT *NewInst) const {
  llvm_unreachable("addMetadata can only be used for Instruction");
}

namespace llvm {
template <>
void InterleaveGroup<Instruction>::addMetadata(Instruction *NewInst) const {
  SmallVector<Value *, 4> VL;
  std::transform(Members.begin(), Members.end(), std::back_inserter(VL),
                 [](std::pair<int, Instruction *> p) { return p.second; });
  propagateMetadata(NewInst, VL);
}
} // namespace llvm

std::string VFABI::mangleTLIVectorName(StringRef VectorName,
                                       StringRef ScalarName, unsigned numArgs,
                                       ElementCount VF) {
  SmallString<256> Buffer;
  llvm::raw_svector_ostream Out(Buffer);
  Out << "_ZGV" << VFABI::_LLVM_ << "N";
  if (VF.isScalable())
    Out << 'x';
  else
    Out << VF.getFixedValue();
  for (unsigned I = 0; I < numArgs; ++I)
    Out << "v";
  Out << "_" << ScalarName << "(" << VectorName << ")";
  return std::string(Out.str());
}
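
// For example, mangleTLIVectorName("vsinf", "sinf", 1,
// ElementCount::getFixed(4)) yields "_ZGV_LLVM_N4v_sinf(vsinf)": the ISA
// token, 'N' (unmasked), the VF, one 'v' per vector argument, then the scalar
// and vector names.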

void VFABI::getVectorVariantNames(
    const CallInst &CI, SmallVectorImpl<std::string> &VariantMappings) {
  const StringRef S = CI.getFnAttr(VFABI::MappingsAttrName).getValueAsString();
  if (S.empty())
    return;

  SmallVector<StringRef, 8> ListAttr;
  S.split(ListAttr, ",");

  for (const auto &S : SetVector<StringRef>(ListAttr.begin(), ListAttr.end())) {
#ifndef NDEBUG
    LLVM_DEBUG(dbgs() << "VFABI: adding mapping '" << S << "'\n");
    std::optional<VFInfo> Info =
        VFABI::tryDemangleForVFABI(S, *(CI.getModule()));
    assert(Info && "Invalid name for a VFABI variant.");
    assert(CI.getModule()->getFunction(Info->VectorName) &&
           "Vector function is missing.");
#endif
    VariantMappings.push_back(std::string(S));
  }
}

bool VFShape::hasValidParameterList() const {
  for (unsigned Pos = 0, NumParams = Parameters.size(); Pos < NumParams;
       ++Pos) {
    assert(Parameters[Pos].ParamPos == Pos && "Broken parameter list.");

    switch (Parameters[Pos].ParamKind) {
    default: // Nothing to check.
      break;
    case VFParamKind::OMP_Linear:
    case VFParamKind::OMP_LinearRef:
    case VFParamKind::OMP_LinearVal:
    case VFParamKind::OMP_LinearUVal:
      // Compile time linear steps must be non-zero.
      if (Parameters[Pos].LinearStepOrPos == 0)
        return false;
      break;
    case VFParamKind::OMP_LinearPos:
    case VFParamKind::OMP_LinearRefPos:
    case VFParamKind::OMP_LinearValPos:
    case VFParamKind::OMP_LinearUValPos:
      // The runtime linear step must be referring to some other
      // parameter in the signature.
      if (Parameters[Pos].LinearStepOrPos >= int(NumParams))
        return false;
      // The linear step parameter must be marked as uniform.
      if (Parameters[Parameters[Pos].LinearStepOrPos].ParamKind !=
          VFParamKind::OMP_Uniform)
        return false;
      // The linear step parameter can't point at itself.
      if (Parameters[Pos].LinearStepOrPos == int(Pos))
        return false;
      break;
    case VFParamKind::GlobalPredicate:
      // The global predicate must be unique. It can be placed anywhere in the
      // signature.
      for (unsigned NextPos = Pos + 1; NextPos < NumParams; ++NextPos)
        if (Parameters[NextPos].ParamKind == VFParamKind::GlobalPredicate)
          return false;
      break;
    }
  }
  return true;
}
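
// For example, a parameter list where parameter 1 is OMP_LinearPos with
// LinearStepOrPos = 0 is valid only if parameter 0 is marked OMP_Uniform,
// and at most one parameter may be the GlobalPredicate.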