//===-- HexagonVectorCombine.cpp ------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// HexagonVectorCombine is a utility class implementing a variety of functions
// that assist in vector-based optimizations.
//
// AlignVectors: replace unaligned vector loads and stores with aligned ones.
//===----------------------------------------------------------------------===//

#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsHexagon.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"

#include "HexagonSubtarget.h"
#include "HexagonTargetMachine.h"

#include <algorithm>
#include <deque>
#include <map>
#include <set>
#include <utility>
#include <vector>

#define DEBUG_TYPE "hexagon-vc"

using namespace llvm;

namespace {
class HexagonVectorCombine {
public:
  HexagonVectorCombine(Function &F_, AliasAnalysis &AA_, AssumptionCache &AC_,
                       DominatorTree &DT_, TargetLibraryInfo &TLI_,
                       const TargetMachine &TM_)
      : F(F_), DL(F.getParent()->getDataLayout()), AA(AA_), AC(AC_), DT(DT_),
        TLI(TLI_),
        HST(static_cast<const HexagonSubtarget &>(*TM_.getSubtargetImpl(F))) {}

  bool run();

  // Common integer type.
  IntegerType *getIntTy() const;
  // Byte type: either scalar (when ElemCount = 0), or vector with the given
  // element count.
  Type *getByteTy(int ElemCount = 0) const;
  // Boolean type: either scalar (when ElemCount = 0), or vector with the
  // given element count.
  Type *getBoolTy(int ElemCount = 0) const;
  // Create a ConstantInt of type returned by getIntTy with the value Val.
  ConstantInt *getConstInt(int Val) const;
  // Get the integer value of Val, if it exists.
  Optional<APInt> getIntValue(const Value *Val) const;
  // Is Val a constant 0, or a vector of 0s?
  bool isZero(const Value *Val) const;
  // Is Val an undef value?
  bool isUndef(const Value *Val) const;

  int getSizeOf(const Value *Val) const;
  int getSizeOf(const Type *Ty) const;
  int getTypeAlignment(Type *Ty) const;

  VectorType *getByteVectorTy(int ScLen) const;
  Constant *getNullValue(Type *Ty) const;
  Constant *getFullValue(Type *Ty) const;

  Value *insertb(IRBuilder<> &Builder, Value *Dest, Value *Src, int Start,
                 int Length, int Where) const;
  Value *vlalignb(IRBuilder<> &Builder, Value *Lo, Value *Hi, Value *Amt) const;
  Value *vralignb(IRBuilder<> &Builder, Value *Lo, Value *Hi, Value *Amt) const;
  Value *concat(IRBuilder<> &Builder, ArrayRef<Value *> Vecs) const;
  Value *vresize(IRBuilder<> &Builder, Value *Val, int NewSize,
                 Value *Pad) const;
  Value *rescale(IRBuilder<> &Builder, Value *Mask, Type *FromTy,
                 Type *ToTy) const;
  Value *vlsb(IRBuilder<> &Builder, Value *Val) const;
  Value *vbytes(IRBuilder<> &Builder, Value *Val) const;

  Value *createHvxIntrinsic(IRBuilder<> &Builder, Intrinsic::ID IntID,
                            Type *RetTy, ArrayRef<Value *> Args) const;

  Optional<int> calculatePointerDifference(Value *Ptr0, Value *Ptr1) const;

  template <typename T = std::vector<Instruction *>>
  bool isSafeToMoveBeforeInBB(const Instruction &In,
                              BasicBlock::const_iterator To,
                              const T &Ignore = {}) const;

  Function &F;
  const DataLayout &DL;
  AliasAnalysis &AA;
  AssumptionCache &AC;
  DominatorTree &DT;
  TargetLibraryInfo &TLI;
  const HexagonSubtarget &HST;

private:
#ifndef NDEBUG
  // These two functions are only used for assertions at the moment.
  bool isByteVecTy(Type *Ty) const;
  bool isSectorTy(Type *Ty) const;
#endif
  Value *getElementRange(IRBuilder<> &Builder, Value *Lo, Value *Hi, int Start,
                         int Length) const;
};

class AlignVectors {
public:
  AlignVectors(HexagonVectorCombine &HVC_) : HVC(HVC_) {}

  bool run();

private:
  using InstList = std::vector<Instruction *>;

  struct Segment {
    void *Data;
    int Start;
    int Size;
  };

  struct AddrInfo {
    AddrInfo(const AddrInfo &) = default;
    AddrInfo(const HexagonVectorCombine &HVC, Instruction *I, Value *A, Type *T,
             Align H)
        : Inst(I), Addr(A), ValTy(T), HaveAlign(H),
          NeedAlign(HVC.getTypeAlignment(ValTy)) {}

    // XXX: add Size member?
    Instruction *Inst;
    Value *Addr;
    Type *ValTy;
    Align HaveAlign;
    Align NeedAlign;
    int Offset = 0; // Offset (in bytes) from the first member of the
                    // containing AddrList.
  };
  using AddrList = std::vector<AddrInfo>;

  struct InstrLess {
    bool operator()(const Instruction *A, const Instruction *B) const {
      return A->comesBefore(B);
    }
  };
  using DepList = std::set<Instruction *, InstrLess>;

  struct MoveGroup {
    MoveGroup(const AddrInfo &AI, Instruction *B, bool Hvx, bool Load)
        : Base(B), Main{AI.Inst}, IsHvx(Hvx), IsLoad(Load) {}
    Instruction *Base; // Base instruction of the parent address group.
    InstList Main;     // Main group of instructions.
    InstList Deps;     // List of dependencies.
    bool IsHvx;        // Is this a group of HVX instructions?
    bool IsLoad;       // Is this a load group?
  };
  using MoveList = std::vector<MoveGroup>;

  struct ByteSpan {
    struct Segment {
      Segment(Value *Val, int Begin, int Len)
          : Val(Val), Start(Begin), Size(Len) {}
      Segment(const Segment &Seg) = default;
      Value *Val;
      int Start;
      int Size;
    };

    struct Block {
      Block(Value *Val, int Len, int Pos) : Seg(Val, 0, Len), Pos(Pos) {}
      Block(Value *Val, int Off, int Len, int Pos)
          : Seg(Val, Off, Len), Pos(Pos) {}
      Block(const Block &Blk) = default;
      Segment Seg;
      int Pos;
    };

    int extent() const;
    ByteSpan section(int Start, int Length) const;
    ByteSpan &shift(int Offset);

    int size() const { return Blocks.size(); }
    Block &operator[](int i) { return Blocks[i]; }

    std::vector<Block> Blocks;

    using iterator = decltype(Blocks)::iterator;
    iterator begin() { return Blocks.begin(); }
    iterator end() { return Blocks.end(); }
    using const_iterator = decltype(Blocks)::const_iterator;
    const_iterator begin() const { return Blocks.begin(); }
    const_iterator end() const { return Blocks.end(); }
  };

  Align getAlignFromValue(const Value *V) const;
  Optional<MemoryLocation> getLocation(const Instruction &In) const;
  Optional<AddrInfo> getAddrInfo(Instruction &In) const;
  bool isHvx(const AddrInfo &AI) const;

  Value *getPayload(Value *Val) const;
  Value *getMask(Value *Val) const;
  Value *getPassThrough(Value *Val) const;

  Value *createAdjustedPointer(IRBuilder<> &Builder, Value *Ptr, Type *ValTy,
                               int Adjust) const;
  Value *createAlignedPointer(IRBuilder<> &Builder, Value *Ptr, Type *ValTy,
                              int Alignment) const;
  Value *createAlignedLoad(IRBuilder<> &Builder, Type *ValTy, Value *Ptr,
                           int Alignment, Value *Mask, Value *PassThru) const;
  Value *createAlignedStore(IRBuilder<> &Builder, Value *Val, Value *Ptr,
                            int Alignment, Value *Mask) const;

  bool createAddressGroups();
  MoveList createLoadGroups(const AddrList &Group) const;
  MoveList createStoreGroups(const AddrList &Group) const;
  bool move(const MoveGroup &Move) const;
  bool realignGroup(const MoveGroup &Move) const;

  friend raw_ostream &operator<<(raw_ostream &OS, const AddrInfo &AI);
  friend raw_ostream &operator<<(raw_ostream &OS, const MoveGroup &MG);
  friend raw_ostream &operator<<(raw_ostream &OS, const ByteSpan &BS);

  std::map<Instruction *, AddrList> AddrGroups;
  HexagonVectorCombine &HVC;
};

LLVM_ATTRIBUTE_UNUSED
raw_ostream &operator<<(raw_ostream &OS, const AlignVectors::AddrInfo &AI) {
  OS << "Inst: " << AI.Inst << " " << *AI.Inst << '\n';
  OS << "Addr: " << *AI.Addr << '\n';
  OS << "Type: " << *AI.ValTy << '\n';
  OS << "HaveAlign: " << AI.HaveAlign.value() << '\n';
  OS << "NeedAlign: " << AI.NeedAlign.value() << '\n';
  OS << "Offset: " << AI.Offset;
  return OS;
}

LLVM_ATTRIBUTE_UNUSED
raw_ostream &operator<<(raw_ostream &OS, const AlignVectors::MoveGroup &MG) {
  OS << "Main\n";
  for (Instruction *I : MG.Main)
    OS << "  " << *I << '\n';
  OS << "Deps\n";
  for (Instruction *I : MG.Deps)
    OS << "  " << *I << '\n';
  return OS;
}

LLVM_ATTRIBUTE_UNUSED
raw_ostream &operator<<(raw_ostream &OS, const AlignVectors::ByteSpan &BS) {
  OS << "ByteSpan[size=" << BS.size() << ", extent=" << BS.extent() << '\n';
  for (const AlignVectors::ByteSpan::Block &B : BS) {
    OS << "  @" << B.Pos << " [" << B.Seg.Start << ',' << B.Seg.Size << "] "
       << *B.Seg.Val << '\n';
  }
  OS << ']';
  return OS;
}

} // namespace

namespace {

template <typename T> T *getIfUnordered(T *MaybeT) {
  return MaybeT && MaybeT->isUnordered() ? MaybeT : nullptr;
}
template <typename T> T *isCandidate(Instruction *In) {
  return dyn_cast<T>(In);
}
template <> LoadInst *isCandidate<LoadInst>(Instruction *In) {
  return getIfUnordered(dyn_cast<LoadInst>(In));
}
template <> StoreInst *isCandidate<StoreInst>(Instruction *In) {
  return getIfUnordered(dyn_cast<StoreInst>(In));
}

#if !defined(_MSC_VER) || _MSC_VER >= 1924
// VS2017 (and builds of VS2019 before 16.4) have trouble compiling this:
// error C2976: 'std::map': too few template arguments
template <typename Pred, typename... Ts>
void erase_if(std::map<Ts...> &map, Pred p)
#else
template <typename Pred, typename T, typename U>
void erase_if(std::map<T, U> &map, Pred p)
#endif
{
  for (auto i = map.begin(), e = map.end(); i != e;) {
    if (p(*i))
      i = map.erase(i);
    else
      i = std::next(i);
  }
}

// Forward other erase_ifs to the LLVM implementations.
template <typename Pred, typename T> void erase_if(T &&container, Pred p) {
  llvm::erase_if(std::forward<T>(container), p);
}

} // namespace

// --- Begin AlignVectors

auto AlignVectors::ByteSpan::extent() const -> int {
  if (size() == 0)
    return 0;
  int Min = Blocks[0].Pos;
  int Max = Blocks[0].Pos + Blocks[0].Seg.Size;
  for (int i = 1, e = size(); i != e; ++i) {
    Min = std::min(Min, Blocks[i].Pos);
    Max = std::max(Max, Blocks[i].Pos + Blocks[i].Seg.Size);
  }
  return Max - Min;
}

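// Illustrative note (added for exposition, hypothetical values): given
// blocks {Pos=0, Seg=[Start=0, Size=8]} and {Pos=8, Seg=[Start=0, Size=8]},
// section(4, 8) returns only the overlapping parts:
//   from the first block:  {Pos=4, Seg=[Start=4, Size=4]}
//   from the second block: {Pos=8, Seg=[Start=0, Size=4]}
// Each surviving block is clipped to [Start, Start+Length), and its
// Seg.Start advances by however much was chopped off the front.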
auto AlignVectors::ByteSpan::section(int Start, int Length) const -> ByteSpan {
  ByteSpan Section;
  for (const ByteSpan::Block &B : Blocks) {
    int L = std::max(B.Pos, Start);                       // Left end.
    int R = std::min(B.Pos + B.Seg.Size, Start + Length); // Right end+1.
    if (L < R) {
      // How much to chop off the beginning of the segment:
      int Off = L > B.Pos ? L - B.Pos : 0;
      Section.Blocks.emplace_back(B.Seg.Val, B.Seg.Start + Off, R - L, L);
    }
  }
  return Section;
}

auto AlignVectors::ByteSpan::shift(int Offset) -> ByteSpan & {
  for (Block &B : Blocks)
    B.Pos += Offset;
  return *this;
}

auto AlignVectors::getAlignFromValue(const Value *V) const -> Align {
  const auto *C = dyn_cast<ConstantInt>(V);
  assert(C && "Alignment must be a compile-time constant integer");
  return C->getAlignValue();
}

auto AlignVectors::getAddrInfo(Instruction &In) const -> Optional<AddrInfo> {
  if (auto *L = isCandidate<LoadInst>(&In))
    return AddrInfo(HVC, L, L->getPointerOperand(), L->getType(),
                    L->getAlign());
  if (auto *S = isCandidate<StoreInst>(&In))
    return AddrInfo(HVC, S, S->getPointerOperand(),
                    S->getValueOperand()->getType(), S->getAlign());
  if (auto *II = isCandidate<IntrinsicInst>(&In)) {
    Intrinsic::ID ID = II->getIntrinsicID();
    switch (ID) {
    case Intrinsic::masked_load:
      return AddrInfo(HVC, II, II->getArgOperand(0), II->getType(),
                      getAlignFromValue(II->getArgOperand(1)));
    case Intrinsic::masked_store:
      return AddrInfo(HVC, II, II->getArgOperand(1),
                      II->getArgOperand(0)->getType(),
                      getAlignFromValue(II->getArgOperand(2)));
    }
  }
  return Optional<AddrInfo>();
}

auto AlignVectors::isHvx(const AddrInfo &AI) const -> bool {
  return HVC.HST.isTypeForHVX(AI.ValTy);
}

auto AlignVectors::getPayload(Value *Val) const -> Value * {
  if (auto *In = dyn_cast<Instruction>(Val)) {
    Intrinsic::ID ID = 0;
    if (auto *II = dyn_cast<IntrinsicInst>(In))
      ID = II->getIntrinsicID();
    if (isa<StoreInst>(In) || ID == Intrinsic::masked_store)
      return In->getOperand(0);
  }
  return Val;
}

auto AlignVectors::getMask(Value *Val) const -> Value * {
  if (auto *II = dyn_cast<IntrinsicInst>(Val)) {
    switch (II->getIntrinsicID()) {
    case Intrinsic::masked_load:
      return II->getArgOperand(2);
    case Intrinsic::masked_store:
      return II->getArgOperand(3);
    }
  }

  Type *ValTy = getPayload(Val)->getType();
  if (auto *VecTy = dyn_cast<VectorType>(ValTy)) {
    int ElemCount = VecTy->getElementCount().getFixedValue();
    return HVC.getFullValue(HVC.getBoolTy(ElemCount));
  }
  return HVC.getFullValue(HVC.getBoolTy());
}

auto AlignVectors::getPassThrough(Value *Val) const -> Value * {
  if (auto *II = dyn_cast<IntrinsicInst>(Val)) {
    if (II->getIntrinsicID() == Intrinsic::masked_load)
      return II->getArgOperand(3);
  }
  return UndefValue::get(getPayload(Val)->getType());
}

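// Illustrative sketch (hypothetical values): if Ptr has type i32* and
// Adjust = 8, then ElemSize = 4 and the fast path below emits
//   %g = getelementptr i32, i32* %Ptr, i32 2
// followed by a pointer cast to ValTy*. With Adjust = 3 the adjustment
// is not a multiple of ElemSize, so the pointer is cast to i8*, advanced
// by 3 bytes, and cast back to ValTy*.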
auto AlignVectors::createAdjustedPointer(IRBuilder<> &Builder, Value *Ptr,
                                         Type *ValTy, int Adjust) const
    -> Value * {
  // The adjustment is in bytes, but if it's a multiple of the type size,
  // we don't need to do pointer casts.
  Type *ElemTy = cast<PointerType>(Ptr->getType())->getElementType();
  int ElemSize = HVC.getSizeOf(ElemTy);
  if (Adjust % ElemSize == 0) {
    Value *Tmp0 = Builder.CreateGEP(Ptr, HVC.getConstInt(Adjust / ElemSize));
    return Builder.CreatePointerCast(Tmp0, ValTy->getPointerTo());
  }

  PointerType *CharPtrTy = Type::getInt8PtrTy(HVC.F.getContext());
  Value *Tmp0 = Builder.CreatePointerCast(Ptr, CharPtrTy);
  Value *Tmp1 = Builder.CreateGEP(Tmp0, HVC.getConstInt(Adjust));
  return Builder.CreatePointerCast(Tmp1, ValTy->getPointerTo());
}

auto AlignVectors::createAlignedPointer(IRBuilder<> &Builder, Value *Ptr,
                                        Type *ValTy, int Alignment) const
    -> Value * {
  Value *AsInt = Builder.CreatePtrToInt(Ptr, HVC.getIntTy());
  Value *Mask = HVC.getConstInt(-Alignment);
  Value *And = Builder.CreateAnd(AsInt, Mask);
  return Builder.CreateIntToPtr(And, ValTy->getPointerTo());
}

auto AlignVectors::createAlignedLoad(IRBuilder<> &Builder, Type *ValTy,
                                     Value *Ptr, int Alignment, Value *Mask,
                                     Value *PassThru) const -> Value * {
  assert(!HVC.isUndef(Mask)); // Should this be allowed?
  if (HVC.isZero(Mask))
    return PassThru;
  if (Mask == ConstantInt::getTrue(Mask->getType()))
    return Builder.CreateAlignedLoad(ValTy, Ptr, Align(Alignment));
  return Builder.CreateMaskedLoad(Ptr, Align(Alignment), Mask, PassThru);
}

auto AlignVectors::createAlignedStore(IRBuilder<> &Builder, Value *Val,
                                      Value *Ptr, int Alignment,
                                      Value *Mask) const -> Value * {
  if (HVC.isZero(Mask) || HVC.isUndef(Val) || HVC.isUndef(Mask))
    return UndefValue::get(Val->getType());
  if (Mask == ConstantInt::getTrue(Mask->getType()))
    return Builder.CreateAlignedStore(Val, Ptr, Align(Alignment));
  return Builder.CreateMaskedStore(Val, Ptr, Align(Alignment), Mask);
}

auto AlignVectors::createAddressGroups() -> bool {
  // An address group created here may contain instructions spanning
  // multiple basic blocks.
  AddrList WorkStack;

  auto findBaseAndOffset = [&](AddrInfo &AI) -> std::pair<Instruction *, int> {
    for (AddrInfo &W : WorkStack) {
      if (auto D = HVC.calculatePointerDifference(AI.Addr, W.Addr))
        return std::make_pair(W.Inst, *D);
    }
    return std::make_pair(nullptr, 0);
  };

  auto traverseBlock = [&](DomTreeNode *DomN, auto Visit) -> void {
    BasicBlock &Block = *DomN->getBlock();
    for (Instruction &I : Block) {
      auto AI = this->getAddrInfo(I); // Use this-> for gcc6.
      if (!AI)
        continue;
      auto F = findBaseAndOffset(*AI);
      Instruction *GroupInst;
      if (Instruction *BI = F.first) {
        AI->Offset = F.second;
        GroupInst = BI;
      } else {
        WorkStack.push_back(*AI);
        GroupInst = AI->Inst;
      }
      AddrGroups[GroupInst].push_back(*AI);
    }

    for (DomTreeNode *C : DomN->children())
      Visit(C, Visit);

    while (!WorkStack.empty() && WorkStack.back().Inst->getParent() == &Block)
      WorkStack.pop_back();
  };

  traverseBlock(HVC.DT.getRootNode(), traverseBlock);
  assert(WorkStack.empty());

  // AddrGroups are formed.

  // Remove groups of size 1.
  erase_if(AddrGroups, [](auto &G) { return G.second.size() == 1; });
  // Remove groups that don't use HVX types.
  erase_if(AddrGroups, [&](auto &G) {
    return !llvm::any_of(
        G.second, [&](auto &I) { return HVC.HST.isTypeForHVX(I.ValTy); });
  });
  // Remove groups where everything is properly aligned.
  erase_if(AddrGroups, [&](auto &G) {
    return llvm::all_of(G.second,
                        [&](auto &I) { return I.HaveAlign >= I.NeedAlign; });
  });

  return !AddrGroups.empty();
}

auto AlignVectors::createLoadGroups(const AddrList &Group) const -> MoveList {
  // Form load groups.
  // To avoid complications with moving code across basic blocks, only form
  // groups that are contained within a single basic block.

  auto getUpwardDeps = [](Instruction *In, Instruction *Base) {
    BasicBlock *Parent = Base->getParent();
    assert(In->getParent() == Parent &&
           "Base and In should be in the same block");
    assert(Base->comesBefore(In) && "Base should come before In");

    DepList Deps;
    std::deque<Instruction *> WorkQ = {In};
    while (!WorkQ.empty()) {
      Instruction *D = WorkQ.front();
      WorkQ.pop_front();
      Deps.insert(D);
      for (Value *Op : D->operands()) {
        if (auto *I = dyn_cast<Instruction>(Op)) {
          if (I->getParent() == Parent && Base->comesBefore(I))
            WorkQ.push_back(I);
        }
      }
    }
    return Deps;
  };

  auto tryAddTo = [&](const AddrInfo &Info, MoveGroup &Move) {
    assert(!Move.Main.empty() && "Move group should have non-empty Main");
    // Don't mix HVX and non-HVX instructions.
    if (Move.IsHvx != isHvx(Info))
      return false;
    // Leading instruction in the load group.
    Instruction *Base = Move.Main.front();
    if (Base->getParent() != Info.Inst->getParent())
      return false;

    auto isSafeToMoveToBase = [&](const Instruction *I) {
      return HVC.isSafeToMoveBeforeInBB(*I, Base->getIterator());
    };
    DepList Deps = getUpwardDeps(Info.Inst, Base);
    if (!llvm::all_of(Deps, isSafeToMoveToBase))
      return false;

    // The dependencies will be moved together with the load, so make sure
    // that none of them could be moved independently in another group.
    Deps.erase(Info.Inst);
    auto inAddrMap = [&](Instruction *I) { return AddrGroups.count(I) > 0; };
    if (llvm::any_of(Deps, inAddrMap))
      return false;
    Move.Main.push_back(Info.Inst);
    llvm::append_range(Move.Deps, Deps);
    return true;
  };

  MoveList LoadGroups;

  for (const AddrInfo &Info : Group) {
    if (!Info.Inst->mayReadFromMemory())
      continue;
    if (LoadGroups.empty() || !tryAddTo(Info, LoadGroups.back()))
      LoadGroups.emplace_back(Info, Group.front().Inst, isHvx(Info), true);
  }

  // Erase singleton groups.
  erase_if(LoadGroups, [](const MoveGroup &G) { return G.Main.size() <= 1; });
  return LoadGroups;
}

auto AlignVectors::createStoreGroups(const AddrList &Group) const -> MoveList {
  // Form store groups.
  // To avoid complications with moving code across basic blocks, only form
  // groups that are contained within a single basic block.

  auto tryAddTo = [&](const AddrInfo &Info, MoveGroup &Move) {
    assert(!Move.Main.empty() && "Move group should have non-empty Main");
    // For stores with return values we'd have to collect downward dependencies.
    // There are no such stores that we handle at the moment, so omit that.
    assert(Info.Inst->getType()->isVoidTy() &&
           "Not handling stores with return values");
    // Don't mix HVX and non-HVX instructions.
    if (Move.IsHvx != isHvx(Info))
      return false;
    // For stores we need to be careful whether it's safe to move them.
    // Stores that are otherwise safe to move together may not appear safe
    // to move over one another (i.e. isSafeToMoveBefore may return false).
    Instruction *Base = Move.Main.front();
    if (Base->getParent() != Info.Inst->getParent())
      return false;
    if (!HVC.isSafeToMoveBeforeInBB(*Info.Inst, Base->getIterator(), Move.Main))
      return false;
    Move.Main.push_back(Info.Inst);
    return true;
  };

  MoveList StoreGroups;

  for (auto I = Group.rbegin(), E = Group.rend(); I != E; ++I) {
    const AddrInfo &Info = *I;
    if (!Info.Inst->mayWriteToMemory())
      continue;
    if (StoreGroups.empty() || !tryAddTo(Info, StoreGroups.back()))
      StoreGroups.emplace_back(Info, Group.front().Inst, isHvx(Info), false);
  }

  // Erase singleton groups.
  erase_if(StoreGroups, [](const MoveGroup &G) { return G.Main.size() <= 1; });
  return StoreGroups;
}

auto AlignVectors::move(const MoveGroup &Move) const -> bool {
  assert(!Move.Main.empty() && "Move group should have non-empty Main");
  Instruction *Where = Move.Main.front();

  if (Move.IsLoad) {
    // Move all deps to before Where, keeping order.
    for (Instruction *D : Move.Deps)
      D->moveBefore(Where);
    // Move all main instructions to after Where, keeping order.
    ArrayRef<Instruction *> Main(Move.Main);
    for (Instruction *M : Main.drop_front(1)) {
      M->moveAfter(Where);
      Where = M;
    }
  } else {
    // NOTE: Deps are empty for "store" groups. If they need to be
    // non-empty, decide on the order.
    assert(Move.Deps.empty());
    // Move all main instructions to before Where, inverting order.
    ArrayRef<Instruction *> Main(Move.Main);
    for (Instruction *M : Main.drop_front(1)) {
      M->moveBefore(Where);
      Where = M;
    }
  }

  return Move.Main.size() + Move.Deps.size() > 1;
}

auto AlignVectors::realignGroup(const MoveGroup &Move) const -> bool {
  // TODO: Needs support for masked loads/stores of "scalar" vectors.
  if (!Move.IsHvx)
    return false;

  // Return the element with the maximum alignment from Range,
  // where GetValue obtains the value to compare from an element.
  auto getMaxOf = [](auto Range, auto GetValue) {
    return *std::max_element(
        Range.begin(), Range.end(),
        [&GetValue](auto &A, auto &B) { return GetValue(A) < GetValue(B); });
  };

  const AddrList &BaseInfos = AddrGroups.at(Move.Base);

  // Conceptually, there is a vector of N bytes covering the addresses
  // starting from the minimum offset (i.e. Base.Addr+Start). This vector
  // represents a contiguous memory region that spans all accessed memory
  // locations.
  // The correspondence between loaded or stored values will be expressed
  // in terms of this vector. For example, the 0th element of the vector
  // from the Base address info will start at byte Start from the beginning
  // of this conceptual vector.
  //
  // This vector will be loaded/stored starting at the nearest down-aligned
  // address, and the amount of the down-alignment will be AlignVal:
  //   valign(load_vector(align_down(Base+Start)), AlignVal)

  std::set<Instruction *> TestSet(Move.Main.begin(), Move.Main.end());
  AddrList MoveInfos;
  llvm::copy_if(
      BaseInfos, std::back_inserter(MoveInfos),
      [&TestSet](const AddrInfo &AI) { return TestSet.count(AI.Inst); });

  // Maximum alignment present in the whole address group.
  const AddrInfo &WithMaxAlign =
      getMaxOf(BaseInfos, [](const AddrInfo &AI) { return AI.HaveAlign; });
  Align MaxGiven = WithMaxAlign.HaveAlign;

  // Member of the move address group with the minimum offset.
  const AddrInfo &WithMinOffset =
      getMaxOf(MoveInfos, [](const AddrInfo &AI) { return -AI.Offset; });

  const AddrInfo &WithMaxNeeded =
      getMaxOf(MoveInfos, [](const AddrInfo &AI) { return AI.NeedAlign; });
  Align MinNeeded = WithMaxNeeded.NeedAlign;

  // Set the builder at the top instruction in the move group.
  Instruction *TopIn = Move.IsLoad ? Move.Main.front() : Move.Main.back();
  IRBuilder<> Builder(TopIn);
  Value *AlignAddr = nullptr; // Actual aligned address.
  Value *AlignVal = nullptr;  // Right-shift amount (for valign).

  if (MinNeeded <= MaxGiven) {
    int Start = WithMinOffset.Offset;
    int OffAtMax = WithMaxAlign.Offset;
    // Shift the offset of the maximally aligned instruction (OffAtMax)
    // back by just enough multiples of the required alignment to cover the
    // distance from Start to OffAtMax.
    // Calculate the address adjustment amount based on the address with the
    // maximum alignment. This is to allow a simple gep instruction instead
    // of potential bitcasts to i8*.
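    // Worked example (hypothetical numbers, added for exposition): with
    // Start = -4, OffAtMax = 31 and MinNeeded = 8, the computation below
    // gives Adjust = -alignTo(35, 8) = -40, so AlignAddr lands 40 bytes
    // below the maximally aligned member, and the valign amount becomes
    // Diff = -4 - (31 - 40) = 5, satisfying 0 <= Diff < MinNeeded.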
    int Adjust = -alignTo(OffAtMax - Start, MinNeeded.value());
    AlignAddr = createAdjustedPointer(Builder, WithMaxAlign.Addr,
                                      WithMaxAlign.ValTy, Adjust);
    int Diff = Start - (OffAtMax + Adjust);
    AlignVal = HVC.getConstInt(Diff);
    // Sanity.
    assert(Diff >= 0);
    assert(static_cast<decltype(MinNeeded.value())>(Diff) < MinNeeded.value());
  } else {
    // WithMinOffset is the lowest address in the group,
    // WithMinOffset.Addr = Base+Start.
    // Align instructions for both HVX (V6_valign) and scalar (S2_valignrb)
    // mask off unnecessary bits, so it's ok to just use the original pointer
    // as the alignment amount.
    // Do an explicit down-alignment of the address to avoid creating an
    // aligned instruction with an address that is not really aligned.
    AlignAddr = createAlignedPointer(Builder, WithMinOffset.Addr,
                                     WithMinOffset.ValTy, MinNeeded.value());
    AlignVal = Builder.CreatePtrToInt(WithMinOffset.Addr, HVC.getIntTy());
  }

  ByteSpan VSpan;
  for (const AddrInfo &AI : MoveInfos) {
    VSpan.Blocks.emplace_back(AI.Inst, HVC.getSizeOf(AI.ValTy),
                              AI.Offset - WithMinOffset.Offset);
  }

  // The aligned loads/stores will use blocks that are either scalars,
  // or HVX vectors. Let "sector" be the unified term for such a block.
  // blend(scalar, vector) -> sector...
  int ScLen = Move.IsHvx ? HVC.HST.getVectorLength()
                         : std::max<int>(MinNeeded.value(), 4);
  assert(!Move.IsHvx || ScLen == 64 || ScLen == 128);
  assert(Move.IsHvx || ScLen == 4 || ScLen == 8);

  Type *SecTy = HVC.getByteTy(ScLen);
  int NumSectors = (VSpan.extent() + ScLen - 1) / ScLen;

  if (Move.IsLoad) {
    ByteSpan ASpan;
    auto *True = HVC.getFullValue(HVC.getBoolTy(ScLen));
    auto *Undef = UndefValue::get(SecTy);

    for (int i = 0; i != NumSectors + 1; ++i) {
      Value *Ptr = createAdjustedPointer(Builder, AlignAddr, SecTy, i * ScLen);
      // FIXME: generate a predicated load?
      Value *Load = createAlignedLoad(Builder, SecTy, Ptr, ScLen, True, Undef);
      ASpan.Blocks.emplace_back(Load, ScLen, i * ScLen);
    }

    for (int j = 0; j != NumSectors; ++j) {
      ASpan[j].Seg.Val = HVC.vralignb(Builder, ASpan[j].Seg.Val,
                                      ASpan[j + 1].Seg.Val, AlignVal);
    }

    for (ByteSpan::Block &B : VSpan) {
      ByteSpan Section = ASpan.section(B.Pos, B.Seg.Size).shift(-B.Pos);
      Value *Accum = UndefValue::get(HVC.getByteTy(B.Seg.Size));
      for (ByteSpan::Block &S : Section) {
        Value *Pay = HVC.vbytes(Builder, getPayload(S.Seg.Val));
        Accum =
            HVC.insertb(Builder, Accum, Pay, S.Seg.Start, S.Seg.Size, S.Pos);
      }
      // Instead of casting everything to bytes for the vselect, cast to the
      // original value type. This will avoid complications with casting masks.
      // For example, in cases when the original mask applied to i32, it could
      // be converted to a mask applicable to i8 via pred_typecast intrinsic,
      // but if the mask is not exactly of HVX length, extra handling would be
      // needed to make it work.
      Type *ValTy = getPayload(B.Seg.Val)->getType();
      Value *Cast = Builder.CreateBitCast(Accum, ValTy);
      Value *Sel = Builder.CreateSelect(getMask(B.Seg.Val), Cast,
                                        getPassThrough(B.Seg.Val));
      B.Seg.Val->replaceAllUsesWith(Sel);
    }
  } else {
    // Stores.
    ByteSpan ASpanV, ASpanM;

    // Return a vector value corresponding to the input value Val:
    // either <1 x Val> for scalar Val, or Val itself for vector Val.
    auto MakeVec = [](IRBuilder<> &Builder, Value *Val) -> Value * {
      Type *Ty = Val->getType();
      if (Ty->isVectorTy())
        return Val;
      auto *VecTy = VectorType::get(Ty, 1, /*Scalable*/ false);
      return Builder.CreateBitCast(Val, VecTy);
    };

    // Create an extra "undef" sector at the beginning and at the end.
    // They will be used as the left/right filler in the vlalign step.
    for (int i = -1; i != NumSectors + 1; ++i) {
      // For stores, the size of each section is an aligned vector length.
      // Adjust the store offsets relative to the section start offset.
      ByteSpan Section = VSpan.section(i * ScLen, ScLen).shift(-i * ScLen);
      Value *AccumV = UndefValue::get(SecTy);
      Value *AccumM = HVC.getNullValue(SecTy);
      for (ByteSpan::Block &S : Section) {
        Value *Pay = getPayload(S.Seg.Val);
        Value *Mask = HVC.rescale(Builder, MakeVec(Builder, getMask(S.Seg.Val)),
                                  Pay->getType(), HVC.getByteTy());
        AccumM = HVC.insertb(Builder, AccumM, HVC.vbytes(Builder, Mask),
                             S.Seg.Start, S.Seg.Size, S.Pos);
        AccumV = HVC.insertb(Builder, AccumV, HVC.vbytes(Builder, Pay),
                             S.Seg.Start, S.Seg.Size, S.Pos);
      }
      ASpanV.Blocks.emplace_back(AccumV, ScLen, i * ScLen);
      ASpanM.Blocks.emplace_back(AccumM, ScLen, i * ScLen);
    }

    // vlalign
    for (int j = 1; j != NumSectors + 2; ++j) {
      ASpanV[j - 1].Seg.Val = HVC.vlalignb(Builder, ASpanV[j - 1].Seg.Val,
                                           ASpanV[j].Seg.Val, AlignVal);
      ASpanM[j - 1].Seg.Val = HVC.vlalignb(Builder, ASpanM[j - 1].Seg.Val,
                                           ASpanM[j].Seg.Val, AlignVal);
    }

    for (int i = 0; i != NumSectors + 1; ++i) {
      Value *Ptr = createAdjustedPointer(Builder, AlignAddr, SecTy, i * ScLen);
      Value *Val = ASpanV[i].Seg.Val;
      Value *Mask = ASpanM[i].Seg.Val; // bytes
      if (!HVC.isUndef(Val) && !HVC.isZero(Mask))
        createAlignedStore(Builder, Val, Ptr, ScLen, HVC.vlsb(Builder, Mask));
    }
  }

  for (auto *Inst : Move.Main)
    Inst->eraseFromParent();

  return true;
}

auto AlignVectors::run() -> bool {
  if (!createAddressGroups())
    return false;

  bool Changed = false;
  MoveList LoadGroups, StoreGroups;

  for (auto &G : AddrGroups) {
    llvm::append_range(LoadGroups, createLoadGroups(G.second));
    llvm::append_range(StoreGroups, createStoreGroups(G.second));
  }

  for (auto &M : LoadGroups)
    Changed |= move(M);
  for (auto &M : StoreGroups)
    Changed |= move(M);

  for (auto &M : LoadGroups)
    Changed |= realignGroup(M);
  for (auto &M : StoreGroups)
    Changed |= realignGroup(M);

  return Changed;
}

// --- End AlignVectors

auto HexagonVectorCombine::run() -> bool {
  if (!HST.useHVXOps())
    return false;

  bool Changed = AlignVectors(*this).run();
  return Changed;
}

auto HexagonVectorCombine::getIntTy() const -> IntegerType * {
  return Type::getInt32Ty(F.getContext());
}

auto HexagonVectorCombine::getByteTy(int ElemCount) const -> Type * {
  assert(ElemCount >= 0);
  IntegerType *ByteTy = Type::getInt8Ty(F.getContext());
  if (ElemCount == 0)
    return ByteTy;
  return VectorType::get(ByteTy, ElemCount, /*Scalable*/ false);
}

auto HexagonVectorCombine::getBoolTy(int ElemCount) const -> Type * {
  assert(ElemCount >= 0);
  IntegerType *BoolTy = Type::getInt1Ty(F.getContext());
  if (ElemCount == 0)
    return BoolTy;
  return VectorType::get(BoolTy, ElemCount, /*Scalable*/ false);
}

auto HexagonVectorCombine::getConstInt(int Val) const -> ConstantInt * {
  return ConstantInt::getSigned(getIntTy(), Val);
}

auto HexagonVectorCombine::isZero(const Value *Val) const -> bool {
  if (auto *C = dyn_cast<Constant>(Val))
    return C->isZeroValue();
  return false;
}

auto HexagonVectorCombine::getIntValue(const Value *Val) const
    -> Optional<APInt> {
  if (auto *CI = dyn_cast<ConstantInt>(Val))
    return CI->getValue();
  return None;
}

auto HexagonVectorCombine::isUndef(const Value *Val) const -> bool {
  return isa<UndefValue>(Val);
}

auto HexagonVectorCombine::getSizeOf(const Value *Val) const -> int {
  return getSizeOf(Val->getType());
}

auto HexagonVectorCombine::getSizeOf(const Type *Ty) const -> int {
  return DL.getTypeStoreSize(const_cast<Type *>(Ty)).getFixedValue();
}

auto HexagonVectorCombine::getTypeAlignment(Type *Ty) const -> int {
  // The actual type may be shorter than the HVX vector, so determine
  // the alignment based on subtarget info.
  if (HST.isTypeForHVX(Ty))
    return HST.getVectorLength();
  return DL.getABITypeAlign(Ty).value();
}

auto HexagonVectorCombine::getNullValue(Type *Ty) const -> Constant * {
  assert(Ty->isIntOrIntVectorTy());
  auto Zero = ConstantInt::get(Ty->getScalarType(), 0);
  if (auto *VecTy = dyn_cast<VectorType>(Ty))
    return ConstantVector::getSplat(VecTy->getElementCount(), Zero);
  return Zero;
}

auto HexagonVectorCombine::getFullValue(Type *Ty) const -> Constant * {
  assert(Ty->isIntOrIntVectorTy());
  auto Minus1 = ConstantInt::get(Ty->getScalarType(), -1);
  if (auto *VecTy = dyn_cast<VectorType>(Ty))
    return ConstantVector::getSplat(VecTy->getElementCount(), Minus1);
  return Minus1;
}

// Insert bytes [Start..Start+Length) of Src into Dst at byte Where.
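// For example (hypothetical values): with Dst = <8 x i8>, Src = <8 x i8>,
// Start = 2, Length = 3 and Where = 4, the result consists of bytes
//   Dst[0..3], Src[2..4], Dst[7]
// realized as a single shuffle of Dst and Src after both are padded to a
// common power-of-2 length.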
auto HexagonVectorCombine::insertb(IRBuilder<> &Builder, Value *Dst, Value *Src,
                                   int Start, int Length, int Where) const
    -> Value * {
  assert(isByteVecTy(Dst->getType()) && isByteVecTy(Src->getType()));
  int SrcLen = getSizeOf(Src);
  int DstLen = getSizeOf(Dst);
  assert(0 <= Start && Start + Length <= SrcLen);
  assert(0 <= Where && Where + Length <= DstLen);

  int P2Len = PowerOf2Ceil(SrcLen | DstLen);
  auto *Undef = UndefValue::get(getByteTy());
  Value *P2Src = vresize(Builder, Src, P2Len, Undef);
  Value *P2Dst = vresize(Builder, Dst, P2Len, Undef);

  SmallVector<int, 256> SMask(P2Len);
  for (int i = 0; i != P2Len; ++i) {
    // If i is in [Where, Where+Length), pick Src[Start+(i-Where)].
    // Otherwise, pick Dst[i];
    SMask[i] =
        (Where <= i && i < Where + Length) ? P2Len + Start + (i - Where) : i;
  }

  Value *P2Insert = Builder.CreateShuffleVector(P2Dst, P2Src, SMask);
  return vresize(Builder, P2Insert, DstLen, Undef);
}

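// vlalignb and vralignb select a VecLen-byte window from the concatenation
// Lo:Hi: vralignb starts the window Amt bytes into Lo, while vlalignb ends
// it Amt bytes before the end of Hi. Byte-level sketch (hypothetical
// values), with Lo = {L0..L7}, Hi = {H0..H7} and Amt = 3:
//   vralignb -> {L3, L4, L5, L6, L7, H0, H1, H2}
//   vlalignb -> {L5, L6, L7, H0, H1, H2, H3, H4}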
auto HexagonVectorCombine::vlalignb(IRBuilder<> &Builder, Value *Lo, Value *Hi,
                                    Value *Amt) const -> Value * {
  assert(Lo->getType() == Hi->getType() && "Argument type mismatch");
  assert(isSectorTy(Hi->getType()));
  if (isZero(Amt))
    return Hi;
  int VecLen = getSizeOf(Hi);
  if (auto IntAmt = getIntValue(Amt))
    return getElementRange(Builder, Lo, Hi, VecLen - IntAmt->getSExtValue(),
                           VecLen);

  if (HST.isTypeForHVX(Hi->getType())) {
    int HwLen = HST.getVectorLength();
    assert(VecLen == HwLen && "Expecting an exact HVX type");
    Intrinsic::ID V6_vlalignb = HwLen == 64
                                    ? Intrinsic::hexagon_V6_vlalignb
                                    : Intrinsic::hexagon_V6_vlalignb_128B;
    return createHvxIntrinsic(Builder, V6_vlalignb, Hi->getType(),
                              {Hi, Lo, Amt});
  }

  if (VecLen == 4) {
    Value *Pair = concat(Builder, {Lo, Hi});
    Value *Shift = Builder.CreateLShr(Builder.CreateShl(Pair, Amt), 32);
    Value *Trunc = Builder.CreateTrunc(Shift, Type::getInt32Ty(F.getContext()));
    return Builder.CreateBitCast(Trunc, Hi->getType());
  }
  if (VecLen == 8) {
    Value *Sub = Builder.CreateSub(getConstInt(VecLen), Amt);
    return vralignb(Builder, Lo, Hi, Sub);
  }
  llvm_unreachable("Unexpected vector length");
}

auto HexagonVectorCombine::vralignb(IRBuilder<> &Builder, Value *Lo, Value *Hi,
                                    Value *Amt) const -> Value * {
  assert(Lo->getType() == Hi->getType() && "Argument type mismatch");
  assert(isSectorTy(Lo->getType()));
  if (isZero(Amt))
    return Lo;
  int VecLen = getSizeOf(Lo);
  if (auto IntAmt = getIntValue(Amt))
    return getElementRange(Builder, Lo, Hi, IntAmt->getSExtValue(), VecLen);

  if (HST.isTypeForHVX(Lo->getType())) {
    int HwLen = HST.getVectorLength();
    assert(VecLen == HwLen && "Expecting an exact HVX type");
    Intrinsic::ID V6_valignb = HwLen == 64 ? Intrinsic::hexagon_V6_valignb
                                           : Intrinsic::hexagon_V6_valignb_128B;
    return createHvxIntrinsic(Builder, V6_valignb, Lo->getType(),
                              {Hi, Lo, Amt});
  }

  if (VecLen == 4) {
    Value *Pair = concat(Builder, {Lo, Hi});
    Value *Shift = Builder.CreateLShr(Pair, Amt);
    Value *Trunc = Builder.CreateTrunc(Shift, Type::getInt32Ty(F.getContext()));
    return Builder.CreateBitCast(Trunc, Lo->getType());
  }
  if (VecLen == 8) {
    Type *Int64Ty = Type::getInt64Ty(F.getContext());
    Value *Lo64 = Builder.CreateBitCast(Lo, Int64Ty);
    Value *Hi64 = Builder.CreateBitCast(Hi, Int64Ty);
    Function *FI = Intrinsic::getDeclaration(F.getParent(),
                                             Intrinsic::hexagon_S2_valignrb);
    Value *Call = Builder.CreateCall(FI, {Hi64, Lo64, Amt});
    return Builder.CreateBitCast(Call, Lo->getType());
  }
  llvm_unreachable("Unexpected vector length");
}

// Concatenates a sequence of vectors of the same type.
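// E.g. (sketch): four <4 x i8> inputs {A, B, C, D} are joined in log2
// rounds, {A, B, C, D} -> {A:B, C:D} -> {A:B:C:D}, each round shuffling
// adjacent pairs into double-length vectors; a final shuffle trims any
// undef padding added to make a round's operand count even.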
auto HexagonVectorCombine::concat(IRBuilder<> &Builder,
                                  ArrayRef<Value *> Vecs) const -> Value * {
  assert(!Vecs.empty());
  SmallVector<int, 256> SMask;
  std::vector<Value *> Work[2];
  int ThisW = 0, OtherW = 1;

  Work[ThisW].assign(Vecs.begin(), Vecs.end());
  while (Work[ThisW].size() > 1) {
    auto *Ty = cast<VectorType>(Work[ThisW].front()->getType());
    int ElemCount = Ty->getElementCount().getFixedValue();
    SMask.resize(ElemCount * 2);
    std::iota(SMask.begin(), SMask.end(), 0);

    Work[OtherW].clear();
    if (Work[ThisW].size() % 2 != 0)
      Work[ThisW].push_back(UndefValue::get(Ty));
    for (int i = 0, e = Work[ThisW].size(); i < e; i += 2) {
      Value *Joined = Builder.CreateShuffleVector(Work[ThisW][i],
                                                  Work[ThisW][i + 1], SMask);
      Work[OtherW].push_back(Joined);
    }
    std::swap(ThisW, OtherW);
  }

  // Since there may have been some undefs appended to make shuffle operands
  // have the same type, perform the last shuffle to only pick the original
  // elements.
  SMask.resize(Vecs.size() * getSizeOf(Vecs.front()->getType()));
  std::iota(SMask.begin(), SMask.end(), 0);
  Value *Total = Work[OtherW].front();
  return Builder.CreateShuffleVector(Total, SMask);
}

auto HexagonVectorCombine::vresize(IRBuilder<> &Builder, Value *Val,
                                   int NewSize, Value *Pad) const -> Value * {
  assert(isa<VectorType>(Val->getType()));
  auto *ValTy = cast<VectorType>(Val->getType());
  assert(ValTy->getElementType() == Pad->getType());

  int CurSize = ValTy->getElementCount().getFixedValue();
  if (CurSize == NewSize)
    return Val;
  // Truncate?
  if (CurSize > NewSize)
    return getElementRange(Builder, Val, /*Unused*/ Val, 0, NewSize);
  // Extend.
  SmallVector<int, 128> SMask(NewSize);
  std::iota(SMask.begin(), SMask.begin() + CurSize, 0);
  std::fill(SMask.begin() + CurSize, SMask.end(), CurSize);
  Value *PadVec = Builder.CreateVectorSplat(CurSize, Pad);
  return Builder.CreateShuffleVector(Val, PadVec, SMask);
}

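// Example (hypothetical types): a mask <4 x i1> that applied to <4 x i32>
// data is remapped for bytes as follows: sext to <4 x i32>, bitcast to
// <16 x i8>, then trunc to <16 x i1>, yielding one mask bit per byte of
// the original elements.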
auto HexagonVectorCombine::rescale(IRBuilder<> &Builder, Value *Mask,
                                   Type *FromTy, Type *ToTy) const -> Value * {
  // Mask is a vector <N x i1>, where each element corresponds to an
  // element of FromTy. Remap it so that each element will correspond
  // to an element of ToTy.
  assert(isa<VectorType>(Mask->getType()));

  Type *FromSTy = FromTy->getScalarType();
  Type *ToSTy = ToTy->getScalarType();
  if (FromSTy == ToSTy)
    return Mask;

  int FromSize = getSizeOf(FromSTy);
  int ToSize = getSizeOf(ToSTy);
  assert(FromSize % ToSize == 0 || ToSize % FromSize == 0);

  auto *MaskTy = cast<VectorType>(Mask->getType());
  int FromCount = MaskTy->getElementCount().getFixedValue();
  int ToCount = (FromCount * FromSize) / ToSize;
  assert((FromCount * FromSize) % ToSize == 0);

  // Mask <N x i1> -> sext to <N x FromTy> -> bitcast to <M x ToTy> ->
  // -> trunc to <M x i1>.
  Value *Ext = Builder.CreateSExt(
      Mask, VectorType::get(FromSTy, FromCount, /*Scalable*/ false));
  Value *Cast = Builder.CreateBitCast(
      Ext, VectorType::get(ToSTy, ToCount, /*Scalable*/ false));
  return Builder.CreateTrunc(
      Cast, VectorType::get(getBoolTy(), ToCount, /*Scalable*/ false));
}

// Bitcast to bytes, and return least significant bits.
auto HexagonVectorCombine::vlsb(IRBuilder<> &Builder, Value *Val) const
    -> Value * {
  Type *ScalarTy = Val->getType()->getScalarType();
  if (ScalarTy == getBoolTy())
    return Val;

  Value *Bytes = vbytes(Builder, Val);
  if (auto *VecTy = dyn_cast<VectorType>(Bytes->getType()))
    return Builder.CreateTrunc(Bytes, getBoolTy(getSizeOf(VecTy)));
  // If Bytes is a scalar (i.e. Val was a scalar byte), return i1, not
  // <1 x i1>.
  return Builder.CreateTrunc(Bytes, getBoolTy());
}

// Bitcast to bytes for non-bool. For bool, convert i1 -> i8.
auto HexagonVectorCombine::vbytes(IRBuilder<> &Builder, Value *Val) const
    -> Value * {
  Type *ScalarTy = Val->getType()->getScalarType();
  if (ScalarTy == getByteTy())
    return Val;

  if (ScalarTy != getBoolTy())
    return Builder.CreateBitCast(Val, getByteTy(getSizeOf(Val)));
  // For bool, return a sext from i1 to i8.
  if (auto *VecTy = dyn_cast<VectorType>(Val->getType()))
    return Builder.CreateSExt(Val, VectorType::get(getByteTy(), VecTy));
  return Builder.CreateSExt(Val, getByteTy());
}

auto HexagonVectorCombine::createHvxIntrinsic(IRBuilder<> &Builder,
                                              Intrinsic::ID IntID, Type *RetTy,
                                              ArrayRef<Value *> Args) const
    -> Value * {
  int HwLen = HST.getVectorLength();
  Type *BoolTy = Type::getInt1Ty(F.getContext());
  Type *Int32Ty = Type::getInt32Ty(F.getContext());
  // HVX vector -> v16i32/v32i32
  // HVX vector predicate -> v512i1/v1024i1
  auto getTypeForIntrin = [&](Type *Ty) -> Type * {
    if (HST.isTypeForHVX(Ty, /*IncludeBool*/ true)) {
      Type *ElemTy = cast<VectorType>(Ty)->getElementType();
      if (ElemTy == Int32Ty)
        return Ty;
      if (ElemTy == BoolTy)
        return VectorType::get(BoolTy, 8 * HwLen, /*Scalable*/ false);
      return VectorType::get(Int32Ty, HwLen / 4, /*Scalable*/ false);
    }
    // Non-HVX type. It should be a scalar.
    assert(Ty == Int32Ty || Ty->isIntegerTy(64));
    return Ty;
  };

  auto getCast = [&](IRBuilder<> &Builder, Value *Val,
                     Type *DestTy) -> Value * {
    Type *SrcTy = Val->getType();
    if (SrcTy == DestTy)
      return Val;
    if (HST.isTypeForHVX(SrcTy, /*IncludeBool*/ true)) {
      if (cast<VectorType>(SrcTy)->getElementType() == BoolTy) {
        // This should take care of casts the other way too, for example
        // v1024i1 -> v32i1.
        Intrinsic::ID TC = HwLen == 64
                               ? Intrinsic::hexagon_V6_pred_typecast
                               : Intrinsic::hexagon_V6_pred_typecast_128B;
        Function *FI = Intrinsic::getDeclaration(F.getParent(), TC,
                                                 {DestTy, Val->getType()});
        return Builder.CreateCall(FI, {Val});
      }
      // Non-predicate HVX vector.
      return Builder.CreateBitCast(Val, DestTy);
    }
    // Non-HVX type. It should be a scalar, and it should already have
    // a valid type.
    llvm_unreachable("Unexpected type");
  };

  SmallVector<Value *, 4> IntOps;
  for (Value *A : Args)
    IntOps.push_back(getCast(Builder, A, getTypeForIntrin(A->getType())));
  Function *FI = Intrinsic::getDeclaration(F.getParent(), IntID);
  Value *Call = Builder.CreateCall(FI, IntOps);

  Type *CallTy = Call->getType();
  if (CallTy == RetTy)
    return Call;
  // Scalar types should have RetTy matching the call return type.
  assert(HST.isTypeForHVX(CallTy, /*IncludeBool*/ true));
  if (cast<VectorType>(CallTy)->getElementType() == BoolTy)
    return getCast(Builder, Call, RetTy);
  return Builder.CreateBitCast(Call, RetTy);
}

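// Sketch of the approach (hypothetical IR): for
//   %p0 = getelementptr i32, i32* %base, i32 %n
//   %p1 = getelementptr i32, i32* %base, i32 %m
// the difference is %n - %m scaled by the element size. If the direct
// subtraction does not fold to a constant, the indices are split into
// their known and unknown bit portions (via computeKnownBits), and the
// difference is recovered when both partial subtractions simplify to
// constants.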
auto HexagonVectorCombine::calculatePointerDifference(Value *Ptr0,
                                                      Value *Ptr1) const
    -> Optional<int> {
  struct Builder : IRBuilder<> {
    Builder(BasicBlock *B) : IRBuilder<>(B) {}
    ~Builder() {
      for (Instruction *I : llvm::reverse(ToErase))
        I->eraseFromParent();
    }
    SmallVector<Instruction *, 8> ToErase;
  };

#define CallBuilder(B, F)                                                      \
  [&](auto &B_) {                                                              \
    Value *V = B_.F;                                                           \
    if (auto *I = dyn_cast<Instruction>(V))                                    \
      B_.ToErase.push_back(I);                                                 \
    return V;                                                                  \
  }(B)

  auto Simplify = [&](Value *V) {
    if (auto *I = dyn_cast<Instruction>(V)) {
      SimplifyQuery Q(DL, &TLI, &DT, &AC, I);
      if (Value *S = SimplifyInstruction(I, Q))
        return S;
    }
    return V;
  };

  auto StripBitCast = [](Value *V) {
    while (auto *C = dyn_cast<BitCastInst>(V))
      V = C->getOperand(0);
    return V;
  };

  Ptr0 = StripBitCast(Ptr0);
  Ptr1 = StripBitCast(Ptr1);
  if (!isa<GetElementPtrInst>(Ptr0) || !isa<GetElementPtrInst>(Ptr1))
    return None;

  auto *Gep0 = cast<GetElementPtrInst>(Ptr0);
  auto *Gep1 = cast<GetElementPtrInst>(Ptr1);
  if (Gep0->getPointerOperand() != Gep1->getPointerOperand())
    return None;

  Builder B(Gep0->getParent());
  Value *BasePtr = Gep0->getPointerOperand();
  int Scale = DL.getTypeStoreSize(BasePtr->getType()->getPointerElementType());

  // FIXME: for now only check GEPs with a single index.
  if (Gep0->getNumOperands() != 2 || Gep1->getNumOperands() != 2)
    return None;

  Value *Idx0 = Gep0->getOperand(1);
  Value *Idx1 = Gep1->getOperand(1);

  // First, try to simplify the subtraction directly.
  if (auto *Diff = dyn_cast<ConstantInt>(
          Simplify(CallBuilder(B, CreateSub(Idx0, Idx1)))))
    return Diff->getSExtValue() * Scale;

  KnownBits Known0 = computeKnownBits(Idx0, DL, 0, &AC, Gep0, &DT);
  KnownBits Known1 = computeKnownBits(Idx1, DL, 0, &AC, Gep1, &DT);
  APInt Unknown = ~(Known0.Zero | Known0.One) | ~(Known1.Zero | Known1.One);
  if (Unknown.isAllOnesValue())
    return None;

  Value *MaskU = ConstantInt::get(Idx0->getType(), Unknown);
  Value *AndU0 = Simplify(CallBuilder(B, CreateAnd(Idx0, MaskU)));
  Value *AndU1 = Simplify(CallBuilder(B, CreateAnd(Idx1, MaskU)));
  Value *SubU = Simplify(CallBuilder(B, CreateSub(AndU0, AndU1)));
  int Diff0 = 0;
  if (auto *C = dyn_cast<ConstantInt>(SubU)) {
    Diff0 = C->getSExtValue();
  } else {
    return None;
  }

  Value *MaskK = ConstantInt::get(MaskU->getType(), ~Unknown);
  Value *AndK0 = Simplify(CallBuilder(B, CreateAnd(Idx0, MaskK)));
  Value *AndK1 = Simplify(CallBuilder(B, CreateAnd(Idx1, MaskK)));
  Value *SubK = Simplify(CallBuilder(B, CreateSub(AndK0, AndK1)));
  int Diff1 = 0;
  if (auto *C = dyn_cast<ConstantInt>(SubK)) {
    Diff1 = C->getSExtValue();
  } else {
    return None;
  }

  return (Diff0 + Diff1) * Scale;

#undef CallBuilder
}

template <typename T>
auto HexagonVectorCombine::isSafeToMoveBeforeInBB(const Instruction &In,
                                                  BasicBlock::const_iterator To,
                                                  const T &Ignore) const
    -> bool {
  auto getLocOrNone = [this](const Instruction &I) -> Optional<MemoryLocation> {
    if (const auto *II = dyn_cast<IntrinsicInst>(&I)) {
      switch (II->getIntrinsicID()) {
      case Intrinsic::masked_load:
        return MemoryLocation::getForArgument(II, 0, TLI);
      case Intrinsic::masked_store:
        return MemoryLocation::getForArgument(II, 1, TLI);
      }
    }
    return MemoryLocation::getOrNone(&I);
  };

  // The source and the destination must be in the same basic block.
  const BasicBlock &Block = *In.getParent();
  assert(Block.begin() == To || Block.end() == To || To->getParent() == &Block);
  // No PHIs.
  if (isa<PHINode>(In) || (To != Block.end() && isa<PHINode>(*To)))
    return false;

  if (!mayBeMemoryDependent(In))
    return true;
  bool MayWrite = In.mayWriteToMemory();
  auto MaybeLoc = getLocOrNone(In);

  auto From = In.getIterator();
  if (From == To)
    return true;
  bool MoveUp = (To != Block.end() && To->comesBefore(&In));
  auto Range =
      MoveUp ? std::make_pair(To, From) : std::make_pair(std::next(From), To);
  for (auto It = Range.first; It != Range.second; ++It) {
    const Instruction &I = *It;
    if (llvm::is_contained(Ignore, &I))
      continue;
    // Parts based on isSafeToMoveBefore from CodeMoverUtils.cpp.
    if (I.mayThrow())
      return false;
    if (auto *CB = dyn_cast<CallBase>(&I)) {
      if (!CB->hasFnAttr(Attribute::WillReturn))
        return false;
      if (!CB->hasFnAttr(Attribute::NoSync))
        return false;
    }
    if (I.mayReadOrWriteMemory()) {
      auto MaybeLocI = getLocOrNone(I);
      if (MayWrite || I.mayWriteToMemory()) {
        if (!MaybeLoc || !MaybeLocI)
          return false;
        if (!AA.isNoAlias(*MaybeLoc, *MaybeLocI))
          return false;
      }
    }
  }
  return true;
}

#ifndef NDEBUG
auto HexagonVectorCombine::isByteVecTy(Type *Ty) const -> bool {
  if (auto *VecTy = dyn_cast<VectorType>(Ty))
    return VecTy->getElementType() == getByteTy();
  return false;
}

auto HexagonVectorCombine::isSectorTy(Type *Ty) const -> bool {
  if (!isByteVecTy(Ty))
    return false;
  int Size = getSizeOf(Ty);
  if (HST.isTypeForHVX(Ty))
    return Size == static_cast<int>(HST.getVectorLength());
  return Size == 4 || Size == 8;
}
#endif

auto HexagonVectorCombine::getElementRange(IRBuilder<> &Builder, Value *Lo,
                                           Value *Hi, int Start,
                                           int Length) const -> Value * {
  assert(0 <= Start && Start < Length);
  SmallVector<int, 128> SMask(Length);
  std::iota(SMask.begin(), SMask.end(), Start);
  return Builder.CreateShuffleVector(Lo, Hi, SMask);
}

// Pass management.

namespace llvm {
void initializeHexagonVectorCombineLegacyPass(PassRegistry &);
FunctionPass *createHexagonVectorCombineLegacyPass();
} // namespace llvm

namespace {
class HexagonVectorCombineLegacy : public FunctionPass {
public:
  static char ID;

  HexagonVectorCombineLegacy() : FunctionPass(ID) {}

  StringRef getPassName() const override { return "Hexagon Vector Combine"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addRequired<AAResultsWrapperPass>();
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addRequired<TargetPassConfig>();
    FunctionPass::getAnalysisUsage(AU);
  }

  bool runOnFunction(Function &F) override {
    AliasAnalysis &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
    AssumptionCache &AC =
        getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
    DominatorTree &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    TargetLibraryInfo &TLI =
        getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
    auto &TM = getAnalysis<TargetPassConfig>().getTM<HexagonTargetMachine>();
    HexagonVectorCombine HVC(F, AA, AC, DT, TLI, TM);
    return HVC.run();
  }
};
} // namespace

char HexagonVectorCombineLegacy::ID = 0;

INITIALIZE_PASS_BEGIN(HexagonVectorCombineLegacy, DEBUG_TYPE,
                      "Hexagon Vector Combine", false, false)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
INITIALIZE_PASS_END(HexagonVectorCombineLegacy, DEBUG_TYPE,
                    "Hexagon Vector Combine", false, false)

FunctionPass *llvm::createHexagonVectorCombineLegacyPass() {
  return new HexagonVectorCombineLegacy();
}