1 //===- ScopInfo.cpp -------------------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // Create a polyhedral description for a static control flow region.
10 //
11 // The pass creates a polyhedral description of the Scops detected by the Scop
12 // detection derived from their LLVM-IR code.
13 //
14 // This representation is shared among several tools in the polyhedral
15 // community, which are e.g. Cloog, Pluto, Loopo, Graphite.
16 //
17 //===----------------------------------------------------------------------===//
18
19 #include "polly/ScopInfo.h"
20 #include "polly/LinkAllPasses.h"
21 #include "polly/Options.h"
22 #include "polly/ScopBuilder.h"
23 #include "polly/ScopDetection.h"
24 #include "polly/Support/GICHelper.h"
25 #include "polly/Support/ISLOStream.h"
26 #include "polly/Support/ISLTools.h"
27 #include "polly/Support/SCEVAffinator.h"
28 #include "polly/Support/SCEVValidator.h"
29 #include "polly/Support/ScopHelper.h"
30 #include "llvm/ADT/APInt.h"
31 #include "llvm/ADT/ArrayRef.h"
32 #include "llvm/ADT/PostOrderIterator.h"
33 #include "llvm/ADT/Sequence.h"
34 #include "llvm/ADT/SmallPtrSet.h"
35 #include "llvm/ADT/SmallSet.h"
36 #include "llvm/ADT/Statistic.h"
37 #include "llvm/Analysis/AliasAnalysis.h"
38 #include "llvm/Analysis/AssumptionCache.h"
39 #include "llvm/Analysis/Loads.h"
40 #include "llvm/Analysis/LoopInfo.h"
41 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
42 #include "llvm/Analysis/RegionInfo.h"
43 #include "llvm/Analysis/RegionIterator.h"
44 #include "llvm/Analysis/ScalarEvolution.h"
45 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
46 #include "llvm/IR/BasicBlock.h"
47 #include "llvm/IR/ConstantRange.h"
48 #include "llvm/IR/DataLayout.h"
49 #include "llvm/IR/DebugLoc.h"
50 #include "llvm/IR/Dominators.h"
51 #include "llvm/IR/Function.h"
52 #include "llvm/IR/InstrTypes.h"
53 #include "llvm/IR/Instruction.h"
54 #include "llvm/IR/Instructions.h"
55 #include "llvm/IR/Module.h"
56 #include "llvm/IR/PassManager.h"
57 #include "llvm/IR/Type.h"
58 #include "llvm/IR/Value.h"
59 #include "llvm/InitializePasses.h"
60 #include "llvm/Support/Compiler.h"
61 #include "llvm/Support/Debug.h"
62 #include "llvm/Support/ErrorHandling.h"
63 #include "llvm/Support/raw_ostream.h"
64 #include "isl/aff.h"
65 #include "isl/local_space.h"
66 #include "isl/map.h"
67 #include "isl/options.h"
68 #include "isl/set.h"
69 #include <cassert>
70
71 using namespace llvm;
72 using namespace polly;
73
74 #define DEBUG_TYPE "polly-scops"
75
// Statistic counters, one per kind of assumption taken while modeling SCoPs.
STATISTIC(AssumptionsAliasing, "Number of aliasing assumptions taken.");
STATISTIC(AssumptionsInbounds, "Number of inbounds assumptions taken.");
STATISTIC(AssumptionsWrapping, "Number of wrapping assumptions taken.");
STATISTIC(AssumptionsUnsigned, "Number of unsigned assumptions taken.");
STATISTIC(AssumptionsComplexity, "Number of too complex SCoPs.");
STATISTIC(AssumptionsUnprofitable, "Number of unprofitable SCoPs.");
STATISTIC(AssumptionsErrorBlock, "Number of error block assumptions taken.");
STATISTIC(AssumptionsInfiniteLoop, "Number of bounded loop assumptions taken.");
STATISTIC(AssumptionsInvariantLoad,
          "Number of invariant loads assumptions taken.");
STATISTIC(AssumptionsDelinearization,
          "Number of delinearization assumptions taken.");

// Statistics about the SCoPs that remain feasible after ScopInfo ran.
STATISTIC(NumScops, "Number of feasible SCoPs after ScopInfo");
STATISTIC(NumLoopsInScop, "Number of loops in scops");
STATISTIC(NumBoxedLoops, "Number of boxed loops in SCoPs after ScopInfo");
STATISTIC(NumAffineLoops, "Number of affine loops in SCoPs after ScopInfo");

// Histogram of the maximal loop depth encountered per SCoP.
STATISTIC(NumScopsDepthZero, "Number of scops with maximal loop depth 0");
STATISTIC(NumScopsDepthOne, "Number of scops with maximal loop depth 1");
STATISTIC(NumScopsDepthTwo, "Number of scops with maximal loop depth 2");
STATISTIC(NumScopsDepthThree, "Number of scops with maximal loop depth 3");
STATISTIC(NumScopsDepthFour, "Number of scops with maximal loop depth 4");
STATISTIC(NumScopsDepthFive, "Number of scops with maximal loop depth 5");
STATISTIC(NumScopsDepthLarger,
          "Number of scops with maximal loop depth 6 and larger");
STATISTIC(MaxNumLoopsInScop, "Maximal number of loops in scops");

// Counters for scalar (value/PHI/singleton) writes left after modeling.
STATISTIC(NumValueWrites, "Number of scalar value writes after ScopInfo");
STATISTIC(
    NumValueWritesInLoops,
    "Number of scalar value writes nested in affine loops after ScopInfo");
STATISTIC(NumPHIWrites, "Number of scalar phi writes after ScopInfo");
STATISTIC(NumPHIWritesInLoops,
          "Number of scalar phi writes nested in affine loops after ScopInfo");
STATISTIC(NumSingletonWrites, "Number of singleton writes after ScopInfo");
STATISTIC(NumSingletonWritesInLoops,
          "Number of singleton writes nested in affine loops after ScopInfo");

// Maximal number of disjuncts allowed in a statement's domain.
int const polly::MaxDisjunctsInDomain = 20;

// The number of disjuncts in the context after which we stop to add more
// disjuncts. This parameter is there to avoid exponential growth in the
// number of disjuncts when adding non-convex sets to the context.
static int const MaxDisjunctsInContext = 4;

// Be a bit more generous for the defined behavior context which is used less
// often. NOTE(review): identifier spelling ("Disjunkts"/"Behaviour") is kept
// as-is; it may be referenced elsewhere in this file.
static int const MaxDisjunktsInDefinedBehaviourContext = 8;
125
// Command-line knobs controlling precision and verbosity of SCoP modeling.
// All of them are registered under the common Polly option category.

// Suppress remarks about assumptions that are already known to hold.
static cl::opt<bool> PollyRemarksMinimal(
    "polly-remarks-minimal",
    cl::desc("Do not emit remarks about assumptions that are known"),
    cl::Hidden, cl::ZeroOrMore, cl::init(false), cl::cat(PollyCategory));

// Abort compilation when isl signals an error (on by default).
static cl::opt<bool>
    IslOnErrorAbort("polly-on-isl-error-abort",
                    cl::desc("Abort if an isl error is encountered"),
                    cl::init(true), cl::cat(PollyCategory));

// Trade compile time for more precise inbounds assumptions.
static cl::opt<bool> PollyPreciseInbounds(
    "polly-precise-inbounds",
    cl::desc("Take more precise inbounds assumptions (do not scale well)"),
    cl::Hidden, cl::init(false), cl::cat(PollyCategory));

// Skip adding type-derived parameter bounds and the corresponding gisting.
static cl::opt<bool> PollyIgnoreParamBounds(
    "polly-ignore-parameter-bounds",
    cl::desc(
        "Do not add parameter bounds and do no gist simplify sets accordingly"),
    cl::Hidden, cl::init(false), cl::cat(PollyCategory));

// Keep folded access relations even when they grow more complex.
static cl::opt<bool> PollyPreciseFoldAccesses(
    "polly-precise-fold-accesses",
    cl::desc("Fold memory accesses to model more possible delinearizations "
             "(does not scale well)"),
    cl::Hidden, cl::init(false), cl::cat(PollyCategory));

// Storage backing the -polly-use-llvm-names flag (see XUseInstructionNames).
bool polly::UseInstructionNames;

// Writes its value through to polly::UseInstructionNames via cl::location.
static cl::opt<bool, true> XUseInstructionNames(
    "polly-use-llvm-names",
    cl::desc("Use LLVM-IR names when deriving statement names"),
    cl::location(UseInstructionNames), cl::Hidden, cl::init(false),
    cl::ZeroOrMore, cl::cat(PollyCategory));

// Include the per-statement instruction list when printing ScopStmts.
static cl::opt<bool> PollyPrintInstructions(
    "polly-print-instructions", cl::desc("Output instructions per ScopStmt"),
    cl::Hidden, cl::Optional, cl::init(false), cl::cat(PollyCategory));

// Additional arguments handed on to ISL.
static cl::list<std::string> IslArgs("polly-isl-arg",
                                     cl::value_desc("argument"),
                                     cl::desc("Option passed to ISL"),
                                     cl::ZeroOrMore, cl::cat(PollyCategory));
169
170 //===----------------------------------------------------------------------===//
171
addRangeBoundsToSet(isl::set S,const ConstantRange & Range,int dim,isl::dim type)172 static isl::set addRangeBoundsToSet(isl::set S, const ConstantRange &Range,
173 int dim, isl::dim type) {
174 isl::val V;
175 isl::ctx Ctx = S.ctx();
176
177 // The upper and lower bound for a parameter value is derived either from
178 // the data type of the parameter or from the - possibly more restrictive -
179 // range metadata.
180 V = valFromAPInt(Ctx.get(), Range.getSignedMin(), true);
181 S = S.lower_bound_val(type, dim, V);
182 V = valFromAPInt(Ctx.get(), Range.getSignedMax(), true);
183 S = S.upper_bound_val(type, dim, V);
184
185 if (Range.isFullSet())
186 return S;
187
188 if (S.n_basic_set() > MaxDisjunctsInContext)
189 return S;
190
191 // In case of signed wrapping, we can refine the set of valid values by
192 // excluding the part not covered by the wrapping range.
193 if (Range.isSignWrappedSet()) {
194 V = valFromAPInt(Ctx.get(), Range.getLower(), true);
195 isl::set SLB = S.lower_bound_val(type, dim, V);
196
197 V = valFromAPInt(Ctx.get(), Range.getUpper(), true);
198 V = V.sub_ui(1);
199 isl::set SUB = S.upper_bound_val(type, dim, V);
200 S = SLB.unite(SUB);
201 }
202
203 return S;
204 }
205
identifyBasePtrOriginSAI(Scop * S,Value * BasePtr)206 static const ScopArrayInfo *identifyBasePtrOriginSAI(Scop *S, Value *BasePtr) {
207 LoadInst *BasePtrLI = dyn_cast<LoadInst>(BasePtr);
208 if (!BasePtrLI)
209 return nullptr;
210
211 if (!S->contains(BasePtrLI))
212 return nullptr;
213
214 ScalarEvolution &SE = *S->getSE();
215
216 auto *OriginBaseSCEV =
217 SE.getPointerBase(SE.getSCEV(BasePtrLI->getPointerOperand()));
218 if (!OriginBaseSCEV)
219 return nullptr;
220
221 auto *OriginBaseSCEVUnknown = dyn_cast<SCEVUnknown>(OriginBaseSCEV);
222 if (!OriginBaseSCEVUnknown)
223 return nullptr;
224
225 return S->getScopArrayInfo(OriginBaseSCEVUnknown->getValue(),
226 MemoryKind::Array);
227 }
228
// Construct a ScopArrayInfo for \p BasePtr.
//
// The array's isl name is either the caller-provided \p BaseName or derived
// from the base pointer ("MemRef" prefix; PHI arrays get a "__phi" suffix).
// For real arrays with a known base pointer we additionally try to identify
// the array the base pointer itself was loaded from (BasePtrOriginSAI).
ScopArrayInfo::ScopArrayInfo(Value *BasePtr, Type *ElementType, isl::ctx Ctx,
                             ArrayRef<const SCEV *> Sizes, MemoryKind Kind,
                             const DataLayout &DL, Scop *S,
                             const char *BaseName)
    : BasePtr(BasePtr), ElementType(ElementType), Kind(Kind), DL(DL), S(*S) {
  std::string BasePtrName =
      BaseName ? BaseName
               : getIslCompatibleName("MemRef", BasePtr, S->getNextArrayIdx(),
                                      Kind == MemoryKind::PHI ? "__phi" : "",
                                      UseInstructionNames);
  // This object registers itself as the user pointer of its isl id; getFromId
  // relies on that.
  Id = isl::id::alloc(Ctx, BasePtrName, this);

  updateSizes(Sizes);

  // Only real arrays with a known base pointer can be derived from another
  // array.
  if (!BasePtr || Kind != MemoryKind::Array) {
    BasePtrOriginSAI = nullptr;
    return;
  }

  BasePtrOriginSAI = identifyBasePtrOriginSAI(S, BasePtr);
  if (BasePtrOriginSAI)
    const_cast<ScopArrayInfo *>(BasePtrOriginSAI)->addDerivedSAI(this);
}

ScopArrayInfo::~ScopArrayInfo() = default;
254
getSpace() const255 isl::space ScopArrayInfo::getSpace() const {
256 auto Space = isl::space(Id.ctx(), 0, getNumberOfDimensions());
257 Space = Space.set_tuple_id(isl::dim::set, Id);
258 return Space;
259 }
260
isReadOnly()261 bool ScopArrayInfo::isReadOnly() {
262 isl::union_set WriteSet = S.getWrites().range();
263 isl::space Space = getSpace();
264 WriteSet = WriteSet.extract_set(Space);
265
266 return bool(WriteSet.is_empty());
267 }
268
isCompatibleWith(const ScopArrayInfo * Array) const269 bool ScopArrayInfo::isCompatibleWith(const ScopArrayInfo *Array) const {
270 if (Array->getElementType() != getElementType())
271 return false;
272
273 if (Array->getNumberOfDimensions() != getNumberOfDimensions())
274 return false;
275
276 for (unsigned i = 0; i < getNumberOfDimensions(); i++)
277 if (Array->getDimensionSize(i) != getDimensionSize(i))
278 return false;
279
280 return true;
281 }
282
// Merge \p NewElementType into the canonical element type of this array.
void ScopArrayInfo::updateElementType(Type *NewElementType) {
  if (NewElementType == ElementType)
    return;

  auto OldElementSize = DL.getTypeAllocSizeInBits(ElementType);
  auto NewElementSize = DL.getTypeAllocSizeInBits(NewElementType);

  // Same bit width (or an unsized type): keep the current element type.
  if (NewElementSize == OldElementSize || NewElementSize == 0)
    return;

  // NOTE(review): this condition looks unsatisfiable -- when NewElementSize <
  // OldElementSize, NewElementSize % OldElementSize equals NewElementSize,
  // which is non-zero at this point, so the then-branch appears dead. Possibly
  // "OldElementSize % NewElementSize == 0" was intended; confirm before
  // changing. The GCD fallback below yields an integer type of the same width
  // in that case either way.
  if (NewElementSize % OldElementSize == 0 && NewElementSize < OldElementSize) {
    ElementType = NewElementType;
  } else {
    // Fall back to an integer type whose width divides both element widths so
    // that accesses of either type can be expressed in terms of it.
    auto GCD = GreatestCommonDivisor64(NewElementSize, OldElementSize);
    ElementType = IntegerType::get(ElementType->getContext(), GCD);
  }
}
300
/// Make the ScopArrayInfo model a Fortran array.
///
/// The outermost dimension size of a Fortran array has no SCEV description;
/// it is instead modeled as a fresh isl parameter named
/// "<array-name>_fortranarr_size" and stored in DimensionSizesPw[0].
void ScopArrayInfo::applyAndSetFAD(Value *FAD) {
  assert(FAD && "got invalid Fortran array descriptor");
  // Receiving the same descriptor a second time is a no-op; a different one
  // for the same array is a modeling error.
  if (this->FAD) {
    assert(this->FAD == FAD &&
           "receiving different array descriptors for same array");
    return;
  }

  // The outermost dimension must not have a SCEV-derived size yet.
  assert(DimensionSizesPw.size() > 0 && DimensionSizesPw[0].is_null());
  assert(!this->FAD);
  this->FAD = FAD;

  // Space with a single parameter dimension holding the run-time size.
  isl::space Space(S.getIslCtx(), 1, 0);

  std::string param_name = getName();
  param_name += "_fortranarr_size";
  isl::id IdPwAff = isl::id::alloc(S.getIslCtx(), param_name, this);

  Space = Space.set_dim_id(isl::dim::param, 0, IdPwAff);
  // The size is the (otherwise unconstrained) parameter itself.
  isl::pw_aff PwAff =
      isl::aff::var_on_domain(isl::local_space(Space), isl::dim::param, 0);

  DimensionSizesPw[0] = PwAff;
}
326
updateSizes(ArrayRef<const SCEV * > NewSizes,bool CheckConsistency)327 bool ScopArrayInfo::updateSizes(ArrayRef<const SCEV *> NewSizes,
328 bool CheckConsistency) {
329 int SharedDims = std::min(NewSizes.size(), DimensionSizes.size());
330 int ExtraDimsNew = NewSizes.size() - SharedDims;
331 int ExtraDimsOld = DimensionSizes.size() - SharedDims;
332
333 if (CheckConsistency) {
334 for (int i = 0; i < SharedDims; i++) {
335 auto *NewSize = NewSizes[i + ExtraDimsNew];
336 auto *KnownSize = DimensionSizes[i + ExtraDimsOld];
337 if (NewSize && KnownSize && NewSize != KnownSize)
338 return false;
339 }
340
341 if (DimensionSizes.size() >= NewSizes.size())
342 return true;
343 }
344
345 DimensionSizes.clear();
346 DimensionSizes.insert(DimensionSizes.begin(), NewSizes.begin(),
347 NewSizes.end());
348 DimensionSizesPw.clear();
349 for (const SCEV *Expr : DimensionSizes) {
350 if (!Expr) {
351 DimensionSizesPw.push_back(isl::pw_aff());
352 continue;
353 }
354 isl::pw_aff Size = S.getPwAffOnly(Expr);
355 DimensionSizesPw.push_back(Size);
356 }
357 return true;
358 }
359
// Return the name stored in this array's isl identifier.
std::string ScopArrayInfo::getName() const { return Id.get_name(); }

// Alloc size of the canonical element type in bytes (per DataLayout).
int ScopArrayInfo::getElemSizeInBytes() const {
  return DL.getTypeAllocSize(ElementType);
}

// The isl identifier that tags this array's space.
isl::id ScopArrayInfo::getBasePtrId() const { return Id; }

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
// Debug helper: print this array to stderr.
LLVM_DUMP_METHOD void ScopArrayInfo::dump() const { print(errs()); }
#endif
371
// Print this array's element type, name, dimension sizes, base-pointer
// origin (if any), and element size. With \p SizeAsPwAff the dimension sizes
// are printed as isl piecewise-affine expressions instead of SCEVs.
void ScopArrayInfo::print(raw_ostream &OS, bool SizeAsPwAff) const {
  OS.indent(8) << *getElementType() << " " << getName();
  unsigned u = 0;
  // If this is a Fortran array, then we can print the outermost dimension
  // as a isl_pw_aff even though there is no SCEV information.
  bool IsOutermostSizeKnown = SizeAsPwAff && FAD;

  // An outermost dimension without a known size is printed as "[*]".
  if (!IsOutermostSizeKnown && getNumberOfDimensions() > 0 &&
      !getDimensionSize(0)) {
    OS << "[*]";
    u++;
  }
  for (; u < getNumberOfDimensions(); u++) {
    OS << "[";

    if (SizeAsPwAff) {
      isl::pw_aff Size = getDimensionSizePw(u);
      OS << " " << Size << " ";
    } else {
      OS << *getDimensionSize(u);
    }

    OS << "]";
  }

  OS << ";";

  if (BasePtrOriginSAI)
    OS << " [BasePtrOrigin: " << BasePtrOriginSAI->getName() << "]";

  OS << " // Element size " << getElemSizeInBytes() << "\n";
}
404
405 const ScopArrayInfo *
getFromAccessFunction(isl::pw_multi_aff PMA)406 ScopArrayInfo::getFromAccessFunction(isl::pw_multi_aff PMA) {
407 isl::id Id = PMA.get_tuple_id(isl::dim::out);
408 assert(!Id.is_null() && "Output dimension didn't have an ID");
409 return getFromId(Id);
410 }
411
getFromId(isl::id Id)412 const ScopArrayInfo *ScopArrayInfo::getFromId(isl::id Id) {
413 void *User = Id.get_user();
414 const ScopArrayInfo *SAI = static_cast<ScopArrayInfo *>(User);
415 return SAI;
416 }
417
// Fold subscript overflow of constant-size dimensions into the next outer
// dimension: index_i becomes (index_i mod size_i) while the carry
// floor(index_i / size_i) is added to index_{i-1}. Dimension 0 only absorbs
// carries and is never wrapped itself.
void MemoryAccess::wrapConstantDimensions() {
  auto *SAI = getScopArrayInfo();
  isl::space ArraySpace = SAI->getSpace();
  isl::ctx Ctx = ArraySpace.ctx();
  unsigned DimsArray = SAI->getNumberOfDimensions();

  // Start from the identity map on the array space and successively replace
  // the output expressions of the wrapped dimensions.
  isl::multi_aff DivModAff = isl::multi_aff::identity(
      ArraySpace.map_from_domain_and_range(ArraySpace));
  isl::local_space LArraySpace = isl::local_space(ArraySpace);

  // Begin with last dimension, to iteratively carry into higher dimensions.
  for (int i = DimsArray - 1; i > 0; i--) {
    auto *DimSize = SAI->getDimensionSize(i);
    auto *DimSizeCst = dyn_cast<SCEVConstant>(DimSize);

    // This transformation is not applicable to dimensions with dynamic size.
    if (!DimSizeCst)
      continue;

    // This transformation is not applicable to dimensions of size zero.
    if (DimSize->isZero())
      continue;

    isl::val DimSizeVal =
        valFromAPInt(Ctx.get(), DimSizeCst->getAPInt(), false);
    isl::aff Var = isl::aff::var_on_domain(LArraySpace, isl::dim::set, i);
    isl::aff PrevVar =
        isl::aff::var_on_domain(LArraySpace, isl::dim::set, i - 1);

    // Compute: index % size
    // Modulo must apply in the divide of the previous iteration, if any.
    isl::aff Modulo = Var.mod(DimSizeVal);
    Modulo = Modulo.pullback(DivModAff);

    // Compute: floor(index / size), carried into the next outer dimension.
    isl::aff Divide = Var.div(isl::aff(LArraySpace, DimSizeVal));
    Divide = Divide.floor();
    Divide = Divide.add(PrevVar);
    Divide = Divide.pullback(DivModAff);

    // Apply Modulo and Divide.
    DivModAff = DivModAff.set_aff(i, Modulo);
    DivModAff = DivModAff.set_aff(i - 1, Divide);
  }

  // Apply all modulo/divides on the accesses.
  isl::map Relation = AccessRelation;
  Relation = Relation.apply_range(isl::map::from_multi_aff(DivModAff));
  Relation = Relation.detect_equalities();
  AccessRelation = Relation;
}
469
// Adjust the access relation to the dimensionality and element size of the
// underlying ScopArrayInfo: embed the access into the array space, rescale
// one-dimensional accesses by the element size, and widen accesses whose
// loaded type is larger than the array's canonical element type.
void MemoryAccess::updateDimensionality() {
  auto *SAI = getScopArrayInfo();
  isl::space ArraySpace = SAI->getSpace();
  isl::space AccessSpace = AccessRelation.get_space().range();
  isl::ctx Ctx = ArraySpace.ctx();

  // Number of leading array dimensions the access relation does not model.
  auto DimsArray = ArraySpace.dim(isl::dim::set);
  auto DimsAccess = AccessSpace.dim(isl::dim::set);
  auto DimsMissing = DimsArray - DimsAccess;

  auto *BB = getStatement()->getEntryBlock();
  auto &DL = BB->getModule()->getDataLayout();
  unsigned ArrayElemSize = SAI->getElemSizeInBytes();
  unsigned ElemBytes = DL.getTypeAllocSize(getElementType());

  // Embed the access space into the array space: missing leading dimensions
  // are fixed to zero, the remaining dimensions are equated pairwise.
  isl::map Map = isl::map::from_domain_and_range(
      isl::set::universe(AccessSpace), isl::set::universe(ArraySpace));

  for (auto i : seq<isl_size>(0, DimsMissing))
    Map = Map.fix_si(isl::dim::out, i, 0);

  for (auto i : seq<isl_size>(DimsMissing, DimsArray))
    Map = Map.equate(isl::dim::in, i - DimsMissing, isl::dim::out, i);

  AccessRelation = AccessRelation.apply_range(Map);

  // For the non delinearized arrays, divide the access function of the last
  // subscript by the size of the elements in the array.
  //
  // A stride one array access in C expressed as A[i] is expressed in
  // LLVM-IR as something like A[i * elementsize]. This hides the fact that
  // two subsequent values of 'i' index two values that are stored next to
  // each other in memory. By this division we make this characteristic
  // obvious again. If the base pointer was accessed with offsets not divisible
  // by the accesses element size, we will have chosen a smaller ArrayElemSize
  // that divides the offsets of all accesses to this base pointer.
  if (DimsAccess == 1) {
    isl::val V = isl::val(Ctx, ArrayElemSize);
    AccessRelation = AccessRelation.floordiv_val(V);
  }

  // We currently do this only if we added at least one dimension, which means
  // some dimension's indices have not been specified, an indicator that some
  // index values have been added together.
  // TODO: Investigate general usefulness; Effect on unit tests is to make index
  // expressions more complicated.
  if (DimsMissing)
    wrapConstantDimensions();

  if (!isAffine())
    computeBoundsOnAccessRelation(ArrayElemSize);

  // Introduce multi-element accesses in case the type loaded by this memory
  // access is larger than the canonical element type of the array.
  //
  // An access ((float *)A)[i] to an array char *A is modeled as
  // {[i] -> A[o] : 4 i <= o <= 4 i + 3
  if (ElemBytes > ArrayElemSize) {
    assert(ElemBytes % ArrayElemSize == 0 &&
           "Loaded element size should be multiple of canonical element size");
    isl::map Map = isl::map::from_domain_and_range(
        isl::set::universe(ArraySpace), isl::set::universe(ArraySpace));
    for (auto i : seq<isl_size>(0, DimsArray - 1))
      Map = Map.equate(isl::dim::in, i, isl::dim::out, i);

    isl::constraint C;
    isl::local_space LS;

    LS = isl::local_space(Map.get_space());
    int Num = ElemBytes / getScopArrayInfo()->getElemSizeInBytes();

    // Innermost dimension: out <= in + Num - 1 ...
    C = isl::constraint::alloc_inequality(LS);
    C = C.set_constant_val(isl::val(Ctx, Num - 1));
    C = C.set_coefficient_si(isl::dim::in, DimsArray - 1, 1);
    C = C.set_coefficient_si(isl::dim::out, DimsArray - 1, -1);
    Map = Map.add_constraint(C);

    // ... and out >= in, so each access covers Num consecutive elements.
    C = isl::constraint::alloc_inequality(LS);
    C = C.set_coefficient_si(isl::dim::in, DimsArray - 1, -1);
    C = C.set_coefficient_si(isl::dim::out, DimsArray - 1, 1);
    C = C.set_constant_val(isl::val(Ctx, 0));
    Map = Map.add_constraint(C);
    AccessRelation = AccessRelation.apply_range(Map);
  }
}
555
556 const std::string
getReductionOperatorStr(MemoryAccess::ReductionType RT)557 MemoryAccess::getReductionOperatorStr(MemoryAccess::ReductionType RT) {
558 switch (RT) {
559 case MemoryAccess::RT_NONE:
560 llvm_unreachable("Requested a reduction operator string for a memory "
561 "access which isn't a reduction");
562 case MemoryAccess::RT_ADD:
563 return "+";
564 case MemoryAccess::RT_MUL:
565 return "*";
566 case MemoryAccess::RT_BOR:
567 return "|";
568 case MemoryAccess::RT_BXOR:
569 return "^";
570 case MemoryAccess::RT_BAND:
571 return "&";
572 }
573 llvm_unreachable("Unknown reduction type");
574 }
575
getOriginalScopArrayInfo() const576 const ScopArrayInfo *MemoryAccess::getOriginalScopArrayInfo() const {
577 isl::id ArrayId = getArrayId();
578 void *User = ArrayId.get_user();
579 const ScopArrayInfo *SAI = static_cast<ScopArrayInfo *>(User);
580 return SAI;
581 }
582
getLatestScopArrayInfo() const583 const ScopArrayInfo *MemoryAccess::getLatestScopArrayInfo() const {
584 isl::id ArrayId = getLatestArrayId();
585 void *User = ArrayId.get_user();
586 const ScopArrayInfo *SAI = static_cast<ScopArrayInfo *>(User);
587 return SAI;
588 }
589
getOriginalArrayId() const590 isl::id MemoryAccess::getOriginalArrayId() const {
591 return AccessRelation.get_tuple_id(isl::dim::out);
592 }
593
getLatestArrayId() const594 isl::id MemoryAccess::getLatestArrayId() const {
595 if (!hasNewAccessRelation())
596 return getOriginalArrayId();
597 return NewAccessRelation.get_tuple_id(isl::dim::out);
598 }
599
getAddressFunction() const600 isl::map MemoryAccess::getAddressFunction() const {
601 return getAccessRelation().lexmin();
602 }
603
604 isl::pw_multi_aff
applyScheduleToAccessRelation(isl::union_map USchedule) const605 MemoryAccess::applyScheduleToAccessRelation(isl::union_map USchedule) const {
606 isl::map Schedule, ScheduledAccRel;
607 isl::union_set UDomain;
608
609 UDomain = getStatement()->getDomain();
610 USchedule = USchedule.intersect_domain(UDomain);
611 Schedule = isl::map::from_union_map(USchedule);
612 ScheduledAccRel = getAddressFunction().apply_domain(Schedule);
613 return isl::pw_multi_aff::from_map(ScheduledAccRel);
614 }
615
// The access relation as built from the IR, before any transformation.
isl::map MemoryAccess::getOriginalAccessRelation() const {
  return AccessRelation;
}

// String form of the original access relation (for printing/debugging).
std::string MemoryAccess::getOriginalAccessRelationStr() const {
  return stringFromIslObj(AccessRelation);
}

// Space of the original access relation.
isl::space MemoryAccess::getOriginalAccessRelationSpace() const {
  return AccessRelation.get_space();
}

// The updated access relation; null if none has been set.
isl::map MemoryAccess::getNewAccessRelation() const {
  return NewAccessRelation;
}

// String form of the updated access relation.
std::string MemoryAccess::getNewAccessRelationStr() const {
  return stringFromIslObj(NewAccessRelation);
}

// String form of the currently effective access relation.
std::string MemoryAccess::getAccessRelationStr() const {
  return stringFromIslObj(getAccessRelation());
}
639
createBasicAccessMap(ScopStmt * Statement)640 isl::basic_map MemoryAccess::createBasicAccessMap(ScopStmt *Statement) {
641 isl::space Space = isl::space(Statement->getIslCtx(), 0, 1);
642 Space = Space.align_params(Statement->getDomainSpace());
643
644 return isl::basic_map::from_domain_and_range(
645 isl::basic_set::universe(Statement->getDomainSpace()),
646 isl::basic_set::universe(Space));
647 }
648
// Formalize no out-of-bound access assumption
//
// When delinearizing array accesses we optimistically assume that the
// delinearized accesses do not access out of bound locations (the subscript
// expression of each array evaluates for each statement instance that is
// executed to a value that is larger than zero and strictly smaller than the
// size of the corresponding dimension). The only exception is the outermost
// dimension for which we do not need to assume any upper bound. At this point
// we formalize this assumption to ensure that at code generation time the
// relevant run-time checks can be generated.
//
// To find the set of constraints necessary to avoid out of bound accesses, we
// first build the set of data locations that are not within array bounds. We
// then apply the reverse access relation to obtain the set of iterations that
// may contain invalid accesses and reduce this set of iterations to the ones
// that are actually executed by intersecting them with the domain of the
// statement. If we now project out all loop dimensions, we obtain a set of
// parameters that may cause statement instances to be executed that may
// possibly yield out of bound memory accesses. The complement of these
// constraints is the set of constraints that needs to be assumed to ensure such
// statement instances are never executed.
isl::set MemoryAccess::assumeNoOutOfBound() {
  auto *SAI = getScopArrayInfo();
  isl::space Space = getOriginalAccessRelationSpace().range();
  isl::set Outside = isl::set::empty(Space);
  // Dimension 0 is skipped: no upper bound is assumed for the outermost
  // dimension.
  for (int i = 1, Size = Space.dim(isl::dim::set); i < Size; ++i) {
    isl::local_space LS(Space);
    isl::pw_aff Var = isl::pw_aff::var_on_domain(LS, isl::dim::set, i);
    isl::pw_aff Zero = isl::pw_aff(LS);

    // A subscript is out of bounds iff it is negative or at least as large
    // as the dimension's size.
    isl::set DimOutside = Var.lt_set(Zero);
    isl::pw_aff SizeE = SAI->getDimensionSizePw(i);
    SizeE = SizeE.add_dims(isl::dim::in, Space.dim(isl::dim::set));
    SizeE = SizeE.set_tuple_id(isl::dim::in, Space.get_tuple_id(isl::dim::set));
    DimOutside = DimOutside.unite(SizeE.le_set(Var));

    Outside = Outside.unite(DimOutside);
  }

  // Map out-of-bound data locations back to the executed statement instances
  // that may access them and keep only the parameter constraints.
  Outside = Outside.apply(getAccessRelation().reverse());
  Outside = Outside.intersect(Statement->getDomain());
  Outside = Outside.params();

  // Remove divs to avoid the construction of overly complicated assumptions.
  // Doing so increases the set of parameter combinations that are assumed to
  // not appear. This is always safe, but may make the resulting run-time check
  // bail out more often than strictly necessary.
  Outside = Outside.remove_divs();
  Outside = Outside.complement();

  if (!PollyPreciseInbounds)
    Outside = Outside.gist_params(Statement->getDomain().params());
  return Outside;
}
703
// Build the access relation for a memory intrinsic.
//
// Subscripts[0] is the start offset; Subscripts[1], when available, the
// access length. The resulting relation covers the half-open interval
// [start, start + length); with an unknown length the range is
// overapproximated by the universe.
void MemoryAccess::buildMemIntrinsicAccessRelation() {
  assert(isMemoryIntrinsic());
  assert(Subscripts.size() == 2 && Sizes.size() == 1);

  isl::pw_aff SubscriptPWA = getPwAff(Subscripts[0]);
  isl::map SubscriptMap = isl::map::from_pw_aff(SubscriptPWA);

  isl::map LengthMap;
  if (Subscripts[1] == nullptr) {
    // Unknown length: any offset may be touched.
    LengthMap = isl::map::universe(SubscriptMap.get_space());
  } else {
    isl::pw_aff LengthPWA = getPwAff(Subscripts[1]);
    LengthMap = isl::map::from_pw_aff(LengthPWA);
    // Replace the length value by all values strictly smaller than it.
    isl::space RangeSpace = LengthMap.get_space().range();
    LengthMap = LengthMap.apply_range(isl::map::lex_gt(RangeSpace));
  }
  LengthMap = LengthMap.lower_bound_si(isl::dim::out, 0, 0);
  LengthMap = LengthMap.align_params(SubscriptMap.get_space());
  SubscriptMap = SubscriptMap.align_params(LengthMap.get_space());
  // Shift the [0, length) interval by the start offset.
  LengthMap = LengthMap.sum(SubscriptMap);
  AccessRelation =
      LengthMap.set_tuple_id(isl::dim::in, getStatement()->getDomainId());
}
727
// Bound the range of a (typically non-affine) access relation using SCEV's
// signed range of the pointer offset relative to its base pointer.
void MemoryAccess::computeBoundsOnAccessRelation(unsigned ElementSize) {
  ScalarEvolution *SE = Statement->getParent()->getSE();

  // Memory intrinsics are handled by buildMemIntrinsicAccessRelation.
  auto MAI = MemAccInst(getAccessInstruction());
  if (isa<MemIntrinsic>(MAI))
    return;

  Value *Ptr = MAI.getPointerOperand();
  if (!Ptr || !SE->isSCEVable(Ptr->getType()))
    return;

  auto *PtrSCEV = SE->getSCEV(Ptr);
  if (isa<SCEVCouldNotCompute>(PtrSCEV))
    return;

  // Work with the offset relative to the base pointer.
  auto *BasePtrSCEV = SE->getPointerBase(PtrSCEV);
  if (BasePtrSCEV && !isa<SCEVCouldNotCompute>(BasePtrSCEV))
    PtrSCEV = SE->getMinusSCEV(PtrSCEV, BasePtrSCEV);

  const ConstantRange &Range = SE->getSignedRange(PtrSCEV);
  if (Range.isFullSet())
    return;

  if (Range.isUpperWrapped() || Range.isSignWrappedSet())
    return;

  // NOTE(review): isWrapping is provably always false here -- sign-wrapped
  // ranges were rejected by the early return directly above, so only the
  // non-wrapping operands of the two selects below are ever used. Confirm
  // whether the wrapped handling was meant to survive before simplifying.
  bool isWrapping = Range.isSignWrappedSet();

  unsigned BW = Range.getBitWidth();
  const auto One = APInt(BW, 1);
  const auto LB = isWrapping ? Range.getLower() : Range.getSignedMin();
  const auto UB = isWrapping ? (Range.getUpper() - One) : Range.getSignedMax();

  // Convert the byte offsets to element indices; Max is exclusive, hence +1.
  auto Min = LB.sdiv(APInt(BW, ElementSize));
  auto Max = UB.sdiv(APInt(BW, ElementSize)) + One;

  assert(Min.sle(Max) && "Minimum expected to be less or equal than max");

  isl::map Relation = AccessRelation;
  isl::set AccessRange = Relation.range();
  AccessRange = addRangeBoundsToSet(AccessRange, ConstantRange(Min, Max), 0,
                                    isl::dim::set);
  AccessRelation = Relation.intersect_range(AccessRange);
}
772
/// Fold the access relation of a multi-dimensional array access whose inner
/// dimension sizes are parametric (not compile-time constants): subscript
/// values that run below zero in dimension i+1 are folded into dimension i,
/// so every resulting index stays within the modeled dimension bounds.
void MemoryAccess::foldAccessRelation() {
  // Nothing to fold for one-dimensional accesses, or when the first inner
  // dimension size is a compile-time constant.
  if (Sizes.size() < 2 || isa<SCEVConstant>(Sizes[1]))
    return;

  int Size = Subscripts.size();

  isl::map NewAccessRelation = AccessRelation;

  // Fold each adjacent dimension pair (i, i+1), innermost first.
  for (int i = Size - 2; i >= 0; --i) {
    isl::space Space;
    isl::map MapOne, MapTwo;
    // The size of dimension i+1 as a piecewise-affine expression; its single
    // parameter identifies the size in the aligned space below.
    isl::pw_aff DimSize = getPwAff(Sizes[i + 1]);

    isl::space SpaceSize = DimSize.get_space();
    isl::id ParamId = SpaceSize.get_dim_id(isl::dim::param, 0);

    Space = AccessRelation.get_space();
    Space = Space.range().map_from_set();
    Space = Space.align_params(SpaceSize);

    int ParamLocation = Space.find_dim_by_id(isl::dim::param, ParamId);

    // MapOne: identity for all points whose dimension i+1 is already
    // non-negative — nothing to fold in that case.
    MapOne = isl::map::universe(Space);
    for (int j = 0; j < Size; ++j)
      MapOne = MapOne.equate(isl::dim::in, j, isl::dim::out, j);
    MapOne = MapOne.lower_bound_si(isl::dim::in, i + 1, 0);

    // MapTwo: for points with a negative dimension i+1, borrow one from
    // dimension i and add the dimension size to dimension i+1.
    MapTwo = isl::map::universe(Space);
    for (int j = 0; j < Size; ++j)
      if (j < i || j > i + 1)
        MapTwo = MapTwo.equate(isl::dim::in, j, isl::dim::out, j);

    isl::local_space LS(Space);
    isl::constraint C;
    // in_i - out_i - 1 == 0  <=>  out_i = in_i - 1
    C = isl::constraint::alloc_equality(LS);
    C = C.set_constant_si(-1);
    C = C.set_coefficient_si(isl::dim::in, i, 1);
    C = C.set_coefficient_si(isl::dim::out, i, -1);
    MapTwo = MapTwo.add_constraint(C);
    // in_{i+1} - out_{i+1} + size == 0  <=>  out_{i+1} = in_{i+1} + size
    C = isl::constraint::alloc_equality(LS);
    C = C.set_coefficient_si(isl::dim::in, i + 1, 1);
    C = C.set_coefficient_si(isl::dim::out, i + 1, -1);
    C = C.set_coefficient_si(isl::dim::param, ParamLocation, 1);
    MapTwo = MapTwo.add_constraint(C);
    // MapTwo only applies to negative values of dimension i+1.
    MapTwo = MapTwo.upper_bound_si(isl::dim::in, i + 1, -1);

    // Apply the union of both cases to the access relation's range.
    MapOne = MapOne.unite(MapTwo);
    NewAccessRelation = NewAccessRelation.apply_range(MapOne);
  }

  // Restore the statement/array tuple ids lost while transforming the range.
  isl::id BaseAddrId = getScopArrayInfo()->getBasePtrId();
  isl::space Space = Statement->getDomainSpace();
  NewAccessRelation = NewAccessRelation.set_tuple_id(
      isl::dim::in, Space.get_tuple_id(isl::dim::set));
  NewAccessRelation = NewAccessRelation.set_tuple_id(isl::dim::out, BaseAddrId);
  NewAccessRelation = NewAccessRelation.gist_domain(Statement->getDomain());

  // Access dimension folding might in certain cases increase the number of
  // disjuncts in the memory access, which can possibly complicate the generated
  // run-time checks and can lead to costly compilation.
  if (!PollyPreciseFoldAccesses &&
      NewAccessRelation.n_basic_map() > AccessRelation.n_basic_map()) {
  } else {
    AccessRelation = NewAccessRelation;
  }
}
839
/// Build the isl access relation [Stmt[i0,...] -> Array[s0,...]] for this
/// access, using the array information in \p SAI for the output tuple id.
void MemoryAccess::buildAccessRelation(const ScopArrayInfo *SAI) {
  assert(AccessRelation.is_null() && "AccessRelation already built");

  // Initialize the invalid domain which describes all iterations for which the
  // access relation is not modeled correctly.
  isl::set StmtInvalidDomain = getStatement()->getInvalidDomain();
  InvalidDomain = isl::set::empty(StmtInvalidDomain.get_space());

  isl::ctx Ctx = Id.ctx();
  isl::id BaseAddrId = SAI->getBasePtrId();

  // Memory intrinsics (e.g. memset/memcpy) get a dedicated relation built by
  // buildMemIntrinsicAccessRelation().
  if (getAccessInstruction() && isa<MemIntrinsic>(getAccessInstruction())) {
    buildMemIntrinsicAccessRelation();
    AccessRelation = AccessRelation.set_tuple_id(isl::dim::out, BaseAddrId);
    return;
  }

  if (!isAffine()) {
    // We overapproximate non-affine accesses with a possible access to the
    // whole array. For read accesses it does not make a difference, if an
    // access must or may happen. However, for write accesses it is important to
    // differentiate between writes that must happen and writes that may happen.
    if (AccessRelation.is_null())
      AccessRelation = createBasicAccessMap(Statement);

    AccessRelation = AccessRelation.set_tuple_id(isl::dim::out, BaseAddrId);
    return;
  }

  // Affine case: accumulate the relation one subscript dimension at a time.
  isl::space Space = isl::space(Ctx, 0, Statement->getNumIterators(), 0);
  AccessRelation = isl::map::universe(Space);

  for (int i = 0, Size = Subscripts.size(); i < Size; ++i) {
    isl::pw_aff Affine = getPwAff(Subscripts[i]);
    isl::map SubscriptMap = isl::map::from_pw_aff(Affine);
    AccessRelation = AccessRelation.flat_range_product(SubscriptMap);
  }

  // Name the input tuple after the statement and the output tuple after the
  // accessed array, then simplify in the context of the statement's domain.
  Space = Statement->getDomainSpace();
  AccessRelation = AccessRelation.set_tuple_id(
      isl::dim::in, Space.get_tuple_id(isl::dim::set));
  AccessRelation = AccessRelation.set_tuple_id(isl::dim::out, BaseAddrId);

  AccessRelation = AccessRelation.gist_domain(Statement->getDomain());
}
885
/// Construct a MemoryAccess from an explicit base address, subscript and size
/// description. The isl id combines the statement base name, the access type
/// suffix and the current number of accesses in the statement.
MemoryAccess::MemoryAccess(ScopStmt *Stmt, Instruction *AccessInst,
                           AccessType AccType, Value *BaseAddress,
                           Type *ElementType, bool Affine,
                           ArrayRef<const SCEV *> Subscripts,
                           ArrayRef<const SCEV *> Sizes, Value *AccessValue,
                           MemoryKind Kind)
    : Kind(Kind), AccType(AccType), Statement(Stmt), InvalidDomain(),
      BaseAddr(BaseAddress), ElementType(ElementType),
      Sizes(Sizes.begin(), Sizes.end()), AccessInstruction(AccessInst),
      AccessValue(AccessValue), IsAffine(Affine),
      Subscripts(Subscripts.begin(), Subscripts.end()), AccessRelation(),
      NewAccessRelation(), FAD(nullptr) {
  // Indexed by AccessType (READ / MUST_WRITE / MAY_WRITE).
  static const std::string TypeStrings[] = {"", "_Read", "_Write", "_MayWrite"};
  const std::string Access = TypeStrings[AccType] + utostr(Stmt->size());

  std::string IdName = Stmt->getBaseName() + Access;
  Id = isl::id::alloc(Stmt->getParent()->getIslCtx(), IdName, this);
}

/// Construct a MemoryAccess directly from an access relation (used for copy
/// statements). Array metadata is recovered from the ScopArrayInfo attached
/// to the relation's output tuple id.
MemoryAccess::MemoryAccess(ScopStmt *Stmt, AccessType AccType, isl::map AccRel)
    : Kind(MemoryKind::Array), AccType(AccType), Statement(Stmt),
      InvalidDomain(), AccessRelation(), NewAccessRelation(AccRel),
      FAD(nullptr) {
  isl::id ArrayInfoId = NewAccessRelation.get_tuple_id(isl::dim::out);
  auto *SAI = ScopArrayInfo::getFromId(ArrayInfoId);
  // No size expression is recorded for the outermost dimension; the inner
  // dimension sizes are copied from the array info.
  Sizes.push_back(nullptr);
  for (unsigned i = 1; i < SAI->getNumberOfDimensions(); i++)
    Sizes.push_back(SAI->getDimensionSize(i));
  ElementType = SAI->getElementType();
  BaseAddr = SAI->getBasePtr();
  static const std::string TypeStrings[] = {"", "_Read", "_Write", "_MayWrite"};
  const std::string Access = TypeStrings[AccType] + utostr(Stmt->size());

  std::string IdName = Stmt->getBaseName() + Access;
  Id = isl::id::alloc(Stmt->getParent()->getIslCtx(), IdName, this);
}

MemoryAccess::~MemoryAccess() = default;
924
realignParams()925 void MemoryAccess::realignParams() {
926 isl::set Ctx = Statement->getParent()->getContext();
927 InvalidDomain = InvalidDomain.gist_params(Ctx);
928 AccessRelation = AccessRelation.gist_params(Ctx);
929
930 // Predictable parameter order is required for JSON imports. Ensure alignment
931 // by explicitly calling align_params.
932 isl::space CtxSpace = Ctx.get_space();
933 InvalidDomain = InvalidDomain.align_params(CtxSpace);
934 AccessRelation = AccessRelation.align_params(CtxSpace);
935 }
936
// Return the textual name of this access' reduction operator (delegates to
// the static overload taking a ReductionType).
const std::string MemoryAccess::getReductionOperatorStr() const {
  return MemoryAccess::getReductionOperatorStr(getReductionType());
}

// Return the isl id that uniquely identifies this access.
isl::id MemoryAccess::getId() const { return Id; }
942
operator <<(raw_ostream & OS,MemoryAccess::ReductionType RT)943 raw_ostream &polly::operator<<(raw_ostream &OS,
944 MemoryAccess::ReductionType RT) {
945 if (RT == MemoryAccess::RT_NONE)
946 OS << "NONE";
947 else
948 OS << MemoryAccess::getReductionOperatorStr(RT);
949 return OS;
950 }
951
// Record the Fortran array descriptor value associated with this access.
void MemoryAccess::setFortranArrayDescriptor(Value *FAD) { this->FAD = FAD; }
953
print(raw_ostream & OS) const954 void MemoryAccess::print(raw_ostream &OS) const {
955 switch (AccType) {
956 case READ:
957 OS.indent(12) << "ReadAccess :=\t";
958 break;
959 case MUST_WRITE:
960 OS.indent(12) << "MustWriteAccess :=\t";
961 break;
962 case MAY_WRITE:
963 OS.indent(12) << "MayWriteAccess :=\t";
964 break;
965 }
966
967 OS << "[Reduction Type: " << getReductionType() << "] ";
968
969 if (FAD) {
970 OS << "[Fortran array descriptor: " << FAD->getName();
971 OS << "] ";
972 };
973
974 OS << "[Scalar: " << isScalarKind() << "]\n";
975 OS.indent(16) << getOriginalAccessRelationStr() << ";\n";
976 if (hasNewAccessRelation())
977 OS.indent(11) << "new: " << getNewAccessRelationStr() << ";\n";
978 }
979
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
// Debug-build helper: print this access to stderr.
LLVM_DUMP_METHOD void MemoryAccess::dump() const { print(errs()); }
#endif
983
getPwAff(const SCEV * E)984 isl::pw_aff MemoryAccess::getPwAff(const SCEV *E) {
985 auto *Stmt = getStatement();
986 PWACtx PWAC = Stmt->getParent()->getPwAff(E, Stmt->getEntryBlock());
987 isl::set StmtDom = getStatement()->getDomain();
988 StmtDom = StmtDom.reset_tuple_id();
989 isl::set NewInvalidDom = StmtDom.intersect(PWAC.second);
990 InvalidDomain = InvalidDomain.unite(NewInvalidDom);
991 return PWAC.first;
992 }
993
994 // Create a map in the size of the provided set domain, that maps from the
995 // one element of the provided set domain to another element of the provided
996 // set domain.
997 // The mapping is limited to all points that are equal in all but the last
998 // dimension and for which the last dimension of the input is strict smaller
999 // than the last dimension of the output.
1000 //
1001 // getEqualAndLarger(set[i0, i1, ..., iX]):
1002 //
1003 // set[i0, i1, ..., iX] -> set[o0, o1, ..., oX]
1004 // : i0 = o0, i1 = o1, ..., i(X-1) = o(X-1), iX < oX
1005 //
getEqualAndLarger(isl::space SetDomain)1006 static isl::map getEqualAndLarger(isl::space SetDomain) {
1007 isl::space Space = SetDomain.map_from_set();
1008 isl::map Map = isl::map::universe(Space);
1009 unsigned lastDimension = Map.domain_tuple_dim() - 1;
1010
1011 // Set all but the last dimension to be equal for the input and output
1012 //
1013 // input[i0, i1, ..., iX] -> output[o0, o1, ..., oX]
1014 // : i0 = o0, i1 = o1, ..., i(X-1) = o(X-1)
1015 for (unsigned i = 0; i < lastDimension; ++i)
1016 Map = Map.equate(isl::dim::in, i, isl::dim::out, i);
1017
1018 // Set the last dimension of the input to be strict smaller than the
1019 // last dimension of the output.
1020 //
1021 // input[?,?,?,...,iX] -> output[?,?,?,...,oX] : iX < oX
1022 Map = Map.order_lt(isl::dim::in, lastDimension, isl::dim::out, lastDimension);
1023 return Map;
1024 }
1025
getStride(isl::map Schedule) const1026 isl::set MemoryAccess::getStride(isl::map Schedule) const {
1027 isl::map AccessRelation = getAccessRelation();
1028 isl::space Space = Schedule.get_space().range();
1029 isl::map NextScatt = getEqualAndLarger(Space);
1030
1031 Schedule = Schedule.reverse();
1032 NextScatt = NextScatt.lexmin();
1033
1034 NextScatt = NextScatt.apply_range(Schedule);
1035 NextScatt = NextScatt.apply_range(AccessRelation);
1036 NextScatt = NextScatt.apply_domain(Schedule);
1037 NextScatt = NextScatt.apply_domain(AccessRelation);
1038
1039 isl::set Deltas = NextScatt.deltas();
1040 return Deltas;
1041 }
1042
isStrideX(isl::map Schedule,int StrideWidth) const1043 bool MemoryAccess::isStrideX(isl::map Schedule, int StrideWidth) const {
1044 isl::set Stride, StrideX;
1045 bool IsStrideX;
1046
1047 Stride = getStride(Schedule);
1048 StrideX = isl::set::universe(Stride.get_space());
1049 for (auto i : seq<isl_size>(0, StrideX.tuple_dim() - 1))
1050 StrideX = StrideX.fix_si(isl::dim::set, i, 0);
1051 StrideX = StrideX.fix_si(isl::dim::set, StrideX.tuple_dim() - 1, StrideWidth);
1052 IsStrideX = Stride.is_subset(StrideX);
1053
1054 return IsStrideX;
1055 }
1056
// True if consecutive schedule points access the same element (stride 0).
bool MemoryAccess::isStrideZero(isl::map Schedule) const {
  return isStrideX(Schedule, 0);
}

// True if consecutive schedule points advance by one element (stride 1).
bool MemoryAccess::isStrideOne(isl::map Schedule) const {
  return isStrideX(Schedule, 1);
}

// Overwrite the original access relation directly (no validation; contrast
// with setNewAccessRelation below).
void MemoryAccess::setAccessRelation(isl::map NewAccess) {
  AccessRelation = NewAccess;
}
1068
/// Install a new access relation after verifying (asserts builds only) that
/// it is compatible with the statement domain and the accessed array.
void MemoryAccess::setNewAccessRelation(isl::map NewAccess) {
  assert(!NewAccess.is_null());

#ifndef NDEBUG
  // Check domain space compatibility.
  isl::space NewSpace = NewAccess.get_space();
  isl::space NewDomainSpace = NewSpace.domain();
  isl::space OriginalDomainSpace = getStatement()->getDomainSpace();
  assert(OriginalDomainSpace.has_equal_tuples(NewDomainSpace));

  // Reads must be executed unconditionally. Writes might be executed in a
  // subdomain only.
  if (isRead()) {
    // Check whether there is an access for every statement instance.
    isl::set StmtDomain = getStatement()->getDomain();
    isl::set DefinedContext =
        getStatement()->getParent()->getBestKnownDefinedBehaviorContext();
    StmtDomain = StmtDomain.intersect_params(DefinedContext);
    isl::set NewDomain = NewAccess.domain();
    assert(!StmtDomain.is_subset(NewDomain).is_false() &&
           "Partial READ accesses not supported");
  }

  // The relation's output tuple must name the accessed array via its
  // ScopArrayInfo user pointer.
  isl::space NewAccessSpace = NewAccess.get_space();
  assert(NewAccessSpace.has_tuple_id(isl::dim::set) &&
         "Must specify the array that is accessed");
  isl::id NewArrayId = NewAccessSpace.get_tuple_id(isl::dim::set);
  auto *SAI = static_cast<ScopArrayInfo *>(NewArrayId.get_user());
  assert(SAI && "Must set a ScopArrayInfo");

  if (SAI->isArrayKind() && SAI->getBasePtrOriginSAI()) {
    InvariantEquivClassTy *EqClass =
        getStatement()->getParent()->lookupInvariantEquivClass(
            SAI->getBasePtr());
    assert(EqClass &&
           "Access functions to indirect arrays must have an invariant and "
           "hoisted base pointer");
  }

  // Check whether access dimensions correspond to number of dimensions of the
  // accesses array.
  isl_size Dims = SAI->getNumberOfDimensions();
  assert(NewAccessSpace.dim(isl::dim::set) == Dims &&
         "Access dims must match array dims");
#endif

  // Simplify the relation against the context and statement domain before
  // storing it.
  NewAccess = NewAccess.gist_params(getStatement()->getParent()->getContext());
  NewAccess = NewAccess.gist_domain(getStatement()->getDomain());
  NewAccessRelation = NewAccess;
}
1119
isLatestPartialAccess() const1120 bool MemoryAccess::isLatestPartialAccess() const {
1121 isl::set StmtDom = getStatement()->getDomain();
1122 isl::set AccDom = getLatestAccessRelation().domain();
1123
1124 return !StmtDom.is_subset(AccDom);
1125 }
1126
1127 //===----------------------------------------------------------------------===//
1128
getSchedule() const1129 isl::map ScopStmt::getSchedule() const {
1130 isl::set Domain = getDomain();
1131 if (Domain.is_empty())
1132 return isl::map::from_aff(isl::aff(isl::local_space(getDomainSpace())));
1133 auto Schedule = getParent()->getSchedule();
1134 if (Schedule.is_null())
1135 return {};
1136 Schedule = Schedule.intersect_domain(isl::union_set(Domain));
1137 if (Schedule.is_empty())
1138 return isl::map::from_aff(isl::aff(isl::local_space(getDomainSpace())));
1139 isl::map M = M.from_union_map(Schedule);
1140 M = M.coalesce();
1141 M = M.gist_domain(Domain);
1142 M = M.coalesce();
1143 return M;
1144 }
1145
// Shrink this statement's iteration domain; the new domain may only remove
// iterations, never add them.
void ScopStmt::restrictDomain(isl::set NewDomain) {
  assert(NewDomain.is_subset(Domain) &&
         "New domain is not a subset of old domain!");
  Domain = NewDomain;
}
1151
/// Register a memory access with this statement and index it in the lookup
/// table matching its kind. With Prepend set, the access is inserted at the
/// front of the access list instead of being appended.
void ScopStmt::addAccess(MemoryAccess *Access, bool Prepend) {
  Instruction *AccessInst = Access->getAccessInstruction();

  if (Access->isArrayKind()) {
    // One instruction may carry several array accesses; keep a list per
    // instruction.
    MemoryAccessList &MAL = InstructionToAccess[AccessInst];
    MAL.emplace_front(Access);
  } else if (Access->isValueKind() && Access->isWrite()) {
    // At most one value write per defining instruction.
    Instruction *AccessVal = cast<Instruction>(Access->getAccessValue());
    assert(!ValueWrites.lookup(AccessVal));

    ValueWrites[AccessVal] = Access;
  } else if (Access->isValueKind() && Access->isRead()) {
    // At most one value read per used value.
    Value *AccessVal = Access->getAccessValue();
    assert(!ValueReads.lookup(AccessVal));

    ValueReads[AccessVal] = Access;
  } else if (Access->isAnyPHIKind() && Access->isWrite()) {
    PHINode *PHI = cast<PHINode>(Access->getAccessValue());
    assert(!PHIWrites.lookup(PHI));

    PHIWrites[PHI] = Access;
  } else if (Access->isAnyPHIKind() && Access->isRead()) {
    PHINode *PHI = cast<PHINode>(Access->getAccessValue());
    assert(!PHIReads.lookup(PHI));

    PHIReads[PHI] = Access;
  }

  if (Prepend) {
    MemAccs.insert(MemAccs.begin(), Access);
    return;
  }
  MemAccs.push_back(Access);
}
1186
realignParams()1187 void ScopStmt::realignParams() {
1188 for (MemoryAccess *MA : *this)
1189 MA->realignParams();
1190
1191 isl::set Ctx = Parent.getContext();
1192 InvalidDomain = InvalidDomain.gist_params(Ctx);
1193 Domain = Domain.gist_params(Ctx);
1194
1195 // Predictable parameter order is required for JSON imports. Ensure alignment
1196 // by explicitly calling align_params.
1197 isl::space CtxSpace = Ctx.get_space();
1198 InvalidDomain = InvalidDomain.align_params(CtxSpace);
1199 Domain = Domain.align_params(CtxSpace);
1200 }
1201
/// Create a statement modeling a whole (non-affine) region.
ScopStmt::ScopStmt(Scop &parent, Region &R, StringRef Name,
                   Loop *SurroundingLoop,
                   std::vector<Instruction *> EntryBlockInstructions)
    : Parent(parent), InvalidDomain(), Domain(), R(&R), Build(), BaseName(Name),
      SurroundingLoop(SurroundingLoop), Instructions(EntryBlockInstructions) {}

/// Create a statement modeling a single basic block.
ScopStmt::ScopStmt(Scop &parent, BasicBlock &bb, StringRef Name,
                   Loop *SurroundingLoop,
                   std::vector<Instruction *> Instructions)
    : Parent(parent), InvalidDomain(), Domain(), BB(&bb), Build(),
      BaseName(Name), SurroundingLoop(SurroundingLoop),
      Instructions(Instructions) {}

/// Create a copy statement over NewDomain with exactly two accesses: a
/// MUST_WRITE via TargetRel and a READ via SourceRel, both re-tupled to this
/// statement's id.
ScopStmt::ScopStmt(Scop &parent, isl::map SourceRel, isl::map TargetRel,
                   isl::set NewDomain)
    : Parent(parent), InvalidDomain(), Domain(NewDomain), Build() {
  BaseName = getIslCompatibleName("CopyStmt_", "",
                                  std::to_string(parent.getCopyStmtsNum()));
  isl::id Id = isl::id::alloc(getIslCtx(), getBaseName(), this);
  Domain = Domain.set_tuple_id(Id);
  TargetRel = TargetRel.set_tuple_id(isl::dim::in, Id);
  auto *Access =
      new MemoryAccess(this, MemoryAccess::AccessType::MUST_WRITE, TargetRel);
  parent.addAccessFunction(Access);
  addAccess(Access);
  SourceRel = SourceRel.set_tuple_id(isl::dim::in, Id);
  Access = new MemoryAccess(this, MemoryAccess::AccessType::READ, SourceRel);
  parent.addAccessFunction(Access);
  addAccess(Access);
}

ScopStmt::~ScopStmt() = default;

// --- Simple accessors ------------------------------------------------------

std::string ScopStmt::getDomainStr() const { return stringFromIslObj(Domain); }

std::string ScopStmt::getScheduleStr() const {
  return stringFromIslObj(getSchedule());
}

void ScopStmt::setInvalidDomain(isl::set ID) { InvalidDomain = ID; }

// First basic block of the statement: the block itself for block statements,
// the region entry otherwise.
BasicBlock *ScopStmt::getEntryBlock() const {
  if (isBlockStmt())
    return getBasicBlock();
  return getRegion()->getEntry();
}

unsigned ScopStmt::getNumIterators() const { return NestLoops.size(); }

const char *ScopStmt::getBaseName() const { return BaseName.c_str(); }

Loop *ScopStmt::getLoopForDimension(unsigned Dimension) const {
  return NestLoops[Dimension];
}

isl::ctx ScopStmt::getIslCtx() const { return Parent.getIslCtx(); }

isl::set ScopStmt::getDomain() const { return Domain; }

isl::space ScopStmt::getDomainSpace() const { return Domain.get_space(); }

isl::id ScopStmt::getDomainId() const { return Domain.get_tuple_id(); }
1264
printInstructions(raw_ostream & OS) const1265 void ScopStmt::printInstructions(raw_ostream &OS) const {
1266 OS << "Instructions {\n";
1267
1268 for (Instruction *Inst : Instructions)
1269 OS.indent(16) << *Inst << "\n";
1270
1271 OS.indent(12) << "}\n";
1272 }
1273
print(raw_ostream & OS,bool PrintInstructions) const1274 void ScopStmt::print(raw_ostream &OS, bool PrintInstructions) const {
1275 OS << "\t" << getBaseName() << "\n";
1276 OS.indent(12) << "Domain :=\n";
1277
1278 if (!Domain.is_null()) {
1279 OS.indent(16) << getDomainStr() << ";\n";
1280 } else
1281 OS.indent(16) << "n/a\n";
1282
1283 OS.indent(12) << "Schedule :=\n";
1284
1285 if (!Domain.is_null()) {
1286 OS.indent(16) << getScheduleStr() << ";\n";
1287 } else
1288 OS.indent(16) << "n/a\n";
1289
1290 for (MemoryAccess *Access : MemAccs)
1291 Access->print(OS);
1292
1293 if (PrintInstructions)
1294 printInstructions(OS.indent(12));
1295 }
1296
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
// Debug-build helper: print this statement (with instructions) to dbgs().
LLVM_DUMP_METHOD void ScopStmt::dump() const { print(dbgs(), true); }
#endif
1300
removeAccessData(MemoryAccess * MA)1301 void ScopStmt::removeAccessData(MemoryAccess *MA) {
1302 if (MA->isRead() && MA->isOriginalValueKind()) {
1303 bool Found = ValueReads.erase(MA->getAccessValue());
1304 (void)Found;
1305 assert(Found && "Expected access data not found");
1306 }
1307 if (MA->isWrite() && MA->isOriginalValueKind()) {
1308 bool Found = ValueWrites.erase(cast<Instruction>(MA->getAccessValue()));
1309 (void)Found;
1310 assert(Found && "Expected access data not found");
1311 }
1312 if (MA->isWrite() && MA->isOriginalAnyPHIKind()) {
1313 bool Found = PHIWrites.erase(cast<PHINode>(MA->getAccessInstruction()));
1314 (void)Found;
1315 assert(Found && "Expected access data not found");
1316 }
1317 if (MA->isRead() && MA->isOriginalAnyPHIKind()) {
1318 bool Found = PHIReads.erase(cast<PHINode>(MA->getAccessInstruction()));
1319 (void)Found;
1320 assert(Found && "Expected access data not found");
1321 }
1322 }
1323
removeMemoryAccess(MemoryAccess * MA)1324 void ScopStmt::removeMemoryAccess(MemoryAccess *MA) {
1325 // Remove the memory accesses from this statement together with all scalar
1326 // accesses that were caused by it. MemoryKind::Value READs have no access
1327 // instruction, hence would not be removed by this function. However, it is
1328 // only used for invariant LoadInst accesses, its arguments are always affine,
1329 // hence synthesizable, and therefore there are no MemoryKind::Value READ
1330 // accesses to be removed.
1331 auto Predicate = [&](MemoryAccess *Acc) {
1332 return Acc->getAccessInstruction() == MA->getAccessInstruction();
1333 };
1334 for (auto *MA : MemAccs) {
1335 if (Predicate(MA)) {
1336 removeAccessData(MA);
1337 Parent.removeAccessData(MA);
1338 }
1339 }
1340 MemAccs.erase(std::remove_if(MemAccs.begin(), MemAccs.end(), Predicate),
1341 MemAccs.end());
1342 InstructionToAccess.erase(MA->getAccessInstruction());
1343 }
1344
/// Remove exactly the access MA from this statement. With AfterHoisting set,
/// MA is also removed from the access list and the statement/SCoP lookup
/// tables; otherwise only the instruction-to-access map entry is dropped.
/// NOTE(review): when AfterHoisting is false MA stays in MemAccs — presumably
/// intentional for hoisted accesses; confirm against callers.
void ScopStmt::removeSingleMemoryAccess(MemoryAccess *MA, bool AfterHoisting) {
  if (AfterHoisting) {
    auto MAIt = std::find(MemAccs.begin(), MemAccs.end(), MA);
    assert(MAIt != MemAccs.end());
    MemAccs.erase(MAIt);

    removeAccessData(MA);
    Parent.removeAccessData(MA);
  }

  auto It = InstructionToAccess.find(MA->getAccessInstruction());
  if (It != InstructionToAccess.end()) {
    It->second.remove(MA);
    if (It->second.empty())
      InstructionToAccess.erase(MA->getAccessInstruction());
  }
}

/// Return the MemoryKind::Value READ access for V in this statement, creating
/// it (together with its ScopArrayInfo) on first request.
MemoryAccess *ScopStmt::ensureValueRead(Value *V) {
  MemoryAccess *Access = lookupInputAccessOf(V);
  if (Access)
    return Access;

  ScopArrayInfo *SAI =
      Parent.getOrCreateScopArrayInfo(V, V->getType(), {}, MemoryKind::Value);
  Access = new MemoryAccess(this, nullptr, MemoryAccess::READ, V, V->getType(),
                            true, {}, {}, V, MemoryKind::Value);
  Parent.addAccessFunction(Access);
  Access->buildAccessRelation(SAI);
  addAccess(Access);
  Parent.addAccessData(Access);
  return Access;
}

// Stream a statement using the command-line-controlled instruction printing.
raw_ostream &polly::operator<<(raw_ostream &OS, const ScopStmt &S) {
  S.print(OS, PollyPrintInstructions);
  return OS;
}
1383
1384 //===----------------------------------------------------------------------===//
1385 /// Scop class implement
1386
// Replace the SCoP context, keeping the existing context's parameter order.
void Scop::setContext(isl::set NewContext) {
  Context = NewContext.align_params(Context.get_space());
}

namespace {

/// Remap parameter values but keep AddRecs valid wrt. invariant loads.
struct SCEVSensitiveParameterRewriter
    : public SCEVRewriteVisitor<SCEVSensitiveParameterRewriter> {
  const ValueToValueMap &VMap;

public:
  SCEVSensitiveParameterRewriter(const ValueToValueMap &VMap,
                                 ScalarEvolution &SE)
      : SCEVRewriteVisitor(SE), VMap(VMap) {}

  /// Rewrite E, replacing values according to VMap.
  static const SCEV *rewrite(const SCEV *E, ScalarEvolution &SE,
                             const ValueToValueMap &VMap) {
    SCEVSensitiveParameterRewriter SSPR(VMap, SE);
    return SSPR.visit(E);
  }

  /// Split {Start,+,Step}<L> into Start + {0,+,Step}<L> so that rewriting the
  /// start expression cannot invalidate the AddRec itself.
  const SCEV *visitAddRecExpr(const SCEVAddRecExpr *E) {
    auto *Start = visit(E->getStart());
    auto *AddRec = SE.getAddRecExpr(SE.getConstant(E->getType(), 0),
                                    visit(E->getStepRecurrence(SE)),
                                    E->getLoop(), SCEV::FlagAnyWrap);
    return SE.getAddExpr(Start, AddRec);
  }

  /// Replace a value by its VMap image, if one exists.
  const SCEV *visitUnknown(const SCEVUnknown *E) {
    if (auto *NewValue = VMap.lookup(E->getValue()))
      return SE.getUnknown(NewValue);
    return E;
  }
};

/// Check whether we should remap a SCEV expression.
struct SCEVFindInsideScop : public SCEVTraversal<SCEVFindInsideScop> {
  const ValueToValueMap &VMap;
  bool FoundInside = false;
  const Scop *S;

public:
  SCEVFindInsideScop(const ValueToValueMap &VMap, ScalarEvolution &SE,
                     const Scop *S)
      : SCEVTraversal(*this), VMap(VMap), S(S) {}

  /// True iff E references a loop, or an instruction not mapped by VMap, that
  /// lies inside the SCoP's region.
  static bool hasVariant(const SCEV *E, ScalarEvolution &SE,
                         const ValueToValueMap &VMap, const Scop *S) {
    SCEVFindInsideScop SFIS(VMap, SE, S);
    SFIS.visitAll(E);
    return SFIS.FoundInside;
  }

  bool follow(const SCEV *E) {
    if (auto *AddRec = dyn_cast<SCEVAddRecExpr>(E)) {
      FoundInside |= S->getRegion().contains(AddRec->getLoop());
    } else if (auto *Unknown = dyn_cast<SCEVUnknown>(E)) {
      if (Instruction *I = dyn_cast<Instruction>(Unknown->getValue()))
        FoundInside |= S->getRegion().contains(I) && !VMap.count(I);
    }
    // Stop descending once a match has been found.
    return !FoundInside;
  }

  bool isDone() { return FoundInside; }
};
} // end anonymous namespace

const SCEV *Scop::getRepresentingInvariantLoadSCEV(const SCEV *E) const {
  // Check whether it makes sense to rewrite the SCEV. (ScalarEvolution
  // doesn't like addition between an AddRec and an expression that
  // doesn't have a dominance relationship with it.)
  if (SCEVFindInsideScop::hasVariant(E, *SE, InvEquivClassVMap, this))
    return E;

  // Rewrite SCEV.
  return SCEVSensitiveParameterRewriter::rewrite(E, *SE, InvEquivClassVMap);
}
1466
1467 // This table of function names is used to translate parameter names in more
1468 // human-readable names. This makes it easier to interpret Polly analysis
1469 // results.
1470 StringMap<std::string> KnownNames = {
1471 {"_Z13get_global_idj", "global_id"},
1472 {"_Z12get_local_idj", "local_id"},
1473 {"_Z15get_global_sizej", "global_size"},
1474 {"_Z14get_local_sizej", "local_size"},
1475 {"_Z12get_work_dimv", "work_dim"},
1476 {"_Z17get_global_offsetj", "global_offset"},
1477 {"_Z12get_group_idj", "group_id"},
1478 {"_Z14get_num_groupsj", "num_groups"},
1479 };
1480
/// Build a parameter name for a call with constant arguments, e.g.
/// "__global_id_0" for get_global_id(0). Callees not in KnownNames keep their
/// (mangled) function name as prefix.
static std::string getCallParamName(CallInst *Call) {
  std::string Result;
  raw_string_ostream OS(Result);
  std::string Name = Call->getCalledFunction()->getName().str();

  auto Iterator = KnownNames.find(Name);
  if (Iterator != KnownNames.end())
    Name = "__" + Iterator->getValue();
  OS << Name;
  // Append each constant argument (e.g. the queried dimension index).
  for (auto &Operand : Call->arg_operands()) {
    ConstantInt *Op = cast<ConstantInt>(&Operand);
    OS << "_" << Op->getValue();
  }
  OS.flush();
  return Result;
}
1497
createParameterId(const SCEV * Parameter)1498 void Scop::createParameterId(const SCEV *Parameter) {
1499 assert(Parameters.count(Parameter));
1500 assert(!ParameterIds.count(Parameter));
1501
1502 std::string ParameterName = "p_" + std::to_string(getNumParams() - 1);
1503
1504 if (const SCEVUnknown *ValueParameter = dyn_cast<SCEVUnknown>(Parameter)) {
1505 Value *Val = ValueParameter->getValue();
1506 CallInst *Call = dyn_cast<CallInst>(Val);
1507
1508 if (Call && isConstCall(Call)) {
1509 ParameterName = getCallParamName(Call);
1510 } else if (UseInstructionNames) {
1511 // If this parameter references a specific Value and this value has a name
1512 // we use this name as it is likely to be unique and more useful than just
1513 // a number.
1514 if (Val->hasName())
1515 ParameterName = Val->getName().str();
1516 else if (LoadInst *LI = dyn_cast<LoadInst>(Val)) {
1517 auto *LoadOrigin = LI->getPointerOperand()->stripInBoundsOffsets();
1518 if (LoadOrigin->hasName()) {
1519 ParameterName += "_loaded_from_";
1520 ParameterName +=
1521 LI->getPointerOperand()->stripInBoundsOffsets()->getName();
1522 }
1523 }
1524 }
1525
1526 ParameterName = getIslCompatibleName("", ParameterName, "");
1527 }
1528
1529 isl::id Id = isl::id::alloc(getIslCtx(), ParameterName,
1530 const_cast<void *>((const void *)Parameter));
1531 ParameterIds[Parameter] = Id;
1532 }
1533
/// Add new parameters to the SCoP, creating an isl id for each parameter not
/// seen before.
void Scop::addParams(const ParameterSetTy &NewParameters) {
  for (const SCEV *Parameter : NewParameters) {
    // Normalize the SCEV to get the representing element for an invariant load.
    Parameter = extractConstantFactor(Parameter, *SE).second;
    Parameter = getRepresentingInvariantLoadSCEV(Parameter);

    if (Parameters.insert(Parameter))
      createParameterId(Parameter);
  }
}

/// Return the isl id for a parameter (null id if unknown).
isl::id Scop::getIdForParam(const SCEV *Parameter) const {
  // Normalize the SCEV to get the representing element for an invariant load.
  Parameter = getRepresentingInvariantLoadSCEV(Parameter);
  return ParameterIds.lookup(Parameter);
}

// True if BB dominates the SCoP entry block.
bool Scop::isDominatedBy(const DominatorTree &DT, BasicBlock *BB) const {
  return DT.dominates(BB, getEntry());
}

/// Initialize all contexts over an empty parameter space; constraints are
/// added later (e.g. by addParameterBounds / recorded assumptions).
void Scop::buildContext() {
  isl::space Space = isl::space::params_alloc(getIslCtx(), 0);
  Context = isl::set::universe(Space);
  InvalidContext = isl::set::empty(Space);
  AssumedContext = isl::set::universe(Space);
  DefinedBehaviorContext = isl::set::universe(Space);
}

/// Constrain each parameter in the context by the signed range ScalarEvolution
/// computes for it.
void Scop::addParameterBounds() {
  unsigned PDim = 0;
  for (auto *Parameter : Parameters) {
    ConstantRange SRange = SE->getSignedRange(Parameter);
    Context = addRangeBoundsToSet(Context, SRange, PDim++, isl::dim::param);
  }
  intersectDefinedBehavior(Context, AS_ASSUMPTION);
}
1571
getFortranArrayIds(Scop::array_range Arrays)1572 static std::vector<isl::id> getFortranArrayIds(Scop::array_range Arrays) {
1573 std::vector<isl::id> OutermostSizeIds;
1574 for (auto Array : Arrays) {
1575 // To check if an array is a Fortran array, we check if it has a isl_pw_aff
1576 // for its outermost dimension. Fortran arrays will have this since the
1577 // outermost dimension size can be picked up from their runtime description.
1578 // TODO: actually need to check if it has a FAD, but for now this works.
1579 if (Array->getNumberOfDimensions() > 0) {
1580 isl::pw_aff PwAff = Array->getDimensionSizePw(0);
1581 if (PwAff.is_null())
1582 continue;
1583
1584 isl::id Id = PwAff.get_dim_id(isl::dim::param, 0);
1585 assert(!Id.is_null() &&
1586 "Invalid Id for PwAff expression in Fortran array");
1587 OutermostSizeIds.push_back(Id);
1588 }
1589 }
1590 return OutermostSizeIds;
1591 }
1592
1593 // The FORTRAN array size parameters are known to be non-negative.
boundFortranArrayParams(isl::set Context,Scop::array_range Arrays)1594 static isl::set boundFortranArrayParams(isl::set Context,
1595 Scop::array_range Arrays) {
1596 std::vector<isl::id> OutermostSizeIds;
1597 OutermostSizeIds = getFortranArrayIds(Arrays);
1598
1599 for (isl::id Id : OutermostSizeIds) {
1600 int dim = Context.find_dim_by_id(isl::dim::param, Id);
1601 Context = Context.lower_bound_si(isl::dim::param, dim, 0);
1602 }
1603
1604 return Context;
1605 }
1606
// Bring all contexts, statement domains and the schedule into one common
// parameter space and add known parameter bounds. No-op when parameter
// bounds are disabled via -polly-ignore-parameter-bounds.
void Scop::realignParams() {
  if (PollyIgnoreParamBounds)
    return;

  // Add all parameters into a common model.
  isl::space Space = getFullParamSpace();

  // Align the parameters of all data structures to the model.
  Context = Context.align_params(Space);
  AssumedContext = AssumedContext.align_params(Space);
  InvalidContext = InvalidContext.align_params(Space);

  // Bound the size of the fortran array dimensions.
  Context = boundFortranArrayParams(Context, arrays());

  // As all parameters are known add bounds to them.
  addParameterBounds();

  for (ScopStmt &Stmt : *this)
    Stmt.realignParams();
  // Simplify the schedule according to the context too.
  Schedule = Schedule.gist_domain_params(getContext());

  // Predictable parameter order is required for JSON imports. Ensure alignment
  // by explicitly calling align_params.
  Schedule = Schedule.align_params(Space);
}
1634
// Simplify \p AssumptionContext relative to what must already hold for any
// statement of \p S to execute at all.
static isl::set simplifyAssumptionContext(isl::set AssumptionContext,
                                          const Scop &S) {
  // If we have modeled all blocks in the SCoP that have side effects we can
  // simplify the context with the constraints that are needed for anything to
  // be executed at all. However, if we have error blocks in the SCoP we already
  // assumed some parameter combinations cannot occur and removed them from the
  // domains, thus we cannot use the remaining domain to simplify the
  // assumptions.
  if (!S.hasErrorBlock()) {
    auto DomainParameters = S.getDomains().params();
    AssumptionContext = AssumptionContext.gist_params(DomainParameters);
  }

  // Drop constraints already implied by the validity context.
  AssumptionContext = AssumptionContext.gist_params(S.getContext());
  return AssumptionContext;
}
1651
// Simplify the assumed, invalid and defined-behavior contexts; see the long
// comment below for when this simplification is legal.
void Scop::simplifyContexts() {
  // The parameter constraints of the iteration domains give us a set of
  // constraints that need to hold for all cases where at least a single
  // statement iteration is executed in the whole scop. We now simplify the
  // assumed context under the assumption that such constraints hold and at
  // least a single statement iteration is executed. For cases where no
  // statement instances are executed, the assumptions we have taken about
  // the executed code do not matter and can be changed.
  //
  // WARNING: This only holds if the assumptions we have taken do not reduce
  //          the set of statement instances that are executed. Otherwise we
  //          may run into a case where the iteration domains suggest that
  //          for a certain set of parameter constraints no code is executed,
  //          but in the original program some computation would have been
  //          performed. In such a case, modifying the run-time conditions and
  //          possibly influencing the run-time check may cause certain scops
  //          to not be executed.
  //
  // Example:
  //
  // When delinearizing the following code:
  //
  //     for (long i = 0; i < 100; i++)
  //       for (long j = 0; j < m; j++)
  //         A[i+p][j] = 1.0;
  //
  // we assume that the condition m <= 0 or (m >= 1 and p >= 0) holds as
  // otherwise we would access out of bound data. Now, knowing that code is
  // only executed for the case m >= 0, it is sufficient to assume p >= 0.
  AssumedContext = simplifyAssumptionContext(AssumedContext, *this);
  InvalidContext = InvalidContext.align_params(getParamSpace());
  simplify(DefinedBehaviorContext);
  DefinedBehaviorContext = DefinedBehaviorContext.align_params(getParamSpace());
}
1686
// Return the domain constraints of \p Stmt, keyed by its entry block.
isl::set Scop::getDomainConditions(const ScopStmt *Stmt) const {
  return getDomainConditions(Stmt->getEntryBlock());
}
1690
// Return the domain constraints for \p BB. If \p BB has no domain of its own,
// walk up the region tree and use the domain of the entry block of the
// innermost enclosing region that has one.
isl::set Scop::getDomainConditions(BasicBlock *BB) const {
  auto DIt = DomainMap.find(BB);
  if (DIt != DomainMap.end())
    return DIt->getSecond();

  auto &RI = *R.getRegionInfo();
  auto *BBR = RI.getRegionFor(BB);
  // Step out of every region whose entry is BB itself so the recursive call
  // below makes progress toward an ancestor entry block.
  while (BBR->getEntry() == BB)
    BBR = BBR->getParent();
  return getDomainConditions(BBR->getEntry());
}
1702
// Construct a Scop for region \p R: create a fresh isl_ctx, forward any
// -polly-isl-arg options to isl's option parser, and initialize the
// parameter contexts.
Scop::Scop(Region &R, ScalarEvolution &ScalarEvolution, LoopInfo &LI,
           DominatorTree &DT, ScopDetection::DetectionContext &DC,
           OptimizationRemarkEmitter &ORE, int ID)
    : IslCtx(isl_ctx_alloc(), isl_ctx_free), SE(&ScalarEvolution), DT(&DT),
      R(R), name(None), HasSingleExitEdge(R.getExitingBlock()), DC(DC),
      ORE(ORE), Affinator(this, LI), ID(ID) {
  // isl expects an argv-style array whose first element is a program name.
  SmallVector<char *, 8> IslArgv;
  IslArgv.reserve(1 + IslArgs.size());

  // Substitute for program name.
  IslArgv.push_back(const_cast<char *>("-polly-isl-arg"));

  for (std::string &Arg : IslArgs)
    IslArgv.push_back(const_cast<char *>(Arg.c_str()));

  // Abort if unknown argument is passed.
  // Note that "-V" (print isl version) will always call exit(0), so we cannot
  // avoid ISL aborting the program at this point.
  unsigned IslParseFlags = ISL_ARG_ALL;

  isl_ctx_parse_options(IslCtx.get(), IslArgv.size(), IslArgv.data(),
                        IslParseFlags);

  if (IslOnErrorAbort)
    isl_options_set_on_error(getIslCtx().get(), ISL_ON_ERROR_ABORT);
  buildContext();
}
1730
1731 Scop::~Scop() = default;
1732
// Remove every StmtMap/InstStmtMap entry that refers to \p Stmt, so lookups
// no longer return a statement that is about to be deleted.
void Scop::removeFromStmtMap(ScopStmt &Stmt) {
  for (Instruction *Inst : Stmt.getInstructions())
    InstStmtMap.erase(Inst);

  if (Stmt.isRegionStmt()) {
    for (BasicBlock *BB : Stmt.getRegion()->blocks()) {
      StmtMap.erase(BB);
      // Skip entry basic block, as its instructions are already deleted as
      // part of the statement's instruction list.
      if (BB == Stmt.getEntryBlock())
        continue;
      for (Instruction &Inst : *BB)
        InstStmtMap.erase(&Inst);
    }
  } else {
    // A block may map to several statements; only remove this one from the
    // list, keeping the others intact.
    auto StmtMapIt = StmtMap.find(Stmt.getBasicBlock());
    if (StmtMapIt != StmtMap.end())
      StmtMapIt->second.erase(std::remove(StmtMapIt->second.begin(),
                                          StmtMapIt->second.end(), &Stmt),
                              StmtMapIt->second.end());
    for (Instruction *Inst : Stmt.getInstructions())
      InstStmtMap.erase(Inst);
  }
}
1757
// Erase every statement for which \p ShouldDelete returns true, including all
// of its memory accesses and its entries in the lookup maps. \p AfterHoisting
// is forwarded to the access removal (it controls bookkeeping that differs
// before/after invariant load hoisting).
void Scop::removeStmts(function_ref<bool(ScopStmt &)> ShouldDelete,
                       bool AfterHoisting) {
  for (auto StmtIt = Stmts.begin(), StmtEnd = Stmts.end(); StmtIt != StmtEnd;) {
    if (!ShouldDelete(*StmtIt)) {
      StmtIt++;
      continue;
    }

    // Start with removing all of the statement's accesses including erasing it
    // from all maps that are pointing to them.
    // Make a temporary copy because removing MAs invalidates the iterator.
    SmallVector<MemoryAccess *, 16> MAList(StmtIt->begin(), StmtIt->end());
    for (MemoryAccess *MA : MAList)
      StmtIt->removeSingleMemoryAccess(MA, AfterHoisting);

    removeFromStmtMap(*StmtIt);
    StmtIt = Stmts.erase(StmtIt);
  }
}
1777
removeStmtNotInDomainMap()1778 void Scop::removeStmtNotInDomainMap() {
1779 removeStmts([this](ScopStmt &Stmt) -> bool {
1780 isl::set Domain = DomainMap.lookup(Stmt.getEntryBlock());
1781 if (Domain.is_null())
1782 return true;
1783 return Domain.is_empty();
1784 });
1785 }
1786
simplifySCoP(bool AfterHoisting)1787 void Scop::simplifySCoP(bool AfterHoisting) {
1788 removeStmts(
1789 [AfterHoisting](ScopStmt &Stmt) -> bool {
1790 // Never delete statements that contain calls to debug functions.
1791 if (hasDebugCall(&Stmt))
1792 return false;
1793
1794 bool RemoveStmt = Stmt.isEmpty();
1795
1796 // Remove read only statements only after invariant load hoisting.
1797 if (!RemoveStmt && AfterHoisting) {
1798 bool OnlyRead = true;
1799 for (MemoryAccess *MA : Stmt) {
1800 if (MA->isRead())
1801 continue;
1802
1803 OnlyRead = false;
1804 break;
1805 }
1806
1807 RemoveStmt = OnlyRead;
1808 }
1809 return RemoveStmt;
1810 },
1811 AfterHoisting);
1812 }
1813
// Return the invariant-load equivalence class that contains \p Val, or
// nullptr if \p Val is not an invariant load tracked by this SCoP.
InvariantEquivClassTy *Scop::lookupInvariantEquivClass(Value *Val) {
  LoadInst *LInst = dyn_cast<LoadInst>(Val);
  if (!LInst)
    return nullptr;

  // Loads may have been replaced by a class representative; classes are keyed
  // by the representative's pointer SCEV and type.
  if (Value *Rep = InvEquivClassVMap.lookup(LInst))
    LInst = cast<LoadInst>(Rep);

  Type *Ty = LInst->getType();
  const SCEV *PointerSCEV = SE->getSCEV(LInst->getPointerOperand());
  for (auto &IAClass : InvariantEquivClasses) {
    if (PointerSCEV != IAClass.IdentifyingPointer || Ty != IAClass.AccessType)
      continue;

    // Only classes that actually contain an access for Val count.
    auto &MAs = IAClass.InvariantAccesses;
    for (auto *MA : MAs)
      if (MA->getAccessInstruction() == Val)
        return &IAClass;
  }

  return nullptr;
}
1836
// Return the ScopArrayInfo for \p BasePtr (or, for base-pointer-less arrays,
// \p BaseName) of kind \p Kind, creating it on first use. On repeated use the
// element type and sizes are reconciled with the new access; conflicting
// sizes invalidate the SCoP via a false runtime context.
ScopArrayInfo *Scop::getOrCreateScopArrayInfo(Value *BasePtr, Type *ElementType,
                                              ArrayRef<const SCEV *> Sizes,
                                              MemoryKind Kind,
                                              const char *BaseName) {
  assert((BasePtr || BaseName) &&
         "BasePtr and BaseName can not be nullptr at the same time.");
  assert(!(BasePtr && BaseName) && "BaseName is redundant.");
  // Exactly one of the two maps is used, depending on how the array is keyed.
  auto &SAI = BasePtr ? ScopArrayInfoMap[std::make_pair(BasePtr, Kind)]
                      : ScopArrayNameMap[BaseName];
  if (!SAI) {
    auto &DL = getFunction().getParent()->getDataLayout();
    SAI.reset(new ScopArrayInfo(BasePtr, ElementType, getIslCtx(), Sizes, Kind,
                                DL, this, BaseName));
    ScopArrayInfoSet.insert(SAI.get());
  } else {
    SAI->updateElementType(ElementType);
    // In case of mismatching array sizes, we bail out by setting the run-time
    // context to false.
    if (!SAI->updateSizes(Sizes))
      invalidate(DELINEARIZATION, DebugLoc());
  }
  return SAI.get();
}
1860
createScopArrayInfo(Type * ElementType,const std::string & BaseName,const std::vector<unsigned> & Sizes)1861 ScopArrayInfo *Scop::createScopArrayInfo(Type *ElementType,
1862 const std::string &BaseName,
1863 const std::vector<unsigned> &Sizes) {
1864 auto *DimSizeType = Type::getInt64Ty(getSE()->getContext());
1865 std::vector<const SCEV *> SCEVSizes;
1866
1867 for (auto size : Sizes)
1868 if (size)
1869 SCEVSizes.push_back(getSE()->getConstant(DimSizeType, size, false));
1870 else
1871 SCEVSizes.push_back(nullptr);
1872
1873 auto *SAI = getOrCreateScopArrayInfo(nullptr, ElementType, SCEVSizes,
1874 MemoryKind::Array, BaseName.c_str());
1875 return SAI;
1876 }
1877
getScopArrayInfoOrNull(Value * BasePtr,MemoryKind Kind)1878 ScopArrayInfo *Scop::getScopArrayInfoOrNull(Value *BasePtr, MemoryKind Kind) {
1879 auto *SAI = ScopArrayInfoMap[std::make_pair(BasePtr, Kind)].get();
1880 return SAI;
1881 }
1882
// Like getScopArrayInfoOrNull(), but the array is required to exist.
ScopArrayInfo *Scop::getScopArrayInfo(Value *BasePtr, MemoryKind Kind) {
  auto *SAI = getScopArrayInfoOrNull(BasePtr, Kind);
  assert(SAI && "No ScopArrayInfo available for this base pointer");
  return SAI;
}
1888
// Return the validity context as an isl string.
std::string Scop::getContextStr() const {
  return stringFromIslObj(getContext());
}
1892
// Return the assumed context as an isl string; only valid once buildContext()
// has run.
std::string Scop::getAssumedContextStr() const {
  assert(!AssumedContext.is_null() && "Assumed context not yet built");
  return stringFromIslObj(AssumedContext);
}
1897
// Return the invalid context as an isl string.
std::string Scop::getInvalidContextStr() const {
  return stringFromIslObj(InvalidContext);
}
1901
getNameStr() const1902 std::string Scop::getNameStr() const {
1903 std::string ExitName, EntryName;
1904 std::tie(EntryName, ExitName) = getEntryExitStr();
1905 return EntryName + "---" + ExitName;
1906 }
1907
// Return the printable names of the region's entry and exit blocks as a
// (entry, exit) pair. A missing exit (top-level region) is reported as
// "FunctionExit".
std::pair<std::string, std::string> Scop::getEntryExitStr() const {
  std::string ExitName, EntryName;
  raw_string_ostream ExitStr(ExitName);
  raw_string_ostream EntryStr(EntryName);

  R.getEntry()->printAsOperand(EntryStr, false);
  // str() flushes the stream contents into the backing std::string.
  EntryStr.str();

  if (R.getExit()) {
    R.getExit()->printAsOperand(ExitStr, false);
    ExitStr.str();
  } else
    ExitName = "FunctionExit";

  return std::make_pair(EntryName, ExitName);
}
1924
getContext() const1925 isl::set Scop::getContext() const { return Context; }
1926
getParamSpace() const1927 isl::space Scop::getParamSpace() const { return getContext().get_space(); }
1928
getFullParamSpace() const1929 isl::space Scop::getFullParamSpace() const {
1930 std::vector<isl::id> FortranIDs;
1931 FortranIDs = getFortranArrayIds(arrays());
1932
1933 isl::space Space = isl::space::params_alloc(
1934 getIslCtx(), ParameterIds.size() + FortranIDs.size());
1935
1936 unsigned PDim = 0;
1937 for (const SCEV *Parameter : Parameters) {
1938 isl::id Id = getIdForParam(Parameter);
1939 Space = Space.set_dim_id(isl::dim::param, PDim++, Id);
1940 }
1941
1942 for (isl::id Id : FortranIDs)
1943 Space = Space.set_dim_id(isl::dim::param, PDim++, Id);
1944
1945 return Space;
1946 }
1947
// Return the assumed context; only valid once buildContext() has run.
isl::set Scop::getAssumedContext() const {
  assert(!AssumedContext.is_null() && "Assumed context not yet built");
  return AssumedContext;
}
1952
// Heuristically decide whether optimizing this SCoP is likely worthwhile:
// it must contain more than one loop dimension with optimizable accesses.
// -polly-process-unprofitable overrides the heuristic.
bool Scop::isProfitable(bool ScalarsAreUnprofitable) const {
  if (PollyProcessUnprofitable)
    return true;

  if (isEmpty())
    return false;

  unsigned OptimizableStmtsOrLoops = 0;
  for (auto &Stmt : *this) {
    // Statements outside of any loop cannot profit from loop optimizations.
    if (Stmt.getNumIterators() == 0)
      continue;

    bool ContainsArrayAccs = false;
    bool ContainsScalarAccs = false;
    for (auto *MA : Stmt) {
      if (MA->isRead())
        continue;
      ContainsArrayAccs |= MA->isLatestArrayKind();
      ContainsScalarAccs |= MA->isLatestScalarKind();
    }

    // Scalar writes are counted as unprofitable when the caller requests it,
    // since they typically induce dependences that block transformations.
    if (!ScalarsAreUnprofitable || (ContainsArrayAccs && !ContainsScalarAccs))
      OptimizableStmtsOrLoops += Stmt.getNumIterators();
  }

  return OptimizableStmtsOrLoops > 1;
}
1980
// Return true if some parameter valuation can pass the runtime check: the
// assumed context, restricted to the validity context and the domain
// parameters, must be non-empty and not entirely contained in the invalid
// context.
bool Scop::hasFeasibleRuntimeContext() const {
  if (Stmts.empty())
    return false;

  isl::set PositiveContext = getAssumedContext();
  isl::set NegativeContext = getInvalidContext();
  PositiveContext = PositiveContext.intersect_params(Context);
  PositiveContext = PositiveContext.intersect_params(getDomains().params());
  // is_false() (rather than a plain bool conversion) also treats isl errors
  // as "infeasible".
  return PositiveContext.is_empty().is_false() &&
         PositiveContext.is_subset(NegativeContext).is_false();
}
1992
lookupBasePtrAccess(MemoryAccess * MA)1993 MemoryAccess *Scop::lookupBasePtrAccess(MemoryAccess *MA) {
1994 Value *PointerBase = MA->getOriginalBaseAddr();
1995
1996 auto *PointerBaseInst = dyn_cast<Instruction>(PointerBase);
1997 if (!PointerBaseInst)
1998 return nullptr;
1999
2000 auto *BasePtrStmt = getStmtFor(PointerBaseInst);
2001 if (!BasePtrStmt)
2002 return nullptr;
2003
2004 return BasePtrStmt->getArrayAccessOrNULLFor(PointerBaseInst);
2005 }
2006
toString(AssumptionKind Kind)2007 static std::string toString(AssumptionKind Kind) {
2008 switch (Kind) {
2009 case ALIASING:
2010 return "No-aliasing";
2011 case INBOUNDS:
2012 return "Inbounds";
2013 case WRAPPING:
2014 return "No-overflows";
2015 case UNSIGNED:
2016 return "Signed-unsigned";
2017 case COMPLEXITY:
2018 return "Low complexity";
2019 case PROFITABLE:
2020 return "Profitable";
2021 case ERRORBLOCK:
2022 return "No-error";
2023 case INFINITELOOP:
2024 return "Finite loop";
2025 case INVARIANTLOAD:
2026 return "Invariant load";
2027 case DELINEARIZATION:
2028 return "Delinearization";
2029 }
2030 llvm_unreachable("Unknown AssumptionKind!");
2031 }
2032
// Return true if taking \p Set as an assumption (AS_ASSUMPTION) or
// restriction (AS_RESTRICTION) would actually change the current contexts,
// i.e. the assumption is not already implied.
bool Scop::isEffectiveAssumption(isl::set Set, AssumptionSign Sign) {
  if (Sign == AS_ASSUMPTION) {
    // Already implied by the validity context?
    if (Context.is_subset(Set))
      return false;

    // Already implied by previously taken assumptions?
    if (AssumedContext.is_subset(Set))
      return false;
  } else {
    // A restriction disjoint from the validity context can never trigger.
    if (Set.is_disjoint(Context))
      return false;

    // Already covered by the accumulated invalid context?
    if (Set.is_subset(InvalidContext))
      return false;
  }
  return true;
}
2049
// Record statistics for a taken assumption/restriction and emit an
// optimization remark for it. Returns false when the assumption should be
// dropped (trivial, or ineffective under -polly-remarks-minimal).
bool Scop::trackAssumption(AssumptionKind Kind, isl::set Set, DebugLoc Loc,
                           AssumptionSign Sign, BasicBlock *BB) {
  if (PollyRemarksMinimal && !isEffectiveAssumption(Set, Sign))
    return false;

  // Do never emit trivial assumptions as they only clutter the output.
  if (!PollyRemarksMinimal) {
    isl::set Univ;
    if (Sign == AS_ASSUMPTION)
      Univ = isl::set::universe(Set.get_space());

    // An empty restriction or a universe assumption changes nothing.
    bool IsTrivial = (Sign == AS_RESTRICTION && Set.is_empty()) ||
                     (Sign == AS_ASSUMPTION && Univ.is_equal(Set));

    if (IsTrivial)
      return false;
  }

  // Bump the per-kind statistics counter.
  switch (Kind) {
  case ALIASING:
    AssumptionsAliasing++;
    break;
  case INBOUNDS:
    AssumptionsInbounds++;
    break;
  case WRAPPING:
    AssumptionsWrapping++;
    break;
  case UNSIGNED:
    AssumptionsUnsigned++;
    break;
  case COMPLEXITY:
    AssumptionsComplexity++;
    break;
  case PROFITABLE:
    AssumptionsUnprofitable++;
    break;
  case ERRORBLOCK:
    AssumptionsErrorBlock++;
    break;
  case INFINITELOOP:
    AssumptionsInfiniteLoop++;
    break;
  case INVARIANTLOAD:
    AssumptionsInvariantLoad++;
    break;
  case DELINEARIZATION:
    AssumptionsDelinearization++;
    break;
  }

  // Emit the remark at \p BB when known, otherwise at the region entry.
  auto Suffix = Sign == AS_ASSUMPTION ? " assumption:\t" : " restriction:\t";
  std::string Msg = toString(Kind) + Suffix + stringFromIslObj(Set);
  if (BB)
    ORE.emit(OptimizationRemarkAnalysis(DEBUG_TYPE, "AssumpRestrict", Loc, BB)
             << Msg);
  else
    ORE.emit(OptimizationRemarkAnalysis(DEBUG_TYPE, "AssumpRestrict", Loc,
                                        R.getEntry())
             << Msg);
  return true;
}
2112
// Take an assumption (AS_ASSUMPTION) or restriction (AS_RESTRICTION) \p Set.
// The defined-behavior context is always updated; the runtime-checked
// assumed/invalid contexts only when \p RequiresRTC is set and the
// assumption is worth tracking.
void Scop::addAssumption(AssumptionKind Kind, isl::set Set, DebugLoc Loc,
                         AssumptionSign Sign, BasicBlock *BB,
                         bool RequiresRTC) {
  // Simplify the assumptions/restrictions first.
  Set = Set.gist_params(getContext());
  intersectDefinedBehavior(Set, Sign);

  if (!RequiresRTC)
    return;

  if (!trackAssumption(Kind, Set, Loc, Sign, BB))
    return;

  if (Sign == AS_ASSUMPTION)
    AssumedContext = AssumedContext.intersect(Set).coalesce();
  else
    InvalidContext = InvalidContext.unite(Set).coalesce();
}
2131
// Fold \p Set into the defined-behavior context (intersect for assumptions,
// subtract for restrictions). If the context has already been abandoned
// (null) this is a no-op; if it grows too complex it is abandoned.
void Scop::intersectDefinedBehavior(isl::set Set, AssumptionSign Sign) {
  if (DefinedBehaviorContext.is_null())
    return;

  if (Sign == AS_ASSUMPTION)
    DefinedBehaviorContext = DefinedBehaviorContext.intersect(Set);
  else
    DefinedBehaviorContext = DefinedBehaviorContext.subtract(Set);

  // Limit the complexity of the context. If complexity is exceeded, simplify
  // the set and check again.
  if (DefinedBehaviorContext.n_basic_set() >
      MaxDisjunktsInDefinedBehaviourContext) {
    simplify(DefinedBehaviorContext);
    if (DefinedBehaviorContext.n_basic_set() >
        MaxDisjunktsInDefinedBehaviourContext)
      DefinedBehaviorContext = {};
  }
}
2151
// Mark the whole SCoP as invalid by assuming the empty parameter set, which
// makes the runtime check unsatisfiable.
void Scop::invalidate(AssumptionKind Kind, DebugLoc Loc, BasicBlock *BB) {
  LLVM_DEBUG(dbgs() << "Invalidate SCoP because of reason " << Kind << "\n");
  addAssumption(Kind, isl::set::empty(getParamSpace()), Loc, AS_ASSUMPTION, BB);
}
2156
getInvalidContext() const2157 isl::set Scop::getInvalidContext() const { return InvalidContext; }
2158
// Print all four contexts and the parameter list to \p OS.
void Scop::printContext(raw_ostream &OS) const {
  OS << "Context:\n";
  OS.indent(4) << Context << "\n";

  OS.indent(4) << "Assumed Context:\n";
  OS.indent(4) << AssumedContext << "\n";

  OS.indent(4) << "Invalid Context:\n";
  OS.indent(4) << InvalidContext << "\n";

  // The defined-behavior context may have been abandoned for complexity.
  OS.indent(4) << "Defined Behavior Context:\n";
  if (!DefinedBehaviorContext.is_null())
    OS.indent(4) << DefinedBehaviorContext << "\n";
  else
    OS.indent(4) << "<unavailable>\n";

  unsigned Dim = 0;
  for (const SCEV *Parameter : Parameters)
    OS.indent(4) << "p" << Dim++ << ": " << *Parameter << "\n";
}
2179
printAliasAssumptions(raw_ostream & OS) const2180 void Scop::printAliasAssumptions(raw_ostream &OS) const {
2181 int noOfGroups = 0;
2182 for (const MinMaxVectorPairTy &Pair : MinMaxAliasGroups) {
2183 if (Pair.second.size() == 0)
2184 noOfGroups += 1;
2185 else
2186 noOfGroups += Pair.second.size();
2187 }
2188
2189 OS.indent(4) << "Alias Groups (" << noOfGroups << "):\n";
2190 if (MinMaxAliasGroups.empty()) {
2191 OS.indent(8) << "n/a\n";
2192 return;
2193 }
2194
2195 for (const MinMaxVectorPairTy &Pair : MinMaxAliasGroups) {
2196
2197 // If the group has no read only accesses print the write accesses.
2198 if (Pair.second.empty()) {
2199 OS.indent(8) << "[[";
2200 for (const MinMaxAccessTy &MMANonReadOnly : Pair.first) {
2201 OS << " <" << MMANonReadOnly.first << ", " << MMANonReadOnly.second
2202 << ">";
2203 }
2204 OS << " ]]\n";
2205 }
2206
2207 for (const MinMaxAccessTy &MMAReadOnly : Pair.second) {
2208 OS.indent(8) << "[[";
2209 OS << " <" << MMAReadOnly.first << ", " << MMAReadOnly.second << ">";
2210 for (const MinMaxAccessTy &MMANonReadOnly : Pair.first) {
2211 OS << " <" << MMANonReadOnly.first << ", " << MMANonReadOnly.second
2212 << ">";
2213 }
2214 OS << " ]]\n";
2215 }
2216 }
2217 }
2218
// Print all statements of this SCoP; \p PrintInstructions additionally lists
// each statement's LLVM-IR instructions.
void Scop::printStatements(raw_ostream &OS, bool PrintInstructions) const {
  OS << "Statements {\n";

  for (const ScopStmt &Stmt : *this) {
    OS.indent(4);
    Stmt.print(OS, PrintInstructions);
  }

  OS.indent(4) << "}\n";
}
2229
// Print all arrays twice: once with plain sizes and once with the dimension
// sizes rendered as piecewise-affine expressions.
void Scop::printArrayInfo(raw_ostream &OS) const {
  OS << "Arrays {\n";

  for (auto &Array : arrays())
    Array->print(OS);

  OS.indent(4) << "}\n";

  OS.indent(4) << "Arrays (Bounds as pw_affs) {\n";

  for (auto &Array : arrays())
    Array->print(OS, /* SizeAsPwAff */ true);

  OS.indent(4) << "}\n";
}
2245
// Print a full textual description of the SCoP: function/region header,
// invariant accesses, contexts, arrays, alias groups and statements.
void Scop::print(raw_ostream &OS, bool PrintInstructions) const {
  OS.indent(4) << "Function: " << getFunction().getName() << "\n";
  OS.indent(4) << "Region: " << getNameStr() << "\n";
  OS.indent(4) << "Max Loop Depth: " << getMaxLoopDepth() << "\n";
  OS.indent(4) << "Invariant Accesses: {\n";
  for (const auto &IAClass : InvariantEquivClasses) {
    const auto &MAs = IAClass.InvariantAccesses;
    if (MAs.empty()) {
      // Class without materialized accesses: only its identifying pointer.
      OS.indent(12) << "Class Pointer: " << *IAClass.IdentifyingPointer << "\n";
    } else {
      MAs.front()->print(OS);
      OS.indent(12) << "Execution Context: " << IAClass.ExecutionContext
                    << "\n";
    }
  }
  OS.indent(4) << "}\n";
  printContext(OS.indent(4));
  printArrayInfo(OS.indent(4));
  printAliasAssumptions(OS);
  printStatements(OS.indent(4), PrintInstructions);
}
2267
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
// Debugger helper: print the full SCoP (with instructions) to dbgs().
LLVM_DUMP_METHOD void Scop::dump() const { print(dbgs(), true); }
#endif
2271
getIslCtx() const2272 isl::ctx Scop::getIslCtx() const { return IslCtx.get(); }
2273
// Translate the SCEV @p E (evaluated at @p BB) into a piecewise-affine
// function plus its invalid domain. On failure the SCoP is invalidated with a
// COMPLEXITY assumption and a zero expression is returned, so callers need no
// error handling.
__isl_give PWACtx Scop::getPwAff(const SCEV *E, BasicBlock *BB,
                                 bool NonNegative,
                                 RecordedAssumptionsTy *RecordedAssumptions) {
  // First try to use the SCEVAffinator to generate a piecewise defined
  // affine function from @p E in the context of @p BB. If that tasks becomes to
  // complex the affinator might return a nullptr. In such a case we invalidate
  // the SCoP and return a dummy value. This way we do not need to add error
  // handling code to all users of this function.
  auto PWAC = Affinator.getPwAff(E, BB, RecordedAssumptions);
  if (!PWAC.first.is_null()) {
    // TODO: We could use a heuristic and either use:
    //         SCEVAffinator::takeNonNegativeAssumption
    //       or
    //         SCEVAffinator::interpretAsUnsigned
    //       to deal with unsigned or "NonNegative" SCEVs.
    if (NonNegative)
      Affinator.takeNonNegativeAssumption(PWAC, RecordedAssumptions);
    return PWAC;
  }

  auto DL = BB ? BB->getTerminator()->getDebugLoc() : DebugLoc();
  invalidate(COMPLEXITY, DL, BB);
  return Affinator.getPwAff(SE->getZero(E->getType()), BB, RecordedAssumptions);
}
2298
// Collect the union of all statement domains of this SCoP.
isl::union_set Scop::getDomains() const {
  // Built via the raw isl C API; ownership of the released sets transfers to
  // Domain, which is re-wrapped with isl::manage at the end.
  isl_space *EmptySpace = isl_space_params_alloc(getIslCtx().get(), 0);
  isl_union_set *Domain = isl_union_set_empty(EmptySpace);

  for (const ScopStmt &Stmt : *this)
    Domain = isl_union_set_add_set(Domain, Stmt.getDomain().release());

  return isl::manage(Domain);
}
2308
getPwAffOnly(const SCEV * E,BasicBlock * BB,RecordedAssumptionsTy * RecordedAssumptions)2309 isl::pw_aff Scop::getPwAffOnly(const SCEV *E, BasicBlock *BB,
2310 RecordedAssumptionsTy *RecordedAssumptions) {
2311 PWACtx PWAC = getPwAff(E, BB, RecordedAssumptions);
2312 return PWAC.first;
2313 }
2314
// Build the union of access relations (restricted to the statement domains)
// of all memory accesses for which \p Predicate returns true.
isl::union_map
Scop::getAccessesOfType(std::function<bool(MemoryAccess &)> Predicate) {
  isl::union_map Accesses = isl::union_map::empty(getIslCtx());

  for (ScopStmt &Stmt : *this) {
    for (MemoryAccess *MA : Stmt) {
      if (!Predicate(*MA))
        continue;

      // Restrict each access relation to the iterations that actually run.
      isl::set Domain = Stmt.getDomain();
      isl::map AccessDomain = MA->getAccessRelation();
      AccessDomain = AccessDomain.intersect_domain(Domain);
      Accesses = Accesses.unite(AccessDomain);
    }
  }

  return Accesses.coalesce();
}
2333
// Union of all must-write access relations.
isl::union_map Scop::getMustWrites() {
  return getAccessesOfType([](MemoryAccess &MA) { return MA.isMustWrite(); });
}
2337
// Union of all may-write access relations.
isl::union_map Scop::getMayWrites() {
  return getAccessesOfType([](MemoryAccess &MA) { return MA.isMayWrite(); });
}
2341
// Union of all write (must or may) access relations.
isl::union_map Scop::getWrites() {
  return getAccessesOfType([](MemoryAccess &MA) { return MA.isWrite(); });
}
2345
// Union of all read access relations.
isl::union_map Scop::getReads() {
  return getAccessesOfType([](MemoryAccess &MA) { return MA.isRead(); });
}
2349
// Union of all access relations, reads and writes alike.
isl::union_map Scop::getAccesses() {
  return getAccessesOfType([](MemoryAccess &MA) { return true; });
}
2353
// Union of all access relations that touch the given array.
isl::union_map Scop::getAccesses(ScopArrayInfo *Array) {
  return getAccessesOfType(
      [Array](MemoryAccess &MA) { return MA.getScopArrayInfo() == Array; });
}
2358
getSchedule() const2359 isl::union_map Scop::getSchedule() const {
2360 auto Tree = getScheduleTree();
2361 return Tree.get_map();
2362 }
2363
// Return the schedule tree, restricted to the current statement domains.
isl::schedule Scop::getScheduleTree() const {
  return Schedule.intersect_domain(getDomains());
}
2367
// Replace the schedule with \p NewSchedule, given as a flat union_map; it is
// converted into a schedule tree over the current domains. Marks the
// schedule as modified.
void Scop::setSchedule(isl::union_map NewSchedule) {
  auto S = isl::schedule::from_domain(getDomains());
  Schedule = S.insert_partial_schedule(
      isl::multi_union_pw_aff::from_union_map(NewSchedule));
  ScheduleModified = true;
}
2374
// Replace the schedule tree wholesale and mark the schedule as modified.
void Scop::setScheduleTree(isl::schedule NewSchedule) {
  Schedule = NewSchedule;
  ScheduleModified = true;
}
2379
// Intersect every statement domain with \p Domain. Returns true if any
// statement's domain actually shrank.
bool Scop::restrictDomains(isl::union_set Domain) {
  bool Changed = false;
  for (ScopStmt &Stmt : *this) {
    isl::union_set StmtDomain = isl::union_set(Stmt.getDomain());
    isl::union_set NewStmtDomain = StmtDomain.intersect(Domain);

    // Nothing removed from this statement's domain.
    if (StmtDomain.is_subset(NewStmtDomain))
      continue;

    Changed = true;

    NewStmtDomain = NewStmtDomain.coalesce();

    // An empty union_set has no set to extract, so build the empty set in the
    // statement's own space explicitly.
    if (NewStmtDomain.is_empty())
      Stmt.restrictDomain(isl::set::empty(Stmt.getDomainSpace()));
    else
      Stmt.restrictDomain(isl::set(NewStmtDomain));
  }
  return Changed;
}
2400
getSE() const2401 ScalarEvolution *Scop::getSE() const { return SE; }
2402
// Create a block statement for \p BB covering \p Instructions and register it
// in the block and instruction lookup maps.
void Scop::addScopStmt(BasicBlock *BB, StringRef Name, Loop *SurroundingLoop,
                       std::vector<Instruction *> Instructions) {
  assert(BB && "Unexpected nullptr!");
  Stmts.emplace_back(*this, *BB, Name, SurroundingLoop, Instructions);
  auto *Stmt = &Stmts.back();
  StmtMap[BB].push_back(Stmt);
  for (Instruction *Inst : Instructions) {
    assert(!InstStmtMap.count(Inst) &&
           "Unexpected statement corresponding to the instruction.");
    InstStmtMap[Inst] = Stmt;
  }
}
2415
// Create a region statement for the non-affine subregion \p R and register
// every contained block and instruction in the lookup maps.
void Scop::addScopStmt(Region *R, StringRef Name, Loop *SurroundingLoop,
                       std::vector<Instruction *> Instructions) {
  assert(R && "Unexpected nullptr!");
  Stmts.emplace_back(*this, *R, Name, SurroundingLoop, Instructions);
  auto *Stmt = &Stmts.back();

  // The entry block's instructions are given explicitly ...
  for (Instruction *Inst : Instructions) {
    assert(!InstStmtMap.count(Inst) &&
           "Unexpected statement corresponding to the instruction.");
    InstStmtMap[Inst] = Stmt;
  }

  // ... the remaining blocks' instructions are mapped wholesale.
  for (BasicBlock *BB : R->blocks()) {
    StmtMap[BB].push_back(Stmt);
    if (BB == R->getEntry())
      continue;
    for (Instruction &Inst : *BB) {
      assert(!InstStmtMap.count(&Inst) &&
             "Unexpected statement corresponding to the instruction.");
      InstStmtMap[&Inst] = Stmt;
    }
  }
}
2439
// Create a copy statement that moves data from \p SourceRel to \p TargetRel
// over \p Domain. Both access relations must cover the entire domain.
ScopStmt *Scop::addScopStmt(isl::map SourceRel, isl::map TargetRel,
                            isl::set Domain) {
#ifndef NDEBUG
  isl::set SourceDomain = SourceRel.domain();
  isl::set TargetDomain = TargetRel.domain();
  assert(Domain.is_subset(TargetDomain) &&
         "Target access not defined for complete statement domain");
  assert(Domain.is_subset(SourceDomain) &&
         "Source access not defined for complete statement domain");
#endif
  Stmts.emplace_back(*this, SourceRel, TargetRel, Domain);
  CopyStmtsNum++;
  return &(Stmts.back());
}
2454
getStmtListFor(BasicBlock * BB) const2455 ArrayRef<ScopStmt *> Scop::getStmtListFor(BasicBlock *BB) const {
2456 auto StmtMapIt = StmtMap.find(BB);
2457 if (StmtMapIt == StmtMap.end())
2458 return {};
2459 return StmtMapIt->second;
2460 }
2461
// For a PHI operand \p U, return the statement that provides the incoming
// value: the statement containing the (non-synthesizable) defining
// instruction when it lives in the incoming block, otherwise the last
// statement of the incoming block.
ScopStmt *Scop::getIncomingStmtFor(const Use &U) const {
  auto *PHI = cast<PHINode>(U.getUser());
  BasicBlock *IncomingBB = PHI->getIncomingBlock(U);

  // If the value is a non-synthesizable from the incoming block, use the
  // statement that contains it as user statement.
  if (auto *IncomingInst = dyn_cast<Instruction>(U.get())) {
    if (IncomingInst->getParent() == IncomingBB) {
      if (ScopStmt *IncomingStmt = getStmtFor(IncomingInst))
        return IncomingStmt;
    }
  }

  // Otherwise, use the epilogue/last statement.
  return getLastStmtFor(IncomingBB);
}
2478
getLastStmtFor(BasicBlock * BB) const2479 ScopStmt *Scop::getLastStmtFor(BasicBlock *BB) const {
2480 ArrayRef<ScopStmt *> StmtList = getStmtListFor(BB);
2481 if (!StmtList.empty())
2482 return StmtList.back();
2483 return nullptr;
2484 }
2485
getStmtListFor(RegionNode * RN) const2486 ArrayRef<ScopStmt *> Scop::getStmtListFor(RegionNode *RN) const {
2487 if (RN->isSubRegion())
2488 return getStmtListFor(RN->getNodeAs<Region>());
2489 return getStmtListFor(RN->getNodeAs<BasicBlock>());
2490 }
2491
// A region is represented by the statement list of its entry block.
ArrayRef<ScopStmt *> Scop::getStmtListFor(Region *R) const {
  return getStmtListFor(R->getEntry());
}
2495
getRelativeLoopDepth(const Loop * L) const2496 int Scop::getRelativeLoopDepth(const Loop *L) const {
2497 if (!L || !R.contains(L))
2498 return -1;
2499 // outermostLoopInRegion always returns nullptr for top level regions
2500 if (R.isTopLevelRegion()) {
2501 // LoopInfo's depths start at 1, we start at 0
2502 return L->getLoopDepth() - 1;
2503 } else {
2504 Loop *OuterLoop = R.outermostLoopInRegion(const_cast<Loop *>(L));
2505 assert(OuterLoop);
2506 return L->getLoopDepth() - OuterLoop->getLoopDepth();
2507 }
2508 }
2509
getArrayInfoByName(const std::string BaseName)2510 ScopArrayInfo *Scop::getArrayInfoByName(const std::string BaseName) {
2511 for (auto &SAI : arrays()) {
2512 if (SAI->getName() == BaseName)
2513 return SAI;
2514 }
2515 return nullptr;
2516 }
2517
addAccessData(MemoryAccess * Access)2518 void Scop::addAccessData(MemoryAccess *Access) {
2519 const ScopArrayInfo *SAI = Access->getOriginalScopArrayInfo();
2520 assert(SAI && "can only use after access relations have been constructed");
2521
2522 if (Access->isOriginalValueKind() && Access->isRead())
2523 ValueUseAccs[SAI].push_back(Access);
2524 else if (Access->isOriginalAnyPHIKind() && Access->isWrite())
2525 PHIIncomingAccs[SAI].push_back(Access);
2526 }
2527
removeAccessData(MemoryAccess * Access)2528 void Scop::removeAccessData(MemoryAccess *Access) {
2529 if (Access->isOriginalValueKind() && Access->isWrite()) {
2530 ValueDefAccs.erase(Access->getAccessValue());
2531 } else if (Access->isOriginalValueKind() && Access->isRead()) {
2532 auto &Uses = ValueUseAccs[Access->getScopArrayInfo()];
2533 auto NewEnd = std::remove(Uses.begin(), Uses.end(), Access);
2534 Uses.erase(NewEnd, Uses.end());
2535 } else if (Access->isOriginalPHIKind() && Access->isRead()) {
2536 PHINode *PHI = cast<PHINode>(Access->getAccessInstruction());
2537 PHIReadAccs.erase(PHI);
2538 } else if (Access->isOriginalAnyPHIKind() && Access->isWrite()) {
2539 auto &Incomings = PHIIncomingAccs[Access->getScopArrayInfo()];
2540 auto NewEnd = std::remove(Incomings.begin(), Incomings.end(), Access);
2541 Incomings.erase(NewEnd, Incomings.end());
2542 }
2543 }
2544
getValueDef(const ScopArrayInfo * SAI) const2545 MemoryAccess *Scop::getValueDef(const ScopArrayInfo *SAI) const {
2546 assert(SAI->isValueKind());
2547
2548 Instruction *Val = dyn_cast<Instruction>(SAI->getBasePtr());
2549 if (!Val)
2550 return nullptr;
2551
2552 return ValueDefAccs.lookup(Val);
2553 }
2554
getValueUses(const ScopArrayInfo * SAI) const2555 ArrayRef<MemoryAccess *> Scop::getValueUses(const ScopArrayInfo *SAI) const {
2556 assert(SAI->isValueKind());
2557 auto It = ValueUseAccs.find(SAI);
2558 if (It == ValueUseAccs.end())
2559 return {};
2560 return It->second;
2561 }
2562
getPHIRead(const ScopArrayInfo * SAI) const2563 MemoryAccess *Scop::getPHIRead(const ScopArrayInfo *SAI) const {
2564 assert(SAI->isPHIKind() || SAI->isExitPHIKind());
2565
2566 if (SAI->isExitPHIKind())
2567 return nullptr;
2568
2569 PHINode *PHI = cast<PHINode>(SAI->getBasePtr());
2570 return PHIReadAccs.lookup(PHI);
2571 }
2572
getPHIIncomings(const ScopArrayInfo * SAI) const2573 ArrayRef<MemoryAccess *> Scop::getPHIIncomings(const ScopArrayInfo *SAI) const {
2574 assert(SAI->isPHIKind() || SAI->isExitPHIKind());
2575 auto It = PHIIncomingAccs.find(SAI);
2576 if (It == PHIIncomingAccs.end())
2577 return {};
2578 return It->second;
2579 }
2580
isEscaping(Instruction * Inst)2581 bool Scop::isEscaping(Instruction *Inst) {
2582 assert(contains(Inst) && "The concept of escaping makes only sense for "
2583 "values defined inside the SCoP");
2584
2585 for (Use &Use : Inst->uses()) {
2586 BasicBlock *UserBB = getUseBlock(Use);
2587 if (!contains(UserBB))
2588 return true;
2589
2590 // When the SCoP region exit needs to be simplified, PHIs in the region exit
2591 // move to a new basic block such that its incoming blocks are not in the
2592 // SCoP anymore.
2593 if (hasSingleExitEdge() && isa<PHINode>(Use.getUser()) &&
2594 isExit(cast<PHINode>(Use.getUser())->getParent()))
2595 return true;
2596 }
2597 return false;
2598 }
2599
// Bump the counter of aliasing assumptions taken for this SCoP by \p step.
void Scop::incrementNumberOfAliasingAssumptions(unsigned step) {
  AssumptionsAliasing += step;
}
2603
// Collect statistics for this SCoP: loop counts (affine vs. boxed) and a
// classification of the scalar writes it contains. The body is only compiled
// in builds with assertions or LLVM_ENABLE_STATS; otherwise a zero-initialized
// result is returned.
Scop::ScopStatistics Scop::getStatistics() const {
  ScopStatistics Result;
#if !defined(NDEBUG) || defined(LLVM_ENABLE_STATS)
  auto LoopStat = ScopDetection::countBeneficialLoops(&R, *SE, *getLI(), 0);

  int NumTotalLoops = LoopStat.NumLoops;
  Result.NumBoxedLoops = getBoxedLoops().size();
  // Every counted loop that is not boxed is modeled as affine.
  Result.NumAffineLoops = NumTotalLoops - Result.NumBoxedLoops;

  for (const ScopStmt &Stmt : *this) {
    // Restrict the statement domain to the parameter context for a precise
    // access-range computation below.
    isl::set Domain = Stmt.getDomain().intersect_params(getContext());
    bool IsInLoop = Stmt.getNumIterators() >= 1;
    for (MemoryAccess *MA : Stmt) {
      // Only writes are classified.
      if (!MA->isWrite())
        continue;

      if (MA->isLatestValueKind()) {
        Result.NumValueWrites += 1;
        if (IsInLoop)
          Result.NumValueWritesInLoops += 1;
      }

      if (MA->isLatestAnyPHIKind()) {
        Result.NumPHIWrites += 1;
        if (IsInLoop)
          Result.NumPHIWritesInLoops += 1;
      }

      // A "singleton" write touches exactly one array element over the whole
      // (context-restricted) statement domain.
      isl::set AccSet =
          MA->getAccessRelation().intersect_domain(Domain).range();
      if (AccSet.is_singleton()) {
        Result.NumSingletonWrites += 1;
        if (IsInLoop)
          Result.NumSingletonWritesInLoops += 1;
      }
    }
  }
#endif
  return Result;
}
2644
// Stream a textual dump of \p scop to \p OS; whether instructions are printed
// is controlled by the PollyPrintInstructions command line option.
raw_ostream &polly::operator<<(raw_ostream &OS, const Scop &scop) {
  scop.print(OS, PollyPrintInstructions);
  return OS;
}
2649
2650 //===----------------------------------------------------------------------===//
// Declare the analyses ScopBuilder needs; this pass only builds an analysis
// result itself, hence it preserves everything.
void ScopInfoRegionPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<LoopInfoWrapperPass>();
  AU.addRequired<RegionInfoPass>();
  AU.addRequired<DominatorTreeWrapperPass>();
  AU.addRequiredTransitive<ScalarEvolutionWrapperPass>();
  AU.addRequiredTransitive<ScopDetectionWrapperPass>();
  AU.addRequired<AAResultsWrapperPass>();
  AU.addRequired<AssumptionCacheTracker>();
  AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
  AU.setPreservesAll();
}
2662
updateLoopCountStatistic(ScopDetection::LoopStats Stats,Scop::ScopStatistics ScopStats)2663 void updateLoopCountStatistic(ScopDetection::LoopStats Stats,
2664 Scop::ScopStatistics ScopStats) {
2665 assert(Stats.NumLoops == ScopStats.NumAffineLoops + ScopStats.NumBoxedLoops);
2666
2667 NumScops++;
2668 NumLoopsInScop += Stats.NumLoops;
2669 MaxNumLoopsInScop =
2670 std::max(MaxNumLoopsInScop.getValue(), (unsigned)Stats.NumLoops);
2671
2672 if (Stats.MaxDepth == 0)
2673 NumScopsDepthZero++;
2674 else if (Stats.MaxDepth == 1)
2675 NumScopsDepthOne++;
2676 else if (Stats.MaxDepth == 2)
2677 NumScopsDepthTwo++;
2678 else if (Stats.MaxDepth == 3)
2679 NumScopsDepthThree++;
2680 else if (Stats.MaxDepth == 4)
2681 NumScopsDepthFour++;
2682 else if (Stats.MaxDepth == 5)
2683 NumScopsDepthFive++;
2684 else
2685 NumScopsDepthLarger++;
2686
2687 NumAffineLoops += ScopStats.NumAffineLoops;
2688 NumBoxedLoops += ScopStats.NumBoxedLoops;
2689
2690 NumValueWrites += ScopStats.NumValueWrites;
2691 NumValueWritesInLoops += ScopStats.NumValueWritesInLoops;
2692 NumPHIWrites += ScopStats.NumPHIWrites;
2693 NumPHIWritesInLoops += ScopStats.NumPHIWritesInLoops;
2694 NumSingletonWrites += ScopStats.NumSingletonWrites;
2695 NumSingletonWritesInLoops += ScopStats.NumSingletonWritesInLoops;
2696 }
2697
// Build the polyhedral description for region \p R if ScopDetection accepted
// it as a maximal SCoP. Being an analysis, this never modifies the IR and
// always returns false.
bool ScopInfoRegionPass::runOnRegion(Region *R, RGPassManager &RGM) {
  auto &SD = getAnalysis<ScopDetectionWrapperPass>().getSD();

  // Skip regions that are not the largest detected SCoP.
  if (!SD.isMaxRegionInScop(*R))
    return false;

  Function *F = R->getEntry()->getParent();
  auto &SE = getAnalysis<ScalarEvolutionWrapperPass>().getSE();
  auto &LI = getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
  auto &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
  auto const &DL = F->getParent()->getDataLayout();
  auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(*F);
  auto &ORE = getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();

  ScopBuilder SB(R, AC, AA, DL, DT, LI, SD, SE, ORE);
  S = SB.getScop(); // take ownership of scop object

  // Statistics are only gathered in asserts/stats-enabled builds; building a
  // Scop may fail, so S can be null here.
#if !defined(NDEBUG) || defined(LLVM_ENABLE_STATS)
  if (S) {
    ScopDetection::LoopStats Stats =
        ScopDetection::countBeneficialLoops(&S->getRegion(), SE, LI, 0);
    updateLoopCountStatistic(Stats, S->getStatistics());
  }
#endif

  return false;
}
2726
print(raw_ostream & OS,const Module *) const2727 void ScopInfoRegionPass::print(raw_ostream &OS, const Module *) const {
2728 if (S)
2729 S->print(OS, PollyPrintInstructions);
2730 else
2731 OS << "Invalid Scop!\n";
2732 }
2733
// Unique pass identifier; the legacy pass manager keys passes by its address.
char ScopInfoRegionPass::ID = 0;
2735
// Factory for the legacy per-region ScopInfo pass.
Pass *polly::createScopInfoRegionPassPass() { return new ScopInfoRegionPass(); }
2737
// Register ScopInfoRegionPass ("polly-scops") and its analysis dependencies
// with the legacy pass registry.
INITIALIZE_PASS_BEGIN(ScopInfoRegionPass, "polly-scops",
                      "Polly - Create polyhedral description of Scops", false,
                      false);
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass);
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker);
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass);
INITIALIZE_PASS_DEPENDENCY(RegionInfoPass);
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass);
INITIALIZE_PASS_DEPENDENCY(ScopDetectionWrapperPass);
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass);
INITIALIZE_PASS_END(ScopInfoRegionPass, "polly-scops",
                    "Polly - Create polyhedral description of Scops", false,
                    false)
2751
2752 //===----------------------------------------------------------------------===//
// Construct the function-wide ScopInfo result: stores references to all
// analyses ScopBuilder needs and immediately builds the Scops via recompute().
ScopInfo::ScopInfo(const DataLayout &DL, ScopDetection &SD, ScalarEvolution &SE,
                   LoopInfo &LI, AliasAnalysis &AA, DominatorTree &DT,
                   AssumptionCache &AC, OptimizationRemarkEmitter &ORE)
    : DL(DL), SD(SD), SE(SE), LI(LI), AA(AA), DT(DT), AC(AC), ORE(ORE) {
  recompute();
}
2759
// Rebuild the region-to-Scop map from scratch: create a polyhedral
// description for every maximal valid region reported by ScopDetection.
void ScopInfo::recompute() {
  RegionToScopMap.clear();
  for (auto &It : SD) {
    Region *R = const_cast<Region *>(It);
    // Only the largest detected region of a SCoP is modeled.
    if (!SD.isMaxRegionInScop(*R))
      continue;

    ScopBuilder SB(R, AC, AA, DL, DT, LI, SD, SE, ORE);
    std::unique_ptr<Scop> S = SB.getScop();
    // Building can fail even for detected regions; skip those.
    if (!S)
      continue;
#if !defined(NDEBUG) || defined(LLVM_ENABLE_STATS)
    ScopDetection::LoopStats Stats =
        ScopDetection::countBeneficialLoops(&S->getRegion(), SE, LI, 0);
    updateLoopCountStatistic(Stats, S->getStatistics());
#endif
    bool Inserted = RegionToScopMap.insert({R, std::move(S)}).second;
    assert(Inserted && "Building Scop for the same region twice!");
    (void)Inserted;
  }
}
2783
// New-PM invalidation hook: the result is stale unless this analysis (or all
// function analyses) was preserved AND none of the analyses it holds
// references to was invalidated.
bool ScopInfo::invalidate(Function &F, const PreservedAnalyses &PA,
                          FunctionAnalysisManager::Invalidator &Inv) {
  // Check whether the analysis, all analyses on functions have been preserved
  // or anything we're holding references to is being invalidated
  auto PAC = PA.getChecker<ScopInfoAnalysis>();
  return !(PAC.preserved() || PAC.preservedSet<AllAnalysesOn<Function>>()) ||
         Inv.invalidate<ScopAnalysis>(F, PA) ||
         Inv.invalidate<ScalarEvolutionAnalysis>(F, PA) ||
         Inv.invalidate<LoopAnalysis>(F, PA) ||
         Inv.invalidate<AAManager>(F, PA) ||
         Inv.invalidate<DominatorTreeAnalysis>(F, PA) ||
         Inv.invalidate<AssumptionAnalysis>(F, PA);
}
2797
// Unique key identifying ScopInfoAnalysis in the new pass manager.
AnalysisKey ScopInfoAnalysis::Key;
2799
// New-PM entry point: gather all required analyses from the manager and
// build the ScopInfo result for function \p F.
ScopInfoAnalysis::Result ScopInfoAnalysis::run(Function &F,
                                               FunctionAnalysisManager &FAM) {
  auto &SD = FAM.getResult<ScopAnalysis>(F);
  auto &SE = FAM.getResult<ScalarEvolutionAnalysis>(F);
  auto &LI = FAM.getResult<LoopAnalysis>(F);
  auto &AA = FAM.getResult<AAManager>(F);
  auto &DT = FAM.getResult<DominatorTreeAnalysis>(F);
  auto &AC = FAM.getResult<AssumptionAnalysis>(F);
  auto &DL = F.getParent()->getDataLayout();
  auto &ORE = FAM.getResult<OptimizationRemarkEmitterAnalysis>(F);
  // Constructs ScopInfo in place, which triggers recompute().
  return {DL, SD, SE, LI, AA, DT, AC, ORE};
}
2812
run(Function & F,FunctionAnalysisManager & FAM)2813 PreservedAnalyses ScopInfoPrinterPass::run(Function &F,
2814 FunctionAnalysisManager &FAM) {
2815 auto &SI = FAM.getResult<ScopInfoAnalysis>(F);
2816 // Since the legacy PM processes Scops in bottom up, we print them in reverse
2817 // order here to keep the output persistent
2818 for (auto &It : reverse(SI)) {
2819 if (It.second)
2820 It.second->print(Stream, PollyPrintInstructions);
2821 else
2822 Stream << "Invalid Scop!\n";
2823 }
2824 return PreservedAnalyses::all();
2825 }
2826
// Declare the analyses needed to build Scops for a whole function; mirrors
// ScopInfoRegionPass::getAnalysisUsage. The pass preserves everything.
void ScopInfoWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<LoopInfoWrapperPass>();
  AU.addRequired<RegionInfoPass>();
  AU.addRequired<DominatorTreeWrapperPass>();
  AU.addRequiredTransitive<ScalarEvolutionWrapperPass>();
  AU.addRequiredTransitive<ScopDetectionWrapperPass>();
  AU.addRequired<AAResultsWrapperPass>();
  AU.addRequired<AssumptionCacheTracker>();
  AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
  AU.setPreservesAll();
}
2838
// Legacy-PM entry point: collect the required analyses and build the
// function-wide ScopInfo result (its constructor runs recompute()). Never
// modifies the IR, hence returns false.
bool ScopInfoWrapperPass::runOnFunction(Function &F) {
  auto &SD = getAnalysis<ScopDetectionWrapperPass>().getSD();
  auto &SE = getAnalysis<ScalarEvolutionWrapperPass>().getSE();
  auto &LI = getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
  auto &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
  auto const &DL = F.getParent()->getDataLayout();
  auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
  auto &ORE = getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();

  Result.reset(new ScopInfo{DL, SD, SE, LI, AA, DT, AC, ORE});
  return false;
}
2852
print(raw_ostream & OS,const Module *) const2853 void ScopInfoWrapperPass::print(raw_ostream &OS, const Module *) const {
2854 for (auto &It : *Result) {
2855 if (It.second)
2856 It.second->print(OS, PollyPrintInstructions);
2857 else
2858 OS << "Invalid Scop!\n";
2859 }
2860 }
2861
// Unique pass identifier; the legacy pass manager keys passes by its address.
char ScopInfoWrapperPass::ID = 0;
2863
// Factory for the legacy function-wide ScopInfo pass.
Pass *polly::createScopInfoWrapperPassPass() {
  return new ScopInfoWrapperPass();
}
2867
// Register ScopInfoWrapperPass ("polly-function-scops") and its analysis
// dependencies with the legacy pass registry.
INITIALIZE_PASS_BEGIN(
    ScopInfoWrapperPass, "polly-function-scops",
    "Polly - Create polyhedral description of all Scops of a function", false,
    false);
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass);
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker);
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass);
INITIALIZE_PASS_DEPENDENCY(RegionInfoPass);
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass);
INITIALIZE_PASS_DEPENDENCY(ScopDetectionWrapperPass);
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass);
INITIALIZE_PASS_END(
    ScopInfoWrapperPass, "polly-function-scops",
    "Polly - Create polyhedral description of all Scops of a function", false,
    false)
2883