1 //===--- BlockGenerators.cpp - Generate code for statements -----*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements the BlockGenerator and VectorBlockGenerator classes,
10 // which generate sequential code and vectorized code for a polyhedral
11 // statement, respectively.
12 //
13 //===----------------------------------------------------------------------===//
14
15 #include "polly/CodeGen/BlockGenerators.h"
16 #include "polly/CodeGen/IslExprBuilder.h"
17 #include "polly/CodeGen/RuntimeDebugBuilder.h"
18 #include "polly/Options.h"
19 #include "polly/ScopInfo.h"
20 #include "polly/Support/ScopHelper.h"
21 #include "polly/Support/VirtualInstruction.h"
22 #include "llvm/Analysis/LoopInfo.h"
23 #include "llvm/Analysis/RegionInfo.h"
24 #include "llvm/Analysis/ScalarEvolution.h"
25 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
26 #include "llvm/Transforms/Utils/Local.h"
27 #include "isl/ast.h"
28 #include <deque>
29
30 using namespace llvm;
31 using namespace polly;
32
33 static cl::opt<bool> Aligned("enable-polly-aligned",
34 cl::desc("Assumed aligned memory accesses."),
35 cl::Hidden, cl::init(false), cl::ZeroOrMore,
36 cl::cat(PollyCategory));
37
38 bool PollyDebugPrinting;
39 static cl::opt<bool, true> DebugPrintingX(
40 "polly-codegen-add-debug-printing",
41 cl::desc("Add printf calls that show the values loaded/stored."),
42 cl::location(PollyDebugPrinting), cl::Hidden, cl::init(false),
43 cl::ZeroOrMore, cl::cat(PollyCategory));
44
45 static cl::opt<bool> TraceStmts(
46 "polly-codegen-trace-stmts",
47 cl::desc("Add printf calls that print the statement being executed"),
48 cl::Hidden, cl::init(false), cl::ZeroOrMore, cl::cat(PollyCategory));
49
50 static cl::opt<bool> TraceScalars(
51 "polly-codegen-trace-scalars",
52 cl::desc("Add printf calls that print the values of all scalar values "
53 "used in a statement. Requires -polly-codegen-trace-stmts."),
54 cl::Hidden, cl::init(false), cl::ZeroOrMore, cl::cat(PollyCategory));
55
56 BlockGenerator::BlockGenerator(
57 PollyIRBuilder &B, LoopInfo &LI, ScalarEvolution &SE, DominatorTree &DT,
58 AllocaMapTy &ScalarMap, EscapeUsersAllocaMapTy &EscapeMap,
59 ValueMapT &GlobalMap, IslExprBuilder *ExprBuilder, BasicBlock *StartBlock)
60 : Builder(B), LI(LI), SE(SE), ExprBuilder(ExprBuilder), DT(DT),
61 EntryBB(nullptr), ScalarMap(ScalarMap), EscapeMap(EscapeMap),
62 GlobalMap(GlobalMap), StartBlock(StartBlock) {}
63
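// Try to synthesize a new value for Old at the current insert point: compute
// its SCEV at the loop's scope, rewrite loop add-recs according to LTS, and
// expand the resulting expression. Returns nullptr if Old is not SCEVable or
// its SCEV cannot be computed.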
64 Value *BlockGenerator::trySynthesizeNewValue(ScopStmt &Stmt, Value *Old,
65 ValueMapT &BBMap,
66 LoopToScevMapT &LTS,
67 Loop *L) const {
68 if (!SE.isSCEVable(Old->getType()))
69 return nullptr;
70
71 const SCEV *Scev = SE.getSCEVAtScope(Old, L);
72 if (!Scev)
73 return nullptr;
74
75 if (isa<SCEVCouldNotCompute>(Scev))
76 return nullptr;
77
78 const SCEV *NewScev = SCEVLoopAddRecRewriter::rewrite(Scev, LTS, SE);
79 ValueMapT VTV;
80 VTV.insert(BBMap.begin(), BBMap.end());
81 VTV.insert(GlobalMap.begin(), GlobalMap.end());
82
83 Scop &S = *Stmt.getParent();
84 const DataLayout &DL = S.getFunction().getParent()->getDataLayout();
85 auto IP = Builder.GetInsertPoint();
86
87 assert(IP != Builder.GetInsertBlock()->end() &&
88 "Only instructions can be insert points for SCEVExpander");
89 Value *Expanded =
90 expandCodeFor(S, SE, DL, "polly", NewScev, Old->getType(), &*IP, &VTV,
91 StartBlock->getSinglePredecessor());
92
93 BBMap[Old] = Expanded;
94 return Expanded;
95 }
96
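// Return the value to use for Old inside the generated statement copy.
// Depending on the kind of virtual use, the value is looked up in BBMap or
// GlobalMap, or synthesized via trySynthesizeNewValue.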
97 Value *BlockGenerator::getNewValue(ScopStmt &Stmt, Value *Old, ValueMapT &BBMap,
98 LoopToScevMapT &LTS, Loop *L) const {
99
100 auto lookupGlobally = [this](Value *Old) -> Value * {
101 Value *New = GlobalMap.lookup(Old);
102 if (!New)
103 return nullptr;
104
105 // Required by:
106 // * Isl/CodeGen/OpenMP/invariant_base_pointer_preloaded.ll
107 // * Isl/CodeGen/OpenMP/invariant_base_pointer_preloaded_different_bb.ll
108 // * Isl/CodeGen/OpenMP/invariant_base_pointer_preloaded_pass_only_needed.ll
109 // * Isl/CodeGen/OpenMP/invariant_base_pointers_preloaded.ll
110 // * Isl/CodeGen/OpenMP/loop-body-references-outer-values-3.ll
111 // * Isl/CodeGen/OpenMP/single_loop_with_loop_invariant_baseptr.ll
112 // GlobalMap should be a mapping from (value in original SCoP) to (copied
113 // value in generated SCoP), without intermediate mappings, which might
114 // easily require transitiveness as well.
115 if (Value *NewRemapped = GlobalMap.lookup(New))
116 New = NewRemapped;
117
118 // No test case for this code.
119 if (Old->getType()->getScalarSizeInBits() <
120 New->getType()->getScalarSizeInBits())
121 New = Builder.CreateTruncOrBitCast(New, Old->getType());
122
123 return New;
124 };
125
126 Value *New = nullptr;
127 auto VUse = VirtualUse::create(&Stmt, L, Old, true);
128 switch (VUse.getKind()) {
129 case VirtualUse::Block:
130 // BasicBlocks are constants, but the BlockGenerator copies them.
131 New = BBMap.lookup(Old);
132 break;
133
134 case VirtualUse::Constant:
135 // Used by:
136 // * Isl/CodeGen/OpenMP/reference-argument-from-non-affine-region.ll
137 // Constants should not be redefined. In this case, the GlobalMap just
138 // contains a mapping to the same constant, which is unnecessary, but
139 // harmless.
140 if ((New = lookupGlobally(Old)))
141 break;
142
143 assert(!BBMap.count(Old));
144 New = Old;
145 break;
146
147 case VirtualUse::ReadOnly:
148 assert(!GlobalMap.count(Old));
149
150 // Required for:
151 // * Isl/CodeGen/MemAccess/create_arrays.ll
152 // * Isl/CodeGen/read-only-scalars.ll
153 // * ScheduleOptimizer/pattern-matching-based-opts_10.ll
154 // For some reason these reload a read-only value. The reloaded value ends
155 // up in BBMap, but its value should be identical.
156 //
157 // Required for:
158 // * Isl/CodeGen/OpenMP/single_loop_with_param.ll
159 // The parallel subfunctions need to reference the read-only value from the
160 // parent function, this is done by reloading them locally.
161 if ((New = BBMap.lookup(Old)))
162 break;
163
164 New = Old;
165 break;
166
167 case VirtualUse::Synthesizable:
168 // Used by:
169 // * Isl/CodeGen/OpenMP/loop-body-references-outer-values-3.ll
170 // * Isl/CodeGen/OpenMP/recomputed-srem.ll
171 // * Isl/CodeGen/OpenMP/reference-other-bb.ll
172 // * Isl/CodeGen/OpenMP/two-parallel-loops-reference-outer-indvar.ll
173 // For some reason synthesizable values end up in GlobalMap. Their values
174 // are the same as trySynthesizeNewValue would return. The legacy
175 // implementation prioritized GlobalMap, so this is what we do here as well.
176 // Ideally, synthesizable values should not end up in GlobalMap.
177 if ((New = lookupGlobally(Old)))
178 break;
179
180 // Required for:
181 // * Isl/CodeGen/RuntimeDebugBuilder/combine_different_values.ll
182 // * Isl/CodeGen/getNumberOfIterations.ll
183 // * Isl/CodeGen/non_affine_float_compare.ll
184 // * ScheduleOptimizer/pattern-matching-based-opts_10.ll
185 // Ideally, synthesizable values are synthesized by trySynthesizeNewValue,
186 // not precomputed (SCEVExpander has its own caching mechanism).
187 // These tests fail without this, but I think trySynthesizeNewValue would
188 // just re-synthesize the same instructions.
189 if ((New = BBMap.lookup(Old)))
190 break;
191
192 New = trySynthesizeNewValue(Stmt, Old, BBMap, LTS, L);
193 break;
194
195 case VirtualUse::Hoisted:
196 // TODO: Hoisted invariant loads should be found in GlobalMap only, but not
197 // redefined locally (which will be ignored anyway). That is, the following
198 // assertion should apply: assert(!BBMap.count(Old))
199
200 New = lookupGlobally(Old);
201 break;
202
203 case VirtualUse::Intra:
204 case VirtualUse::Inter:
205 assert(!GlobalMap.count(Old) &&
206 "Intra and inter-stmt values are never global");
207 New = BBMap.lookup(Old);
208 break;
209 }
210 assert(New && "Unexpected scalar dependence in region!");
211 return New;
212 }
213
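// Create a scalar copy of Inst: clone it, remap all operands through
// getNewValue, insert the clone at the current insert point, and register it
// in BBMap. Debug intrinsics are skipped.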
214 void BlockGenerator::copyInstScalar(ScopStmt &Stmt, Instruction *Inst,
215 ValueMapT &BBMap, LoopToScevMapT &LTS) {
216 // We do not generate debug intrinsics as we did not investigate how to
217 // copy them correctly. In the current state, they just crash the code
218 // generation as the metadata operands are not correctly copied.
219 if (isa<DbgInfoIntrinsic>(Inst))
220 return;
221
222 Instruction *NewInst = Inst->clone();
223
224 // Replace old operands with the new ones.
225 for (Value *OldOperand : Inst->operands()) {
226 Value *NewOperand =
227 getNewValue(Stmt, OldOperand, BBMap, LTS, getLoopForStmt(Stmt));
228
229 if (!NewOperand) {
230 assert(!isa<StoreInst>(NewInst) &&
231 "Store instructions are always needed!");
232 NewInst->deleteValue();
233 return;
234 }
235
236 NewInst->replaceUsesOfWith(OldOperand, NewOperand);
237 }
238
239 Builder.Insert(NewInst);
240 BBMap[Inst] = NewInst;
241
242 // When copying the instruction onto the Module meant for the GPU,
243 // debug metadata attached to an instruction causes all related
244 // metadata to be pulled into the Module. This includes the DICompileUnit,
245 // which will not be listed in llvm.dbg.cu of the Module since the Module
246 // doesn't contain one. This fails the verification of the Module and the
247 // subsequent generation of the ASM string.
248 if (NewInst->getModule() != Inst->getModule())
249 NewInst->setDebugLoc(llvm::DebugLoc());
250
251 if (!NewInst->getType()->isVoidTy())
252 NewInst->setName("p_" + Inst->getName());
253 }
254
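// Compute the address accessed by Inst in the generated code. If an isl
// access expression is available in NewAccesses, it is code generated (and
// cast to the expected pointer type); otherwise the original pointer operand
// is remapped with getNewValue.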
255 Value *
256 BlockGenerator::generateLocationAccessed(ScopStmt &Stmt, MemAccInst Inst,
257 ValueMapT &BBMap, LoopToScevMapT &LTS,
258 isl_id_to_ast_expr *NewAccesses) {
259 const MemoryAccess &MA = Stmt.getArrayAccessFor(Inst);
260 return generateLocationAccessed(
261 Stmt, getLoopForStmt(Stmt),
262 Inst.isNull() ? nullptr : Inst.getPointerOperand(), BBMap, LTS,
263 NewAccesses, MA.getId().release(), MA.getAccessValue()->getType());
264 }
265
266 Value *BlockGenerator::generateLocationAccessed(
267 ScopStmt &Stmt, Loop *L, Value *Pointer, ValueMapT &BBMap,
268 LoopToScevMapT &LTS, isl_id_to_ast_expr *NewAccesses, __isl_take isl_id *Id,
269 Type *ExpectedType) {
270 isl_ast_expr *AccessExpr = isl_id_to_ast_expr_get(NewAccesses, Id);
271
272 if (AccessExpr) {
273 AccessExpr = isl_ast_expr_address_of(AccessExpr);
274 auto Address = ExprBuilder->create(AccessExpr);
275
276 // Cast the address of this memory access to a pointer type that has the
277 // same element type as the original access, but uses the address space of
278 // the newly generated pointer.
279 auto OldPtrTy = ExpectedType->getPointerTo();
280 auto NewPtrTy = Address->getType();
281 OldPtrTy = PointerType::getWithSamePointeeType(
282 OldPtrTy, NewPtrTy->getPointerAddressSpace());
283
284 if (OldPtrTy != NewPtrTy)
285 Address = Builder.CreateBitOrPointerCast(Address, OldPtrTy);
286 return Address;
287 }
288 assert(
289 Pointer &&
290 "If expression was not generated, must use the original pointer value");
291 return getNewValue(Stmt, Pointer, BBMap, LTS, L);
292 }
293
294 Value *
295 BlockGenerator::getImplicitAddress(MemoryAccess &Access, Loop *L,
296 LoopToScevMapT &LTS, ValueMapT &BBMap,
297 __isl_keep isl_id_to_ast_expr *NewAccesses) {
298 if (Access.isLatestArrayKind())
299 return generateLocationAccessed(*Access.getStatement(), L, nullptr, BBMap,
300 LTS, NewAccesses, Access.getId().release(),
301 Access.getAccessValue()->getType());
302
303 return getOrCreateAlloca(Access);
304 }
305
306 Loop *BlockGenerator::getLoopForStmt(const ScopStmt &Stmt) const {
307 auto *StmtBB = Stmt.getEntryBlock();
308 return LI.getLoopFor(StmtBB);
309 }
310
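// Generate the scalar load for an array access. A value preloaded by
// invariant load hoisting is reused from GlobalMap instead of reloading it.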
311 Value *BlockGenerator::generateArrayLoad(ScopStmt &Stmt, LoadInst *Load,
312 ValueMapT &BBMap, LoopToScevMapT &LTS,
313 isl_id_to_ast_expr *NewAccesses) {
314 if (Value *PreloadLoad = GlobalMap.lookup(Load))
315 return PreloadLoad;
316
317 Value *NewPointer =
318 generateLocationAccessed(Stmt, Load, BBMap, LTS, NewAccesses);
319 Value *ScalarLoad =
320 Builder.CreateAlignedLoad(Load->getType(), NewPointer, Load->getAlign(),
321 Load->getName() + "_p_scalar_");
322
323 if (PollyDebugPrinting)
324 RuntimeDebugBuilder::createCPUPrinter(Builder, "Load from ", NewPointer,
325 ": ", ScalarLoad, "\n");
326
327 return ScalarLoad;
328 }
329
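// Generate the store for an array access. The store is wrapped in a
// conditional if the access relation covers only a subset of the statement's
// domain (a partial write).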
330 void BlockGenerator::generateArrayStore(ScopStmt &Stmt, StoreInst *Store,
331 ValueMapT &BBMap, LoopToScevMapT &LTS,
332 isl_id_to_ast_expr *NewAccesses) {
333 MemoryAccess &MA = Stmt.getArrayAccessFor(Store);
334 isl::set AccDom = MA.getAccessRelation().domain();
335 std::string Subject = MA.getId().get_name();
336
337 generateConditionalExecution(Stmt, AccDom, Subject.c_str(), [&, this]() {
338 Value *NewPointer =
339 generateLocationAccessed(Stmt, Store, BBMap, LTS, NewAccesses);
340 Value *ValueOperand = getNewValue(Stmt, Store->getValueOperand(), BBMap,
341 LTS, getLoopForStmt(Stmt));
342
343 if (PollyDebugPrinting)
344 RuntimeDebugBuilder::createCPUPrinter(Builder, "Store to ", NewPointer,
345 ": ", ValueOperand, "\n");
346
347 Builder.CreateAlignedStore(ValueOperand, NewPointer, Store->getAlign());
348 });
349 }
350
351 bool BlockGenerator::canSyntheziseInStmt(ScopStmt &Stmt, Instruction *Inst) {
352 Loop *L = getLoopForStmt(Stmt);
353 return (Stmt.isBlockStmt() || !Stmt.getRegion()->contains(L)) &&
354 canSynthesize(Inst, *Stmt.getParent(), &SE, L);
355 }
356
357 void BlockGenerator::copyInstruction(ScopStmt &Stmt, Instruction *Inst,
358 ValueMapT &BBMap, LoopToScevMapT &LTS,
359 isl_id_to_ast_expr *NewAccesses) {
360 // Terminator instructions define the control flow. They are explicitly
361 // expressed in the clast and do not need to be copied.
362 if (Inst->isTerminator())
363 return;
364
365 // Synthesizable statements will be generated on-demand.
366 if (canSyntheziseInStmt(Stmt, Inst))
367 return;
368
369 if (auto *Load = dyn_cast<LoadInst>(Inst)) {
370 Value *NewLoad = generateArrayLoad(Stmt, Load, BBMap, LTS, NewAccesses);
371 // Compute NewLoad before its insertion in BBMap to make the insertion
372 // deterministic.
373 BBMap[Load] = NewLoad;
374 return;
375 }
376
377 if (auto *Store = dyn_cast<StoreInst>(Inst)) {
378 // Identified as redundant by -polly-simplify.
379 if (!Stmt.getArrayAccessOrNULLFor(Store))
380 return;
381
382 generateArrayStore(Stmt, Store, BBMap, LTS, NewAccesses);
383 return;
384 }
385
386 if (auto *PHI = dyn_cast<PHINode>(Inst)) {
387 copyPHIInstruction(Stmt, PHI, BBMap, LTS);
388 return;
389 }
390
391 // Skip some special intrinsics for which we do not adjust the semantics to
392 // the new schedule. All others are handled like every other instruction.
393 if (isIgnoredIntrinsic(Inst))
394 return;
395
396 copyInstScalar(Stmt, Inst, BBMap, LTS);
397 }
398
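// Erase trivially dead instructions from the just-generated block and drop
// their entries from BBMap.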
399 void BlockGenerator::removeDeadInstructions(BasicBlock *BB, ValueMapT &BBMap) {
400 auto NewBB = Builder.GetInsertBlock();
401 for (auto I = NewBB->rbegin(); I != NewBB->rend(); I++) {
402 Instruction *NewInst = &*I;
403
404 if (!isInstructionTriviallyDead(NewInst))
405 continue;
406
407 for (auto Pair : BBMap)
408 if (Pair.second == NewInst) {
409 BBMap.erase(Pair.first);
410 }
411
412 NewInst->eraseFromParent();
413 I = NewBB->rbegin();
414 }
415 }
416
417 void BlockGenerator::copyStmt(ScopStmt &Stmt, LoopToScevMapT &LTS,
418 isl_id_to_ast_expr *NewAccesses) {
419 assert(Stmt.isBlockStmt() &&
420 "Only block statements can be copied by the block generator");
421
422 ValueMapT BBMap;
423
424 BasicBlock *BB = Stmt.getBasicBlock();
425 copyBB(Stmt, BB, BBMap, LTS, NewAccesses);
426 removeDeadInstructions(BB, BBMap);
427 }
428
429 BasicBlock *BlockGenerator::splitBB(BasicBlock *BB) {
430 BasicBlock *CopyBB = SplitBlock(Builder.GetInsertBlock(),
431 &*Builder.GetInsertPoint(), &DT, &LI);
432 CopyBB->setName("polly.stmt." + BB->getName());
433 return CopyBB;
434 }
435
436 BasicBlock *BlockGenerator::copyBB(ScopStmt &Stmt, BasicBlock *BB,
437 ValueMapT &BBMap, LoopToScevMapT &LTS,
438 isl_id_to_ast_expr *NewAccesses) {
439 BasicBlock *CopyBB = splitBB(BB);
440 Builder.SetInsertPoint(&CopyBB->front());
441 generateScalarLoads(Stmt, LTS, BBMap, NewAccesses);
442 generateBeginStmtTrace(Stmt, LTS, BBMap);
443
444 copyBB(Stmt, BB, CopyBB, BBMap, LTS, NewAccesses);
445
446 // After the basic block has been copied, store all scalars that escape this
447 // block in their allocas.
448 generateScalarStores(Stmt, LTS, BBMap, NewAccesses);
449 return CopyBB;
450 }
451
452 void BlockGenerator::copyBB(ScopStmt &Stmt, BasicBlock *BB, BasicBlock *CopyBB,
453 ValueMapT &BBMap, LoopToScevMapT &LTS,
454 isl_id_to_ast_expr *NewAccesses) {
455 EntryBB = &CopyBB->getParent()->getEntryBlock();
456
457 // Block statements and the entry blocks of region statements are code
458 // generated from instruction lists. This allows us to optimize the
459 // instructions that belong to a certain scop statement. As the code
460 // structure of region statements might be arbitrarily complex, optimizing the
461 // instruction list is not yet supported.
462 if (Stmt.isBlockStmt() || (Stmt.isRegionStmt() && Stmt.getEntryBlock() == BB))
463 for (Instruction *Inst : Stmt.getInstructions())
464 copyInstruction(Stmt, Inst, BBMap, LTS, NewAccesses);
465 else
466 for (Instruction &Inst : *BB)
467 copyInstruction(Stmt, &Inst, BBMap, LTS, NewAccesses);
468 }
469
470 Value *BlockGenerator::getOrCreateAlloca(const MemoryAccess &Access) {
471 assert(!Access.isLatestArrayKind() && "Trying to get alloca for array kind");
472
473 return getOrCreateAlloca(Access.getLatestScopArrayInfo());
474 }
475
476 Value *BlockGenerator::getOrCreateAlloca(const ScopArrayInfo *Array) {
477 assert(!Array->isArrayKind() && "Trying to get alloca for array kind");
478
479 auto &Addr = ScalarMap[Array];
480
481 if (Addr) {
482 // Allow allocas to be (temporarily) redirected once by adding a new
483 // old-alloca-addr to new-addr mapping to GlobalMap. This functionality
484 // is used for example by the OpenMP code generation where a first use
485 // of a scalar while still in the host code allocates a normal alloca with
486 // getOrCreateAlloca. When the values of this scalar are accessed during
487 // the generation of the parallel subfunction, these values are copied over
488 // to the parallel subfunction and each request for a scalar alloca slot
489 // must be forwarded to the temporary in-subfunction slot. This mapping is
490 // removed when the subfunction has been generated and again normal host
491 // code is generated. Due to the following reasons it is not possible to
492 // perform the GlobalMap lookup right after creating the alloca below, but
493 // instead we need to check GlobalMap at each call to getOrCreateAlloca:
494 //
495 // 1) GlobalMap may be changed multiple times (for each parallel loop),
496 // 2) The temporary mapping is commonly only known after the initial
497 // alloca has already been generated, and
498 // 3) The original alloca value must be restored after leaving the
499 // sub-function.
500 if (Value *NewAddr = GlobalMap.lookup(&*Addr))
501 return NewAddr;
502 return Addr;
503 }
504
505 Type *Ty = Array->getElementType();
506 Value *ScalarBase = Array->getBasePtr();
507 std::string NameExt;
508 if (Array->isPHIKind())
509 NameExt = ".phiops";
510 else
511 NameExt = ".s2a";
512
513 const DataLayout &DL = Builder.GetInsertBlock()->getModule()->getDataLayout();
514
515 Addr =
516 new AllocaInst(Ty, DL.getAllocaAddrSpace(), nullptr,
517 DL.getPrefTypeAlign(Ty), ScalarBase->getName() + NameExt);
518 EntryBB = &Builder.GetInsertBlock()->getParent()->getEntryBlock();
519 Addr->insertBefore(&*EntryBB->getFirstInsertionPt());
520
521 return Addr;
522 }
523
524 void BlockGenerator::handleOutsideUsers(const Scop &S, ScopArrayInfo *Array) {
525 Instruction *Inst = cast<Instruction>(Array->getBasePtr());
526
527 // If there are escape users we get the alloca for this instruction and put it
528 // in the EscapeMap for later finalization. If the instruction was copied
529 // multiple times we already did this and can exit early.
530 if (EscapeMap.count(Inst))
531 return;
532
533 EscapeUserVectorTy EscapeUsers;
534 for (User *U : Inst->users()) {
535
536 // Non-instruction users will never escape.
537 Instruction *UI = dyn_cast<Instruction>(U);
538 if (!UI)
539 continue;
540
541 if (S.contains(UI))
542 continue;
543
544 EscapeUsers.push_back(UI);
545 }
546
547 // Exit if no escape uses were found.
548 if (EscapeUsers.empty())
549 return;
550
551 // Get or create an escape alloca for this instruction.
552 auto *ScalarAddr = getOrCreateAlloca(Array);
553
554 // Remember that this instruction has escape uses and the escape alloca.
555 EscapeMap[Inst] = std::make_pair(ScalarAddr, std::move(EscapeUsers));
556 }
557
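// Reload all scalar (non-array) values read by the statement from their
// demoted memory locations and make the reloaded values available in BBMap.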
558 void BlockGenerator::generateScalarLoads(
559 ScopStmt &Stmt, LoopToScevMapT &LTS, ValueMapT &BBMap,
560 __isl_keep isl_id_to_ast_expr *NewAccesses) {
561 for (MemoryAccess *MA : Stmt) {
562 if (MA->isOriginalArrayKind() || MA->isWrite())
563 continue;
564
565 #ifndef NDEBUG
566 auto StmtDom =
567 Stmt.getDomain().intersect_params(Stmt.getParent()->getContext());
568 auto AccDom = MA->getAccessRelation().domain();
569 assert(!StmtDom.is_subset(AccDom).is_false() &&
570 "Scalar must be loaded in all statement instances");
571 #endif
572
573 auto *Address =
574 getImplicitAddress(*MA, getLoopForStmt(Stmt), LTS, BBMap, NewAccesses);
575 assert((!isa<Instruction>(Address) ||
576 DT.dominates(cast<Instruction>(Address)->getParent(),
577 Builder.GetInsertBlock())) &&
578 "Domination violation");
579 BBMap[MA->getAccessValue()] = Builder.CreateLoad(
580 MA->getElementType(), Address, Address->getName() + ".reload");
581 }
582 }
583
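// Build an i1 condition that is true for exactly those statement instances
// whose scheduled iteration lies in Subdomain.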
584 Value *BlockGenerator::buildContainsCondition(ScopStmt &Stmt,
585 const isl::set &Subdomain) {
586 isl::ast_build AstBuild = Stmt.getAstBuild();
587 isl::set Domain = Stmt.getDomain();
588
589 isl::union_map USchedule = AstBuild.get_schedule();
590 USchedule = USchedule.intersect_domain(Domain);
591
592 assert(!USchedule.is_empty());
593 isl::map Schedule = isl::map::from_union_map(USchedule);
594
595 isl::set ScheduledDomain = Schedule.range();
596 isl::set ScheduledSet = Subdomain.apply(Schedule);
597
598 isl::ast_build RestrictedBuild = AstBuild.restrict(ScheduledDomain);
599
600 isl::ast_expr IsInSet = RestrictedBuild.expr_from(ScheduledSet);
601 Value *IsInSetExpr = ExprBuilder->create(IsInSet.copy());
602 IsInSetExpr = Builder.CreateICmpNE(
603 IsInSetExpr, ConstantInt::get(IsInSetExpr->getType(), 0));
604
605 return IsInSetExpr;
606 }
607
608 void BlockGenerator::generateConditionalExecution(
609 ScopStmt &Stmt, const isl::set &Subdomain, StringRef Subject,
610 const std::function<void()> &GenThenFunc) {
611 isl::set StmtDom = Stmt.getDomain();
612
613 // If the condition is a tautology, don't generate a condition around the
614 // code.
615 bool IsPartialWrite =
616 !StmtDom.intersect_params(Stmt.getParent()->getContext())
617 .is_subset(Subdomain);
618 if (!IsPartialWrite) {
619 GenThenFunc();
620 return;
621 }
622
623 // Generate the condition.
624 Value *Cond = buildContainsCondition(Stmt, Subdomain);
625
626 // Don't call GenThenFunc if it is never executed. An ast index expression
627 // might not be defined in this case.
628 if (auto *Const = dyn_cast<ConstantInt>(Cond))
629 if (Const->isZero())
630 return;
631
632 BasicBlock *HeadBlock = Builder.GetInsertBlock();
633 StringRef BlockName = HeadBlock->getName();
634
635 // Generate the conditional block.
636 SplitBlockAndInsertIfThen(Cond, &*Builder.GetInsertPoint(), false, nullptr,
637 &DT, &LI);
638 BranchInst *Branch = cast<BranchInst>(HeadBlock->getTerminator());
639 BasicBlock *ThenBlock = Branch->getSuccessor(0);
640 BasicBlock *TailBlock = Branch->getSuccessor(1);
641
642 // Assign descriptive names.
643 if (auto *CondInst = dyn_cast<Instruction>(Cond))
644 CondInst->setName("polly." + Subject + ".cond");
645 ThenBlock->setName(BlockName + "." + Subject + ".partial");
646 TailBlock->setName(BlockName + ".cont");
647
648 // Put the client code into the conditional block and continue in the merge
649 // block afterwards.
650 Builder.SetInsertPoint(ThenBlock, ThenBlock->getFirstInsertionPt());
651 GenThenFunc();
652 Builder.SetInsertPoint(TailBlock, TailBlock->getFirstInsertionPt());
653 }
654
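// Print Val as it would appear as an operand (e.g. "%add") and return the
// resulting string.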
655 static std::string getInstName(Value *Val) {
656 std::string Result;
657 raw_string_ostream OS(Result);
658 Val->printAsOperand(OS, false);
659 return OS.str();
660 }
661
662 void BlockGenerator::generateBeginStmtTrace(ScopStmt &Stmt, LoopToScevMapT &LTS,
663 ValueMapT &BBMap) {
664 if (!TraceStmts)
665 return;
666
667 Scop *S = Stmt.getParent();
668 const char *BaseName = Stmt.getBaseName();
669
670 isl::ast_build AstBuild = Stmt.getAstBuild();
671 isl::set Domain = Stmt.getDomain();
672
673 isl::union_map USchedule = AstBuild.get_schedule().intersect_domain(Domain);
674 isl::map Schedule = isl::map::from_union_map(USchedule);
675 assert(Schedule.is_empty().is_false() &&
676 "The stmt must have a valid instance");
677
678 isl::multi_pw_aff ScheduleMultiPwAff =
679 isl::pw_multi_aff::from_map(Schedule.reverse());
680 isl::ast_build RestrictedBuild = AstBuild.restrict(Schedule.range());
681
682 // Sequence of strings to print.
683 SmallVector<llvm::Value *, 8> Values;
684
685 // Print the name of the statement.
686 // TODO: Indent by the depth of the statement instance in the schedule tree.
687 Values.push_back(RuntimeDebugBuilder::getPrintableString(Builder, BaseName));
688 Values.push_back(RuntimeDebugBuilder::getPrintableString(Builder, "("));
689
690 // Add the coordinate of the statement instance.
691 int DomDims = ScheduleMultiPwAff.dim(isl::dim::out);
692 for (int i = 0; i < DomDims; i += 1) {
693 if (i > 0)
694 Values.push_back(RuntimeDebugBuilder::getPrintableString(Builder, ","));
695
696 isl::ast_expr IsInSet =
697 RestrictedBuild.expr_from(ScheduleMultiPwAff.get_pw_aff(i));
698 Values.push_back(ExprBuilder->create(IsInSet.copy()));
699 }
700
701 if (TraceScalars) {
702 Values.push_back(RuntimeDebugBuilder::getPrintableString(Builder, ")"));
703 DenseSet<Instruction *> Encountered;
704
705 // Add the value of each scalar (and the result of PHIs) used in the
706 // statement.
707 // TODO: Values used in region-statements.
708 for (Instruction *Inst : Stmt.insts()) {
709 if (!RuntimeDebugBuilder::isPrintable(Inst->getType()))
710 continue;
711
712 if (isa<PHINode>(Inst)) {
713 Values.push_back(RuntimeDebugBuilder::getPrintableString(Builder, " "));
714 Values.push_back(RuntimeDebugBuilder::getPrintableString(
715 Builder, getInstName(Inst)));
716 Values.push_back(RuntimeDebugBuilder::getPrintableString(Builder, "="));
717 Values.push_back(getNewValue(Stmt, Inst, BBMap, LTS,
718 LI.getLoopFor(Inst->getParent())));
719 } else {
720 for (Value *Op : Inst->operand_values()) {
721 // Do not print values that cannot change during the execution of the
722 // SCoP.
723 auto *OpInst = dyn_cast<Instruction>(Op);
724 if (!OpInst)
725 continue;
726 if (!S->contains(OpInst))
727 continue;
728
729 // Print each scalar at most once, and exclude values defined in the
730 // statement itself.
731 if (Encountered.count(OpInst))
732 continue;
733
734 Values.push_back(
735 RuntimeDebugBuilder::getPrintableString(Builder, " "));
736 Values.push_back(RuntimeDebugBuilder::getPrintableString(
737 Builder, getInstName(OpInst)));
738 Values.push_back(
739 RuntimeDebugBuilder::getPrintableString(Builder, "="));
740 Values.push_back(getNewValue(Stmt, OpInst, BBMap, LTS,
741 LI.getLoopFor(Inst->getParent())));
742 Encountered.insert(OpInst);
743 }
744 }
745
746 Encountered.insert(Inst);
747 }
748
749 Values.push_back(RuntimeDebugBuilder::getPrintableString(Builder, "\n"));
750 } else {
751 Values.push_back(RuntimeDebugBuilder::getPrintableString(Builder, ")\n"));
752 }
753
754 RuntimeDebugBuilder::createCPUPrinter(Builder, ArrayRef<Value *>(Values));
755 }
756
757 void BlockGenerator::generateScalarStores(
758 ScopStmt &Stmt, LoopToScevMapT &LTS, ValueMapT &BBMap,
759 __isl_keep isl_id_to_ast_expr *NewAccesses) {
760 Loop *L = LI.getLoopFor(Stmt.getBasicBlock());
761
762 assert(Stmt.isBlockStmt() &&
763 "Region statements need to use the generateScalarStores() function in "
764 "the RegionGenerator");
765
766 for (MemoryAccess *MA : Stmt) {
767 if (MA->isOriginalArrayKind() || MA->isRead())
768 continue;
769
770 isl::set AccDom = MA->getAccessRelation().domain();
771 std::string Subject = MA->getId().get_name();
772
773 generateConditionalExecution(
774 Stmt, AccDom, Subject.c_str(), [&, this, MA]() {
775 Value *Val = MA->getAccessValue();
776 if (MA->isAnyPHIKind()) {
777 assert(MA->getIncoming().size() >= 1 &&
778 "Block statements have exactly one exiting block, or "
779 "multiple but "
780 "with same incoming block and value");
781 assert(std::all_of(MA->getIncoming().begin(),
782 MA->getIncoming().end(),
783 [&](std::pair<BasicBlock *, Value *> p) -> bool {
784 return p.first == Stmt.getBasicBlock();
785 }) &&
786 "Incoming block must be statement's block");
787 Val = MA->getIncoming()[0].second;
788 }
789 auto Address = getImplicitAddress(*MA, getLoopForStmt(Stmt), LTS,
790 BBMap, NewAccesses);
791
792 Val = getNewValue(Stmt, Val, BBMap, LTS, L);
793 assert((!isa<Instruction>(Val) ||
794 DT.dominates(cast<Instruction>(Val)->getParent(),
795 Builder.GetInsertBlock())) &&
796 "Domination violation");
797 assert((!isa<Instruction>(Address) ||
798 DT.dominates(cast<Instruction>(Address)->getParent(),
799 Builder.GetInsertBlock())) &&
800 "Domination violation");
801
802 // The new Val might have a different type than the old Val due to
803 // ScalarEvolution looking through bitcasts.
804 Address = Builder.CreateBitOrPointerCast(
805 Address, Val->getType()->getPointerTo(
806 Address->getType()->getPointerAddressSpace()));
807
808 Builder.CreateStore(Val, Address);
809 });
810 }
811 }
812
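// Initialize the scalar allocas in the start block: for PHI-kind scalars
// store the value incoming from outside the region, for other scalars store
// their original (pre-SCoP) value.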
813 void BlockGenerator::createScalarInitialization(Scop &S) {
814 BasicBlock *ExitBB = S.getExit();
815 BasicBlock *PreEntryBB = S.getEnteringBlock();
816
817 Builder.SetInsertPoint(&*StartBlock->begin());
818
819 for (auto &Array : S.arrays()) {
820 if (Array->getNumberOfDimensions() != 0)
821 continue;
822 if (Array->isPHIKind()) {
823 // For PHI nodes, the only values we need to store are the ones that
824 // reach the PHI node from outside the region. In general there should
825 // only be one such incoming edge and this edge should enter through
826 // 'PreEntryBB'.
827 auto PHI = cast<PHINode>(Array->getBasePtr());
828
829 for (auto BI = PHI->block_begin(), BE = PHI->block_end(); BI != BE; BI++)
830 if (!S.contains(*BI) && *BI != PreEntryBB)
831 llvm_unreachable("Incoming edges from outside the scop should always "
832 "come from PreEntryBB");
833
834 int Idx = PHI->getBasicBlockIndex(PreEntryBB);
835 if (Idx < 0)
836 continue;
837
838 Value *ScalarValue = PHI->getIncomingValue(Idx);
839
840 Builder.CreateStore(ScalarValue, getOrCreateAlloca(Array));
841 continue;
842 }
843
844 auto *Inst = dyn_cast<Instruction>(Array->getBasePtr());
845
846 if (Inst && S.contains(Inst))
847 continue;
848
849 // PHI nodes that are not marked as such in their SAI object are either exit
850 // PHI nodes we model as common scalars but without initialization, or
851 // incoming PHI nodes that need to be initialized. Check if the former is the
852 // case for Inst and do not create and initialize memory if so.
853 if (auto *PHI = dyn_cast_or_null<PHINode>(Inst))
854 if (!S.hasSingleExitEdge() && PHI->getBasicBlockIndex(ExitBB) >= 0)
855 continue;
856
857 Builder.CreateStore(Array->getBasePtr(), getOrCreateAlloca(Array));
858 }
859 }
860
861 void BlockGenerator::createScalarFinalization(Scop &S) {
862 // The exit block of the __unoptimized__ region.
863 BasicBlock *ExitBB = S.getExitingBlock();
864 // The merge block __just after__ the region and the optimized region.
865 BasicBlock *MergeBB = S.getExit();
866
867 // The exit block of the __optimized__ region.
868 BasicBlock *OptExitBB = *(pred_begin(MergeBB));
869 if (OptExitBB == ExitBB)
870 OptExitBB = *(++pred_begin(MergeBB));
871
872 Builder.SetInsertPoint(OptExitBB->getTerminator());
873 for (const auto &EscapeMapping : EscapeMap) {
874 // Extract the escaping instruction and the escaping users as well as the
875 // alloca the instruction was demoted to.
876 Instruction *EscapeInst = EscapeMapping.first;
877 const auto &EscapeMappingValue = EscapeMapping.second;
878 const EscapeUserVectorTy &EscapeUsers = EscapeMappingValue.second;
879 auto *ScalarAddr = cast<AllocaInst>(&*EscapeMappingValue.first);
880
881 // Reload the demoted instruction in the optimized version of the SCoP.
882 Value *EscapeInstReload =
883 Builder.CreateLoad(ScalarAddr->getAllocatedType(), ScalarAddr,
884 EscapeInst->getName() + ".final_reload");
885 EscapeInstReload =
886 Builder.CreateBitOrPointerCast(EscapeInstReload, EscapeInst->getType());
887
888 // Create the merge PHI that merges the optimized and unoptimized version.
889 PHINode *MergePHI = PHINode::Create(EscapeInst->getType(), 2,
890 EscapeInst->getName() + ".merge");
891 MergePHI->insertBefore(&*MergeBB->getFirstInsertionPt());
892
893 // Add the respective values to the merge PHI.
894 MergePHI->addIncoming(EscapeInstReload, OptExitBB);
895 MergePHI->addIncoming(EscapeInst, ExitBB);
896
897 // The information of scalar evolution about the escaping instruction needs
898 // to be revoked so the new merged instruction will be used.
899 if (SE.isSCEVable(EscapeInst->getType()))
900 SE.forgetValue(EscapeInst);
901
902 // Replace all uses of the demoted instruction with the merge PHI.
903 for (Instruction *EUser : EscapeUsers)
904 EUser->replaceUsesOfWith(EscapeInst, MergePHI);
905 }
906 }
907
908 void BlockGenerator::findOutsideUsers(Scop &S) {
909 for (auto &Array : S.arrays()) {
910
911 if (Array->getNumberOfDimensions() != 0)
912 continue;
913
914 if (Array->isPHIKind())
915 continue;
916
917 auto *Inst = dyn_cast<Instruction>(Array->getBasePtr());
918
919 if (!Inst)
920 continue;
921
922 // Scop invariant hoisting moves some of the base pointers out of the scop.
923 // We can ignore these, as the invariant load hoisting already registers the
924 // relevant outside users.
925 if (!S.contains(Inst))
926 continue;
927
928 handleOutsideUsers(S, Array);
929 }
930 }
931
932 void BlockGenerator::createExitPHINodeMerges(Scop &S) {
933 if (S.hasSingleExitEdge())
934 return;
935
936 auto *ExitBB = S.getExitingBlock();
937 auto *MergeBB = S.getExit();
938 auto *AfterMergeBB = MergeBB->getSingleSuccessor();
939 BasicBlock *OptExitBB = *(pred_begin(MergeBB));
940 if (OptExitBB == ExitBB)
941 OptExitBB = *(++pred_begin(MergeBB));
942
943 Builder.SetInsertPoint(OptExitBB->getTerminator());
944
945 for (auto &SAI : S.arrays()) {
946 auto *Val = SAI->getBasePtr();
947
948 // Only Value-like scalars need a merge PHI. Exit block PHIs receive either
949 // the original PHI's value or the reloaded incoming values from the
950 // generated code. An llvm::Value is merged between the original code's
951 // value and the generated one.
952 if (!SAI->isExitPHIKind())
953 continue;
954
955 PHINode *PHI = dyn_cast<PHINode>(Val);
956 if (!PHI)
957 continue;
958
959 if (PHI->getParent() != AfterMergeBB)
960 continue;
961
962 std::string Name = PHI->getName().str();
963 Value *ScalarAddr = getOrCreateAlloca(SAI);
964 Value *Reload = Builder.CreateLoad(SAI->getElementType(), ScalarAddr,
965 Name + ".ph.final_reload");
966 Reload = Builder.CreateBitOrPointerCast(Reload, PHI->getType());
967 Value *OriginalValue = PHI->getIncomingValueForBlock(MergeBB);
968 assert((!isa<Instruction>(OriginalValue) ||
969 cast<Instruction>(OriginalValue)->getParent() != MergeBB) &&
970 "Original value must no be one we just generated.");
971 auto *MergePHI = PHINode::Create(PHI->getType(), 2, Name + ".ph.merge");
972 MergePHI->insertBefore(&*MergeBB->getFirstInsertionPt());
973 MergePHI->addIncoming(Reload, OptExitBB);
974 MergePHI->addIncoming(OriginalValue, ExitBB);
975 int Idx = PHI->getBasicBlockIndex(MergeBB);
976 PHI->setIncomingValue(Idx, MergePHI);
977 }
978 }
979
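// Forget ScalarEvolution's knowledge about all instructions of the SCoP's
// statements and about the loops surrounding escape users, since code
// generation may change their values.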
980 void BlockGenerator::invalidateScalarEvolution(Scop &S) {
981 for (auto &Stmt : S)
982 if (Stmt.isCopyStmt())
983 continue;
984 else if (Stmt.isBlockStmt())
985 for (auto &Inst : *Stmt.getBasicBlock())
986 SE.forgetValue(&Inst);
987 else if (Stmt.isRegionStmt())
988 for (auto *BB : Stmt.getRegion()->blocks())
989 for (auto &Inst : *BB)
990 SE.forgetValue(&Inst);
991 else
992 llvm_unreachable("Unexpected statement type found");
993
994 // Invalidate SCEV of loops surrounding the EscapeUsers.
995 for (const auto &EscapeMapping : EscapeMap) {
996 const EscapeUserVectorTy &EscapeUsers = EscapeMapping.second.second;
997 for (Instruction *EUser : EscapeUsers) {
998 if (Loop *L = LI.getLoopFor(EUser->getParent()))
999 while (L) {
1000 SE.forgetLoop(L);
1001 L = L->getParentLoop();
1002 }
1003 }
1004 }
1005 }
1006
1007 void BlockGenerator::finalizeSCoP(Scop &S) {
1008 findOutsideUsers(S);
1009 createScalarInitialization(S);
1010 createExitPHINodeMerges(S);
1011 createScalarFinalization(S);
1012 invalidateScalarEvolution(S);
1013 }
1014
1015 VectorBlockGenerator::VectorBlockGenerator(BlockGenerator &BlockGen,
1016 std::vector<LoopToScevMapT> &VLTS,
1017 isl_map *Schedule)
1018 : BlockGenerator(BlockGen), VLTS(VLTS), Schedule(Schedule) {
1019 assert(Schedule && "No statement domain provided");
1020 }
1021
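// Return a vector value of width getVectorWidth() for Old, either from
// VectorMap or by inserting the per-lane values obtained via getNewValue.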
1022 Value *VectorBlockGenerator::getVectorValue(ScopStmt &Stmt, Value *Old,
1023 ValueMapT &VectorMap,
1024 VectorValueMapT &ScalarMaps,
1025 Loop *L) {
1026 if (Value *NewValue = VectorMap.lookup(Old))
1027 return NewValue;
1028
1029 int Width = getVectorWidth();
1030
1031 Value *Vector = UndefValue::get(FixedVectorType::get(Old->getType(), Width));
1032
1033 for (int Lane = 0; Lane < Width; Lane++)
1034 Vector = Builder.CreateInsertElement(
1035 Vector, getNewValue(Stmt, Old, ScalarMaps[Lane], VLTS[Lane], L),
1036 Builder.getInt32(Lane));
1037
1038 VectorMap[Old] = Vector;
1039
1040 return Vector;
1041 }
1042
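// Generate a single wide load for a consecutive (stride-one) access; for a
// negative stride the loaded vector is reversed with a shuffle.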
1043 Value *VectorBlockGenerator::generateStrideOneLoad(
1044 ScopStmt &Stmt, LoadInst *Load, VectorValueMapT &ScalarMaps,
1045 __isl_keep isl_id_to_ast_expr *NewAccesses, bool NegativeStride = false) {
1046 unsigned VectorWidth = getVectorWidth();
1047 Type *VectorType = FixedVectorType::get(Load->getType(), VectorWidth);
1048 Type *VectorPtrType =
1049 PointerType::get(VectorType, Load->getPointerAddressSpace());
1050 unsigned Offset = NegativeStride ? VectorWidth - 1 : 0;
1051
1052 Value *NewPointer = generateLocationAccessed(Stmt, Load, ScalarMaps[Offset],
1053 VLTS[Offset], NewAccesses);
1054 Value *VectorPtr =
1055 Builder.CreateBitCast(NewPointer, VectorPtrType, "vector_ptr");
1056 LoadInst *VecLoad = Builder.CreateLoad(VectorType, VectorPtr,
1057 Load->getName() + "_p_vec_full");
1058 if (!Aligned)
1059 VecLoad->setAlignment(Align(8));
1060
1061 if (NegativeStride) {
1062 SmallVector<Constant *, 16> Indices;
1063 for (int i = VectorWidth - 1; i >= 0; i--)
1064 Indices.push_back(ConstantInt::get(Builder.getInt32Ty(), i));
1065 Constant *SV = llvm::ConstantVector::get(Indices);
1066 Value *RevVecLoad = Builder.CreateShuffleVector(
1067 VecLoad, VecLoad, SV, Load->getName() + "_reverse");
1068 return RevVecLoad;
1069 }
1070
1071 return VecLoad;
1072 }
1073
1074 Value *VectorBlockGenerator::generateStrideZeroLoad(
1075 ScopStmt &Stmt, LoadInst *Load, ValueMapT &BBMap,
1076 __isl_keep isl_id_to_ast_expr *NewAccesses) {
1077 Type *VectorType = FixedVectorType::get(Load->getType(), 1);
1078 Type *VectorPtrType =
1079 PointerType::get(VectorType, Load->getPointerAddressSpace());
1080 Value *NewPointer =
1081 generateLocationAccessed(Stmt, Load, BBMap, VLTS[0], NewAccesses);
1082 Value *VectorPtr = Builder.CreateBitCast(NewPointer, VectorPtrType,
1083 Load->getName() + "_p_vec_p");
1084 LoadInst *ScalarLoad = Builder.CreateLoad(VectorType, VectorPtr,
1085 Load->getName() + "_p_splat_one");
1086
1087 if (!Aligned)
1088 ScalarLoad->setAlignment(Align(8));
1089
1090 Constant *SplatVector = Constant::getNullValue(
1091 FixedVectorType::get(Builder.getInt32Ty(), getVectorWidth()));
1092
1093 Value *VectorLoad = Builder.CreateShuffleVector(
1094 ScalarLoad, ScalarLoad, SplatVector, Load->getName() + "_p_splat");
1095 return VectorLoad;
1096 }
1097
1098 Value *VectorBlockGenerator::generateUnknownStrideLoad(
1099 ScopStmt &Stmt, LoadInst *Load, VectorValueMapT &ScalarMaps,
1100 __isl_keep isl_id_to_ast_expr *NewAccesses) {
1101 int VectorWidth = getVectorWidth();
1102 Type *ElemTy = Load->getType();
1103 auto *FVTy = FixedVectorType::get(ElemTy, VectorWidth);
1104
1105 Value *Vector = UndefValue::get(FVTy);
1106
1107 for (int i = 0; i < VectorWidth; i++) {
1108 Value *NewPointer = generateLocationAccessed(Stmt, Load, ScalarMaps[i],
1109 VLTS[i], NewAccesses);
1110 Value *ScalarLoad =
1111 Builder.CreateLoad(ElemTy, NewPointer, Load->getName() + "_p_scalar_");
1112 Vector = Builder.CreateInsertElement(
1113 Vector, ScalarLoad, Builder.getInt32(i), Load->getName() + "_p_vec_");
1114 }
1115
1116 return Vector;
1117 }
1118
1119 void VectorBlockGenerator::generateLoad(
1120 ScopStmt &Stmt, LoadInst *Load, ValueMapT &VectorMap,
1121 VectorValueMapT &ScalarMaps, __isl_keep isl_id_to_ast_expr *NewAccesses) {
1122 if (Value *PreloadLoad = GlobalMap.lookup(Load)) {
1123 VectorMap[Load] = Builder.CreateVectorSplat(getVectorWidth(), PreloadLoad,
1124 Load->getName() + "_p");
1125 return;
1126 }
1127
1128 if (!VectorType::isValidElementType(Load->getType())) {
1129 for (int i = 0; i < getVectorWidth(); i++)
1130 ScalarMaps[i][Load] =
1131 generateArrayLoad(Stmt, Load, ScalarMaps[i], VLTS[i], NewAccesses);
1132 return;
1133 }
1134
1135 const MemoryAccess &Access = Stmt.getArrayAccessFor(Load);
1136
1137 // Make sure we have scalar values available to access the pointer to
1138 // the data location.
1139 extractScalarValues(Load, VectorMap, ScalarMaps);
1140
1141 Value *NewLoad;
1142 if (Access.isStrideZero(isl::manage_copy(Schedule)))
1143 NewLoad = generateStrideZeroLoad(Stmt, Load, ScalarMaps[0], NewAccesses);
1144 else if (Access.isStrideOne(isl::manage_copy(Schedule)))
1145 NewLoad = generateStrideOneLoad(Stmt, Load, ScalarMaps, NewAccesses);
1146 else if (Access.isStrideX(isl::manage_copy(Schedule), -1))
1147 NewLoad = generateStrideOneLoad(Stmt, Load, ScalarMaps, NewAccesses, true);
1148 else
1149 NewLoad = generateUnknownStrideLoad(Stmt, Load, ScalarMaps, NewAccesses);
1150
1151 VectorMap[Load] = NewLoad;
1152 }
1153
1154 void VectorBlockGenerator::copyUnaryInst(ScopStmt &Stmt, UnaryInstruction *Inst,
1155 ValueMapT &VectorMap,
1156 VectorValueMapT &ScalarMaps) {
1157 int VectorWidth = getVectorWidth();
1158 Value *NewOperand = getVectorValue(Stmt, Inst->getOperand(0), VectorMap,
1159 ScalarMaps, getLoopForStmt(Stmt));
1160
1161 assert(isa<CastInst>(Inst) && "Can not generate vector code for instruction");
1162
1163 const CastInst *Cast = dyn_cast<CastInst>(Inst);
1164 auto *DestType = FixedVectorType::get(Inst->getType(), VectorWidth);
1165 VectorMap[Inst] = Builder.CreateCast(Cast->getOpcode(), NewOperand, DestType);
1166 }
1167
1168 void VectorBlockGenerator::copyBinaryInst(ScopStmt &Stmt, BinaryOperator *Inst,
1169 ValueMapT &VectorMap,
1170 VectorValueMapT &ScalarMaps) {
1171 Loop *L = getLoopForStmt(Stmt);
1172 Value *OpZero = Inst->getOperand(0);
1173 Value *OpOne = Inst->getOperand(1);
1174
1175 Value *NewOpZero, *NewOpOne;
1176 NewOpZero = getVectorValue(Stmt, OpZero, VectorMap, ScalarMaps, L);
1177 NewOpOne = getVectorValue(Stmt, OpOne, VectorMap, ScalarMaps, L);
1178
1179 Value *NewInst = Builder.CreateBinOp(Inst->getOpcode(), NewOpZero, NewOpOne,
1180 Inst->getName() + "p_vec");
1181 VectorMap[Inst] = NewInst;
1182 }
1183
1184 void VectorBlockGenerator::copyStore(
1185 ScopStmt &Stmt, StoreInst *Store, ValueMapT &VectorMap,
1186 VectorValueMapT &ScalarMaps, __isl_keep isl_id_to_ast_expr *NewAccesses) {
1187 const MemoryAccess &Access = Stmt.getArrayAccessFor(Store);
1188
1189 Value *Vector = getVectorValue(Stmt, Store->getValueOperand(), VectorMap,
1190 ScalarMaps, getLoopForStmt(Stmt));
1191
1192 // Make sure we have scalar values available to access the pointer to
1193 // the data location.
1194 extractScalarValues(Store, VectorMap, ScalarMaps);
1195
1196 if (Access.isStrideOne(isl::manage_copy(Schedule))) {
1197 Type *VectorType = FixedVectorType::get(Store->getValueOperand()->getType(),
1198 getVectorWidth());
1199 Type *VectorPtrType =
1200 PointerType::get(VectorType, Store->getPointerAddressSpace());
1201 Value *NewPointer = generateLocationAccessed(Stmt, Store, ScalarMaps[0],
1202 VLTS[0], NewAccesses);
1203
1204 Value *VectorPtr =
1205 Builder.CreateBitCast(NewPointer, VectorPtrType, "vector_ptr");
1206 StoreInst *Store = Builder.CreateStore(Vector, VectorPtr);
1207
1208 if (!Aligned)
1209 Store->setAlignment(Align(8));
1210 } else {
1211 for (unsigned i = 0; i < ScalarMaps.size(); i++) {
1212 Value *Scalar = Builder.CreateExtractElement(Vector, Builder.getInt32(i));
1213 Value *NewPointer = generateLocationAccessed(Stmt, Store, ScalarMaps[i],
1214 VLTS[i], NewAccesses);
1215 Builder.CreateStore(Scalar, NewPointer);
1216 }
1217 }
1218 }
1219
1220 bool VectorBlockGenerator::hasVectorOperands(const Instruction *Inst,
1221 ValueMapT &VectorMap) {
1222 for (Value *Operand : Inst->operands())
1223 if (VectorMap.count(Operand))
1224 return true;
1225 return false;
1226 }
1227
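// For every operand of Inst that is available as a vector, extract the
// per-lane scalar values into the scalar maps. Returns true if Inst had at
// least one vector operand.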
1228 bool VectorBlockGenerator::extractScalarValues(const Instruction *Inst,
1229 ValueMapT &VectorMap,
1230 VectorValueMapT &ScalarMaps) {
1231 bool HasVectorOperand = false;
1232 int VectorWidth = getVectorWidth();
1233
1234 for (Value *Operand : Inst->operands()) {
1235 ValueMapT::iterator VecOp = VectorMap.find(Operand);
1236
1237 if (VecOp == VectorMap.end())
1238 continue;
1239
1240 HasVectorOperand = true;
1241 Value *NewVector = VecOp->second;
1242
1243 for (int i = 0; i < VectorWidth; ++i) {
1244 ValueMapT &SM = ScalarMaps[i];
1245
1246 // If there is one scalar extracted, all scalar elements should have
1247 // already been extracted by the code here. So no need to check for the
1248 // existence of all of them.
1249 if (SM.count(Operand))
1250 break;
1251
1252 SM[Operand] =
1253 Builder.CreateExtractElement(NewVector, Builder.getInt32(i));
1254 }
1255 }
1256
1257 return HasVectorOperand;
1258 }
1259
1260 void VectorBlockGenerator::copyInstScalarized(
1261 ScopStmt &Stmt, Instruction *Inst, ValueMapT &VectorMap,
1262 VectorValueMapT &ScalarMaps, __isl_keep isl_id_to_ast_expr *NewAccesses) {
1263 bool HasVectorOperand;
1264 int VectorWidth = getVectorWidth();
1265
1266 HasVectorOperand = extractScalarValues(Inst, VectorMap, ScalarMaps);
1267
1268 for (int VectorLane = 0; VectorLane < getVectorWidth(); VectorLane++)
1269 BlockGenerator::copyInstruction(Stmt, Inst, ScalarMaps[VectorLane],
1270 VLTS[VectorLane], NewAccesses);
1271
1272 if (!VectorType::isValidElementType(Inst->getType()) || !HasVectorOperand)
1273 return;
1274
1275 // Make the result available as vector value.
1276 auto *FVTy = FixedVectorType::get(Inst->getType(), VectorWidth);
1277 Value *Vector = UndefValue::get(FVTy);
1278
1279 for (int i = 0; i < VectorWidth; i++)
1280 Vector = Builder.CreateInsertElement(Vector, ScalarMaps[i][Inst],
1281 Builder.getInt32(i));
1282
1283 VectorMap[Inst] = Vector;
1284 }
1285
1286 int VectorBlockGenerator::getVectorWidth() { return VLTS.size(); }
1287
1288 void VectorBlockGenerator::copyInstruction(
1289 ScopStmt &Stmt, Instruction *Inst, ValueMapT &VectorMap,
1290 VectorValueMapT &ScalarMaps, __isl_keep isl_id_to_ast_expr *NewAccesses) {
1291 // Terminator instructions define the control flow. They are explicitly
1292 // expressed in the clast and do not need to be copied.
1293 if (Inst->isTerminator())
1294 return;
1295
1296 if (canSyntheziseInStmt(Stmt, Inst))
1297 return;
1298
1299 if (auto *Load = dyn_cast<LoadInst>(Inst)) {
1300 generateLoad(Stmt, Load, VectorMap, ScalarMaps, NewAccesses);
1301 return;
1302 }
1303
1304 if (hasVectorOperands(Inst, VectorMap)) {
1305 if (auto *Store = dyn_cast<StoreInst>(Inst)) {
1306 // Identified as redundant by -polly-simplify.
1307 if (!Stmt.getArrayAccessOrNULLFor(Store))
1308 return;
1309
1310 copyStore(Stmt, Store, VectorMap, ScalarMaps, NewAccesses);
1311 return;
1312 }
1313
1314 if (auto *Unary = dyn_cast<UnaryInstruction>(Inst)) {
1315 copyUnaryInst(Stmt, Unary, VectorMap, ScalarMaps);
1316 return;
1317 }
1318
1319 if (auto *Binary = dyn_cast<BinaryOperator>(Inst)) {
1320 copyBinaryInst(Stmt, Binary, VectorMap, ScalarMaps);
1321 return;
1322 }
1323
1324 // Fallthrough: we generate scalar instructions if we don't know how to
1325 // generate vector code.
1326 }
1327
1328 copyInstScalarized(Stmt, Inst, VectorMap, ScalarMaps, NewAccesses);
1329 }
1330
1331 void VectorBlockGenerator::generateScalarVectorLoads(
1332 ScopStmt &Stmt, ValueMapT &VectorBlockMap) {
1333 for (MemoryAccess *MA : Stmt) {
1334 if (MA->isArrayKind() || MA->isWrite())
1335 continue;
1336
1337 auto *Address = getOrCreateAlloca(*MA);
1338 Type *VectorType = FixedVectorType::get(MA->getElementType(), 1);
1339 Type *VectorPtrType = PointerType::get(
1340 VectorType, Address->getType()->getPointerAddressSpace());
1341 Value *VectorPtr = Builder.CreateBitCast(Address, VectorPtrType,
1342 Address->getName() + "_p_vec_p");
1343 auto *Val = Builder.CreateLoad(VectorType, VectorPtr,
1344 Address->getName() + ".reload");
1345 Constant *SplatVector = Constant::getNullValue(
1346 FixedVectorType::get(Builder.getInt32Ty(), getVectorWidth()));
1347
1348 Value *VectorVal = Builder.CreateShuffleVector(
1349 Val, Val, SplatVector, Address->getName() + "_p_splat");
1350 VectorBlockMap[MA->getAccessValue()] = VectorVal;
1351 }
1352 }
1353
1354 void VectorBlockGenerator::verifyNoScalarStores(ScopStmt &Stmt) {
1355 for (MemoryAccess *MA : Stmt) {
1356 if (MA->isArrayKind() || MA->isRead())
1357 continue;
1358
1359 llvm_unreachable("Scalar stores not expected in vector loop");
1360 }
1361 }
1362
1363 void VectorBlockGenerator::copyStmt(
1364 ScopStmt &Stmt, __isl_keep isl_id_to_ast_expr *NewAccesses) {
1365 assert(Stmt.isBlockStmt() &&
1366 "TODO: Only block statements can be copied by the vector block "
1367 "generator");
1368
1369 BasicBlock *BB = Stmt.getBasicBlock();
1370 BasicBlock *CopyBB = SplitBlock(Builder.GetInsertBlock(),
1371 &*Builder.GetInsertPoint(), &DT, &LI);
1372 CopyBB->setName("polly.stmt." + BB->getName());
1373 Builder.SetInsertPoint(&CopyBB->front());
1374
1375 // Create two maps that store the mapping from the original instructions of
1376 // the old basic block to their copies in the new basic block. Those maps
1377 // are basic block local.
1378 //
1379 // As vector code generation is supported there is one map for scalar values
1380 // and one for vector values.
1381 //
1382 // In case we just do scalar code generation, the vectorMap is not used and
1383 // the scalarMap has just one dimension, which contains the mapping.
1384 //
1385 // In case vector code generation is done, an instruction may either appear
1386 // in the vector map once (as it is calculating <vectorwidth> values at a
1387 // time), or (if the values are calculated using scalar operations) it
1388 // appears once in every dimension of the scalarMap.
1389 VectorValueMapT ScalarBlockMap(getVectorWidth());
1390 ValueMapT VectorBlockMap;
1391
1392 generateScalarVectorLoads(Stmt, VectorBlockMap);
1393
1394 for (Instruction *Inst : Stmt.getInstructions())
1395 copyInstruction(Stmt, Inst, VectorBlockMap, ScalarBlockMap, NewAccesses);
1396
1397 verifyNoScalarStores(Stmt);
1398 }
1399
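// Make the end block of the copy of BB's original immediate dominator the
// immediate dominator of BBCopy, and return the start block of that copy
// (or nullptr if it has not been generated yet).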
1400 BasicBlock *RegionGenerator::repairDominance(BasicBlock *BB,
1401 BasicBlock *BBCopy) {
1402
1403 BasicBlock *BBIDom = DT.getNode(BB)->getIDom()->getBlock();
1404 BasicBlock *BBCopyIDom = EndBlockMap.lookup(BBIDom);
1405
1406 if (BBCopyIDom)
1407 DT.changeImmediateDominator(BBCopy, BBCopyIDom);
1408
1409 return StartBlockMap.lookup(BBIDom);
1410 }
1411
1412 // This is to determine whether an llvm::Value (defined in @p BB) is usable when
1413 // leaving a subregion. The straight-forward DT.dominates(BB, R->getExitBlock())
1414 // does not work in cases where the exit block has edges from outside the
1415 // region. In that case the llvm::Value would never be usable in the exit
1416 // block. The RegionGenerator however creates a new exit block ('ExitBBCopy')
1417 // for the subregion's exiting edges only. We need to determine whether an
1418 // llvm::Value is usable in there. We do this by checking whether it dominates
1419 // all exiting blocks individually.
1420 static bool isDominatingSubregionExit(const DominatorTree &DT, Region *R,
1421 BasicBlock *BB) {
1422 for (auto ExitingBB : predecessors(R->getExit())) {
1423 // Check for non-subregion incoming edges.
1424 if (!R->contains(ExitingBB))
1425 continue;
1426
1427 if (!DT.dominates(BB, ExitingBB))
1428 return false;
1429 }
1430
1431 return true;
1432 }
1433
1434 // Find the direct dominator of the subregion's exit block if the subregion was
1435 // simplified.
1436 static BasicBlock *findExitDominator(DominatorTree &DT, Region *R) {
1437 BasicBlock *Common = nullptr;
1438 for (auto ExitingBB : predecessors(R->getExit())) {
1439 // Check for non-subregion incoming edges.
1440 if (!R->contains(ExitingBB))
1441 continue;
1442
1443 // First exiting edge.
1444 if (!Common) {
1445 Common = ExitingBB;
1446 continue;
1447 }
1448
1449 Common = DT.findNearestCommonDominator(Common, ExitingBB);
1450 }
1451
1452 assert(Common && R->contains(Common));
1453 return Common;
1454 }
1455
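// Copy a non-affine region statement: create a dedicated entry block that
// reloads all demoted inputs, copy the blocks of the subregion in BFS order
// while repairing dominance, re-create the region's control flow and
// loop-counting PHIs by hand, and finally write back all escaping scalars in a
// fresh exit block.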
1456 void RegionGenerator::copyStmt(ScopStmt &Stmt, LoopToScevMapT &LTS,
1457 isl_id_to_ast_expr *IdToAstExp) {
1458 assert(Stmt.isRegionStmt() &&
1459 "Only region statements can be copied by the region generator");
1460
1461 // Forget all old mappings.
1462 StartBlockMap.clear();
1463 EndBlockMap.clear();
1464 RegionMaps.clear();
1465 IncompletePHINodeMap.clear();
1466
1467 // Collection of all values related to this subregion.
1468 ValueMapT ValueMap;
1469
1470 // The region represented by the statement.
1471 Region *R = Stmt.getRegion();
1472
1473 // Create a dedicated entry for the region where we can reload all demoted
1474 // inputs.
1475 BasicBlock *EntryBB = R->getEntry();
1476 BasicBlock *EntryBBCopy = SplitBlock(Builder.GetInsertBlock(),
1477 &*Builder.GetInsertPoint(), &DT, &LI);
1478 EntryBBCopy->setName("polly.stmt." + EntryBB->getName() + ".entry");
1479 Builder.SetInsertPoint(&EntryBBCopy->front());
1480
1481 ValueMapT &EntryBBMap = RegionMaps[EntryBBCopy];
1482 generateScalarLoads(Stmt, LTS, EntryBBMap, IdToAstExp);
1483 generateBeginStmtTrace(Stmt, LTS, EntryBBMap);
1484
1485 for (auto PI = pred_begin(EntryBB), PE = pred_end(EntryBB); PI != PE; ++PI)
1486 if (!R->contains(*PI)) {
1487 StartBlockMap[*PI] = EntryBBCopy;
1488 EndBlockMap[*PI] = EntryBBCopy;
1489 }
1490
1491 // Iterate over all blocks in the region in a breadth-first search.
1492 std::deque<BasicBlock *> Blocks;
1493 SmallSetVector<BasicBlock *, 8> SeenBlocks;
1494 Blocks.push_back(EntryBB);
1495 SeenBlocks.insert(EntryBB);
1496
1497 while (!Blocks.empty()) {
1498 BasicBlock *BB = Blocks.front();
1499 Blocks.pop_front();
1500
1501 // First split the block and update dominance information.
1502 BasicBlock *BBCopy = splitBB(BB);
1503 BasicBlock *BBCopyIDom = repairDominance(BB, BBCopy);
1504
1505 // Get the mapping for this block and initialize it with either the scalar
1506 // loads from the generated entering block (which dominates all blocks of
1507 // this subregion) or the maps of the immediate dominator, if part of the
1508 // subregion. The latter necessarily includes the former.
1509 ValueMapT *InitBBMap;
1510 if (BBCopyIDom) {
1511 assert(RegionMaps.count(BBCopyIDom));
1512 InitBBMap = &RegionMaps[BBCopyIDom];
1513 } else
1514 InitBBMap = &EntryBBMap;
1515 auto Inserted = RegionMaps.insert(std::make_pair(BBCopy, *InitBBMap));
1516 ValueMapT &RegionMap = Inserted.first->second;
1517
1518 // Copy the block with the BlockGenerator.
1519 Builder.SetInsertPoint(&BBCopy->front());
1520 copyBB(Stmt, BB, BBCopy, RegionMap, LTS, IdToAstExp);
1521
1522 // In order to remap PHI nodes we also store basic block mappings.
1523 StartBlockMap[BB] = BBCopy;
1524 EndBlockMap[BB] = Builder.GetInsertBlock();
1525
1526 // Add values to incomplete PHI nodes waiting for this block to be copied.
1527 for (const PHINodePairTy &PHINodePair : IncompletePHINodeMap[BB])
1528 addOperandToPHI(Stmt, PHINodePair.first, PHINodePair.second, BB, LTS);
1529 IncompletePHINodeMap[BB].clear();
1530
1531 // And continue with new successors inside the region.
1532 for (auto SI = succ_begin(BB), SE = succ_end(BB); SI != SE; SI++)
1533 if (R->contains(*SI) && SeenBlocks.insert(*SI))
1534 Blocks.push_back(*SI);
1535
1536 // Remember value in case it is visible after this subregion.
1537 if (isDominatingSubregionExit(DT, R, BB))
1538 ValueMap.insert(RegionMap.begin(), RegionMap.end());
1539 }
1540
1541 // Now create a new dedicated region exit block and add it to the region map.
1542 BasicBlock *ExitBBCopy = SplitBlock(Builder.GetInsertBlock(),
1543 &*Builder.GetInsertPoint(), &DT, &LI);
1544 ExitBBCopy->setName("polly.stmt." + R->getExit()->getName() + ".exit");
1545 StartBlockMap[R->getExit()] = ExitBBCopy;
1546 EndBlockMap[R->getExit()] = ExitBBCopy;
1547
1548 BasicBlock *ExitDomBBCopy = EndBlockMap.lookup(findExitDominator(DT, R));
1549 assert(ExitDomBBCopy &&
1550 "Common exit dominator must be within region; at least the entry node "
1551 "must match");
1552 DT.changeImmediateDominator(ExitBBCopy, ExitDomBBCopy);
1553
1554 // As the block generator doesn't handle control flow, we need to add the
1555 // region control flow by hand after all blocks have been copied.
1556 for (BasicBlock *BB : SeenBlocks) {
1557
1558 BasicBlock *BBCopyStart = StartBlockMap[BB];
1559 BasicBlock *BBCopyEnd = EndBlockMap[BB];
1560 Instruction *TI = BB->getTerminator();
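// Copies of blocks that end in 'unreachable' must not get any outgoing edges;
// clear whatever was copied into them and terminate the copy with a fresh
// 'unreachable'.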
1561 if (isa<UnreachableInst>(TI)) {
1562 while (!BBCopyEnd->empty())
1563 BBCopyEnd->begin()->eraseFromParent();
1564 new UnreachableInst(BBCopyEnd->getContext(), BBCopyEnd);
1565 continue;
1566 }
1567
1568 Instruction *BICopy = BBCopyEnd->getTerminator();
1569
1570 ValueMapT &RegionMap = RegionMaps[BBCopyStart];
1571 RegionMap.insert(StartBlockMap.begin(), StartBlockMap.end());
1572
1573 Builder.SetInsertPoint(BICopy);
1574 copyInstScalar(Stmt, TI, RegionMap, LTS);
1575 BICopy->eraseFromParent();
1576 }
1577
1578 // Add counting PHI nodes to all loops in the region that can be used as
1579 // replacement for SCEVs referring to the old loop.
1580 for (BasicBlock *BB : SeenBlocks) {
1581 Loop *L = LI.getLoopFor(BB);
1582 if (L == nullptr || L->getHeader() != BB || !R->contains(L))
1583 continue;
1584
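// The counting PHI starts at zero when the copied loop is entered and is
// incremented by one in each iteration; mapping the original loop to it in
// LTS (below) lets SCEVs referring to the old induction variable be rewritten
// in terms of this new value.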
1585 BasicBlock *BBCopy = StartBlockMap[BB];
1586 Value *NullVal = Builder.getInt32(0);
1587 PHINode *LoopPHI =
1588 PHINode::Create(Builder.getInt32Ty(), 2, "polly.subregion.iv");
1589 Instruction *LoopPHIInc = BinaryOperator::CreateAdd(
1590 LoopPHI, Builder.getInt32(1), "polly.subregion.iv.inc");
1591 LoopPHI->insertBefore(&BBCopy->front());
1592 LoopPHIInc->insertBefore(BBCopy->getTerminator());
1593
1594 for (auto *PredBB : make_range(pred_begin(BB), pred_end(BB))) {
1595 if (!R->contains(PredBB))
1596 continue;
1597 if (L->contains(PredBB))
1598 LoopPHI->addIncoming(LoopPHIInc, EndBlockMap[PredBB]);
1599 else
1600 LoopPHI->addIncoming(NullVal, EndBlockMap[PredBB]);
1601 }
1602
1603 for (auto *PredBBCopy : make_range(pred_begin(BBCopy), pred_end(BBCopy)))
1604 if (LoopPHI->getBasicBlockIndex(PredBBCopy) < 0)
1605 LoopPHI->addIncoming(NullVal, PredBBCopy);
1606
1607 LTS[L] = SE.getUnknown(LoopPHI);
1608 }
1609
1610 // Continue generating code in the exit block.
1611 Builder.SetInsertPoint(&*ExitBBCopy->getFirstInsertionPt());
1612
1613 // Write values visible to other statements.
1614 generateScalarStores(Stmt, LTS, ValueMap, IdToAstExp);
1615 StartBlockMap.clear();
1616 EndBlockMap.clear();
1617 RegionMaps.clear();
1618 IncompletePHINodeMap.clear();
1619 }
1620
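// Re-create a PHI node for a value leaving the subregion. The PHI is placed in
// the copied exit block (or in the copy of the former exiting block if the
// subregion was simplified) and gets one incoming value per copied exiting
// edge.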
1621 PHINode *RegionGenerator::buildExitPHI(MemoryAccess *MA, LoopToScevMapT &LTS,
1622 ValueMapT &BBMap, Loop *L) {
1623 ScopStmt *Stmt = MA->getStatement();
1624 Region *SubR = Stmt->getRegion();
1625 auto Incoming = MA->getIncoming();
1626
1627 PollyIRBuilder::InsertPointGuard IPGuard(Builder);
1628 PHINode *OrigPHI = cast<PHINode>(MA->getAccessInstruction());
1629 BasicBlock *NewSubregionExit = Builder.GetInsertBlock();
1630
1631 // This can happen if the subregion is simplified after the ScopStmts
1632 // have been created; simplification happens as part of CodeGeneration.
1633 if (OrigPHI->getParent() != SubR->getExit()) {
1634 BasicBlock *FormerExit = SubR->getExitingBlock();
1635 if (FormerExit)
1636 NewSubregionExit = StartBlockMap.lookup(FormerExit);
1637 }
1638
1639 PHINode *NewPHI = PHINode::Create(OrigPHI->getType(), Incoming.size(),
1640 "polly." + OrigPHI->getName(),
1641 NewSubregionExit->getFirstNonPHI());
1642
1643 // Add the incoming values to the PHI.
1644 for (auto &Pair : Incoming) {
1645 BasicBlock *OrigIncomingBlock = Pair.first;
1646 BasicBlock *NewIncomingBlockStart = StartBlockMap.lookup(OrigIncomingBlock);
1647 BasicBlock *NewIncomingBlockEnd = EndBlockMap.lookup(OrigIncomingBlock);
1648 Builder.SetInsertPoint(NewIncomingBlockEnd->getTerminator());
1649 assert(RegionMaps.count(NewIncomingBlockStart));
1650 assert(RegionMaps.count(NewIncomingBlockEnd));
1651 ValueMapT *LocalBBMap = &RegionMaps[NewIncomingBlockStart];
1652
1653 Value *OrigIncomingValue = Pair.second;
1654 Value *NewIncomingValue =
1655 getNewValue(*Stmt, OrigIncomingValue, *LocalBBMap, LTS, L);
1656 NewPHI->addIncoming(NewIncomingValue, NewIncomingBlockEnd);
1657 }
1658
1659 return NewPHI;
1660 }
1661
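// Compute the value that a scalar write leaving the subregion should store:
// for PHI-kind accesses either the single incoming value or a newly built exit
// PHI, for MemoryKind::Value accesses simply the copied value.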
1662 Value *RegionGenerator::getExitScalar(MemoryAccess *MA, LoopToScevMapT &LTS,
1663 ValueMapT &BBMap) {
1664 ScopStmt *Stmt = MA->getStatement();
1665
1666 // TODO: Add some test cases that ensure this is really the right choice.
1667 Loop *L = LI.getLoopFor(Stmt->getRegion()->getExit());
1668
1669 if (MA->isAnyPHIKind()) {
1670 auto Incoming = MA->getIncoming();
1671 assert(!Incoming.empty() &&
1672 "PHI WRITEs must have originate from at least one incoming block");
1673
1674 // If there is only one incoming value, we do not need to create a PHI.
1675 if (Incoming.size() == 1) {
1676 Value *OldVal = Incoming[0].second;
1677 return getNewValue(*Stmt, OldVal, BBMap, LTS, L);
1678 }
1679
1680 return buildExitPHI(MA, LTS, BBMap, L);
1681 }
1682
1683 // MemoryKind::Value accesses leaving the subregion must dominate the exit
1684 // block; just pass the copied value.
1685 Value *OldVal = MA->getAccessValue();
1686 return getNewValue(*Stmt, OldVal, BBMap, LTS, L);
1687 }
1688
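// Emit the scalar (PHI and value) writes that leave the subregion. The values
// are computed up front, while the exiting blocks are still direct
// predecessors, and each store is guarded by the execution domain of its
// access.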
1689 void RegionGenerator::generateScalarStores(
1690 ScopStmt &Stmt, LoopToScevMapT &LTS, ValueMapT &BBMap,
1691 __isl_keep isl_id_to_ast_expr *NewAccesses) {
1692 assert(Stmt.getRegion() &&
1693 "Block statements need to use the generateScalarStores() "
1694 "function in the BlockGenerator");
1695
1696 // Get the exit scalar values before generating the writes.
1697 // This is necessary because RegionGenerator::getExitScalar may insert
1698 // PHINodes that depend on the region's exiting blocks. But
1699 // BlockGenerator::generateConditionalExecution may insert a new basic block
1700 // such that the current basic block is not a direct successor of the exiting
1701 // blocks anymore. Hence, build the PHINodes while the current block is still
1702 // the direct successor.
1703 SmallDenseMap<MemoryAccess *, Value *> NewExitScalars;
1704 for (MemoryAccess *MA : Stmt) {
1705 if (MA->isOriginalArrayKind() || MA->isRead())
1706 continue;
1707
1708 Value *NewVal = getExitScalar(MA, LTS, BBMap);
1709 NewExitScalars[MA] = NewVal;
1710 }
1711
1712 for (MemoryAccess *MA : Stmt) {
1713 if (MA->isOriginalArrayKind() || MA->isRead())
1714 continue;
1715
1716 isl::set AccDom = MA->getAccessRelation().domain();
1717 std::string Subject = MA->getId().get_name();
1718 generateConditionalExecution(
1719 Stmt, AccDom, Subject.c_str(), [&, this, MA]() {
1720 Value *NewVal = NewExitScalars.lookup(MA);
1721 assert(NewVal && "The exit scalar must be determined before");
1722 Value *Address = getImplicitAddress(*MA, getLoopForStmt(Stmt), LTS,
1723 BBMap, NewAccesses);
1724 assert((!isa<Instruction>(NewVal) ||
1725 DT.dominates(cast<Instruction>(NewVal)->getParent(),
1726 Builder.GetInsertBlock())) &&
1727 "Domination violation");
1728 assert((!isa<Instruction>(Address) ||
1729 DT.dominates(cast<Instruction>(Address)->getParent(),
1730 Builder.GetInsertBlock())) &&
1731 "Domination violation");
1732 Builder.CreateStore(NewVal, Address);
1733 });
1734 }
1735 }
1736
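// Add the copied incoming value for @p IncomingBB to the copied PHI node
// @p PHICopy, or record the PHI as incomplete if the incoming block has not
// been copied yet.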
1737 void RegionGenerator::addOperandToPHI(ScopStmt &Stmt, PHINode *PHI,
1738 PHINode *PHICopy, BasicBlock *IncomingBB,
1739 LoopToScevMapT &LTS) {
1740 // If the incoming block was not yet copied, mark this PHI as incomplete.
1741 // Once the block is copied, the incoming value will be added.
1742 BasicBlock *BBCopyStart = StartBlockMap[IncomingBB];
1743 BasicBlock *BBCopyEnd = EndBlockMap[IncomingBB];
1744 if (!BBCopyStart) {
1745 assert(!BBCopyEnd);
1746 assert(Stmt.represents(IncomingBB) &&
1747 "Bad incoming block for PHI in non-affine region");
1748 IncompletePHINodeMap[IncomingBB].push_back(std::make_pair(PHI, PHICopy));
1749 return;
1750 }
1751
1752 assert(RegionMaps.count(BBCopyStart) &&
1753 "Incoming PHI block did not have a BBMap");
1754 ValueMapT &BBCopyMap = RegionMaps[BBCopyStart];
1755
1756 Value *OpCopy = nullptr;
1757
1758 if (Stmt.represents(IncomingBB)) {
1759 Value *Op = PHI->getIncomingValueForBlock(IncomingBB);
1760
1761 // If the current insert block is different from the PHI's incoming block,
1762 // change it; otherwise do not.
1763 auto IP = Builder.GetInsertPoint();
1764 if (IP->getParent() != BBCopyEnd)
1765 Builder.SetInsertPoint(BBCopyEnd->getTerminator());
1766 OpCopy = getNewValue(Stmt, Op, BBCopyMap, LTS, getLoopForStmt(Stmt));
1767 if (IP->getParent() != BBCopyEnd)
1768 Builder.SetInsertPoint(&*IP);
1769 } else {
1770 // All edges from outside the non-affine region become a single edge
1771 // in the new copy of the non-affine region. Make sure to only add the
1772 // corresponding edge the first time we encounter a basic block from
1773 // outside the non-affine region.
1774 if (PHICopy->getBasicBlockIndex(BBCopyEnd) >= 0)
1775 return;
1776
1777 // Get the reloaded value.
1778 OpCopy = getNewValue(Stmt, PHI, BBCopyMap, LTS, getLoopForStmt(Stmt));
1779 }
1780
1781 assert(OpCopy && "Incoming PHI value was not copied properly");
1782 PHICopy->addIncoming(OpCopy, BBCopyEnd);
1783 }
1784
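// Create the copy of a PHI node inside the non-affine region and register its
// incoming operands; operands whose incoming block has not been copied yet are
// completed later by addOperandToPHI().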
1785 void RegionGenerator::copyPHIInstruction(ScopStmt &Stmt, PHINode *PHI,
1786 ValueMapT &BBMap,
1787 LoopToScevMapT &LTS) {
1788 unsigned NumIncoming = PHI->getNumIncomingValues();
1789 PHINode *PHICopy =
1790 Builder.CreatePHI(PHI->getType(), NumIncoming, "polly." + PHI->getName());
1791 PHICopy->moveBefore(PHICopy->getParent()->getFirstNonPHI());
1792 BBMap[PHI] = PHICopy;
1793
1794 for (BasicBlock *IncomingBB : PHI->blocks())
1795 addOperandToPHI(Stmt, PHI, PHICopy, IncomingBB, LTS);
1796 }
1797