//===- LoopTiling.cpp --- Loop tiling pass ------------------------------*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements a pass to tile loop nests.
//
//===----------------------------------------------------------------------===//

#include "PassDetail.h"
#include "mlir/Analysis/AffineAnalysis.h"
#include "mlir/Analysis/AffineStructures.h"
#include "mlir/Analysis/LoopAnalysis.h"
#include "mlir/Analysis/Utils.h"
#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Dialect/Affine/IR/AffineValueMap.h"
#include "mlir/Dialect/Affine/Passes.h"
#include "mlir/IR/BlockAndValueMapping.h"
#include "mlir/IR/Builders.h"
#include "mlir/Transforms/LoopUtils.h"
#include "mlir/Transforms/Utils.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
using namespace mlir;

#define DEBUG_TYPE "affine-loop-tile"

namespace {

/// A pass to perform loop tiling on all suitable loop nests of a Function.
struct LoopTiling : public AffineLoopTilingBase<LoopTiling> {
  LoopTiling() = default;
  explicit LoopTiling(uint64_t cacheSizeBytes, bool avoidMaxMinBounds = true)
      : avoidMaxMinBounds(avoidMaxMinBounds) {
    this->cacheSizeInKiB = cacheSizeBytes / 1024;
  }

  void runOnFunction() override;
  void getTileSizes(ArrayRef<AffineForOp> band,
                    SmallVectorImpl<unsigned> *tileSizes);

  // Default tile size if nothing is provided.
  constexpr static unsigned kDefaultTileSize = 4;

  // If true, tile sizes are set to avoid max/min in bounds if possible.
  bool avoidMaxMinBounds = true;
};

} // end anonymous namespace

/// Creates a pass to perform loop tiling on all suitable loop nests of a
/// Function.
std::unique_ptr<OperationPass<FuncOp>>
mlir::createLoopTilingPass(uint64_t cacheSizeBytes) {
  return std::make_unique<LoopTiling>(cacheSizeBytes);
}
std::unique_ptr<OperationPass<FuncOp>> mlir::createLoopTilingPass() {
  return std::make_unique<LoopTiling>();
}
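
// Usage sketch (illustrative only, not part of this file): the pass is
// typically scheduled on functions, e.g. from C++ with something like
//
//   PassManager pm(module.getContext());
//   pm.addNestedPass<FuncOp>(
//       mlir::createLoopTilingPass(/*cacheSizeBytes=*/512 * 1024));
//   (void)pm.run(module);
//
// or from the command line via `mlir-opt -affine-loop-tile`. The exact
// pass-manager setup and option names come from the generated
// AffineLoopTilingBase and may differ across versions.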

// Move the loop body of AffineForOp 'src' from 'src' into the specified
// location in destination's body, ignoring the terminator.
static inline void moveLoopBody(AffineForOp src, AffineForOp dest,
                                Block::iterator loc) {
  auto &insts = src.getBody()->getOperations();
  dest.getBody()->getOperations().splice(loc, insts, insts.begin(),
                                         std::prev(insts.end()));
}

// Move the loop body of AffineForOp 'src' from 'src' to the start of dest's
// body.
static inline void moveLoopBody(AffineForOp src, AffineForOp dest) {
  moveLoopBody(src, dest, dest.getBody()->begin());
}

/// Constructs and sets new loop bounds after tiling for the case of
/// hyper-rectangular index sets, where the bounds of one dimension do not
/// depend on other dimensions. Bounds of each dimension can thus be treated
/// independently, and deriving the new bounds is much simpler and faster
/// than for the case of tiling arbitrary polyhedral shapes.
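///
/// For instance (an illustrative case), a loop `affine.for %i = 0 to 256`
/// tiled by 32 yields a tile-space loop `affine.for %ii = 0 to 256 step 32`
/// and an intra-tile loop running from %ii to %ii + 32; a min with the
/// original upper bound is introduced only when the tile size does not evenly
/// divide the trip count.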
static void
constructTiledIndexSetHyperRect(MutableArrayRef<AffineForOp> origLoops,
                                MutableArrayRef<AffineForOp> newLoops,
                                ArrayRef<unsigned> tileSizes) {
  assert(!origLoops.empty());
  assert(origLoops.size() == tileSizes.size());

  OpBuilder b(origLoops[0].getOperation());
  unsigned width = origLoops.size();

  // Bounds for tile space loops.
  for (unsigned i = 0; i < width; i++) {
    OperandRange newLbOperands = origLoops[i].getLowerBoundOperands();
    OperandRange newUbOperands = origLoops[i].getUpperBoundOperands();
    newLoops[i].setLowerBound(newLbOperands, origLoops[i].getLowerBoundMap());
    newLoops[i].setUpperBound(newUbOperands, origLoops[i].getUpperBoundMap());
    newLoops[i].setStep(tileSizes[i]);
  }
  // Bounds for intra-tile loops.
  for (unsigned i = 0; i < width; i++) {
    int64_t largestDiv = getLargestDivisorOfTripCount(origLoops[i]);
    auto mayBeConstantCount = getConstantTripCount(origLoops[i]);
    // The lower bound is simply the corresponding tile-space loop's IV.
    AffineMap lbMap = b.getDimIdentityMap();
    newLoops[width + i].setLowerBound(
        /*operands=*/newLoops[i].getInductionVar(), lbMap);

    // Set the upper bound.
    if (mayBeConstantCount && mayBeConstantCount.getValue() < tileSizes[i]) {
      // Trip count is less than the tile size: upper bound is lower bound +
      // trip count.
      auto ubMap = b.getSingleDimShiftAffineMap(mayBeConstantCount.getValue());
      newLoops[width + i].setUpperBound(
          /*operands=*/newLoops[i].getInductionVar(), ubMap);
    } else if (largestDiv % tileSizes[i] != 0) {
      // Intra-tile loop ii goes from i to min(i + tileSize, ub_i).
      // Construct the upper bound map; the operands are the original operands
      // with the tile-space IV 'i' appended. The new upper bound map is the
      // original one with an additional expression i + tileSize appended.
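      // For example (illustrative), if the original upper bound map is
      // (d0)[s0] -> (d0, s0), the new map becomes
      // (d0, d1)[s0] -> (d1 + tileSize, d0, s0), with the tile-space IV bound
      // to d1; the multi-result upper bound is interpreted as a min.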

      // Add dim operands from original upper bound.
      SmallVector<Value, 4> ubOperands;
      auto ub = origLoops[i].getUpperBound();
      ubOperands.reserve(ub.getNumOperands() + 1);
      auto origUbMap = ub.getMap();
      for (unsigned j = 0, e = origUbMap.getNumDims(); j < e; ++j)
        ubOperands.push_back(ub.getOperand(j));

      // Add dim operand for new loop upper bound.
      ubOperands.push_back(newLoops[i].getInductionVar());

      // Add symbol operands from original upper bound.
      for (unsigned j = 0, e = origUbMap.getNumSymbols(); j < e; ++j)
        ubOperands.push_back(ub.getOperand(origUbMap.getNumDims() + j));

      SmallVector<AffineExpr, 4> boundExprs;
      boundExprs.reserve(1 + origUbMap.getNumResults());
      auto dim = b.getAffineDimExpr(origUbMap.getNumDims());
      // The new upper bound map is the original one with an additional
      // expression i + tileSize appended.
      boundExprs.push_back(dim + tileSizes[i]);
      boundExprs.append(origUbMap.getResults().begin(),
                        origUbMap.getResults().end());
      auto ubMap =
          AffineMap::get(origUbMap.getNumDims() + 1, origUbMap.getNumSymbols(),
                         boundExprs, b.getContext());
      newLoops[width + i].setUpperBound(/*operands=*/ubOperands, ubMap);
    } else {
      // No need for the min expression: the tile size divides the trip count.
      auto dim = b.getAffineDimExpr(0);
      auto ubMap = AffineMap::get(1, 0, dim + tileSizes[i]);
      newLoops[width + i].setUpperBound(newLoops[i].getInductionVar(), ubMap);
    }
  }
}

/// Tiles the specified band of perfectly nested loops creating tile-space loops
/// and intra-tile loops. A band is a contiguous set of loops.
//  TODO: handle non hyper-rectangular spaces.
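//
//  Illustrative sketch (assuming constant bounds evenly divided by tile sizes
//  [32, 32]; the bound maps below are schematic):
//
//    affine.for %i = 0 to 256 {
//      affine.for %j = 0 to 512 { ... }
//    }
//
//  becomes
//
//    affine.for %ii = 0 to 256 step 32 {
//      affine.for %jj = 0 to 512 step 32 {
//        affine.for %i = #lb(%ii) to #ub(%ii) {
//          affine.for %j = #lb(%jj) to #ub(%jj) { ... }
//        }
//      }
//    }
//
//  where #lb = affine_map<(d0) -> (d0)> and
//  #ub = affine_map<(d0) -> (d0 + 32)>.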
LogicalResult
mlir::tilePerfectlyNested(MutableArrayRef<AffineForOp> input,
                          ArrayRef<unsigned> tileSizes,
                          SmallVectorImpl<AffineForOp> *tiledNest) {
  // Check that the supplied for ops are perfectly nested.
  assert(!input.empty() && "no loops in input band");
  assert(input.size() == tileSizes.size() && "Too few/many tile sizes");

  assert(isPerfectlyNested(input) && "input loops not perfectly nested");

  auto origLoops = input;

  AffineForOp rootAffineForOp = origLoops[0];
  auto loc = rootAffineForOp.getLoc();
  // Note that width is at least one since band isn't empty.
  unsigned width = input.size();

  SmallVector<AffineForOp, 6> tiledLoops(2 * width);

  // The current outermost loop as the new nest is built from the inside out.
  auto *topLoop = rootAffineForOp.getOperation();
  AffineForOp innermostPointLoop;

  // Add intra-tile (or point) loops.
  for (unsigned i = 0; i < width; i++) {
    OpBuilder b(topLoop);
    // Loop bounds will be set later.
    auto pointLoop = b.create<AffineForOp>(loc, 0, 0);
    pointLoop.getBody()->getOperations().splice(
        pointLoop.getBody()->begin(), topLoop->getBlock()->getOperations(),
        topLoop);
    tiledLoops[2 * width - 1 - i] = pointLoop;
    topLoop = pointLoop.getOperation();
    if (i == 0)
      innermostPointLoop = pointLoop;
  }

  // Add tile-space loops.
  for (unsigned i = width; i < 2 * width; i++) {
    OpBuilder b(topLoop);
    // Loop bounds will be set later.
    auto tileSpaceLoop = b.create<AffineForOp>(loc, 0, 0);
    tileSpaceLoop.getBody()->getOperations().splice(
        tileSpaceLoop.getBody()->begin(), topLoop->getBlock()->getOperations(),
        topLoop);
    tiledLoops[2 * width - i - 1] = tileSpaceLoop;
    topLoop = tileSpaceLoop.getOperation();
  }

  // Move the loop body of the original nest to the new one.
  moveLoopBody(origLoops.back(), innermostPointLoop);

  SmallVector<Value, 8> origLoopIVs;
  extractForInductionVars(input, &origLoopIVs);

  FlatAffineConstraints cst;
  getIndexSet(input, &cst);
  if (!cst.isHyperRectangular(0, width)) {
    llvm::dbgs() << "tiled code generation unimplemented for the "
                    "non-hyperrectangular case, op:"
                 << *rootAffineForOp << "\n";
    return failure();
  }

  constructTiledIndexSetHyperRect(origLoops, tiledLoops, tileSizes);

  // Replace original IVs with intra-tile loop IVs.
  for (unsigned i = 0; i < width; i++)
    origLoopIVs[i].replaceAllUsesWith(tiledLoops[i + width].getInductionVar());

  // Erase the old loop nest.
  rootAffineForOp.erase();

  if (tiledNest)
    *tiledNest = std::move(tiledLoops);

  return success();
}

// Identify valid and profitable bands of loops to tile. This is currently just
// a temporary placeholder to test the mechanics of tiled code generation.
// Returns all maximal outermost perfect loop nests to tile.
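// For example (a schematic sketch, eliding bounds), a function whose top
// level contains
//   affine.for %i { affine.for %j { ... } }
// followed by a sibling
//   affine.for %k { ... }
// yields two bands: [%i, %j] and [%k].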
static void getTileableBands(FuncOp f,
                             std::vector<SmallVector<AffineForOp, 6>> *bands) {
  // Get maximal perfect nest of 'affine.for' ops starting from root
  // (inclusive).
  auto getMaximalPerfectLoopNest = [&](AffineForOp root) {
    SmallVector<AffineForOp, 6> band;
    getPerfectlyNestedLoops(band, root);
    bands->push_back(band);
  };

  for (auto &block : f)
    for (auto &op : block)
      if (auto forOp = dyn_cast<AffineForOp>(op))
        getMaximalPerfectLoopNest(forOp);
}

/// Reduces each tile size to the largest divisor of the corresponding trip
/// count (if the trip count is known).
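///
/// For example (illustrative), with a known trip count of 9 and a requested
/// tile size of 4, the size is reduced to 3: sizes larger than half the trip
/// count are first capped at tripCount/2, then decremented until they divide
/// the trip count.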
static void adjustToDivisorsOfTripCounts(ArrayRef<AffineForOp> band,
                                         SmallVectorImpl<unsigned> *tileSizes) {
  assert(band.size() == tileSizes->size() && "invalid tile size count");
  for (unsigned i = 0, e = band.size(); i < e; i++) {
    unsigned &tSizeAdjusted = (*tileSizes)[i];
    auto mayConst = getConstantTripCount(band[i]);
    if (!mayConst)
      continue;
    // Adjust the tile size to the largest divisor of the trip count that is
    // no larger than the requested size (capping at half the trip count).
    uint64_t constTripCount = mayConst.getValue();
    if (constTripCount > 1 && tSizeAdjusted > constTripCount / 2)
      tSizeAdjusted = constTripCount / 2;
    while (constTripCount % tSizeAdjusted != 0)
      tSizeAdjusted--;
  }
}

// Returns tile sizes to use. Checks CL options; if none are specified, sets
// them based on a simple model that looks at the memory footprint and
// determines tile sizes assuming identity accesses / a footprint proportional
// to the tile size along each of the dimensions being tiled.
// TODO: evolve this model. Tile size determination is a large area
// to play with in general.
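//
// Worked example (illustrative, using the heuristic below): a footprint of
// 2 MiB with a 512 KiB cache gives excessFactor = ceil(2 MiB / 512 KiB) = 4;
// for a 2-d band each tile size becomes floor(4^(1/2)) = 2, and the last
// size is topped up so the product of the tile sizes covers the excess.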
void LoopTiling::getTileSizes(ArrayRef<AffineForOp> band,
                              SmallVectorImpl<unsigned> *tileSizes) {
  if (band.empty())
    return;

  // Use command-line tileSize for all loops if specified.
  if (tileSize) {
    tileSizes->assign(band.size(), tileSize);
    return;
  }

  // Use the tileSizes option if provided, padding to the band size with the
  // default tile size if the list is too short.
  if (!this->tileSizes.empty()) {
    tileSizes->assign(this->tileSizes.begin(), this->tileSizes.end());
    tileSizes->resize(band.size(), kDefaultTileSize);
    return;
  }
  tileSizes->resize(band.size());

  // The first loop in the band.
  auto rootForOp = band[0];
  (void)rootForOp;

  // Obtain memory footprint and set tile sizes so that a tile fits in
  // the cache size. This is an approximation with the assumption that the
  // footprint increases with the tile size linearly in that dimension (i.e.,
  // assumes one-to-one access function).
  auto fp = getMemoryFootprintBytes(band[0], 0);
  if (!fp) {
    // Fill with default tile sizes if footprint is unknown.
    std::fill(tileSizes->begin(), tileSizes->end(),
              LoopTiling::kDefaultTileSize);
    if (avoidMaxMinBounds)
      adjustToDivisorsOfTripCounts(band, tileSizes);
    LLVM_DEBUG(
        rootForOp.emitWarning("memory footprint unknown: using default tile "
                              "sizes adjusted to trip count divisors"));
    return;
  }

  // Check how many times larger the footprint is compared to the cache size.
  uint64_t cacheSizeBytes = cacheSizeInKiB * 1024;
  uint64_t excessFactor = llvm::divideCeil(fp.getValue(), cacheSizeBytes);
  if (excessFactor <= 1) {
    // No tiling needed; set all tile sizes to 1.
    std::fill(tileSizes->begin(), tileSizes->end(), 1);
    return;
  }

  // Divide all loops equally in an attempt to reduce footprint.
  // TODO: this is approximate. Ideally, obtain reuse factor /
  // profitability along each dimension and weight tile sizes based on that as
  // one possible approach. Or compute a polynomial in tile sizes and solve for
  // it.

  // For an n-d tileable band, compute the n^th root of the excess.
  unsigned tSize =
      static_cast<unsigned>(floorl(std::pow(excessFactor, 1.0 / band.size())));
  // We'll keep a running product to determine the last tile size better.
  unsigned cumulProductOfTileSizes = 1;
  for (unsigned i = 0, e = band.size(); i < e; i++) {
    if (i < e - 1)
      (*tileSizes)[i] = tSize;
    else
      // Set last tile size to cover the balance.
      (*tileSizes)[i] = std::max(
          1U, static_cast<unsigned>(excessFactor / cumulProductOfTileSizes));
    cumulProductOfTileSizes *= (*tileSizes)[i];
  }
  if (avoidMaxMinBounds)
    adjustToDivisorsOfTripCounts(band, tileSizes);
}

void LoopTiling::runOnFunction() {
  // Bands of loops to tile.
  std::vector<SmallVector<AffineForOp, 6>> bands;
  getTileableBands(getFunction(), &bands);

  // Tile each band.
  for (auto &band : bands) {
    // Set up tile sizes; fill missing tile sizes at the end with default tile
    // size or tileSize if one was provided.
    SmallVector<unsigned, 6> tileSizes;
    getTileSizes(band, &tileSizes);
    if (llvm::DebugFlag) {
      auto diag = band[0].emitRemark("using tile sizes [");
      for (auto tSize : tileSizes)
        diag << tSize << ' ';
      diag << "]\n";
    }
    SmallVector<AffineForOp, 6> tiledNest;
    if (failed(tilePerfectlyNested(band, tileSizes, &tiledNest)))
      return signalPassFailure();

    // Separate full and partial tiles.
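    // separateFullTiles (from LoopUtils) versions the intra-tile loops so
    // that full tiles execute without min/max bounds, with the remaining
    // partial tiles handled separately (a rough description; see its
    // documentation for the precise semantics).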
    if (separate) {
      auto intraTileLoops =
          MutableArrayRef<AffineForOp>(tiledNest).drop_front(band.size());
      separateFullTiles(intraTileLoops);
    }
  }
}

constexpr unsigned LoopTiling::kDefaultTileSize;