//===- LoopFusion.cpp - Code to perform loop fusion -----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements loop fusion.
//
//===----------------------------------------------------------------------===//

#include "PassDetail.h"
#include "mlir/Analysis/AffineAnalysis.h"
#include "mlir/Analysis/AffineStructures.h"
#include "mlir/Analysis/LoopAnalysis.h"
#include "mlir/Analysis/Utils.h"
#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/IR/Builders.h"
#include "mlir/Transforms/LoopFusionUtils.h"
#include "mlir/Transforms/LoopUtils.h"
#include "mlir/Transforms/Passes.h"
#include "mlir/Transforms/Utils.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include <iomanip>
#include <sstream>
#define DEBUG_TYPE "affine-loop-fusion"

using llvm::SetVector;

using namespace mlir;

namespace {
/// Loop fusion pass. This pass currently supports a greedy fusion policy,
/// which fuses loop nests with single-writer/single-reader memref dependences
/// with the goal of improving locality.

// TODO: Support fusion of source loop nests which write to multiple
// memrefs, where each memref can have multiple users (if profitable).
// TODO: Extend this pass to check for fusion preventing dependences,
// and add support for more general loop fusion algorithms.

struct LoopFusion : public AffineLoopFusionBase<LoopFusion> {
  LoopFusion() = default;
  LoopFusion(unsigned fastMemorySpace, uint64_t localBufSizeThresholdBytes,
             bool maximalFusion) {
    this->fastMemorySpace = fastMemorySpace;
    this->localBufSizeThreshold = localBufSizeThresholdBytes / 1024;
    this->maximalFusion = maximalFusion;
  }

  void runOnFunction() override;
};

} // end anonymous namespace

std::unique_ptr<OperationPass<FuncOp>>
mlir::createLoopFusionPass(unsigned fastMemorySpace,
                           uint64_t localBufSizeThreshold, bool maximalFusion) {
  return std::make_unique<LoopFusion>(fastMemorySpace, localBufSizeThreshold,
                                      maximalFusion);
}
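
// A minimal usage sketch of this factory (hypothetical driver code, not part
// of this pass; the pass manager setup below is an assumption about the
// surrounding API, not something this file defines):
//
//   PassManager pm(module.getContext());
//   // Buffers of up to 1 KiB may be placed in (hypothetical) memory space 2.
//   pm.addNestedPass<FuncOp>(createLoopFusionPass(
//       /*fastMemorySpace=*/2, /*localBufSizeThreshold=*/1024,
//       /*maximalFusion=*/false));
//   if (failed(pm.run(module)))
//     ...;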

// TODO: Replace when this is modeled through side-effects/op traits
static bool isMemRefDereferencingOp(Operation &op) {
  return isa<AffineReadOpInterface, AffineWriteOpInterface, AffineDmaStartOp,
             AffineDmaWaitOp>(op);
}

namespace {

// LoopNestStateCollector walks loop nests and collects load and store
// operations, and records whether a region-holding operation other than
// affine.for was encountered in the loop nest.
struct LoopNestStateCollector {
  SmallVector<AffineForOp, 4> forOps;
  SmallVector<Operation *, 4> loadOpInsts;
  SmallVector<Operation *, 4> storeOpInsts;
  bool hasNonForRegion = false;

  void collect(Operation *opToWalk) {
    opToWalk->walk([&](Operation *op) {
      if (isa<AffineForOp>(op))
        forOps.push_back(cast<AffineForOp>(op));
      else if (op->getNumRegions() != 0)
        hasNonForRegion = true;
      else if (isa<AffineReadOpInterface>(op))
        loadOpInsts.push_back(op);
      else if (isa<AffineWriteOpInterface>(op))
        storeOpInsts.push_back(op);
    });
  }
};
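
// For illustration, given a hypothetical nest such as:
//
//   affine.for %i = 0 to 16 {
//     %v = affine.load %A[%i] : memref<16xf32>
//     affine.store %v, %B[%i] : memref<16xf32>
//   }
//
// collect() records the affine.for in 'forOps', the load in 'loadOpInsts',
// and the store in 'storeOpInsts'; encountering any other region-holding op
// (e.g. affine.if) sets 'hasNonForRegion' instead.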

// MemRefDependenceGraph is a graph data structure where graph nodes are
// top-level operations in a FuncOp which contain load/store ops, and edges
// are memref dependences between the nodes.
// TODO: Add a more flexible dependence graph representation.
// TODO: Add a depth parameter to dependence graph construction.
struct MemRefDependenceGraph {
public:
  // Node represents a node in the graph. A Node is either an entire loop nest
  // rooted at the top level which contains loads/stores, or a top level
  // load/store.
  struct Node {
    // The unique identifier of this node in the graph.
    unsigned id;
    // The top-level statement which is (or contains) a load/store.
    Operation *op;
    // List of load operations.
    SmallVector<Operation *, 4> loads;
    // List of store op insts.
    SmallVector<Operation *, 4> stores;
    Node(unsigned id, Operation *op) : id(id), op(op) {}

    // Returns the load op count for 'memref'.
    unsigned getLoadOpCount(Value memref) {
      unsigned loadOpCount = 0;
      for (auto *loadOpInst : loads) {
        if (memref == cast<AffineReadOpInterface>(loadOpInst).getMemRef())
          ++loadOpCount;
      }
      return loadOpCount;
    }

    // Returns the store op count for 'memref'.
    unsigned getStoreOpCount(Value memref) {
      unsigned storeOpCount = 0;
      for (auto *storeOpInst : stores) {
        if (memref == cast<AffineWriteOpInterface>(storeOpInst).getMemRef())
          ++storeOpCount;
      }
      return storeOpCount;
    }

    // Collects in 'storeOps' all store ops in this node which access 'memref'.
    void getStoreOpsForMemref(Value memref,
                              SmallVectorImpl<Operation *> *storeOps) {
      for (auto *storeOpInst : stores) {
        if (memref == cast<AffineWriteOpInterface>(storeOpInst).getMemRef())
          storeOps->push_back(storeOpInst);
      }
    }

    // Collects in 'loadOps' all load ops in this node which access 'memref'.
    void getLoadOpsForMemref(Value memref,
                             SmallVectorImpl<Operation *> *loadOps) {
      for (auto *loadOpInst : loads) {
        if (memref == cast<AffineReadOpInterface>(loadOpInst).getMemRef())
          loadOps->push_back(loadOpInst);
      }
    }

    // Collects in 'loadAndStoreMemrefSet' all memrefs for which this node
    // has at least one load and one store operation.
    void getLoadAndStoreMemrefSet(DenseSet<Value> *loadAndStoreMemrefSet) {
      llvm::SmallDenseSet<Value, 2> loadMemrefs;
      for (auto *loadOpInst : loads) {
        loadMemrefs.insert(cast<AffineReadOpInterface>(loadOpInst).getMemRef());
      }
      for (auto *storeOpInst : stores) {
        auto memref = cast<AffineWriteOpInterface>(storeOpInst).getMemRef();
        if (loadMemrefs.count(memref) > 0)
          loadAndStoreMemrefSet->insert(memref);
      }
    }
  };

  // Edge represents a data dependence between nodes in the graph.
  struct Edge {
    // The id of the node at the other end of the edge.
    // If this edge is stored in Edge = Node.inEdges[i], then
    // 'Node.inEdges[i].id' is the identifier of the source node of the edge.
    // If this edge is stored in Edge = Node.outEdges[i], then
    // 'Node.outEdges[i].id' is the identifier of the dest node of the edge.
    unsigned id;
    // The SSA value on which this edge represents a dependence.
    // If the value is a memref, then the dependence is between graph nodes
    // which contain accesses to the same memref 'value'. If the value is a
    // non-memref value, then the dependence is between a graph node which
    // defines an SSA value and another graph node which uses the SSA value
    // (e.g. a constant operation defining a value which is used inside a loop
    // nest).
    Value value;
  };
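
  // For example, in the hypothetical IR below the graph would carry one
  // non-memref value edge (node 0 -> node 1, on %cf) and one memref
  // dependence edge (node 1 -> node 2, on %M):
  //
  //   %cf = constant 7.0 : f32             // node 0: top-level SSA producer
  //   affine.for %i = 0 to 10 {            // node 1: stores %cf into %M
  //     affine.store %cf, %M[%i] : memref<10xf32>
  //   }
  //   affine.for %i = 0 to 10 {            // node 2: reads %M
  //     %v = affine.load %M[%i] : memref<10xf32>
  //   }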

  // Map from node id to Node.
  DenseMap<unsigned, Node> nodes;
  // Map from node id to list of input edges.
  DenseMap<unsigned, SmallVector<Edge, 2>> inEdges;
  // Map from node id to list of output edges.
  DenseMap<unsigned, SmallVector<Edge, 2>> outEdges;
  // Map from memref to a count on the dependence edges associated with that
  // memref.
  DenseMap<Value, unsigned> memrefEdgeCount;
  // The next unique identifier to use for newly created graph nodes.
  unsigned nextNodeId = 0;

  MemRefDependenceGraph() {}

  // Initializes the dependence graph based on operations in 'f'.
  // Returns true on success, false otherwise.
  bool init(FuncOp f);

  // Returns the graph node for 'id'.
  Node *getNode(unsigned id) {
    auto it = nodes.find(id);
    assert(it != nodes.end());
    return &it->second;
  }

  // Returns the graph node for 'forOp'.
  Node *getForOpNode(AffineForOp forOp) {
    for (auto &idAndNode : nodes)
      if (idAndNode.second.op == forOp.getOperation())
        return &idAndNode.second;
    return nullptr;
  }

  // Adds a node with 'op' to the graph and returns its unique identifier.
  unsigned addNode(Operation *op) {
    Node node(nextNodeId++, op);
    nodes.insert({node.id, node});
    return node.id;
  }

  // Removes node 'id' (and its associated edges) from the graph.
  void removeNode(unsigned id) {
    // Remove each edge in 'inEdges[id]'.
    if (inEdges.count(id) > 0) {
      SmallVector<Edge, 2> oldInEdges = inEdges[id];
      for (auto &inEdge : oldInEdges) {
        removeEdge(inEdge.id, id, inEdge.value);
      }
    }
    // Remove each edge in 'outEdges[id]'.
    if (outEdges.count(id) > 0) {
      SmallVector<Edge, 2> oldOutEdges = outEdges[id];
      for (auto &outEdge : oldOutEdges) {
        removeEdge(id, outEdge.id, outEdge.value);
      }
    }
    // Erase remaining node state.
    inEdges.erase(id);
    outEdges.erase(id);
    nodes.erase(id);
  }

  // Returns true if node 'id' writes to any memref which escapes (or is an
  // argument to) the function/block. Returns false otherwise.
  bool writesToLiveInOrEscapingMemrefs(unsigned id) {
    Node *node = getNode(id);
    for (auto *storeOpInst : node->stores) {
      auto memref = cast<AffineWriteOpInterface>(storeOpInst).getMemRef();
      auto *op = memref.getDefiningOp();
      // Return true if 'memref' is a block argument.
      if (!op)
        return true;
      // Return true if any use of 'memref' escapes the function.
      for (auto *user : memref.getUsers())
        if (!isMemRefDereferencingOp(*user))
          return true;
    }
    return false;
  }

  // Returns the unique AffineWriteOpInterface in `node` that meets all the
  // following:
  // *) the store is the only one that writes to a function-local memref live
  //    out of `node`,
  // *) the store is not the source of a self-dependence on `node`.
  // Otherwise, returns a null AffineWriteOpInterface.
  AffineWriteOpInterface getUniqueOutgoingStore(Node *node) {
    AffineWriteOpInterface uniqueStore;

    // Return null if `node` doesn't have any outgoing edges.
    auto outEdgeIt = outEdges.find(node->id);
    if (outEdgeIt == outEdges.end())
      return nullptr;

    const auto &nodeOutEdges = outEdgeIt->second;
    for (auto *op : node->stores) {
      auto storeOp = cast<AffineWriteOpInterface>(op);
      auto memref = storeOp.getMemRef();
      // Skip this store if there are no dependences on its memref. This means
      // that this store either:
      // *) writes to a memref that is only read within the same loop nest
      //    (self-dependence edges are not represented in the graph at the
      //    moment),
      // *) writes to a function live out memref (function parameter), or
      // *) is dead.
      if (llvm::all_of(nodeOutEdges, [=](const Edge &edge) {
            return (edge.value != memref);
          }))
        continue;

      if (uniqueStore)
        // Found multiple stores to function-local live-out memrefs.
        return nullptr;
      // Found first store to a function-local live-out memref.
      uniqueStore = storeOp;
    }

    return uniqueStore;
  }

  // Returns true if node 'id' can be removed from the graph. Returns false
  // otherwise. A node can be removed from the graph iff the following
  // conditions are met:
  // *) The node does not write to any memref which escapes the function
  //    (or is a function/block argument).
  // *) The node has no successors in the dependence graph.
  bool canRemoveNode(unsigned id) {
    if (writesToLiveInOrEscapingMemrefs(id))
      return false;
    Node *node = getNode(id);
    for (auto *storeOpInst : node->stores) {
      // Return false if there exist out edges from 'id' on 'memref'.
      auto storeMemref = cast<AffineWriteOpInterface>(storeOpInst).getMemRef();
      if (getOutEdgeCount(id, storeMemref) > 0)
        return false;
    }
    return true;
  }

  // Returns true iff there is an edge from node 'srcId' to node 'dstId' which
  // is for 'value' if non-null, or for any value otherwise. Returns false
  // otherwise.
  bool hasEdge(unsigned srcId, unsigned dstId, Value value = nullptr) {
    if (outEdges.count(srcId) == 0 || inEdges.count(dstId) == 0) {
      return false;
    }
    bool hasOutEdge = llvm::any_of(outEdges[srcId], [=](Edge &edge) {
      return edge.id == dstId && (!value || edge.value == value);
    });
    bool hasInEdge = llvm::any_of(inEdges[dstId], [=](Edge &edge) {
      return edge.id == srcId && (!value || edge.value == value);
    });
    return hasOutEdge && hasInEdge;
  }

  // Adds an edge from node 'srcId' to node 'dstId' for 'value'.
  void addEdge(unsigned srcId, unsigned dstId, Value value) {
    if (!hasEdge(srcId, dstId, value)) {
      outEdges[srcId].push_back({dstId, value});
      inEdges[dstId].push_back({srcId, value});
      if (value.getType().isa<MemRefType>())
        memrefEdgeCount[value]++;
    }
  }

  // Removes an edge from node 'srcId' to node 'dstId' for 'value'.
  void removeEdge(unsigned srcId, unsigned dstId, Value value) {
    assert(inEdges.count(dstId) > 0);
    assert(outEdges.count(srcId) > 0);
    if (value.getType().isa<MemRefType>()) {
      assert(memrefEdgeCount.count(value) > 0);
      memrefEdgeCount[value]--;
    }
    // Remove 'srcId' from 'inEdges[dstId]'.
    for (auto it = inEdges[dstId].begin(); it != inEdges[dstId].end(); ++it) {
      if ((*it).id == srcId && (*it).value == value) {
        inEdges[dstId].erase(it);
        break;
      }
    }
    // Remove 'dstId' from 'outEdges[srcId]'.
    for (auto it = outEdges[srcId].begin(); it != outEdges[srcId].end(); ++it) {
      if ((*it).id == dstId && (*it).value == value) {
        outEdges[srcId].erase(it);
        break;
      }
    }
  }

  // Returns true if there is a path in the dependence graph from node 'srcId'
  // to node 'dstId'. Returns false otherwise.
  bool hasDependencePath(unsigned srcId, unsigned dstId) {
    // Worklist state is: <node-id, next-output-edge-index-to-visit>
    SmallVector<std::pair<unsigned, unsigned>, 4> worklist;
    worklist.push_back({srcId, 0});
    // Run DFS traversal to see if 'dstId' is reachable from 'srcId'.
    while (!worklist.empty()) {
      auto &idAndIndex = worklist.back();
      // Return true if we have reached 'dstId'.
      if (idAndIndex.first == dstId)
        return true;
      // Pop and continue if the node has no out edges, or if all out edges
      // have already been visited.
      if (outEdges.count(idAndIndex.first) == 0 ||
          idAndIndex.second == outEdges[idAndIndex.first].size()) {
        worklist.pop_back();
        continue;
      }
      // Get graph edge to traverse.
      Edge edge = outEdges[idAndIndex.first][idAndIndex.second];
      // Increment next output edge index for 'idAndIndex'.
      ++idAndIndex.second;
      // Add node at 'edge.id' to the worklist.
      worklist.push_back({edge.id, 0});
    }
    return false;
  }

  // Returns the input edge count for node 'id' and 'memref' from src nodes
  // which access 'memref' with a store operation.
  unsigned getIncomingMemRefAccesses(unsigned id, Value memref) {
    unsigned inEdgeCount = 0;
    if (inEdges.count(id) > 0)
      for (auto &inEdge : inEdges[id])
        if (inEdge.value == memref) {
          Node *srcNode = getNode(inEdge.id);
          // Only count in edges from 'srcNode' if 'srcNode' stores to 'memref'.
          if (srcNode->getStoreOpCount(memref) > 0)
            ++inEdgeCount;
        }
    return inEdgeCount;
  }

  // Returns the output edge count for node 'id' and 'memref' (if non-null),
  // otherwise returns the total output edge count from node 'id'.
  unsigned getOutEdgeCount(unsigned id, Value memref = nullptr) {
    unsigned outEdgeCount = 0;
    if (outEdges.count(id) > 0)
      for (auto &outEdge : outEdges[id])
        if (!memref || outEdge.value == memref)
          ++outEdgeCount;
    return outEdgeCount;
  }

  // Computes and returns an insertion point operation, before which the
  // fused <srcId, dstId> loop nest can be inserted while preserving
  // dependences. Returns nullptr if no such insertion point is found.
  Operation *getFusedLoopNestInsertionPoint(unsigned srcId, unsigned dstId) {
    if (outEdges.count(srcId) == 0)
      return getNode(dstId)->op;

    // Build set of insts in range (srcId, dstId) which depend on 'srcId'.
    SmallPtrSet<Operation *, 2> srcDepInsts;
    for (auto &outEdge : outEdges[srcId])
      if (outEdge.id != dstId)
        srcDepInsts.insert(getNode(outEdge.id)->op);

    // Build set of insts in range (srcId, dstId) on which 'dstId' depends.
    SmallPtrSet<Operation *, 2> dstDepInsts;
    for (auto &inEdge : inEdges[dstId])
      if (inEdge.id != srcId)
        dstDepInsts.insert(getNode(inEdge.id)->op);

    Operation *srcNodeInst = getNode(srcId)->op;
    Operation *dstNodeInst = getNode(dstId)->op;

    // Computing the insertion point:
    // *) Walk all operation positions in the Block operation list in the
    //    range (src, dst). For each operation 'op' visited in this search:
    //   *) Store in 'firstSrcDepPos' the first position where 'op' has a
    //      dependence edge from 'srcNode'.
    //   *) Store in 'lastDstDepPos' the last position where 'op' has a
    //      dependence edge to 'dstNode'.
    // *) Compare 'firstSrcDepPos' and 'lastDstDepPos' to determine the
    //    operation insertion point (or return nullptr if no such
    //    insertion point exists: 'firstSrcDepPos' <= 'lastDstDepPos').
    SmallVector<Operation *, 2> depInsts;
    Optional<unsigned> firstSrcDepPos;
    Optional<unsigned> lastDstDepPos;
    unsigned pos = 0;
    for (Block::iterator it = std::next(Block::iterator(srcNodeInst));
         it != Block::iterator(dstNodeInst); ++it) {
      Operation *op = &(*it);
      if (srcDepInsts.count(op) > 0 && firstSrcDepPos == None)
        firstSrcDepPos = pos;
      if (dstDepInsts.count(op) > 0)
        lastDstDepPos = pos;
      depInsts.push_back(op);
      ++pos;
    }

    if (firstSrcDepPos.hasValue()) {
      if (lastDstDepPos.hasValue()) {
        if (firstSrcDepPos.getValue() <= lastDstDepPos.getValue()) {
          // No valid insertion point exists which preserves dependences.
          return nullptr;
        }
      }
      // Return the insertion point at 'firstSrcDepPos'.
      return depInsts[firstSrcDepPos.getValue()];
    }
    // No dependence targets in range (or only dst deps in range); return the
    // 'dstNodeInst' insertion point.
    return dstNodeInst;
  }
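
  // As a worked example (hypothetical block layout): suppose ops [opA, opB]
  // sit between 'srcNodeInst' and 'dstNodeInst' at positions [0, 1], where
  // opA is a dependence source of 'dstId' (lastDstDepPos = 0) and opB depends
  // on 'srcId' (firstSrcDepPos = 1). Since 1 > 0, the fused nest is inserted
  // before opB. Had opB instead also fed 'dstId' (lastDstDepPos = 1 >=
  // firstSrcDepPos), no insertion point would preserve dependences and
  // nullptr would be returned.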

  // Updates edge mappings from node 'srcId' to node 'dstId' after 'oldMemRef'
  // has been replaced in the node at 'dstId' by a private memref, depending
  // on the value of 'createPrivateMemRef'.
  void updateEdges(unsigned srcId, unsigned dstId, Value oldMemRef,
                   bool createPrivateMemRef) {
    // For each edge in 'inEdges[srcId]': add new edge remapping to 'dstId'.
    if (inEdges.count(srcId) > 0) {
      SmallVector<Edge, 2> oldInEdges = inEdges[srcId];
      for (auto &inEdge : oldInEdges) {
        // Add edge from 'inEdge.id' to 'dstId' if it is not for 'oldMemRef'.
        if (inEdge.value != oldMemRef)
          addEdge(inEdge.id, dstId, inEdge.value);
      }
    }
    // For each edge in 'outEdges[srcId]': remove edge from 'srcId' to 'dstId'.
    if (outEdges.count(srcId) > 0) {
      SmallVector<Edge, 2> oldOutEdges = outEdges[srcId];
      for (auto &outEdge : oldOutEdges) {
        // Remove any out edges from 'srcId' to 'dstId' across memrefs.
        if (outEdge.id == dstId)
          removeEdge(srcId, outEdge.id, outEdge.value);
      }
    }
    // Remove any edges in 'inEdges[dstId]' on 'oldMemRef' (which is being
    // replaced by a private memref). These edges could come from nodes
    // other than 'srcId' which were removed in the previous step.
    if (inEdges.count(dstId) > 0 && createPrivateMemRef) {
      SmallVector<Edge, 2> oldInEdges = inEdges[dstId];
      for (auto &inEdge : oldInEdges)
        if (inEdge.value == oldMemRef)
          removeEdge(inEdge.id, dstId, inEdge.value);
    }
  }

  // Updates edge mappings for nodes 'sibId' and 'dstId' to reflect fusion
  // of sibling node 'sibId' into node 'dstId'.
  void updateEdges(unsigned sibId, unsigned dstId) {
    // For each edge in 'inEdges[sibId]':
    // *) Add new edge from source node 'inEdge.id' to 'dstNode'.
    // *) Remove edge from source node 'inEdge.id' to 'sibNode'.
    if (inEdges.count(sibId) > 0) {
      SmallVector<Edge, 2> oldInEdges = inEdges[sibId];
      for (auto &inEdge : oldInEdges) {
        addEdge(inEdge.id, dstId, inEdge.value);
        removeEdge(inEdge.id, sibId, inEdge.value);
      }
    }

    // For each edge in 'outEdges[sibId]':
    // *) Add new edge from 'dstId' to 'outEdge.id'.
    // *) Remove edge from 'sibId' to 'outEdge.id'.
    if (outEdges.count(sibId) > 0) {
      SmallVector<Edge, 2> oldOutEdges = outEdges[sibId];
      for (auto &outEdge : oldOutEdges) {
        addEdge(dstId, outEdge.id, outEdge.value);
        removeEdge(sibId, outEdge.id, outEdge.value);
      }
    }
  }

  // Adds ops in 'loads' and 'stores' to the node at 'id'.
  void addToNode(unsigned id, const SmallVectorImpl<Operation *> &loads,
                 const SmallVectorImpl<Operation *> &stores) {
    Node *node = getNode(id);
    for (auto *loadOpInst : loads)
      node->loads.push_back(loadOpInst);
    for (auto *storeOpInst : stores)
      node->stores.push_back(storeOpInst);
  }

  // Clears the load and store lists of the node at 'id'.
  void clearNodeLoadAndStores(unsigned id) {
    Node *node = getNode(id);
    node->loads.clear();
    node->stores.clear();
  }

  // Calls 'callback' for each input edge incident to node 'id' which carries a
  // memref dependence.
  void forEachMemRefInputEdge(unsigned id,
                              const std::function<void(Edge)> &callback) {
    if (inEdges.count(id) > 0)
      forEachMemRefEdge(inEdges[id], callback);
  }

  // Calls 'callback' for each output edge from node 'id' which carries a
  // memref dependence.
  void forEachMemRefOutputEdge(unsigned id,
                               const std::function<void(Edge)> &callback) {
    if (outEdges.count(id) > 0)
      forEachMemRefEdge(outEdges[id], callback);
  }

  // Calls 'callback' for each edge in 'edges' which carries a memref
  // dependence.
  void forEachMemRefEdge(ArrayRef<Edge> edges,
                         const std::function<void(Edge)> &callback) {
    for (auto &edge : edges) {
      // Skip if 'edge' is not a memref dependence edge.
      if (!edge.value.getType().isa<MemRefType>())
        continue;
      assert(nodes.count(edge.id) > 0);
      // Skip if 'edge.id' is not a loop nest.
      if (!isa<AffineForOp>(getNode(edge.id)->op))
        continue;
      // Visit current input edge 'edge'.
      callback(edge);
    }
  }

  void print(raw_ostream &os) const {
    os << "\nMemRefDependenceGraph\n";
    os << "\nNodes:\n";
    for (auto &idAndNode : nodes) {
      os << "Node: " << idAndNode.first << "\n";
      auto it = inEdges.find(idAndNode.first);
      if (it != inEdges.end()) {
        for (const auto &e : it->second)
          os << " InEdge: " << e.id << " " << e.value << "\n";
      }
      it = outEdges.find(idAndNode.first);
      if (it != outEdges.end()) {
        for (const auto &e : it->second)
          os << " OutEdge: " << e.id << " " << e.value << "\n";
      }
    }
  }
  void dump() const { print(llvm::errs()); }
};

} // end anonymous namespace

// Initializes the data dependence graph by walking operations in 'f'.
// Assigns each node in the graph a node id based on program order in 'f'.
// TODO: Add support for taking a Block arg to construct the
// dependence graph at a different depth.
bool MemRefDependenceGraph::init(FuncOp f) {
  DenseMap<Value, SetVector<unsigned>> memrefAccesses;

  // TODO: support multi-block functions.
  if (!llvm::hasSingleElement(f))
    return false;

  DenseMap<Operation *, unsigned> forToNodeMap;
  for (auto &op : f.front()) {
    if (auto forOp = dyn_cast<AffineForOp>(op)) {
      // Create graph node 'id' to represent top-level 'forOp' and record
      // all load and store accesses it contains.
      LoopNestStateCollector collector;
      collector.collect(&op);
      // Return false if a non 'affine.for' region was found (not currently
      // supported).
      if (collector.hasNonForRegion)
        return false;
      Node node(nextNodeId++, &op);
      for (auto *opInst : collector.loadOpInsts) {
        node.loads.push_back(opInst);
        auto memref = cast<AffineReadOpInterface>(opInst).getMemRef();
        memrefAccesses[memref].insert(node.id);
      }
      for (auto *opInst : collector.storeOpInsts) {
        node.stores.push_back(opInst);
        auto memref = cast<AffineWriteOpInterface>(opInst).getMemRef();
        memrefAccesses[memref].insert(node.id);
      }
      forToNodeMap[&op] = node.id;
      nodes.insert({node.id, node});
    } else if (auto loadOp = dyn_cast<AffineReadOpInterface>(op)) {
      // Create graph node for top-level load op.
      Node node(nextNodeId++, &op);
      node.loads.push_back(&op);
      auto memref = cast<AffineReadOpInterface>(op).getMemRef();
      memrefAccesses[memref].insert(node.id);
      nodes.insert({node.id, node});
    } else if (auto storeOp = dyn_cast<AffineWriteOpInterface>(op)) {
      // Create graph node for top-level store op.
      Node node(nextNodeId++, &op);
      node.stores.push_back(&op);
      auto memref = cast<AffineWriteOpInterface>(op).getMemRef();
      memrefAccesses[memref].insert(node.id);
      nodes.insert({node.id, node});
    } else if (op.getNumRegions() != 0) {
      // Return false if another region is found (not currently supported).
      return false;
    } else if (op.getNumResults() > 0 && !op.use_empty()) {
      // Create graph node for top-level producer of SSA values, which
      // could be used by loop nest nodes.
      Node node(nextNodeId++, &op);
      nodes.insert({node.id, node});
    }
  }

  // Add dependence edges between nodes which produce SSA values and their
  // users.
  for (auto &idAndNode : nodes) {
    const Node &node = idAndNode.second;
    if (!node.loads.empty() || !node.stores.empty())
      continue;
    auto *opInst = node.op;
    for (auto value : opInst->getResults()) {
      for (auto *user : value.getUsers()) {
        SmallVector<AffineForOp, 4> loops;
        getLoopIVs(*user, &loops);
        if (loops.empty())
          continue;
        assert(forToNodeMap.count(loops[0].getOperation()) > 0);
        unsigned userLoopNestId = forToNodeMap[loops[0].getOperation()];
        addEdge(node.id, userLoopNestId, value);
      }
    }
  }

  // Walk memref access lists and add graph edges between dependent nodes.
  for (auto &memrefAndList : memrefAccesses) {
    unsigned n = memrefAndList.second.size();
    for (unsigned i = 0; i < n; ++i) {
      unsigned srcId = memrefAndList.second[i];
      bool srcHasStore =
          getNode(srcId)->getStoreOpCount(memrefAndList.first) > 0;
      for (unsigned j = i + 1; j < n; ++j) {
        unsigned dstId = memrefAndList.second[j];
        bool dstHasStore =
            getNode(dstId)->getStoreOpCount(memrefAndList.first) > 0;
        if (srcHasStore || dstHasStore)
          addEdge(srcId, dstId, memrefAndList.first);
      }
    }
  }
  return true;
}
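
// For instance, for a hypothetical function body of the form:
//
//   affine.for %i = 0 to 10 {            // node 0
//     affine.store %v, %M[%i] : memref<10xf32>
//   }
//   affine.for %i = 0 to 10 {            // node 1
//     %u = affine.load %M[%i] : memref<10xf32>
//   }
//
// init() creates nodes 0 and 1 and, since node 0 stores to %M while node 1
// accesses it, adds the memref dependence edge 0 -> 1 on %M in the final
// walk of 'memrefAccesses'.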

// Removes load operations from 'srcLoads' which operate on 'memref', and
// adds them to 'dstLoads'.
static void moveLoadsAccessingMemrefTo(Value memref,
                                       SmallVectorImpl<Operation *> *srcLoads,
                                       SmallVectorImpl<Operation *> *dstLoads) {
  dstLoads->clear();
  SmallVector<Operation *, 4> srcLoadsToKeep;
  for (auto *load : *srcLoads) {
    if (cast<AffineReadOpInterface>(load).getMemRef() == memref)
      dstLoads->push_back(load);
    else
      srcLoadsToKeep.push_back(load);
  }
  srcLoads->swap(srcLoadsToKeep);
}

// Returns the innermost common loop depth for the set of operations in 'ops'.
static unsigned getInnermostCommonLoopDepth(ArrayRef<Operation *> ops) {
  unsigned numOps = ops.size();
  assert(numOps > 0);

  std::vector<SmallVector<AffineForOp, 4>> loops(numOps);
  unsigned loopDepthLimit = std::numeric_limits<unsigned>::max();
  for (unsigned i = 0; i < numOps; ++i) {
    getLoopIVs(*ops[i], &loops[i]);
    loopDepthLimit =
        std::min(loopDepthLimit, static_cast<unsigned>(loops[i].size()));
  }

  unsigned loopDepth = 0;
  for (unsigned d = 0; d < loopDepthLimit; ++d) {
    unsigned i;
    for (i = 1; i < numOps; ++i) {
      if (loops[i - 1][d] != loops[i][d])
        break;
    }
    if (i != numOps)
      break;
    ++loopDepth;
  }
  return loopDepth;
}
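
// As a worked example (hypothetical nests): if op0 is nested under loops
// (%i, %j) and op1 under loops (%i, %k), the scan matches at d = 0
// (%i == %i), breaks at d = 1 (%j != %k), and returns a common depth of 1.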

// Returns the maximum loop depth at which no dependences between 'loadOpInsts'
// and 'storeOpInsts' are satisfied.
static unsigned getMaxLoopDepth(ArrayRef<Operation *> loadOpInsts,
                                ArrayRef<Operation *> storeOpInsts) {
  // Merge loads and stores into the same array.
  SmallVector<Operation *, 2> ops(loadOpInsts.begin(), loadOpInsts.end());
  ops.append(storeOpInsts.begin(), storeOpInsts.end());

  // Compute the innermost common loop depth for loads and stores.
  unsigned loopDepth = getInnermostCommonLoopDepth(ops);

  // Return common loop depth for loads if there are no store ops.
  if (storeOpInsts.empty())
    return loopDepth;

  // Check dependences on all pairs of ops in 'ops' and store the minimum
  // loop depth at which a dependence is satisfied.
  for (unsigned i = 0, e = ops.size(); i < e; ++i) {
    auto *srcOpInst = ops[i];
    MemRefAccess srcAccess(srcOpInst);
    for (unsigned j = 0; j < e; ++j) {
      auto *dstOpInst = ops[j];
      MemRefAccess dstAccess(dstOpInst);

      unsigned numCommonLoops =
          getNumCommonSurroundingLoops(*srcOpInst, *dstOpInst);
      for (unsigned d = 1; d <= numCommonLoops + 1; ++d) {
        FlatAffineConstraints dependenceConstraints;
        // TODO: Cache dependence analysis results, check cache here.
        DependenceResult result = checkMemrefAccessDependence(
            srcAccess, dstAccess, d, &dependenceConstraints,
            /*dependenceComponents=*/nullptr);
        if (hasDependence(result)) {
          // Store the minimum loop depth and break because we want the min
          // 'd' at which there is a dependence.
          loopDepth = std::min(loopDepth, d - 1);
          break;
        }
      }
    }
  }
  return loopDepth;
}
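
// For example (hypothetical accesses): if two ops share surrounding loops
// (%i, %j) and their dependence is first satisfied at depth d = 2 (i.e.
// carried by %j), the loop above sets loopDepth = min(loopDepth, 2 - 1) = 1,
// so fusion may only be performed outside the dependence-carrying loop.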

// Sinks all sequential loops to the innermost levels (while preserving
// relative order among them) and moves all parallel loops to the
// outermost (while again preserving relative order among them).
// This can increase the loop depth at which we can fuse a slice, since we are
// pushing loop carried dependences to a greater depth in the loop nest.
static void sinkSequentialLoops(MemRefDependenceGraph::Node *node) {
  assert(isa<AffineForOp>(node->op));
  AffineForOp newRootForOp = sinkSequentialLoops(cast<AffineForOp>(node->op));
  node->op = newRootForOp.getOperation();
}

// TODO: improve/complete this when we have target data.
static unsigned getMemRefEltSizeInBytes(MemRefType memRefType) {
  auto elementType = memRefType.getElementType();

  unsigned sizeInBits;
  if (elementType.isIntOrFloat()) {
    sizeInBits = elementType.getIntOrFloatBitWidth();
  } else {
    auto vectorType = elementType.cast<VectorType>();
    sizeInBits =
        vectorType.getElementTypeBitWidth() * vectorType.getNumElements();
  }
  return llvm::divideCeil(sizeInBits, 8);
}
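
// For example, memref<16x4xf32> has 4-byte elements, while a hypothetical
// memref<8xvector<4xf32>> yields 4 x 32 = 128 bits, i.e. 16 bytes per
// element; divideCeil rounds sub-byte widths (e.g. i1) up to one byte.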

// Creates and returns a private (single-user) memref for the fused loop
// rooted at 'forOp', with a (potentially reduced) memref size based on the
// MemRefRegion written to by 'srcStoreOpInst' at depth 'dstLoopDepth'.
// TODO: consider refactoring the common code from generateDma and
// this one.
static Value createPrivateMemRef(AffineForOp forOp, Operation *srcStoreOpInst,
                                 unsigned dstLoopDepth,
                                 Optional<unsigned> fastMemorySpace,
                                 uint64_t localBufSizeThreshold) {
  auto *forInst = forOp.getOperation();

  // Create builder to insert alloc op just before 'forOp'.
  OpBuilder b(forInst);
  // Builder to create constants at the top level.
  OpBuilder top(forInst->getParentOfType<FuncOp>().getBody());
  // Create new memref type based on slice bounds.
  auto oldMemRef = cast<AffineWriteOpInterface>(srcStoreOpInst).getMemRef();
  auto oldMemRefType = oldMemRef.getType().cast<MemRefType>();
  unsigned rank = oldMemRefType.getRank();

  // Compute MemRefRegion for 'srcStoreOpInst' at depth 'dstLoopDepth'.
  MemRefRegion region(srcStoreOpInst->getLoc());
  bool validRegion = succeeded(region.compute(srcStoreOpInst, dstLoopDepth));
  (void)validRegion;
  assert(validRegion && "unexpected memref region failure");
  SmallVector<int64_t, 4> newShape;
  std::vector<SmallVector<int64_t, 4>> lbs;
  SmallVector<int64_t, 8> lbDivisors;
  lbs.reserve(rank);
  // Query 'region' for 'newShape' and lower bounds of the MemRefRegion
  // accessed by 'srcStoreOpInst' at depth 'dstLoopDepth'.
  Optional<int64_t> numElements =
      region.getConstantBoundingSizeAndShape(&newShape, &lbs, &lbDivisors);
  assert(numElements.hasValue() &&
         "non-constant number of elts in local buffer");

  const FlatAffineConstraints *cst = region.getConstraints();
  // 'outerIVs' holds the values that this memory region is symbolic/parametric
  // on; this would correspond to loop IVs surrounding the level at which the
  // slice is being materialized.
  SmallVector<Value, 8> outerIVs;
  cst->getIdValues(rank, cst->getNumIds(), &outerIVs);

  // Build 'rank' AffineExprs from MemRefRegion 'lbs'.
  SmallVector<AffineExpr, 4> offsets;
  offsets.reserve(rank);
  for (unsigned d = 0; d < rank; ++d) {
    assert(lbs[d].size() == cst->getNumCols() - rank && "incorrect bound size");

    AffineExpr offset = top.getAffineConstantExpr(0);
    for (unsigned j = 0, e = cst->getNumCols() - rank - 1; j < e; j++) {
      offset = offset + lbs[d][j] * top.getAffineDimExpr(j);
    }
    assert(lbDivisors[d] > 0);
    offset =
        (offset + lbs[d][cst->getNumCols() - 1 - rank]).floorDiv(lbDivisors[d]);
    offsets.push_back(offset);
  }

  // Create 'newMemRefType' using 'newShape' from the MemRefRegion accessed
  // by 'srcStoreOpInst'.
  uint64_t bufSize =
      getMemRefEltSizeInBytes(oldMemRefType) * numElements.getValue();
  unsigned newMemSpace;
  if (bufSize <= localBufSizeThreshold && fastMemorySpace.hasValue()) {
    newMemSpace = fastMemorySpace.getValue();
  } else {
    newMemSpace = oldMemRefType.getMemorySpace();
  }
  auto newMemRefType = MemRefType::get(newShape, oldMemRefType.getElementType(),
                                       {}, newMemSpace);

  // Create new private memref for the fused loop 'forOp'. 'newShape' is
  // always a constant shape.
  // TODO: Create/move alloc ops for private memrefs closer to their
  // consumer loop nests to reduce their live range. Currently they are added
  // at the beginning of the function, because loop nests can be reordered
  // during the fusion pass.
  Value newMemRef = top.create<AllocOp>(forOp.getLoc(), newMemRefType);

  // Build an AffineMap to remap access functions based on lower bound offsets.
  SmallVector<AffineExpr, 4> remapExprs;
  remapExprs.reserve(rank);
  unsigned zeroOffsetCount = 0;
  for (unsigned i = 0; i < rank; i++) {
    if (auto constExpr = offsets[i].dyn_cast<AffineConstantExpr>())
      if (constExpr.getValue() == 0)
        ++zeroOffsetCount;
    auto dimExpr = b.getAffineDimExpr(outerIVs.size() + i);

    auto remapExpr =
        simplifyAffineExpr(dimExpr - offsets[i], outerIVs.size() + rank, 0);
    remapExprs.push_back(remapExpr);
  }
  auto indexRemap = zeroOffsetCount == rank
                        ? AffineMap()
                        : AffineMap::get(outerIVs.size() + rank, 0, remapExprs,
                                         forOp.getContext());
  // Replace all users of 'oldMemRef' with 'newMemRef'.
  LogicalResult res =
      replaceAllMemRefUsesWith(oldMemRef, newMemRef, {}, indexRemap,
                               /*extraOperands=*/outerIVs,
                               /*symbolOperands=*/{},
                               /*domInstFilter=*/&*forOp.getBody()->begin());
  assert(succeeded(res) &&
         "replaceAllMemrefUsesWith should always succeed here");
  (void)res;
  return newMemRef;
}
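
// As an illustration (hypothetical before/after IR): if the source nest only
// writes %M[%i] for the destination iteration's value of %i, the original
//
//   %M = alloc() : memref<256xf32>
//
// can be replaced, for the fused nest, by a single-element private buffer
//
//   %newM = alloc() : memref<1xf32>
//
// with 'indexRemap' (d0 - offset) folding each access to element 0 of the
// shrunk dimension.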

/// Walking from node 'srcId' to node 'dstId' (exclusive of 'srcId' and
/// 'dstId'), if there is any non-affine operation accessing 'memref', return
/// true. Otherwise, return false.
static bool hasNonAffineUsersOnThePath(unsigned srcId, unsigned dstId,
                                       Value memref,
                                       MemRefDependenceGraph *mdg) {
  auto *srcNode = mdg->getNode(srcId);
  auto *dstNode = mdg->getNode(dstId);
  Value::user_range users = memref.getUsers();
  // For each MemRefDependenceGraph's node that is between 'srcNode' and
  // 'dstNode' (exclusive of 'srcNode' and 'dstNode'), check whether any
  // non-affine operation in the node accesses the 'memref'.
  for (auto &idAndNode : mdg->nodes) {
    Operation *op = idAndNode.second.op;
    // Take care of operations between 'srcNode' and 'dstNode'.
    if (srcNode->op->isBeforeInBlock(op) && op->isBeforeInBlock(dstNode->op)) {
      // Walk inside the operation to find any use of the memref.
      // Interrupt the walk if found.
      auto walkResult = op->walk([&](Operation *user) {
        // Skip affine ops.
        if (isMemRefDereferencingOp(*user))
          return WalkResult::advance();
        // Find a non-affine op that uses the memref.
        if (llvm::is_contained(users, user))
          return WalkResult::interrupt();
        return WalkResult::advance();
      });
      if (walkResult.wasInterrupted())
        return true;
    }
  }
  return false;
}

/// Check whether a memref value used in node 'srcId' has a non-affine user
/// that is between node 'srcId' and node 'dstId' (exclusive of 'srcNode' and
/// 'dstNode').
static bool hasNonAffineUsersOnThePath(unsigned srcId, unsigned dstId,
                                       MemRefDependenceGraph *mdg) {
  // Collect memref values in node 'srcId'.
  auto *srcNode = mdg->getNode(srcId);
  llvm::SmallDenseSet<Value, 2> memRefValues;
  srcNode->op->walk([&](Operation *op) {
    // Skip affine ops.
    if (isa<AffineForOp>(op))
      return WalkResult::advance();
    for (Value v : op->getOperands())
      // Collect memref values only.
      if (v.getType().isa<MemRefType>())
        memRefValues.insert(v);
    return WalkResult::advance();
  });
  // Look for users between node 'srcId' and node 'dstId'.
  for (Value memref : memRefValues)
    if (hasNonAffineUsersOnThePath(srcId, dstId, memref, mdg))
      return true;
  return false;
}

// Checks if node 'srcId' can be safely fused into node 'dstId'. Node 'srcId'
// may write to multiple memrefs but it is required that only one of them,
// 'srcLiveOutStoreOp', has output edges.
// Returns true if 'dstNode's read/write region to 'memref' is a superset of
// 'srcNode's write region to 'memref' and 'srcId' has only one output edge.
// TODO: Generalize this to handle more live in/out cases.
static bool
canFuseSrcWhichWritesToLiveOut(unsigned srcId, unsigned dstId,
                               AffineWriteOpInterface srcLiveOutStoreOp,
                               MemRefDependenceGraph *mdg) {
  assert(srcLiveOutStoreOp && "Expected a valid store op");
  auto *dstNode = mdg->getNode(dstId);
  Value memref = srcLiveOutStoreOp.getMemRef();
  // Return false if 'srcNode' has more than one output edge on 'memref'.
  if (mdg->getOutEdgeCount(srcId, memref) > 1)
    return false;

  // Compute MemRefRegion 'srcWriteRegion' for 'srcLiveOutStoreOp' on 'memref'.
  MemRefRegion srcWriteRegion(srcLiveOutStoreOp.getLoc());
  if (failed(srcWriteRegion.compute(srcLiveOutStoreOp, /*loopDepth=*/0))) {
    LLVM_DEBUG(llvm::dbgs()
               << "Unable to compute MemRefRegion for source operation.\n");
    return false;
  }
  SmallVector<int64_t, 4> srcShape;
  // Query 'srcWriteRegion' for 'srcShape' and 'srcNumElements' written to
  // by 'srcLiveOutStoreOp'.
  Optional<int64_t> srcNumElements =
      srcWriteRegion.getConstantBoundingSizeAndShape(&srcShape);
  if (!srcNumElements.hasValue())
    return false;

  // Compute MemRefRegion 'dstRegion' for 'dstStore/LoadOpInst' on 'memref'.
  // TODO: Compute 'unionboundingbox' of all write regions (one for
  // each store op in 'dstStoreOps').
  SmallVector<Operation *, 2> dstStoreOps;
  dstNode->getStoreOpsForMemref(memref, &dstStoreOps);
  SmallVector<Operation *, 2> dstLoadOps;
  dstNode->getLoadOpsForMemref(memref, &dstLoadOps);

  auto *dstOpInst = dstStoreOps.empty() ? dstLoadOps[0] : dstStoreOps[0];
  MemRefRegion dstRegion(dstOpInst->getLoc());
  if (failed(dstRegion.compute(dstOpInst, /*loopDepth=*/0))) {
    LLVM_DEBUG(llvm::dbgs()
               << "Unable to compute MemRefRegion for dest operation.\n");
    return false;
  }
  SmallVector<int64_t, 4> dstShape;
  // Query 'dstRegion' for 'dstShape' and 'dstNumElements' accessed
  // by 'dstOpInst'.
  Optional<int64_t> dstNumElements =
      dstRegion.getConstantBoundingSizeAndShape(&dstShape);
  if (!dstNumElements.hasValue())
    return false;

  // Return false if 'dstNode's write region is not a superset of 'srcNode's
  // write region to 'memref'.
  // TODO: Check the shape and lower bounds here too.
  if (srcNumElements != dstNumElements)
    return false;

  // Return false if 'memref' is used by a non-affine operation that is
  // between node 'srcId' and node 'dstId'.
  if (hasNonAffineUsersOnThePath(srcId, dstId, mdg))
    return false;

  return true;
}

// Checks the profitability of fusing a backwards slice of the loop nest
// surrounding 'srcOpInst' into the loop nest surrounding 'dstLoadOpInsts'.
// The argument 'srcStoreOpInst' is used to calculate the storage reduction on
// the memref being produced and consumed, which is an input to the cost model.
// For producer-consumer fusion, 'srcStoreOpInst' will be the same as
// 'srcOpInst', as we are slicing w.r.t. that producer.
// For input-reuse fusion, 'srcOpInst' will be the src loop nest LoadOp which
// reads from the same memref as dst loop nest load ops, and 'srcStoreOpInst'
// will be the unique store op in the src node, which will be used to check
// that the write region is the same after input-reuse fusion.
// Returns true if it is profitable to fuse the candidate loop nests. Returns
// false otherwise. `dstLoopDepth` is set to the most profitable depth at which
// to materialize the source loop nest slice.
// The profitability model executes the following steps:
// *) Computes the backward computation slice at 'srcOpInst'. This
//    computation slice of the loop nest surrounding 'srcOpInst' is
//    represented by modified src loop bounds in 'sliceState', which are
//    functions of loop IVs in the loop nest surrounding 'srcOpInst'.
// *) Computes the cost of unfused src/dst loop nests (currently the cost of a
//    loop nest is the total number of dynamic operation instances in the loop
//    nest).
// *) Computes the cost of fusing a slice of the src loop nest into the dst
//    loop nest at various values of dst loop depth, attempting to fuse
//    the largest computation slice at the maximal dst loop depth (closest to
//    the load) to minimize reuse distance and potentially enable subsequent
//    load/store forwarding.
//    NOTE: If the dst loop nest includes multiple loads in 'dstLoadOpInsts'
//    for the same memref as is written by 'srcOpInst', then the union of
//    slice loop bounds is used to compute the slice and associated slice
//    cost.
//    NOTE: 'dstLoopDepth' refers to the loop depth within the destination
//    loop nest, at which the src computation slice is inserted/fused.
//    NOTE: We attempt to maximize the dst loop depth, but there are cases
//    where a particular setting for 'dstLoopNest' might fuse an unsliced
//    loop (within the src computation slice) at a depth which results in
//    excessive recomputation (see unit tests for examples).
// *) Compares the total cost of the unfused loop nests to the min cost fused
//    loop nest computed in the previous step, and returns true if the latter
//    is lower.
static bool isFusionProfitable(Operation *srcOpInst, Operation *srcStoreOpInst,
                               ArrayRef<Operation *> dstLoadOpInsts,
                               ArrayRef<Operation *> dstStoreOpInsts,
                               ComputationSliceState *sliceState,
                               unsigned *dstLoopDepth, bool maximalFusion,
                               double computeToleranceThreshold) {
  LLVM_DEBUG({
    llvm::dbgs() << "Checking whether fusion is profitable between src op:\n";
    llvm::dbgs() << ' ' << *srcOpInst << " and destination op(s)\n";
    for (auto dstOpInst : dstLoadOpInsts) {
      llvm::dbgs() << " " << *dstOpInst << "\n";
    }
  });

  // Compute cost of sliced and unsliced src loop nest.
  SmallVector<AffineForOp, 4> srcLoopIVs;
  getLoopIVs(*srcOpInst, &srcLoopIVs);
  unsigned numSrcLoopIVs = srcLoopIVs.size();

  // Walk src loop nest and collect stats.
  LoopNestStats srcLoopNestStats;
  if (!getLoopNestStats(srcLoopIVs[0], &srcLoopNestStats))
    return false;

  // Compute cost of dst loop nest.
  SmallVector<AffineForOp, 4> dstLoopIVs;
  getLoopIVs(*dstLoadOpInsts[0], &dstLoopIVs);

  LoopNestStats dstLoopNestStats;
  if (!getLoopNestStats(dstLoopIVs[0], &dstLoopNestStats))
    return false;

  // Compute the maximum loop depth at which we can insert the src slice
  // and still satisfy dest loop nest dependences, for producer-consumer
  // fusion.
  unsigned maxDstLoopDepth =
      (srcOpInst == srcStoreOpInst)
          ? getMaxLoopDepth(dstLoadOpInsts, dstStoreOpInsts)
          : dstLoopIVs.size();
  if (maxDstLoopDepth == 0) {
    LLVM_DEBUG(llvm::dbgs() << "Can't fuse: maxDstLoopDepth == 0.\n");
    return false;
  }

  // Search for min cost value for 'dstLoopDepth'. At each value of
  // 'dstLoopDepth' from 'maxDstLoopDepth' to '1', compute computation slice
  // bounds between 'srcOpInst' and each op in 'dstLoadOpInsts' (taking the
  // union of these bounds). Next the union slice bounds are used to calculate
  // the cost of the slice and the cost of the slice inserted into the dst
  // loop nest at 'dstLoopDepth'.
  uint64_t minFusedLoopNestComputeCost = std::numeric_limits<uint64_t>::max();
  double maxStorageReduction = 0.0;
  Optional<uint64_t> sliceMemEstimate = None;

  SmallVector<ComputationSliceState, 4> sliceStates;
  sliceStates.resize(maxDstLoopDepth);
  // The best loop depth at which to materialize the slice.
  Optional<unsigned> bestDstLoopDepth = None;

  // Compute op instance count for the src loop nest without iteration slicing.
  uint64_t srcLoopNestCost = getComputeCost(srcLoopIVs[0], srcLoopNestStats);

  // Compute src loop nest write region size.
  MemRefRegion srcWriteRegion(srcStoreOpInst->getLoc());
  if (failed(srcWriteRegion.compute(srcStoreOpInst, /*loopDepth=*/0))) {
    LLVM_DEBUG(llvm::dbgs()
               << "Unable to compute MemRefRegion for source operation.\n");
    return false;
  }

  Optional<int64_t> maybeSrcWriteRegionSizeBytes =
      srcWriteRegion.getRegionSize();
  if (!maybeSrcWriteRegionSizeBytes.hasValue())
    return false;
  int64_t srcWriteRegionSizeBytes = maybeSrcWriteRegionSizeBytes.getValue();

  // Compute op instance count for the dst loop nest.
  uint64_t dstLoopNestCost = getComputeCost(dstLoopIVs[0], dstLoopNestStats);

  // Evaluate all depth choices for materializing the slice in the destination
  // loop nest.
  for (unsigned i = maxDstLoopDepth; i >= 1; --i) {
    // Compute the union of slice bounds of all ops in 'dstLoadOpInsts'.
    if (failed(mlir::computeSliceUnion({srcOpInst}, dstLoadOpInsts,
                                       /*loopDepth=*/i,
                                       /*numCommonLoops=*/0,
                                       /*isBackwardSlice=*/true,
                                       &sliceStates[i - 1]))) {
      LLVM_DEBUG(llvm::dbgs()
                 << "computeSliceUnion failed for loopDepth: " << i << "\n");
      continue;
    }

    int64_t fusedLoopNestComputeCost;
    if (!getFusionComputeCost(srcLoopIVs[0], srcLoopNestStats, dstLoopIVs[0],
                              dstLoopNestStats, &sliceStates[i - 1],
                              &fusedLoopNestComputeCost)) {
      LLVM_DEBUG(llvm::dbgs() << "Unable to compute fusion compute cost.\n");
      continue;
    }

    double additionalComputeFraction =
        fusedLoopNestComputeCost /
            (static_cast<double>(srcLoopNestCost) + dstLoopNestCost) -
        1;

    // Determine what the slice write MemRefRegion would be, if the src loop
    // nest slice 'sliceStates[i - 1]' were to be inserted into the dst loop
    // nest at loop depth 'i'.
    MemRefRegion sliceWriteRegion(srcStoreOpInst->getLoc());
    if (failed(sliceWriteRegion.compute(srcStoreOpInst, /*loopDepth=*/0,
                                        &sliceStates[i - 1]))) {
      LLVM_DEBUG(llvm::dbgs()
                 << "Failed to compute slice write region at loopDepth: " << i
                 << "\n");
      continue;
    }

    Optional<int64_t> maybeSliceWriteRegionSizeBytes =
        sliceWriteRegion.getRegionSize();
    if (!maybeSliceWriteRegionSizeBytes.hasValue() ||
        maybeSliceWriteRegionSizeBytes.getValue() == 0) {
      LLVM_DEBUG(llvm::dbgs()
                 << "Failed to get slice write region size at loopDepth: " << i
                 << "\n");
      continue;
    }
    int64_t sliceWriteRegionSizeBytes =
        maybeSliceWriteRegionSizeBytes.getValue();

    // If we are fusing for reuse, check that write regions remain the same.
    // TODO: Write region check should check sizes and offsets in
    // each dimension, so that we are sure they are covering the same memref
    // region. Also, move this out to a isMemRefRegionSuperSet helper function.
    if (srcOpInst != srcStoreOpInst &&
        sliceWriteRegionSizeBytes != srcWriteRegionSizeBytes)
      continue;

    double storageReduction = static_cast<double>(srcWriteRegionSizeBytes) /
                              static_cast<double>(sliceWriteRegionSizeBytes);

    LLVM_DEBUG({
      std::stringstream msg;
      msg << " evaluating fusion profitability at depth : " << i << "\n"
          << std::fixed << std::setprecision(2)
          << " additional compute fraction: "
          << 100.0 * additionalComputeFraction << "%\n"
          << " storage reduction factor: " << storageReduction << "x\n"
          << " fused nest cost: " << fusedLoopNestComputeCost << "\n"
          << " src write region size: " << srcWriteRegionSizeBytes << "\n"
          << " slice write region size: " << sliceWriteRegionSizeBytes
          << "\n";
      llvm::dbgs() << msg.str();
    });

    // TODO: This is a placeholder cost model.
    // Among all choices that add an acceptable amount of redundant computation
    // (as per computeToleranceThreshold), we will simply pick the one that
    // reduces the intermediary size the most.
    if ((storageReduction > maxStorageReduction) &&
        (maximalFusion ||
         (additionalComputeFraction < computeToleranceThreshold))) {
      maxStorageReduction = storageReduction;
      bestDstLoopDepth = i;
      minFusedLoopNestComputeCost = fusedLoopNestComputeCost;
      sliceMemEstimate = sliceWriteRegionSizeBytes;
    }
  }
1284
1285 // A simple cost model: fuse if it reduces the memory footprint. If
1286 // -maximal-fusion is set, fuse nevertheless.
1287
1288 if (!maximalFusion && !bestDstLoopDepth.hasValue()) {
1289 LLVM_DEBUG(
1290 llvm::dbgs()
1291 << "All fusion choices involve more than the threshold amount of "
1292 "redundant computation; NOT fusing.\n");
1293 return false;
1294 }
1295
1296 if (!bestDstLoopDepth.hasValue()) {
1297 LLVM_DEBUG(llvm::dbgs() << "no fusion depth could be evaluated.\n");
1298 return false;
1299 }
1300
1301 // Set dstLoopDepth based on best values from search.
1302 *dstLoopDepth = bestDstLoopDepth.getValue();
1303
1304 LLVM_DEBUG(
1305 llvm::dbgs() << " LoopFusion fusion stats:"
1306 << "\n best loop depth: " << bestDstLoopDepth
1307 << "\n src loop nest compute cost: " << srcLoopNestCost
1308 << "\n dst loop nest compute cost: " << dstLoopNestCost
1309 << "\n fused loop nest compute cost: "
1310 << minFusedLoopNestComputeCost << "\n");
1311
1312 auto dstMemSize = getMemoryFootprintBytes(dstLoopIVs[0]);
1313 auto srcMemSize = getMemoryFootprintBytes(srcLoopIVs[0]);
1314
1315 Optional<double> storageReduction = None;
1316
1317 if (!maximalFusion) {
1318 if (!dstMemSize.hasValue() || !srcMemSize.hasValue()) {
1319 LLVM_DEBUG(
1320 llvm::dbgs()
1321 << " fusion memory benefit cannot be evaluated; NOT fusing.\n");
1322 return false;
1323 }
1324
1325 auto srcMemSizeVal = srcMemSize.getValue();
1326 auto dstMemSizeVal = dstMemSize.getValue();
1327
1328 assert(sliceMemEstimate.hasValue() && "expected value");
1329 auto fusedMem = dstMemSizeVal + sliceMemEstimate.getValue();
1330
1331 LLVM_DEBUG(llvm::dbgs() << " src mem: " << srcMemSizeVal << "\n"
1332 << " dst mem: " << dstMemSizeVal << "\n"
1333 << " fused mem: " << fusedMem << "\n"
1334 << " slice mem: " << sliceMemEstimate << "\n");
1335
1336 if (static_cast<long>(fusedMem) > srcMemSizeVal + dstMemSizeVal) {
1337 LLVM_DEBUG(llvm::dbgs() << "Fusion is not profitable; NOT fusing.\n");
1338 return false;
1339 }
1340 storageReduction =
1341 100.0 *
1342 (1.0 - fusedMem / (static_cast<double>(srcMemSizeVal) + dstMemSizeVal));
1343 }
1344
1345 double additionalComputeFraction =
1346 100.0 * (minFusedLoopNestComputeCost /
1347 (static_cast<double>(srcLoopNestCost) + dstLoopNestCost) -
1348 1);
  (void)additionalComputeFraction;
  LLVM_DEBUG({
    std::stringstream msg;
    msg << " fusion is most profitable at depth " << *dstLoopDepth << " with "
        << std::setprecision(2) << additionalComputeFraction
        << "% redundant computation and a ";
    msg << (storageReduction.hasValue()
                ? std::to_string(storageReduction.getValue())
                : "<unknown>");
    msg << "% storage reduction.\n";
    llvm::dbgs() << msg.str();
  });

  // Update return parameter 'sliceState' with 'bestSliceState'.
  ComputationSliceState *bestSliceState = &sliceStates[*dstLoopDepth - 1];
  sliceState->lbs = bestSliceState->lbs;
  sliceState->ubs = bestSliceState->ubs;
  sliceState->lbOperands = bestSliceState->lbOperands;
  sliceState->ubOperands = bestSliceState->ubOperands;

  // Canonicalize slice bound affine maps.
  for (unsigned i = 0; i < numSrcLoopIVs; ++i) {
    if (sliceState->lbs[i] != AffineMap()) {
      canonicalizeMapAndOperands(&sliceState->lbs[i],
                                 &sliceState->lbOperands[i]);
    }
    if (sliceState->ubs[i] != AffineMap()) {
      canonicalizeMapAndOperands(&sliceState->ubs[i],
                                 &sliceState->ubOperands[i]);
    }
  }
  return true;
}

namespace {

// GreedyFusion greedily fuses loop nests which have a producer/consumer or
// input-reuse relationship on a memref, with the goal of improving locality.
//
// The steps of the producer-consumer fusion algorithm are as follows:
//
// *) A worklist is initialized with node ids from the dependence graph.
// *) For each node id in the worklist:
//   *) Pop an AffineForOp off the worklist. This 'dstAffineForOp' will be a
//      candidate destination AffineForOp into which fusion will be attempted.
//   *) Add each LoadOp currently in 'dstAffineForOp' into list 'dstLoadOps'.
//   *) For each LoadOp in 'dstLoadOps' do:
//     *) Look up dependent loop nests which have a single store op to the
//        same memref.
//     *) Check if dependences would be violated by the fusion.
//     *) Get a computation slice of 'srcLoopNest', which adjusts its loop
//        bounds to be functions of 'dstLoopNest' IVs and symbols.
//     *) Fuse the 'srcLoopNest' computation slice into the 'dstLoopNest',
//        at a loop depth determined by the cost model in 'isFusionProfitable'.
//     *) Add the newly fused load/store operations to the state,
//        and also add newly fused load ops to 'dstLoadOps' to be considered
//        as fusion dst load ops in another iteration.
//     *) Remove old src loop nest and its associated state.
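//
// For illustration only (a hand-written sketch, not the verbatim output of
// this pass on any particular input), producer-consumer fusion rewrites IR
// resembling:
//
//   affine.for %i = 0 to 100 {
//     %v = ...                                   // produce
//     affine.store %v, %m[%i] : memref<100xf32>
//   }
//   affine.for %i = 0 to 100 {
//     %w = affine.load %m[%i] : memref<100xf32>  // consume
//     "use"(%w) : (f32) -> ()
//   }
//
// into a single nest in which a slice of the producer is inserted into the
// consumer (with %m possibly shrunk to a private single-element buffer):
//
//   affine.for %i = 0 to 100 {
//     %v = ...
//     affine.store %v, %priv[0] : memref<1xf32>
//     %w = affine.load %priv[0] : memref<1xf32>
//     "use"(%w) : (f32) -> ()
//   }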
//
// The steps of the input-reuse fusion algorithm are as follows:
//
// *) Initialize 'worklist' with node ids from the dependence graph.
// *) For each 'dstNode' in the worklist:
//   *) Find a candidate sibling node 'sibNode' to fuse with 'dstNode' which
//      loads from the same memref, but which has no dependence paths to or
//      from 'dstNode'.
//   *) Get a computation slice of 'sibLoopNest', which adjusts its loop
//      bounds to be functions of 'dstLoopNest' IVs and symbols.
//   *) Fuse the 'sibLoopNest' computation slice into the 'dstLoopNest',
//      at a loop depth determined by the cost model in 'isFusionProfitable'.
//      This function also checks that the memref write region of
//      'sibLoopNest' is preserved in the fused loop nest.
//   *) Update graph state to reflect the fusion of 'sibNode' into 'dstNode'.
//
// Given a graph where top-level operations are vertices in the set 'V' and
// edges in the set 'E' are dependences between vertices, this algorithm
// takes O(V) time for initialization, and has runtime O(V + E).
//
// This greedy algorithm is not 'maximal' due to the current restriction of
// fusing along single producer-consumer edges, but there is a TODO to fix
// this.
//
// TODO: Experiment with other fusion policies.
struct GreedyFusion {
public:
  // The data dependence graph to traverse during fusion.
  MemRefDependenceGraph *mdg;
  // Worklist of graph nodes visited during the fusion pass.
  SmallVector<unsigned, 8> worklist;
  // Set of graph nodes which are present on the worklist.
  llvm::SmallDenseSet<unsigned, 16> worklistSet;
  // Local buffer size threshold (in bytes) used when creating private memrefs.
  unsigned localBufSizeThreshold;
  // Optional fast memory space in which to place private memrefs.
  Optional<unsigned> fastMemorySpace;
  // If true, ignore any additional (redundant) computation tolerance threshold
  // that would have prevented fusion.
  bool maximalFusion;
  // The amount of additional computation that is tolerated while fusing
  // pair-wise as a fraction of the total computation.
  double computeToleranceThreshold;

  using Node = MemRefDependenceGraph::Node;

  GreedyFusion(MemRefDependenceGraph *mdg, unsigned localBufSizeThreshold,
               Optional<unsigned> fastMemorySpace, bool maximalFusion,
               double computeToleranceThreshold)
      : mdg(mdg), localBufSizeThreshold(localBufSizeThreshold),
        fastMemorySpace(fastMemorySpace), maximalFusion(maximalFusion),
        computeToleranceThreshold(computeToleranceThreshold) {}

  // Initializes 'worklist' with nodes from 'mdg'.
  void init() {
    // TODO: Add a priority queue for prioritizing nodes by different
    // metrics (e.g. arithmetic intensity/flops-to-bytes ratio).
    worklist.clear();
    worklistSet.clear();
    for (auto &idAndNode : mdg->nodes) {
      const Node &node = idAndNode.second;
      worklist.push_back(node.id);
      worklistSet.insert(node.id);
    }
  }

1471
1472 // Run the GreedyFusion pass.
1473 // *) First pass through the nodes fuses single-use producer nodes into their
1474 // unique consumer.
1475 // *) Second pass fuses sibling nodes which share no dependence edges.
1476 // *) Third pass fuses any remaining producer nodes into their users.
run__anonb2be91030911::GreedyFusion1477 void run() {
1478 // TODO: Run this repeatedly until a fixed-point is reached.
1479 fuseProducerConsumerNodes(/*maxSrcUserCount=*/1);
1480 fuseSiblingNodes();
1481 fuseProducerConsumerNodes(
1482 /*maxSrcUserCount=*/std::numeric_limits<unsigned>::max());
1483 eraseUnusedMemRefAllocations();
1484 }
1485
  void fuseProducerConsumerNodes(unsigned maxSrcUserCount) {
    init();
    while (!worklist.empty()) {
      unsigned dstId = worklist.back();
      worklist.pop_back();
      worklistSet.erase(dstId);

      // Skip if this node was removed (fused into another node).
      if (mdg->nodes.count(dstId) == 0)
        continue;
      // Get 'dstNode' into which to attempt fusion.
      auto *dstNode = mdg->getNode(dstId);
      // Skip if 'dstNode' is not a loop nest.
      if (!isa<AffineForOp>(dstNode->op))
        continue;
      // Sink sequential loops in 'dstNode' (and thus raise parallel loops)
      // while preserving relative order. This can increase the maximum loop
      // depth at which we can fuse a slice of a producer loop nest into a
      // consumer loop nest.
      sinkSequentialLoops(dstNode);
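      // For example (illustrative): in a nest ordered (%s, %p), where %s
      // carries a dependence and %p is parallel, interchanging to (%p, %s)
      // lets a producer slice be inserted at depth 1 (inside %p) instead of
      // only at depth 0.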

      SmallVector<Operation *, 4> loads = dstNode->loads;
      SmallVector<Operation *, 4> dstLoadOpInsts;
      DenseSet<Value> visitedMemrefs;
      while (!loads.empty()) {
        // Get memref of load on top of the stack.
        auto memref = cast<AffineReadOpInterface>(loads.back()).getMemRef();
        if (visitedMemrefs.count(memref) > 0)
          continue;
        visitedMemrefs.insert(memref);
        // Move all loads in 'loads' accessing 'memref' to 'dstLoadOpInsts'.
        moveLoadsAccessingMemrefTo(memref, &loads, &dstLoadOpInsts);
        // Skip if no input edges along which to fuse.
        if (mdg->inEdges.count(dstId) == 0)
          continue;
        // Iterate through in-edges for 'dstId' and collect the src node id
        // of any edges on 'memref'.
        SmallVector<unsigned, 2> srcNodeIds;
        for (auto &srcEdge : mdg->inEdges[dstId]) {
          // Skip 'srcEdge' if not for 'memref'.
          if (srcEdge.value != memref)
            continue;
          srcNodeIds.push_back(srcEdge.id);
        }
        for (unsigned srcId : srcNodeIds) {
          // Skip if this node was removed (fused into another node).
          if (mdg->nodes.count(srcId) == 0)
            continue;
          // Get 'srcNode' from which to attempt fusion into 'dstNode'.
          auto *srcNode = mdg->getNode(srcId);
          // Skip if 'srcNode' is not a loop nest.
          if (!isa<AffineForOp>(srcNode->op))
            continue;
          // Skip if 'srcNode' has more than one live-out store to a
          // function-local memref.
          // TODO: Support fusion of more general multi-output src loop nests.
          auto srcStoreOp = mdg->getUniqueOutgoingStore(srcNode);
          if (!srcStoreOp) {
            // Get the src store op at the deepest loop depth.
            // We will use 'LoopFusionUtils::canFuseLoops' to check fusion
            // feasibility for loops with multiple stores.
            unsigned maxLoopDepth = 0;
            for (auto *op : srcNode->stores) {
              auto storeOp = cast<AffineWriteOpInterface>(op);
              if (storeOp.getMemRef() != memref) {
                srcStoreOp = nullptr;
                break;
              }
              unsigned loopDepth = getNestingDepth(storeOp);
              if (loopDepth > maxLoopDepth) {
                maxLoopDepth = loopDepth;
                srcStoreOp = storeOp;
              }
            }
            if (!srcStoreOp)
              continue;
          }

          // The unique outgoing store found must write to 'memref' since
          // 'memref' is the one that established the producer-consumer
          // relationship between 'srcNode' and 'dstNode'.
          assert(srcStoreOp.getMemRef() == memref &&
                 "Found store to unexpected memref");

          // Skip if 'srcNode' writes to any live-in or escaping memrefs and
          // therefore cannot be fused.
          bool writesToLiveInOrOut =
              mdg->writesToLiveInOrEscapingMemrefs(srcNode->id);
          if (writesToLiveInOrOut &&
              !canFuseSrcWhichWritesToLiveOut(srcId, dstId, srcStoreOp, mdg))
            continue;

          // Don't create a private memref if 'writesToLiveInOrOut'.
          bool createPrivateMemref = !writesToLiveInOrOut;
          // Don't create a private memref if 'srcNode' has in edges on
          // 'memref', or if 'dstNode' has out edges on 'memref'.
          if (mdg->getIncomingMemRefAccesses(srcNode->id, memref) > 0 ||
              mdg->getOutEdgeCount(dstNode->id, memref) > 0) {
            createPrivateMemref = false;
          }

          // Skip if 'srcNode' out edge count on 'memref' > 'maxSrcUserCount'.
          if (mdg->getOutEdgeCount(srcNode->id, memref) > maxSrcUserCount)
            continue;

          // Compute an operation list insertion point for the fused loop
          // nest which preserves dependences.
          Operation *insertPointInst =
              mdg->getFusedLoopNestInsertionPoint(srcNode->id, dstNode->id);
          if (insertPointInst == nullptr)
            continue;

          // Compute the innermost common loop depth for dstNode loads/stores.
          SmallVector<Operation *, 2> dstOps(dstNode->loads.begin(),
                                             dstNode->loads.end());
          dstOps.append(dstNode->stores.begin(), dstNode->stores.end());
          unsigned dstLoopDepthTest = getInnermostCommonLoopDepth(dstOps);
          // Check the feasibility of fusing src loop nest into dst loop nest
          // at loop depths in range [1, dstLoopDepthTest].
          // TODO: Use slice union computation and the union of memref
          // read/write regions to drive the cost model and fusion.
          bool canFuse = false;
          for (unsigned i = 1; i <= dstLoopDepthTest; ++i) {
            ComputationSliceState sliceUnion;
            FusionResult result = mlir::canFuseLoops(
                cast<AffineForOp>(srcNode->op), cast<AffineForOp>(dstNode->op),
                /*dstLoopDepth=*/i, &sliceUnion);
            if (result.value == FusionResult::Success)
              canFuse = true;
          }

          // Skip if fusion is not feasible at any loop depth.
          if (!canFuse)
            continue;

          // Gather 'dstNode' store ops to 'memref'.
          SmallVector<Operation *, 2> dstStoreOpInsts;
          for (auto *storeOpInst : dstNode->stores)
            if (cast<AffineWriteOpInterface>(storeOpInst).getMemRef() == memref)
              dstStoreOpInsts.push_back(storeOpInst);

          unsigned bestDstLoopDepth;
          mlir::ComputationSliceState sliceState;
          // Check if fusion would be profitable.
          if (!isFusionProfitable(srcStoreOp, srcStoreOp, dstLoadOpInsts,
                                  dstStoreOpInsts, &sliceState,
                                  &bestDstLoopDepth, maximalFusion,
                                  computeToleranceThreshold))
            continue;

          // Fuse computation slice of 'srcLoopNest' into 'dstLoopNest'.
          auto sliceLoopNest = mlir::insertBackwardComputationSlice(
              srcStoreOp, dstLoadOpInsts[0], bestDstLoopDepth, &sliceState);
          if (sliceLoopNest) {
            LLVM_DEBUG(llvm::dbgs() << "\tslice loop nest:\n"
                                    << *sliceLoopNest.getOperation() << "\n");
            // Move 'dstAffineForOp' before 'insertPointInst' if needed.
            auto dstAffineForOp = cast<AffineForOp>(dstNode->op);
            if (insertPointInst != dstAffineForOp.getOperation()) {
              dstAffineForOp.getOperation()->moveBefore(insertPointInst);
            }
            // Update edges between 'srcNode' and 'dstNode'.
            mdg->updateEdges(srcNode->id, dstNode->id, memref,
                             createPrivateMemref);

            // Collect slice loop stats.
            LoopNestStateCollector sliceCollector;
            sliceCollector.collect(sliceLoopNest.getOperation());
            // Promote single iteration slice loops to single IV value.
            for (auto forOp : sliceCollector.forOps) {
              promoteIfSingleIteration(forOp);
            }
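            // If it is safe to do so (checked above), redirect the slice's
            // stores into a new buffer sized to the slice's write region; for
            // example (illustrative shapes), a producer writing
            // memref<100xf32> may be given a memref<1xf32> private buffer,
            // optionally placed in 'fastMemorySpace'.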
            if (createPrivateMemref) {
              // Create private memref for 'memref' in 'dstAffineForOp'.
              SmallVector<Operation *, 4> storesForMemref;
              for (auto *storeOpInst : sliceCollector.storeOpInsts) {
                if (cast<AffineWriteOpInterface>(storeOpInst).getMemRef() ==
                    memref)
                  storesForMemref.push_back(storeOpInst);
              }
              // TODO: Use union of memref write regions to compute
              // private memref footprint.
              auto newMemRef = createPrivateMemRef(
                  dstAffineForOp, storesForMemref[0], bestDstLoopDepth,
                  fastMemorySpace, localBufSizeThreshold);
              visitedMemrefs.insert(newMemRef);
              // Create new node in dependence graph for 'newMemRef' alloc op.
              unsigned newMemRefNodeId =
                  mdg->addNode(newMemRef.getDefiningOp());
              // Add edge from 'newMemRef' node to dstNode.
              mdg->addEdge(newMemRefNodeId, dstId, newMemRef);
            }

            // Collect dst loop stats after memref privatization
            // transformation.
            LoopNestStateCollector dstLoopCollector;
            dstLoopCollector.collect(dstAffineForOp.getOperation());

            // Add new load ops to current Node load op list 'loads' to
            // continue fusing based on new operands.
            for (auto *loadOpInst : dstLoopCollector.loadOpInsts) {
              // NOTE: Change 'loads' to a hash set in case efficiency is an
              // issue. We still use a vector since it's expected to be small.
              if (!llvm::is_contained(loads, loadOpInst))
                loads.push_back(loadOpInst);
            }
            // Clear visited memrefs after fusion so that previously visited
            // src nodes are considered for fusion again in the context of the
            // new fused node.
            // TODO: This shouldn't be necessary if we visited candidates in
            // the dependence graph in post-order or once we fully support
            // multi-store producers. Currently, in a multi-store producer
            // scenario such as A->B, A->C, B->C, we fail to fuse A+B due to
            // the multiple outgoing edges. However, after fusing B+C, A has a
            // single outgoing edge and can be fused if we revisit it in the
            // context of the new fused B+C node.
            visitedMemrefs.clear();

            // Clear and add back loads and stores.
            mdg->clearNodeLoadAndStores(dstNode->id);
            mdg->addToNode(dstId, dstLoopCollector.loadOpInsts,
                           dstLoopCollector.storeOpInsts);
            // Remove old src loop nest if it no longer has outgoing
            // dependence edges, and if it does not write to a memref which
            // escapes the function. If 'writesToLiveInOrOut' is true, then
            // 'srcNode' has been fused into 'dstNode', the write region of
            // 'dstNode' covers the write region of 'srcNode', and 'srcNode'
            // has no other users, so it is safe to remove.
            if (writesToLiveInOrOut || mdg->canRemoveNode(srcNode->id)) {
              mdg->removeNode(srcNode->id);
              srcNode->op->erase();
            } else {
              // Add remaining users of 'oldMemRef' back on the worklist (if
              // not already there), as its replacement with a local/private
              // memref has reduced dependences on 'oldMemRef' which may have
              // created new fusion opportunities.
              if (mdg->outEdges.count(srcNode->id) > 0) {
                SmallVector<MemRefDependenceGraph::Edge, 2> oldOutEdges =
                    mdg->outEdges[srcNode->id];
                for (auto &outEdge : oldOutEdges) {
                  if (outEdge.value == memref &&
                      worklistSet.count(outEdge.id) == 0) {
                    worklist.push_back(outEdge.id);
                    worklistSet.insert(outEdge.id);
                  }
                }
              }
            }
          }
        }
      }
    }
  }

  // Visits each node in the graph, and for each node, attempts to fuse it
  // with its sibling nodes (nodes which share a parent, but no dependence
  // edges).
  void fuseSiblingNodes() {
    init();
    while (!worklist.empty()) {
      unsigned dstId = worklist.back();
      worklist.pop_back();
      worklistSet.erase(dstId);

      // Skip if this node was removed (fused into another node).
      if (mdg->nodes.count(dstId) == 0)
        continue;
      // Get 'dstNode' into which to attempt fusion.
      auto *dstNode = mdg->getNode(dstId);
      // Skip if 'dstNode' is not a loop nest.
      if (!isa<AffineForOp>(dstNode->op))
        continue;
      // Attempt to fuse 'dstNode' with its sibling nodes in the graph.
      fuseWithSiblingNodes(dstNode);
    }
  }

  // Attempt to fuse 'dstNode' with sibling nodes in the graph.
  void fuseWithSiblingNodes(Node *dstNode) {
    DenseSet<unsigned> visitedSibNodeIds;
    std::pair<unsigned, Value> idAndMemref;
    while (findSiblingNodeToFuse(dstNode, &visitedSibNodeIds, &idAndMemref)) {
      unsigned sibId = idAndMemref.first;
      Value memref = idAndMemref.second;
      // TODO: Check that 'sibStoreOpInst' post-dominates all other
      // stores to the same memref in 'sibNode' loop nest.
      auto *sibNode = mdg->getNode(sibId);
      // Compute an operation list insertion point for the fused loop
      // nest which preserves dependences.
      assert(sibNode->op->getBlock() == dstNode->op->getBlock());
      Operation *insertPointInst =
          sibNode->op->isBeforeInBlock(dstNode->op)
              ? mdg->getFusedLoopNestInsertionPoint(sibNode->id, dstNode->id)
              : mdg->getFusedLoopNestInsertionPoint(dstNode->id, sibNode->id);
      if (insertPointInst == nullptr)
        continue;

      // Check if fusion would be profitable and at what depth.

      // Get unique 'sibNode' load op to 'memref'.
      SmallVector<Operation *, 2> sibLoadOpInsts;
      sibNode->getLoadOpsForMemref(memref, &sibLoadOpInsts);
      // Currently findSiblingNodeToFuse searches for siblings with one load.
      assert(sibLoadOpInsts.size() == 1);
      Operation *sibLoadOpInst = sibLoadOpInsts[0];
      assert(!sibNode->stores.empty());
      // TODO: Choose the store which postdominates all other stores.
      auto *sibStoreOpInst = sibNode->stores.back();

      // Gather 'dstNode' load ops to 'memref'.
      SmallVector<Operation *, 2> dstLoadOpInsts;
      dstNode->getLoadOpsForMemref(memref, &dstLoadOpInsts);

      // Gather 'dstNode' store ops to 'memref'.
      SmallVector<Operation *, 2> dstStoreOpInsts;
      dstNode->getStoreOpsForMemref(memref, &dstStoreOpInsts);

      unsigned bestDstLoopDepth;
      mlir::ComputationSliceState sliceState;

      // Check if fusion would be profitable.
      if (!isFusionProfitable(sibLoadOpInst, sibStoreOpInst, dstLoadOpInsts,
                              dstStoreOpInsts, &sliceState, &bestDstLoopDepth,
                              maximalFusion, computeToleranceThreshold))
        continue;

      // Fuse computation slice of 'sibLoopNest' into 'dstLoopNest'.
      auto sliceLoopNest = mlir::insertBackwardComputationSlice(
          sibLoadOpInst, dstLoadOpInsts[0], bestDstLoopDepth, &sliceState);
      if (sliceLoopNest != nullptr) {
        auto dstForInst = cast<AffineForOp>(dstNode->op);
        // Update operation position of fused loop nest (if needed).
        if (insertPointInst != dstForInst.getOperation()) {
          dstForInst.getOperation()->moveBefore(insertPointInst);
        }
        // Update data dependence graph state post fusion.
        updateStateAfterSiblingFusion(sliceLoopNest, sibNode, dstNode);
      }
    }
  }

  // Searches function argument uses and the graph from 'dstNode' looking for
  // a fusion candidate sibling node which shares no dependences with
  // 'dstNode' but which loads from the same memref. Returns true and sets
  // 'idAndMemrefToFuse' on success. Returns false otherwise.
  bool findSiblingNodeToFuse(Node *dstNode,
                             DenseSet<unsigned> *visitedSibNodeIds,
                             std::pair<unsigned, Value> *idAndMemrefToFuse) {
    // Returns true if 'sibNode' can be fused with 'dstNode' for input reuse
    // on 'memref'.
    auto canFuseWithSibNode = [&](Node *sibNode, Value memref) {
      // Skip if 'sibNode' does not have exactly one load on 'memref'.
      // TODO: Remove the single-load-op restriction.
      if (sibNode->getLoadOpCount(memref) != 1)
        return false;
      // Skip if there exists a path of dependent edges between
      // 'sibNode' and 'dstNode'.
      if (mdg->hasDependencePath(sibNode->id, dstNode->id) ||
          mdg->hasDependencePath(dstNode->id, sibNode->id))
        return false;
      // Skip 'sibNode' if it loads from (and stores to) a memref on
      // which it also has an input dependence edge.
      DenseSet<Value> loadAndStoreMemrefSet;
      sibNode->getLoadAndStoreMemrefSet(&loadAndStoreMemrefSet);
      if (llvm::any_of(loadAndStoreMemrefSet, [=](Value memref) {
            return mdg->getIncomingMemRefAccesses(sibNode->id, memref) > 0;
          }))
        return false;

      // Check that all stores are to the same memref.
      DenseSet<Value> storeMemrefs;
      for (auto *storeOpInst : sibNode->stores) {
        storeMemrefs.insert(
            cast<AffineWriteOpInterface>(storeOpInst).getMemRef());
      }
      if (storeMemrefs.size() != 1)
        return false;

      // Skip if a memref value in one node is used by a non-affine memref
      // access that lies between 'dstNode' and 'sibNode'.
      if (hasNonAffineUsersOnThePath(dstNode->id, sibNode->id, mdg) ||
          hasNonAffineUsersOnThePath(sibNode->id, dstNode->id, mdg))
        return false;
      return true;
    };

    // Search for siblings which load the same memref function argument.
    auto fn = dstNode->op->getParentOfType<FuncOp>();
    for (unsigned i = 0, e = fn.getNumArguments(); i != e; ++i) {
      for (auto *user : fn.getArgument(i).getUsers()) {
        if (auto loadOp = dyn_cast<AffineReadOpInterface>(user)) {
          // Gather loops surrounding 'use'.
          SmallVector<AffineForOp, 4> loops;
          getLoopIVs(*user, &loops);
          // Skip 'use' if it is not within a loop nest.
          if (loops.empty())
            continue;
          Node *sibNode = mdg->getForOpNode(loops[0]);
          assert(sibNode != nullptr);
          // Skip 'use' if it is not a sibling to 'dstNode'.
          if (sibNode->id == dstNode->id)
            continue;
          // Skip 'use' if it has been visited.
          if (visitedSibNodeIds->count(sibNode->id) > 0)
            continue;
          // Skip 'use' if it does not load from the same memref as 'dstNode'.
          auto memref = loadOp.getMemRef();
          if (dstNode->getLoadOpCount(memref) == 0)
            continue;
          // Check if 'sibNode/dstNode' can be input-reuse fused on 'memref'.
          if (canFuseWithSibNode(sibNode, memref)) {
            visitedSibNodeIds->insert(sibNode->id);
            idAndMemrefToFuse->first = sibNode->id;
            idAndMemrefToFuse->second = memref;
            return true;
          }
        }
      }
    }

    // Search for siblings by following edges through an intermediate src
    // node. Collect candidate 'dstNode' input edges in 'inEdges'.
    SmallVector<MemRefDependenceGraph::Edge, 2> inEdges;
    mdg->forEachMemRefInputEdge(
        dstNode->id, [&](MemRefDependenceGraph::Edge inEdge) {
          // Add 'inEdge' if it is a read-after-write dependence.
          if (dstNode->getLoadOpCount(inEdge.value) > 0 &&
              mdg->getNode(inEdge.id)->getStoreOpCount(inEdge.value) > 0)
            inEdges.push_back(inEdge);
        });

    // Search for sibling nodes to fuse by visiting output edges from each
    // input edge in 'inEdges'.
    for (auto &inEdge : inEdges) {
      // Collect candidate output edges from each node 'inEdge.id' in
      // 'inEdges'.
      SmallVector<MemRefDependenceGraph::Edge, 2> outEdges;
      mdg->forEachMemRefOutputEdge(
          inEdge.id, [&](MemRefDependenceGraph::Edge outEdge) {
            unsigned sibNodeId = outEdge.id;
            if (visitedSibNodeIds->count(sibNodeId) > 0)
              return;
            // Skip output edge if not a sibling using the same memref.
            if (outEdge.id == dstNode->id || outEdge.value != inEdge.value)
              return;
            auto *sibNode = mdg->getNode(sibNodeId);
            if (!isa<AffineForOp>(sibNode->op))
              return;
            // Check if 'sibNode/dstNode' can be input-reuse fused on
            // 'memref'.
            if (canFuseWithSibNode(sibNode, outEdge.value)) {
              // Add candidate 'outEdge' to sibling node.
              outEdges.push_back(outEdge);
            }
          });

      // Add the first candidate if any were returned.
      if (!outEdges.empty()) {
        visitedSibNodeIds->insert(outEdges[0].id);
        idAndMemrefToFuse->first = outEdges[0].id;
        idAndMemrefToFuse->second = outEdges[0].value;
        return true;
      }
    }
    return false;
  }

  void updateStateAfterSiblingFusion(AffineForOp sliceLoopNest, Node *sibNode,
                                     Node *dstNode) {
    // Update 'sibNode' and 'dstNode' input/output edges to reflect fusion.
    mdg->updateEdges(sibNode->id, dstNode->id);

    // Collect slice loop stats.
    LoopNestStateCollector sliceCollector;
    sliceCollector.collect(sliceLoopNest.getOperation());
    // Promote single iteration slice loops to single IV value.
    for (auto forOp : sliceCollector.forOps) {
      promoteIfSingleIteration(forOp);
    }

    // Collect dst loop stats after memref privatization transformation.
    auto dstForInst = cast<AffineForOp>(dstNode->op);
    LoopNestStateCollector dstLoopCollector;
    dstLoopCollector.collect(dstForInst.getOperation());
    // Clear and add back loads and stores.
    mdg->clearNodeLoadAndStores(dstNode->id);
    mdg->addToNode(dstNode->id, dstLoopCollector.loadOpInsts,
                   dstLoopCollector.storeOpInsts);
    // Remove the old sibling loop nest if it no longer has outgoing
    // dependence edges, and it does not write to a memref which escapes the
    // function.
    if (mdg->getOutEdgeCount(sibNode->id) == 0) {
      mdg->removeNode(sibNode->id);
      sibNode->op->erase();
    }
  }

  // Clean up any allocs with no users.
  void eraseUnusedMemRefAllocations() {
    for (auto &pair : mdg->memrefEdgeCount) {
      if (pair.second > 0)
        continue;
      auto memref = pair.first;
      // Skip if there exist other uses (return operation or function calls).
      if (!memref.use_empty())
        continue;
      // Use list expected to match the dep graph info.
      auto *op = memref.getDefiningOp();
      if (isa_and_nonnull<AllocOp>(op))
        op->erase();
    }
  }
};

} // end anonymous namespace

void LoopFusion::runOnFunction() {
  MemRefDependenceGraph g;
  if (!g.init(getFunction()))
    return;

  Optional<unsigned> fastMemorySpaceOpt;
  if (fastMemorySpace.hasValue())
    fastMemorySpaceOpt = fastMemorySpace;
  unsigned localBufSizeThresholdBytes = localBufSizeThreshold * 1024;
  GreedyFusion fusion(&g, localBufSizeThresholdBytes, fastMemorySpaceOpt,
                      maximalFusion, computeToleranceThreshold);
  fusion.run();
}
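
// Typical invocation (assuming the standard pass registration; the exact
// option spellings below are illustrative and should be checked against the
// pass definition in Passes.td):
//
//   mlir-opt input.mlir -affine-loop-fusion
//   mlir-opt input.mlir -affine-loop-fusion="fusion-maximal=true"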