//===- SCFToGPU.cpp - Convert an affine loop nest to a GPU kernel -------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This implements a straightforward conversion of a loop nest into a GPU
// kernel.  The caller is expected to guarantee that the conversion is correct
// or to further transform the kernel to ensure correctness.
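//
// For illustration only (a sketch; the IR below is not produced verbatim by
// this file): mapping a two-deep affine loop nest to one block and one thread
// dimension roughly rewrites
//
//   affine.for %i = 0 to %N {
//     affine.for %j = 0 to %M {
//       "compute"(%i, %j) : (index, index) -> ()
//     }
//   }
//
// into a gpu.launch whose grid and block sizes are taken from the loop ranges
// and whose body reconstructs the original induction variables from the
// hardware ids:
//
//   gpu.launch blocks(%bx, %by, %bz) in (%gx = %N, %gy = %c1, %gz = %c1)
//              threads(%tx, %ty, %tz) in (%sx = %M, %sy = %c1, %sz = %c1) {
//     "compute"(%bx, %tx) : (index, index) -> ()
//     gpu.terminator
//   }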
//
//===----------------------------------------------------------------------===//

#include "mlir/Conversion/SCFToGPU/SCFToGPU.h"

#include "mlir/Conversion/AffineToStandard/AffineToStandard.h"
#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Dialect/GPU/GPUDialect.h"
#include "mlir/Dialect/GPU/ParallelLoopMapper.h"
#include "mlir/Dialect/SCF/SCF.h"
#include "mlir/Dialect/StandardOps/IR/Ops.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/BlockAndValueMapping.h"
#include "mlir/IR/Builders.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Transforms/DialectConversion.h"
#include "mlir/Transforms/LoopUtils.h"
#include "mlir/Transforms/Passes.h"
#include "mlir/Transforms/RegionUtils.h"
#include "llvm/ADT/Sequence.h"
#include "llvm/Support/Debug.h"

#define DEBUG_TYPE "loops-to-gpu"

using namespace mlir;
using namespace mlir::scf;

// Extract an indexed value from KernelDim3.
static Value getDim3Value(const gpu::KernelDim3 &dim3, unsigned pos) {
  switch (pos) {
  case 0:
    return dim3.x;
  case 1:
    return dim3.y;
  case 2:
    return dim3.z;
  default:
    llvm_unreachable("dim3 position out of bounds");
  }
  return nullptr;
}

// Get the lower bound-related operands of a loop operation.
static Operation::operand_range getLowerBoundOperands(AffineForOp forOp) {
  return forOp.getLowerBoundOperands();
}

// Get the upper bound-related operands of a loop operation.
static Operation::operand_range getUpperBoundOperands(AffineForOp forOp) {
  return forOp.getUpperBoundOperands();
}

// Get a Value that corresponds to the loop step.  If the step is an attribute,
// materialize a corresponding constant using builder.
static Value getOrCreateStep(AffineForOp forOp, OpBuilder &builder) {
  return builder.create<ConstantIndexOp>(forOp.getLoc(), forOp.getStep());
}

// Get a Value for the loop lower bound.  If the value requires computation,
// materialize the instructions using builder.
static Value getOrEmitLowerBound(AffineForOp forOp, OpBuilder &builder) {
  return lowerAffineLowerBound(forOp, builder);
}

// Get a Value for the loop upper bound.  If the value requires computation,
// materialize the instructions using builder.
static Value getOrEmitUpperBound(AffineForOp forOp, OpBuilder &builder) {
  return lowerAffineUpperBound(forOp, builder);
}

// Check the structure of the loop nest:
//   - there are enough loops to map to numDims;
//   - the loops are perfectly nested;
//   - the loop bounds can be computed above the outermost loop.
// This roughly corresponds to the "matcher" part of the pattern-based
// rewriting infrastructure.
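// For example (illustrative), with numDims == 2 the nest
//
//   affine.for %i = 0 to %N {
//     affine.for %j = 0 to %M { ... }
//   }
//
// is mappable because the inner loop is the only operation in the outer
// loop's body and both bounds are defined above the nest, whereas a nest
// whose inner upper bound is an outer induction variable (e.g.
// "affine.for %j = 0 to %i") is rejected.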
static LogicalResult checkAffineLoopNestMappableImpl(AffineForOp forOp,
                                                     unsigned numDims) {
  Region &limit = forOp.region();
  for (unsigned i = 0, e = numDims; i < e; ++i) {
    Operation *nested = &forOp.getBody()->front();
    if (!areValuesDefinedAbove(getLowerBoundOperands(forOp), limit) ||
        !areValuesDefinedAbove(getUpperBoundOperands(forOp), limit))
      return forOp.emitError(
          "loops with bounds depending on other mapped loops "
          "are not supported");

    // The innermost loop can have an arbitrary body, skip the perfect nesting
    // check for it.
    if (i == e - 1)
      break;

    auto begin = forOp.getBody()->begin(), end = forOp.getBody()->end();
    if (forOp.getBody()->empty() || std::next(begin, 2) != end)
      return forOp.emitError("expected perfectly nested loops in the body");

    if (!(forOp = dyn_cast<AffineForOp>(nested)))
      return nested->emitError("expected a nested loop");
  }
  return success();
}

static LogicalResult checkAffineLoopNestMappable(AffineForOp forOp,
                                                 unsigned numBlockDims,
                                                 unsigned numThreadDims) {
  if (numBlockDims < 1 || numThreadDims < 1) {
    LLVM_DEBUG(llvm::dbgs() << "nothing to map");
    return success();
  }

  if (numBlockDims > 3) {
    return forOp.emitError("cannot map to more than 3 block dimensions");
  }
  if (numThreadDims > 3) {
    return forOp.emitError("cannot map to more than 3 thread dimensions");
  }
  return checkAffineLoopNestMappableImpl(forOp, numBlockDims + numThreadDims);
}

namespace {
// Helper structure that holds common state of the loop to GPU kernel
// conversion.
struct AffineLoopToGpuConverter {
  Optional<AffineForOp> collectBounds(AffineForOp forOp, unsigned numLoops);

  void createLaunch(AffineForOp rootForOp, AffineForOp innermostForOp,
                    unsigned numBlockDims, unsigned numThreadDims);

  // Ranges of the loops mapped to blocks or threads.
  SmallVector<Value, 6> dims;
  // Lower bounds of the loops mapped to blocks or threads.
  SmallVector<Value, 6> lbs;
  // Induction variables of the loops mapped to blocks or threads.
  SmallVector<Value, 6> ivs;
  // Steps of the loops mapped to blocks or threads.
  SmallVector<Value, 6> steps;
};
} // namespace

// Return true if the value is obviously a constant "one".
static bool isConstantOne(Value value) {
  if (auto def = value.getDefiningOp<ConstantIndexOp>())
    return def.getValue() == 1;
  return false;
}

// Collect ranges, bounds, steps and induction variables in preparation for
// mapping a loop nest of depth "numLoops" rooted at "forOp" to a GPU kernel.
// This may fail if the IR for computing loop bounds cannot be constructed, for
// example if an affine loop uses semi-affine maps. Return the last loop to be
// mapped on success, llvm::None on failure.
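// For example (illustrative), for a loop
//   affine.for %i = %lb to %ub step 4 { ... }
// this records the lower bound %lb, the step materialized as a constant 4,
// the induction variable %i, and the range (%ub - %lb) divided by the step
// (the division is skipped when the step is 1), which is later used as a grid
// or block size.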
Optional<AffineForOp>
AffineLoopToGpuConverter::collectBounds(AffineForOp forOp, unsigned numLoops) {
  OpBuilder builder(forOp.getOperation());
  dims.reserve(numLoops);
  lbs.reserve(numLoops);
  ivs.reserve(numLoops);
  steps.reserve(numLoops);
  AffineForOp currentLoop = forOp;
  for (unsigned i = 0; i < numLoops; ++i) {
    Value lowerBound = getOrEmitLowerBound(currentLoop, builder);
    Value upperBound = getOrEmitUpperBound(currentLoop, builder);
    if (!lowerBound || !upperBound) {
      return llvm::None;
    }

    Value range =
        builder.create<SubIOp>(currentLoop.getLoc(), upperBound, lowerBound);
    Value step = getOrCreateStep(currentLoop, builder);
    if (!isConstantOne(step))
      range = builder.create<SignedDivIOp>(currentLoop.getLoc(), range, step);
    dims.push_back(range);

    lbs.push_back(lowerBound);
    ivs.push_back(currentLoop.getInductionVar());
    steps.push_back(step);

    if (i != numLoops - 1)
      currentLoop = cast<AffineForOp>(&currentLoop.getBody()->front());
  }
  return currentLoop;
}

// Replace the loop nest rooted at "rootForOp" with a GPU launch operation.
// This expects "innermostForOp" to point to the last loop to be transformed
// to the kernel, and to have (numBlockDims + numThreadDims) perfectly nested
// loops between "rootForOp" and "innermostForOp".
void AffineLoopToGpuConverter::createLaunch(AffineForOp rootForOp,
                                            AffineForOp innermostForOp,
                                            unsigned numBlockDims,
                                            unsigned numThreadDims) {
  OpBuilder builder(rootForOp.getOperation());
  // Prepare the grid and block sizes for the launch operation.  If there is
  // no loop mapped to a specific dimension, use constant "1" as its size.
  Value constOne = (numBlockDims < 3 || numThreadDims < 3)
                       ? builder.create<ConstantIndexOp>(rootForOp.getLoc(), 1)
                       : nullptr;
  Value gridSizeX = numBlockDims > 0 ? dims[0] : constOne;
  Value gridSizeY = numBlockDims > 1 ? dims[1] : constOne;
  Value gridSizeZ = numBlockDims > 2 ? dims[2] : constOne;
  Value blockSizeX = numThreadDims > 0 ? dims[numBlockDims] : constOne;
  Value blockSizeY = numThreadDims > 1 ? dims[numBlockDims + 1] : constOne;
  Value blockSizeZ = numThreadDims > 2 ? dims[numBlockDims + 2] : constOne;

  // Create a launch op and move the body region of the innermost loop to the
  // launch op.
  auto launchOp = builder.create<gpu::LaunchOp>(
      rootForOp.getLoc(), gridSizeX, gridSizeY, gridSizeZ, blockSizeX,
      blockSizeY, blockSizeZ);

  // Replace the loop terminator (loops contain only a single block) with the
  // gpu terminator and move the operations from the loop body block to the gpu
  // launch body block.  Do not move the entire block because of the difference
  // in block arguments.
  Operation &terminator = innermostForOp.getBody()->back();
  Location terminatorLoc = terminator.getLoc();
  terminator.erase();
  builder.setInsertionPointToEnd(innermostForOp.getBody());
  builder.create<gpu::TerminatorOp>(terminatorLoc, llvm::None);
  launchOp.body().front().getOperations().splice(
      launchOp.body().front().begin(),
      innermostForOp.getBody()->getOperations());

  // Remap the loop iterators to use block/thread identifiers instead.  Loops
  // may iterate from LB with step S whereas GPU thread/block ids always iterate
  // from 0 to N with step 1.  Therefore, loop induction variables are replaced
  // with (gpu-thread/block-id * S) + LB.
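  // For example (illustrative SSA names): a loop
  //   affine.for %i = %lb to %ub step 4
  // that is mapped to the x block dimension has every use of %i replaced by
  //   %0 = muli %c4, %bx : index   // omitted when the step is 1
  //   %1 = addi %lb, %0 : index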
  builder.setInsertionPointToStart(&launchOp.body().front());
  auto lbArgumentIt = lbs.begin();
  auto stepArgumentIt = steps.begin();
  for (auto en : llvm::enumerate(ivs)) {
    Value id =
        en.index() < numBlockDims
            ? getDim3Value(launchOp.getBlockIds(), en.index())
            : getDim3Value(launchOp.getThreadIds(), en.index() - numBlockDims);
    Value step = steps[en.index()];
    if (!isConstantOne(step))
      id = builder.create<MulIOp>(rootForOp.getLoc(), step, id);

    Value ivReplacement =
        builder.create<AddIOp>(rootForOp.getLoc(), *lbArgumentIt, id);
    en.value().replaceAllUsesWith(ivReplacement);
    std::advance(lbArgumentIt, 1);
    std::advance(stepArgumentIt, 1);
  }

  // We are done and can erase the original outermost loop.
  rootForOp.erase();
}

// Generic loop to GPU kernel conversion function.
static LogicalResult convertAffineLoopNestToGPULaunch(AffineForOp forOp,
                                                      unsigned numBlockDims,
                                                      unsigned numThreadDims) {
  if (failed(checkAffineLoopNestMappable(forOp, numBlockDims, numThreadDims)))
    return failure();

  AffineLoopToGpuConverter converter;
  auto maybeInnerLoop =
      converter.collectBounds(forOp, numBlockDims + numThreadDims);
  if (!maybeInnerLoop)
    return failure();
  converter.createLaunch(forOp, *maybeInnerLoop, numBlockDims, numThreadDims);

  return success();
}

LogicalResult mlir::convertAffineLoopNestToGPULaunch(AffineForOp forOp,
                                                     unsigned numBlockDims,
                                                     unsigned numThreadDims) {
  return ::convertAffineLoopNestToGPULaunch(forOp, numBlockDims, numThreadDims);
}

namespace {
struct ParallelToGpuLaunchLowering : public OpRewritePattern<ParallelOp> {
  using OpRewritePattern<ParallelOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(ParallelOp parallelOp,
                                PatternRewriter &rewriter) const override;
};
} // namespace

/// Tries to derive a static upper bound from the defining operation of
/// `upperBound`.
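/// For example (illustrative), if `upperBound` is defined by
///   %ub = affine.min affine_map<(d0) -> (d0, 64)>(%n)
/// the first constant result of the min map is used and a constant 64 is
/// materialized at the rewriter's insertion point. Constants and products of
/// such values are handled as well; anything else yields a null Value.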
static Value deriveStaticUpperBound(Value upperBound,
                                    PatternRewriter &rewriter) {
  if (auto op = upperBound.getDefiningOp<ConstantIndexOp>()) {
    return op;
  }

  if (auto minOp = upperBound.getDefiningOp<AffineMinOp>()) {
    for (const AffineExpr &result : minOp.map().getResults()) {
      if (auto constExpr = result.dyn_cast<AffineConstantExpr>()) {
        return rewriter.create<ConstantIndexOp>(minOp.getLoc(),
                                                constExpr.getValue());
      }
    }
  }

  if (auto multiplyOp = upperBound.getDefiningOp<MulIOp>()) {
    if (auto lhs = dyn_cast_or_null<ConstantIndexOp>(
            deriveStaticUpperBound(multiplyOp.getOperand(0), rewriter)
                .getDefiningOp()))
      if (auto rhs = dyn_cast_or_null<ConstantIndexOp>(
              deriveStaticUpperBound(multiplyOp.getOperand(1), rewriter)
                  .getDefiningOp())) {
        // Assumptions about the upper bound of minimum computations no longer
        // work if multiplied by a negative value, so abort in this case.
        if (lhs.getValue() < 0 || rhs.getValue() < 0)
          return {};

        return rewriter.create<ConstantIndexOp>(
            multiplyOp.getLoc(), lhs.getValue() * rhs.getValue());
      }
  }

  return {};
}

static bool isMappedToProcessor(gpu::Processor processor) {
  return processor != gpu::Processor::Sequential;
}

static unsigned getLaunchOpArgumentNum(gpu::Processor processor) {
  switch (processor) {
  case gpu::Processor::BlockX:
    return 0;
  case gpu::Processor::BlockY:
    return 1;
  case gpu::Processor::BlockZ:
    return 2;
  case gpu::Processor::ThreadX:
    return 3;
  case gpu::Processor::ThreadY:
    return 4;
  case gpu::Processor::ThreadZ:
    return 5;
  default:;
  }
  llvm_unreachable(
      "invalid processor type while retrieving launch op argument number");
}

/// Modifies the current transformation state to capture the effect of the given
/// `scf.parallel` operation on index substitutions and the operations to be
/// inserted.
/// Specifically, if a dimension of a parallel loop is mapped to a hardware id,
/// this function will
/// - compute the loop index based on the hardware id and affine map from the
///   mapping and update `cloningMap` to substitute all uses.
/// - derive a new upper bound for the hardware id and augment the provided
///   `gpu.launch` operation accordingly.
/// - if the upper bound is imprecise, insert a conditional in the `gpu.launch`
///   and update the rewriter to insert into the conditional's body.
/// If the dimension is mapped to sequential,
/// - insert a for loop into the body and update the rewriter to insert into
///   the for loop's body.
/// - update the `cloningMap` to replace uses of the index with the index of
///   the new for loop.
/// In either case,
/// - append the instructions from the loop's body to the worklist, in reverse
///   order.
/// To note the end of the current scope in case a loop or conditional was
/// inserted, a sentinel (the `gpu.launch` operation) is inserted into the
/// worklist. This signals the processor of the worklist to pop the rewriter
/// one scope-level up.
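/// For example (a sketch with illustrative names): a dimension
///   scf.parallel (%i) = (%lb) to (%ub) step (%s)
/// mapped to thread x with identity `map` and `bound` maps has all uses of %i
/// remapped to
///   affine.apply affine_map<(d0)[s0, s1] -> (d0 * s0 + s1)>(%tid_x)[%s, %lb]
/// and the x block size of the launch updated to
///   affine.apply affine_map<()[s0, s1, s2] -> ((s0 - s1) ceildiv s2)>()
///     [%ub, %lb, %s]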
static LogicalResult processParallelLoop(
    ParallelOp parallelOp, gpu::LaunchOp launchOp,
    BlockAndValueMapping &cloningMap, SmallVectorImpl<Operation *> &worklist,
    DenseMap<gpu::Processor, Value> &bounds, PatternRewriter &rewriter) {
  // TODO: Verify that this is a valid GPU mapping.
  // processor ids: 0-2 block [x/y/z], 3-5 -> thread [x/y/z], 6-> sequential
  ArrayAttr mapping =
      parallelOp.getAttrOfType<ArrayAttr>(gpu::getMappingAttrName());

  // TODO: Support reductions.
  if (!mapping || parallelOp.getNumResults() != 0)
    return failure();

  Location loc = parallelOp.getLoc();

  auto launchIndependent = [&launchOp](Value val) {
    return val.getParentRegion()->isAncestor(launchOp.getParentRegion());
  };

  auto ensureLaunchIndependent = [&rewriter,
                                  launchIndependent](Value val) -> Value {
    if (launchIndependent(val))
      return val;
    if (ConstantOp constOp = val.getDefiningOp<ConstantOp>())
      return rewriter.create<ConstantOp>(constOp.getLoc(), constOp.getValue());
    return {};
  };

  for (auto config : llvm::zip(mapping, parallelOp.getInductionVars(),
                               parallelOp.lowerBound(), parallelOp.upperBound(),
                               parallelOp.step())) {
    Attribute mappingAttribute;
    Value iv, lowerBound, upperBound, step;
    std::tie(mappingAttribute, iv, lowerBound, upperBound, step) = config;
    auto annotation = mappingAttribute.dyn_cast<gpu::ParallelLoopDimMapping>();
    if (!annotation)
      return parallelOp.emitOpError()
             << "expected mapping attribute for lowering to GPU";
    Value newIndex;
    gpu::Processor processor = gpu::getProcessor(annotation);

    if (isMappedToProcessor(processor)) {
      // Use the corresponding thread/grid index as replacement for the loop iv.
      Value operand =
          launchOp.body().getArgument(getLaunchOpArgumentNum(processor));
      // Take the index map and add the lower bound and step computations in.
      // This computes operand * step + lowerBound.
      // Use an affine map here so that it composes nicely with the provided
      // annotation.
      AffineMap lowerAndStep = AffineMap::get(
          1, 2,
          rewriter.getAffineDimExpr(0) * rewriter.getAffineSymbolExpr(0) +
              rewriter.getAffineSymbolExpr(1));
      newIndex = rewriter.create<AffineApplyOp>(
          loc, annotation.map().getValue().compose(lowerAndStep),
          ValueRange{operand, step, lowerBound});
      // If there was also a bound, insert that, too.
      // TODO: Check that we do not assign bounds twice.
      if (annotation.bound().getValue()) {
        // We pass as the single operand to the bound-map the number of
        // iterations, which is (upperBound - lowerBound) ceilDiv step. To
        // support inner loops with dynamic upper bounds (as generated by e.g.
        // tiling), try to derive a max for the bounds. If the used bound for
        // the hardware id is imprecise, wrap the contained code into a
        // conditional. If the lower-bound is constant or defined before the
        // launch, we can use it in the launch bounds. Otherwise fail.
        if (!launchIndependent(lowerBound) &&
            !isa_and_nonnull<ConstantOp>(lowerBound.getDefiningOp()))
          return failure();
        // The step must also be constant or defined outside of the loop nest.
        if (!launchIndependent(step) &&
            !isa_and_nonnull<ConstantOp>(step.getDefiningOp()))
          return failure();
        // If the upper-bound is constant or defined before the launch, we can
        // use it in the launch bounds directly. Otherwise try to derive a
        // bound.
        bool boundIsPrecise =
            launchIndependent(upperBound) ||
            isa_and_nonnull<ConstantOp>(upperBound.getDefiningOp());
        {
          PatternRewriter::InsertionGuard guard(rewriter);
          rewriter.setInsertionPoint(launchOp);
          if (!boundIsPrecise) {
            upperBound = deriveStaticUpperBound(upperBound, rewriter);
            if (!upperBound) {
              return parallelOp.emitOpError()
                     << "cannot derive loop-invariant upper bound for number "
                        "of iterations";
            }
          }
          // Compute the number of iterations needed. We compute this as an
          // affine expression ceilDiv (upperBound - lowerBound) step. We use
          // affine.apply here so that it composes nicely with the provided map.
          AffineMap stepMap =
              AffineMap::get(0, 3,
                             ((rewriter.getAffineSymbolExpr(0) -
                               rewriter.getAffineSymbolExpr(1))
                                  .ceilDiv(rewriter.getAffineSymbolExpr(2))));
          Value launchBound = rewriter.create<AffineApplyOp>(
              loc, annotation.bound().getValue().compose(stepMap),
              ValueRange{
                  ensureLaunchIndependent(
                      cloningMap.lookupOrDefault(upperBound)),
                  ensureLaunchIndependent(
                      cloningMap.lookupOrDefault(lowerBound)),
                  ensureLaunchIndependent(cloningMap.lookupOrDefault(step))});
          // todo(herhut,ravishankarm): Update the behavior of setMappingAttr
          // when this condition is relaxed.
          if (bounds.find(processor) != bounds.end()) {
            return parallelOp.emitOpError()
                   << "cannot redefine the bound for processor "
                   << static_cast<int64_t>(processor);
          }
          bounds[processor] = launchBound;
        }
        if (!boundIsPrecise) {
          // We are using an approximation, create a surrounding conditional.
          Value originalBound = std::get<3>(config);
          CmpIOp pred = rewriter.create<CmpIOp>(
              loc, CmpIPredicate::slt, newIndex,
              cloningMap.lookupOrDefault(originalBound));
          scf::IfOp ifOp = rewriter.create<scf::IfOp>(loc, pred, false);
          rewriter.setInsertionPointToStart(&ifOp.thenRegion().front());
          // Put a sentinel into the worklist so we know when to pop out of the
          // if body again. We use the launchOp here, as that cannot be part of
          // the body's instructions.
          worklist.push_back(launchOp.getOperation());
        }
      }
    } else {
      // Create a sequential for loop.
      auto loopOp = rewriter.create<scf::ForOp>(
          loc, cloningMap.lookupOrDefault(lowerBound),
          cloningMap.lookupOrDefault(upperBound),
          cloningMap.lookupOrDefault(step));
      newIndex = loopOp.getInductionVar();
      rewriter.setInsertionPointToStart(loopOp.getBody());
      // Put a sentinel into the worklist so we know when to pop out of the loop
      // body again. We use the launchOp here, as that cannot be part of the
      // body's instructions.
      worklist.push_back(launchOp.getOperation());
    }
    cloningMap.map(iv, newIndex);
  }
  Block *body = parallelOp.getBody();
  worklist.reserve(worklist.size() + body->getOperations().size());
  for (Operation &op : llvm::reverse(body->without_terminator()))
    worklist.push_back(&op);
  return success();
}

/// Lower a `scf.parallel` operation into a corresponding `gpu.launch`
/// operation.
///
/// This essentially transforms a loop nest into a corresponding SIMT function.
/// The conversion is driven by mapping annotations on the `scf.parallel`
/// operations. The mapping is provided via an array attribute named `mapping`,
/// with one dictionary per loop dimension; each dictionary has three entries:
///  - processor: the hardware id to map to. 0-2 are block dimensions, 3-5 are
///               thread dimensions and 6 is sequential.
///  - map : An affine map that is used to pre-process hardware ids before
///          substitution.
///  - bound : An affine map that is used to compute the bound of the hardware
///            id based on an upper bound of the number of iterations.
/// If the `scf.parallel` contains nested `scf.parallel` operations, those
/// need to be annotated as well. Structurally, the transformation works by
/// splicing all operations from nested `scf.parallel` operations into a single
/// sequence. Indices mapped to hardware ids are substituted with those ids,
/// whereas sequential mappings result in a sequential for-loop. To have more
/// flexibility when mapping code to hardware ids, the transform supports two
/// affine maps. The first, `map`, is used to compute the actual index for
/// substitution from the hardware id. The second, `bound`, is used to compute
/// the launch dimension for the hardware id from the number of iterations the
/// mapped loop is performing. Note that the number of iterations might be
/// imprecise if the corresponding loop-bounds are loop-dependent. In such
/// cases, the hardware id might iterate over additional indices. The
/// transformation caters for this by predicating the created sequence of
/// instructions on the actual loop bound. This only works if a static upper
/// bound for the dynamic loop bound can be derived, currently via analyzing
/// `affine.min` operations.
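/// For example (a sketch; the attribute syntax is illustrative and
/// abbreviated), the annotated loop
///
///   scf.parallel (%i, %j) = (%c0, %c0) to (%N, %M) step (%c1, %c1) {
///     ...
///   } {mapping = [
///        {processor = 0, map = affine_map<(d0) -> (d0)>,
///         bound = affine_map<(d0) -> (d0)>},
///        {processor = 3, map = affine_map<(d0) -> (d0)>,
///         bound = affine_map<(d0) -> (d0)>}]}
///
/// maps %i to the x block id and %j to the x thread id, with the grid and
/// block sizes of the generated `gpu.launch` derived from the respective
/// iteration counts.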
LogicalResult
ParallelToGpuLaunchLowering::matchAndRewrite(ParallelOp parallelOp,
                                             PatternRewriter &rewriter) const {
  // Create a launch operation. We start with a bound of one for all grid and
  // block sizes. Those will be refined later as we discover them from the
  // mappings.
  Location loc = parallelOp.getLoc();
  Value constantOne = rewriter.create<ConstantIndexOp>(parallelOp.getLoc(), 1);
  gpu::LaunchOp launchOp = rewriter.create<gpu::LaunchOp>(
      parallelOp.getLoc(), constantOne, constantOne, constantOne, constantOne,
      constantOne, constantOne);
  rewriter.setInsertionPointToEnd(&launchOp.body().front());
  rewriter.create<gpu::TerminatorOp>(loc);
  rewriter.setInsertionPointToStart(&launchOp.body().front());

  BlockAndValueMapping cloningMap;
  llvm::DenseMap<gpu::Processor, Value> launchBounds;
  SmallVector<Operation *, 16> worklist;
  if (failed(processParallelLoop(parallelOp, launchOp, cloningMap, worklist,
                                 launchBounds, rewriter)))
    return failure();

  // Whether we have seen any side-effects. Reset when leaving an inner scope.
  bool seenSideeffects = false;
  // Whether we have left a nesting scope (and hence are no longer innermost).
  bool leftNestingScope = false;
  while (!worklist.empty()) {
    Operation *op = worklist.pop_back_val();
    // Now walk over the body and clone it.
    // TODO: This is only correct if there either is no further scf.parallel
    //       nested or this code is side-effect free. Otherwise we might need
    //       predication. We are overly conservative for now and only allow
    //       side-effects in the innermost scope.
    if (auto nestedParallel = dyn_cast<ParallelOp>(op)) {
      // Before entering a nested scope, make sure there have been no
      // side effects until now.
      if (seenSideeffects)
        return failure();
      // A nested scf.parallel needs insertion of code to compute indices.
      // Insert that now. This will also update the worklist with the loop's
      // body.
      if (failed(processParallelLoop(nestedParallel, launchOp, cloningMap,
                                     worklist, launchBounds, rewriter)))
        return failure();
    } else if (op == launchOp.getOperation()) {
      // Found our sentinel value. We have finished the operations from one
      // nesting level, pop one level back up.
      auto parent = rewriter.getInsertionPoint()->getParentOp();
      rewriter.setInsertionPointAfter(parent);
      leftNestingScope = true;
      seenSideeffects = false;
    } else {
      // Otherwise we copy it over.
      Operation *clone = rewriter.clone(*op, cloningMap);
      cloningMap.map(op->getResults(), clone->getResults());
      // Check for side effects.
      // TODO: Handle region side effects properly.
      seenSideeffects |= !MemoryEffectOpInterface::hasNoEffect(clone) ||
                         clone->getNumRegions() != 0;
      // If we are no longer in the innermost scope, side effects are
      // disallowed.
      if (seenSideeffects && leftNestingScope)
        return failure();
    }
  }

  // Now that we have succeeded in creating the launch operation, also update
  // the bounds.
  for (auto bound : launchBounds)
    launchOp.setOperand(getLaunchOpArgumentNum(std::get<0>(bound)),
                        std::get<1>(bound));

  rewriter.eraseOp(parallelOp);
  return success();
}

void mlir::populateParallelLoopToGPUPatterns(OwningRewritePatternList &patterns,
                                             MLIRContext *ctx) {
  patterns.insert<ParallelToGpuLaunchLowering>(ctx);
}