//===- SCFToGPU.cpp - Convert an affine loop nest to a GPU kernel -------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This implements a straightforward conversion of a loop nest into a GPU
// kernel. The caller is expected to guarantee that the conversion is correct
// or to further transform the kernel to ensure correctness.
//
//===----------------------------------------------------------------------===//

#include "mlir/Conversion/SCFToGPU/SCFToGPU.h"

#include "mlir/Conversion/AffineToStandard/AffineToStandard.h"
#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Dialect/GPU/GPUDialect.h"
#include "mlir/Dialect/GPU/ParallelLoopMapper.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Dialect/SCF/SCF.h"
#include "mlir/Dialect/StandardOps/IR/Ops.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/BlockAndValueMapping.h"
#include "mlir/IR/Builders.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Transforms/DialectConversion.h"
#include "mlir/Transforms/LoopUtils.h"
#include "mlir/Transforms/Passes.h"
#include "mlir/Transforms/RegionUtils.h"
#include "llvm/ADT/Sequence.h"
#include "llvm/Support/Debug.h"

#define DEBUG_TYPE "loops-to-gpu"

using namespace mlir;
using namespace mlir::scf;

// Name of internal attribute to mark visited operations during conversion.
//
// NOTE: The conversion originally used the following legality criteria:
//   `!parallelOp->hasAttr(gpu::getMappingAttrName())`
// But the provided pattern might reject some cases based on more detailed
// analysis of the `mapping` attribute.
// To avoid dialect conversion failure due to a non-converted illegal
// operation, we use this extra Unit attribute as a marker that the operation
// was checked by the pattern and should be considered legal in the following
// legality checks. The `finalizeParallelLoopToGPUConversion` function performs
// cleanup of these extra attributes and is supposed to be called after the
// dialect conversion.
//
// TODO: Implement a cleaner solution, factoring out the "matching" logic
// from the pattern and its callees into a separate function that can be called
// from both the pattern and the op legality check.
static constexpr StringLiteral kVisitedAttrName = "SCFToGPU_visited";

// Extract an indexed value from KernelDim3.
static Value getDim3Value(const gpu::KernelDim3 &dim3, unsigned pos) {
  switch (pos) {
  case 0:
    return dim3.x;
  case 1:
    return dim3.y;
  case 2:
    return dim3.z;
  default:
    llvm_unreachable("dim3 position out of bounds");
  }
  return nullptr;
}

// Get the lower bound-related operands of a loop operation.
static Operation::operand_range getLowerBoundOperands(AffineForOp forOp) {
  return forOp.getLowerBoundOperands();
}

// Get the upper bound-related operands of a loop operation.
static Operation::operand_range getUpperBoundOperands(AffineForOp forOp) {
  return forOp.getUpperBoundOperands();
}

// Get a Value that corresponds to the loop step. If the step is an attribute,
// materialize a corresponding constant using builder.
static Value getOrCreateStep(AffineForOp forOp, OpBuilder &builder) {
  return builder.create<ConstantIndexOp>(forOp.getLoc(), forOp.getStep());
}

// Get a Value for the loop lower bound. If the value requires computation,
// materialize the instructions using builder.
static Value getOrEmitLowerBound(AffineForOp forOp, OpBuilder &builder) {
  return lowerAffineLowerBound(forOp, builder);
}

// Get a Value for the loop upper bound. If the value requires computation,
// materialize the instructions using builder.
static Value getOrEmitUpperBound(AffineForOp forOp, OpBuilder &builder) {
  return lowerAffineUpperBound(forOp, builder);
}

// Check the structure of the loop nest:
//   - there are enough loops to map to numDims;
//   - the loops are perfectly nested;
//   - the loop bounds can be computed above the outermost loop.
// This roughly corresponds to the "matcher" part of the pattern-based
// rewriting infrastructure.
static LogicalResult checkAffineLoopNestMappableImpl(AffineForOp forOp,
                                                     unsigned numDims) {
  Region &limit = forOp.region();
  for (unsigned i = 0, e = numDims; i < e; ++i) {
    Operation *nested = &forOp.getBody()->front();
    if (!areValuesDefinedAbove(getLowerBoundOperands(forOp), limit) ||
        !areValuesDefinedAbove(getUpperBoundOperands(forOp), limit))
      return forOp.emitError(
          "loops with bounds depending on other mapped loops "
          "are not supported");

    // The innermost loop can have an arbitrary body, skip the perfect nesting
    // check for it.
    if (i == e - 1)
      break;

    auto begin = forOp.getBody()->begin(), end = forOp.getBody()->end();
    if (forOp.getBody()->empty() || std::next(begin, 2) != end)
      return forOp.emitError("expected perfectly nested loops in the body");

    if (!(forOp = dyn_cast<AffineForOp>(nested)))
      return nested->emitError("expected a nested loop");
  }
  return success();
}

static LogicalResult checkAffineLoopNestMappable(AffineForOp forOp,
                                                 unsigned numBlockDims,
                                                 unsigned numThreadDims) {
  if (numBlockDims < 1 || numThreadDims < 1) {
    LLVM_DEBUG(llvm::dbgs() << "nothing to map");
    return success();
  }

  if (numBlockDims > 3) {
    return forOp.emitError("cannot map to more than 3 block dimensions");
  }
  if (numThreadDims > 3) {
    return forOp.emitError("cannot map to more than 3 thread dimensions");
  }
  return checkAffineLoopNestMappableImpl(forOp, numBlockDims + numThreadDims);
}

namespace {
// Helper structure that holds common state of the loop to GPU kernel
// conversion.
struct AffineLoopToGpuConverter {
  Optional<AffineForOp> collectBounds(AffineForOp forOp, unsigned numLoops);

  void createLaunch(AffineForOp rootForOp, AffineForOp innermostForOp,
                    unsigned numBlockDims, unsigned numThreadDims);

  // Ranges of the loops mapped to blocks or threads.
  SmallVector<Value, 6> dims;
  // Lower bounds of the loops mapped to blocks or threads.
  SmallVector<Value, 6> lbs;
  // Induction variables of the loops mapped to blocks or threads.
  SmallVector<Value, 6> ivs;
  // Steps of the loops mapped to blocks or threads.
  SmallVector<Value, 6> steps;
};
} // namespace

// Return true if the value is obviously a constant "one".
static bool isConstantOne(Value value) {
  if (auto def = value.getDefiningOp<ConstantIndexOp>())
    return def.getValue() == 1;
  return false;
}

// Collect ranges, bounds, steps and induction variables in preparation for
// mapping a loop nest of depth "numLoops" rooted at "forOp" to a GPU kernel.
// This may fail if the IR for computing loop bounds cannot be constructed, for
// example if an affine loop uses semi-affine maps. Return the last loop to be
// mapped on success, llvm::None on failure.
Optional<AffineForOp>
AffineLoopToGpuConverter::collectBounds(AffineForOp forOp, unsigned numLoops) {
  OpBuilder builder(forOp.getOperation());
  dims.reserve(numLoops);
  lbs.reserve(numLoops);
  ivs.reserve(numLoops);
  steps.reserve(numLoops);
  AffineForOp currentLoop = forOp;
  for (unsigned i = 0; i < numLoops; ++i) {
    Value lowerBound = getOrEmitLowerBound(currentLoop, builder);
    Value upperBound = getOrEmitUpperBound(currentLoop, builder);
    if (!lowerBound || !upperBound) {
      return llvm::None;
    }

    Value range =
        builder.create<SubIOp>(currentLoop.getLoc(), upperBound, lowerBound);
    Value step = getOrCreateStep(currentLoop, builder);
    if (!isConstantOne(step))
      range = builder.create<SignedDivIOp>(currentLoop.getLoc(), range, step);
    dims.push_back(range);

    lbs.push_back(lowerBound);
    ivs.push_back(currentLoop.getInductionVar());
    steps.push_back(step);

    if (i != numLoops - 1)
      currentLoop = cast<AffineForOp>(&currentLoop.getBody()->front());
  }
  return currentLoop;
}

// Replace the loop nest rooted at "rootForOp" with a GPU launch operation.
// This expects "innermostForOp" to point to the last loop to be transformed to
// the kernel, and to have (numBlockDims + numThreadDims) perfectly nested
// loops between "rootForOp" and "innermostForOp".
void AffineLoopToGpuConverter::createLaunch(AffineForOp rootForOp,
                                            AffineForOp innermostForOp,
                                            unsigned numBlockDims,
                                            unsigned numThreadDims) {
  OpBuilder builder(rootForOp.getOperation());
  // Prepare the grid and block sizes for the launch operation. If there is
  // no loop mapped to a specific dimension, use constant "1" as its size.
  Value constOne = (numBlockDims < 3 || numThreadDims < 3)
                       ? builder.create<ConstantIndexOp>(rootForOp.getLoc(), 1)
                       : nullptr;
  Value gridSizeX = numBlockDims > 0 ? dims[0] : constOne;
  Value gridSizeY = numBlockDims > 1 ? dims[1] : constOne;
  Value gridSizeZ = numBlockDims > 2 ? dims[2] : constOne;
  Value blockSizeX = numThreadDims > 0 ? dims[numBlockDims] : constOne;
  Value blockSizeY = numThreadDims > 1 ? dims[numBlockDims + 1] : constOne;
  Value blockSizeZ = numThreadDims > 2 ? dims[numBlockDims + 2] : constOne;

  // Create a launch op and move the body region of the innermost loop to the
  // launch op.
  auto launchOp = builder.create<gpu::LaunchOp>(
      rootForOp.getLoc(), gridSizeX, gridSizeY, gridSizeZ, blockSizeX,
      blockSizeY, blockSizeZ);

  // Replace the loop terminator (loops contain only a single block) with the
  // gpu terminator and move the operations from the loop body block to the gpu
  // launch body block. Do not move the entire block because of the difference
  // in block arguments.
  Operation &terminator = innermostForOp.getBody()->back();
  Location terminatorLoc = terminator.getLoc();
  terminator.erase();
  builder.setInsertionPointToEnd(innermostForOp.getBody());
  builder.create<gpu::TerminatorOp>(terminatorLoc, llvm::None);
  launchOp.body().front().getOperations().splice(
      launchOp.body().front().begin(),
      innermostForOp.getBody()->getOperations());

  // Remap the loop iterators to use block/thread identifiers instead. Loops
  // may iterate from LB with step S whereas GPU thread/block ids always iterate
  // from 0 to N with step 1. Therefore, loop induction variables are replaced
  // with (gpu-thread/block-id * S) + LB.
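  // For example (illustrative, not taken from the code below): a loop with
  // lower bound 4 and step 2 has its induction variable rewritten to
  // `id * 2 + 4`, where `id` is the matching block or thread id produced by
  // the launch operation.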
  builder.setInsertionPointToStart(&launchOp.body().front());
  auto lbArgumentIt = lbs.begin();
  auto stepArgumentIt = steps.begin();
  for (auto en : llvm::enumerate(ivs)) {
    Value id =
        en.index() < numBlockDims
            ? getDim3Value(launchOp.getBlockIds(), en.index())
            : getDim3Value(launchOp.getThreadIds(), en.index() - numBlockDims);
    Value step = steps[en.index()];
    if (!isConstantOne(step))
      id = builder.create<MulIOp>(rootForOp.getLoc(), step, id);

    Value ivReplacement =
        builder.create<AddIOp>(rootForOp.getLoc(), *lbArgumentIt, id);
    en.value().replaceAllUsesWith(ivReplacement);
    std::advance(lbArgumentIt, 1);
    std::advance(stepArgumentIt, 1);
  }

  // We are done and can erase the original outermost loop.
  rootForOp.erase();
}

// Generic loop to GPU kernel conversion function.
static LogicalResult convertAffineLoopNestToGPULaunch(AffineForOp forOp,
                                                      unsigned numBlockDims,
                                                      unsigned numThreadDims) {
  if (failed(checkAffineLoopNestMappable(forOp, numBlockDims, numThreadDims)))
    return failure();

  AffineLoopToGpuConverter converter;
  auto maybeInnerLoop =
      converter.collectBounds(forOp, numBlockDims + numThreadDims);
  if (!maybeInnerLoop)
    return failure();
  converter.createLaunch(forOp, *maybeInnerLoop, numBlockDims, numThreadDims);

  return success();
}

LogicalResult mlir::convertAffineLoopNestToGPULaunch(AffineForOp forOp,
                                                     unsigned numBlockDims,
                                                     unsigned numThreadDims) {
  return ::convertAffineLoopNestToGPULaunch(forOp, numBlockDims, numThreadDims);
}

namespace {
struct ParallelToGpuLaunchLowering : public OpRewritePattern<ParallelOp> {
  using OpRewritePattern<ParallelOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(ParallelOp parallelOp,
                                PatternRewriter &rewriter) const override;
};
} // namespace

/// Tries to derive a static upper bound from the defining operation of
/// `upperBound`.
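/// For example (illustrative): if `upperBound` is produced by
/// `affine.min affine_map<(d0) -> (d0, 64)>(%n)`, the constant map result 64
/// is materialized and returned. If no static bound can be derived, a null
/// Value is returned.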
static Value deriveStaticUpperBound(Value upperBound,
                                    PatternRewriter &rewriter) {
  if (auto op = upperBound.getDefiningOp<ConstantIndexOp>()) {
    return op;
  }

  if (auto minOp = upperBound.getDefiningOp<AffineMinOp>()) {
    for (const AffineExpr &result : minOp.map().getResults()) {
      if (auto constExpr = result.dyn_cast<AffineConstantExpr>()) {
        return rewriter.create<ConstantIndexOp>(minOp.getLoc(),
                                                constExpr.getValue());
      }
    }
  }

  if (auto multiplyOp = upperBound.getDefiningOp<MulIOp>()) {
    if (auto lhs = dyn_cast_or_null<ConstantIndexOp>(
            deriveStaticUpperBound(multiplyOp.getOperand(0), rewriter)
                .getDefiningOp()))
      if (auto rhs = dyn_cast_or_null<ConstantIndexOp>(
              deriveStaticUpperBound(multiplyOp.getOperand(1), rewriter)
                  .getDefiningOp())) {
        // Assumptions about the upper bound of minimum computations no longer
        // work if multiplied by a negative value, so abort in this case.
        if (lhs.getValue() < 0 || rhs.getValue() < 0)
          return {};

        return rewriter.create<ConstantIndexOp>(
            multiplyOp.getLoc(), lhs.getValue() * rhs.getValue());
      }
  }

  return {};
}

static bool isMappedToProcessor(gpu::Processor processor) {
  return processor != gpu::Processor::Sequential;
}

static unsigned getLaunchOpArgumentNum(gpu::Processor processor) {
  switch (processor) {
  case gpu::Processor::BlockX:
    return 0;
  case gpu::Processor::BlockY:
    return 1;
  case gpu::Processor::BlockZ:
    return 2;
  case gpu::Processor::ThreadX:
    return 3;
  case gpu::Processor::ThreadY:
    return 4;
  case gpu::Processor::ThreadZ:
    return 5;
  default:;
  }
  llvm_unreachable(
      "invalid processor type while retrieving launch op argument number");
}

/// Modifies the current transformation state to capture the effect of the
/// given `scf.parallel` operation on index substitutions and the operations to
/// be inserted.
/// Specifically, if a dimension of a parallel loop is mapped to a hardware id,
/// this function will
/// - compute the loop index based on the hardware id and affine map from the
///   mapping and update `cloningMap` to substitute all uses.
/// - derive a new upper bound for the hardware id and augment the provided
///   `gpu.launch operation` accordingly.
/// - if the upper bound is imprecise, insert a conditional in the `gpu.launch`
///   and update the rewriter to insert into the conditional's body.
/// If the dimension is mapped to sequential,
/// - insert a for loop into the body and update the rewriter to insert into
///   the for loop's body.
/// - update the `cloningMap` to replace uses of the index with the index of
///   the new for loop.
/// In either case,
/// - append the instructions from the loop's body to the worklist, in reverse
///   order.
/// To note the end of the current scope in case a loop or conditional was
/// inserted, a sentinel (the `gpu.launch` operation) is inserted into the
/// worklist. This signals the processor of the worklist to pop the rewriter
/// one scope-level up.
static LogicalResult processParallelLoop(
    ParallelOp parallelOp, gpu::LaunchOp launchOp,
    BlockAndValueMapping &cloningMap, SmallVectorImpl<Operation *> &worklist,
    DenseMap<gpu::Processor, Value> &bounds, PatternRewriter &rewriter) {
  // TODO: Verify that this is a valid GPU mapping.
  // processor ids: 0-2 block [x/y/z], 3-5 -> thread [x/y/z], 6 -> sequential
  ArrayAttr mapping =
      parallelOp->getAttrOfType<ArrayAttr>(gpu::getMappingAttrName());

  // TODO: Support reductions.
  if (!mapping || parallelOp.getNumResults() != 0)
    return failure();

  Location loc = parallelOp.getLoc();

  auto launchIndependent = [&launchOp](Value val) {
    return val.getParentRegion()->isAncestor(launchOp->getParentRegion());
  };

  auto ensureLaunchIndependent = [&rewriter,
                                  launchIndependent](Value val) -> Value {
    if (launchIndependent(val))
      return val;
    if (ConstantOp constOp = val.getDefiningOp<ConstantOp>())
      return rewriter.create<ConstantOp>(constOp.getLoc(), constOp.getValue());
    return {};
  };

  for (auto config : llvm::zip(mapping, parallelOp.getInductionVars(),
                               parallelOp.lowerBound(), parallelOp.upperBound(),
                               parallelOp.step())) {
    Attribute mappingAttribute;
    Value iv, lowerBound, upperBound, step;
    std::tie(mappingAttribute, iv, lowerBound, upperBound, step) = config;
    auto annotation = mappingAttribute.dyn_cast<gpu::ParallelLoopDimMapping>();
    if (!annotation)
      return parallelOp.emitOpError()
             << "expected mapping attribute for lowering to GPU";
    Value newIndex;
    gpu::Processor processor = gpu::getProcessor(annotation);

    if (isMappedToProcessor(processor)) {
      // Use the corresponding thread/grid index as replacement for the loop
      // iv.
      Value operand =
          launchOp.body().getArgument(getLaunchOpArgumentNum(processor));
      // Take the index map and add the lower bound and step computations in.
      // This computes operand * step + lowerBound.
      // Use an affine map here so that it composes nicely with the provided
      // annotation.
      AffineMap lowerAndStep = AffineMap::get(
          1, 2,
          rewriter.getAffineDimExpr(0) * rewriter.getAffineSymbolExpr(0) +
              rewriter.getAffineSymbolExpr(1));
      newIndex = rewriter.create<AffineApplyOp>(
          loc, annotation.map().getValue().compose(lowerAndStep),
          ValueRange{operand, step, lowerBound});
      // If there was also a bound, insert that, too.
      // TODO: Check that we do not assign bounds twice.
      if (annotation.bound().getValue()) {
        // We pass as the single operand to the bound-map the number of
        // iterations, which is (upperBound - lowerBound) ceilDiv step. To
        // support inner loops with dynamic upper bounds (as generated by e.g.
        // tiling), try to derive a max for the bounds. If the used bound for
        // the hardware id is imprecise, wrap the contained code into a
        // conditional. If the lower-bound is constant or defined before the
        // launch, we can use it in the launch bounds. Otherwise fail.
        if (!launchIndependent(lowerBound) &&
            !isa_and_nonnull<ConstantOp>(lowerBound.getDefiningOp()))
          return failure();
        // The step must also be constant or defined outside of the loop nest.
        if (!launchIndependent(step) &&
            !isa_and_nonnull<ConstantOp>(step.getDefiningOp()))
          return failure();
        // If the upper-bound is constant or defined before the launch, we can
        // use it in the launch bounds directly. Otherwise try to derive a
        // bound.
        bool boundIsPrecise =
            launchIndependent(upperBound) ||
            isa_and_nonnull<ConstantOp>(upperBound.getDefiningOp());
        {
          PatternRewriter::InsertionGuard guard(rewriter);
          rewriter.setInsertionPoint(launchOp);
          if (!boundIsPrecise) {
            upperBound = deriveStaticUpperBound(upperBound, rewriter);
            if (!upperBound) {
              return rewriter.notifyMatchFailure(
                  parallelOp,
                  "cannot derive loop-invariant upper bound for number of "
                  "iterations");
            }
          }
          // Compute the number of iterations needed. We compute this as an
          // affine expression ceilDiv (upperBound - lowerBound) step. We use
          // affine.apply here so that it composes nicely with the provided
          // map.
          AffineMap stepMap = AffineMap::get(
              1, 2,
              ((rewriter.getAffineDimExpr(0) - rewriter.getAffineSymbolExpr(0))
                   .ceilDiv(rewriter.getAffineSymbolExpr(1))));
          Value launchBound = rewriter.create<AffineApplyOp>(
              loc, annotation.bound().getValue().compose(stepMap),
              ValueRange{
                  ensureLaunchIndependent(
                      cloningMap.lookupOrDefault(upperBound)),
                  ensureLaunchIndependent(
                      cloningMap.lookupOrDefault(lowerBound)),
                  ensureLaunchIndependent(cloningMap.lookupOrDefault(step))});
          // todo(herhut,ravishankarm): Update the behavior of setMappingAttr
          // when this condition is relaxed.
          if (bounds.find(processor) != bounds.end()) {
            return rewriter.notifyMatchFailure(
                parallelOp, "cannot redefine the bound for processor " +
                                Twine(static_cast<int64_t>(processor)));
          }
          bounds[processor] = launchBound;
        }
        if (!boundIsPrecise) {
          // We are using an approximation, create a surrounding conditional.
          Value originalBound = std::get<3>(config);
          CmpIOp pred = rewriter.create<CmpIOp>(
              loc, CmpIPredicate::slt, newIndex,
              cloningMap.lookupOrDefault(originalBound));
          scf::IfOp ifOp = rewriter.create<scf::IfOp>(loc, pred, false);
          rewriter.setInsertionPointToStart(&ifOp.thenRegion().front());
          // Put a sentinel into the worklist so we know when to pop out of
          // the if body again. We use the launchOp here, as that cannot be
          // part of the if body's instructions.
          worklist.push_back(launchOp.getOperation());
        }
      }
    } else {
      // Create a sequential for loop.
      auto loopOp = rewriter.create<scf::ForOp>(
          loc, cloningMap.lookupOrDefault(lowerBound),
          cloningMap.lookupOrDefault(upperBound),
          cloningMap.lookupOrDefault(step));
      newIndex = loopOp.getInductionVar();
      rewriter.setInsertionPointToStart(loopOp.getBody());
      // Put a sentinel into the worklist so we know when to pop out of the
      // loop body again. We use the launchOp here, as that cannot be part of
      // the loop body's instructions.
      worklist.push_back(launchOp.getOperation());
    }
    cloningMap.map(iv, newIndex);
  }

  // Propagate custom user-defined optional attributes that can be used at a
  // later stage, such as extension data for GPU kernel dispatch.
  for (const auto &namedAttr : parallelOp->getAttrs()) {
    if (namedAttr.first == gpu::getMappingAttrName() ||
        namedAttr.first == ParallelOp::getOperandSegmentSizeAttr())
      continue;
    launchOp->setAttr(namedAttr.first, namedAttr.second);
  }

  Block *body = parallelOp.getBody();
  worklist.reserve(worklist.size() + body->getOperations().size());
  for (Operation &op : llvm::reverse(body->without_terminator()))
    worklist.push_back(&op);
  return success();
}

/// Lower a `scf.parallel` operation into a corresponding `gpu.launch`
/// operation.
///
/// This essentially transforms a loop nest into a corresponding SIMT function.
/// The conversion is driven by mapping annotations on the `scf.parallel`
/// operations. The mapping is provided via a `DictionaryAttribute` named
/// `mapping`, which has three entries:
///  - processor: the hardware id to map to. 0-2 are block dimensions, 3-5 are
///    thread dimensions and 6 is sequential.
///  - map : An affine map that is used to pre-process hardware ids before
///    substitution.
///  - bound : An affine map that is used to compute the bound of the hardware
///    id based on an upper bound of the number of iterations.
/// If the `scf.parallel` contains nested `scf.parallel` operations, those
/// need to be annotated, as well. Structurally, the transformation works by
/// splicing all operations from nested `scf.parallel` operations into a single
/// sequence. Indices mapped to hardware ids are substituted with those ids,
/// whereas sequential mappings result in a sequential for-loop. To have more
/// flexibility when mapping code to hardware ids, the transform supports two
/// affine maps. The first `map` is used to compute the actual index for
/// substitution from the hardware id. The second `bound` is used to compute
/// the launch dimension for the hardware id from the number of iterations the
/// mapped loop is performing. Note that the number of iterations might be
/// imprecise if the corresponding loop-bounds are loop-dependent. In such
/// cases, the hardware id might iterate over additional indices. The
/// transformation caters for this by predicating the created sequence of
/// instructions on the actual loop bound. This only works if a static upper
/// bound for the dynamic loop bound can be derived, currently via analyzing
/// `affine.min` operations.
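///
/// For illustration only (attribute syntax approximate, not taken from a
/// test), a one-dimensional loop mapped to thread dimension x could carry an
/// annotation such as:
///
///   scf.parallel (%i) = (%c0) to (%n) step (%c1) {
///     ...
///   } {mapping = [{processor = 3, map = affine_map<(d0) -> (d0)>,
///                  bound = affine_map<(d0) -> (d0)>}]}
///
/// Here `processor = 3` requests thread dimension x, and the identity `map`
/// and `bound` use the hardware id and iteration count unchanged.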
LogicalResult
ParallelToGpuLaunchLowering::matchAndRewrite(ParallelOp parallelOp,
                                             PatternRewriter &rewriter) const {
  // Mark the operation as visited for recursive legality check.
  parallelOp->setAttr(kVisitedAttrName, rewriter.getUnitAttr());

  // We can only transform starting at the outer-most loop. Launches inside of
  // parallel loops are not supported.
  if (auto parentLoop = parallelOp->getParentOfType<ParallelOp>())
    return failure();
  // Create a launch operation. We start with bound one for all grid/block
  // sizes. Those will be refined later as we discover them from mappings.
  Location loc = parallelOp.getLoc();
  Value constantOne = rewriter.create<ConstantIndexOp>(parallelOp.getLoc(), 1);
  gpu::LaunchOp launchOp = rewriter.create<gpu::LaunchOp>(
      parallelOp.getLoc(), constantOne, constantOne, constantOne, constantOne,
      constantOne, constantOne);
  rewriter.setInsertionPointToEnd(&launchOp.body().front());
  rewriter.create<gpu::TerminatorOp>(loc);
  rewriter.setInsertionPointToStart(&launchOp.body().front());

  BlockAndValueMapping cloningMap;
  llvm::DenseMap<gpu::Processor, Value> launchBounds;
  SmallVector<Operation *, 16> worklist;
  if (failed(processParallelLoop(parallelOp, launchOp, cloningMap, worklist,
                                 launchBounds, rewriter)))
    return failure();

  // Whether we have seen any side-effects. Reset when leaving an inner scope.
  bool seenSideeffects = false;
  // Whether we have left a nesting scope (and hence are no longer innermost).
  bool leftNestingScope = false;
  while (!worklist.empty()) {
    Operation *op = worklist.pop_back_val();
    // Now walk over the body and clone it.
    // TODO: This is only correct if there either is no further scf.parallel
    // nested or this code is side-effect free. Otherwise we might need
    // predication. We are overly conservative for now and only allow
    // side-effects in the innermost scope.
    if (auto nestedParallel = dyn_cast<ParallelOp>(op)) {
      // Before entering a nested scope, make sure there have been no
      // sideeffects until now.
      if (seenSideeffects)
        return failure();
      // A nested scf.parallel needs insertion of code to compute indices.
      // Insert that now. This will also update the worklist with the loop's
      // body.
      if (failed(processParallelLoop(nestedParallel, launchOp, cloningMap,
                                     worklist, launchBounds, rewriter)))
        return failure();
    } else if (op == launchOp.getOperation()) {
      // Found our sentinel value. We have finished the operations from one
      // nesting level, pop one level back up.
      auto parent = rewriter.getInsertionPoint()->getParentOp();
      rewriter.setInsertionPointAfter(parent);
      leftNestingScope = true;
      seenSideeffects = false;
    } else {
      // Otherwise we copy it over.
      Operation *clone = rewriter.clone(*op, cloningMap);
      cloningMap.map(op->getResults(), clone->getResults());
      // Check for side effects.
      // TODO: Handle region side effects properly.
      seenSideeffects |= !MemoryEffectOpInterface::hasNoEffect(clone) ||
                         clone->getNumRegions() != 0;
      // If we are no longer in the innermost scope, sideeffects are
      // disallowed.
      if (seenSideeffects && leftNestingScope)
        return failure();
    }
  }

  // Now that we succeeded creating the launch operation, also update the
  // bounds.
  for (auto bound : launchBounds)
    launchOp.setOperand(getLaunchOpArgumentNum(std::get<0>(bound)),
                        std::get<1>(bound));

  rewriter.eraseOp(parallelOp);
  return success();
}

void mlir::populateParallelLoopToGPUPatterns(RewritePatternSet &patterns) {
  patterns.add<ParallelToGpuLaunchLowering>(patterns.getContext());
}

void mlir::configureParallelLoopToGPULegality(ConversionTarget &target) {
  target.addLegalDialect<memref::MemRefDialect>();
  target.addDynamicallyLegalOp<scf::ParallelOp>([](scf::ParallelOp parallelOp) {
    return !parallelOp->hasAttr(gpu::getMappingAttrName()) ||
           parallelOp->hasAttr(kVisitedAttrName);
  });
}

void mlir::finalizeParallelLoopToGPUConversion(Operation *op) {
  op->walk([](scf::ParallelOp parallelOp) {
    parallelOp->removeAttr(kVisitedAttrName);
  });
}