1 //===- SuperVectorize.cpp - Vectorize Pass Impl ---------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements vectorization of loops, operations and data types to
10 // a target-independent, n-D super-vector abstraction.
11 //
12 //===----------------------------------------------------------------------===//
13
14 #include "PassDetail.h"
15 #include "mlir/Analysis/AffineAnalysis.h"
16 #include "mlir/Analysis/LoopAnalysis.h"
17 #include "mlir/Analysis/NestedMatcher.h"
18 #include "mlir/Dialect/Affine/IR/AffineOps.h"
19 #include "mlir/Dialect/Affine/Utils.h"
20 #include "mlir/Dialect/Vector/VectorOps.h"
21 #include "mlir/Dialect/Vector/VectorUtils.h"
22 #include "mlir/IR/BlockAndValueMapping.h"
23 #include "mlir/Support/LLVM.h"
24 #include "llvm/ADT/STLExtras.h"
25 #include "llvm/Support/Debug.h"
26
27 using namespace mlir;
28 using namespace vector;
29
30 ///
31 /// Implements a high-level vectorization strategy on a Function.
32 /// The abstraction used is that of super-vectors, which provide, in a single
33 /// compact representation (the vector types), information that is expected
34 /// to reduce the impact of the phase ordering problem.
35 ///
36 /// Vector granularity:
37 /// ===================
38 /// This pass is designed to perform vectorization at a super-vector
39 /// granularity. A super-vector is loosely defined as a vector type that is a
40 /// multiple of a "good" vector size so the HW can efficiently implement a set
41 /// of high-level primitives. Multiple is understood along any dimension; e.g.
42 /// both vector<16xf32> and vector<2x8xf32> are valid super-vectors for a
43 /// vector<8xf32> HW vector. Note that a "good vector size so the HW can
44 /// efficiently implement a set of high-level primitives" is not necessarily an
45 /// integer multiple of actual hardware registers. We leave details of this
46 /// distinction unspecified for now.
47 ///
48 /// Some may prefer the terminology of a "tile of HW vectors". In this case, one
49 /// should note that super-vectors implement an "always full tile" abstraction.
50 /// They guarantee no partial-tile separation is necessary by relying on a
51 /// high-level copy-reshape abstraction that we call vector.transfer. This
52 /// copy-reshape operation is also responsible for performing layout
53 /// transposition if necessary. In the general case this will require a scoped
54 /// allocation in some notional local memory.
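///
/// As a purely illustrative sketch (using the simplified syntax of the examples
/// below; the values, shapes and padding handling are hypothetical), such a
/// copy-reshape with a layout transposition could resemble:
/// ```mlir
///   %v = vector.transfer_read %A[%i, %j]
///          {permutation_map = affine_map<(d0, d1) -> (d1, d0)>}
///        : memref<?x?xf32>, vector<32x256xf32>
/// ```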
55 ///
56 /// Whatever the mental model one prefers to use for this abstraction, the key
57 /// point is that we burn, into a single compact representation (the vector
58 /// types), information that is expected to reduce the impact of the phase
59 /// ordering problem. Indeed, a vector type conveys information that:
60 /// 1. the associated loops have dependency semantics that do not prevent
61 /// vectorization;
62 ///   2. the associated loops have been sliced in chunks of static sizes that are
63 /// compatible with vector sizes (i.e. similar to unroll-and-jam);
64 ///   3. the inner loops, in the unroll-and-jam analogy of 2, are captured by
65 ///      the vector type and no vectorization-hampering transformations can be
66 ///      applied to them anymore;
68 /// 4. the underlying memrefs are accessed in some notional contiguous way
69 /// that allows loading into vectors with some amount of spatial locality;
70 /// In other words, super-vectorization provides a level of separation of
71 /// concern by way of opacity to subsequent passes. This has the effect of
72 /// encapsulating and propagating vectorization constraints down the list of
73 /// passes until we are ready to lower further.
74 ///
75 /// For a particular target, a notion of minimal n-d vector size will be
76 /// specified and vectorization targets a multiple of those. In the following
77 /// paragraph, let "k ." represent "a multiple of", to be understood as a
78 /// multiple in the same dimension (e.g. vector<16 x k . 128> summarizes
79 /// vector<16 x 128>, vector<16 x 256>, vector<16 x 1024>, etc).
80 ///
81 /// Some non-exhaustive notable super-vector sizes of interest include:
82 /// - CPU: vector<k . HW_vector_size>,
83 /// vector<k' . core_count x k . HW_vector_size>,
84 /// vector<socket_count x k' . core_count x k . HW_vector_size>;
85 /// - GPU: vector<k . warp_size>,
86 /// vector<k . warp_size x float2>,
87 /// vector<k . warp_size x float4>,
88 ///     vector<k . warp_size x 4 x 4 x 4> (for tensor_core sizes).
89 ///
90 /// Loops and operations are emitted that operate on those super-vector shapes.
91 /// Subsequent lowering passes will materialize to actual HW vector sizes. These
92 /// passes are expected to be (gradually) more target-specific.
93 ///
94 /// At a high level, a vectorized load in a loop will resemble:
95 /// ```mlir
96 /// affine.for %i = ? to ? step ? {
97 /// %v_a = vector.transfer_read A[%i] : memref<?xf32>, vector<128xf32>
98 /// }
99 /// ```
100 /// It is the responsibility of the implementation of vector.transfer_read to
101 /// materialize vector registers from the original scalar memrefs. A later (more
102 /// target-dependent) lowering pass will materialize to actual HW vector sizes.
103 /// This lowering may occur at different times:
104 /// 1. at the MLIR level into a combination of loops, unrolling, DmaStartOp +
105 /// DmaWaitOp + vectorized operations for data transformations and shuffle;
106 /// thus opening opportunities for unrolling and pipelining. This is an
107 /// instance of library call "whiteboxing"; or
108 ///   2. later, in a target-specific lowering pass or hand-written library
109 ///      call, achieving full separation of concerns. This is an instance of
110 ///      a library call; or
111 /// 3. a mix of both, e.g. based on a model.
112 /// In the future, these operations will expose a contract to constrain the
113 /// search on vectorization patterns and sizes.
114 ///
115 /// Occurrence of super-vectorization in the compiler flow:
116 /// =======================================================
117 /// This is an active area of investigation. We start with 2 remarks to position
118 /// super-vectorization in the context of existing ongoing work: LLVM VPLAN
119 /// and LLVM SLP Vectorizer.
120 ///
121 /// LLVM VPLAN:
122 /// -----------
123 /// The astute reader may have noticed that in the limit, super-vectorization
124 /// can be applied at a similar time and with objectives similar to VPLAN's.
125 /// For instance, this is the case in a traditional polyhedral compilation flow
126 /// (e.g. the PPCG project, which uses ISL to provide dependence analysis,
127 /// multi-level scheduling + tiling, lifting of the footprint to fast memory,
128 /// communication synthesis, mapping and register optimizations), right before
129 /// unrolling. When vectorization is applied at this *late* level in a typical
130 /// polyhedral flow, and is instantiated with actual hardware vector sizes,
131 /// super-vectorization is expected to match (or subsume) the type of patterns
132 /// that LLVM's VPLAN aims at targeting. The main difference here is that MLIR
133 /// is higher level and our implementation should be significantly simpler. Also
134 /// note that in this mode, recursive patterns are probably a bit of an overkill
135 /// although it is reasonable to expect that mixing a bit of outer loop and
136 /// inner loop vectorization + unrolling will provide interesting choices to
137 /// MLIR.
138 ///
139 /// LLVM SLP Vectorizer:
140 /// --------------------
141 /// Super-vectorization, however, is not meant to be usable in a similar fashion
142 /// to the SLP vectorizer. The main difference lies in the information that
143 /// both vectorizers use: super-vectorization examines contiguity of memory
144 /// references along fastest varying dimensions and loops with recursive nested
145 /// patterns capturing imperfectly-nested loop nests; the SLP vectorizer, on
146 /// the other hand, performs flat pattern matching inside a single unrolled loop
147 /// body and stitches together pieces of load and store operations into full
148 /// 1-D vectors. We envision that the SLP vectorizer is a good way to capture
149 /// innermost loop, control-flow dependent patterns that super-vectorization may
150 /// not be able to capture easily. In other words, super-vectorization does not
151 /// aim at replacing the SLP vectorizer and the two solutions are complementary.
152 ///
153 /// Ongoing investigations:
154 /// -----------------------
155 /// We discuss the following *early* places where super-vectorization is
156 /// applicable and touch on the expected benefits and risks. We list the
157 /// opportunities in the context of the traditional polyhedral compiler flow
158 /// described in PPCG. There are essentially 6 places in the MLIR pass pipeline
159 /// we expect to experiment with super-vectorization:
160 /// 1. Right after language lowering to MLIR: this is the earliest time where
161 /// super-vectorization is expected to be applied. At this level, all the
162 /// language/user/library-level annotations are available and can be fully
163 /// exploited. Examples include loop-type annotations (such as parallel,
164 /// reduction, scan, dependence distance vector, vectorizable) as well as
165 ///      memory access annotations (such as guaranteed non-aliasing writes, or
166 ///      indirect accesses that are permutations by construction), or the fact
167 ///      that a particular operation is prescribed atomic by the user. At this
168 /// level, anything that enriches what dependence analysis can do should be
169 /// aggressively exploited. At this level we are close to having explicit
170 /// vector types in the language, except we do not impose that burden on the
171 /// programmer/library: we derive information from scalar code + annotations.
172 /// 2. After dependence analysis and before polyhedral scheduling: the
173 /// information that supports vectorization does not need to be supplied by a
174 /// higher level of abstraction. Traditional dependence analysis is available
175 /// in MLIR and will be used to drive vectorization and cost models.
176 ///
177 /// Let's pause here and remark that applying super-vectorization as described
178 /// in 1. and 2. presents clear opportunities and risks:
179 /// - the opportunity is that vectorization is burned in the type system and
180 /// is protected from the adverse effect of loop scheduling, tiling, loop
181 /// interchange and all passes downstream. Provided that subsequent passes are
182 ///   able to operate on vector types, the vector shapes, associated loop
183 /// iterator properties, alignment, and contiguity of fastest varying
184 /// dimensions are preserved until we lower the super-vector types. We expect
185 ///   this to significantly rein in the adverse effects of phase ordering.
186 /// - the risks are that a. all passes after super-vectorization have to work
187 ///   on elemental vector types (note that this is true wherever
188 ///   vectorization is applied) and b. that imposing vectorization constraints
189 /// too early may be overall detrimental to loop fusion, tiling and other
190 /// transformations because the dependence distances are coarsened when
191 /// operating on elemental vector types. For this reason, the pattern
192 /// profitability analysis should include a component that also captures the
193 /// maximal amount of fusion available under a particular pattern. This is
194 /// still at the stage of rough ideas but in this context, search is our
195 /// friend as the Tensor Comprehensions and auto-TVM contributions
196 /// demonstrated previously.
197 /// The bottom line is that we do not yet have good answers for the above, but
198 /// we aim at making it easy to answer such questions.
199 ///
200 /// Back to our listing, the last places where early super-vectorization makes
201 /// sense are:
202 /// 3. right after polyhedral-style scheduling: PLUTO-style algorithms are known
203 /// to improve locality, parallelism and be configurable (e.g. max-fuse,
204 /// smart-fuse etc). They can also have adverse effects on contiguity
205 /// properties that are required for vectorization but the vector.transfer
206 /// copy-reshape-pad-transpose abstraction is expected to help recapture
207 /// these properties.
208 /// 4. right after polyhedral-style scheduling+tiling;
209 /// 5. right after scheduling+tiling+rescheduling: points 4 and 5 represent
210 /// probably the most promising places because applying tiling achieves a
211 /// separation of concerns that allows rescheduling to worry less about
212 /// locality and more about parallelism and distribution (e.g. min-fuse).
213 ///
214 /// At these levels the risk-reward looks different: on one hand we probably
215 /// lost a good deal of language/user/library-level annotation; on the other
216 /// hand we gained parallelism and locality through scheduling and tiling.
217 /// However we probably want to ensure tiling is compatible with the
218 /// full-tile-only abstraction used in super-vectorization or suffer the
219 /// consequences. It is too early to place bets on what will win but we expect
220 /// super-vectorization to be the right abstraction to allow exploring at all
221 /// these levels. And again, search is our friend.
222 ///
223 /// Lastly, we mention it again here:
224 ///   6. as an MLIR-based alternative to VPLAN.
225 ///
226 /// Lowering, unrolling, pipelining:
227 /// ================================
228 /// TODO: point to the proper places.
229 ///
230 /// Algorithm:
231 /// ==========
232 /// The algorithm proceeds in a few steps:
233 /// 1. defining super-vectorization patterns and matching them on the tree of
234 /// AffineForOp. A super-vectorization pattern is defined as a recursive
235 ///      data structure that matches and captures nested, imperfectly-nested
236 /// loops that have a. conformable loop annotations attached (e.g. parallel,
237 /// reduction, vectorizable, ...) as well as b. all contiguous load/store
238 /// operations along a specified minor dimension (not necessarily the
239 ///      fastest varying);
240 /// 2. analyzing those patterns for profitability (TODO: and
241 /// interference);
242 /// 3. then, for each pattern in order:
243 /// a. applying iterative rewriting of the loops and all their nested
244 /// operations in topological order. Rewriting is implemented by
245 /// coarsening the loops and converting operations and operands to their
246 /// vector forms. Processing operations in topological order is relatively
247 /// simple due to the structured nature of the control-flow
248 /// representation. This order ensures that all the operands of a given
249 /// operation have been vectorized before the operation itself in a single
250 /// traversal, except for operands defined outside of the loop nest. The
251 /// algorithm can convert the following operations to their vector form:
252 /// * Affine load and store operations are converted to opaque vector
253 /// transfer read and write operations.
254 /// * Scalar constant operations/operands are converted to vector
255 /// constant operations (splat).
256 ///            * Uniform operands (only operands defined outside of the loop nest,
257 ///              for now) are broadcast to a vector (see the sketch after this list).
258 /// TODO: Support more uniform cases.
259 /// * Affine for operations with 'iter_args' are vectorized by
260 /// vectorizing their 'iter_args' operands and results.
261 /// TODO: Support more complex loops with divergent lbs and/or ubs.
262 /// * The remaining operations in the loop nest are vectorized by
263 /// widening their scalar types to vector types.
264 /// b. if everything under the root AffineForOp in the current pattern
265 /// is vectorized properly, we commit that loop to the IR and remove the
266 /// scalar loop. Otherwise, we discard the vectorized loop and keep the
267 /// original scalar loop.
268 /// c. vectorization is applied on the next pattern in the list. Because
269 ///         pattern interference avoidance is not yet implemented and we do not
270 ///         support further vectorizing an already vectorized load, we need to
271 /// re-verify that the pattern is still vectorizable. This is expected to
272 /// make cost models more difficult to write and is subject to improvement
273 /// in the future.
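///
/// As an illustration of step 3.a for uniform operands (a sketch only, reusing
/// the simplified syntax of the examples below; %s, %A and the shapes are
/// hypothetical), vectorizing by 128:
/// ```mlir
///   affine.for %i = 0 to %N {
///     %a = affine.load %A[%i] : memref<?xf32>
///     %b = addf %a, %s : f32
///   }
/// ```
/// would produce something resembling:
/// ```mlir
///   %vs = vector.broadcast %s : f32 to vector<128xf32>
///   affine.for %i = 0 to %N step 128 {
///     %va = vector.transfer_read %A[%i] : memref<?xf32>, vector<128xf32>
///     %vb = addf %va, %vs : vector<128xf32>
///   }
/// ```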
274 ///
275 /// Choice of loop transformation to support the algorithm:
276 /// =======================================================
277 /// The choice of loop transformation to apply for coarsening vectorized loops
278 /// is still subject to exploratory tradeoffs. In particular, say we want to
279 /// vectorize by a factor 128, we want to transform the following input:
280 /// ```mlir
281 /// affine.for %i = %M to %N {
282 /// %a = affine.load %A[%i] : memref<?xf32>
283 /// }
284 /// ```
285 ///
286 /// Traditionally, one would vectorize late (after scheduling, tiling,
287 /// memory promotion, etc.), say after stripmining (and potentially unrolling in
288 /// the case of LLVM's SLP vectorizer):
289 /// ```mlir
290 /// affine.for %i = floor(%M, 128) to ceil(%N, 128) {
291 /// affine.for %ii = max(%M, 128 * %i) to min(%N, 128*%i + 127) {
292 /// %a = affine.load %A[%ii] : memref<?xf32>
293 /// }
294 /// }
295 /// ```
296 ///
297 /// Instead, we seek to vectorize early and freeze vector types before
298 /// scheduling, so we want to generate a pattern that resembles:
299 /// ```mlir
300 /// affine.for %i = ? to ? step ? {
301 /// %v_a = vector.transfer_read %A[%i] : memref<?xf32>, vector<128xf32>
302 /// }
303 /// ```
304 ///
/// Two ways of materializing this coarsening were considered:
305 /// i. simply dividing the lower / upper bounds by 128 creates issues
306 /// when representing expressions such as ii + 1 because now we only
307 /// have access to original values that have been divided. Additional
308 /// information is needed to specify accesses at below-128 granularity;
309 /// ii. another alternative is to coarsen the loop step but this may have
310 /// consequences on dependence analysis and fusability of loops: fusable
311 /// loops probably need to have the same step (because we don't want to
312 /// stripmine/unroll to enable fusion).
313 /// As a consequence, we choose to represent the coarsening using the loop
314 /// step for now and reevaluate in the future. Note that we can renormalize
315 /// loop steps later if/when we have evidence that they are problematic.
316 ///
317 /// For the simple strawman example above, vectorizing for a 1-D vector
318 /// abstraction of size 128 returns code similar to:
319 /// ```mlir
320 /// affine.for %i = %M to %N step 128 {
321 /// %v_a = vector.transfer_read %A[%i] : memref<?xf32>, vector<128xf32>
322 /// }
323 /// ```
324 ///
325 /// Unsupported cases, extensions, and work in progress (help welcome :-) ):
326 /// ========================================================================
327 /// 1. lowering to concrete vector types for various HW;
328 /// 2. reduction support for n-D vectorization and non-unit steps;
329 /// 3. non-effecting padding during vector.transfer_read and filter during
330 /// vector.transfer_write;
331 ///   4. misalignment support for vector.transfer_read / vector.transfer_write
332 /// (hopefully without read-modify-writes);
333 /// 5. control-flow support;
334 /// 6. cost-models, heuristics and search;
335 /// 7. Op implementation, extensions and implication on memref views;
336 /// 8. many TODOs left around.
337 ///
338 /// Examples:
339 /// =========
340 /// Consider the following Function:
341 /// ```mlir
342 /// func @vector_add_2d(%M : index, %N : index) -> f32 {
343 /// %A = alloc (%M, %N) : memref<?x?xf32, 0>
344 /// %B = alloc (%M, %N) : memref<?x?xf32, 0>
345 /// %C = alloc (%M, %N) : memref<?x?xf32, 0>
346 /// %f1 = constant 1.0 : f32
347 /// %f2 = constant 2.0 : f32
348 /// affine.for %i0 = 0 to %M {
349 /// affine.for %i1 = 0 to %N {
350 /// // non-scoped %f1
351 /// affine.store %f1, %A[%i0, %i1] : memref<?x?xf32, 0>
352 /// }
353 /// }
354 /// affine.for %i2 = 0 to %M {
355 /// affine.for %i3 = 0 to %N {
356 /// // non-scoped %f2
357 /// affine.store %f2, %B[%i2, %i3] : memref<?x?xf32, 0>
358 /// }
359 /// }
360 /// affine.for %i4 = 0 to %M {
361 /// affine.for %i5 = 0 to %N {
362 /// %a5 = affine.load %A[%i4, %i5] : memref<?x?xf32, 0>
363 /// %b5 = affine.load %B[%i4, %i5] : memref<?x?xf32, 0>
364 /// %s5 = addf %a5, %b5 : f32
365 /// // non-scoped %f1
366 /// %s6 = addf %s5, %f1 : f32
367 /// // non-scoped %f2
368 /// %s7 = addf %s5, %f2 : f32
369 /// // diamond dependency.
370 /// %s8 = addf %s7, %s6 : f32
371 /// affine.store %s8, %C[%i4, %i5] : memref<?x?xf32, 0>
372 /// }
373 /// }
374 /// %c7 = constant 7 : index
375 /// %c42 = constant 42 : index
376 /// %res = load %C[%c7, %c42] : memref<?x?xf32, 0>
377 /// return %res : f32
378 /// }
379 /// ```
380 ///
381 /// The -affine-vectorize pass with the following arguments:
382 /// ```
383 /// -affine-vectorize="virtual-vector-size=256 test-fastest-varying=0"
384 /// ```
385 ///
386 /// produces this standard innermost-loop vectorized code:
387 /// ```mlir
388 /// func @vector_add_2d(%arg0 : index, %arg1 : index) -> f32 {
389 /// %0 = alloc(%arg0, %arg1) : memref<?x?xf32>
390 /// %1 = alloc(%arg0, %arg1) : memref<?x?xf32>
391 /// %2 = alloc(%arg0, %arg1) : memref<?x?xf32>
392 /// %cst = constant 1.0 : f32
393 /// %cst_0 = constant 2.0 : f32
394 /// affine.for %i0 = 0 to %arg0 {
395 /// affine.for %i1 = 0 to %arg1 step 256 {
396 /// %cst_1 = constant dense<vector<256xf32>, 1.0> :
397 /// vector<256xf32>
398 /// vector.transfer_write %cst_1, %0[%i0, %i1] :
399 /// vector<256xf32>, memref<?x?xf32>
400 /// }
401 /// }
402 /// affine.for %i2 = 0 to %arg0 {
403 /// affine.for %i3 = 0 to %arg1 step 256 {
404 /// %cst_2 = constant dense<vector<256xf32>, 2.0> :
405 /// vector<256xf32>
406 /// vector.transfer_write %cst_2, %1[%i2, %i3] :
407 /// vector<256xf32>, memref<?x?xf32>
408 /// }
409 /// }
410 /// affine.for %i4 = 0 to %arg0 {
411 /// affine.for %i5 = 0 to %arg1 step 256 {
412 /// %3 = vector.transfer_read %0[%i4, %i5] :
413 /// memref<?x?xf32>, vector<256xf32>
414 /// %4 = vector.transfer_read %1[%i4, %i5] :
415 /// memref<?x?xf32>, vector<256xf32>
416 /// %5 = addf %3, %4 : vector<256xf32>
417 /// %cst_3 = constant dense<vector<256xf32>, 1.0> :
418 /// vector<256xf32>
419 /// %6 = addf %5, %cst_3 : vector<256xf32>
420 /// %cst_4 = constant dense<vector<256xf32>, 2.0> :
421 /// vector<256xf32>
422 /// %7 = addf %5, %cst_4 : vector<256xf32>
423 /// %8 = addf %7, %6 : vector<256xf32>
424 /// vector.transfer_write %8, %2[%i4, %i5] :
425 /// vector<256xf32>, memref<?x?xf32>
426 /// }
427 /// }
428 /// %c7 = constant 7 : index
429 /// %c42 = constant 42 : index
430 /// %9 = load %2[%c7, %c42] : memref<?x?xf32>
431 /// return %9 : f32
432 /// }
433 /// ```
434 ///
435 /// The -affine-vectorize pass with the following arguments:
436 /// ```
437 /// -affine-vectorize="virtual-vector-size=32,256 test-fastest-varying=1,0"
438 /// ```
439 ///
440 /// produces this more interesting mixed outer-innermost-loop vectorized code:
441 /// ```mlir
442 /// func @vector_add_2d(%arg0 : index, %arg1 : index) -> f32 {
443 /// %0 = alloc(%arg0, %arg1) : memref<?x?xf32>
444 /// %1 = alloc(%arg0, %arg1) : memref<?x?xf32>
445 /// %2 = alloc(%arg0, %arg1) : memref<?x?xf32>
446 /// %cst = constant 1.0 : f32
447 /// %cst_0 = constant 2.0 : f32
448 /// affine.for %i0 = 0 to %arg0 step 32 {
449 /// affine.for %i1 = 0 to %arg1 step 256 {
450 /// %cst_1 = constant dense<vector<32x256xf32>, 1.0> :
451 /// vector<32x256xf32>
452 /// vector.transfer_write %cst_1, %0[%i0, %i1] :
453 /// vector<32x256xf32>, memref<?x?xf32>
454 /// }
455 /// }
456 /// affine.for %i2 = 0 to %arg0 step 32 {
457 /// affine.for %i3 = 0 to %arg1 step 256 {
458 /// %cst_2 = constant dense<vector<32x256xf32>, 2.0> :
459 /// vector<32x256xf32>
460 /// vector.transfer_write %cst_2, %1[%i2, %i3] :
461 /// vector<32x256xf32>, memref<?x?xf32>
462 /// }
463 /// }
464 /// affine.for %i4 = 0 to %arg0 step 32 {
465 /// affine.for %i5 = 0 to %arg1 step 256 {
466 /// %3 = vector.transfer_read %0[%i4, %i5] :
467 ///            memref<?x?xf32>, vector<32x256xf32>
468 /// %4 = vector.transfer_read %1[%i4, %i5] :
469 /// memref<?x?xf32>, vector<32x256xf32>
470 /// %5 = addf %3, %4 : vector<32x256xf32>
471 /// %cst_3 = constant dense<vector<32x256xf32>, 1.0> :
472 /// vector<32x256xf32>
473 /// %6 = addf %5, %cst_3 : vector<32x256xf32>
474 /// %cst_4 = constant dense<vector<32x256xf32>, 2.0> :
475 /// vector<32x256xf32>
476 /// %7 = addf %5, %cst_4 : vector<32x256xf32>
477 /// %8 = addf %7, %6 : vector<32x256xf32>
478 /// vector.transfer_write %8, %2[%i4, %i5] :
479 /// vector<32x256xf32>, memref<?x?xf32>
480 /// }
481 /// }
482 /// %c7 = constant 7 : index
483 /// %c42 = constant 42 : index
484 /// %9 = load %2[%c7, %c42] : memref<?x?xf32>
485 /// return %9 : f32
486 /// }
487 /// ```
488 ///
489 /// Of course, much more intricate n-D imperfectly-nested patterns can be
490 /// vectorized too and specified in a fully declarative fashion.
491 ///
492 /// Reduction:
493 /// ==========
494 /// Vectorizing reduction loops along the reduction dimension is supported if:
495 /// - the reduction kind is supported,
496 /// - the vectorization is 1-D, and
497 /// - the step size of the loop equals one.
498 ///
499 /// Compared to the non-vector-dimension case, two additional things are done
500 /// during vectorization of such loops:
501 /// - The resulting vector returned from the loop is reduced to a scalar using
502 ///   `vector.reduction`.
503 /// - In some cases a mask is applied to the vector yielded at the end of the
504 /// loop to prevent garbage values from being written to the accumulator.
505 ///
506 /// Reduction vectorization is switched off by default; it can be enabled by
507 /// passing a map from loops to reductions to utility functions, or by passing
508 /// `vectorize-reductions=true` to the vectorization pass.
509 ///
510 /// Consider the following example:
511 /// ```mlir
512 /// func @vecred(%in: memref<512xf32>) -> f32 {
513 /// %cst = constant 0.000000e+00 : f32
514 /// %sum = affine.for %i = 0 to 500 iter_args(%part_sum = %cst) -> (f32) {
515 /// %ld = affine.load %in[%i] : memref<512xf32>
516 /// %cos = math.cos %ld : f32
517 /// %add = addf %part_sum, %cos : f32
518 /// affine.yield %add : f32
519 /// }
520 /// return %sum : f32
521 /// }
522 /// ```
523 ///
524 /// The -affine-vectorize pass with the following arguments:
525 /// ```
526 /// -affine-vectorize="virtual-vector-size=128 test-fastest-varying=0 \
527 /// vectorize-reductions=true"
528 /// ```
529 /// produces the following output:
530 /// ```mlir
531 /// #map = affine_map<(d0) -> (-d0 + 500)>
532 /// func @vecred(%arg0: memref<512xf32>) -> f32 {
533 /// %cst = constant 0.000000e+00 : f32
534 /// %cst_0 = constant dense<0.000000e+00> : vector<128xf32>
535 /// %0 = affine.for %arg1 = 0 to 500 step 128 iter_args(%arg2 = %cst_0)
536 /// -> (vector<128xf32>) {
537 /// // %2 is the number of iterations left in the original loop.
538 /// %2 = affine.apply #map(%arg1)
539 /// %3 = vector.create_mask %2 : vector<128xi1>
540 /// %cst_1 = constant 0.000000e+00 : f32
541 /// %4 = vector.transfer_read %arg0[%arg1], %cst_1 :
542 /// memref<512xf32>, vector<128xf32>
543 /// %5 = math.cos %4 : vector<128xf32>
544 /// %6 = addf %arg2, %5 : vector<128xf32>
545 ///      // We filter out the effect of the last 12 elements using the mask.
546 /// %7 = select %3, %6, %arg2 : vector<128xi1>, vector<128xf32>
547 /// affine.yield %7 : vector<128xf32>
548 /// }
549 /// %1 = vector.reduction "add", %0 : vector<128xf32> into f32
550 /// return %1 : f32
551 /// }
552 /// ```
553 ///
554 /// Note that because of loop misalignment we needed to apply a mask to prevent
555 /// the last 12 elements from affecting the final result. The mask is full of ones
556 /// in every iteration except for the last one, in which it has the form
557 /// `11...100...0` with 116 ones and 12 zeros.
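/// To make the arithmetic explicit: with lower bound 0, upper bound 500 and
/// step 128, the last iteration starts at 384 and covers 500 - 384 = 116
/// elements, leaving the remaining 12 of the 128 lanes as garbage that the
/// mask filters out.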
558
559 #define DEBUG_TYPE "early-vect"
560
561 using llvm::dbgs;
562
563 /// Forward declaration.
564 static FilterFunctionType
565 isVectorizableLoopPtrFactory(const DenseSet<Operation *> ¶llelLoops,
566 int fastestVaryingMemRefDimension);
567
568 /// Creates a vectorization pattern from the command line arguments.
569 /// Up to 3-D patterns are supported.
570 /// If the command line argument requests a pattern of higher order, returns an
571 /// empty pattern list which will conservatively result in no vectorization.
572 static Optional<NestedPattern>
573 makePattern(const DenseSet<Operation *> &parallelLoops, int vectorRank,
574 ArrayRef<int64_t> fastestVaryingPattern) {
575 using matcher::For;
576 int64_t d0 = fastestVaryingPattern.empty() ? -1 : fastestVaryingPattern[0];
577 int64_t d1 = fastestVaryingPattern.size() < 2 ? -1 : fastestVaryingPattern[1];
578 int64_t d2 = fastestVaryingPattern.size() < 3 ? -1 : fastestVaryingPattern[2];
579 switch (vectorRank) {
580 case 1:
581 return For(isVectorizableLoopPtrFactory(parallelLoops, d0));
582 case 2:
583 return For(isVectorizableLoopPtrFactory(parallelLoops, d0),
584 For(isVectorizableLoopPtrFactory(parallelLoops, d1)));
585 case 3:
586 return For(isVectorizableLoopPtrFactory(parallelLoops, d0),
587 For(isVectorizableLoopPtrFactory(parallelLoops, d1),
588 For(isVectorizableLoopPtrFactory(parallelLoops, d2))));
589 default: {
590 return llvm::None;
591 }
592 }
593 }
594
595 static NestedPattern &vectorTransferPattern() {
596 static auto pattern = matcher::Op([](Operation &op) {
597 return isa<vector::TransferReadOp, vector::TransferWriteOp>(op);
598 });
599 return pattern;
600 }
601
602 namespace {
603
604 /// Base state for the vectorize pass.
605 /// Command line arguments are preempted by non-empty pass arguments.
606 struct Vectorize : public AffineVectorizeBase<Vectorize> {
607 Vectorize() = default;
608 Vectorize(ArrayRef<int64_t> virtualVectorSize);
609 void runOnFunction() override;
610 };
611
612 } // end anonymous namespace
613
614 Vectorize::Vectorize(ArrayRef<int64_t> virtualVectorSize) {
615 vectorSizes = virtualVectorSize;
616 }
617
618 static void vectorizeLoopIfProfitable(Operation *loop, unsigned depthInPattern,
619 unsigned patternDepth,
620 VectorizationStrategy *strategy) {
621 assert(patternDepth > depthInPattern &&
622          "patternDepth must be greater than depthInPattern");
623 if (patternDepth - depthInPattern > strategy->vectorSizes.size()) {
624 // Don't vectorize this loop
625 return;
626 }
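  // For example, with two vector sizes and patternDepth == 3: the outermost
  // loop (depthInPattern == 0) is skipped by the check above, the loop at
  // depthInPattern == 1 is assigned vector dimension 0, and the innermost loop
  // (depthInPattern == 2) is assigned vector dimension 1.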
627 strategy->loopToVectorDim[loop] =
628 strategy->vectorSizes.size() - (patternDepth - depthInPattern);
629 }
630
631 /// Implements a simple strawman strategy for vectorization.
632 /// Given a matched pattern `matches` of depth `patternDepth`, this strategy
633 /// greedily assigns the fastest varying dimension ** of the vector ** to the
634 /// innermost loop in the pattern.
635 /// When coupled with a pattern that looks for the fastest varying dimension in
636 /// load/store MemRefs, this creates a generic vectorization strategy that works
637 /// for any loop in a hierarchy (outermost, innermost or intermediate).
638 ///
639 /// TODO: In the future we should additionally increase the power of the
640 /// profitability analysis along 3 directions:
641 /// 1. account for loop extents (both static and parametric + annotations);
642 /// 2. account for data layout permutations;
643 /// 3. account for impact of vectorization on maximal loop fusion.
644 /// Then we can quantify the above to build a cost model and search over
645 /// strategies.
646 static LogicalResult analyzeProfitability(ArrayRef<NestedMatch> matches,
647 unsigned depthInPattern,
648 unsigned patternDepth,
649 VectorizationStrategy *strategy) {
650 for (auto m : matches) {
651 if (failed(analyzeProfitability(m.getMatchedChildren(), depthInPattern + 1,
652 patternDepth, strategy))) {
653 return failure();
654 }
655 vectorizeLoopIfProfitable(m.getMatchedOperation(), depthInPattern,
656 patternDepth, strategy);
657 }
658 return success();
659 }
660
661 ///// end TODO: Hoist to a VectorizationStrategy.cpp when appropriate /////
662
663 namespace {
664
665 struct VectorizationState {
666
667   VectorizationState(MLIRContext *context) : builder(context) {}
668
669 /// Registers the vector replacement of a scalar operation and its result
670 /// values. Both operations must have the same number of results.
671 ///
672 /// This utility is used to register the replacement for the vast majority of
673 /// the vectorized operations.
674 ///
675 /// Example:
676 /// * 'replaced': %0 = addf %1, %2 : f32
677 /// * 'replacement': %0 = addf %1, %2 : vector<128xf32>
678 void registerOpVectorReplacement(Operation *replaced, Operation *replacement);
679
680 /// Registers the vector replacement of a scalar value. The replacement
681 /// operation should have a single result, which replaces the scalar value.
682 ///
683 /// This utility is used to register the vector replacement of block arguments
684 /// and operation results which are not directly vectorized (i.e., their
685 /// scalar version still exists after vectorization), like uniforms.
686 ///
687 /// Example:
688 /// * 'replaced': block argument or operation outside of the vectorized
689 /// loop.
690 /// * 'replacement': %0 = vector.broadcast %1 : f32 to vector<128xf32>
691 void registerValueVectorReplacement(Value replaced, Operation *replacement);
692
693 /// Registers the vector replacement of a block argument (e.g., iter_args).
694 ///
695 /// Example:
696 /// * 'replaced': 'iter_arg' block argument.
697 /// * 'replacement': vectorized 'iter_arg' block argument.
698 void registerBlockArgVectorReplacement(BlockArgument replaced,
699 BlockArgument replacement);
700
701 /// Registers the scalar replacement of a scalar value. 'replacement' must be
702 /// scalar. Both values must be block arguments. Operation results should be
703 ///   replaced using the 'registerOp*' utilities.
704 ///
705 /// This utility is used to register the replacement of block arguments
706 /// that are within the loop to be vectorized and will continue being scalar
707 /// within the vector loop.
708 ///
709 /// Example:
710 /// * 'replaced': induction variable of a loop to be vectorized.
711 /// * 'replacement': new induction variable in the new vector loop.
712 void registerValueScalarReplacement(BlockArgument replaced,
713 BlockArgument replacement);
714
715 /// Registers the scalar replacement of a scalar result returned from a
716 /// reduction loop. 'replacement' must be scalar.
717 ///
718 /// This utility is used to register the replacement for scalar results of
719 /// vectorized reduction loops with iter_args.
720 ///
721 ///   Example:
722 /// * 'replaced': %0 = affine.for %i = 0 to 512 iter_args(%x = ...) -> (f32)
723 /// * 'replacement': %1 = vector.reduction "add" %0 : vector<4xf32> into f32
724 void registerLoopResultScalarReplacement(Value replaced, Value replacement);
725
726 /// Returns in 'replacedVals' the scalar replacement for values in
727 /// 'inputVals'.
728 void getScalarValueReplacementsFor(ValueRange inputVals,
729 SmallVectorImpl<Value> &replacedVals);
730
731 /// Erases the scalar loop nest after its successful vectorization.
732 void finishVectorizationPattern(AffineForOp rootLoop);
733
734 // Used to build and insert all the new operations created. The insertion
735 // point is preserved and updated along the vectorization process.
736 OpBuilder builder;
737
738 // Maps input scalar operations to their vector counterparts.
739 DenseMap<Operation *, Operation *> opVectorReplacement;
740 // Maps input scalar values to their vector counterparts.
741 BlockAndValueMapping valueVectorReplacement;
742 // Maps input scalar values to their new scalar counterparts in the vector
743 // loop nest.
744 BlockAndValueMapping valueScalarReplacement;
745 // Maps results of reduction loops to their new scalar counterparts.
746 DenseMap<Value, Value> loopResultScalarReplacement;
747
748 // Maps the newly created vector loops to their vector dimension.
749 DenseMap<Operation *, unsigned> vecLoopToVecDim;
750
751   // Maps the new vectorized loops to the corresponding vector masks if a mask
752   // is required.
753 DenseMap<Operation *, Value> vecLoopToMask;
754
755 // The strategy drives which loop to vectorize by which amount.
756 const VectorizationStrategy *strategy;
757
758 private:
759 /// Internal implementation to map input scalar values to new vector or scalar
760 /// values.
761 void registerValueVectorReplacementImpl(Value replaced, Value replacement);
762 void registerValueScalarReplacementImpl(Value replaced, Value replacement);
763 };
764
765 } // end namespace
766
767 /// Registers the vector replacement of a scalar operation and its result
768 /// values. Both operations must have the same number of results.
769 ///
770 /// This utility is used to register the replacement for the vast majority of
771 /// the vectorized operations.
772 ///
773 /// Example:
774 /// * 'replaced': %0 = addf %1, %2 : f32
775 /// * 'replacement': %0 = addf %1, %2 : vector<128xf32>
776 void VectorizationState::registerOpVectorReplacement(Operation *replaced,
777 Operation *replacement) {
778 LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ commit vectorized op:\n");
779 LLVM_DEBUG(dbgs() << *replaced << "\n");
780 LLVM_DEBUG(dbgs() << "into\n");
781 LLVM_DEBUG(dbgs() << *replacement << "\n");
782
783 assert(replaced->getNumResults() == replacement->getNumResults() &&
784 "Unexpected replaced and replacement results");
785 assert(opVectorReplacement.count(replaced) == 0 && "already registered");
786 opVectorReplacement[replaced] = replacement;
787
788 for (auto resultTuple :
789 llvm::zip(replaced->getResults(), replacement->getResults()))
790 registerValueVectorReplacementImpl(std::get<0>(resultTuple),
791 std::get<1>(resultTuple));
792 }
793
794 /// Registers the vector replacement of a scalar value. The replacement
795 /// operation should have a single result, which replaces the scalar value.
796 ///
797 /// This utility is used to register the vector replacement of block arguments
798 /// and operation results which are not directly vectorized (i.e., their
799 /// scalar version still exists after vectorization), like uniforms.
800 ///
801 /// Example:
802 /// * 'replaced': block argument or operation outside of the vectorized loop.
803 /// * 'replacement': %0 = vector.broadcast %1 : f32 to vector<128xf32>
804 void VectorizationState::registerValueVectorReplacement(
805 Value replaced, Operation *replacement) {
806 assert(replacement->getNumResults() == 1 &&
807 "Expected single-result replacement");
808 if (Operation *defOp = replaced.getDefiningOp())
809 registerOpVectorReplacement(defOp, replacement);
810 else
811 registerValueVectorReplacementImpl(replaced, replacement->getResult(0));
812 }
813
814 /// Registers the vector replacement of a block argument (e.g., iter_args).
815 ///
816 /// Example:
817 /// * 'replaced': 'iter_arg' block argument.
818 /// * 'replacement': vectorized 'iter_arg' block argument.
819 void VectorizationState::registerBlockArgVectorReplacement(
820 BlockArgument replaced, BlockArgument replacement) {
821 registerValueVectorReplacementImpl(replaced, replacement);
822 }
823
824 void VectorizationState::registerValueVectorReplacementImpl(Value replaced,
825 Value replacement) {
826 assert(!valueVectorReplacement.contains(replaced) &&
827 "Vector replacement already registered");
828 assert(replacement.getType().isa<VectorType>() &&
829 "Expected vector type in vector replacement");
830 valueVectorReplacement.map(replaced, replacement);
831 }
832
833 /// Registers the scalar replacement of a scalar value. 'replacement' must be
834 /// scalar. Both values must be block arguments. Operation results should be
835 /// replaced using the 'registerOp*' utilities.
836 ///
837 /// This utility is used to register the replacement of block arguments
838 /// that are within the loop to be vectorized and will continue being scalar
839 /// within the vector loop.
840 ///
841 /// Example:
842 /// * 'replaced': induction variable of a loop to be vectorized.
843 /// * 'replacement': new induction variable in the new vector loop.
844 void VectorizationState::registerValueScalarReplacement(
845 BlockArgument replaced, BlockArgument replacement) {
846 registerValueScalarReplacementImpl(replaced, replacement);
847 }
848
849 /// Registers the scalar replacement of a scalar result returned from a
850 /// reduction loop. 'replacement' must be scalar.
851 ///
852 /// This utility is used to register the replacement for scalar results of
853 /// vectorized reduction loops with iter_args.
854 ///
855 /// Example:
856 /// * 'replaced': %0 = affine.for %i = 0 to 512 iter_args(%x = ...) -> (f32)
857 /// * 'replacement': %1 = vector.reduction "add" %0 : vector<4xf32> into f32
858 void VectorizationState::registerLoopResultScalarReplacement(
859 Value replaced, Value replacement) {
860 assert(isa<AffineForOp>(replaced.getDefiningOp()));
861 assert(loopResultScalarReplacement.count(replaced) == 0 &&
862 "already registered");
863 LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ will replace a result of the loop "
864 "with scalar: "
865 << replacement);
866 loopResultScalarReplacement[replaced] = replacement;
867 }
868
869 void VectorizationState::registerValueScalarReplacementImpl(Value replaced,
870 Value replacement) {
871 assert(!valueScalarReplacement.contains(replaced) &&
872 "Scalar value replacement already registered");
873 assert(!replacement.getType().isa<VectorType>() &&
874 "Expected scalar type in scalar replacement");
875 valueScalarReplacement.map(replaced, replacement);
876 }
877
878 /// Returns in 'replacedVals' the scalar replacement for values in 'inputVals'.
879 void VectorizationState::getScalarValueReplacementsFor(
880 ValueRange inputVals, SmallVectorImpl<Value> &replacedVals) {
881 for (Value inputVal : inputVals)
882 replacedVals.push_back(valueScalarReplacement.lookupOrDefault(inputVal));
883 }
884
885 /// Erases a loop nest, including all its nested operations.
886 static void eraseLoopNest(AffineForOp forOp) {
887 LLVM_DEBUG(dbgs() << "[early-vect]+++++ erasing:\n" << forOp << "\n");
888 forOp.erase();
889 }
890
891 /// Erases the scalar loop nest after its successful vectorization.
892 void VectorizationState::finishVectorizationPattern(AffineForOp rootLoop) {
893 LLVM_DEBUG(dbgs() << "\n[early-vect] Finalizing vectorization\n");
894 eraseLoopNest(rootLoop);
895 }
896
897 // Apply 'map' with 'mapOperands' returning resulting values in 'results'.
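// For example (a hypothetical map), applying (d0, d1) -> (d0 + d1, d1) to
// operands (%i, %j) creates two single-result affine.apply ops, one computing
// %i + %j and one computing %j, appended to 'results' in that order.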
898 static void computeMemoryOpIndices(Operation *op, AffineMap map,
899 ValueRange mapOperands,
900 VectorizationState &state,
901 SmallVectorImpl<Value> &results) {
902 for (auto resultExpr : map.getResults()) {
903 auto singleResMap =
904 AffineMap::get(map.getNumDims(), map.getNumSymbols(), resultExpr);
905 auto afOp = state.builder.create<AffineApplyOp>(op->getLoc(), singleResMap,
906 mapOperands);
907 results.push_back(afOp);
908 }
909 }
910
911 /// Returns a FilterFunctionType that can be used in NestedPattern to match a
912 /// loop whose underlying load/store accesses are either invariant or all
913 /// varying along the `fastestVaryingMemRefDimension`.
914 static FilterFunctionType
915 isVectorizableLoopPtrFactory(const DenseSet<Operation *> &parallelLoops,
916 int fastestVaryingMemRefDimension) {
917 return [¶llelLoops, fastestVaryingMemRefDimension](Operation &forOp) {
918 auto loop = cast<AffineForOp>(forOp);
919 auto parallelIt = parallelLoops.find(loop);
920 if (parallelIt == parallelLoops.end())
921 return false;
922 int memRefDim = -1;
923 auto vectorizableBody =
924 isVectorizableLoopBody(loop, &memRefDim, vectorTransferPattern());
925 if (!vectorizableBody)
926 return false;
927 return memRefDim == -1 || fastestVaryingMemRefDimension == -1 ||
928 memRefDim == fastestVaryingMemRefDimension;
929 };
930 }
931
932 /// Returns the vector type resulting from applying the provided vectorization
933 /// strategy on the scalar type.
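/// For example, with vector sizes [32, 256] in the strategy and an f32 scalar
/// type, the result is vector<32x256xf32>.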
934 static VectorType getVectorType(Type scalarTy,
935 const VectorizationStrategy *strategy) {
936 assert(!scalarTy.isa<VectorType>() && "Expected scalar type");
937 return VectorType::get(strategy->vectorSizes, scalarTy);
938 }
939
940 /// Tries to transform a scalar constant into a vector constant. Returns the
941 /// vector constant if the scalar type is a valid vector element type. Returns
942 /// nullptr, otherwise.
943 static ConstantOp vectorizeConstant(ConstantOp constOp,
944 VectorizationState &state) {
945 Type scalarTy = constOp.getType();
946 if (!VectorType::isValidElementType(scalarTy))
947 return nullptr;
948
949 auto vecTy = getVectorType(scalarTy, state.strategy);
950 auto vecAttr = DenseElementsAttr::get(vecTy, constOp.getValue());
951 auto newConstOp = state.builder.create<ConstantOp>(constOp.getLoc(), vecAttr);
952
953 // Register vector replacement for future uses in the scope.
954 state.registerOpVectorReplacement(constOp, newConstOp);
955 return newConstOp;
956 }
957
958 /// Creates a constant vector filled with the neutral elements of the given
959 /// reduction. The scalar type of vector elements will be taken from
960 /// `oldOperand`.
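/// For example, for an "addf" reduction over f32 with a 1-D vector size of 128,
/// this creates the `constant dense<0.000000e+00> : vector<128xf32>` seen in
/// the reduction example of the file header.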
961 static ConstantOp createInitialVector(AtomicRMWKind reductionKind,
962 Value oldOperand,
963 VectorizationState &state) {
964 Type scalarTy = oldOperand.getType();
965 if (!VectorType::isValidElementType(scalarTy))
966 return nullptr;
967
968 Attribute valueAttr = getIdentityValueAttr(
969 reductionKind, scalarTy, state.builder, oldOperand.getLoc());
970 auto vecTy = getVectorType(scalarTy, state.strategy);
971 auto vecAttr = DenseElementsAttr::get(vecTy, valueAttr);
972 auto newConstOp =
973 state.builder.create<ConstantOp>(oldOperand.getLoc(), vecAttr);
974
975 return newConstOp;
976 }
977
978 /// Creates a mask used to filter out garbage elements in the last iteration
979 /// of unaligned loops. If a mask is not required then `nullptr` is returned.
980 /// The mask will be a vector of booleans representing meaningful vector
981 /// elements in the current iteration. It is filled with ones for each iteration
982 /// except for the last one, where it has the form `11...100...0` with the
983 /// number of ones equal to the number of meaningful elements (i.e. the number
984 /// of iterations that would be left in the original loop).
985 static Value createMask(AffineForOp vecForOp, VectorizationState &state) {
986 assert(state.strategy->vectorSizes.size() == 1 &&
987          "Creating a mask for non-1-D vectors is not supported.");
988 assert(vecForOp.getStep() == state.strategy->vectorSizes[0] &&
989 "Creating a mask for loops with non-unit original step size is not "
990 "supported.");
991
992 // Check if we have already created the mask.
993 if (Value mask = state.vecLoopToMask.lookup(vecForOp))
994 return mask;
995
996 // If the loop has constant bounds and the original number of iterations is
997   // divisible by the vector size then we don't need a mask.
998 if (vecForOp.hasConstantBounds()) {
999 int64_t originalTripCount =
1000 vecForOp.getConstantUpperBound() - vecForOp.getConstantLowerBound();
1001 if (originalTripCount % vecForOp.getStep() == 0)
1002 return nullptr;
1003 }
1004
1005 OpBuilder::InsertionGuard guard(state.builder);
1006 state.builder.setInsertionPointToStart(vecForOp.getBody());
1007
1008 // We generate the mask using the `vector.create_mask` operation which accepts
1009   // the number of meaningful elements (i.e. the length of the prefix of 1s).
1010 // To compute the number of meaningful elements we subtract the current value
1011 // of the iteration variable from the upper bound of the loop. Example:
1012 //
1013 // // 500 is the upper bound of the loop
1014 // #map = affine_map<(d0) -> (500 - d0)>
1015 // %elems_left = affine.apply #map(%iv)
1016 // %mask = vector.create_mask %elems_left : vector<128xi1>
1017
1018 Location loc = vecForOp.getLoc();
1019
1020 // First we get the upper bound of the loop using `affine.apply` or
1021 // `affine.min`.
1022 AffineMap ubMap = vecForOp.getUpperBoundMap();
1023 Value ub;
1024 if (ubMap.getNumResults() == 1)
1025 ub = state.builder.create<AffineApplyOp>(loc, vecForOp.getUpperBoundMap(),
1026 vecForOp.getUpperBoundOperands());
1027 else
1028 ub = state.builder.create<AffineMinOp>(loc, vecForOp.getUpperBoundMap(),
1029 vecForOp.getUpperBoundOperands());
1030 // Then we compute the number of (original) iterations left in the loop.
1031 AffineExpr subExpr =
1032 state.builder.getAffineDimExpr(0) - state.builder.getAffineDimExpr(1);
1033 Value itersLeft =
1034 makeComposedAffineApply(state.builder, loc, AffineMap::get(2, 0, subExpr),
1035 {ub, vecForOp.getInductionVar()});
1036 // If the affine maps were successfully composed then `ub` is unneeded.
1037 if (ub.use_empty())
1038 ub.getDefiningOp()->erase();
1039 // Finally we create the mask.
1040 Type maskTy = VectorType::get(state.strategy->vectorSizes,
1041 state.builder.getIntegerType(1));
1042 Value mask =
1043 state.builder.create<vector::CreateMaskOp>(loc, maskTy, itersLeft);
1044
1045 LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ creating a mask:\n"
1046 << itersLeft << "\n"
1047 << mask << "\n");
1048
1049 state.vecLoopToMask[vecForOp] = mask;
1050 return mask;
1051 }
1052
1053 /// Returns true if the provided value is vector uniform given the vectorization
1054 /// strategy.
1055 // TODO: For now, only values that are invariants to all the loops in the
1056 // vectorization strategy are considered vector uniforms.
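// For example, a function argument or any other value defined above all the
// loops in the strategy is uniform; a value defined inside any of those loops
// is not.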
1057 static bool isUniformDefinition(Value value,
1058 const VectorizationStrategy *strategy) {
1059 for (auto loopToDim : strategy->loopToVectorDim) {
1060 auto loop = cast<AffineForOp>(loopToDim.first);
1061 if (!loop.isDefinedOutsideOfLoop(value))
1062 return false;
1063 }
1064 return true;
1065 }
1066
1067 /// Generates a broadcast op for the provided uniform value using the
1068 /// vectorization strategy in 'state'.
1069 static Operation *vectorizeUniform(Value uniformVal,
1070 VectorizationState &state) {
1071 OpBuilder::InsertionGuard guard(state.builder);
1072 state.builder.setInsertionPointAfterValue(uniformVal);
1073
1074 auto vectorTy = getVectorType(uniformVal.getType(), state.strategy);
1075 auto bcastOp = state.builder.create<BroadcastOp>(uniformVal.getLoc(),
1076 vectorTy, uniformVal);
1077 state.registerValueVectorReplacement(uniformVal, bcastOp);
1078 return bcastOp;
1079 }
1080
1081 /// Tries to vectorize a given `operand` by applying the following logic:
1082 /// 1. if the defining operation has been already vectorized, `operand` is
1083 /// already in the proper vector form;
1084 /// 2. if the `operand` is a constant, returns the vectorized form of the
1085 /// constant;
1086 ///   3. if the `operand` is uniform, returns a vector broadcast of the `operand`;
1087 /// 4. otherwise, the vectorization of `operand` is not supported.
1088 /// Newly created vector operations are registered in `state` as replacement
1089 /// for their scalar counterparts.
1090 /// In particular this logic captures some of the use cases where definitions
1091 /// that are not scoped under the current pattern are needed to vectorize.
1092 /// One such example is top level function constants that need to be splatted.
1093 ///
1094 /// Returns an operand that has been vectorized to match `state`'s strategy if
1095 /// vectorization is possible with the above logic. Returns nullptr otherwise.
1096 ///
1097 /// TODO: handle more complex cases.
1098 static Value vectorizeOperand(Value operand, VectorizationState &state) {
1099 LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ vectorize operand: " << operand);
1100 // If this value is already vectorized, we are done.
1101 if (Value vecRepl = state.valueVectorReplacement.lookupOrNull(operand)) {
1102 LLVM_DEBUG(dbgs() << " -> already vectorized: " << vecRepl);
1103 return vecRepl;
1104 }
1105
1106   // A vector operand that is not in the replacement map should never reach
1107 // this point. Reaching this point could mean that the code was already
1108 // vectorized and we shouldn't try to vectorize already vectorized code.
1109 assert(!operand.getType().isa<VectorType>() &&
1110 "Vector op not found in replacement map");
1111
1112 // Vectorize constant.
1113 if (auto constOp = operand.getDefiningOp<ConstantOp>()) {
1114 ConstantOp vecConstant = vectorizeConstant(constOp, state);
1115 LLVM_DEBUG(dbgs() << "-> constant: " << vecConstant);
1116 return vecConstant.getResult();
1117 }
1118
1119 // Vectorize uniform values.
1120 if (isUniformDefinition(operand, state.strategy)) {
1121 Operation *vecUniform = vectorizeUniform(operand, state);
1122 LLVM_DEBUG(dbgs() << "-> uniform: " << *vecUniform);
1123 return vecUniform->getResult(0);
1124 }
1125
1126 // Check for unsupported block argument scenarios. A supported block argument
1127 // should have been vectorized already.
1128 if (!operand.getDefiningOp())
1129 LLVM_DEBUG(dbgs() << "-> unsupported block argument\n");
1130 else
1131 // Generic unsupported case.
1132 LLVM_DEBUG(dbgs() << "-> non-vectorizable\n");
1133
1134 return nullptr;
1135 }
1136
1137 /// Vectorizes an affine load with the vectorization strategy in 'state' by
1138 /// generating a 'vector.transfer_read' op with the proper permutation map
1139 /// inferred from the indices of the load. The new 'vector.transfer_read' is
1140 /// registered as replacement of the scalar load. Returns the newly created
1141 /// 'vector.transfer_read' if vectorization was successful. Returns nullptr,
1142 /// otherwise.
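///
/// For instance (a sketch reusing the simplified syntax of the file header and
/// assuming a 1-D vectorization by 128 of the loop defining %j):
///   %0 = affine.load %A[%i, %j] : memref<?x?xf32>
/// may be rewritten into something resembling:
///   %0 = vector.transfer_read %A[%i, %j]
///          {permutation_map = affine_map<(d0, d1) -> (d1)>}
///        : memref<?x?xf32>, vector<128xf32>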
1143 static Operation *vectorizeAffineLoad(AffineLoadOp loadOp,
1144 VectorizationState &state) {
1145 MemRefType memRefType = loadOp.getMemRefType();
1146 Type elementType = memRefType.getElementType();
1147 auto vectorType = VectorType::get(state.strategy->vectorSizes, elementType);
1148
1149 // Replace map operands with operands from the vector loop nest.
1150 SmallVector<Value, 8> mapOperands;
1151 state.getScalarValueReplacementsFor(loadOp.getMapOperands(), mapOperands);
1152
1153 // Compute indices for the transfer op. AffineApplyOp's may be generated.
1154 SmallVector<Value, 8> indices;
1155 indices.reserve(memRefType.getRank());
1156 if (loadOp.getAffineMap() !=
1157 state.builder.getMultiDimIdentityMap(memRefType.getRank()))
1158 computeMemoryOpIndices(loadOp, loadOp.getAffineMap(), mapOperands, state,
1159 indices);
1160 else
1161 indices.append(mapOperands.begin(), mapOperands.end());
1162
1163 // Compute permutation map using the information of new vector loops.
1164 auto permutationMap = makePermutationMap(state.builder.getInsertionBlock(),
1165 indices, state.vecLoopToVecDim);
1166 if (!permutationMap) {
1167 LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ can't compute permutationMap\n");
1168 return nullptr;
1169 }
1170 LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ permutationMap: ");
1171 LLVM_DEBUG(permutationMap.print(dbgs()));
1172
1173 auto transfer = state.builder.create<vector::TransferReadOp>(
1174 loadOp.getLoc(), vectorType, loadOp.getMemRef(), indices, permutationMap);
1175
1176 // Register replacement for future uses in the scope.
1177 state.registerOpVectorReplacement(loadOp, transfer);
1178 return transfer;
1179 }
1180
1181 /// Vectorizes an affine store with the vectorization strategy in 'state' by
1182 /// generating a 'vector.transfer_write' op with the proper permutation map
1183 /// inferred from the indices of the store. The new 'vector.transfer_write' is
1184 /// registered as replacement of the scalar store. Returns the newly created
1185 /// 'vector.transfer_write' if vectorization was successful. Returns nullptr
1186 /// otherwise.
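///
/// A minimal sketch of the rewrite, assuming the same 1-D strategy as in the
/// load case above, with %vval being the vectorized counterpart of %val
/// (illustrative only):
///
///   affine.store %val, %A[%i, %j] : memref<64x256xf32>
///
/// roughly becomes
///
///   vector.transfer_write %vval, %A[%i, %j]
///       {permutation_map = affine_map<(d0, d1) -> (d1)>}
///       : vector<128xf32>, memref<64x256xf32>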
1187 static Operation *vectorizeAffineStore(AffineStoreOp storeOp,
1188 VectorizationState &state) {
1189 MemRefType memRefType = storeOp.getMemRefType();
1190 Value vectorValue = vectorizeOperand(storeOp.getValueToStore(), state);
1191 if (!vectorValue)
1192 return nullptr;
1193
1194 // Replace map operands with operands from the vector loop nest.
1195 SmallVector<Value, 8> mapOperands;
1196 state.getScalarValueReplacementsFor(storeOp.getMapOperands(), mapOperands);
1197
1198 // Compute indices for the transfer op. AffineApplyOp's may be generated.
1199 SmallVector<Value, 8> indices;
1200 indices.reserve(memRefType.getRank());
1201 if (storeOp.getAffineMap() !=
1202 state.builder.getMultiDimIdentityMap(memRefType.getRank()))
1203 computeMemoryOpIndices(storeOp, storeOp.getAffineMap(), mapOperands, state,
1204 indices);
1205 else
1206 indices.append(mapOperands.begin(), mapOperands.end());
1207
1208 // Compute permutation map using the information of new vector loops.
1209 auto permutationMap = makePermutationMap(state.builder.getInsertionBlock(),
1210 indices, state.vecLoopToVecDim);
1211 if (!permutationMap)
1212 return nullptr;
1213 LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ permutationMap: ");
1214 LLVM_DEBUG(permutationMap.print(dbgs()));
1215
1216 auto transfer = state.builder.create<vector::TransferWriteOp>(
1217 storeOp.getLoc(), vectorValue, storeOp.getMemRef(), indices,
1218 permutationMap);
1219 LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ vectorized store: " << transfer);
1220
1221 // Register replacement for future uses in the scope.
1222 state.registerOpVectorReplacement(storeOp, transfer);
1223 return transfer;
1224 }
1225
1226 /// Returns true if `value` is a constant equal to the neutral element of the
1227 /// given vectorizable reduction.
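/// For example, for an 'addf' reduction the neutral element is 0.0, so an
/// initial value produced by 'constant 0.0 : f32' satisfies this check; for a
/// 'mulf' reduction it would have to be 1.0 (illustrative values).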
1228 static bool isNeutralElementConst(AtomicRMWKind reductionKind, Value value,
1229 VectorizationState &state) {
1230 Type scalarTy = value.getType();
1231 if (!VectorType::isValidElementType(scalarTy))
1232 return false;
1233 Attribute valueAttr = getIdentityValueAttr(reductionKind, scalarTy,
1234 state.builder, value.getLoc());
1235 if (auto constOp = dyn_cast_or_null<ConstantOp>(value.getDefiningOp()))
1236 return constOp.value() == valueAttr;
1237 return false;
1238 }
1239
1240 /// Vectorizes a loop with the vectorization strategy in 'state'. A new loop is
1241 /// created and registered as replacement for the scalar loop. The builder's
1242 /// insertion point is set to the new loop's body so that subsequent vectorized
1243 /// operations are inserted into the new loop. If the loop is a vector
1244 /// dimension, the step of the newly created loop will reflect the vectorization
1245 /// factor used to vectorize that dimension.
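///
/// For example, assuming a vectorization factor of 128 along %i (a sketch that
/// omits the rewriting of the loop body and iter_args):
///
///   affine.for %i = 0 to 1024 step 1 { ... }
///
/// is replaced by
///
///   affine.for %i = 0 to 1024 step 128 { ... }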
1246 static Operation *vectorizeAffineForOp(AffineForOp forOp,
1247 VectorizationState &state) {
1248 const VectorizationStrategy &strategy = *state.strategy;
1249 auto loopToVecDimIt = strategy.loopToVectorDim.find(forOp);
1250 bool isLoopVecDim = loopToVecDimIt != strategy.loopToVectorDim.end();
1251
1252 // TODO: Vectorization of reduction loops is not supported for non-unit steps.
1253 if (isLoopVecDim && forOp.getNumIterOperands() > 0 && forOp.getStep() != 1) {
1254 LLVM_DEBUG(
1255 dbgs()
1256 << "\n[early-vect]+++++ unsupported step size for reduction loop: "
1257 << forOp.getStep() << "\n");
1258 return nullptr;
1259 }
1260
1261 // If we are vectorizing a vector dimension, compute a new step for the new
1262 // vectorized loop using the vectorization factor for the vector dimension.
1263 // Otherwise, propagate the step of the scalar loop.
1264 unsigned newStep;
1265 if (isLoopVecDim) {
1266 unsigned vectorDim = loopToVecDimIt->second;
1267 assert(vectorDim < strategy.vectorSizes.size() && "vector dim overflow");
1268 int64_t forOpVecFactor = strategy.vectorSizes[vectorDim];
1269 newStep = forOp.getStep() * forOpVecFactor;
1270 } else {
1271 newStep = forOp.getStep();
1272 }
1273
1274 // Get information about reduction kinds.
1275 ArrayRef<LoopReduction> reductions;
1276 if (isLoopVecDim && forOp.getNumIterOperands() > 0) {
1277 auto it = strategy.reductionLoops.find(forOp);
1278 assert(it != strategy.reductionLoops.end() &&
1279 "Reduction descriptors not found when vectorizing a reduction loop");
1280 reductions = it->second;
1281 assert(reductions.size() == forOp.getNumIterOperands() &&
1282 "The size of reductions array must match the number of iter_args");
1283 }
1284
1285 // Vectorize 'iter_args'.
1286 SmallVector<Value, 8> vecIterOperands;
1287 if (!isLoopVecDim) {
1288 for (auto operand : forOp.getIterOperands())
1289 vecIterOperands.push_back(vectorizeOperand(operand, state));
1290 } else {
1291 // For reduction loops we need to pass a vector of neutral elements as an
1292 // initial value of the accumulator. We will add the original initial value
1293 // later.
1294 for (auto redAndOperand : llvm::zip(reductions, forOp.getIterOperands())) {
1295 vecIterOperands.push_back(createInitialVector(
1296 std::get<0>(redAndOperand).kind, std::get<1>(redAndOperand), state));
1297 }
1298 }
1299
1300 auto vecForOp = state.builder.create<AffineForOp>(
1301 forOp.getLoc(), forOp.getLowerBoundOperands(), forOp.getLowerBoundMap(),
1302 forOp.getUpperBoundOperands(), forOp.getUpperBoundMap(), newStep,
1303 vecIterOperands,
1304 /*bodyBuilder=*/[](OpBuilder &, Location, Value, ValueRange) {
1305 // Make sure we don't create a default terminator in the loop body as
1306 // the proper terminator will be added during vectorization.
1307 return;
1308 });
1309
1310 // Register loop-related replacements:
1311 // 1) The new vectorized loop is registered as vector replacement of the
1312 // scalar loop.
1313 // 2) The new iv of the vectorized loop is registered as scalar replacement
1314 // since a scalar copy of the iv will prevail in the vectorized loop.
1315 // TODO: A vector replacement will also be added in the future when
1316 // vectorization of linear ops is supported.
1317 // 3) The new 'iter_args' region arguments are registered as vector
1318 // replacements since they have been vectorized.
1319 // 4) If the loop performs a reduction along the vector dimension, a
1320 // `vector.reduction` or similar op is inserted for each resulting value
1321 // of the loop and its scalar value replaces the corresponding scalar
1322 // result of the loop.
1323 state.registerOpVectorReplacement(forOp, vecForOp);
1324 state.registerValueScalarReplacement(forOp.getInductionVar(),
1325 vecForOp.getInductionVar());
1326 for (auto iterTuple :
1327        llvm::zip(forOp.getRegionIterArgs(), vecForOp.getRegionIterArgs()))
1328 state.registerBlockArgVectorReplacement(std::get<0>(iterTuple),
1329 std::get<1>(iterTuple));
1330
1331 if (isLoopVecDim) {
1332 for (unsigned i = 0; i < vecForOp.getNumIterOperands(); ++i) {
1333 // First, we reduce the vector returned from the loop into a scalar.
1334 Value reducedRes =
1335 getVectorReductionOp(reductions[i].kind, state.builder,
1336 vecForOp.getLoc(), vecForOp.getResult(i));
1337 LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ creating a vector reduction: "
1338 << reducedRes);
1339 // Then we combine it with the original (scalar) initial value unless it
1340 // is equal to the neutral element of the reduction.
1341 Value origInit = forOp.getOperand(forOp.getNumControlOperands() + i);
1342 Value finalRes = reducedRes;
1343 if (!isNeutralElementConst(reductions[i].kind, origInit, state))
1344 finalRes = getReductionOp(reductions[i].kind, state.builder,
1345 reducedRes.getLoc(), reducedRes, origInit);
1346 state.registerLoopResultScalarReplacement(forOp.getResult(i), finalRes);
1347 }
1348 }
1349
1350 if (isLoopVecDim)
1351 state.vecLoopToVecDim[vecForOp] = loopToVecDimIt->second;
1352
1353 // Change insertion point so that upcoming vectorized instructions are
1354 // inserted into the vectorized loop's body.
1355 state.builder.setInsertionPointToStart(vecForOp.getBody());
1356
1357 // If this is a reduction loop then we may need to create a mask to filter out
1358 // garbage in the last iteration.
1359 if (isLoopVecDim && forOp.getNumIterOperands() > 0)
1360 createMask(vecForOp, state);
1361
1362 return vecForOp;
1363 }
1364
1365 /// Vectorizes an arbitrary operation by plain widening. We apply generic type
1366 /// widening to all its results and retrieve the vector counterparts of all its
1367 /// operands.
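///
/// A minimal sketch, assuming a 1-D strategy with vector size 128 and operands
/// that have already been vectorized (illustrative only):
///
///   %res = addf %a, %b : f32
///
/// is widened to
///
///   %vres = addf %va, %vb : vector<128xf32>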
1368 static Operation *widenOp(Operation *op, VectorizationState &state) {
1369 SmallVector<Type, 8> vectorTypes;
1370 for (Value result : op->getResults())
1371 vectorTypes.push_back(
1372 VectorType::get(state.strategy->vectorSizes, result.getType()));
1373
1374 SmallVector<Value, 8> vectorOperands;
1375 for (Value operand : op->getOperands()) {
1376 Value vecOperand = vectorizeOperand(operand, state);
1377 if (!vecOperand) {
1378 LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ an operand failed vectorize\n");
1379 return nullptr;
1380 }
1381 vectorOperands.push_back(vecOperand);
1382 }
1383
1384 // Create a clone of the op with the proper operands and return types.
1385 // TODO: The following assumes there is always an op with a fixed
1386 // name that works both in scalar mode and vector mode.
1387 // TODO: Is it worth considering an Operation.clone operation which
1388 // changes the type so we can promote an Operation with less boilerplate?
1389 OperationState vecOpState(op->getLoc(), op->getName().getStringRef(),
1390 vectorOperands, vectorTypes, op->getAttrs(),
1391 /*successors=*/{}, /*regions=*/{});
1392 Operation *vecOp = state.builder.createOperation(vecOpState);
1393 state.registerOpVectorReplacement(op, vecOp);
1394 return vecOp;
1395 }
1396
1397 /// Vectorizes a yield operation by widening its types. The builder's insertion
1398 /// point is set after the vectorized parent op to continue vectorizing the
1399 /// operations after the parent op. When vectorizing a reduction loop a mask may
1400 /// be used to prevent adding garbage values to the accumulator.
1401 static Operation *vectorizeAffineYieldOp(AffineYieldOp yieldOp,
1402 VectorizationState &state) {
1403 Operation *newYieldOp = widenOp(yieldOp, state);
1404 Operation *newParentOp = state.builder.getInsertionBlock()->getParentOp();
1405
1406 // If there is a mask for this loop then we must prevent garbage values from
1407 // being added to the accumulator by inserting `select` operations, for
1408 // example:
1409 //
1410 // %res = addf %acc, %val : vector<128xf32>
1411 // %res_masked = select %mask, %res, %acc : vector<128xi1>, vector<128xf32>
1412 // affine.yield %res_masked : vector<128xf32>
1413 //
1414 if (Value mask = state.vecLoopToMask.lookup(newParentOp)) {
1415 state.builder.setInsertionPoint(newYieldOp);
1416 for (unsigned i = 0; i < newYieldOp->getNumOperands(); ++i) {
1417 Value result = newYieldOp->getOperand(i);
1418 Value iterArg = cast<AffineForOp>(newParentOp).getRegionIterArgs()[i];
1419 Value maskedResult = state.builder.create<SelectOp>(result.getLoc(), mask,
1420 result, iterArg);
1421 LLVM_DEBUG(
1422 dbgs() << "\n[early-vect]+++++ masking a yielded vector value: "
1423 << maskedResult);
1424 newYieldOp->setOperand(i, maskedResult);
1425 }
1426 }
1427
1428 state.builder.setInsertionPointAfter(newParentOp);
1429 return newYieldOp;
1430 }
1431
1432 /// Encodes Operation-specific behavior for vectorization. In general we
1433 /// assume that all operands of an op must be vectorized but this is not
1434 /// always true. In the future, it would be nice to have a trait that
1435 /// describes how a particular operation vectorizes. For now we implement the
1436 /// case distinction here. Returns a vectorized form of an operation or
1437 /// nullptr if vectorization fails.
1438 // TODO: consider adding a trait to Op to describe how it gets vectorized.
1439 // Maybe some Ops are not vectorizable or require some tricky logic, we cannot
1440 // do one-off logic here; ideally it would be TableGen'd.
1441 static Operation *vectorizeOneOperation(Operation *op,
1442 VectorizationState &state) {
1443 // Sanity checks.
1444 assert(!isa<vector::TransferReadOp>(op) &&
1445 "vector.transfer_read cannot be further vectorized");
1446 assert(!isa<vector::TransferWriteOp>(op) &&
1447 "vector.transfer_write cannot be further vectorized");
1448
1449 if (auto loadOp = dyn_cast<AffineLoadOp>(op))
1450 return vectorizeAffineLoad(loadOp, state);
1451 if (auto storeOp = dyn_cast<AffineStoreOp>(op))
1452 return vectorizeAffineStore(storeOp, state);
1453 if (auto forOp = dyn_cast<AffineForOp>(op))
1454 return vectorizeAffineForOp(forOp, state);
1455 if (auto yieldOp = dyn_cast<AffineYieldOp>(op))
1456 return vectorizeAffineYieldOp(yieldOp, state);
1457 if (auto constant = dyn_cast<ConstantOp>(op))
1458 return vectorizeConstant(constant, state);
1459
1460 // Other ops with regions are not supported.
1461 if (op->getNumRegions() != 0)
1462 return nullptr;
1463
1464 return widenOp(op, state);
1465 }
1466
1467 /// Recursive implementation to convert all the nested loops in 'match' to a 2D
1468 /// vector container that preserves the relative nesting level of each loop with
1469 /// respect to the others in 'match'. 'currentLevel' is the nesting level that
1470 /// will be assigned to the loop in the current 'match'.
1471 static void
1472 getMatchedAffineLoopsRec(NestedMatch match, unsigned currentLevel,
1473 std::vector<SmallVector<AffineForOp, 2>> &loops) {
1474 // Add a new empty level to the output if it doesn't exist already.
1475 assert(currentLevel <= loops.size() && "Unexpected currentLevel");
1476 if (currentLevel == loops.size())
1477 loops.push_back(SmallVector<AffineForOp, 2>());
1478
1479 // Add current match and recursively visit its children.
1480 loops[currentLevel].push_back(cast<AffineForOp>(match.getMatchedOperation()));
1481 for (auto childMatch : match.getMatchedChildren()) {
1482 getMatchedAffineLoopsRec(childMatch, currentLevel + 1, loops);
1483 }
1484 }
1485
1486 /// Converts all the nested loops in 'match' to a 2D vector container that
1487 /// preserves the relative nesting level of each loop with respect to the others
1488 /// in 'match'. This means that every loop in 'loops[i]' will have a parent loop
1489 /// in 'loops[i-1]'. A loop in 'loops[i]' may or may not have a child loop in
1490 /// 'loops[i+1]'.
1491 static void
1492 getMatchedAffineLoops(NestedMatch match,
1493 std::vector<SmallVector<AffineForOp, 2>> &loops) {
1494   getMatchedAffineLoopsRec(match, /*currentLevel=*/0, loops);
1495 }
1496
1497 /// Internal implementation to vectorize affine loops from a single loop nest
1498 /// using an n-D vectorization strategy.
1499 static LogicalResult
1500 vectorizeLoopNest(std::vector<SmallVector<AffineForOp, 2>> &loops,
1501 const VectorizationStrategy &strategy) {
1502 assert(loops[0].size() == 1 && "Expected single root loop");
1503 AffineForOp rootLoop = loops[0][0];
1504 VectorizationState state(rootLoop.getContext());
1505 state.builder.setInsertionPointAfter(rootLoop);
1506 state.strategy = &strategy;
1507
1508 // Since patterns are recursive, they can very well intersect.
1509 // Since we do not want a fully greedy strategy in general, we decouple
1510 // pattern matching, from profitability analysis, from application.
1511 // As a consequence we must check that each root pattern is still
1512 // vectorizable. If a pattern is not vectorizable anymore, we just skip it.
1513 // TODO: implement a non-greedy profitability analysis that keeps only
1514 // non-intersecting patterns.
1515 if (!isVectorizableLoopBody(rootLoop, vectorTransferPattern())) {
1516 LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ loop is not vectorizable");
1517 return failure();
1518 }
1519
1520 //////////////////////////////////////////////////////////////////////////////
1521 // Vectorize the scalar loop nest following a topological order. A new vector
1522 // loop nest with the vectorized operations is created along the process. If
1523 // vectorization succeeds, the scalar loop nest is erased. If vectorization
1524 // fails, the vector loop nest is erased and the scalar loop nest is not
1525 // modified.
1526 //////////////////////////////////////////////////////////////////////////////
1527
1528 auto opVecResult = rootLoop.walk<WalkOrder::PreOrder>([&](Operation *op) {
1529 LLVM_DEBUG(dbgs() << "[early-vect]+++++ Vectorizing: " << *op);
1530 Operation *vectorOp = vectorizeOneOperation(op, state);
1531 if (!vectorOp) {
1532 LLVM_DEBUG(
1533 dbgs() << "[early-vect]+++++ failed vectorizing the operation: "
1534 << *op << "\n");
1535 return WalkResult::interrupt();
1536 }
1537
1538 return WalkResult::advance();
1539 });
1540
1541 if (opVecResult.wasInterrupted()) {
1542 LLVM_DEBUG(dbgs() << "[early-vect]+++++ failed vectorization for: "
1543 << rootLoop << "\n");
1544 // Erase vector loop nest if it was created.
1545 auto vecRootLoopIt = state.opVectorReplacement.find(rootLoop);
1546 if (vecRootLoopIt != state.opVectorReplacement.end())
1547 eraseLoopNest(cast<AffineForOp>(vecRootLoopIt->second));
1548
1549 return failure();
1550 }
1551
1552 // Replace results of reduction loops with the scalar values computed using
1553 // `vector.reduce` or similar ops.
1554 for (auto resPair : state.loopResultScalarReplacement)
1555 resPair.first.replaceAllUsesWith(resPair.second);
1556
1557 assert(state.opVectorReplacement.count(rootLoop) == 1 &&
1558 "Expected vector replacement for loop nest");
1559 LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ success vectorizing pattern");
1560 LLVM_DEBUG(dbgs() << "\n[early-vect]+++++ vectorization result:\n"
1561 << *state.opVectorReplacement[rootLoop]);
1562
1563 // Finish this vectorization pattern.
1564 state.finishVectorizationPattern(rootLoop);
1565 return success();
1566 }
1567
1568 /// Extracts the matched loops and vectorizes them following a topological
1569 /// order. A new vector loop nest will be created if vectorization succeeds. The
1570 /// original loop nest won't be modified in any case.
1571 static LogicalResult vectorizeRootMatch(NestedMatch m,
1572 const VectorizationStrategy &strategy) {
1573 std::vector<SmallVector<AffineForOp, 2>> loopsToVectorize;
1574 getMatchedAffineLoops(m, loopsToVectorize);
1575 return vectorizeLoopNest(loopsToVectorize, strategy);
1576 }
1577
1578 /// Traverses all the loop matches and classifies them into intersection
1579 /// buckets. Two matches intersect if any of them encloses the other one. A
1580 /// match intersects with a bucket if the match intersects with the root
1581 /// (outermost) loop in that bucket.
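/// For example, if match A is rooted at loop %i0, match B at a loop %i1 nested
/// inside %i0, and match C at an unrelated loop %k0, then A and B fall into the
/// same bucket (rooted at %i0) and C gets a bucket of its own (illustrative).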
1582 static void computeIntersectionBuckets(
1583 ArrayRef<NestedMatch> matches,
1584 std::vector<SmallVector<NestedMatch, 8>> &intersectionBuckets) {
1585 assert(intersectionBuckets.empty() && "Expected empty output");
1586 // Keeps track of the root (outermost) loop of each bucket.
1587 SmallVector<AffineForOp, 8> bucketRoots;
1588
1589 for (const NestedMatch &match : matches) {
1590 AffineForOp matchRoot = cast<AffineForOp>(match.getMatchedOperation());
1591 bool intersects = false;
1592 for (int i = 0, end = intersectionBuckets.size(); i < end; ++i) {
1593 AffineForOp bucketRoot = bucketRoots[i];
1594 // Add match to the bucket if the bucket root encloses the match root.
1595 if (bucketRoot->isAncestor(matchRoot)) {
1596 intersectionBuckets[i].push_back(match);
1597 intersects = true;
1598 break;
1599 }
1600 // Add match to the bucket if the match root encloses the bucket root. The
1601 // match root becomes the new bucket root.
1602 if (matchRoot->isAncestor(bucketRoot)) {
1603 bucketRoots[i] = matchRoot;
1604 intersectionBuckets[i].push_back(match);
1605 intersects = true;
1606 break;
1607 }
1608 }
1609
1610 // Match doesn't intersect with any existing bucket. Create a new bucket for
1611 // it.
1612 if (!intersects) {
1613 bucketRoots.push_back(matchRoot);
1614 intersectionBuckets.push_back(SmallVector<NestedMatch, 8>());
1615 intersectionBuckets.back().push_back(match);
1616 }
1617 }
1618 }
1619
1620 /// Internal implementation to vectorize affine loops in 'loops' using the n-D
1621 /// vectorization factors in 'vectorSizes'. By default, each vectorization
1622 /// factor is applied inner-to-outer to the loops of each loop nest.
1623 /// 'fastestVaryingPattern' can be optionally used to provide a different loop
1624 /// vectorization order. `reductionLoops` can be provided to specify loops which
1625 /// can be vectorized along the reduction dimension.
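/// For example, 'vectorSizes = {32, 256}' on a 2-D loop nest maps, by default,
/// the factor 256 to the innermost matched loop and 32 to its parent, producing
/// super-vectors such as vector<32x256xf32> for f32 elements (a sketch of the
/// default inner-to-outer mapping).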
1626 static void vectorizeLoops(Operation *parentOp, DenseSet<Operation *> &loops,
1627 ArrayRef<int64_t> vectorSizes,
1628 ArrayRef<int64_t> fastestVaryingPattern,
1629 const ReductionLoopMap &reductionLoops) {
1630 assert((reductionLoops.empty() || vectorSizes.size() == 1) &&
1631 "Vectorizing reductions is supported only for 1-D vectors");
1632
1633 // Compute 1-D, 2-D or 3-D loop pattern to be matched on the target loops.
1634 Optional<NestedPattern> pattern =
1635 makePattern(loops, vectorSizes.size(), fastestVaryingPattern);
1636 if (!pattern.hasValue()) {
1637 LLVM_DEBUG(dbgs() << "\n[early-vect] pattern couldn't be computed\n");
1638 return;
1639 }
1640
1641 LLVM_DEBUG(dbgs() << "\n******************************************");
1642 LLVM_DEBUG(dbgs() << "\n******************************************");
1643 LLVM_DEBUG(dbgs() << "\n[early-vect] new pattern on parent op\n");
1644 LLVM_DEBUG(dbgs() << *parentOp << "\n");
1645
1646 unsigned patternDepth = pattern->getDepth();
1647
1648 // Compute all the pattern matches and classify them into buckets of
1649 // intersecting matches.
1650 SmallVector<NestedMatch, 32> allMatches;
1651 pattern->match(parentOp, &allMatches);
1652 std::vector<SmallVector<NestedMatch, 8>> intersectionBuckets;
1653 computeIntersectionBuckets(allMatches, intersectionBuckets);
1654
1655 // Iterate over all buckets and vectorize the matches eagerly. We can only
1656 // vectorize one match from each bucket since all the matches within a bucket
1657 // intersect.
1658 for (auto &intersectingMatches : intersectionBuckets) {
1659 for (NestedMatch &match : intersectingMatches) {
1660 VectorizationStrategy strategy;
1661 // TODO: depending on profitability, elect to reduce the vector size.
1662 strategy.vectorSizes.assign(vectorSizes.begin(), vectorSizes.end());
1663 strategy.reductionLoops = reductionLoops;
1664 if (failed(analyzeProfitability(match.getMatchedChildren(), 1,
1665 patternDepth, &strategy))) {
1666 continue;
1667 }
1668 vectorizeLoopIfProfitable(match.getMatchedOperation(), 0, patternDepth,
1669 &strategy);
1670 // Vectorize match. Skip the rest of intersecting matches in the bucket if
1671 // vectorization succeeded.
1672 // TODO: if pattern does not apply, report it; alter the cost/benefit.
1673 // TODO: some diagnostics if failure to vectorize occurs.
1674 if (succeeded(vectorizeRootMatch(match, strategy)))
1675 break;
1676 }
1677 }
1678
1679 LLVM_DEBUG(dbgs() << "\n");
1680 }
1681
1682 std::unique_ptr<OperationPass<FuncOp>>
1683 createSuperVectorizePass(ArrayRef<int64_t> virtualVectorSize) {
1684 return std::make_unique<Vectorize>(virtualVectorSize);
1685 }
1686 std::unique_ptr<OperationPass<FuncOp>> createSuperVectorizePass() {
1687 return std::make_unique<Vectorize>();
1688 }
1689
1690 /// Applies vectorization to the current function by searching over a bunch of
1691 /// predetermined patterns.
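///
/// A command-line usage sketch, assuming the pass is registered as
/// 'affine-super-vectorize' with the option name below (illustrative):
///
///   mlir-opt -affine-super-vectorize="virtual-vector-size=128" input.mlir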
1692 void Vectorize::runOnFunction() {
1693 FuncOp f = getFunction();
1694 if (!fastestVaryingPattern.empty() &&
1695 fastestVaryingPattern.size() != vectorSizes.size()) {
1696 f.emitRemark("Fastest varying pattern specified with different size than "
1697 "the vector size.");
1698 return signalPassFailure();
1699 }
1700
1701 if (vectorizeReductions && vectorSizes.size() != 1) {
1702 f.emitError("Vectorizing reductions is supported only for 1-D vectors.");
1703 return signalPassFailure();
1704 }
1705
1706 DenseSet<Operation *> parallelLoops;
1707 ReductionLoopMap reductionLoops;
1708
1709 // If 'vectorize-reduction=true' is provided, we also populate the
1710 // `reductionLoops` map.
1711 if (vectorizeReductions) {
1712     f.walk([&parallelLoops, &reductionLoops](AffineForOp loop) {
1713 SmallVector<LoopReduction, 2> reductions;
1714 if (isLoopParallel(loop, &reductions)) {
1715 parallelLoops.insert(loop);
1716 // If it's not a reduction loop, adding it to the map is not necessary.
1717 if (!reductions.empty())
1718 reductionLoops[loop] = reductions;
1719 }
1720 });
1721 } else {
1722     f.walk([&parallelLoops](AffineForOp loop) {
1723 if (isLoopParallel(loop))
1724 parallelLoops.insert(loop);
1725 });
1726 }
1727
1728 // Thread-safe RAII local context, BumpPtrAllocator freed on exit.
1729 NestedPatternContext mlContext;
1730 vectorizeLoops(f, parallelLoops, vectorSizes, fastestVaryingPattern,
1731 reductionLoops);
1732 }
1733
1734 /// Verify that affine loops in 'loops' meet the nesting criteria expected by
1735 /// SuperVectorizer:
1736 /// * There must be at least one loop.
1737 /// * There must be a single root loop (nesting level 0).
1738 /// * Each loop at a given nesting level must be nested in a loop from a
1739 /// previous nesting level.
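///
/// For example, 'loops = {{%i0}, {%i1, %i2}}' passes the check when %i1 and
/// %i2 are both nested inside %i0, whereas 'loops = {{%i0, %i1}}' is rejected
/// because it has two root loops (illustrative).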
1740 static LogicalResult
1741 verifyLoopNesting(const std::vector<SmallVector<AffineForOp, 2>> &loops) {
1742 // Expected at least one loop.
1743 if (loops.empty())
1744 return failure();
1745
1746 // Expected only one root loop.
1747 if (loops[0].size() != 1)
1748 return failure();
1749
1750 // Traverse loops outer-to-inner to check some invariants.
1751 for (int i = 1, end = loops.size(); i < end; ++i) {
1752 for (AffineForOp loop : loops[i]) {
1753 // Check that each loop at this level is nested in one of the loops from
1754 // the previous level.
1755 if (none_of(loops[i - 1], [&](AffineForOp maybeParent) {
1756 return maybeParent->isProperAncestor(loop);
1757 }))
1758 return failure();
1759
1760 // Check that each loop at this level is not nested in another loop from
1761 // this level.
1762 for (AffineForOp sibling : loops[i]) {
1763 if (sibling->isProperAncestor(loop))
1764 return failure();
1765 }
1766 }
1767 }
1768
1769 return success();
1770 }
1771
1772 namespace mlir {
1773
1774 /// External utility to vectorize affine loops in 'loops' using the n-D
1775 /// vectorization factors in 'vectorSizes'. By default, each vectorization
1776 /// factor is applied inner-to-outer to the loops of each loop nest.
1777 /// 'fastestVaryingPattern' can be optionally used to provide a different loop
1778 /// vectorization order.
1779 /// If `reductionLoops` is not empty, the given reduction loops may be
1780 /// vectorized along the reduction dimension.
1781 /// TODO: Vectorizing reductions is supported only for 1-D vectorization.
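///
/// A minimal usage sketch, mirroring what Vectorize::runOnFunction does
/// (variable names such as 'funcOp' are illustrative):
///
///   DenseSet<Operation *> parallelLoops;
///   funcOp.walk([&parallelLoops](AffineForOp loop) {
///     if (isLoopParallel(loop))
///       parallelLoops.insert(loop);
///   });
///   vectorizeAffineLoops(funcOp, parallelLoops, /*vectorSizes=*/{128},
///                        /*fastestVaryingPattern=*/{},
///                        /*reductionLoops=*/ReductionLoopMap());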
1782 void vectorizeAffineLoops(Operation *parentOp, DenseSet<Operation *> &loops,
1783 ArrayRef<int64_t> vectorSizes,
1784 ArrayRef<int64_t> fastestVaryingPattern,
1785 const ReductionLoopMap &reductionLoops) {
1786 // Thread-safe RAII local context, BumpPtrAllocator freed on exit.
1787 NestedPatternContext mlContext;
1788 vectorizeLoops(parentOp, loops, vectorSizes, fastestVaryingPattern,
1789 reductionLoops);
1790 }
1791
1792 /// External utility to vectorize affine loops from a single loop nest using an
1793 /// n-D vectorization strategy (see doc in VectorizationStrategy definition).
1794 /// Loops are provided in a 2D vector container. The first dimension represents
1795 /// the nesting level relative to the loops to be vectorized. The second
1796 /// dimension contains the loops. This means that:
1797 /// a) every loop in 'loops[i]' must have a parent loop in 'loops[i-1]',
1798 /// b) a loop in 'loops[i]' may or may not have a child loop in 'loops[i+1]'.
1799 ///
1800 /// For example, for the following loop nest:
1801 ///
1802 /// func @vec2d(%in0: memref<64x128x512xf32>, %in1: memref<64x128x128xf32>,
1803 /// %out0: memref<64x128x512xf32>,
1804 /// %out1: memref<64x128x128xf32>) {
1805 /// affine.for %i0 = 0 to 64 {
1806 /// affine.for %i1 = 0 to 128 {
1807 /// affine.for %i2 = 0 to 512 {
1808 /// %ld = affine.load %in0[%i0, %i1, %i2] : memref<64x128x512xf32>
1809 /// affine.store %ld, %out0[%i0, %i1, %i2] : memref<64x128x512xf32>
1810 /// }
1811 /// affine.for %i3 = 0 to 128 {
1812 /// %ld = affine.load %in1[%i0, %i1, %i3] : memref<64x128x128xf32>
1813 /// affine.store %ld, %out1[%i0, %i1, %i3] : memref<64x128x128xf32>
1814 /// }
1815 /// }
1816 /// }
1817 /// return
1818 /// }
1819 ///
1820 /// loops = {{%i0}, {%i2, %i3}}, to vectorize the outermost and the two
1821 /// innermost loops;
1822 /// loops = {{%i1}, {%i2, %i3}}, to vectorize the middle and the two innermost
1823 /// loops;
1824 /// loops = {{%i2}}, to vectorize only the first innermost loop;
1825 /// loops = {{%i3}}, to vectorize only the second innermost loop;
1826 /// loops = {{%i1}}, to vectorize only the middle loop.
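///
/// A minimal calling sketch for the first configuration above; 'i0', 'i2' and
/// 'i3' stand for the corresponding AffineForOp handles, and strategy fields
/// other than 'vectorSizes' (e.g. 'loopToVectorDim') would typically also be
/// populated:
///
///   std::vector<SmallVector<AffineForOp, 2>> loops = {{i0}, {i2, i3}};
///   VectorizationStrategy strategy;
///   strategy.vectorSizes.assign({32, 256});
///   if (failed(vectorizeAffineLoopNest(loops, strategy)))
///     return failure();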
1827 LogicalResult
1828 vectorizeAffineLoopNest(std::vector<SmallVector<AffineForOp, 2>> &loops,
1829 const VectorizationStrategy &strategy) {
1830 // Thread-safe RAII local context, BumpPtrAllocator freed on exit.
1831 NestedPatternContext mlContext;
1832 if (failed(verifyLoopNesting(loops)))
1833 return failure();
1834 return vectorizeLoopNest(loops, strategy);
1835 }
1836
1837 std::unique_ptr<OperationPass<FuncOp>>
1838 createSuperVectorizePass(ArrayRef<int64_t> virtualVectorSize) {
1839 return std::make_unique<Vectorize>(virtualVectorSize);
1840 }
1841 std::unique_ptr<OperationPass<FuncOp>> createSuperVectorizePass() {
1842 return std::make_unique<Vectorize>();
1843 }
1844
1845 } // namespace mlir
1846