/* Vectorizer
   Copyright (C) 2003-2019 Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#ifndef GCC_TREE_VECTORIZER_H
#define GCC_TREE_VECTORIZER_H

typedef struct _stmt_vec_info *stmt_vec_info;

#include "tree-data-ref.h"
#include "tree-hash-traits.h"
#include "target.h"

/* Used for naming of new temporaries.  */
enum vect_var_kind {
  vect_simple_var,
  vect_pointer_var,
  vect_scalar_var,
  vect_mask_var
};

/* Defines the type of operation.  */
enum operation_type {
  unary_op = 1,
  binary_op,
  ternary_op
};

/* Defines the type of available alignment support.  */
enum dr_alignment_support {
  dr_unaligned_unsupported,
  dr_unaligned_supported,
  dr_explicit_realign,
  dr_explicit_realign_optimized,
  dr_aligned
};

/* Defines the type of a def-use cross-iteration cycle.  */
enum vect_def_type {
  vect_uninitialized_def = 0,
  vect_constant_def = 1,
  vect_external_def,
  vect_internal_def,
  vect_induction_def,
  vect_reduction_def,
  vect_double_reduction_def,
  vect_nested_cycle,
  vect_unknown_def_type
};

/* Defines the type of reduction.  */
enum vect_reduction_type {
  TREE_CODE_REDUCTION,
  COND_REDUCTION,
  INTEGER_INDUC_COND_REDUCTION,
  CONST_COND_REDUCTION,

  /* Retain a scalar phi and use a FOLD_EXTRACT_LAST within the loop
     to implement:

       for (int i = 0; i < VF; ++i)
         res = cond[i] ? val[i] : res;  */
  EXTRACT_LAST_REDUCTION,

  /* Use a folding reduction within the loop to implement:

       for (int i = 0; i < VF; ++i)
         res = res OP val[i];

     (with no reassociation).  */
  FOLD_LEFT_REDUCTION
};

#define VECTORIZABLE_CYCLE_DEF(D) (((D) == vect_reduction_def)           \
                                   || ((D) == vect_double_reduction_def) \
                                   || ((D) == vect_nested_cycle))

/* Structure to encapsulate information about a group of like
   instructions to be presented to the target cost model.  */
struct stmt_info_for_cost {
  int count;
  enum vect_cost_for_stmt kind;
  enum vect_cost_model_location where;
  stmt_vec_info stmt_info;
  int misalign;
};

typedef vec<stmt_info_for_cost> stmt_vector_for_cost;
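
/* For example (a sketch only, not taken from any caller in this file):
   a pass could record one vector store issued in the loop body by
   pushing an entry onto a stmt_vector_for_cost it is accumulating:

     stmt_info_for_cost entry = { 1, vector_store, vect_body, stmt_info, 0 };
     cost_vec->safe_push (entry);

   where STMT_INFO is the statement being costed and COST_VEC is a
   stmt_vector_for_cost gathered during analysis.  */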

/* Maps base addresses to an innermost_loop_behavior that gives the maximum
   known alignment for that base.  */
typedef hash_map<tree_operand_hash,
		 innermost_loop_behavior *> vec_base_alignments;

/************************************************************************
  SLP
 ************************************************************************/
typedef struct _slp_tree *slp_tree;

/* A computation tree of an SLP instance.  Each node corresponds to a group of
   stmts to be packed in a SIMD stmt.  */
struct _slp_tree {
  /* Nodes that contain the def-stmts of the operands of this node's
     statements.  */
  vec<slp_tree> children;
  /* A group of scalar stmts to be vectorized together.  */
  vec<stmt_vec_info> stmts;
  /* Load permutation relative to the stores, NULL if there is no
     permutation.  */
  vec<unsigned> load_permutation;
  /* Vectorized stmt/s.  */
  vec<stmt_vec_info> vec_stmts;
  /* Number of vector stmts that are created to replace the group of scalar
     stmts.  It is calculated during the transformation phase as the number of
     scalar elements in one scalar iteration (GROUP_SIZE) multiplied by VF
     divided by the vector size.  */
  unsigned int vec_stmts_size;
  /* Reference count in the SLP graph.  */
  unsigned int refcnt;
  /* Whether the scalar computations use two different operators.  */
  bool two_operators;
  /* The DEF type of this node.  */
  enum vect_def_type def_type;
};
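
/* For example (a worked instance of the VEC_STMTS_SIZE formula above,
   with made-up numbers): if a node packs a group of GROUP_SIZE == 4
   scalar stmts, the vectorization factor is VF == 8 and each vector
   holds 8 elements, then VEC_STMTS_SIZE == 4 * 8 / 8 == 4 vector stmts
   replace the group.  */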


/* An SLP instance is a sequence of stmts in a loop that can be packed into
   SIMD stmts.  */
typedef struct _slp_instance {
  /* The root of the SLP tree.  */
  slp_tree root;

  /* Size of the groups of scalar stmts that will be replaced by SIMD stmt/s.  */
  unsigned int group_size;

  /* The unrolling factor required to vectorize this SLP instance.  */
  poly_uint64 unrolling_factor;

  /* The group of nodes that contain loads of this SLP instance.  */
  vec<slp_tree> loads;

  /* The SLP node containing the reduction PHIs.  */
  slp_tree reduc_phis;
} *slp_instance;


/* Access Functions.  */
#define SLP_INSTANCE_TREE(S)                     (S)->root
#define SLP_INSTANCE_GROUP_SIZE(S)               (S)->group_size
#define SLP_INSTANCE_UNROLLING_FACTOR(S)         (S)->unrolling_factor
#define SLP_INSTANCE_LOADS(S)                    (S)->loads

#define SLP_TREE_CHILDREN(S)                     (S)->children
#define SLP_TREE_SCALAR_STMTS(S)                 (S)->stmts
#define SLP_TREE_VEC_STMTS(S)                    (S)->vec_stmts
#define SLP_TREE_NUMBER_OF_VEC_STMTS(S)          (S)->vec_stmts_size
#define SLP_TREE_LOAD_PERMUTATION(S)             (S)->load_permutation
#define SLP_TREE_TWO_OPERATORS(S)                (S)->two_operators
#define SLP_TREE_DEF_TYPE(S)                     (S)->def_type



/* Describes two objects whose addresses must be unequal for the vectorized
   loop to be valid.  */
typedef std::pair<tree, tree> vec_object_pair;

/* Records that vectorization is only possible if abs (EXPR) >= MIN_VALUE.
   UNSIGNED_P is true if we can assume that abs (EXPR) == EXPR.  */
struct vec_lower_bound {
  vec_lower_bound () {}
  vec_lower_bound (tree e, bool u, poly_uint64 m)
    : expr (e), unsigned_p (u), min_value (m) {}

  tree expr;
  bool unsigned_p;
  poly_uint64 min_value;
};

/* Vectorizer state shared between different analyses (e.g. with different
   vector sizes) of the same CFG region.  */
struct vec_info_shared {
  vec_info_shared();
  ~vec_info_shared();

  void save_datarefs();
  void check_datarefs();

  /* All data references.  Freed by free_data_refs, so not an auto_vec.  */
  vec<data_reference_p> datarefs;
  vec<data_reference> datarefs_copy;

  /* The loop nest in which the data dependences are computed.  */
  auto_vec<loop_p> loop_nest;

  /* All data dependences.  Freed by free_dependence_relations, so not
     an auto_vec.  */
  vec<ddr_p> ddrs;
};

/* Vectorizer state common between loop and basic-block vectorization.  */
struct vec_info {
  enum vec_kind { bb, loop };

  vec_info (vec_kind, void *, vec_info_shared *);
  ~vec_info ();

  stmt_vec_info add_stmt (gimple *);
  stmt_vec_info lookup_stmt (gimple *);
  stmt_vec_info lookup_def (tree);
  stmt_vec_info lookup_single_use (tree);
  struct dr_vec_info *lookup_dr (data_reference *);
  void move_dr (stmt_vec_info, stmt_vec_info);
  void remove_stmt (stmt_vec_info);
  void replace_stmt (gimple_stmt_iterator *, stmt_vec_info, gimple *);

  /* The type of vectorization.  */
  vec_kind kind;

  /* Shared vectorizer state.  */
  vec_info_shared *shared;

  /* The mapping of GIMPLE UID to stmt_vec_info.  */
  vec<stmt_vec_info> stmt_vec_infos;

  /* All SLP instances.  */
  auto_vec<slp_instance> slp_instances;

  /* Maps base addresses to an innermost_loop_behavior that gives the maximum
     known alignment for that base.  */
  vec_base_alignments base_alignments;

  /* All interleaving chains of stores, represented by the first
     stmt in the chain.  */
  auto_vec<stmt_vec_info> grouped_stores;

  /* Cost data used by the target cost model.  */
  void *target_cost_data;

private:
  stmt_vec_info new_stmt_vec_info (gimple *stmt);
  void set_vinfo_for_stmt (gimple *, stmt_vec_info);
  void free_stmt_vec_infos ();
  void free_stmt_vec_info (stmt_vec_info);
};

struct _loop_vec_info;
struct _bb_vec_info;

template<>
template<>
inline bool
is_a_helper <_loop_vec_info *>::test (vec_info *i)
{
  return i->kind == vec_info::loop;
}

template<>
template<>
inline bool
is_a_helper <_bb_vec_info *>::test (vec_info *i)
{
  return i->kind == vec_info::bb;
}
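
/* These specializations let the is-a.h machinery dispatch on the KIND
   field.  For example (a sketch), given a vec_info *VINFO:

     if (loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo))
       ... use loop-specific state ...
     else
       ... VINFO describes a basic-block region ...

   The same pattern appears below in STMT_VINFO_LOOP_VINFO and
   STMT_VINFO_BB_VINFO.  */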


/* In general, we can divide the vector statements in a vectorized loop
   into related groups ("rgroups") and say that for each rgroup there is
   some nS such that the rgroup operates on nS values from one scalar
   iteration followed by nS values from the next.  That is, if VF is the
   vectorization factor of the loop, the rgroup operates on a sequence:

     (1,1) (1,2) ... (1,nS) (2,1) ... (2,nS) ... (VF,1) ... (VF,nS)

   where (i,j) represents a scalar value with index j in a scalar
   iteration with index i.

   [ We use the term "rgroup" to emphasise that this grouping isn't
     necessarily the same as the grouping of statements used elsewhere.
     For example, if we implement a group of scalar loads using gather
     loads, we'll use a separate gather load for each scalar load, and
     thus each gather load will belong to its own rgroup. ]

   In general this sequence will occupy nV vectors concatenated
   together.  If these vectors have nL lanes each, the total number
   of scalar values N is given by:

       N = nS * VF = nV * nL

   None of nS, VF, nV and nL are required to be a power of 2.  nS and nV
   are compile-time constants but VF and nL can be variable (if the target
   supports variable-length vectors).

   In classical vectorization, each iteration of the vector loop would
   handle exactly VF iterations of the original scalar loop.  However,
   in a fully-masked loop, a particular iteration of the vector loop
   might handle fewer than VF iterations of the scalar loop.  The vector
   lanes that correspond to iterations of the scalar loop are said to be
   "active" and the other lanes are said to be "inactive".

   In a fully-masked loop, many rgroups need to be masked to ensure that
   they have no effect for the inactive lanes.  Each such rgroup needs a
   sequence of booleans in the same order as above, but with each (i,j)
   replaced by a boolean that indicates whether iteration i is active.
   This sequence occupies nV vector masks that again have nL lanes each.
   Thus the mask sequence as a whole consists of VF independent booleans
   that are each repeated nS times.

   We make the simplifying assumption that if a sequence of nV masks is
   suitable for one (nS,nL) pair, we can reuse it for (nS/2,nL/2) by
   VIEW_CONVERTing it.  This holds for all current targets that support
   fully-masked loops.  For example, suppose the scalar loop is:

     float *f;
     double *d;
     for (int i = 0; i < n; ++i)
       {
	 f[i * 2 + 0] += 1.0f;
	 f[i * 2 + 1] += 2.0f;
	 d[i] += 3.0;
       }

   and suppose that vectors have 256 bits.  The vectorized f accesses
   will belong to one rgroup and the vectorized d access to another:

     f rgroup: nS = 2, nV = 1, nL = 8
     d rgroup: nS = 1, nV = 1, nL = 4
	       VF = 4

     [ In this simple example the rgroups do correspond to the normal
       SLP grouping scheme. ]

   If only the first three lanes are active, the masks we need are:

     f rgroup: 1 1 | 1 1 | 1 1 | 0 0
     d rgroup:  1  |  1  |  1  |  0

   Here we can use a mask calculated for f's rgroup for d's, but not
   vice versa.

   Thus for each value of nV, it is enough to provide nV masks, with the
   mask being calculated based on the highest nL (or, equivalently, based
   on the highest nS) required by any rgroup with that nV.  We therefore
   represent the entire collection of masks as a two-level table, with the
   first level being indexed by nV - 1 (since nV == 0 doesn't exist) and
   the second being indexed by the mask index 0 <= i < nV.  */

/* The masks needed by rgroups with nV vectors, according to the
   description above.  */
struct rgroup_masks {
  /* The largest nS for all rgroups that use these masks.  */
  unsigned int max_nscalars_per_iter;

  /* The type of mask to use, based on the highest nS recorded above.  */
  tree mask_type;

  /* A vector of nV masks, in iteration order.  */
  vec<tree> masks;
};

typedef auto_vec<rgroup_masks> vec_loop_masks;
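
/* For example (a sketch continuing the f/d rgroups above): both rgroups
   have nV == 1, so they share the vec_loop_masks entry at index
   nV - 1 == 0.  That entry's MAX_NSCALARS_PER_ITER is 2 (f's nS), so its
   single mask is calculated for the 8-lane f requirement; f's rgroup
   uses the mask directly, while d's rgroup VIEW_CONVERTs it to its
   narrower 4-lane mask type.  */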

/*-----------------------------------------------------------------*/
/* Info on vectorized loops.                                       */
/*-----------------------------------------------------------------*/
typedef struct _loop_vec_info : public vec_info {
  _loop_vec_info (struct loop *, vec_info_shared *);
  ~_loop_vec_info ();

  /* The loop to which this info struct refers.  */
  struct loop *loop;

  /* The loop basic blocks.  */
  basic_block *bbs;

  /* Number of latch executions.  */
  tree num_itersm1;
  /* Number of iterations.  */
  tree num_iters;
  /* Number of iterations of the original loop.  */
  tree num_iters_unchanged;
  /* Condition under which this loop is analyzed and versioned.  */
  tree num_iters_assumptions;

  /* Threshold of the number of iterations below which vectorization will
     not be performed.  It is calculated from MIN_PROFITABLE_ITERS and
     PARAM_MIN_VECT_LOOP_BOUND.  */
  unsigned int th;

  /* When applying loop versioning, the vector form should only be used
     if the number of scalar iterations is >= this value, on top of all
     the other requirements.  Ignored when loop versioning is not being
     used.  */
  poly_uint64 versioning_threshold;

  /* Unrolling factor.  */
  poly_uint64 vectorization_factor;

  /* Maximum runtime vectorization factor, or MAX_VECTORIZATION_FACTOR
     if there is no particular limit.  */
  unsigned HOST_WIDE_INT max_vectorization_factor;

  /* The masks that a fully-masked loop should use to avoid operating
     on inactive scalars.  */
  vec_loop_masks masks;

  /* If we are using a loop mask to align memory addresses, this variable
     contains the number of vector elements that we should skip in the
     first iteration of the vector loop (i.e. the number of leading
     elements that should be false in the first mask).  */
  tree mask_skip_niters;

  /* Type of the variables to use in the WHILE_ULT call for fully-masked
     loops.  */
  tree mask_compare_type;

  /* The DR with unknown misalignment according to which the loop was
     peeled.  */
  struct dr_vec_info *unaligned_dr;

  /* peeling_for_alignment indicates whether peeling for alignment will take
     place, and what the peeling factor should be:
     peeling_for_alignment = X means:
        If X=0: Peeling for alignment will not be applied.
        If X>0: Peel the first X iterations.
        If X=-1: Generate a runtime test to calculate the number of iterations
                 to be peeled, using the dataref recorded in the field
                 unaligned_dr.  */
  int peeling_for_alignment;

  /* The mask used to check the alignment of pointers or arrays.  */
  int ptr_mask;

  /* Data Dependence Relations defining address ranges that are candidates
     for a run-time aliasing check.  */
  auto_vec<ddr_p> may_alias_ddrs;

  /* Data Dependence Relations defining address ranges together with segment
     lengths from which the run-time aliasing check is built.  */
  auto_vec<dr_with_seg_len_pair_t> comp_alias_ddrs;

  /* Checks that the addresses of each pair of objects are unequal.  */
  auto_vec<vec_object_pair> check_unequal_addrs;

  /* List of values that are required to be nonzero.  This is used to check
     whether things like "x[i * n] += 1;" are safe and eventually gets added
     to the checks for lower bounds below.  */
  auto_vec<tree> check_nonzero;

  /* List of values that need to be checked for a minimum value.  */
  auto_vec<vec_lower_bound> lower_bounds;

  /* Statements in the loop that have data references that are candidates for a
     runtime (loop versioning) misalignment check.  */
  auto_vec<stmt_vec_info> may_misalign_stmts;

  /* Reduction cycles detected in the loop.  Used in loop-aware SLP.  */
  auto_vec<stmt_vec_info> reductions;

  /* All reduction chains in the loop, represented by the first
     stmt in the chain.  */
  auto_vec<stmt_vec_info> reduction_chains;

  /* Cost vector for a single scalar iteration.  */
  auto_vec<stmt_info_for_cost> scalar_cost_vec;

  /* Map of IV base/step expressions to the inserted name in the preheader.  */
  hash_map<tree_operand_hash, tree> *ivexpr_map;

  /* The unrolling factor needed to SLP the loop.  If pure SLP is applied
     to the loop, i.e., no unrolling is needed, this is 1.  */
  poly_uint64 slp_unrolling_factor;

  /* Cost of a single scalar iteration.  */
  int single_scalar_iteration_cost;

  /* Is the loop vectorizable? */
  bool vectorizable;

  /* Records whether we still have the option of using a fully-masked loop.  */
  bool can_fully_mask_p;

  /* True if we have decided to use a fully-masked loop.  */
  bool fully_masked_p;

  /* When we have grouped data accesses with gaps, we may introduce invalid
     memory accesses.  We peel the last iteration of the loop to prevent
     this.  */
  bool peeling_for_gaps;

  /* When the number of iterations is not a multiple of the vector size
     we need to peel off iterations at the end to form an epilogue loop.  */
  bool peeling_for_niter;

  /* Reductions are canonicalized so that the last operand is the reduction
     operand.  If this places a constant into RHS1, this decanonicalizes
     GIMPLE for other phases, so we must track when this has occurred and
     fix it up.  */
  bool operands_swapped;

  /* True if there are no loop carried data dependencies in the loop.
     If loop->safelen <= 1, then this is always true, either the loop
     didn't have any loop carried data dependencies, or the loop is being
     vectorized guarded with some runtime alias checks, or couldn't
     be vectorized at all, but then this field shouldn't be used.
     For loop->safelen >= 2, the user has asserted that there are no
     backward dependencies, but there still could be loop carried forward
     dependencies in such loops.  This flag will be false if normal
     vectorizer data dependency analysis would fail or require versioning
     for alias, but because of loop->safelen >= 2 it has been vectorized
     even without versioning for alias.  E.g. in:
     #pragma omp simd
     for (int i = 0; i < m; i++)
       a[i] = a[i + k] * c;
     (or #pragma simd or #pragma ivdep) we can vectorize this and it will
     DTRT even for k > 0 && k < m, but without safelen we would not
     vectorize this, so this field would be false.  */
  bool no_data_dependencies;

  /* Mark loops having masked stores.  */
  bool has_mask_store;

  /* If if-conversion versioned this loop before conversion, this is the
     loop version without if-conversion.  */
  struct loop *scalar_loop;

  /* For loops that are epilogues of already-vectorized loops
     this points to the original vectorized loop.  Otherwise NULL.  */
  _loop_vec_info *orig_loop_info;

} *loop_vec_info;

/* Access Functions.  */
#define LOOP_VINFO_LOOP(L)                 (L)->loop
#define LOOP_VINFO_BBS(L)                  (L)->bbs
#define LOOP_VINFO_NITERSM1(L)             (L)->num_itersm1
#define LOOP_VINFO_NITERS(L)               (L)->num_iters
/* Since LOOP_VINFO_NITERS and LOOP_VINFO_NITERSM1 can change after
   prologue peeling, retain the total unchanged scalar loop iterations
   for the cost model.  */
#define LOOP_VINFO_NITERS_UNCHANGED(L)     (L)->num_iters_unchanged
#define LOOP_VINFO_NITERS_ASSUMPTIONS(L)   (L)->num_iters_assumptions
#define LOOP_VINFO_COST_MODEL_THRESHOLD(L) (L)->th
#define LOOP_VINFO_VERSIONING_THRESHOLD(L) (L)->versioning_threshold
#define LOOP_VINFO_VECTORIZABLE_P(L)       (L)->vectorizable
#define LOOP_VINFO_CAN_FULLY_MASK_P(L)     (L)->can_fully_mask_p
#define LOOP_VINFO_FULLY_MASKED_P(L)       (L)->fully_masked_p
#define LOOP_VINFO_VECT_FACTOR(L)          (L)->vectorization_factor
#define LOOP_VINFO_MAX_VECT_FACTOR(L)      (L)->max_vectorization_factor
#define LOOP_VINFO_MASKS(L)                (L)->masks
#define LOOP_VINFO_MASK_SKIP_NITERS(L)     (L)->mask_skip_niters
#define LOOP_VINFO_MASK_COMPARE_TYPE(L)    (L)->mask_compare_type
#define LOOP_VINFO_PTR_MASK(L)             (L)->ptr_mask
#define LOOP_VINFO_LOOP_NEST(L)            (L)->shared->loop_nest
#define LOOP_VINFO_DATAREFS(L)             (L)->shared->datarefs
#define LOOP_VINFO_DDRS(L)                 (L)->shared->ddrs
#define LOOP_VINFO_INT_NITERS(L)           (TREE_INT_CST_LOW ((L)->num_iters))
#define LOOP_VINFO_PEELING_FOR_ALIGNMENT(L) (L)->peeling_for_alignment
#define LOOP_VINFO_UNALIGNED_DR(L)         (L)->unaligned_dr
#define LOOP_VINFO_MAY_MISALIGN_STMTS(L)   (L)->may_misalign_stmts
#define LOOP_VINFO_MAY_ALIAS_DDRS(L)       (L)->may_alias_ddrs
#define LOOP_VINFO_COMP_ALIAS_DDRS(L)      (L)->comp_alias_ddrs
#define LOOP_VINFO_CHECK_UNEQUAL_ADDRS(L)  (L)->check_unequal_addrs
#define LOOP_VINFO_CHECK_NONZERO(L)        (L)->check_nonzero
#define LOOP_VINFO_LOWER_BOUNDS(L)         (L)->lower_bounds
#define LOOP_VINFO_GROUPED_STORES(L)       (L)->grouped_stores
#define LOOP_VINFO_SLP_INSTANCES(L)        (L)->slp_instances
#define LOOP_VINFO_SLP_UNROLLING_FACTOR(L) (L)->slp_unrolling_factor
#define LOOP_VINFO_REDUCTIONS(L)           (L)->reductions
#define LOOP_VINFO_REDUCTION_CHAINS(L)     (L)->reduction_chains
#define LOOP_VINFO_TARGET_COST_DATA(L)     (L)->target_cost_data
#define LOOP_VINFO_PEELING_FOR_GAPS(L)     (L)->peeling_for_gaps
#define LOOP_VINFO_OPERANDS_SWAPPED(L)     (L)->operands_swapped
#define LOOP_VINFO_PEELING_FOR_NITER(L)    (L)->peeling_for_niter
#define LOOP_VINFO_NO_DATA_DEPENDENCIES(L) (L)->no_data_dependencies
#define LOOP_VINFO_SCALAR_LOOP(L)          (L)->scalar_loop
#define LOOP_VINFO_HAS_MASK_STORE(L)       (L)->has_mask_store
#define LOOP_VINFO_SCALAR_ITERATION_COST(L) (L)->scalar_cost_vec
#define LOOP_VINFO_SINGLE_SCALAR_ITERATION_COST(L) (L)->single_scalar_iteration_cost
#define LOOP_VINFO_ORIG_LOOP_INFO(L)       (L)->orig_loop_info

#define LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT(L)	\
  ((L)->may_misalign_stmts.length () > 0)
#define LOOP_REQUIRES_VERSIONING_FOR_ALIAS(L)		\
  ((L)->comp_alias_ddrs.length () > 0 \
   || (L)->check_unequal_addrs.length () > 0 \
   || (L)->lower_bounds.length () > 0)
#define LOOP_REQUIRES_VERSIONING_FOR_NITERS(L)		\
  (LOOP_VINFO_NITERS_ASSUMPTIONS (L))
#define LOOP_REQUIRES_VERSIONING(L)			\
  (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (L)		\
   || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (L)		\
   || LOOP_REQUIRES_VERSIONING_FOR_NITERS (L))

#define LOOP_VINFO_NITERS_KNOWN_P(L)          \
  (tree_fits_shwi_p ((L)->num_iters) && tree_to_shwi ((L)->num_iters) > 0)

#define LOOP_VINFO_EPILOGUE_P(L) \
  (LOOP_VINFO_ORIG_LOOP_INFO (L) != NULL)

#define LOOP_VINFO_ORIG_MAX_VECT_FACTOR(L) \
  (LOOP_VINFO_MAX_VECT_FACTOR (LOOP_VINFO_ORIG_LOOP_INFO (L)))

/* Wrapper for loop_vec_info, for tracking success/failure, where a non-NULL
   value signifies success, and a NULL value signifies failure, supporting
   propagating an opt_problem * describing the failure back up the call
   stack.  */
typedef opt_pointer_wrapper <loop_vec_info> opt_loop_vec_info;

static inline loop_vec_info
loop_vec_info_for_loop (struct loop *loop)
{
  return (loop_vec_info) loop->aux;
}

typedef struct _bb_vec_info : public vec_info
{
  _bb_vec_info (gimple_stmt_iterator, gimple_stmt_iterator, vec_info_shared *);
  ~_bb_vec_info ();

  basic_block bb;
  gimple_stmt_iterator region_begin;
  gimple_stmt_iterator region_end;
} *bb_vec_info;

#define BB_VINFO_BB(B)               (B)->bb
#define BB_VINFO_GROUPED_STORES(B)   (B)->grouped_stores
#define BB_VINFO_SLP_INSTANCES(B)    (B)->slp_instances
#define BB_VINFO_DATAREFS(B)         (B)->shared->datarefs
#define BB_VINFO_DDRS(B)             (B)->shared->ddrs
#define BB_VINFO_TARGET_COST_DATA(B) (B)->target_cost_data

static inline bb_vec_info
vec_info_for_bb (basic_block bb)
{
  return (bb_vec_info) bb->aux;
}

/*-----------------------------------------------------------------*/
/* Info on vectorized defs.                                        */
/*-----------------------------------------------------------------*/
enum stmt_vec_info_type {
  undef_vec_info_type = 0,
  load_vec_info_type,
  store_vec_info_type,
  shift_vec_info_type,
  op_vec_info_type,
  call_vec_info_type,
  call_simd_clone_vec_info_type,
  assignment_vec_info_type,
  condition_vec_info_type,
  comparison_vec_info_type,
  reduc_vec_info_type,
  induc_vec_info_type,
  type_promotion_vec_info_type,
  type_demotion_vec_info_type,
  type_conversion_vec_info_type,
  loop_exit_ctrl_vec_info_type
};

/* Indicates whether/how a variable is used in the scope of the loop/basic
   block.  */
enum vect_relevant {
  vect_unused_in_scope = 0,

  /* The def is only used outside the loop.  */
  vect_used_only_live,
  /* The def is in the inner loop, and the use is in the outer loop, and the
     use is a reduction stmt.  */
  vect_used_in_outer_by_reduction,
  /* The def is in the inner loop, and the use is in the outer loop (and is
     not part of a reduction).  */
  vect_used_in_outer,

  /* Defs that feed computations that end up (only) in a reduction.  These
     defs may be used by non-reduction stmts, but eventually, any
     computations/values that are affected by these defs are used to compute
     a reduction (i.e. don't get stored to memory, for example).  We use this
     to identify computations whose evaluation order we are free to
     change.  */
  vect_used_by_reduction,

  vect_used_in_scope
};

/* The type of vectorization that can be applied to the stmt: regular loop-based
   vectorization; pure SLP - the stmt is part of SLP instances and does not
   have uses outside SLP instances; or hybrid SLP and loop-based - the stmt is
   part of an SLP instance and also must be loop-based vectorized, since it has
   uses outside SLP sequences.

   In the loop context the meanings of pure and hybrid SLP are slightly
   different.  By saying that pure SLP is applied to the loop, we mean that we
   exploit only intra-iteration parallelism in the loop; i.e., the loop can be
   vectorized without doing any conceptual unrolling, because we don't pack
   together stmts from different iterations, only within a single iteration.
   Loop hybrid SLP means that we exploit both intra-iteration and
   inter-iteration parallelism (e.g., the number of elements in the vector is
   4 and the slp-group-size is 2, in which case we don't have enough
   parallelism within an iteration, so we obtain the rest of the parallelism
   from subsequent iterations by unrolling the loop by 2).  A sketch of this
   situation follows the enum.  */
enum slp_vect_type {
  loop_vect = 0,
  pure_slp,
  hybrid
};
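
/* For example (a sketch using the numbers from the comment above): with
   4-element vectors and an slp-group-size of 2, a loop such as

     for (int i = 0; i < n; i++)
       {
	 a[2 * i + 0] = b[2 * i + 0] + 1;
	 a[2 * i + 1] = b[2 * i + 1] + 1;
       }

   provides only 2-way parallelism within each iteration, so the SLP
   group alone cannot fill a vector; unrolling the loop by 2 packs the
   stores of two consecutive iterations into one 4-element SIMD store,
   which is the loop-hybrid-SLP situation described above.  */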

/* Says whether a statement is a load, a store of a vectorized statement
   result, or a store of an invariant value.  */
enum vec_load_store_type {
  VLS_LOAD,
  VLS_STORE,
  VLS_STORE_INVARIANT
};

/* Describes how we're going to vectorize an individual load or store,
   or a group of loads or stores.  */
enum vect_memory_access_type {
  /* An access to an invariant address.  This is used only for loads.  */
  VMAT_INVARIANT,

  /* A simple contiguous access.  */
  VMAT_CONTIGUOUS,

  /* A contiguous access that goes down in memory rather than up,
     with no additional permutation.  This is used only for stores
     of invariants.  */
  VMAT_CONTIGUOUS_DOWN,

  /* A simple contiguous access in which the elements need to be permuted
     after loading or before storing.  Only used for loop vectorization;
     SLP uses separate permutes.  */
  VMAT_CONTIGUOUS_PERMUTE,

  /* A simple contiguous access in which the elements need to be reversed
     after loading or before storing.  */
  VMAT_CONTIGUOUS_REVERSE,

  /* An access that uses IFN_LOAD_LANES or IFN_STORE_LANES.  */
  VMAT_LOAD_STORE_LANES,

  /* An access in which each scalar element is loaded or stored
     individually.  */
  VMAT_ELEMENTWISE,

  /* A hybrid of VMAT_CONTIGUOUS and VMAT_ELEMENTWISE, used for grouped
     SLP accesses.  Each unrolled iteration uses a contiguous load
     or store for the whole group, but the groups from separate iterations
     are combined in the same way as for VMAT_ELEMENTWISE.  */
  VMAT_STRIDED_SLP,

  /* The access uses gather loads or scatter stores.  */
  VMAT_GATHER_SCATTER
};

struct dr_vec_info {
  /* The data reference itself.  */
  data_reference *dr;
  /* The statement that contains the data reference.  */
  stmt_vec_info stmt;
  /* The misalignment in bytes of the reference, or -1 if not known.  */
  int misalignment;
  /* The byte alignment that we'd ideally like the reference to have,
     and the value that misalignment is measured against.  */
  poly_uint64 target_alignment;
  /* If true the alignment of base_decl needs to be increased.  */
  bool base_misaligned;
  tree base_decl;
};

typedef struct data_reference *dr_p;

struct _stmt_vec_info {

  enum stmt_vec_info_type type;

  /* Indicates whether this stmt is part of a computation whose result is
     used outside the loop.  */
  bool live;

  /* Stmt is part of some pattern (computation idiom).  */
  bool in_pattern_p;

  /* True if the statement was created during pattern recognition as
     part of the replacement for RELATED_STMT.  This implies that the
     statement isn't part of any basic block, although for convenience
     its gimple_bb is the same as for RELATED_STMT.  */
  bool pattern_stmt_p;

  /* Is this statement vectorizable or should it be skipped in (partial)
     vectorization.  */
  bool vectorizable;

  /* The stmt to which this info struct refers.  */
  gimple *stmt;

  /* The vec_info with respect to which STMT is vectorized.  */
  vec_info *vinfo;

  /* The vector type to be used for the LHS of this statement.  */
  tree vectype;

  /* The vectorized version of the stmt.  */
  stmt_vec_info vectorized_stmt;


  /* The following is relevant only for stmts that contain a non-scalar
     data-ref (array/pointer/struct access).  A GIMPLE stmt is expected to
     have at most one such data-ref.  */

  dr_vec_info dr_aux;

  /* Information about the data-ref relative to this loop
     nest (the loop that is being considered for vectorization).  */
  innermost_loop_behavior dr_wrt_vec_loop;

  /* For loop PHI nodes, the base and evolution part of it.  This makes sure
     this information is still available in vect_update_ivs_after_vectorizer
     where we may not be able to re-analyze the PHI nodes evolution as
     peeling for the prologue loop can make it unanalyzable.  The evolution
     part is still correct after peeling, but the base may have changed from
     the version here.  */
  tree loop_phi_evolution_base_unchanged;
  tree loop_phi_evolution_part;

  /* Used for various bookkeeping purposes, generally holding a pointer to
     some other stmt S that is in some way "related" to this stmt.
     Current use of this field is:
        If this stmt is part of a pattern (i.e. the field 'in_pattern_p' is
        true): S is the "pattern stmt" that represents (and replaces) the
        sequence of stmts that constitutes the pattern.  Similarly, the
        related_stmt of the "pattern stmt" points back to this stmt (which is
        the last stmt in the original sequence of stmts that constitutes the
        pattern).  */
  stmt_vec_info related_stmt;

  /* Used to keep a sequence of def stmts of a pattern stmt if one exists.
     The sequence is attached to the original statement rather than the
     pattern statement.  */
  gimple_seq pattern_def_seq;

  /* List of datarefs that are known to have the same alignment as the dataref
     of this stmt.  */
  vec<dr_p> same_align_refs;

  /* Selected SIMD clone's function info.  First vector element
     is SIMD clone's function decl, followed by a pair of trees (base + step)
     for linear arguments (pair of NULLs for other arguments).  */
  vec<tree> simd_clone_info;

  /* Classifies the def of this stmt.  */
  enum vect_def_type def_type;

  /* Whether the stmt is SLPed, loop-based vectorized, or both.  */
  enum slp_vect_type slp_type;

  /* Interleaving and reduction chains info.  */
  /* First element in the group.  */
  stmt_vec_info first_element;
  /* Pointer to the next element in the group.  */
  stmt_vec_info next_element;
  /* The size of the group.  */
  unsigned int size;
  /* For stores, the number of stores from this group seen.  We vectorize
     the last one.  */
  unsigned int store_count;
  /* For loads only, the gap from the previous load.  For consecutive loads,
     GAP is 1.  */
  unsigned int gap;

  /* The minimum negative dependence distance this stmt participates in
     or zero if none.  */
  unsigned int min_neg_dist;

  /* Not all stmts in the loop need to be vectorized, e.g., the increment
     of the loop induction variable or the computation of array indexes.
     RELEVANT indicates whether the stmt needs to be vectorized.  */
  enum vect_relevant relevant;

  /* For loads, true if this is a gather; for stores, true if this is a
     scatter.  */
  bool gather_scatter_p;

  /* True if this is an access with loop-invariant stride.  */
  bool strided_p;

  /* For both loads and stores.  */
  bool simd_lane_access_p;

  /* Classifies how the load or store is going to be implemented
     for loop vectorization.  */
  vect_memory_access_type memory_access_type;

  /* For reduction loops, this is the type of reduction.  */
  enum vect_reduction_type v_reduc_type;

  /* For CONST_COND_REDUCTION, record the reduction code.  */
  enum tree_code const_cond_reduc_code;

  /* On a reduction PHI the reduction type as detected by
     vect_force_simple_reduction.  */
  enum vect_reduction_type reduc_type;

  /* On a reduction PHI the def returned by vect_force_simple_reduction.
     On the def returned by vect_force_simple_reduction the
     corresponding PHI.  */
  stmt_vec_info reduc_def;

  /* The number of scalar stmt references from active SLP instances.  */
  unsigned int num_slp_uses;

  /* If nonzero, the lhs of the statement could be truncated to this
     many bits without affecting any users of the result.  */
  unsigned int min_output_precision;

  /* If nonzero, all non-boolean input operands have the same precision,
     and they could each be truncated to this many bits without changing
     the result.  */
  unsigned int min_input_precision;

  /* If OPERATION_PRECISION is nonzero, the statement could be performed on
     an integer with the sign and number of bits given by OPERATION_SIGN
     and OPERATION_PRECISION without changing the result.  */
  unsigned int operation_precision;
  signop operation_sign;
};

/* Information about a gather/scatter call.  */
struct gather_scatter_info {
  /* The internal function to use for the gather/scatter operation,
     or IFN_LAST if a built-in function should be used instead.  */
  internal_fn ifn;

  /* The FUNCTION_DECL for the built-in gather/scatter function,
     or null if an internal function should be used instead.  */
  tree decl;

  /* The loop-invariant base value.  */
  tree base;

  /* The original scalar offset, which is a non-loop-invariant SSA_NAME.  */
  tree offset;

  /* Each offset element should be multiplied by this amount before
     being added to the base.  */
  int scale;

  /* The definition type for the vectorized offset.  */
  enum vect_def_type offset_dt;

  /* The type of the vectorized offset.  */
  tree offset_vectype;

  /* The type of the scalar elements after loading or before storing.  */
  tree element_type;

  /* The type of the scalar elements being loaded or stored.  */
  tree memory_type;
};

/* Access Functions.  */
#define STMT_VINFO_TYPE(S)                 (S)->type
#define STMT_VINFO_STMT(S)                 (S)->stmt
inline loop_vec_info
STMT_VINFO_LOOP_VINFO (stmt_vec_info stmt_vinfo)
{
  if (loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (stmt_vinfo->vinfo))
    return loop_vinfo;
  return NULL;
}
inline bb_vec_info
STMT_VINFO_BB_VINFO (stmt_vec_info stmt_vinfo)
{
  if (bb_vec_info bb_vinfo = dyn_cast <bb_vec_info> (stmt_vinfo->vinfo))
    return bb_vinfo;
  return NULL;
}
#define STMT_VINFO_RELEVANT(S)             (S)->relevant
#define STMT_VINFO_LIVE_P(S)               (S)->live
#define STMT_VINFO_VECTYPE(S)              (S)->vectype
#define STMT_VINFO_VEC_STMT(S)             (S)->vectorized_stmt
#define STMT_VINFO_VECTORIZABLE(S)         (S)->vectorizable
#define STMT_VINFO_DATA_REF(S)             ((S)->dr_aux.dr + 0)
#define STMT_VINFO_GATHER_SCATTER_P(S)     (S)->gather_scatter_p
#define STMT_VINFO_STRIDED_P(S)            (S)->strided_p
#define STMT_VINFO_MEMORY_ACCESS_TYPE(S)   (S)->memory_access_type
#define STMT_VINFO_SIMD_LANE_ACCESS_P(S)   (S)->simd_lane_access_p
#define STMT_VINFO_VEC_REDUCTION_TYPE(S)   (S)->v_reduc_type
#define STMT_VINFO_VEC_CONST_COND_REDUC_CODE(S) (S)->const_cond_reduc_code

#define STMT_VINFO_DR_WRT_VEC_LOOP(S)      (S)->dr_wrt_vec_loop
#define STMT_VINFO_DR_BASE_ADDRESS(S)      (S)->dr_wrt_vec_loop.base_address
#define STMT_VINFO_DR_INIT(S)              (S)->dr_wrt_vec_loop.init
#define STMT_VINFO_DR_OFFSET(S)            (S)->dr_wrt_vec_loop.offset
#define STMT_VINFO_DR_STEP(S)              (S)->dr_wrt_vec_loop.step
#define STMT_VINFO_DR_BASE_ALIGNMENT(S)    (S)->dr_wrt_vec_loop.base_alignment
#define STMT_VINFO_DR_BASE_MISALIGNMENT(S) \
  (S)->dr_wrt_vec_loop.base_misalignment
#define STMT_VINFO_DR_OFFSET_ALIGNMENT(S) \
  (S)->dr_wrt_vec_loop.offset_alignment
#define STMT_VINFO_DR_STEP_ALIGNMENT(S) \
  (S)->dr_wrt_vec_loop.step_alignment

#define STMT_VINFO_DR_INFO(S) \
  (gcc_checking_assert ((S)->dr_aux.stmt == (S)), &(S)->dr_aux)

#define STMT_VINFO_IN_PATTERN_P(S)         (S)->in_pattern_p
#define STMT_VINFO_RELATED_STMT(S)         (S)->related_stmt
#define STMT_VINFO_PATTERN_DEF_SEQ(S)      (S)->pattern_def_seq
#define STMT_VINFO_SAME_ALIGN_REFS(S)      (S)->same_align_refs
#define STMT_VINFO_SIMD_CLONE_INFO(S)      (S)->simd_clone_info
#define STMT_VINFO_DEF_TYPE(S)             (S)->def_type
#define STMT_VINFO_GROUPED_ACCESS(S) \
  ((S)->dr_aux.dr && DR_GROUP_FIRST_ELEMENT(S))
#define STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED(S) (S)->loop_phi_evolution_base_unchanged
#define STMT_VINFO_LOOP_PHI_EVOLUTION_PART(S) (S)->loop_phi_evolution_part
#define STMT_VINFO_MIN_NEG_DIST(S)	(S)->min_neg_dist
#define STMT_VINFO_NUM_SLP_USES(S)	(S)->num_slp_uses
#define STMT_VINFO_REDUC_TYPE(S)	(S)->reduc_type
#define STMT_VINFO_REDUC_DEF(S)		(S)->reduc_def

#define DR_GROUP_FIRST_ELEMENT(S) \
  (gcc_checking_assert ((S)->dr_aux.dr), (S)->first_element)
#define DR_GROUP_NEXT_ELEMENT(S) \
  (gcc_checking_assert ((S)->dr_aux.dr), (S)->next_element)
#define DR_GROUP_SIZE(S) \
  (gcc_checking_assert ((S)->dr_aux.dr), (S)->size)
#define DR_GROUP_STORE_COUNT(S) \
  (gcc_checking_assert ((S)->dr_aux.dr), (S)->store_count)
#define DR_GROUP_GAP(S) \
  (gcc_checking_assert ((S)->dr_aux.dr), (S)->gap)

#define REDUC_GROUP_FIRST_ELEMENT(S) \
  (gcc_checking_assert (!(S)->dr_aux.dr), (S)->first_element)
#define REDUC_GROUP_NEXT_ELEMENT(S) \
  (gcc_checking_assert (!(S)->dr_aux.dr), (S)->next_element)
#define REDUC_GROUP_SIZE(S) \
  (gcc_checking_assert (!(S)->dr_aux.dr), (S)->size)

#define STMT_VINFO_RELEVANT_P(S)          ((S)->relevant != vect_unused_in_scope)

#define HYBRID_SLP_STMT(S)                ((S)->slp_type == hybrid)
#define PURE_SLP_STMT(S)                  ((S)->slp_type == pure_slp)
#define STMT_SLP_TYPE(S)                   (S)->slp_type

#define VECT_MAX_COST 1000

/* The maximum number of intermediate steps required in multi-step type
   conversion.  */
#define MAX_INTERM_CVT_STEPS         3

#define MAX_VECTORIZATION_FACTOR INT_MAX

/* Nonzero if TYPE represents a (scalar) boolean type or a type
   in the middle-end compatible with it (unsigned precision 1 integral
   types).  Used to determine which types should be vectorized as
   VECTOR_BOOLEAN_TYPE_P.  */

#define VECT_SCALAR_BOOLEAN_TYPE_P(TYPE) \
  (TREE_CODE (TYPE) == BOOLEAN_TYPE		\
   || ((TREE_CODE (TYPE) == INTEGER_TYPE	\
	|| TREE_CODE (TYPE) == ENUMERAL_TYPE)	\
       && TYPE_PRECISION (TYPE) == 1		\
       && TYPE_UNSIGNED (TYPE)))

static inline bool
nested_in_vect_loop_p (struct loop *loop, stmt_vec_info stmt_info)
{
  return (loop->inner
	  && (loop->inner == (gimple_bb (stmt_info->stmt))->loop_father));
}

/* Return TRUE if a statement represented by STMT_INFO is a part of a
   pattern.  */

static inline bool
is_pattern_stmt_p (stmt_vec_info stmt_info)
{
  return stmt_info->pattern_stmt_p;
}

/* If STMT_INFO is a pattern statement, return the statement that it
   replaces, otherwise return STMT_INFO itself.  */

inline stmt_vec_info
vect_orig_stmt (stmt_vec_info stmt_info)
{
  if (is_pattern_stmt_p (stmt_info))
    return STMT_VINFO_RELATED_STMT (stmt_info);
  return stmt_info;
}

/* Return the later statement between STMT1_INFO and STMT2_INFO.  */

static inline stmt_vec_info
get_later_stmt (stmt_vec_info stmt1_info, stmt_vec_info stmt2_info)
{
  if (gimple_uid (vect_orig_stmt (stmt1_info)->stmt)
      > gimple_uid (vect_orig_stmt (stmt2_info)->stmt))
    return stmt1_info;
  else
    return stmt2_info;
}

/* If STMT_INFO has been replaced by a pattern statement, return the
   replacement statement, otherwise return STMT_INFO itself.  */

inline stmt_vec_info
vect_stmt_to_vectorize (stmt_vec_info stmt_info)
{
  if (STMT_VINFO_IN_PATTERN_P (stmt_info))
    return STMT_VINFO_RELATED_STMT (stmt_info);
  return stmt_info;
}

/* Return true if BB is a loop header.  */

static inline bool
is_loop_header_bb_p (basic_block bb)
{
  if (bb == (bb->loop_father)->header)
    return true;
  gcc_checking_assert (EDGE_COUNT (bb->preds) == 1);
  return false;
}

/* Return pow2 (X).  */

static inline int
vect_pow2 (int x)
{
  int i, res = 1;

  for (i = 0; i < x; i++)
    res *= 2;

  return res;
}

/* Alias targetm.vectorize.builtin_vectorization_cost.  */

static inline int
builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
			    tree vectype, int misalign)
{
  return targetm.vectorize.builtin_vectorization_cost (type_of_cost,
						       vectype, misalign);
}

/* Get the cost by calling the target cost builtin.  */

static inline
int vect_get_stmt_cost (enum vect_cost_for_stmt type_of_cost)
{
  return builtin_vectorization_cost (type_of_cost, NULL, 0);
}

/* Alias targetm.vectorize.init_cost.  */

static inline void *
init_cost (struct loop *loop_info)
{
  return targetm.vectorize.init_cost (loop_info);
}

extern void dump_stmt_cost (FILE *, void *, int, enum vect_cost_for_stmt,
			    stmt_vec_info, int, unsigned,
			    enum vect_cost_model_location);

/* Alias targetm.vectorize.add_stmt_cost.  */

static inline unsigned
add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind,
	       stmt_vec_info stmt_info, int misalign,
	       enum vect_cost_model_location where)
{
  unsigned cost = targetm.vectorize.add_stmt_cost (data, count, kind,
						   stmt_info, misalign, where);
  if (dump_file && (dump_flags & TDF_DETAILS))
    dump_stmt_cost (dump_file, data, count, kind, stmt_info, misalign,
		    cost, where);
  return cost;
}

/* Alias targetm.vectorize.finish_cost.  */

static inline void
finish_cost (void *data, unsigned *prologue_cost,
	     unsigned *body_cost, unsigned *epilogue_cost)
{
  targetm.vectorize.finish_cost (data, prologue_cost, body_cost, epilogue_cost);
}

/* Alias targetm.vectorize.destroy_cost_data.  */

static inline void
destroy_cost_data (void *data)
{
  targetm.vectorize.destroy_cost_data (data);
}

inline void
add_stmt_costs (void *data, stmt_vector_for_cost *cost_vec)
{
  stmt_info_for_cost *cost;
  unsigned i;
  FOR_EACH_VEC_ELT (*cost_vec, i, cost)
    add_stmt_cost (data, cost->count, cost->kind, cost->stmt_info,
		   cost->misalign, cost->where);
}

/*-----------------------------------------------------------------*/
/* Info on data references alignment.                              */
/*-----------------------------------------------------------------*/
#define DR_MISALIGNMENT_UNKNOWN (-1)
#define DR_MISALIGNMENT_UNINITIALIZED (-2)

inline void
set_dr_misalignment (dr_vec_info *dr_info, int val)
{
  dr_info->misalignment = val;
}

inline int
dr_misalignment (dr_vec_info *dr_info)
{
  int misalign = dr_info->misalignment;
  gcc_assert (misalign != DR_MISALIGNMENT_UNINITIALIZED);
  return misalign;
}

/* Reflects the actual alignment of the first access in the vectorized loop,
   taking into account peeling/versioning if applied.  */
#define DR_MISALIGNMENT(DR) dr_misalignment (DR)
#define SET_DR_MISALIGNMENT(DR, VAL) set_dr_misalignment (DR, VAL)

/* Only defined once DR_MISALIGNMENT is defined.  */
#define DR_TARGET_ALIGNMENT(DR) ((DR)->target_alignment)

/* Return true if data access DR_INFO is aligned to its target alignment
   (which may be less than a full vector).  */

static inline bool
aligned_access_p (dr_vec_info *dr_info)
{
  return (DR_MISALIGNMENT (dr_info) == 0);
}

/* Return TRUE if the alignment of the data access is known, and FALSE
   otherwise.  */

static inline bool
known_alignment_for_access_p (dr_vec_info *dr_info)
{
  return (DR_MISALIGNMENT (dr_info) != DR_MISALIGNMENT_UNKNOWN);
}

/* Return the minimum alignment in bytes that the vectorized version
   of DR_INFO is guaranteed to have.  */

static inline unsigned int
vect_known_alignment_in_bytes (dr_vec_info *dr_info)
{
  if (DR_MISALIGNMENT (dr_info) == DR_MISALIGNMENT_UNKNOWN)
    return TYPE_ALIGN_UNIT (TREE_TYPE (DR_REF (dr_info->dr)));
  if (DR_MISALIGNMENT (dr_info) == 0)
    return known_alignment (DR_TARGET_ALIGNMENT (dr_info));
  return DR_MISALIGNMENT (dr_info) & -DR_MISALIGNMENT (dr_info);
}
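
/* For example (a worked instance of the last case above, with made-up
   numbers): a known misalignment of 12 bytes against a 32-byte target
   alignment guarantees 4-byte alignment, since 12 & -12 == 4 extracts
   the lowest set bit of the misalignment.  */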

/* Return the behavior of DR_INFO with respect to the vectorization context
   (which for outer loop vectorization might not be the behavior recorded
   in DR_INFO itself).  */

static inline innermost_loop_behavior *
vect_dr_behavior (dr_vec_info *dr_info)
{
  stmt_vec_info stmt_info = dr_info->stmt;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  if (loop_vinfo == NULL
      || !nested_in_vect_loop_p (LOOP_VINFO_LOOP (loop_vinfo), stmt_info))
    return &DR_INNERMOST (dr_info->dr);
  else
    return &STMT_VINFO_DR_WRT_VEC_LOOP (stmt_info);
}

/* Return true if the vect cost model is unlimited.  */
static inline bool
unlimited_cost_model (loop_p loop)
{
  if (loop != NULL && loop->force_vectorize
      && flag_simd_cost_model != VECT_COST_MODEL_DEFAULT)
    return flag_simd_cost_model == VECT_COST_MODEL_UNLIMITED;
  return (flag_vect_cost_model == VECT_COST_MODEL_UNLIMITED);
}

/* Return true if the loop described by LOOP_VINFO is fully-masked and
   if the first iteration should use a partial mask in order to achieve
   alignment.  */

static inline bool
vect_use_loop_mask_for_alignment_p (loop_vec_info loop_vinfo)
{
  return (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)
	  && LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo));
}

/* Return the number of vectors of type VECTYPE that are needed to get
   NUNITS elements.  NUNITS should be based on the vectorization factor,
   so it is always a known multiple of the number of elements in VECTYPE.  */

static inline unsigned int
vect_get_num_vectors (poly_uint64 nunits, tree vectype)
{
  return exact_div (nunits, TYPE_VECTOR_SUBPARTS (vectype)).to_constant ();
}

/* Return the number of copies needed for loop vectorization when
   a statement operates on vectors of type VECTYPE.  This is the
   vectorization factor divided by the number of elements in
   VECTYPE and is always known at compile time.  */

static inline unsigned int
vect_get_num_copies (loop_vec_info loop_vinfo, tree vectype)
{
  return vect_get_num_vectors (LOOP_VINFO_VECT_FACTOR (loop_vinfo), vectype);
}
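
/* For example (a worked instance with made-up numbers): with a
   vectorization factor of 8 and a VECTYPE holding 4 elements,
   vect_get_num_copies returns 8 / 4 == 2, i.e. each scalar statement
   is replaced by two vector statements per vector iteration.  */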
1340 
1341 /* Update maximum unit count *MAX_NUNITS so that it accounts for
1342    the number of units in vector type VECTYPE.  *MAX_NUNITS can be 1
1343    if we haven't yet recorded any vector types.  */
1344 
1345 static inline void
vect_update_max_nunits(poly_uint64 * max_nunits,tree vectype)1346 vect_update_max_nunits (poly_uint64 *max_nunits, tree vectype)
1347 {
1348   /* All unit counts have the form current_vector_size * X for some
1349      rational X, so two unit sizes must have a common multiple.
1350      Everything is a multiple of the initial value of 1.  */
1351   poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
1352   *max_nunits = force_common_multiple (*max_nunits, nunits);
1353 }
1354 
1355 /* Return the vectorization factor that should be used for costing
1356    purposes while vectorizing the loop described by LOOP_VINFO.
1357    Pick a reasonable estimate if the vectorization factor isn't
1358    known at compile time.  */
1359 
1360 static inline unsigned int
vect_vf_for_cost(loop_vec_info loop_vinfo)1361 vect_vf_for_cost (loop_vec_info loop_vinfo)
1362 {
1363   return estimated_poly_value (LOOP_VINFO_VECT_FACTOR (loop_vinfo));
1364 }
1365 
1366 /* Estimate the number of elements in VEC_TYPE for costing purposes.
1367    Pick a reasonable estimate if the exact number isn't known at
1368    compile time.  */
1369 
1370 static inline unsigned int
vect_nunits_for_cost(tree vec_type)1371 vect_nunits_for_cost (tree vec_type)
1372 {
1373   return estimated_poly_value (TYPE_VECTOR_SUBPARTS (vec_type));
1374 }

/* Return the maximum possible vectorization factor for LOOP_VINFO.  */

static inline unsigned HOST_WIDE_INT
vect_max_vf (loop_vec_info loop_vinfo)
{
  unsigned HOST_WIDE_INT vf;
  if (LOOP_VINFO_VECT_FACTOR (loop_vinfo).is_constant (&vf))
    return vf;
  return MAX_VECTORIZATION_FACTOR;
}

/* Return the size of the value accessed by unvectorized data reference
   DR_INFO.  This is only valid once STMT_VINFO_VECTYPE has been calculated
   for the associated gimple statement, since that guarantees that DR_INFO
   accesses either a scalar or a scalar equivalent.  ("Scalar equivalent"
   here includes things like V1SI, which can be vectorized in the same way
   as a plain SI.)  */

inline unsigned int
vect_get_scalar_dr_size (dr_vec_info *dr_info)
{
  return tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dr_info->dr))));
}
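
/* For example (target and types assumed): for a data reference that
   reads an "int" on a target where TYPE_SIZE_UNIT of int is 4, this
   returns 4; a V1SI access is likewise reported as 4 bytes, matching
   the "scalar equivalent" rule above.  */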

/* Source location + hotness information.  */
extern dump_user_location_t vect_location;

/* A macro for calling:
     dump_begin_scope (MSG, vect_location);
   via an RAII object, thus printing "=== MSG ===\n" to the dumpfile etc,
   and then calling
     dump_end_scope ();
   once the object goes out of scope, thus capturing the nesting of
   the scopes.

   These scopes affect dump messages within them: dump messages at the
   top level implicitly default to MSG_PRIORITY_USER_FACING, whereas those
   in a nested scope implicitly default to MSG_PRIORITY_INTERNALS.  */

#define DUMP_VECT_SCOPE(MSG) \
  AUTO_DUMP_SCOPE (MSG, vect_location)
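
/* An illustrative use (the function name is made up for the example):

     static bool
     example_analysis_phase (loop_vec_info loop_vinfo)
     {
       DUMP_VECT_SCOPE ("example_analysis_phase");
       ...
     }

   This prints "=== example_analysis_phase ===" to the dump file on
   entry and closes the scope automatically on every return path.  */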

/* A sentinel class for ensuring that the "vect_location" global gets
   reset at the end of a scope.

   The "vect_location" global is used during dumping and contains a
   location_t, which could contain references to a tree block via the
   ad-hoc data.  This data is used for tracking inlining information,
   but it's not a GC root; it's simply assumed that such locations never
   get accessed if the blocks are optimized away.

   Hence we need to ensure that such locations are purged at the end
   of any operations using them (e.g. via this class).  */

class auto_purge_vect_location
{
 public:
  ~auto_purge_vect_location ();
};
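
/* An illustrative sketch (the surrounding function is assumed): placing
   an instance at the top of a driver keeps vect_location from dangling:

     unsigned int
     example_driver (struct loop *loop)
     {
       auto_purge_vect_location sentinel;
       vect_location = find_loop_location (loop);
       ...
     }

   The destructor runs on every exit path and resets vect_location.  */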

/*-----------------------------------------------------------------*/
/* Function prototypes.                                            */
/*-----------------------------------------------------------------*/

/* Simple loop peeling and versioning utilities for vectorizer's purposes -
   in tree-vect-loop-manip.c.  */
extern void vect_set_loop_condition (struct loop *, loop_vec_info,
				     tree, tree, tree, bool);
extern bool slpeel_can_duplicate_loop_p (const struct loop *, const_edge);
struct loop *slpeel_tree_duplicate_loop_to_edge_cfg (struct loop *,
						     struct loop *, edge);
struct loop *vect_loop_versioning (loop_vec_info, unsigned int, bool,
				   poly_uint64);
extern struct loop *vect_do_peeling (loop_vec_info, tree, tree,
				     tree *, tree *, tree *, int, bool, bool);
extern void vect_prepare_for_masked_peels (loop_vec_info);
extern dump_user_location_t find_loop_location (struct loop *);
extern bool vect_can_advance_ivs_p (loop_vec_info);

/* In tree-vect-stmts.c.  */
extern poly_uint64 current_vector_size;
extern tree get_vectype_for_scalar_type (tree);
extern tree get_vectype_for_scalar_type_and_size (tree, poly_uint64);
extern tree get_mask_type_for_scalar_type (tree);
extern tree get_same_sized_vectype (tree, tree);
extern bool vect_get_loop_mask_type (loop_vec_info);
extern bool vect_is_simple_use (tree, vec_info *, enum vect_def_type *,
				stmt_vec_info * = NULL, gimple ** = NULL);
extern bool vect_is_simple_use (tree, vec_info *, enum vect_def_type *,
				tree *, stmt_vec_info * = NULL,
				gimple ** = NULL);
extern bool supportable_widening_operation (enum tree_code, stmt_vec_info,
					    tree, tree, enum tree_code *,
					    enum tree_code *, int *,
					    vec<tree> *);
extern bool supportable_narrowing_operation (enum tree_code, tree, tree,
					     enum tree_code *,
					     int *, vec<tree> *);
extern unsigned record_stmt_cost (stmt_vector_for_cost *, int,
				  enum vect_cost_for_stmt, stmt_vec_info,
				  int, enum vect_cost_model_location);
extern stmt_vec_info vect_finish_replace_stmt (stmt_vec_info, gimple *);
extern stmt_vec_info vect_finish_stmt_generation (stmt_vec_info, gimple *,
						  gimple_stmt_iterator *);
extern opt_result vect_mark_stmts_to_be_vectorized (loop_vec_info);
extern tree vect_get_store_rhs (stmt_vec_info);
extern tree vect_get_vec_def_for_operand_1 (stmt_vec_info, enum vect_def_type);
extern tree vect_get_vec_def_for_operand (tree, stmt_vec_info, tree = NULL);
extern void vect_get_vec_defs (tree, tree, stmt_vec_info, vec<tree> *,
			       vec<tree> *, slp_tree);
extern void vect_get_vec_defs_for_stmt_copy (vec_info *,
					     vec<tree> *, vec<tree> *);
extern tree vect_init_vector (stmt_vec_info, tree, tree,
			      gimple_stmt_iterator *);
extern tree vect_get_vec_def_for_stmt_copy (vec_info *, tree);
extern bool vect_transform_stmt (stmt_vec_info, gimple_stmt_iterator *,
				 slp_tree, slp_instance);
extern void vect_remove_stores (stmt_vec_info);
extern opt_result vect_analyze_stmt (stmt_vec_info, bool *, slp_tree,
				     slp_instance, stmt_vector_for_cost *);
extern bool vectorizable_condition (stmt_vec_info, gimple_stmt_iterator *,
				    stmt_vec_info *, bool, slp_tree,
				    stmt_vector_for_cost *);
extern bool vectorizable_shift (stmt_vec_info, gimple_stmt_iterator *,
				stmt_vec_info *, slp_tree,
				stmt_vector_for_cost *);
extern void vect_get_load_cost (stmt_vec_info, int, bool,
				unsigned int *, unsigned int *,
				stmt_vector_for_cost *,
				stmt_vector_for_cost *, bool);
extern void vect_get_store_cost (stmt_vec_info, int,
				 unsigned int *, stmt_vector_for_cost *);
extern bool vect_supportable_shift (enum tree_code, tree);
extern tree vect_gen_perm_mask_any (tree, const vec_perm_indices &);
extern tree vect_gen_perm_mask_checked (tree, const vec_perm_indices &);
extern void optimize_mask_stores (struct loop *);
extern gcall *vect_gen_while (tree, tree, tree);
extern tree vect_gen_while_not (gimple_seq *, tree, tree, tree);
extern opt_result vect_get_vector_types_for_stmt (stmt_vec_info, tree *,
						  tree *);
extern opt_tree vect_get_mask_type_for_stmt (stmt_vec_info);

/* In tree-vect-data-refs.c.  */
extern bool vect_can_force_dr_alignment_p (const_tree, poly_uint64);
extern enum dr_alignment_support vect_supportable_dr_alignment
				   (dr_vec_info *, bool);
extern tree vect_get_smallest_scalar_type (stmt_vec_info, HOST_WIDE_INT *,
					   HOST_WIDE_INT *);
extern opt_result vect_analyze_data_ref_dependences (loop_vec_info,
						     unsigned int *);
extern bool vect_slp_analyze_instance_dependence (slp_instance);
extern opt_result vect_enhance_data_refs_alignment (loop_vec_info);
extern opt_result vect_analyze_data_refs_alignment (loop_vec_info);
extern opt_result vect_verify_datarefs_alignment (loop_vec_info);
extern bool vect_slp_analyze_and_verify_instance_alignment (slp_instance);
extern opt_result vect_analyze_data_ref_accesses (vec_info *);
extern opt_result vect_prune_runtime_alias_test_list (loop_vec_info);
extern bool vect_gather_scatter_fn_p (bool, bool, tree, tree, unsigned int,
				      signop, int, internal_fn *, tree *);
extern bool vect_check_gather_scatter (stmt_vec_info, loop_vec_info,
				       gather_scatter_info *);
extern opt_result vect_find_stmt_data_reference (loop_p, gimple *,
						 vec<data_reference_p> *);
extern opt_result vect_analyze_data_refs (vec_info *, poly_uint64 *);
extern void vect_record_base_alignments (vec_info *);
extern tree vect_create_data_ref_ptr (stmt_vec_info, tree, struct loop *, tree,
				      tree *, gimple_stmt_iterator *,
				      gimple **, bool,
				      tree = NULL_TREE, tree = NULL_TREE);
extern tree bump_vector_ptr (tree, gimple *, gimple_stmt_iterator *,
			     stmt_vec_info, tree);
extern void vect_copy_ref_info (tree, tree);
extern tree vect_create_destination_var (tree, tree);
extern bool vect_grouped_store_supported (tree, unsigned HOST_WIDE_INT);
extern bool vect_store_lanes_supported (tree, unsigned HOST_WIDE_INT, bool);
extern bool vect_grouped_load_supported (tree, bool, unsigned HOST_WIDE_INT);
extern bool vect_load_lanes_supported (tree, unsigned HOST_WIDE_INT, bool);
extern void vect_permute_store_chain (vec<tree>, unsigned int, stmt_vec_info,
				      gimple_stmt_iterator *, vec<tree> *);
extern tree vect_setup_realignment (stmt_vec_info, gimple_stmt_iterator *,
				    tree *, enum dr_alignment_support, tree,
				    struct loop **);
extern void vect_transform_grouped_load (stmt_vec_info, vec<tree>, int,
					 gimple_stmt_iterator *);
extern void vect_record_grouped_load_vectors (stmt_vec_info, vec<tree>);
extern tree vect_get_new_vect_var (tree, enum vect_var_kind, const char *);
extern tree vect_get_new_ssa_name (tree, enum vect_var_kind,
				   const char * = NULL);
extern tree vect_create_addr_base_for_vector_ref (stmt_vec_info, gimple_seq *,
						  tree, tree = NULL_TREE);

/* In tree-vect-loop.c.  */
/* FORNOW: Used in tree-parloops.c.  */
extern stmt_vec_info vect_force_simple_reduction (loop_vec_info, stmt_vec_info,
						  bool *, bool);
/* Used in gimple-loop-interchange.c.  */
extern bool check_reduction_path (dump_user_location_t, loop_p, gphi *, tree,
				  enum tree_code);
/* Drive for loop analysis stage.  */
extern opt_loop_vec_info vect_analyze_loop (struct loop *,
					    loop_vec_info,
					    vec_info_shared *);
extern tree vect_build_loop_niters (loop_vec_info, bool * = NULL);
extern void vect_gen_vector_loop_niters (loop_vec_info, tree, tree *,
					 tree *, bool);
extern tree vect_halve_mask_nunits (tree);
extern tree vect_double_mask_nunits (tree);
extern void vect_record_loop_mask (loop_vec_info, vec_loop_masks *,
				   unsigned int, tree);
extern tree vect_get_loop_mask (gimple_stmt_iterator *, vec_loop_masks *,
				unsigned int, tree, unsigned int);

/* Drive for loop transformation stage.  */
extern struct loop *vect_transform_loop (loop_vec_info);
extern opt_loop_vec_info vect_analyze_loop_form (struct loop *,
						 vec_info_shared *);
extern bool vectorizable_live_operation (stmt_vec_info, gimple_stmt_iterator *,
					 slp_tree, int, stmt_vec_info *,
					 stmt_vector_for_cost *);
extern bool vectorizable_reduction (stmt_vec_info, gimple_stmt_iterator *,
				    stmt_vec_info *, slp_tree, slp_instance,
				    stmt_vector_for_cost *);
extern bool vectorizable_induction (stmt_vec_info, gimple_stmt_iterator *,
				    stmt_vec_info *, slp_tree,
				    stmt_vector_for_cost *);
extern tree get_initial_def_for_reduction (stmt_vec_info, tree, tree *);
extern bool vect_worthwhile_without_simd_p (vec_info *, tree_code);
extern int vect_get_known_peeling_cost (loop_vec_info, int, int *,
					stmt_vector_for_cost *,
					stmt_vector_for_cost *,
					stmt_vector_for_cost *);
extern tree cse_and_gimplify_to_preheader (loop_vec_info, tree);

/* In tree-vect-slp.c.  */
extern void vect_free_slp_instance (slp_instance, bool);
extern bool vect_transform_slp_perm_load (slp_tree, vec<tree>,
					  gimple_stmt_iterator *, poly_uint64,
					  slp_instance, bool, unsigned *);
extern bool vect_slp_analyze_operations (vec_info *);
extern void vect_schedule_slp (vec_info *);
extern opt_result vect_analyze_slp (vec_info *, unsigned);
extern bool vect_make_slp_decision (loop_vec_info);
extern void vect_detect_hybrid_slp (loop_vec_info);
extern void vect_get_slp_defs (vec<tree>, slp_tree, vec<vec<tree> > *);
extern bool vect_slp_bb (basic_block);
extern stmt_vec_info vect_find_last_scalar_stmt_in_slp (slp_tree);
extern bool is_simple_and_all_uses_invariant (stmt_vec_info, loop_vec_info);
extern bool can_duplicate_and_interleave_p (unsigned int, machine_mode,
					    unsigned int * = NULL,
					    tree * = NULL, tree * = NULL);
extern void duplicate_and_interleave (gimple_seq *, tree, vec<tree>,
				      unsigned int, vec<tree> &);
extern int vect_get_place_in_interleaving_chain (stmt_vec_info, stmt_vec_info);

/* In tree-vect-patterns.c.  */
/* Pattern recognition functions.
   Additional pattern recognition functions can (and will) be added
   in the future.  */
void vect_pattern_recog (vec_info *);

/* In tree-vectorizer.c.  */
unsigned vectorize_loops (void);
void vect_free_loop_info_assumptions (struct loop *);

#endif  /* GCC_TREE_VECTORIZER_H  */