/* Vectorizer
   Copyright (C) 2003-2016 Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#ifndef GCC_TREE_VECTORIZER_H
#define GCC_TREE_VECTORIZER_H

#include "tree-data-ref.h"
#include "target.h"

/* Used for naming of new temporaries.  */
enum vect_var_kind {
  vect_simple_var,
  vect_pointer_var,
  vect_scalar_var,
  vect_mask_var
};

/* Defines type of operation.  */
enum operation_type {
  unary_op = 1,
  binary_op,
  ternary_op
};

/* Define type of available alignment support.  */
enum dr_alignment_support {
  dr_unaligned_unsupported,
  dr_unaligned_supported,
  dr_explicit_realign,
  dr_explicit_realign_optimized,
  dr_aligned
};

/* Define type of def-use cross-iteration cycle.  */
enum vect_def_type {
  vect_uninitialized_def = 0,
  vect_constant_def = 1,
  vect_external_def,
  vect_internal_def,
  vect_induction_def,
  vect_reduction_def,
  vect_double_reduction_def,
  vect_nested_cycle,
  vect_unknown_def_type
};
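
/* A rough illustration of how the common def types map onto a simple
   loop (a sketch, not the authoritative classification; see
   vect_is_simple_use):

     for (int i = 0; i < n; i++)   i:         vect_induction_def
       sum += a[i] * c;            sum:       vect_reduction_def
                                   c:         vect_external_def (invariant)
                                   a[i] * c:  vect_internal_def
                                   constants: vect_constant_def  */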

/* Define type of reduction.  */
enum vect_reduction_type {
  TREE_CODE_REDUCTION,
  COND_REDUCTION,
  INTEGER_INDUC_COND_REDUCTION
};

#define VECTORIZABLE_CYCLE_DEF(D) (((D) == vect_reduction_def)           \
                                   || ((D) == vect_double_reduction_def) \
                                   || ((D) == vect_nested_cycle))

/* Structure to encapsulate information about a group of like
   instructions to be presented to the target cost model.  */
struct stmt_info_for_cost {
  int count;
  enum vect_cost_for_stmt kind;
  gimple *stmt;
  int misalign;
};

typedef vec<stmt_info_for_cost> stmt_vector_for_cost;

/************************************************************************
  SLP
 ************************************************************************/
typedef struct _slp_tree *slp_tree;

/* A computation tree of an SLP instance.  Each node corresponds to a group of
   stmts to be packed in a SIMD stmt.  */
struct _slp_tree {
  /* Nodes that contain the def-stmts of the operands of this node's
     statements.  */
  vec<slp_tree> children;
  /* A group of scalar stmts to be vectorized together.  */
  vec<gimple *> stmts;
  /* Load permutation relative to the stores, NULL if there is no
     permutation.  */
  vec<unsigned> load_permutation;
  /* Vectorized stmt/s.  */
  vec<gimple *> vec_stmts;
  /* Number of vector stmts that are created to replace the group of scalar
     stmts.  It is calculated during the transformation phase as the number of
     scalar elements in one scalar iteration (GROUP_SIZE) multiplied by VF
     divided by the vector size.  */
  unsigned int vec_stmts_size;
  /* Whether the scalar computations use two different operators.  */
  bool two_operators;
  /* The DEF type of this node.  */
  enum vect_def_type def_type;
};
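
/* For illustration only: given a group of two scalar stores

     a[i]   = b[i]   + x;
     a[i+1] = b[i+1] + x;

   a (simplified) SLP tree could look like

     { a[i] = ..., a[i+1] = ... }      root: the store group
       -> { ... + ..., ... + ... }     child: the two additions
          -> { b[i], b[i+1] }          grandchild: the two loads

   with each node's scalar stmts packed into one or more SIMD stmts
   (see vec_stmts_size) during the transformation phase.  */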


/* SLP instance is a sequence of stmts in a loop that can be packed into
   SIMD stmts.  */
typedef struct _slp_instance {
  /* The root of the SLP tree.  */
  slp_tree root;

  /* Size of groups of scalar stmts that will be replaced by SIMD stmt/s.  */
  unsigned int group_size;

  /* The unrolling factor required to vectorize this SLP instance.  */
  unsigned int unrolling_factor;

  /* The group of nodes that contain loads of this SLP instance.  */
  vec<slp_tree> loads;
} *slp_instance;


/* Access Functions.  */
#define SLP_INSTANCE_TREE(S)                     (S)->root
#define SLP_INSTANCE_GROUP_SIZE(S)               (S)->group_size
#define SLP_INSTANCE_UNROLLING_FACTOR(S)         (S)->unrolling_factor
#define SLP_INSTANCE_LOADS(S)                    (S)->loads

#define SLP_TREE_CHILDREN(S)                     (S)->children
#define SLP_TREE_SCALAR_STMTS(S)                 (S)->stmts
#define SLP_TREE_VEC_STMTS(S)                    (S)->vec_stmts
#define SLP_TREE_NUMBER_OF_VEC_STMTS(S)          (S)->vec_stmts_size
#define SLP_TREE_LOAD_PERMUTATION(S)             (S)->load_permutation
#define SLP_TREE_TWO_OPERATORS(S)		 (S)->two_operators
#define SLP_TREE_DEF_TYPE(S)			 (S)->def_type
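
/* A minimal sketch of a post-order walk over an SLP tree using the
   accessors above (the visit () callback is hypothetical and shown
   only for exposition):

     static void
     walk_slp_tree (slp_tree node)
     {
       unsigned i;
       slp_tree child;
       gimple *stmt;
       FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
         walk_slp_tree (child);
       FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
         visit (stmt);
     }  */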



/* This struct is used to store the information of a data reference,
   including the data ref itself, the access offset (calculated by summing its
   offset and init) and the segment length for aliasing checks.
   This is used to merge alias checks.  */

struct dr_with_seg_len
{
  dr_with_seg_len (data_reference_p d, tree len)
    : dr (d),
      offset (size_binop (PLUS_EXPR, DR_OFFSET (d), DR_INIT (d))),
      seg_len (len) {}

  data_reference_p dr;
  tree offset;
  tree seg_len;
};

/* This struct contains two dr_with_seg_len objects with aliasing data
   refs.  Two comparisons are generated from them.  */

struct dr_with_seg_len_pair_t
{
  dr_with_seg_len_pair_t (const dr_with_seg_len& d1,
			  const dr_with_seg_len& d2)
    : first (d1), second (d2) {}

  dr_with_seg_len first;
  dr_with_seg_len second;
};
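
/* From one such pair the vectorizer emits a runtime test that the two
   segments do not overlap; conceptually (a sketch, the generated code
   is built from the two comparisons noted above):

     no_alias = (first.offset + first.seg_len <= second.offset)
		|| (second.offset + second.seg_len <= first.offset);  */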



/* Vectorizer state common between loop and basic-block vectorization.  */
struct vec_info {
  enum { bb, loop } kind;

  /* All SLP instances.  */
  vec<slp_instance> slp_instances;

  /* All data references.  */
  vec<data_reference_p> datarefs;

  /* All data dependences.  */
  vec<ddr_p> ddrs;

  /* All interleaving chains of stores, represented by the first
     stmt in the chain.  */
  vec<gimple *> grouped_stores;

  /* Cost data used by the target cost model.  */
  void *target_cost_data;
};

struct _loop_vec_info;
struct _bb_vec_info;

template<>
template<>
inline bool
is_a_helper <_loop_vec_info *>::test (vec_info *i)
{
  return i->kind == vec_info::loop;
}

template<>
template<>
inline bool
is_a_helper <_bb_vec_info *>::test (vec_info *i)
{
  return i->kind == vec_info::bb;
}


/*-----------------------------------------------------------------*/
/* Info on vectorized loops.                                       */
/*-----------------------------------------------------------------*/
typedef struct _loop_vec_info : public vec_info {

  /* The loop to which this info struct refers.  */
  struct loop *loop;

  /* The loop basic blocks.  */
  basic_block *bbs;

  /* Number of latch executions.  */
  tree num_itersm1;
  /* Number of iterations.  */
  tree num_iters;
  /* Number of iterations of the original loop.  */
  tree num_iters_unchanged;

  /* Threshold of number of iterations below which vectorization will not be
     performed.  It is calculated from MIN_PROFITABLE_ITERS and
     PARAM_MIN_VECT_LOOP_BOUND.  */
  unsigned int th;

  /* Is the loop vectorizable?  */
  bool vectorizable;

  /* Unrolling factor.  */
  int vectorization_factor;

  /* The data reference with unknown alignment according to which the
     loop was peeled.  */
  struct data_reference *unaligned_dr;

  /* peeling_for_alignment indicates whether peeling for alignment will take
     place, and what the peeling factor should be:
     peeling_for_alignment = X means:
        If X=0: Peeling for alignment will not be applied.
        If X>0: Peel first X iterations.
        If X=-1: Generate a runtime test to calculate the number of iterations
                 to be peeled, using the dataref recorded in the field
                 unaligned_dr.  */
  int peeling_for_alignment;

  /* The mask used to check the alignment of pointers or arrays.  */
  int ptr_mask;

  /* The loop nest in which the data dependences are computed.  */
  vec<loop_p> loop_nest;

  /* Data Dependence Relations defining address ranges that are candidates
     for a run-time aliasing check.  */
  vec<ddr_p> may_alias_ddrs;

  /* Data Dependence Relations defining address ranges together with segment
     lengths from which the run-time aliasing check is built.  */
  vec<dr_with_seg_len_pair_t> comp_alias_ddrs;

  /* Statements in the loop that have data references that are candidates for a
     runtime (loop versioning) misalignment check.  */
  vec<gimple *> may_misalign_stmts;

  /* The unrolling factor needed to SLP the loop.  In case pure SLP is
     applied to the loop, i.e., no unrolling is needed, this is 1.  */
  unsigned slp_unrolling_factor;

  /* Reduction cycles detected in the loop.  Used in loop-aware SLP.  */
  vec<gimple *> reductions;

  /* All reduction chains in the loop, represented by the first
     stmt in the chain.  */
  vec<gimple *> reduction_chains;

  /* Cost vector for a single scalar iteration.  */
  vec<stmt_info_for_cost> scalar_cost_vec;

  /* Cost of a single scalar iteration.  */
  int single_scalar_iteration_cost;

  /* When we have grouped data accesses with gaps, we may introduce invalid
     memory accesses.  We peel the last iteration of the loop to prevent
     this.  */
  bool peeling_for_gaps;

  /* When the number of iterations is not a multiple of the vector size
     we need to peel off iterations at the end to form an epilogue loop.  */
  bool peeling_for_niter;

  /* Reductions are canonicalized so that the last operand is the reduction
     operand.  If this places a constant into RHS1, this decanonicalizes
     GIMPLE for other phases, so we must track when this has occurred and
     fix it up.  */
  bool operands_swapped;

  /* True if there are no loop carried data dependencies in the loop.
     If loop->safelen <= 1, then this is always true, either the loop
     didn't have any loop carried data dependencies, or the loop is being
     vectorized guarded with some runtime alias checks, or couldn't
     be vectorized at all, but then this field shouldn't be used.
     For loop->safelen >= 2, the user has asserted that there are no
     backward dependencies, but there still could be loop carried forward
     dependencies in such loops.  This flag will be false if normal
     vectorizer data dependency analysis would fail or require versioning
     for alias, but because of loop->safelen >= 2 it has been vectorized
     even without versioning for alias.  E.g. in:
     #pragma omp simd
     for (int i = 0; i < m; i++)
       a[i] = a[i + k] * c;
     (or #pragma simd or #pragma ivdep) we can vectorize this and it will
     DTRT even for k > 0 && k < m, but without safelen we would not
     vectorize this, so this field would be false.  */
  bool no_data_dependencies;

  /* If if-conversion versioned this loop before conversion, this is the
     loop version without if-conversion.  */
  struct loop *scalar_loop;

  /* Mark loops having masked stores.  */
  bool has_mask_store;

} *loop_vec_info;

/* Access Functions.  */
#define LOOP_VINFO_LOOP(L)                 (L)->loop
#define LOOP_VINFO_BBS(L)                  (L)->bbs
#define LOOP_VINFO_NITERSM1(L)             (L)->num_itersm1
#define LOOP_VINFO_NITERS(L)               (L)->num_iters
/* Since LOOP_VINFO_NITERS and LOOP_VINFO_NITERSM1 can change after
   prologue peeling, retain the total unchanged scalar loop iterations
   for the cost model.  */
#define LOOP_VINFO_NITERS_UNCHANGED(L)     (L)->num_iters_unchanged
#define LOOP_VINFO_COST_MODEL_THRESHOLD(L) (L)->th
#define LOOP_VINFO_VECTORIZABLE_P(L)       (L)->vectorizable
#define LOOP_VINFO_VECT_FACTOR(L)          (L)->vectorization_factor
#define LOOP_VINFO_PTR_MASK(L)             (L)->ptr_mask
#define LOOP_VINFO_LOOP_NEST(L)            (L)->loop_nest
#define LOOP_VINFO_DATAREFS(L)             (L)->datarefs
#define LOOP_VINFO_DDRS(L)                 (L)->ddrs
#define LOOP_VINFO_INT_NITERS(L)           (TREE_INT_CST_LOW ((L)->num_iters))
#define LOOP_VINFO_PEELING_FOR_ALIGNMENT(L) (L)->peeling_for_alignment
#define LOOP_VINFO_UNALIGNED_DR(L)         (L)->unaligned_dr
#define LOOP_VINFO_MAY_MISALIGN_STMTS(L)   (L)->may_misalign_stmts
#define LOOP_VINFO_MAY_ALIAS_DDRS(L)       (L)->may_alias_ddrs
#define LOOP_VINFO_COMP_ALIAS_DDRS(L)      (L)->comp_alias_ddrs
#define LOOP_VINFO_GROUPED_STORES(L)       (L)->grouped_stores
#define LOOP_VINFO_SLP_INSTANCES(L)        (L)->slp_instances
#define LOOP_VINFO_SLP_UNROLLING_FACTOR(L) (L)->slp_unrolling_factor
#define LOOP_VINFO_REDUCTIONS(L)           (L)->reductions
#define LOOP_VINFO_REDUCTION_CHAINS(L)     (L)->reduction_chains
#define LOOP_VINFO_TARGET_COST_DATA(L)     (L)->target_cost_data
#define LOOP_VINFO_PEELING_FOR_GAPS(L)     (L)->peeling_for_gaps
#define LOOP_VINFO_OPERANDS_SWAPPED(L)     (L)->operands_swapped
#define LOOP_VINFO_PEELING_FOR_NITER(L)    (L)->peeling_for_niter
#define LOOP_VINFO_NO_DATA_DEPENDENCIES(L) (L)->no_data_dependencies
#define LOOP_VINFO_SCALAR_LOOP(L)	   (L)->scalar_loop
#define LOOP_VINFO_HAS_MASK_STORE(L)       (L)->has_mask_store
#define LOOP_VINFO_SCALAR_ITERATION_COST(L) (L)->scalar_cost_vec
#define LOOP_VINFO_SINGLE_SCALAR_ITERATION_COST(L) (L)->single_scalar_iteration_cost

#define LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT(L) \
  ((L)->may_misalign_stmts.length () > 0)
#define LOOP_REQUIRES_VERSIONING_FOR_ALIAS(L)     \
  ((L)->may_alias_ddrs.length () > 0)

#define LOOP_VINFO_NITERS_KNOWN_P(L)          \
  (tree_fits_shwi_p ((L)->num_iters) && tree_to_shwi ((L)->num_iters) > 0)
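
/* Typical guarded use of the iteration-count macros (a sketch): read
   LOOP_VINFO_INT_NITERS only after LOOP_VINFO_NITERS_KNOWN_P holds:

     if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
	 && LOOP_VINFO_INT_NITERS (loop_vinfo)
	    < (unsigned) LOOP_VINFO_VECT_FACTOR (loop_vinfo))
       ... too few iterations for the chosen vectorization factor ...  */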

static inline loop_vec_info
loop_vec_info_for_loop (struct loop *loop)
{
  return (loop_vec_info) loop->aux;
}

static inline bool
nested_in_vect_loop_p (struct loop *loop, gimple *stmt)
{
  return (loop->inner
          && (loop->inner == (gimple_bb (stmt))->loop_father));
}

typedef struct _bb_vec_info : public vec_info
{
  basic_block bb;
  gimple_stmt_iterator region_begin;
  gimple_stmt_iterator region_end;
} *bb_vec_info;

#define BB_VINFO_BB(B)               (B)->bb
#define BB_VINFO_GROUPED_STORES(B)   (B)->grouped_stores
#define BB_VINFO_SLP_INSTANCES(B)    (B)->slp_instances
#define BB_VINFO_DATAREFS(B)         (B)->datarefs
#define BB_VINFO_DDRS(B)             (B)->ddrs
#define BB_VINFO_TARGET_COST_DATA(B) (B)->target_cost_data

static inline bb_vec_info
vec_info_for_bb (basic_block bb)
{
  return (bb_vec_info) bb->aux;
}

/*-----------------------------------------------------------------*/
/* Info on vectorized defs.                                        */
/*-----------------------------------------------------------------*/
enum stmt_vec_info_type {
  undef_vec_info_type = 0,
  load_vec_info_type,
  store_vec_info_type,
  shift_vec_info_type,
  op_vec_info_type,
  call_vec_info_type,
  call_simd_clone_vec_info_type,
  assignment_vec_info_type,
  condition_vec_info_type,
  comparison_vec_info_type,
  reduc_vec_info_type,
  induc_vec_info_type,
  type_promotion_vec_info_type,
  type_demotion_vec_info_type,
  type_conversion_vec_info_type,
  loop_exit_ctrl_vec_info_type
};

/* Indicates whether/how a variable is used in the scope of loop/basic
   block.  */
enum vect_relevant {
  vect_unused_in_scope = 0,
  /* The def is in the inner loop, and the use is in the outer loop, and the
     use is a reduction stmt.  */
  vect_used_in_outer_by_reduction,
  /* The def is in the inner loop, and the use is in the outer loop (and is
     not part of reduction).  */
  vect_used_in_outer,

  /* Defs that feed computations that end up (only) in a reduction.  These
     defs may be used by non-reduction stmts, but eventually, any
     computations/values that are affected by these defs are used to compute
     a reduction (i.e. don't get stored to memory, for example).  We use this
     to identify computations whose evaluation order we may change.  */
  vect_used_by_reduction,

  vect_used_in_scope
};

/* The type of vectorization that can be applied to the stmt: regular loop-based
   vectorization; pure SLP - the stmt is a part of SLP instances and does not
   have uses outside SLP instances; or hybrid SLP and loop-based - the stmt is
   a part of an SLP instance and also must be loop-based vectorized, since it
   has uses outside SLP sequences.

   In the loop context the meanings of pure and hybrid SLP are slightly
   different.  By saying that pure SLP is applied to the loop, we mean that we
   exploit only intra-iteration parallelism in the loop; i.e., the loop can be
   vectorized without doing any conceptual unrolling, because we don't pack
   together stmts from different iterations, only within a single iteration.
   Loop hybrid SLP means that we exploit both intra-iteration and
   inter-iteration parallelism (e.g., the number of elements in the vector is 4
   and the slp-group-size is 2, in which case we don't have enough parallelism
   within an iteration, so we obtain the rest of the parallelism from subsequent
   iterations by unrolling the loop by 2).  */
enum slp_vect_type {
  loop_vect = 0,
  pure_slp,
  hybrid
};
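
/* To make the example from the comment above concrete: with 4-element
   vectors and an SLP group size of 2, a loop such as

     for (i = 0; i < n; i += 2)
       {
	 a[i]   = b[i]   * x;
	 a[i+1] = b[i+1] * x;
       }

   packs the two stmts of one iteration (intra-iteration parallelism)
   and then conceptually unrolls by 2 so that two iterations fill one
   4-element vector (inter-iteration parallelism).  */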


typedef struct data_reference *dr_p;

typedef struct _stmt_vec_info {

  enum stmt_vec_info_type type;

  /* Indicates whether this stmt is part of a computation whose result is
     used outside the loop.  */
  bool live;

  /* Stmt is part of some pattern (computation idiom).  */
  bool in_pattern_p;

  /* The stmt to which this info struct refers.  */
  gimple *stmt;

  /* The vec_info with respect to which STMT is vectorized.  */
  vec_info *vinfo;

  /* The vector type to be used for the LHS of this statement.  */
  tree vectype;

  /* The vectorized version of the stmt.  */
  gimple *vectorized_stmt;


  /** The following is relevant only for stmts that contain a non-scalar
     data-ref (array/pointer/struct access).  A GIMPLE stmt is expected to have
     at most one such data-ref.  **/

  /* Information about the data-ref (access function, etc),
     relative to the inner-most containing loop.  */
  struct data_reference *data_ref_info;

  /* Information about the data-ref relative to this loop
     nest (the loop that is being considered for vectorization).  */
  tree dr_base_address;
  tree dr_init;
  tree dr_offset;
  tree dr_step;
  tree dr_aligned_to;

  /* For loop PHI nodes, the base and evolution part of it.  This makes sure
     this information is still available in vect_update_ivs_after_vectorizer
     where we may not be able to re-analyze the PHI nodes evolution as
     peeling for the prologue loop can make it unanalyzable.  The evolution
     part is still correct after peeling, but the base may have changed from
     the version here.  */
  tree loop_phi_evolution_base_unchanged;
  tree loop_phi_evolution_part;

  /* Used for various bookkeeping purposes, generally holding a pointer to
     some other stmt S that is in some way "related" to this stmt.
     Current use of this field is:
        If this stmt is part of a pattern (i.e. the field 'in_pattern_p' is
        true): S is the "pattern stmt" that represents (and replaces) the
        sequence of stmts that constitutes the pattern.  Similarly, the
        related_stmt of the "pattern stmt" points back to this stmt (which is
        the last stmt in the original sequence of stmts that constitutes the
        pattern).  */
  gimple *related_stmt;

  /* Used to keep a sequence of def stmts of a pattern stmt if such exists.  */
  gimple_seq pattern_def_seq;

  /* List of datarefs that are known to have the same alignment as the dataref
     of this stmt.  */
  vec<dr_p> same_align_refs;

  /* Selected SIMD clone's function info.  First vector element
     is SIMD clone's function decl, followed by a pair of trees (base + step)
     for linear arguments (pair of NULLs for other arguments).  */
  vec<tree> simd_clone_info;

  /* Classify the def of this stmt.  */
  enum vect_def_type def_type;

  /* Whether the stmt is SLPed, loop-based vectorized, or both.  */
  enum slp_vect_type slp_type;

  /* Interleaving and reduction chains info.  */
  /* First element in the group.  */
  gimple *first_element;
  /* Pointer to the next element in the group.  */
  gimple *next_element;
  /* For data-refs, in case two or more stmts share a data-ref, this is the
     pointer to the previously detected stmt with the same dr.  */
  gimple *same_dr_stmt;
  /* The size of the group.  */
  unsigned int size;
  /* For stores, number of stores from this group seen.  We vectorize the last
     one.  */
  unsigned int store_count;
  /* For loads only, the gap from the previous load.  For consecutive loads, GAP
     is 1.  */
  unsigned int gap;

  /* The minimum negative dependence distance this stmt participates in
     or zero if none.  */
  unsigned int min_neg_dist;

  /* Not all stmts in the loop need to be vectorized, e.g., the increment
     of the loop induction variable and computation of array indexes.
     RELEVANT indicates whether the stmt needs to be vectorized.  */
  enum vect_relevant relevant;

  /* Is this statement vectorizable or should it be skipped in (partial)
     vectorization.  */
  bool vectorizable;

  /* For loads if this is a gather, for stores if this is a scatter.  */
  bool gather_scatter_p;

  /* True if this is an access with loop-invariant stride.  */
  bool strided_p;

  /* For both loads and stores.  */
  bool simd_lane_access_p;

  /* For reduction loops, this is the type of reduction.  */
  enum vect_reduction_type v_reduc_type;

  /* The number of scalar stmt references from active SLP instances.  */
  unsigned int num_slp_uses;
} *stmt_vec_info;

/* Access Functions.  */
#define STMT_VINFO_TYPE(S)                 (S)->type
#define STMT_VINFO_STMT(S)                 (S)->stmt
inline loop_vec_info
STMT_VINFO_LOOP_VINFO (stmt_vec_info stmt_vinfo)
{
  if (loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (stmt_vinfo->vinfo))
    return loop_vinfo;
  return NULL;
}
inline bb_vec_info
STMT_VINFO_BB_VINFO (stmt_vec_info stmt_vinfo)
{
  if (bb_vec_info bb_vinfo = dyn_cast <bb_vec_info> (stmt_vinfo->vinfo))
    return bb_vinfo;
  return NULL;
}
#define STMT_VINFO_RELEVANT(S)             (S)->relevant
#define STMT_VINFO_LIVE_P(S)               (S)->live
#define STMT_VINFO_VECTYPE(S)              (S)->vectype
#define STMT_VINFO_VEC_STMT(S)             (S)->vectorized_stmt
#define STMT_VINFO_VECTORIZABLE(S)         (S)->vectorizable
#define STMT_VINFO_DATA_REF(S)             (S)->data_ref_info
#define STMT_VINFO_GATHER_SCATTER_P(S)	   (S)->gather_scatter_p
#define STMT_VINFO_STRIDED_P(S)	   	   (S)->strided_p
#define STMT_VINFO_SIMD_LANE_ACCESS_P(S)   (S)->simd_lane_access_p
#define STMT_VINFO_VEC_REDUCTION_TYPE(S)   (S)->v_reduc_type

#define STMT_VINFO_DR_BASE_ADDRESS(S)      (S)->dr_base_address
#define STMT_VINFO_DR_INIT(S)              (S)->dr_init
#define STMT_VINFO_DR_OFFSET(S)            (S)->dr_offset
#define STMT_VINFO_DR_STEP(S)              (S)->dr_step
#define STMT_VINFO_DR_ALIGNED_TO(S)        (S)->dr_aligned_to

#define STMT_VINFO_IN_PATTERN_P(S)         (S)->in_pattern_p
#define STMT_VINFO_RELATED_STMT(S)         (S)->related_stmt
#define STMT_VINFO_PATTERN_DEF_SEQ(S)      (S)->pattern_def_seq
#define STMT_VINFO_SAME_ALIGN_REFS(S)      (S)->same_align_refs
#define STMT_VINFO_SIMD_CLONE_INFO(S)	   (S)->simd_clone_info
#define STMT_VINFO_DEF_TYPE(S)             (S)->def_type
#define STMT_VINFO_GROUP_FIRST_ELEMENT(S)  (S)->first_element
#define STMT_VINFO_GROUP_NEXT_ELEMENT(S)   (S)->next_element
#define STMT_VINFO_GROUP_SIZE(S)           (S)->size
#define STMT_VINFO_GROUP_STORE_COUNT(S)    (S)->store_count
#define STMT_VINFO_GROUP_GAP(S)            (S)->gap
#define STMT_VINFO_GROUP_SAME_DR_STMT(S)   (S)->same_dr_stmt
#define STMT_VINFO_GROUPED_ACCESS(S)      ((S)->first_element != NULL && (S)->data_ref_info)
#define STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED(S) (S)->loop_phi_evolution_base_unchanged
#define STMT_VINFO_LOOP_PHI_EVOLUTION_PART(S) (S)->loop_phi_evolution_part
#define STMT_VINFO_MIN_NEG_DIST(S)	(S)->min_neg_dist
#define STMT_VINFO_NUM_SLP_USES(S)	(S)->num_slp_uses

#define GROUP_FIRST_ELEMENT(S)          (S)->first_element
#define GROUP_NEXT_ELEMENT(S)           (S)->next_element
#define GROUP_SIZE(S)                   (S)->size
#define GROUP_STORE_COUNT(S)            (S)->store_count
#define GROUP_GAP(S)                    (S)->gap
#define GROUP_SAME_DR_STMT(S)           (S)->same_dr_stmt

#define STMT_VINFO_RELEVANT_P(S)          ((S)->relevant != vect_unused_in_scope)

#define HYBRID_SLP_STMT(S)                ((S)->slp_type == hybrid)
#define PURE_SLP_STMT(S)                  ((S)->slp_type == pure_slp)
#define STMT_SLP_TYPE(S)                   (S)->slp_type

struct dataref_aux {
  int misalignment;
  /* If true the alignment of base_decl needs to be increased.  */
  bool base_misaligned;
  /* If true we know the base is at least vector element alignment aligned.  */
  bool base_element_aligned;
  tree base_decl;
};

#define DR_VECT_AUX(dr) ((dataref_aux *)(dr)->aux)

#define VECT_MAX_COST 1000

/* The maximum number of intermediate steps required in multi-step type
   conversion.  */
#define MAX_INTERM_CVT_STEPS         3

/* The maximum vectorization factor supported by any target (V64QI).  */
#define MAX_VECTORIZATION_FACTOR 64

extern vec<stmt_vec_info> stmt_vec_info_vec;

void init_stmt_vec_info_vec (void);
void free_stmt_vec_info_vec (void);

/* Return a stmt_vec_info corresponding to STMT.  */

static inline stmt_vec_info
vinfo_for_stmt (gimple *stmt)
{
  unsigned int uid = gimple_uid (stmt);
  if (uid == 0)
    return NULL;

  return stmt_vec_info_vec[uid - 1];
}

/* Set vectorizer information INFO for STMT.  */

static inline void
set_vinfo_for_stmt (gimple *stmt, stmt_vec_info info)
{
  unsigned int uid = gimple_uid (stmt);
  if (uid == 0)
    {
      gcc_checking_assert (info);
      uid = stmt_vec_info_vec.length () + 1;
      gimple_set_uid (stmt, uid);
      stmt_vec_info_vec.safe_push (info);
    }
  else
    {
      gcc_checking_assert (info == NULL);
      stmt_vec_info_vec[uid - 1] = info;
    }
}
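
/* Sketch of the usual lifecycle (new_stmt_vec_info and
   free_stmt_vec_info are declared later in this file):

     set_vinfo_for_stmt (stmt, new_stmt_vec_info (stmt, vinfo));
     stmt_vec_info si = vinfo_for_stmt (stmt);
     ...
     set_vinfo_for_stmt (stmt, NULL);   before the info is freed

   A uid of 0 means "no vectorizer info"; nonzero uids index
   stmt_vec_info_vec at uid - 1.  */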

/* Return the earlier statement between STMT1 and STMT2.  */

static inline gimple *
get_earlier_stmt (gimple *stmt1, gimple *stmt2)
{
  unsigned int uid1, uid2;

  if (stmt1 == NULL)
    return stmt2;

  if (stmt2 == NULL)
    return stmt1;

  uid1 = gimple_uid (stmt1);
  uid2 = gimple_uid (stmt2);

  if (uid1 == 0 || uid2 == 0)
    return NULL;

  gcc_checking_assert (uid1 <= stmt_vec_info_vec.length ()
		       && uid2 <= stmt_vec_info_vec.length ());

  if (uid1 < uid2)
    return stmt1;
  else
    return stmt2;
}

/* Return the later statement between STMT1 and STMT2.  */

static inline gimple *
get_later_stmt (gimple *stmt1, gimple *stmt2)
{
  unsigned int uid1, uid2;

  if (stmt1 == NULL)
    return stmt2;

  if (stmt2 == NULL)
    return stmt1;

  uid1 = gimple_uid (stmt1);
  uid2 = gimple_uid (stmt2);

  if (uid1 == 0 || uid2 == 0)
    return NULL;

  gcc_assert (uid1 <= stmt_vec_info_vec.length ());
  gcc_assert (uid2 <= stmt_vec_info_vec.length ());

  if (uid1 > uid2)
    return stmt1;
  else
    return stmt2;
}

/* Return TRUE if a statement represented by STMT_INFO is a part of a
   pattern.  */

static inline bool
is_pattern_stmt_p (stmt_vec_info stmt_info)
{
  gimple *related_stmt;
  stmt_vec_info related_stmt_info;

  related_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
  if (related_stmt
      && (related_stmt_info = vinfo_for_stmt (related_stmt))
      && STMT_VINFO_IN_PATTERN_P (related_stmt_info))
    return true;

  return false;
}

/* Return true if BB is a loop header.  */

static inline bool
is_loop_header_bb_p (basic_block bb)
{
  if (bb == (bb->loop_father)->header)
    return true;
  gcc_checking_assert (EDGE_COUNT (bb->preds) == 1);
  return false;
}

/* Return pow2 (X), i.e. 2 raised to the power X.  */

static inline int
vect_pow2 (int x)
{
  int i, res = 1;

  for (i = 0; i < x; i++)
    res *= 2;

  return res;
}

/* Alias targetm.vectorize.builtin_vectorization_cost.  */

static inline int
builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
			    tree vectype, int misalign)
{
  return targetm.vectorize.builtin_vectorization_cost (type_of_cost,
						       vectype, misalign);
}

/* Get the cost of a statement kind by calling the target cost builtin.  */

static inline
int vect_get_stmt_cost (enum vect_cost_for_stmt type_of_cost)
{
  return builtin_vectorization_cost (type_of_cost, NULL, 0);
}

/* Alias targetm.vectorize.init_cost.  */

static inline void *
init_cost (struct loop *loop_info)
{
  return targetm.vectorize.init_cost (loop_info);
}

/* Alias targetm.vectorize.add_stmt_cost.  */

static inline unsigned
add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind,
	       stmt_vec_info stmt_info, int misalign,
	       enum vect_cost_model_location where)
{
  return targetm.vectorize.add_stmt_cost (data, count, kind,
					  stmt_info, misalign, where);
}

/* Alias targetm.vectorize.finish_cost.  */

static inline void
finish_cost (void *data, unsigned *prologue_cost,
	     unsigned *body_cost, unsigned *epilogue_cost)
{
  targetm.vectorize.finish_cost (data, prologue_cost, body_cost, epilogue_cost);
}

/* Alias targetm.vectorize.destroy_cost_data.  */

static inline void
destroy_cost_data (void *data)
{
  targetm.vectorize.destroy_cost_data (data);
}
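
/* The cost hooks above are used together roughly like this (a sketch
   of the flow, not verbatim vectorizer code):

     void *data = init_cost (loop);
     add_stmt_cost (data, count, kind, stmt_info, misalign, vect_body);
     ...
     unsigned prologue_cost, body_cost, epilogue_cost;
     finish_cost (data, &prologue_cost, &body_cost, &epilogue_cost);
     destroy_cost_data (data);

   where the location argument is one of vect_prologue, vect_body or
   vect_epilogue.  */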

/*-----------------------------------------------------------------*/
/* Info on data references alignment.                              */
/*-----------------------------------------------------------------*/
inline void
set_dr_misalignment (struct data_reference *dr, int val)
{
  dataref_aux *data_aux = DR_VECT_AUX (dr);

  if (!data_aux)
    {
      data_aux = XCNEW (dataref_aux);
      dr->aux = data_aux;
    }

  data_aux->misalignment = val;
}

inline int
dr_misalignment (struct data_reference *dr)
{
  return DR_VECT_AUX (dr)->misalignment;
}

/* Reflects actual alignment of first access in the vectorized loop,
   taking into account peeling/versioning if applied.  */
#define DR_MISALIGNMENT(DR) dr_misalignment (DR)
#define SET_DR_MISALIGNMENT(DR, VAL) set_dr_misalignment (DR, VAL)
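
/* Conventions assumed by the helpers below: a misalignment of 0 means
   the access is aligned, -1 means the misalignment is unknown, and any
   other value is the known misalignment.  E.g. (a sketch):

     SET_DR_MISALIGNMENT (dr, -1);   record unknown alignment
     if (known_alignment_for_access_p (dr) && aligned_access_p (dr))
       ... the access is known to be aligned ...  */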

/* Return TRUE if the data access is aligned, and FALSE otherwise.  */

static inline bool
aligned_access_p (struct data_reference *data_ref_info)
{
  return (DR_MISALIGNMENT (data_ref_info) == 0);
}

/* Return TRUE if the alignment of the data access is known, and FALSE
   otherwise.  */

static inline bool
known_alignment_for_access_p (struct data_reference *data_ref_info)
{
  return (DR_MISALIGNMENT (data_ref_info) != -1);
}


/* Return true if the vect cost model is unlimited.  */
static inline bool
unlimited_cost_model (loop_p loop)
{
  if (loop != NULL && loop->force_vectorize
      && flag_simd_cost_model != VECT_COST_MODEL_DEFAULT)
    return flag_simd_cost_model == VECT_COST_MODEL_UNLIMITED;
  return (flag_vect_cost_model == VECT_COST_MODEL_UNLIMITED);
}

/* Source location.  */
extern source_location vect_location;

/*-----------------------------------------------------------------*/
/* Function prototypes.                                            */
/*-----------------------------------------------------------------*/

/* Simple loop peeling and versioning utilities for vectorizer's purposes -
   in tree-vect-loop-manip.c.  */
extern void slpeel_make_loop_iterate_ntimes (struct loop *, tree);
extern bool slpeel_can_duplicate_loop_p (const struct loop *, const_edge);
struct loop *slpeel_tree_duplicate_loop_to_edge_cfg (struct loop *,
						     struct loop *, edge);
extern void vect_loop_versioning (loop_vec_info, unsigned int, bool);
extern void vect_do_peeling_for_loop_bound (loop_vec_info, tree, tree,
					    unsigned int, bool);
extern void vect_do_peeling_for_alignment (loop_vec_info, tree,
					   unsigned int, bool);
extern source_location find_loop_location (struct loop *);
extern bool vect_can_advance_ivs_p (loop_vec_info);

/* In tree-vect-stmts.c.  */
extern unsigned int current_vector_size;
extern tree get_vectype_for_scalar_type (tree);
extern tree get_mask_type_for_scalar_type (tree);
extern tree get_same_sized_vectype (tree, tree);
extern bool vect_is_simple_use (tree, vec_info *, gimple **,
                                enum vect_def_type *);
extern bool vect_is_simple_use (tree, vec_info *, gimple **,
				enum vect_def_type *, tree *);
extern bool supportable_widening_operation (enum tree_code, gimple *, tree,
					    tree, enum tree_code *,
					    enum tree_code *, int *,
					    vec<tree> *);
extern bool supportable_narrowing_operation (enum tree_code, tree, tree,
					     enum tree_code *,
					     int *, vec<tree> *);
extern stmt_vec_info new_stmt_vec_info (gimple *stmt, vec_info *);
extern void free_stmt_vec_info (gimple *stmt);
extern void vect_model_simple_cost (stmt_vec_info, int, enum vect_def_type *,
                                    stmt_vector_for_cost *,
				    stmt_vector_for_cost *);
extern void vect_model_store_cost (stmt_vec_info, int, bool,
				   enum vect_def_type, slp_tree,
				   stmt_vector_for_cost *,
				   stmt_vector_for_cost *);
extern void vect_model_load_cost (stmt_vec_info, int, bool, slp_tree,
				  stmt_vector_for_cost *,
				  stmt_vector_for_cost *);
extern unsigned record_stmt_cost (stmt_vector_for_cost *, int,
				  enum vect_cost_for_stmt, stmt_vec_info,
				  int, enum vect_cost_model_location);
extern void vect_finish_stmt_generation (gimple *, gimple *,
                                         gimple_stmt_iterator *);
extern bool vect_mark_stmts_to_be_vectorized (loop_vec_info);
extern tree vect_get_vec_def_for_operand (tree, gimple *, tree = NULL);
extern tree vect_init_vector (gimple *, tree, tree,
                              gimple_stmt_iterator *);
extern tree vect_get_vec_def_for_stmt_copy (enum vect_def_type, tree);
extern bool vect_transform_stmt (gimple *, gimple_stmt_iterator *,
                                 bool *, slp_tree, slp_instance);
extern void vect_remove_stores (gimple *);
extern bool vect_analyze_stmt (gimple *, bool *, slp_tree);
extern bool vectorizable_condition (gimple *, gimple_stmt_iterator *,
				    gimple **, tree, int, slp_tree);
extern bool vectorizable_comparison (gimple *, gimple_stmt_iterator *,
				     gimple **, tree, int, slp_tree);
extern void vect_get_load_cost (struct data_reference *, int, bool,
				unsigned int *, unsigned int *,
				stmt_vector_for_cost *,
				stmt_vector_for_cost *, bool);
extern void vect_get_store_cost (struct data_reference *, int,
				 unsigned int *, stmt_vector_for_cost *);
extern bool vect_supportable_shift (enum tree_code, tree);
extern void vect_get_vec_defs (tree, tree, gimple *, vec<tree> *,
			       vec<tree> *, slp_tree, int);
extern tree vect_gen_perm_mask_any (tree, const unsigned char *);
extern tree vect_gen_perm_mask_checked (tree, const unsigned char *);
extern void optimize_mask_stores (struct loop *);

/* In tree-vect-data-refs.c.  */
extern bool vect_can_force_dr_alignment_p (const_tree, unsigned int);
extern enum dr_alignment_support vect_supportable_dr_alignment
                                           (struct data_reference *, bool);
extern tree vect_get_smallest_scalar_type (gimple *, HOST_WIDE_INT *,
                                           HOST_WIDE_INT *);
extern bool vect_analyze_data_ref_dependences (loop_vec_info, int *);
extern bool vect_slp_analyze_instance_dependence (slp_instance);
extern bool vect_enhance_data_refs_alignment (loop_vec_info);
extern bool vect_analyze_data_refs_alignment (loop_vec_info);
extern bool vect_verify_datarefs_alignment (loop_vec_info);
extern bool vect_slp_analyze_and_verify_instance_alignment (slp_instance);
extern bool vect_analyze_data_ref_accesses (vec_info *);
extern bool vect_prune_runtime_alias_test_list (loop_vec_info);
extern tree vect_check_gather_scatter (gimple *, loop_vec_info, tree *, tree *,
				       int *);
extern bool vect_analyze_data_refs (vec_info *, int *);
extern tree vect_create_data_ref_ptr (gimple *, tree, struct loop *, tree,
				      tree *, gimple_stmt_iterator *,
				      gimple **, bool, bool *,
				      tree = NULL_TREE);
extern tree bump_vector_ptr (tree, gimple *, gimple_stmt_iterator *, gimple *,
			     tree);
extern tree vect_create_destination_var (tree, tree);
extern bool vect_grouped_store_supported (tree, unsigned HOST_WIDE_INT);
extern bool vect_store_lanes_supported (tree, unsigned HOST_WIDE_INT);
extern bool vect_grouped_load_supported (tree, unsigned HOST_WIDE_INT);
extern bool vect_load_lanes_supported (tree, unsigned HOST_WIDE_INT);
extern void vect_permute_store_chain (vec<tree>, unsigned int, gimple *,
				      gimple_stmt_iterator *, vec<tree> *);
extern tree vect_setup_realignment (gimple *, gimple_stmt_iterator *, tree *,
                                    enum dr_alignment_support, tree,
                                    struct loop **);
extern void vect_transform_grouped_load (gimple *, vec<tree>, int,
                                         gimple_stmt_iterator *);
extern void vect_record_grouped_load_vectors (gimple *, vec<tree>);
extern tree vect_get_new_vect_var (tree, enum vect_var_kind, const char *);
extern tree vect_get_new_ssa_name (tree, enum vect_var_kind,
				   const char * = NULL);
extern tree vect_create_addr_base_for_vector_ref (gimple *, gimple_seq *,
						  tree, struct loop *,
						  tree = NULL_TREE);

/* In tree-vect-loop.c.  */
/* FORNOW: Used in tree-parloops.c.  */
extern void destroy_loop_vec_info (loop_vec_info, bool);
extern gimple *vect_force_simple_reduction (loop_vec_info, gimple *, bool,
					    bool *, bool);
/* Drive for loop analysis stage.  */
extern loop_vec_info vect_analyze_loop (struct loop *);
/* Drive for loop transformation stage.  */
extern void vect_transform_loop (loop_vec_info);
extern loop_vec_info vect_analyze_loop_form (struct loop *);
extern bool vectorizable_live_operation (gimple *, gimple_stmt_iterator *,
					 gimple **);
extern bool vectorizable_reduction (gimple *, gimple_stmt_iterator *,
				    gimple **, slp_tree);
extern bool vectorizable_induction (gimple *, gimple_stmt_iterator *, gimple **);
extern tree get_initial_def_for_reduction (gimple *, tree, tree *);
extern int vect_min_worthwhile_factor (enum tree_code);
extern int vect_get_known_peeling_cost (loop_vec_info, int, int *,
					stmt_vector_for_cost *,
					stmt_vector_for_cost *,
					stmt_vector_for_cost *);

/* In tree-vect-slp.c.  */
extern void vect_free_slp_instance (slp_instance);
extern bool vect_transform_slp_perm_load (slp_tree, vec<tree>,
                                          gimple_stmt_iterator *, int,
                                          slp_instance, bool);
extern bool vect_slp_analyze_operations (vec<slp_instance> slp_instances,
					 void *);
extern bool vect_schedule_slp (vec_info *);
extern bool vect_analyze_slp (vec_info *, unsigned);
extern bool vect_make_slp_decision (loop_vec_info);
extern void vect_detect_hybrid_slp (loop_vec_info);
extern void vect_get_slp_defs (vec<tree>, slp_tree,
			       vec<vec<tree> > *, int);
extern bool vect_slp_bb (basic_block);
extern gimple *vect_find_last_scalar_stmt_in_slp (slp_tree);

/* In tree-vect-patterns.c.  */
/* Pattern recognition functions.
   Additional pattern recognition functions can (and will) be added
   in the future.  */
typedef gimple *(* vect_recog_func_ptr) (vec<gimple *> *, tree *, tree *);
#define NUM_PATTERNS 14
void vect_pattern_recog (vec_info *);

/* In tree-vectorizer.c.  */
unsigned vectorize_loops (void);
void vect_destroy_datarefs (vec_info *);
bool vect_stmt_in_region_p (vec_info *, gimple *);

#endif  /* GCC_TREE_VECTORIZER_H  */