1 /* Vectorizer
2    Copyright (C) 2003-2016 Free Software Foundation, Inc.
3    Contributed by Dorit Naishlos <dorit@il.ibm.com>
4 
5 This file is part of GCC.
6 
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
11 
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
15 for more details.
16 
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3.  If not see
19 <http://www.gnu.org/licenses/>.  */
20 
21 /* Loop and basic block vectorizer.
22 
23   This file contains drivers for the three vectorizers:
24   (1) loop vectorizer (inter-iteration parallelism),
25   (2) loop-aware SLP (intra-iteration parallelism) (invoked by the loop
26       vectorizer)
27   (3) BB vectorizer (out-of-loops), aka SLP
28 
29   The rest of the vectorizer's code is organized as follows:
30   - tree-vect-loop.c - loop specific parts such as reductions, etc. These are
31     used by drivers (1) and (2).
32   - tree-vect-loop-manip.c - vectorizer's loop control-flow utilities, used by
33     drivers (1) and (2).
34   - tree-vect-slp.c - BB vectorization specific analysis and transformation,
35     used by drivers (2) and (3).
36   - tree-vect-stmts.c - statements analysis and transformation (used by all).
37   - tree-vect-data-refs.c - vectorizer specific data-refs analysis and
38     manipulations (used by all).
39   - tree-vect-patterns.c - vectorizable code patterns detector (used by all)
40 
41   Here's a poor attempt at illustrating that:
42 
43      tree-vectorizer.c:
44      loop_vect()  loop_aware_slp()  slp_vect()
45           |        /           \          /
46           |       /             \        /
47           tree-vect-loop.c  tree-vect-slp.c
48                 | \      \  /      /   |
49                 |  \      \/      /    |
50                 |   \     /\     /     |
51                 |    \   /  \   /      |
52          tree-vect-stmts.c  tree-vect-data-refs.c
53                        \      /
54                     tree-vect-patterns.c
55 */
56 
57 #include "config.h"
58 #include "system.h"
59 #include "coretypes.h"
60 #include "backend.h"
61 #include "tree.h"
62 #include "gimple.h"
63 #include "predict.h"
64 #include "tree-pass.h"
65 #include "ssa.h"
66 #include "cgraph.h"
67 #include "fold-const.h"
68 #include "stor-layout.h"
69 #include "gimple-iterator.h"
70 #include "gimple-walk.h"
71 #include "tree-ssa-loop-manip.h"
72 #include "tree-cfg.h"
73 #include "cfgloop.h"
74 #include "tree-vectorizer.h"
75 #include "tree-ssa-propagate.h"
76 #include "dbgcnt.h"
77 #include "tree-scalar-evolution.h"
78 
79 
/* Source location of the loop or basic block currently being analyzed
   or transformed, used for dump/diagnostic output.  */
source_location vect_location;

/* Vector mapping GIMPLE stmt to stmt_vec_info. */
vec<stmt_vec_info> stmt_vec_info_vec;
85 
/* For mapping simduid to vectorization factor.  */

struct simduid_to_vf : free_ptr_hash<simduid_to_vf>
{
  /* DECL_UID of the loop's simduid decl; the hash key.  */
  unsigned int simduid;
  /* Vectorization factor the corresponding loop was vectorized with.  */
  int vf;

  /* hash_table support.  */
  static inline hashval_t hash (const simduid_to_vf *);
  static inline int equal (const simduid_to_vf *, const simduid_to_vf *);
};
97 
98 inline hashval_t
hash(const simduid_to_vf * p)99 simduid_to_vf::hash (const simduid_to_vf *p)
100 {
101   return p->simduid;
102 }
103 
104 inline int
equal(const simduid_to_vf * p1,const simduid_to_vf * p2)105 simduid_to_vf::equal (const simduid_to_vf *p1, const simduid_to_vf *p2)
106 {
107   return p1->simduid == p2->simduid;
108 }
109 
/* This hash maps the OMP simd array to the corresponding simduid used
   to index into it.  Like thus,

        _7 = GOMP_SIMD_LANE (simduid.0)
        ...
        ...
        D.1737[_7] = stuff;


   This hash maps from the OMP simd array (D.1737[]) to DECL_UID of
   simduid.0.  */

struct simd_array_to_simduid : free_ptr_hash<simd_array_to_simduid>
{
  /* The "omp simd array" variable; the hash key.  */
  tree decl;
  /* DECL_UID of the simduid used to index DECL, or -1U if the array is
     indexed by more than one simduid (see note_simd_array_uses_cb).  */
  unsigned int simduid;

  /* hash_table support.  */
  static inline hashval_t hash (const simd_array_to_simduid *);
  static inline int equal (const simd_array_to_simduid *,
			   const simd_array_to_simduid *);
};
132 
133 inline hashval_t
hash(const simd_array_to_simduid * p)134 simd_array_to_simduid::hash (const simd_array_to_simduid *p)
135 {
136   return DECL_UID (p->decl);
137 }
138 
139 inline int
equal(const simd_array_to_simduid * p1,const simd_array_to_simduid * p2)140 simd_array_to_simduid::equal (const simd_array_to_simduid *p1,
141 			      const simd_array_to_simduid *p2)
142 {
143   return p1->decl == p2->decl;
144 }
145 
/* Fold IFN_GOMP_SIMD_LANE, IFN_GOMP_SIMD_VF, IFN_GOMP_SIMD_LAST_LANE,
   into their corresponding constants and remove
   IFN_GOMP_SIMD_ORDERED_{START,END}.  HTAB, if non-NULL, maps a
   simduid DECL_UID to the vectorization factor of its loop; a
   simduid not found there is treated as having VF 1.  */

static void
adjust_simduid_builtins (hash_table<simduid_to_vf> *htab)
{
  basic_block bb;

  FOR_EACH_BB_FN (bb, cfun)
    {
      gimple_stmt_iterator i;

      /* Note: the iterator is advanced manually because the cases
	 below replace or remove the statement under it.  */
      for (i = gsi_start_bb (bb); !gsi_end_p (i); )
	{
	  unsigned int vf = 1;
	  enum internal_fn ifn;
	  gimple *stmt = gsi_stmt (i);
	  tree t;
	  /* Only internal function calls are of interest.  */
	  if (!is_gimple_call (stmt)
	      || !gimple_call_internal_p (stmt))
	    {
	      gsi_next (&i);
	      continue;
	    }
	  ifn = gimple_call_internal_fn (stmt);
	  switch (ifn)
	    {
	    case IFN_GOMP_SIMD_LANE:
	    case IFN_GOMP_SIMD_VF:
	    case IFN_GOMP_SIMD_LAST_LANE:
	      break;
	    case IFN_GOMP_SIMD_ORDERED_START:
	    case IFN_GOMP_SIMD_ORDERED_END:
	      /* With a constant-1 first argument, replace the call with
		 the corresponding GOMP_ordered_{start,end} builtin,
		 carrying over the virtual operands; otherwise just
		 remove it.  */
	      if (integer_onep (gimple_call_arg (stmt, 0)))
		{
		  enum built_in_function bcode
		    = (ifn == IFN_GOMP_SIMD_ORDERED_START
		       ? BUILT_IN_GOMP_ORDERED_START
		       : BUILT_IN_GOMP_ORDERED_END);
		  gimple *g
		    = gimple_build_call (builtin_decl_explicit (bcode), 0);
		  tree vdef = gimple_vdef (stmt);
		  gimple_set_vdef (g, vdef);
		  SSA_NAME_DEF_STMT (vdef) = g;
		  gimple_set_vuse (g, gimple_vuse (stmt));
		  /* gsi_replace leaves the iterator on the new stmt;
		     do not advance, re-examine it is not needed since
		     the new call is not internal.  */
		  gsi_replace (&i, g, true);
		  continue;
		}
	      gsi_remove (&i, true);
	      unlink_stmt_vdef (stmt);
	      continue;
	    default:
	      gsi_next (&i);
	      continue;
	    }
	  /* The first argument of the GOMP_SIMD_* calls is the simduid;
	     look up the vectorization factor recorded for it.  */
	  tree arg = gimple_call_arg (stmt, 0);
	  gcc_assert (arg != NULL_TREE);
	  gcc_assert (TREE_CODE (arg) == SSA_NAME);
	  simduid_to_vf *p = NULL, data;
	  data.simduid = DECL_UID (SSA_NAME_VAR (arg));
	  if (htab)
	    {
	      p = htab->find (&data);
	      if (p)
		vf = p->vf;
	    }
	  /* Fold the call: VF to the vectorization factor, LANE to 0,
	     LAST_LANE to its second argument.  */
	  switch (ifn)
	    {
	    case IFN_GOMP_SIMD_VF:
	      t = build_int_cst (unsigned_type_node, vf);
	      break;
	    case IFN_GOMP_SIMD_LANE:
	      t = build_int_cst (unsigned_type_node, 0);
	      break;
	    case IFN_GOMP_SIMD_LAST_LANE:
	      t = gimple_call_arg (stmt, 1);
	      break;
	    default:
	      gcc_unreachable ();
	    }
	  update_call_from_tree (&i, t);
	  gsi_next (&i);
	}
    }
}
232 
/* Helper structure for note_simd_array_uses.  */

struct note_simd_array_uses_struct
{
  /* Pointer to the hash table being populated; the table itself is
     created lazily by the walk callback.  */
  hash_table<simd_array_to_simduid> **htab;
  /* DECL_UID of the simduid of the internal call currently walked.  */
  unsigned int simduid;
};
240 
/* Callback for note_simd_array_uses, called through walk_gimple_op.
   Records in *NS->htab every local "omp simd array" variable found in
   the walked operand, mapping it to NS->simduid.  Returns NULL_TREE to
   continue the walk.  */

static tree
note_simd_array_uses_cb (tree *tp, int *walk_subtrees, void *data)
{
  struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
  struct note_simd_array_uses_struct *ns
    = (struct note_simd_array_uses_struct *) wi->info;

  if (TYPE_P (*tp))
    *walk_subtrees = 0;
  else if (VAR_P (*tp)
	   && lookup_attribute ("omp simd array", DECL_ATTRIBUTES (*tp))
	   && DECL_CONTEXT (*tp) == current_function_decl)
    {
      simd_array_to_simduid data;
      /* Create the hash table on first use.  */
      if (!*ns->htab)
	*ns->htab = new hash_table<simd_array_to_simduid> (15);
      data.decl = *tp;
      data.simduid = ns->simduid;
      simd_array_to_simduid **slot = (*ns->htab)->find_slot (&data, INSERT);
      if (*slot == NULL)
	{
	  simd_array_to_simduid *p = XNEW (simd_array_to_simduid);
	  *p = data;
	  *slot = p;
	}
      else if ((*slot)->simduid != ns->simduid)
	/* The array is indexed by more than one simduid; mark it so it
	   is not shrunk later.  */
	(*slot)->simduid = -1U;
      *walk_subtrees = 0;
    }
  return NULL_TREE;
}
274 
/* Find "omp simd array" temporaries and map them to corresponding
   simduid.  *HTAB is created lazily and left NULL if no such
   temporaries are found.  */

static void
note_simd_array_uses (hash_table<simd_array_to_simduid> **htab)
{
  basic_block bb;
  gimple_stmt_iterator gsi;
  struct walk_stmt_info wi;
  struct note_simd_array_uses_struct ns;

  memset (&wi, 0, sizeof (wi));
  wi.info = &ns;
  ns.htab = htab;

  FOR_EACH_BB_FN (bb, cfun)
    for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
      {
	gimple *stmt = gsi_stmt (gsi);
	/* Only GOMP_SIMD_{LANE,VF,LAST_LANE} internal calls matter;
	   their first argument is the simduid.  */
	if (!is_gimple_call (stmt) || !gimple_call_internal_p (stmt))
	  continue;
	switch (gimple_call_internal_fn (stmt))
	  {
	  case IFN_GOMP_SIMD_LANE:
	  case IFN_GOMP_SIMD_VF:
	  case IFN_GOMP_SIMD_LAST_LANE:
	    break;
	  default:
	    continue;
	  }
	tree lhs = gimple_call_lhs (stmt);
	if (lhs == NULL_TREE)
	  continue;
	imm_use_iterator use_iter;
	gimple *use_stmt;
	ns.simduid = DECL_UID (SSA_NAME_VAR (gimple_call_arg (stmt, 0)));
	/* Walk every non-debug use of the call's result looking for
	   "omp simd array" references.  */
	FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, lhs)
	  if (!is_gimple_debug (use_stmt))
	    walk_gimple_op (use_stmt, note_simd_array_uses_cb, &wi);
      }
}
316 
317 /* Shrink arrays with "omp simd array" attribute to the corresponding
318    vectorization factor.  */
319 
320 static void
shrink_simd_arrays(hash_table<simd_array_to_simduid> * simd_array_to_simduid_htab,hash_table<simduid_to_vf> * simduid_to_vf_htab)321 shrink_simd_arrays
322   (hash_table<simd_array_to_simduid> *simd_array_to_simduid_htab,
323    hash_table<simduid_to_vf> *simduid_to_vf_htab)
324 {
325   for (hash_table<simd_array_to_simduid>::iterator iter
326 	 = simd_array_to_simduid_htab->begin ();
327        iter != simd_array_to_simduid_htab->end (); ++iter)
328     if ((*iter)->simduid != -1U)
329       {
330 	tree decl = (*iter)->decl;
331 	int vf = 1;
332 	if (simduid_to_vf_htab)
333 	  {
334 	    simduid_to_vf *p = NULL, data;
335 	    data.simduid = (*iter)->simduid;
336 	    p = simduid_to_vf_htab->find (&data);
337 	    if (p)
338 	      vf = p->vf;
339 	  }
340 	tree atype
341 	  = build_array_type_nelts (TREE_TYPE (TREE_TYPE (decl)), vf);
342 	TREE_TYPE (decl) = atype;
343 	relayout_decl (decl);
344       }
345 
346   delete simd_array_to_simduid_htab;
347 }
348 
349 /* A helper function to free data refs.  */
350 
351 void
vect_destroy_datarefs(vec_info * vinfo)352 vect_destroy_datarefs (vec_info *vinfo)
353 {
354   struct data_reference *dr;
355   unsigned int i;
356 
357   FOR_EACH_VEC_ELT (vinfo->datarefs, i, dr)
358     if (dr->aux)
359       {
360         free (dr->aux);
361         dr->aux = NULL;
362       }
363 
364   free_data_refs (vinfo->datarefs);
365 }
366 
367 
368 /* Return whether STMT is inside the region we try to vectorize.  */
369 
370 bool
vect_stmt_in_region_p(vec_info * vinfo,gimple * stmt)371 vect_stmt_in_region_p (vec_info *vinfo, gimple *stmt)
372 {
373   if (!gimple_bb (stmt))
374     return false;
375 
376   if (loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo))
377     {
378       struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
379       if (!flow_bb_inside_loop_p (loop, gimple_bb (stmt)))
380 	return false;
381     }
382   else
383     {
384       bb_vec_info bb_vinfo = as_a <bb_vec_info> (vinfo);
385       if (gimple_bb (stmt) != BB_VINFO_BB (bb_vinfo)
386 	  || gimple_uid (stmt) == -1U
387 	  || gimple_code (stmt) == GIMPLE_PHI)
388 	return false;
389     }
390 
391   return true;
392 }
393 
394 
395 /* If LOOP has been versioned during ifcvt, return the internal call
396    guarding it.  */
397 
398 static gimple *
vect_loop_vectorized_call(struct loop * loop)399 vect_loop_vectorized_call (struct loop *loop)
400 {
401   basic_block bb = loop_preheader_edge (loop)->src;
402   gimple *g;
403   do
404     {
405       g = last_stmt (bb);
406       if (g)
407 	break;
408       if (!single_pred_p (bb))
409 	break;
410       bb = single_pred (bb);
411     }
412   while (1);
413   if (g && gimple_code (g) == GIMPLE_COND)
414     {
415       gimple_stmt_iterator gsi = gsi_for_stmt (g);
416       gsi_prev (&gsi);
417       if (!gsi_end_p (gsi))
418 	{
419 	  g = gsi_stmt (gsi);
420 	  if (is_gimple_call (g)
421 	      && gimple_call_internal_p (g)
422 	      && gimple_call_internal_fn (g) == IFN_LOOP_VECTORIZED
423 	      && (tree_to_shwi (gimple_call_arg (g, 0)) == loop->num
424 		  || tree_to_shwi (gimple_call_arg (g, 1)) == loop->num))
425 	    return g;
426 	}
427     }
428   return NULL;
429 }
430 
/* Fold LOOP_VECTORIZED internal call G to VALUE and
   update any immediate uses of its LHS.  */

static void
fold_loop_vectorized_call (gimple *g, tree value)
{
  tree lhs = gimple_call_lhs (g);
  use_operand_p use_p;
  imm_use_iterator iter;
  gimple *use_stmt;
  gimple_stmt_iterator gsi = gsi_for_stmt (g);

  /* Replace the call itself with VALUE ...  */
  update_call_from_tree (&gsi, value);
  /* ... and propagate VALUE into every statement that used the call's
     LHS, updating each such statement afterwards.  */
  FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
    {
      FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
	SET_USE (use_p, value);
      update_stmt (use_stmt);
    }
}
451 /* Set the uids of all the statements in basic blocks inside loop
452    represented by LOOP_VINFO. LOOP_VECTORIZED_CALL is the internal
453    call guarding the loop which has been if converted.  */
454 static void
set_uid_loop_bbs(loop_vec_info loop_vinfo,gimple * loop_vectorized_call)455 set_uid_loop_bbs (loop_vec_info loop_vinfo, gimple *loop_vectorized_call)
456 {
457   tree arg = gimple_call_arg (loop_vectorized_call, 1);
458   basic_block *bbs;
459   unsigned int i;
460   struct loop *scalar_loop = get_loop (cfun, tree_to_shwi (arg));
461 
462   LOOP_VINFO_SCALAR_LOOP (loop_vinfo) = scalar_loop;
463   gcc_checking_assert (vect_loop_vectorized_call
464 		       (LOOP_VINFO_SCALAR_LOOP (loop_vinfo))
465 		       == loop_vectorized_call);
466   bbs = get_loop_body (scalar_loop);
467   for (i = 0; i < scalar_loop->num_nodes; i++)
468     {
469       basic_block bb = bbs[i];
470       gimple_stmt_iterator gsi;
471       for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
472 	{
473 	  gimple *phi = gsi_stmt (gsi);
474 	  gimple_set_uid (phi, 0);
475 	}
476       for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
477 	{
478 	  gimple *stmt = gsi_stmt (gsi);
479 	  gimple_set_uid (stmt, 0);
480 	}
481     }
482   free (bbs);
483 }
484 
485 /* Function vectorize_loops.
486 
487    Entry point to loop vectorization phase.  */
488 
489 unsigned
vectorize_loops(void)490 vectorize_loops (void)
491 {
492   unsigned int i;
493   unsigned int num_vectorized_loops = 0;
494   unsigned int vect_loops_num;
495   struct loop *loop;
496   hash_table<simduid_to_vf> *simduid_to_vf_htab = NULL;
497   hash_table<simd_array_to_simduid> *simd_array_to_simduid_htab = NULL;
498   bool any_ifcvt_loops = false;
499   unsigned ret = 0;
500 
501   vect_loops_num = number_of_loops (cfun);
502 
503   /* Bail out if there are no loops.  */
504   if (vect_loops_num <= 1)
505     return 0;
506 
507   if (cfun->has_simduid_loops)
508     note_simd_array_uses (&simd_array_to_simduid_htab);
509 
510   init_stmt_vec_info_vec ();
511 
512   /*  ----------- Analyze loops. -----------  */
513 
514   /* If some loop was duplicated, it gets bigger number
515      than all previously defined loops.  This fact allows us to run
516      only over initial loops skipping newly generated ones.  */
517   FOR_EACH_LOOP (loop, 0)
518     if (loop->dont_vectorize)
519       any_ifcvt_loops = true;
520     else if ((flag_tree_loop_vectorize
521 	      && optimize_loop_nest_for_speed_p (loop))
522 	     || loop->force_vectorize)
523       {
524 	loop_vec_info loop_vinfo;
525 	vect_location = find_loop_location (loop);
526         if (LOCATION_LOCUS (vect_location) != UNKNOWN_LOCATION
527 	    && dump_enabled_p ())
528 	  dump_printf (MSG_NOTE, "\nAnalyzing loop at %s:%d\n",
529                        LOCATION_FILE (vect_location),
530 		       LOCATION_LINE (vect_location));
531 
532 	loop_vinfo = vect_analyze_loop (loop);
533 	loop->aux = loop_vinfo;
534 
535 	if (!loop_vinfo || !LOOP_VINFO_VECTORIZABLE_P (loop_vinfo))
536 	  continue;
537 
538         if (!dbg_cnt (vect_loop))
539 	  {
540 	    /* We may miss some if-converted loops due to
541 	       debug counter.  Set any_ifcvt_loops to visit
542 	       them at finalization.  */
543 	    any_ifcvt_loops = true;
544 	    break;
545 	  }
546 
547 	gimple *loop_vectorized_call = vect_loop_vectorized_call (loop);
548 	if (loop_vectorized_call)
549 	  set_uid_loop_bbs (loop_vinfo, loop_vectorized_call);
550         if (LOCATION_LOCUS (vect_location) != UNKNOWN_LOCATION
551 	    && dump_enabled_p ())
552           dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
553                            "loop vectorized\n");
554 	vect_transform_loop (loop_vinfo);
555 	num_vectorized_loops++;
556 	/* Now that the loop has been vectorized, allow it to be unrolled
557 	   etc.  */
558 	loop->force_vectorize = false;
559 
560 	if (loop->simduid)
561 	  {
562 	    simduid_to_vf *simduid_to_vf_data = XNEW (simduid_to_vf);
563 	    if (!simduid_to_vf_htab)
564 	      simduid_to_vf_htab = new hash_table<simduid_to_vf> (15);
565 	    simduid_to_vf_data->simduid = DECL_UID (loop->simduid);
566 	    simduid_to_vf_data->vf = loop_vinfo->vectorization_factor;
567 	    *simduid_to_vf_htab->find_slot (simduid_to_vf_data, INSERT)
568 	      = simduid_to_vf_data;
569 	  }
570 
571 	if (loop_vectorized_call)
572 	  {
573 	    fold_loop_vectorized_call (loop_vectorized_call, boolean_true_node);
574 	    ret |= TODO_cleanup_cfg;
575 	  }
576       }
577 
578   vect_location = UNKNOWN_LOCATION;
579 
580   statistics_counter_event (cfun, "Vectorized loops", num_vectorized_loops);
581   if (dump_enabled_p ()
582       || (num_vectorized_loops > 0 && dump_enabled_p ()))
583     dump_printf_loc (MSG_NOTE, vect_location,
584                      "vectorized %u loops in function.\n",
585                      num_vectorized_loops);
586 
587   /*  ----------- Finalize. -----------  */
588 
589   if (any_ifcvt_loops)
590     for (i = 1; i < vect_loops_num; i++)
591       {
592 	loop = get_loop (cfun, i);
593 	if (loop && loop->dont_vectorize)
594 	  {
595 	    gimple *g = vect_loop_vectorized_call (loop);
596 	    if (g)
597 	      {
598 		fold_loop_vectorized_call (g, boolean_false_node);
599 		ret |= TODO_cleanup_cfg;
600 	      }
601 	  }
602       }
603 
604   for (i = 1; i < vect_loops_num; i++)
605     {
606       loop_vec_info loop_vinfo;
607       bool has_mask_store;
608 
609       loop = get_loop (cfun, i);
610       if (!loop)
611 	continue;
612       loop_vinfo = (loop_vec_info) loop->aux;
613       has_mask_store = false;
614       if (loop_vinfo)
615 	has_mask_store = LOOP_VINFO_HAS_MASK_STORE (loop_vinfo);
616       destroy_loop_vec_info (loop_vinfo, true);
617       if (has_mask_store)
618 	optimize_mask_stores (loop);
619       loop->aux = NULL;
620     }
621 
622   free_stmt_vec_info_vec ();
623 
624   /* Fold IFN_GOMP_SIMD_{VF,LANE,LAST_LANE,ORDERED_{START,END}} builtins.  */
625   if (cfun->has_simduid_loops)
626     adjust_simduid_builtins (simduid_to_vf_htab);
627 
628   /* Shrink any "omp array simd" temporary arrays to the
629      actual vectorization factors.  */
630   if (simd_array_to_simduid_htab)
631     shrink_simd_arrays (simd_array_to_simduid_htab, simduid_to_vf_htab);
632   delete simduid_to_vf_htab;
633   cfun->has_simduid_loops = false;
634 
635   if (num_vectorized_loops > 0)
636     {
637       /* If we vectorized any loop only virtual SSA form needs to be updated.
638 	 ???  Also while we try hard to update loop-closed SSA form we fail
639 	 to properly do this in some corner-cases (see PR56286).  */
640       rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa_only_virtuals);
641       return TODO_cleanup_cfg;
642     }
643 
644   return ret;
645 }
646 
647 
/* Entry point to the simduid cleanup pass.  */

namespace {

/* Metadata for the simduid cleanup pass: a GIMPLE pass requiring SSA
   and CFG, with no timevar and no TODO flags.  */

const pass_data pass_data_simduid_cleanup =
{
  GIMPLE_PASS, /* type */
  "simduid", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_NONE, /* tv_id */
  ( PROP_ssa | PROP_cfg ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};
664 
/* Pass that folds leftover IFN_GOMP_SIMD_* internal calls and shrinks
   "omp simd array" temporaries in functions with simduid loops.  */

class pass_simduid_cleanup : public gimple_opt_pass
{
public:
  pass_simduid_cleanup (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_simduid_cleanup, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_simduid_cleanup (m_ctxt); }
  /* Only run for functions that actually contain simduid loops.  */
  virtual bool gate (function *fun) { return fun->has_simduid_loops; }
  virtual unsigned int execute (function *);

}; // class pass_simduid_cleanup
678 
679 unsigned int
execute(function * fun)680 pass_simduid_cleanup::execute (function *fun)
681 {
682   hash_table<simd_array_to_simduid> *simd_array_to_simduid_htab = NULL;
683 
684   note_simd_array_uses (&simd_array_to_simduid_htab);
685 
686   /* Fold IFN_GOMP_SIMD_{VF,LANE,LAST_LANE,ORDERED_{START,END}} builtins.  */
687   adjust_simduid_builtins (NULL);
688 
689   /* Shrink any "omp array simd" temporary arrays to the
690      actual vectorization factors.  */
691   if (simd_array_to_simduid_htab)
692     shrink_simd_arrays (simd_array_to_simduid_htab, NULL);
693   fun->has_simduid_loops = false;
694   return 0;
695 }
696 
}  // anon namespace

/* Create a new instance of the simduid cleanup pass.  */

gimple_opt_pass *
make_pass_simduid_cleanup (gcc::context *ctxt)
{
  return new pass_simduid_cleanup (ctxt);
}
704 
705 
/*  Entry point to basic block SLP phase.  */

namespace {

/* Metadata for the basic block SLP pass: a GIMPLE pass requiring SSA
   and CFG, updating SSA form when done.  */

const pass_data pass_data_slp_vectorize =
{
  GIMPLE_PASS, /* type */
  "slp", /* name */
  OPTGROUP_LOOP | OPTGROUP_VEC, /* optinfo_flags */
  TV_TREE_SLP_VECTORIZATION, /* tv_id */
  ( PROP_ssa | PROP_cfg ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_update_ssa, /* todo_flags_finish */
};
722 
/* Pass performing basic block (out-of-loop) SLP vectorization.  */

class pass_slp_vectorize : public gimple_opt_pass
{
public:
  pass_slp_vectorize (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_slp_vectorize, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_slp_vectorize (m_ctxt); }
  /* Only run when -ftree-slp-vectorize is enabled.  */
  virtual bool gate (function *) { return flag_tree_slp_vectorize != 0; }
  virtual unsigned int execute (function *);

}; // class pass_slp_vectorize
736 
737 unsigned int
execute(function * fun)738 pass_slp_vectorize::execute (function *fun)
739 {
740   basic_block bb;
741 
742   bool in_loop_pipeline = scev_initialized_p ();
743   if (!in_loop_pipeline)
744     {
745       loop_optimizer_init (LOOPS_NORMAL);
746       scev_initialize ();
747     }
748 
749   /* Mark all stmts as not belonging to the current region and unvisited.  */
750   FOR_EACH_BB_FN (bb, fun)
751     {
752       for (gimple_stmt_iterator gsi = gsi_start_bb (bb); !gsi_end_p (gsi);
753 	   gsi_next (&gsi))
754 	{
755 	  gimple *stmt = gsi_stmt (gsi);
756 	  gimple_set_uid (stmt, -1);
757 	  gimple_set_visited (stmt, false);
758 	}
759     }
760 
761   init_stmt_vec_info_vec ();
762 
763   FOR_EACH_BB_FN (bb, fun)
764     {
765       if (vect_slp_bb (bb))
766 	dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
767 			 "basic block vectorized\n");
768     }
769 
770   free_stmt_vec_info_vec ();
771 
772   if (!in_loop_pipeline)
773     {
774       scev_finalize ();
775       loop_optimizer_finalize ();
776     }
777 
778   return 0;
779 }
780 
} // anon namespace

/* Create a new instance of the basic block SLP pass.  */

gimple_opt_pass *
make_pass_slp_vectorize (gcc::context *ctxt)
{
  return new pass_slp_vectorize (ctxt);
}
788 
789 
790 /* Increase alignment of global arrays to improve vectorization potential.
791    TODO:
792    - Consider also structs that have an array field.
793    - Use ipa analysis to prune arrays that can't be vectorized?
794      This should involve global alignment analysis and in the future also
795      array padding.  */
796 
797 static unsigned int
increase_alignment(void)798 increase_alignment (void)
799 {
800   varpool_node *vnode;
801 
802   vect_location = UNKNOWN_LOCATION;
803 
804   /* Increase the alignment of all global arrays for vectorization.  */
805   FOR_EACH_DEFINED_VARIABLE (vnode)
806     {
807       tree vectype, decl = vnode->decl;
808       tree t;
809       unsigned int alignment;
810 
811       t = TREE_TYPE (decl);
812       if (TREE_CODE (t) != ARRAY_TYPE)
813         continue;
814       vectype = get_vectype_for_scalar_type (strip_array_types (t));
815       if (!vectype)
816         continue;
817       alignment = TYPE_ALIGN (vectype);
818       if (DECL_ALIGN (decl) >= alignment)
819         continue;
820 
821       if (vect_can_force_dr_alignment_p (decl, alignment))
822         {
823 	  vnode->increase_alignment (TYPE_ALIGN (vectype));
824           dump_printf (MSG_NOTE, "Increasing alignment of decl: ");
825           dump_generic_expr (MSG_NOTE, TDF_SLIM, decl);
826           dump_printf (MSG_NOTE, "\n");
827         }
828     }
829   return 0;
830 }
831 
832 
namespace {

/* Metadata for the IPA increase_alignment pass.  */

const pass_data pass_data_ipa_increase_alignment =
{
  SIMPLE_IPA_PASS, /* type */
  "increase_alignment", /* name */
  OPTGROUP_LOOP | OPTGROUP_VEC, /* optinfo_flags */
  TV_IPA_OPT, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};
847 
/* Simple IPA pass raising the alignment of global arrays to the
   preferred vector alignment (see increase_alignment above).  */

class pass_ipa_increase_alignment : public simple_ipa_opt_pass
{
public:
  pass_ipa_increase_alignment (gcc::context *ctxt)
    : simple_ipa_opt_pass (pass_data_ipa_increase_alignment, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *)
    {
      /* Only worthwhile with section anchors and the loop vectorizer
	 enabled.  */
      return flag_section_anchors && flag_tree_loop_vectorize;
    }

  virtual unsigned int execute (function *) { return increase_alignment (); }

}; // class pass_ipa_increase_alignment
864 
} // anon namespace

/* Create a new instance of the IPA increase_alignment pass.  */

simple_ipa_opt_pass *
make_pass_ipa_increase_alignment (gcc::context *ctxt)
{
  return new pass_ipa_increase_alignment (ctxt);
}
872