1 /* Loop invariant motion.
2    Copyright (C) 2003-2020 Free Software Foundation, Inc.
3 
4 This file is part of GCC.
5 
6 GCC is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the
8 Free Software Foundation; either version 3, or (at your option) any
9 later version.
10 
11 GCC is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
14 for more details.
15 
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3.  If not see
18 <http://www.gnu.org/licenses/>.  */
19 
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "backend.h"
24 #include "tree.h"
25 #include "gimple.h"
26 #include "cfghooks.h"
27 #include "tree-pass.h"
28 #include "ssa.h"
29 #include "gimple-pretty-print.h"
30 #include "fold-const.h"
31 #include "cfganal.h"
32 #include "tree-eh.h"
33 #include "gimplify.h"
34 #include "gimple-iterator.h"
35 #include "tree-cfg.h"
36 #include "tree-ssa-loop-manip.h"
37 #include "tree-ssa-loop.h"
38 #include "tree-into-ssa.h"
39 #include "cfgloop.h"
40 #include "domwalk.h"
41 #include "tree-affine.h"
42 #include "tree-ssa-propagate.h"
43 #include "trans-mem.h"
44 #include "gimple-fold.h"
45 #include "tree-scalar-evolution.h"
46 #include "tree-ssa-loop-niter.h"
47 #include "alias.h"
48 #include "builtins.h"
49 #include "tree-dfa.h"
50 
51 /* TODO:  Support for predicated code motion.  I.e.
52 
53    while (1)
54      {
55        if (cond)
56 	 {
57 	   a = inv;
58 	   something;
59 	 }
60      }
61 
62    Where COND and INV are invariants, but evaluating INV may trap or be
63    invalid for some other reason if !COND.  This may be transformed to
64 
65    if (cond)
66      a = inv;
67    while (1)
68      {
69        if (cond)
70 	 something;
71      }  */
72 
73 /* The auxiliary data kept for each statement.  */
74 
75 struct lim_aux_data
76 {
77   class loop *max_loop;	/* The outermost loop in which the statement
78 				   is invariant.  */
79 
80   class loop *tgt_loop;	/* The loop out of which we want to move the
81 				   invariant.  */
82 
83   class loop *always_executed_in;
84 				/* The outermost loop for which we are sure
85 				   the statement is executed if the loop
86 				   is entered.  */
87 
88   unsigned cost;		/* Cost of the computation performed by the
89 				   statement.  */
90 
91   unsigned ref;			/* The simple_mem_ref in this stmt or 0.  */
92 
93   vec<gimple *> depends;	/* Vector of statements that must be also
94 				   hoisted out of the loop when this statement
95 				   is hoisted; i.e. those that define the
96 				   operands of the statement and are inside of
97 				   the MAX_LOOP loop.  */
98 };
99 
100 /* Maps statements to their lim_aux_data.  */
101 
102 static hash_map<gimple *, lim_aux_data *> *lim_aux_data_map;
103 
104 /* Description of a memory reference location.  */
105 
106 struct mem_ref_loc
107 {
108   tree *ref;			/* The reference itself.  */
109   gimple *stmt;			/* The statement in which it occurs.  */
110 };
111 
112 
113 /* Description of a memory reference.  */
114 
115 class im_mem_ref
116 {
117 public:
118   unsigned id : 30;		/* ID assigned to the memory reference
119 				   (its index in memory_accesses.refs_list)  */
120   unsigned ref_canonical : 1;   /* Whether mem.ref was canonicalized.  */
121   unsigned ref_decomposed : 1;  /* Whether the ref was hashed from mem.  */
122   hashval_t hash;		/* Its hash value.  */
123 
124   /* The memory access itself and associated caching of alias-oracle
125      query meta-data.  */
126   ao_ref mem;
127 
128   bitmap stored;		/* The set of loops in which this memory location
129 				   is stored to.  */
130   vec<mem_ref_loc>		accesses_in_loop;
131 				/* The locations of the accesses.  Vector
132 				   indexed by the loop number.  */
133 
134   /* The following sets are computed on demand.  We keep both set and
135      its complement, so that we know whether the information was
136      already computed or not.  */
137   bitmap_head indep_loop;	/* The set of loops in which the memory
138 				   reference is independent, meaning:
139 				   If it is stored in the loop, this store
140 				     is independent of all other loads and
141 				     stores.
142 				   If it is only loaded, then it is independent
143 				     of all stores in the loop.  */
144   bitmap_head dep_loop;		/* The complement of INDEP_LOOP.  */
145 };
146 
147 /* We use two bits per loop in the ref->{in,}dep_loop bitmaps, the first
148    to record (in)dependence against stores in the loop and its subloops, the
149    second to record (in)dependence against all references in the loop
150    and its subloops.  */
151 #define LOOP_DEP_BIT(loopnum, storedp) (2 * (loopnum) + (storedp ? 1 : 0))
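
/* For example, loop number 3 owns bits 6 and 7 of the indep_loop/dep_loop
   bitmaps: LOOP_DEP_BIT (3, false) == 6 and LOOP_DEP_BIT (3, true) == 7.
   (Illustrative arithmetic only; the loop number is made up.)  */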
152 
153 /* Mem_ref hashtable helpers.  */
154 
155 struct mem_ref_hasher : nofree_ptr_hash <im_mem_ref>
156 {
157   typedef ao_ref *compare_type;
158   static inline hashval_t hash (const im_mem_ref *);
159   static inline bool equal (const im_mem_ref *, const ao_ref *);
160 };
161 
162 /* A hash function for class im_mem_ref object OBJ.  */
163 
164 inline hashval_t
165 mem_ref_hasher::hash (const im_mem_ref *mem)
166 {
167   return mem->hash;
168 }
169 
170 /* An equality function for class im_mem_ref object MEM1 with
171    memory reference OBJ2.  */
172 
173 inline bool
174 mem_ref_hasher::equal (const im_mem_ref *mem1, const ao_ref *obj2)
175 {
176   if (obj2->max_size_known_p ())
177     return (mem1->ref_decomposed
178 	    && operand_equal_p (mem1->mem.base, obj2->base, 0)
179 	    && known_eq (mem1->mem.offset, obj2->offset)
180 	    && known_eq (mem1->mem.size, obj2->size)
181 	    && known_eq (mem1->mem.max_size, obj2->max_size)
182 	    && mem1->mem.volatile_p == obj2->volatile_p
183 	    && (mem1->mem.ref_alias_set == obj2->ref_alias_set
184 		/* We are not canonicalizing alias-sets, but for the
185 		   special case where we have not canonicalized yet and
186 		   the incoming ref is an alias-set zero MEM, we pick
187 		   the correct one already.  */
188 		|| (!mem1->ref_canonical
189 		    && (TREE_CODE (obj2->ref) == MEM_REF
190 			|| TREE_CODE (obj2->ref) == TARGET_MEM_REF)
191 		    && obj2->ref_alias_set == 0)
192 		/* Likewise if there's a canonical ref with alias-set zero.  */
193 		|| (mem1->ref_canonical && mem1->mem.ref_alias_set == 0))
194 	    && types_compatible_p (TREE_TYPE (mem1->mem.ref),
195 				   TREE_TYPE (obj2->ref)));
196   else
197     return operand_equal_p (mem1->mem.ref, obj2->ref, 0);
198 }
199 
200 
201 /* Description of memory accesses in loops.  */
202 
203 static struct
204 {
205   /* The hash table of memory references accessed in loops.  */
206   hash_table<mem_ref_hasher> *refs;
207 
208   /* The list of memory references.  */
209   vec<im_mem_ref *> refs_list;
210 
211   /* The set of memory references accessed in each loop.  */
212   vec<bitmap_head> refs_in_loop;
213 
214   /* The set of memory references stored in each loop.  */
215   vec<bitmap_head> refs_stored_in_loop;
216 
217   /* The set of memory references stored in each loop, including subloops.  */
218   vec<bitmap_head> all_refs_stored_in_loop;
219 
220   /* Cache for expanding memory addresses.  */
221   hash_map<tree, name_expansion *> *ttae_cache;
222 } memory_accesses;
223 
224 /* Obstack for the bitmaps in the above data structures.  */
225 static bitmap_obstack lim_bitmap_obstack;
226 static obstack mem_ref_obstack;
227 
228 static bool ref_indep_loop_p (class loop *, im_mem_ref *);
229 static bool ref_always_accessed_p (class loop *, im_mem_ref *, bool);
230 
231 /* Minimum cost of an expensive expression.  */
232 #define LIM_EXPENSIVE ((unsigned) param_lim_expensive)
233 
234 /* The outermost loop for which execution of the header guarantees that the
235    block will be executed.  */
236 #define ALWAYS_EXECUTED_IN(BB) ((class loop *) (BB)->aux)
237 #define SET_ALWAYS_EXECUTED_IN(BB, VAL) ((BB)->aux = (void *) (VAL))
238 
239 /* ID of the shared unanalyzable mem.  */
240 #define UNANALYZABLE_MEM_ID 0
241 
242 /* Whether the reference was analyzable.  */
243 #define MEM_ANALYZABLE(REF) ((REF)->id != UNANALYZABLE_MEM_ID)
244 
245 static struct lim_aux_data *
246 init_lim_data (gimple *stmt)
247 {
248   lim_aux_data *p = XCNEW (struct lim_aux_data);
249   lim_aux_data_map->put (stmt, p);
250 
251   return p;
252 }
253 
254 static struct lim_aux_data *
255 get_lim_data (gimple *stmt)
256 {
257   lim_aux_data **p = lim_aux_data_map->get (stmt);
258   if (!p)
259     return NULL;
260 
261   return *p;
262 }
263 
264 /* Releases the memory occupied by DATA.  */
265 
266 static void
267 free_lim_aux_data (struct lim_aux_data *data)
268 {
269   data->depends.release ();
270   free (data);
271 }
272 
273 static void
274 clear_lim_data (gimple *stmt)
275 {
276   lim_aux_data **p = lim_aux_data_map->get (stmt);
277   if (!p)
278     return;
279 
280   free_lim_aux_data (*p);
281   *p = NULL;
282 }
283 
284 
285 /* The possibilities of statement movement.  */
286 enum move_pos
287   {
288     MOVE_IMPOSSIBLE,		/* No movement -- side effect expression.  */
289     MOVE_PRESERVE_EXECUTION,	/* Must not cause a non-executed statement
290 				   to become executed -- memory accesses, ... */
291     MOVE_POSSIBLE		/* Unlimited movement.  */
292   };
293 
294 
295 /* If it is possible to hoist the statement STMT unconditionally,
296    returns MOVE_POSSIBLE.
297    If it is possible to hoist the statement STMT, but we must avoid making
298    it executed if it would not be executed in the original program (e.g.
299    because it may trap), return MOVE_PRESERVE_EXECUTION.
300    Otherwise return MOVE_IMPOSSIBLE.  */
301 
302 enum move_pos
303 movement_possibility (gimple *stmt)
304 {
305   tree lhs;
306   enum move_pos ret = MOVE_POSSIBLE;
307 
308   if (flag_unswitch_loops
309       && gimple_code (stmt) == GIMPLE_COND)
310     {
311       /* If we perform unswitching, force the operands of the invariant
312 	 condition to be moved out of the loop.  */
313       return MOVE_POSSIBLE;
314     }
315 
316   if (gimple_code (stmt) == GIMPLE_PHI
317       && gimple_phi_num_args (stmt) <= 2
318       && !virtual_operand_p (gimple_phi_result (stmt))
319       && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (gimple_phi_result (stmt)))
320     return MOVE_POSSIBLE;
321 
322   if (gimple_get_lhs (stmt) == NULL_TREE)
323     return MOVE_IMPOSSIBLE;
324 
325   if (gimple_vdef (stmt))
326     return MOVE_IMPOSSIBLE;
327 
328   if (stmt_ends_bb_p (stmt)
329       || gimple_has_volatile_ops (stmt)
330       || gimple_has_side_effects (stmt)
331       || stmt_could_throw_p (cfun, stmt))
332     return MOVE_IMPOSSIBLE;
333 
334   if (is_gimple_call (stmt))
335     {
336       /* While a pure or const call is guaranteed to have no side effects, we
337 	 cannot move it arbitrarily.  Consider code like
338 
339 	 char *s = something ();
340 
341 	 while (1)
342 	   {
343 	     if (s)
344 	       t = strlen (s);
345 	     else
346 	       t = 0;
347 	   }
348 
349 	 Here the strlen call cannot be moved out of the loop, even though
350 	 s is invariant.  In addition to possibly creating a call with
351 	 invalid arguments, moving out a function call that is not executed
352 	 may cause performance regressions in case the call is costly and
353 	 not executed at all.  */
354       ret = MOVE_PRESERVE_EXECUTION;
355       lhs = gimple_call_lhs (stmt);
356     }
357   else if (is_gimple_assign (stmt))
358     lhs = gimple_assign_lhs (stmt);
359   else
360     return MOVE_IMPOSSIBLE;
361 
362   if (TREE_CODE (lhs) == SSA_NAME
363       && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs))
364     return MOVE_IMPOSSIBLE;
365 
366   if (TREE_CODE (lhs) != SSA_NAME
367       || gimple_could_trap_p (stmt))
368     return MOVE_PRESERVE_EXECUTION;
369 
370   /* Non local loads in a transaction cannot be hoisted out.  Well,
371      unless the load happens on every path out of the loop, but we
372      don't take this into account yet.  */
373   if (flag_tm
374       && gimple_in_transaction (stmt)
375       && gimple_assign_single_p (stmt))
376     {
377       tree rhs = gimple_assign_rhs1 (stmt);
378       if (DECL_P (rhs) && is_global_var (rhs))
379 	{
380 	  if (dump_file)
381 	    {
382 	      fprintf (dump_file, "Cannot hoist conditional load of ");
383 	      print_generic_expr (dump_file, rhs, TDF_SLIM);
384 	      fprintf (dump_file, " because it is in a transaction.\n");
385 	    }
386 	  return MOVE_IMPOSSIBLE;
387 	}
388     }
389 
390   return ret;
391 }
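
/* Illustrative examples of the classification above (GIMPLE-like
   pseudocode; all names are made up, default exception settings assumed):

     tmp_1 = a_2 + b_3;   MOVE_POSSIBLE -- SSA lhs, no side effects, no trap.
     tmp_4 = *p_5;        MOVE_PRESERVE_EXECUTION -- the load may trap.
     *p_5 = tmp_4;        MOVE_IMPOSSIBLE -- the store creates a virtual def.  */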
392 
393 /* Suppose that operand DEF is used inside LOOP.  Returns the outermost
394    loop to which we could move the expression using DEF if it did not have
395    other operands, i.e. the outermost loop enclosing LOOP in which the value
396    of DEF is invariant.  */
397 
398 static class loop *
399 outermost_invariant_loop (tree def, class loop *loop)
400 {
401   gimple *def_stmt;
402   basic_block def_bb;
403   class loop *max_loop;
404   struct lim_aux_data *lim_data;
405 
406   if (!def)
407     return superloop_at_depth (loop, 1);
408 
409   if (TREE_CODE (def) != SSA_NAME)
410     {
411       gcc_assert (is_gimple_min_invariant (def));
412       return superloop_at_depth (loop, 1);
413     }
414 
415   def_stmt = SSA_NAME_DEF_STMT (def);
416   def_bb = gimple_bb (def_stmt);
417   if (!def_bb)
418     return superloop_at_depth (loop, 1);
419 
420   max_loop = find_common_loop (loop, def_bb->loop_father);
421 
422   lim_data = get_lim_data (def_stmt);
423   if (lim_data != NULL && lim_data->max_loop != NULL)
424     max_loop = find_common_loop (max_loop,
425 				 loop_outer (lim_data->max_loop));
426   if (max_loop == loop)
427     return NULL;
428   max_loop = superloop_at_depth (loop, loop_depth (max_loop) + 1);
429 
430   return max_loop;
431 }
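
/* For example, if DEF is a constant or has no defining statement (such as
   a default definition of a parameter), the superloop of LOOP at depth 1,
   i.e. the outermost loop of the nest, is returned; if DEF is defined in
   LOOP itself and cannot itself be moved out, NULL is returned.  */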
432 
433 /* DATA is a structure containing information associated with a statement
434    inside LOOP.  DEF is one of the operands of this statement.
435 
436    Find the outermost loop enclosing LOOP in which the value of DEF is
437    invariant and record this in the DATA->max_loop field.  If DEF itself is
438    defined inside this loop as well (i.e. we need to hoist it out of the loop
439    if we want to hoist the statement represented by DATA), record the
440    statement in which DEF is defined in the DATA->depends list.  Additionally,
441    if ADD_COST is true, add the cost of the computation of DEF to DATA->cost.
442 
443    If DEF is not invariant in LOOP, return false.  Otherwise return true.  */
444 
445 static bool
446 add_dependency (tree def, struct lim_aux_data *data, class loop *loop,
447 		bool add_cost)
448 {
449   gimple *def_stmt = SSA_NAME_DEF_STMT (def);
450   basic_block def_bb = gimple_bb (def_stmt);
451   class loop *max_loop;
452   struct lim_aux_data *def_data;
453 
454   if (!def_bb)
455     return true;
456 
457   max_loop = outermost_invariant_loop (def, loop);
458   if (!max_loop)
459     return false;
460 
461   if (flow_loop_nested_p (data->max_loop, max_loop))
462     data->max_loop = max_loop;
463 
464   def_data = get_lim_data (def_stmt);
465   if (!def_data)
466     return true;
467 
468   if (add_cost
469       /* Only add the cost if the statement defining DEF is inside LOOP,
470 	 i.e. if it is likely that by moving the invariants dependent
471 	 on it, we will be able to avoid creating a new register for
472 	 it (since it will be only used in these dependent invariants).  */
473       && def_bb->loop_father == loop)
474     data->cost += def_data->cost;
475 
476   data->depends.safe_push (def_stmt);
477 
478   return true;
479 }
480 
481 /* Returns an estimate for a cost of statement STMT.  The values here
482    are just ad-hoc constants, similar to costs for inlining.  */
483 
484 static unsigned
485 stmt_cost (gimple *stmt)
486 {
487   /* Always try to create possibilities for unswitching.  */
488   if (gimple_code (stmt) == GIMPLE_COND
489       || gimple_code (stmt) == GIMPLE_PHI)
490     return LIM_EXPENSIVE;
491 
492   /* We should be hoisting calls if possible.  */
493   if (is_gimple_call (stmt))
494     {
495       tree fndecl;
496 
497       /* Unless the call is a builtin_constant_p; this always folds to a
498 	 constant, so moving it is useless.  */
499       fndecl = gimple_call_fndecl (stmt);
500       if (fndecl && fndecl_built_in_p (fndecl, BUILT_IN_CONSTANT_P))
501 	return 0;
502 
503       return LIM_EXPENSIVE;
504     }
505 
506   /* Hoisting memory references out should almost surely be a win.  */
507   if (gimple_references_memory_p (stmt))
508     return LIM_EXPENSIVE;
509 
510   if (gimple_code (stmt) != GIMPLE_ASSIGN)
511     return 1;
512 
513   switch (gimple_assign_rhs_code (stmt))
514     {
515     case MULT_EXPR:
516     case WIDEN_MULT_EXPR:
517     case WIDEN_MULT_PLUS_EXPR:
518     case WIDEN_MULT_MINUS_EXPR:
519     case DOT_PROD_EXPR:
520     case TRUNC_DIV_EXPR:
521     case CEIL_DIV_EXPR:
522     case FLOOR_DIV_EXPR:
523     case ROUND_DIV_EXPR:
524     case EXACT_DIV_EXPR:
525     case CEIL_MOD_EXPR:
526     case FLOOR_MOD_EXPR:
527     case ROUND_MOD_EXPR:
528     case TRUNC_MOD_EXPR:
529     case RDIV_EXPR:
530       /* Division and multiplication are usually expensive.  */
531       return LIM_EXPENSIVE;
532 
533     case LSHIFT_EXPR:
534     case RSHIFT_EXPR:
535     case WIDEN_LSHIFT_EXPR:
536     case LROTATE_EXPR:
537     case RROTATE_EXPR:
538       /* Shifts and rotates are usually expensive.  */
539       return LIM_EXPENSIVE;
540 
541     case CONSTRUCTOR:
542       /* Make vector construction cost proportional to the number
543          of elements.  */
544       return CONSTRUCTOR_NELTS (gimple_assign_rhs1 (stmt));
545 
546     case SSA_NAME:
547     case PAREN_EXPR:
548       /* Whether or not something is wrapped inside a PAREN_EXPR
549          should not change move cost.  Nor should an intermediate
550 	 unpropagated SSA name copy.  */
551       return 0;
552 
553     default:
554       return 1;
555     }
556 }
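
/* A few data points for the cost model above (hypothetical statements,
   none of which reference memory):

     x_1 = y_2 * z_3;   cost LIM_EXPENSIVE (multiplication)
     x_1 = y_2 + 1;     cost 1 (default for cheap arithmetic)
     x_1 = y_2;         cost 0 (plain SSA name copy)  */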
557 
558 /* Finds the outermost loop between OUTER and LOOP in which the memory
559    reference REF is independent.  If REF is not independent in LOOP, NULL is
560    returned instead.  */
561 
562 static class loop *
563 outermost_indep_loop (class loop *outer, class loop *loop, im_mem_ref *ref)
564 {
565   class loop *aloop;
566 
567   if (ref->stored && bitmap_bit_p (ref->stored, loop->num))
568     return NULL;
569 
570   for (aloop = outer;
571        aloop != loop;
572        aloop = superloop_at_depth (loop, loop_depth (aloop) + 1))
573     if ((!ref->stored || !bitmap_bit_p (ref->stored, aloop->num))
574 	&& ref_indep_loop_p (aloop, ref))
575       return aloop;
576 
577   if (ref_indep_loop_p (loop, ref))
578     return loop;
579   else
580     return NULL;
581 }
582 
583 /* If there is a simple load or store to a memory reference in STMT, returns
584    the location of the memory reference, and sets IS_STORE according to whether
585    it is a store or load.  Otherwise, returns NULL.  */
586 
587 static tree *
588 simple_mem_ref_in_stmt (gimple *stmt, bool *is_store)
589 {
590   tree *lhs, *rhs;
591 
592   /* Recognize SSA_NAME = MEM and MEM = (SSA_NAME | invariant) patterns.  */
593   if (!gimple_assign_single_p (stmt))
594     return NULL;
595 
596   lhs = gimple_assign_lhs_ptr (stmt);
597   rhs = gimple_assign_rhs1_ptr (stmt);
598 
599   if (TREE_CODE (*lhs) == SSA_NAME && gimple_vuse (stmt))
600     {
601       *is_store = false;
602       return rhs;
603     }
604   else if (gimple_vdef (stmt)
605 	   && (TREE_CODE (*rhs) == SSA_NAME || is_gimple_min_invariant (*rhs)))
606     {
607       *is_store = true;
608       return lhs;
609     }
610   else
611     return NULL;
612 }
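
/* For example (hypothetical GIMPLE), given

     x_1 = a.b;       the address of the rhs "a.b" is returned and
                      *IS_STORE is set to false;
     a.b = x_1;       the address of the lhs "a.b" is returned and
                      *IS_STORE is set to true;
     x_1 = y_2 + 1;   NULL is returned (no simple memory reference).  */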
613 
614 /* From a controlling predicate in DOM determine the arguments from
615    the PHI node PHI that are chosen if the predicate evaluates to
616    true and false and store them to *TRUE_ARG_P and *FALSE_ARG_P if
617    they are non-NULL.  Returns true if the arguments can be determined,
618    else return false.  */
619 
620 static bool
621 extract_true_false_args_from_phi (basic_block dom, gphi *phi,
622 				  tree *true_arg_p, tree *false_arg_p)
623 {
624   edge te, fe;
625   if (! extract_true_false_controlled_edges (dom, gimple_bb (phi),
626 					     &te, &fe))
627     return false;
628 
629   if (true_arg_p)
630     *true_arg_p = PHI_ARG_DEF (phi, te->dest_idx);
631   if (false_arg_p)
632     *false_arg_p = PHI_ARG_DEF (phi, fe->dest_idx);
633 
634   return true;
635 }
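
/* For instance, in the diamond below (made-up names), with DOM being the
   block ending in the condition:

       if (a_1 < b_2)        <-- DOM
         /         \
     x_3 = ...   x_4 = ...
         \         /
     x_5 = PHI <x_3, x_4>    <-- PHI

   the call succeeds with *TRUE_ARG_P == x_3 and *FALSE_ARG_P == x_4,
   assuming x_3 arrives on the edge taken when the predicate is true.  */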
636 
637 /* Determine the outermost loop to which it is possible to hoist a statement
638    STMT and store it in LIM_DATA (STMT)->max_loop.  To do this we determine
639    the outermost loop in which the value computed by STMT is invariant.
640    If MUST_PRESERVE_EXEC is true, additionally choose the loop so that we
641    preserve whether STMT is executed.  It also fills in other related
642    information in LIM_DATA (STMT).
643 
644    The function returns false if STMT cannot be hoisted outside of the loop it
645    is defined in, and true otherwise.  */
646 
647 static bool
648 determine_max_movement (gimple *stmt, bool must_preserve_exec)
649 {
650   basic_block bb = gimple_bb (stmt);
651   class loop *loop = bb->loop_father;
652   class loop *level;
653   struct lim_aux_data *lim_data = get_lim_data (stmt);
654   tree val;
655   ssa_op_iter iter;
656 
657   if (must_preserve_exec)
658     level = ALWAYS_EXECUTED_IN (bb);
659   else
660     level = superloop_at_depth (loop, 1);
661   lim_data->max_loop = level;
662 
663   if (gphi *phi = dyn_cast <gphi *> (stmt))
664     {
665       use_operand_p use_p;
666       unsigned min_cost = UINT_MAX;
667       unsigned total_cost = 0;
668       struct lim_aux_data *def_data;
669 
670       /* We will end up promoting dependencies to be unconditionally
671 	 evaluated.  For this reason the PHI cost (and thus the
672 	 cost we remove from the loop by doing the invariant motion)
673 	 is that of the cheapest PHI argument dependency chain.  */
674       FOR_EACH_PHI_ARG (use_p, phi, iter, SSA_OP_USE)
675 	{
676 	  val = USE_FROM_PTR (use_p);
677 
678 	  if (TREE_CODE (val) != SSA_NAME)
679 	    {
680 	      /* Assign const 1 to constants.  */
681 	      min_cost = MIN (min_cost, 1);
682 	      total_cost += 1;
683 	      continue;
684 	    }
685 	  if (!add_dependency (val, lim_data, loop, false))
686 	    return false;
687 
688 	  gimple *def_stmt = SSA_NAME_DEF_STMT (val);
689 	  if (gimple_bb (def_stmt)
690 	      && gimple_bb (def_stmt)->loop_father == loop)
691 	    {
692 	      def_data = get_lim_data (def_stmt);
693 	      if (def_data)
694 		{
695 		  min_cost = MIN (min_cost, def_data->cost);
696 		  total_cost += def_data->cost;
697 		}
698 	    }
699 	}
700 
701       min_cost = MIN (min_cost, total_cost);
702       lim_data->cost += min_cost;
703 
704       if (gimple_phi_num_args (phi) > 1)
705 	{
706 	  basic_block dom = get_immediate_dominator (CDI_DOMINATORS, bb);
707 	  gimple *cond;
708 	  if (gsi_end_p (gsi_last_bb (dom)))
709 	    return false;
710 	  cond = gsi_stmt (gsi_last_bb (dom));
711 	  if (gimple_code (cond) != GIMPLE_COND)
712 	    return false;
713 	  /* Verify that this is an extended form of a diamond and
714 	     the PHI arguments are completely controlled by the
715 	     predicate in DOM.  */
716 	  if (!extract_true_false_args_from_phi (dom, phi, NULL, NULL))
717 	    return false;
718 
719 	  /* Fold in dependencies and cost of the condition.  */
720 	  FOR_EACH_SSA_TREE_OPERAND (val, cond, iter, SSA_OP_USE)
721 	    {
722 	      if (!add_dependency (val, lim_data, loop, false))
723 		return false;
724 	      def_data = get_lim_data (SSA_NAME_DEF_STMT (val));
725 	      if (def_data)
726 		lim_data->cost += def_data->cost;
727 	    }
728 
729 	  /* We want to avoid unconditionally executing very expensive
730 	     operations.  As costs for our dependencies cannot be
731 	     negative, just claim we are not invariant in this case.
732 	     We also are not sure whether the control flow inside the
733 	     loop will vanish.  */
734 	  if (total_cost - min_cost >= 2 * LIM_EXPENSIVE
735 	      && !(min_cost != 0
736 		   && total_cost / min_cost <= 2))
737 	    return false;
738 
739 	  /* Assume that the control-flow in the loop will vanish.
740 	     ???  We should verify this and not artificially increase
741 	     the cost if that is not the case.  */
742 	  lim_data->cost += stmt_cost (stmt);
743 	}
744 
745       return true;
746     }
747   else
748     FOR_EACH_SSA_TREE_OPERAND (val, stmt, iter, SSA_OP_USE)
749       if (!add_dependency (val, lim_data, loop, true))
750 	return false;
751 
752   if (gimple_vuse (stmt))
753     {
754       im_mem_ref *ref
755 	= lim_data ? memory_accesses.refs_list[lim_data->ref] : NULL;
756       if (ref
757 	  && MEM_ANALYZABLE (ref))
758 	{
759 	  lim_data->max_loop = outermost_indep_loop (lim_data->max_loop,
760 						     loop, ref);
761 	  if (!lim_data->max_loop)
762 	    return false;
763 	}
764       else if (! add_dependency (gimple_vuse (stmt), lim_data, loop, false))
765 	return false;
766     }
767 
768   lim_data->cost += stmt_cost (stmt);
769 
770   return true;
771 }
772 
773 /* Suppose that some statement in ORIG_LOOP is hoisted to the loop LEVEL,
774    and that one of the operands of this statement is computed by STMT.
775    Ensure that STMT (together with all the statements that define its
776    operands) is hoisted at least out of the loop LEVEL.  */
777 
778 static void
779 set_level (gimple *stmt, class loop *orig_loop, class loop *level)
780 {
781   class loop *stmt_loop = gimple_bb (stmt)->loop_father;
782   struct lim_aux_data *lim_data;
783   gimple *dep_stmt;
784   unsigned i;
785 
786   stmt_loop = find_common_loop (orig_loop, stmt_loop);
787   lim_data = get_lim_data (stmt);
788   if (lim_data != NULL && lim_data->tgt_loop != NULL)
789     stmt_loop = find_common_loop (stmt_loop,
790 				  loop_outer (lim_data->tgt_loop));
791   if (flow_loop_nested_p (stmt_loop, level))
792     return;
793 
794   gcc_assert (level == lim_data->max_loop
795 	      || flow_loop_nested_p (lim_data->max_loop, level));
796 
797   lim_data->tgt_loop = level;
798   FOR_EACH_VEC_ELT (lim_data->depends, i, dep_stmt)
799     set_level (dep_stmt, orig_loop, level);
800 }
801 
802 /* Determines the outermost loop from which we want to hoist the statement
803    STMT.  For now we choose the outermost possible loop.  TODO -- use
804    profiling information to set it more sanely.  */
805 
806 static void
807 set_profitable_level (gimple *stmt)
808 {
809   set_level (stmt, gimple_bb (stmt)->loop_father, get_lim_data (stmt)->max_loop);
810 }
811 
812 /* Returns true if STMT is a call that has side effects.  */
813 
814 static bool
815 nonpure_call_p (gimple *stmt)
816 {
817   if (gimple_code (stmt) != GIMPLE_CALL)
818     return false;
819 
820   return gimple_has_side_effects (stmt);
821 }
822 
823 /* Rewrite a/b to a*(1/b).  Return the invariant stmt to process.  */
824 
825 static gimple *
826 rewrite_reciprocal (gimple_stmt_iterator *bsi)
827 {
828   gassign *stmt, *stmt1, *stmt2;
829   tree name, lhs, type;
830   tree real_one;
831   gimple_stmt_iterator gsi;
832 
833   stmt = as_a <gassign *> (gsi_stmt (*bsi));
834   lhs = gimple_assign_lhs (stmt);
835   type = TREE_TYPE (lhs);
836 
837   real_one = build_one_cst (type);
838 
839   name = make_temp_ssa_name (type, NULL, "reciptmp");
840   stmt1 = gimple_build_assign (name, RDIV_EXPR, real_one,
841 			       gimple_assign_rhs2 (stmt));
842   stmt2 = gimple_build_assign (lhs, MULT_EXPR, name,
843 			       gimple_assign_rhs1 (stmt));
844 
845   /* Replace division stmt with reciprocal and multiply stmts.
846      The multiply stmt is not invariant, so update iterator
847      and avoid rescanning.  */
848   gsi = *bsi;
849   gsi_insert_before (bsi, stmt1, GSI_NEW_STMT);
850   gsi_replace (&gsi, stmt2, true);
851 
852   /* Continue processing with invariant reciprocal statement.  */
853   return stmt1;
854 }
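
/* As a sketch of the transformation (made-up SSA names), the division

     x_1 = a_2 / b_3;

   is replaced by

     reciptmp_4 = 1.0 / b_3;      <-- returned; invariant if b_3 is
     x_1 = a_2 * reciptmp_4;  */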
855 
856 /* Check if the pattern at *BSI is a bittest of the form
857    (A >> B) & 1 != 0 and in this case rewrite it to A & (1 << B) != 0.  */
858 
859 static gimple *
860 rewrite_bittest (gimple_stmt_iterator *bsi)
861 {
862   gassign *stmt;
863   gimple *stmt1;
864   gassign *stmt2;
865   gimple *use_stmt;
866   gcond *cond_stmt;
867   tree lhs, name, t, a, b;
868   use_operand_p use;
869 
870   stmt = as_a <gassign *> (gsi_stmt (*bsi));
871   lhs = gimple_assign_lhs (stmt);
872 
873   /* Verify that the single use of lhs is a comparison against zero.  */
874   if (TREE_CODE (lhs) != SSA_NAME
875       || !single_imm_use (lhs, &use, &use_stmt))
876     return stmt;
877   cond_stmt = dyn_cast <gcond *> (use_stmt);
878   if (!cond_stmt)
879     return stmt;
880   if (gimple_cond_lhs (cond_stmt) != lhs
881       || (gimple_cond_code (cond_stmt) != NE_EXPR
882 	  && gimple_cond_code (cond_stmt) != EQ_EXPR)
883       || !integer_zerop (gimple_cond_rhs (cond_stmt)))
884     return stmt;
885 
886   /* Get at the operands of the shift.  The rhs is TMP1 & 1.  */
887   stmt1 = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
888   if (gimple_code (stmt1) != GIMPLE_ASSIGN)
889     return stmt;
890 
891   /* There is a conversion in between possibly inserted by fold.  */
892   if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (stmt1)))
893     {
894       t = gimple_assign_rhs1 (stmt1);
895       if (TREE_CODE (t) != SSA_NAME
896 	  || !has_single_use (t))
897 	return stmt;
898       stmt1 = SSA_NAME_DEF_STMT (t);
899       if (gimple_code (stmt1) != GIMPLE_ASSIGN)
900 	return stmt;
901     }
902 
903   /* Verify that B is loop invariant but A is not.  Verify that with
904      all the stmt walking we are still in the same loop.  */
905   if (gimple_assign_rhs_code (stmt1) != RSHIFT_EXPR
906       || loop_containing_stmt (stmt1) != loop_containing_stmt (stmt))
907     return stmt;
908 
909   a = gimple_assign_rhs1 (stmt1);
910   b = gimple_assign_rhs2 (stmt1);
911 
912   if (outermost_invariant_loop (b, loop_containing_stmt (stmt1)) != NULL
913       && outermost_invariant_loop (a, loop_containing_stmt (stmt1)) == NULL)
914     {
915       gimple_stmt_iterator rsi;
916 
917       /* 1 << B */
918       t = fold_build2 (LSHIFT_EXPR, TREE_TYPE (a),
919 		       build_int_cst (TREE_TYPE (a), 1), b);
920       name = make_temp_ssa_name (TREE_TYPE (a), NULL, "shifttmp");
921       stmt1 = gimple_build_assign (name, t);
922 
923       /* A & (1 << B) */
924       t = fold_build2 (BIT_AND_EXPR, TREE_TYPE (a), a, name);
925       name = make_temp_ssa_name (TREE_TYPE (a), NULL, "shifttmp");
926       stmt2 = gimple_build_assign (name, t);
927 
928       /* Replace the SSA_NAME we compare against zero.  Adjust
929 	 the type of zero accordingly.  */
930       SET_USE (use, name);
931       gimple_cond_set_rhs (cond_stmt,
932 			   build_int_cst_type (TREE_TYPE (name),
933 					       0));
934 
935       /* Don't use gsi_replace here, none of the new assignments sets
936 	 the variable originally set in stmt.  Move bsi to stmt1, and
937 	 then remove the original stmt, so that we get a chance to
938 	 retain debug info for it.  */
939       rsi = *bsi;
940       gsi_insert_before (bsi, stmt1, GSI_NEW_STMT);
941       gsi_insert_before (&rsi, stmt2, GSI_SAME_STMT);
942       gimple *to_release = gsi_stmt (rsi);
943       gsi_remove (&rsi, true);
944       release_defs (to_release);
945 
946       return stmt1;
947     }
948 
949   return stmt;
950 }
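
/* A sketch of the rewrite done above (made-up SSA names), assuming b_2 is
   loop invariant while a_1 is not:

     tmp_3 = a_1 >> b_2;             shifttmp_5 = 1 << b_2;   <-- returned
     tmp_4 = tmp_3 & 1;       -->    shifttmp_6 = a_1 & shifttmp_5;
     if (tmp_4 != 0) ...             if (shifttmp_6 != 0) ...  */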
951 
952 /* For each statement determines the outermost loop in which it is invariant,
953    the statements on whose motion it depends and the cost of the computation.
954    This information is stored in the LIM_DATA structure associated with
955    each statement.  */
956 class invariantness_dom_walker : public dom_walker
957 {
958 public:
959   invariantness_dom_walker (cdi_direction direction)
960     : dom_walker (direction) {}
961 
962   virtual edge before_dom_children (basic_block);
963 };
964 
965 /* Determine the outermost loops in which statements in basic block BB are
966    invariant, and record them in the LIM_DATA associated with the statements.
967    Callback for dom_walker.  */
968 
969 edge
970 invariantness_dom_walker::before_dom_children (basic_block bb)
971 {
972   enum move_pos pos;
973   gimple_stmt_iterator bsi;
974   gimple *stmt;
975   bool maybe_never = ALWAYS_EXECUTED_IN (bb) == NULL;
976   class loop *outermost = ALWAYS_EXECUTED_IN (bb);
977   struct lim_aux_data *lim_data;
978 
979   if (!loop_outer (bb->loop_father))
980     return NULL;
981 
982   if (dump_file && (dump_flags & TDF_DETAILS))
983     fprintf (dump_file, "Basic block %d (loop %d -- depth %d):\n\n",
984 	     bb->index, bb->loop_father->num, loop_depth (bb->loop_father));
985 
986   /* Look at PHI nodes, but only if there are at most two.
987      ???  We could relax this further by post-processing the inserted
988      code and transforming adjacent cond-exprs with the same predicate
989      to control flow again.  */
990   bsi = gsi_start_phis (bb);
991   if (!gsi_end_p (bsi)
992       && ((gsi_next (&bsi), gsi_end_p (bsi))
993 	  || (gsi_next (&bsi), gsi_end_p (bsi))))
994     for (bsi = gsi_start_phis (bb); !gsi_end_p (bsi); gsi_next (&bsi))
995       {
996 	stmt = gsi_stmt (bsi);
997 
998 	pos = movement_possibility (stmt);
999 	if (pos == MOVE_IMPOSSIBLE)
1000 	  continue;
1001 
1002 	lim_data = get_lim_data (stmt);
1003 	if (! lim_data)
1004 	  lim_data = init_lim_data (stmt);
1005 	lim_data->always_executed_in = outermost;
1006 
1007 	if (!determine_max_movement (stmt, false))
1008 	  {
1009 	    lim_data->max_loop = NULL;
1010 	    continue;
1011 	  }
1012 
1013 	if (dump_file && (dump_flags & TDF_DETAILS))
1014 	  {
1015 	    print_gimple_stmt (dump_file, stmt, 2);
1016 	    fprintf (dump_file, "  invariant up to level %d, cost %d.\n\n",
1017 		     loop_depth (lim_data->max_loop),
1018 		     lim_data->cost);
1019 	  }
1020 
1021 	if (lim_data->cost >= LIM_EXPENSIVE)
1022 	  set_profitable_level (stmt);
1023       }
1024 
1025   for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
1026     {
1027       stmt = gsi_stmt (bsi);
1028 
1029       pos = movement_possibility (stmt);
1030       if (pos == MOVE_IMPOSSIBLE)
1031 	{
1032 	  if (nonpure_call_p (stmt))
1033 	    {
1034 	      maybe_never = true;
1035 	      outermost = NULL;
1036 	    }
1037 	  /* Make sure to note always_executed_in for stores to make
1038 	     store-motion work.  */
1039 	  else if (stmt_makes_single_store (stmt))
1040 	    {
1041 	      struct lim_aux_data *lim_data = get_lim_data (stmt);
1042 	      if (! lim_data)
1043 		lim_data = init_lim_data (stmt);
1044 	      lim_data->always_executed_in = outermost;
1045 	    }
1046 	  continue;
1047 	}
1048 
1049       if (is_gimple_assign (stmt)
1050 	  && (get_gimple_rhs_class (gimple_assign_rhs_code (stmt))
1051 	      == GIMPLE_BINARY_RHS))
1052 	{
1053 	  tree op0 = gimple_assign_rhs1 (stmt);
1054 	  tree op1 = gimple_assign_rhs2 (stmt);
1055 	  class loop *ol1 = outermost_invariant_loop (op1,
1056 					loop_containing_stmt (stmt));
1057 
1058 	  /* If divisor is invariant, convert a/b to a*(1/b), allowing reciprocal
1059 	     to be hoisted out of loop, saving expensive divide.  */
1060 	  if (pos == MOVE_POSSIBLE
1061 	      && gimple_assign_rhs_code (stmt) == RDIV_EXPR
1062 	      && flag_unsafe_math_optimizations
1063 	      && !flag_trapping_math
1064 	      && ol1 != NULL
1065 	      && outermost_invariant_loop (op0, ol1) == NULL)
1066 	    stmt = rewrite_reciprocal (&bsi);
1067 
1068 	  /* If the shift count is invariant, convert (A >> B) & 1 to
1069 	     A & (1 << B) allowing the bit mask to be hoisted out of the loop
1070 	     saving an expensive shift.  */
1071 	  if (pos == MOVE_POSSIBLE
1072 	      && gimple_assign_rhs_code (stmt) == BIT_AND_EXPR
1073 	      && integer_onep (op1)
1074 	      && TREE_CODE (op0) == SSA_NAME
1075 	      && has_single_use (op0))
1076 	    stmt = rewrite_bittest (&bsi);
1077 	}
1078 
1079       lim_data = get_lim_data (stmt);
1080       if (! lim_data)
1081 	lim_data = init_lim_data (stmt);
1082       lim_data->always_executed_in = outermost;
1083 
1084       if (maybe_never && pos == MOVE_PRESERVE_EXECUTION)
1085 	continue;
1086 
1087       if (!determine_max_movement (stmt, pos == MOVE_PRESERVE_EXECUTION))
1088 	{
1089 	  lim_data->max_loop = NULL;
1090 	  continue;
1091 	}
1092 
1093       if (dump_file && (dump_flags & TDF_DETAILS))
1094 	{
1095 	  print_gimple_stmt (dump_file, stmt, 2);
1096 	  fprintf (dump_file, "  invariant up to level %d, cost %d.\n\n",
1097 		   loop_depth (lim_data->max_loop),
1098 		   lim_data->cost);
1099 	}
1100 
1101       if (lim_data->cost >= LIM_EXPENSIVE)
1102 	set_profitable_level (stmt);
1103     }
1104   return NULL;
1105 }
1106 
1107 /* Hoist the statements in basic block BB out of the loops prescribed by
1108    data stored in LIM_DATA structures associated with each statement.  Callback
1109    for walk_dominator_tree.  */
1110 
1111 unsigned int
1112 move_computations_worker (basic_block bb)
1113 {
1114   class loop *level;
1115   unsigned cost = 0;
1116   struct lim_aux_data *lim_data;
1117   unsigned int todo = 0;
1118 
1119   if (!loop_outer (bb->loop_father))
1120     return todo;
1121 
1122   for (gphi_iterator bsi = gsi_start_phis (bb); !gsi_end_p (bsi); )
1123     {
1124       gassign *new_stmt;
1125       gphi *stmt = bsi.phi ();
1126 
1127       lim_data = get_lim_data (stmt);
1128       if (lim_data == NULL)
1129 	{
1130 	  gsi_next (&bsi);
1131 	  continue;
1132 	}
1133 
1134       cost = lim_data->cost;
1135       level = lim_data->tgt_loop;
1136       clear_lim_data (stmt);
1137 
1138       if (!level)
1139 	{
1140 	  gsi_next (&bsi);
1141 	  continue;
1142 	}
1143 
1144       if (dump_file && (dump_flags & TDF_DETAILS))
1145 	{
1146 	  fprintf (dump_file, "Moving PHI node\n");
1147 	  print_gimple_stmt (dump_file, stmt, 0);
1148 	  fprintf (dump_file, "(cost %u) out of loop %d.\n\n",
1149 		   cost, level->num);
1150 	}
1151 
1152       if (gimple_phi_num_args (stmt) == 1)
1153 	{
1154 	  tree arg = PHI_ARG_DEF (stmt, 0);
1155 	  new_stmt = gimple_build_assign (gimple_phi_result (stmt),
1156 					  TREE_CODE (arg), arg);
1157 	}
1158       else
1159 	{
1160 	  basic_block dom = get_immediate_dominator (CDI_DOMINATORS, bb);
1161 	  gimple *cond = gsi_stmt (gsi_last_bb (dom));
1162 	  tree arg0 = NULL_TREE, arg1 = NULL_TREE, t;
1163 	  /* Get the PHI arguments corresponding to the true and false
1164 	     edges of COND.  */
1165 	  extract_true_false_args_from_phi (dom, stmt, &arg0, &arg1);
1166 	  gcc_assert (arg0 && arg1);
1167 	  t = build2 (gimple_cond_code (cond), boolean_type_node,
1168 		      gimple_cond_lhs (cond), gimple_cond_rhs (cond));
1169 	  new_stmt = gimple_build_assign (gimple_phi_result (stmt),
1170 					  COND_EXPR, t, arg0, arg1);
1171 	  todo |= TODO_cleanup_cfg;
1172 	}
1173       if (INTEGRAL_TYPE_P (TREE_TYPE (gimple_assign_lhs (new_stmt)))
1174 	  && (!ALWAYS_EXECUTED_IN (bb)
1175 	      || (ALWAYS_EXECUTED_IN (bb) != level
1176 		  && !flow_loop_nested_p (ALWAYS_EXECUTED_IN (bb), level))))
1177 	{
1178 	  tree lhs = gimple_assign_lhs (new_stmt);
1179 	  SSA_NAME_RANGE_INFO (lhs) = NULL;
1180 	}
1181       gsi_insert_on_edge (loop_preheader_edge (level), new_stmt);
1182       remove_phi_node (&bsi, false);
1183     }
1184 
1185   for (gimple_stmt_iterator bsi = gsi_start_bb (bb); !gsi_end_p (bsi); )
1186     {
1187       edge e;
1188 
1189       gimple *stmt = gsi_stmt (bsi);
1190 
1191       lim_data = get_lim_data (stmt);
1192       if (lim_data == NULL)
1193 	{
1194 	  gsi_next (&bsi);
1195 	  continue;
1196 	}
1197 
1198       cost = lim_data->cost;
1199       level = lim_data->tgt_loop;
1200       clear_lim_data (stmt);
1201 
1202       if (!level)
1203 	{
1204 	  gsi_next (&bsi);
1205 	  continue;
1206 	}
1207 
1208       /* We do not really want to move conditionals out of the loop; we just
1209 	 placed them here to force their operands to be moved if necessary.  */
1210       if (gimple_code (stmt) == GIMPLE_COND)
1211 	continue;
1212 
1213       if (dump_file && (dump_flags & TDF_DETAILS))
1214 	{
1215 	  fprintf (dump_file, "Moving statement\n");
1216 	  print_gimple_stmt (dump_file, stmt, 0);
1217 	  fprintf (dump_file, "(cost %u) out of loop %d.\n\n",
1218 		   cost, level->num);
1219 	}
1220 
1221       e = loop_preheader_edge (level);
1222       gcc_assert (!gimple_vdef (stmt));
1223       if (gimple_vuse (stmt))
1224 	{
1225 	  /* The new VUSE is the one from the virtual PHI in the loop
1226 	     header or the one already present.  */
1227 	  gphi_iterator gsi2;
1228 	  for (gsi2 = gsi_start_phis (e->dest);
1229 	       !gsi_end_p (gsi2); gsi_next (&gsi2))
1230 	    {
1231 	      gphi *phi = gsi2.phi ();
1232 	      if (virtual_operand_p (gimple_phi_result (phi)))
1233 		{
1234 		  SET_USE (gimple_vuse_op (stmt),
1235 			   PHI_ARG_DEF_FROM_EDGE (phi, e));
1236 		  break;
1237 		}
1238 	    }
1239 	}
1240       gsi_remove (&bsi, false);
1241       if (gimple_has_lhs (stmt)
1242 	  && TREE_CODE (gimple_get_lhs (stmt)) == SSA_NAME
1243 	  && INTEGRAL_TYPE_P (TREE_TYPE (gimple_get_lhs (stmt)))
1244 	  && (!ALWAYS_EXECUTED_IN (bb)
1245 	      || !(ALWAYS_EXECUTED_IN (bb) == level
1246 		   || flow_loop_nested_p (ALWAYS_EXECUTED_IN (bb), level))))
1247 	{
1248 	  tree lhs = gimple_get_lhs (stmt);
1249 	  SSA_NAME_RANGE_INFO (lhs) = NULL;
1250 	}
1251       /* In case this is a stmt that is not unconditionally executed
1252          when the target loop header is executed and the stmt may
1253 	 invoke undefined integer or pointer overflow, rewrite it to
1254 	 unsigned arithmetic.  */
1255       if (is_gimple_assign (stmt)
1256 	  && INTEGRAL_TYPE_P (TREE_TYPE (gimple_assign_lhs (stmt)))
1257 	  && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (gimple_assign_lhs (stmt)))
1258 	  && arith_code_with_undefined_signed_overflow
1259 	       (gimple_assign_rhs_code (stmt))
1260 	  && (!ALWAYS_EXECUTED_IN (bb)
1261 	      || !(ALWAYS_EXECUTED_IN (bb) == level
1262 		   || flow_loop_nested_p (ALWAYS_EXECUTED_IN (bb), level))))
1263 	gsi_insert_seq_on_edge (e, rewrite_to_defined_overflow (stmt));
1264       else
1265 	gsi_insert_on_edge (e, stmt);
1266     }
1267 
1268   return todo;
1269 }
1270 
1271 /* Hoist the statements out of the loops prescribed by data stored in
1272    LIM_DATA structures associated with each statement.  */
1273 
1274 static unsigned int
1275 move_computations (void)
1276 {
1277   int *rpo = XNEWVEC (int, last_basic_block_for_fn (cfun));
1278   int n = pre_and_rev_post_order_compute_fn (cfun, NULL, rpo, false);
1279   unsigned todo = 0;
1280 
1281   for (int i = 0; i < n; ++i)
1282     todo |= move_computations_worker (BASIC_BLOCK_FOR_FN (cfun, rpo[i]));
1283 
1284   free (rpo);
1285 
1286   gsi_commit_edge_inserts ();
1287   if (need_ssa_update_p (cfun))
1288     rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
1289 
1290   return todo;
1291 }
1292 
1293 /* Checks whether the statement defining variable *INDEX can be hoisted
1294    out of the loop passed in DATA.  Callback for for_each_index.  */
1295 
1296 static bool
1297 may_move_till (tree ref, tree *index, void *data)
1298 {
1299   class loop *loop = (class loop *) data, *max_loop;
1300 
1301   /* If REF is an array reference, check also that the step and the lower
1302      bound are invariant in LOOP.  */
1303   if (TREE_CODE (ref) == ARRAY_REF)
1304     {
1305       tree step = TREE_OPERAND (ref, 3);
1306       tree lbound = TREE_OPERAND (ref, 2);
1307 
1308       max_loop = outermost_invariant_loop (step, loop);
1309       if (!max_loop)
1310 	return false;
1311 
1312       max_loop = outermost_invariant_loop (lbound, loop);
1313       if (!max_loop)
1314 	return false;
1315     }
1316 
1317   max_loop = outermost_invariant_loop (*index, loop);
1318   if (!max_loop)
1319     return false;
1320 
1321   return true;
1322 }
1323 
1324 /* If OP is an SSA name, force the statement that defines it to be
1325    moved out of the LOOP.  ORIG_LOOP is the loop in which OP is used.  */
1326 
1327 static void
1328 force_move_till_op (tree op, class loop *orig_loop, class loop *loop)
1329 {
1330   gimple *stmt;
1331 
1332   if (!op
1333       || is_gimple_min_invariant (op))
1334     return;
1335 
1336   gcc_assert (TREE_CODE (op) == SSA_NAME);
1337 
1338   stmt = SSA_NAME_DEF_STMT (op);
1339   if (gimple_nop_p (stmt))
1340     return;
1341 
1342   set_level (stmt, orig_loop, loop);
1343 }
1344 
1345 /* Forces the statements defining invariants in REF (and *INDEX) to be moved
1346    out of the LOOP.  The reference REF is used in the loop ORIG_LOOP.  Callback
1347    for for_each_index.  */
1348 
1349 struct fmt_data
1350 {
1351   class loop *loop;
1352   class loop *orig_loop;
1353 };
1354 
1355 static bool
1356 force_move_till (tree ref, tree *index, void *data)
1357 {
1358   struct fmt_data *fmt_data = (struct fmt_data *) data;
1359 
1360   if (TREE_CODE (ref) == ARRAY_REF)
1361     {
1362       tree step = TREE_OPERAND (ref, 3);
1363       tree lbound = TREE_OPERAND (ref, 2);
1364 
1365       force_move_till_op (step, fmt_data->orig_loop, fmt_data->loop);
1366       force_move_till_op (lbound, fmt_data->orig_loop, fmt_data->loop);
1367     }
1368 
1369   force_move_till_op (*index, fmt_data->orig_loop, fmt_data->loop);
1370 
1371   return true;
1372 }
1373 
1374 /* A function to free the mem_ref object MEM.  */
1375 
1376 static void
1377 memref_free (class im_mem_ref *mem)
1378 {
1379   mem->accesses_in_loop.release ();
1380 }
1381 
1382 /* Allocates and returns a memory reference description for MEM whose hash
1383    value is HASH and id is ID.  */
1384 
1385 static im_mem_ref *
1386 mem_ref_alloc (ao_ref *mem, unsigned hash, unsigned id)
1387 {
1388   im_mem_ref *ref = XOBNEW (&mem_ref_obstack, class im_mem_ref);
1389   if (mem)
1390     ref->mem = *mem;
1391   else
1392     ao_ref_init (&ref->mem, error_mark_node);
1393   ref->id = id;
1394   ref->ref_canonical = false;
1395   ref->ref_decomposed = false;
1396   ref->hash = hash;
1397   ref->stored = NULL;
1398   bitmap_initialize (&ref->indep_loop, &lim_bitmap_obstack);
1399   bitmap_initialize (&ref->dep_loop, &lim_bitmap_obstack);
1400   ref->accesses_in_loop.create (1);
1401 
1402   return ref;
1403 }
1404 
1405 /* Records memory reference location *LOC in the memory reference
1406    description REF.  The reference occurs in statement STMT.  */
1407 
1408 static void
1409 record_mem_ref_loc (im_mem_ref *ref, gimple *stmt, tree *loc)
1410 {
1411   mem_ref_loc aref;
1412   aref.stmt = stmt;
1413   aref.ref = loc;
1414   ref->accesses_in_loop.safe_push (aref);
1415 }
1416 
1417 /* Set the LOOP bit in REF's stored bitmap, allocating it if
1418    necessary.  Return whether the bit was changed.  */
1419 
1420 static bool
1421 set_ref_stored_in_loop (im_mem_ref *ref, class loop *loop)
1422 {
1423   if (!ref->stored)
1424     ref->stored = BITMAP_ALLOC (&lim_bitmap_obstack);
1425   return bitmap_set_bit (ref->stored, loop->num);
1426 }
1427 
1428 /* Marks reference REF as stored in LOOP.  */
1429 
1430 static void
1431 mark_ref_stored (im_mem_ref *ref, class loop *loop)
1432 {
1433   while (loop != current_loops->tree_root
1434 	 && set_ref_stored_in_loop (ref, loop))
1435     loop = loop_outer (loop);
1436 }
1437 
1438 /* Gathers memory references in statement STMT in LOOP, storing the
1439    information about them in the memory_accesses structure.  Marks
1440    the vops accessed through unrecognized statements there as
1441    well.  */
1442 
1443 static void
1444 gather_mem_refs_stmt (class loop *loop, gimple *stmt)
1445 {
1446   tree *mem = NULL;
1447   hashval_t hash;
1448   im_mem_ref **slot;
1449   im_mem_ref *ref;
1450   bool is_stored;
1451   unsigned id;
1452 
1453   if (!gimple_vuse (stmt))
1454     return;
1455 
1456   mem = simple_mem_ref_in_stmt (stmt, &is_stored);
1457   if (!mem)
1458     {
1459       /* We use the shared mem_ref for all unanalyzable refs.  */
1460       id = UNANALYZABLE_MEM_ID;
1461       ref = memory_accesses.refs_list[id];
1462       if (dump_file && (dump_flags & TDF_DETAILS))
1463 	{
1464 	  fprintf (dump_file, "Unanalyzed memory reference %u: ", id);
1465 	  print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
1466 	}
1467       is_stored = gimple_vdef (stmt);
1468     }
1469   else
1470     {
1471       /* We are looking for equal refs that might differ in structure
1472          such as a.b vs. MEM[&a + 4].  So we key off the ao_ref but
1473 	 make sure we can canonicalize the ref in the hashtable if
1474 	 non-operand_equal_p refs are found.  For the lookup we mark
1475 	 the case we want strict equality with aor.max_size == -1.  */
1476       ao_ref aor;
1477       ao_ref_init (&aor, *mem);
1478       ao_ref_base (&aor);
1479       ao_ref_alias_set (&aor);
1480       HOST_WIDE_INT offset, size, max_size;
1481       poly_int64 saved_maxsize = aor.max_size, mem_off;
1482       tree mem_base;
1483       bool ref_decomposed;
1484       if (aor.max_size_known_p ()
1485 	  && aor.offset.is_constant (&offset)
1486 	  && aor.size.is_constant (&size)
1487 	  && aor.max_size.is_constant (&max_size)
1488 	  && size == max_size
1489 	  && (size % BITS_PER_UNIT) == 0
1490 	  /* We're canonicalizing to a MEM where TYPE_SIZE specifies the
1491 	     size.  Make sure this is consistent with the extraction.  */
1492 	  && poly_int_tree_p (TYPE_SIZE (TREE_TYPE (*mem)))
1493 	  && known_eq (wi::to_poly_offset (TYPE_SIZE (TREE_TYPE (*mem))),
1494 		       aor.size)
1495 	  && (mem_base = get_addr_base_and_unit_offset (aor.ref, &mem_off)))
1496 	{
1497 	  ref_decomposed = true;
1498 	  hash = iterative_hash_expr (ao_ref_base (&aor), 0);
1499 	  hash = iterative_hash_host_wide_int (offset, hash);
1500 	  hash = iterative_hash_host_wide_int (size, hash);
1501 	}
1502       else
1503 	{
1504 	  ref_decomposed = false;
1505 	  hash = iterative_hash_expr (aor.ref, 0);
1506 	  aor.max_size = -1;
1507 	}
1508       slot = memory_accesses.refs->find_slot_with_hash (&aor, hash, INSERT);
1509       aor.max_size = saved_maxsize;
1510       if (*slot)
1511 	{
1512 	  if (!(*slot)->ref_canonical
1513 	      && !operand_equal_p (*mem, (*slot)->mem.ref, 0))
1514 	    {
1515 	      /* If we didn't yet canonicalize the hashtable ref (which
1516 	         we'll end up using for code insertion) and hit a second
1517 		 equal ref that is not structurally equivalent create
1518 		 a canonical ref which is a bare MEM_REF.  */
1519 	      if (TREE_CODE (*mem) == MEM_REF
1520 		  || TREE_CODE (*mem) == TARGET_MEM_REF)
1521 		{
1522 		  (*slot)->mem.ref = *mem;
1523 		  (*slot)->mem.base_alias_set = ao_ref_base_alias_set (&aor);
1524 		}
1525 	      else
1526 		{
1527 		  tree ref_alias_type = reference_alias_ptr_type (*mem);
1528 		  unsigned int ref_align = get_object_alignment (*mem);
1529 		  tree ref_type = TREE_TYPE (*mem);
1530 		  tree tmp = build1 (ADDR_EXPR, ptr_type_node,
1531 				     unshare_expr (mem_base));
1532 		  if (TYPE_ALIGN (ref_type) != ref_align)
1533 		    ref_type = build_aligned_type (ref_type, ref_align);
1534 		  (*slot)->mem.ref
1535 		    = fold_build2 (MEM_REF, ref_type, tmp,
1536 				   build_int_cst (ref_alias_type, mem_off));
1537 		  if ((*slot)->mem.volatile_p)
1538 		    TREE_THIS_VOLATILE ((*slot)->mem.ref) = 1;
1539 		  gcc_checking_assert (TREE_CODE ((*slot)->mem.ref) == MEM_REF
1540 				       && is_gimple_mem_ref_addr
1541 				            (TREE_OPERAND ((*slot)->mem.ref,
1542 							   0)));
1543 		  (*slot)->mem.base_alias_set = (*slot)->mem.ref_alias_set;
1544 		}
1545 	      (*slot)->ref_canonical = true;
1546 	    }
1547 	  ref = *slot;
1548 	  id = ref->id;
1549 	}
1550       else
1551 	{
1552 	  id = memory_accesses.refs_list.length ();
1553 	  ref = mem_ref_alloc (&aor, hash, id);
1554 	  ref->ref_decomposed = ref_decomposed;
1555 	  memory_accesses.refs_list.safe_push (ref);
1556 	  *slot = ref;
1557 
1558 	  if (dump_file && (dump_flags & TDF_DETAILS))
1559 	    {
1560 	      fprintf (dump_file, "Memory reference %u: ", id);
1561 	      print_generic_expr (dump_file, ref->mem.ref, TDF_SLIM);
1562 	      fprintf (dump_file, "\n");
1563 	    }
1564 	}
1565 
1566       record_mem_ref_loc (ref, stmt, mem);
1567     }
1568   bitmap_set_bit (&memory_accesses.refs_in_loop[loop->num], ref->id);
1569   if (is_stored)
1570     {
1571       bitmap_set_bit (&memory_accesses.refs_stored_in_loop[loop->num], ref->id);
1572       mark_ref_stored (ref, loop);
1573     }
1574   init_lim_data (stmt)->ref = ref->id;
1575   return;
1576 }
1577 
1578 static unsigned *bb_loop_postorder;
1579 
1580 /* qsort sort function to sort blocks by their loop fathers' postorder.  */
1581 
1582 static int
1583 sort_bbs_in_loop_postorder_cmp (const void *bb1_, const void *bb2_,
1584 				void *bb_loop_postorder_)
1585 {
1586   unsigned *bb_loop_postorder = (unsigned *)bb_loop_postorder_;
1587   basic_block bb1 = *(const basic_block *)bb1_;
1588   basic_block bb2 = *(const basic_block *)bb2_;
1589   class loop *loop1 = bb1->loop_father;
1590   class loop *loop2 = bb2->loop_father;
1591   if (loop1->num == loop2->num)
1592     return bb1->index - bb2->index;
1593   return bb_loop_postorder[loop1->num] < bb_loop_postorder[loop2->num] ? -1 : 1;
1594 }
1595 
1596 /* qsort sort function to sort ref locs by their loop fathers' postorder.  */
1597 
1598 static int
1599 sort_locs_in_loop_postorder_cmp (const void *loc1_, const void *loc2_,
1600 				 void *bb_loop_postorder_)
1601 {
1602   unsigned *bb_loop_postorder = (unsigned *)bb_loop_postorder_;
1603   const mem_ref_loc *loc1 = (const mem_ref_loc *)loc1_;
1604   const mem_ref_loc *loc2 = (const mem_ref_loc *)loc2_;
1605   class loop *loop1 = gimple_bb (loc1->stmt)->loop_father;
1606   class loop *loop2 = gimple_bb (loc2->stmt)->loop_father;
1607   if (loop1->num == loop2->num)
1608     return 0;
1609   return bb_loop_postorder[loop1->num] < bb_loop_postorder[loop2->num] ? -1 : 1;
1610 }
1611 
1612 /* Gathers memory references in loops.  */
1613 
1614 static void
1615 analyze_memory_references (void)
1616 {
1617   gimple_stmt_iterator bsi;
1618   basic_block bb, *bbs;
1619   class loop *loop, *outer;
1620   unsigned i, n;
1621 
1622   /* Collect all basic blocks in loops and sort them by their
1623      loop's postorder.  */
1624   i = 0;
1625   bbs = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS);
1626   FOR_EACH_BB_FN (bb, cfun)
1627     if (bb->loop_father != current_loops->tree_root)
1628       bbs[i++] = bb;
1629   n = i;
1630   gcc_sort_r (bbs, n, sizeof (basic_block), sort_bbs_in_loop_postorder_cmp,
1631 	      bb_loop_postorder);
1632 
1633   /* Visit blocks in loop postorder and assign mem-ref IDs in that order.
1634      That results in better locality for all the bitmaps.  */
1635   for (i = 0; i < n; ++i)
1636     {
1637       basic_block bb = bbs[i];
1638       for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
1639         gather_mem_refs_stmt (bb->loop_father, gsi_stmt (bsi));
1640     }
1641 
1642   /* Sort the location list of gathered memory references after their
1643      loop postorder number.  */
1644   im_mem_ref *ref;
1645   FOR_EACH_VEC_ELT (memory_accesses.refs_list, i, ref)
1646     ref->accesses_in_loop.sort (sort_locs_in_loop_postorder_cmp,
1647 				bb_loop_postorder);
1648 
1649   free (bbs);
1650 
1651   /* Propagate the information about accessed memory references up
1652      the loop hierarchy.  */
1653   FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
1654     {
1655       /* Finalize the overall touched references (including subloops).  */
1656       bitmap_ior_into (&memory_accesses.all_refs_stored_in_loop[loop->num],
1657 		       &memory_accesses.refs_stored_in_loop[loop->num]);
1658 
1659       /* Propagate the information about accessed memory references up
1660 	 the loop hierarchy.  */
1661       outer = loop_outer (loop);
1662       if (outer == current_loops->tree_root)
1663 	continue;
1664 
1665       bitmap_ior_into (&memory_accesses.all_refs_stored_in_loop[outer->num],
1666 		       &memory_accesses.all_refs_stored_in_loop[loop->num]);
1667     }
1668 }
1669 
1670 /* Returns true if MEM1 and MEM2 may alias.  TTAE_CACHE is used as a cache in
1671    tree_to_aff_combination_expand.  */
1672 
1673 static bool
1674 mem_refs_may_alias_p (im_mem_ref *mem1, im_mem_ref *mem2,
1675 		      hash_map<tree, name_expansion *> **ttae_cache)
1676 {
1677   /* Perform BASE + OFFSET analysis -- if MEM1 and MEM2 are based on the same
1678      object and their offsets differ in such a way that the locations cannot
1679      overlap, then they cannot alias.  */
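  /* A small hypothetical example: for MEM[p_1 + 4] of size 4 and
     MEM[p_1 + 12] of size 4 the expanded offsets differ by 8, which is at
     least the size of the first access, so the locations cannot overlap and
     the references do not alias.  */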
1680   poly_widest_int size1, size2;
1681   aff_tree off1, off2;
1682 
1683   /* Perform basic offset and type-based disambiguation.  */
1684   if (!refs_may_alias_p_1 (&mem1->mem, &mem2->mem, true))
1685     return false;
1686 
1687   /* The expansion of addresses may be a bit expensive, thus we only do
1688      the check at -O2 and higher optimization levels.  */
1689   if (optimize < 2)
1690     return true;
1691 
1692   get_inner_reference_aff (mem1->mem.ref, &off1, &size1);
1693   get_inner_reference_aff (mem2->mem.ref, &off2, &size2);
1694   aff_combination_expand (&off1, ttae_cache);
1695   aff_combination_expand (&off2, ttae_cache);
1696   aff_combination_scale (&off1, -1);
1697   aff_combination_add (&off2, &off1);
1698 
1699   if (aff_comb_cannot_overlap_p (&off2, size1, size2))
1700     return false;
1701 
1702   return true;
1703 }
1704 
1705 /* Compare function for bsearch searching for reference locations
1706    in a loop.  */
1707 
1708 static int
1709 find_ref_loc_in_loop_cmp (const void *loop_, const void *loc_,
1710 			  void *bb_loop_postorder_)
1711 {
1712   unsigned *bb_loop_postorder = (unsigned *)bb_loop_postorder_;
1713   class loop *loop = (class loop *)const_cast<void *>(loop_);
1714   mem_ref_loc *loc = (mem_ref_loc *)const_cast<void *>(loc_);
1715   class loop *loc_loop = gimple_bb (loc->stmt)->loop_father;
1716   if (loop->num  == loc_loop->num
1717       || flow_loop_nested_p (loop, loc_loop))
1718     return 0;
1719   return (bb_loop_postorder[loop->num] < bb_loop_postorder[loc_loop->num]
1720 	  ? -1 : 1);
1721 }
1722 
1723 /* Iterates over all locations of REF in LOOP and its subloops calling
1724    fn.operator() with the location as argument.  When that operator
1725    returns true the iteration is stopped and true is returned.
1726    Otherwise false is returned.  */
1727 
1728 template <typename FN>
1729 static bool
1730 for_all_locs_in_loop (class loop *loop, im_mem_ref *ref, FN fn)
1731 {
1732   unsigned i;
1733   mem_ref_loc *loc;
1734 
1735   /* Search for the cluster of locs in the accesses_in_loop vector,
1736      which is sorted after the postorder index of the loop father.  */
1737   loc = ref->accesses_in_loop.bsearch (loop, find_ref_loc_in_loop_cmp,
1738 				       bb_loop_postorder);
1739   if (!loc)
1740     return false;
1741 
1742   /* We have found one location inside loop or its sub-loops.  Iterate
1743      both forward and backward to cover the whole cluster.  */
1744   i = loc - ref->accesses_in_loop.address ();
1745   while (i > 0)
1746     {
1747       --i;
1748       mem_ref_loc *l = &ref->accesses_in_loop[i];
1749       if (!flow_bb_inside_loop_p (loop, gimple_bb (l->stmt)))
1750 	break;
1751       if (fn (l))
1752 	return true;
1753     }
1754   for (i = loc - ref->accesses_in_loop.address ();
1755        i < ref->accesses_in_loop.length (); ++i)
1756     {
1757       mem_ref_loc *l = &ref->accesses_in_loop[i];
1758       if (!flow_bb_inside_loop_p (loop, gimple_bb (l->stmt)))
1759 	break;
1760       if (fn (l))
1761 	return true;
1762     }
1763 
1764   return false;
1765 }
1766 
1767 /* Rewrites location LOC by TMP_VAR.  */
1768 
1769 class rewrite_mem_ref_loc
1770 {
1771 public:
1772   rewrite_mem_ref_loc (tree tmp_var_) : tmp_var (tmp_var_) {}
1773   bool operator () (mem_ref_loc *loc);
1774   tree tmp_var;
1775 };
1776 
1777 bool
1778 rewrite_mem_ref_loc::operator () (mem_ref_loc *loc)
1779 {
1780   *loc->ref = tmp_var;
1781   update_stmt (loc->stmt);
1782   return false;
1783 }
1784 
1785 /* Rewrites all references to REF in LOOP by variable TMP_VAR.  */
1786 
1787 static void
1788 rewrite_mem_refs (class loop *loop, im_mem_ref *ref, tree tmp_var)
1789 {
1790   for_all_locs_in_loop (loop, ref, rewrite_mem_ref_loc (tmp_var));
1791 }
1792 
1793 /* Stores the first reference location in LOCP.  */
1794 
1795 class first_mem_ref_loc_1
1796 {
1797 public:
1798   first_mem_ref_loc_1 (mem_ref_loc **locp_) : locp (locp_) {}
1799   bool operator () (mem_ref_loc *loc);
1800   mem_ref_loc **locp;
1801 };
1802 
1803 bool
1804 first_mem_ref_loc_1::operator () (mem_ref_loc *loc)
1805 {
1806   *locp = loc;
1807   return true;
1808 }
1809 
1810 /* Returns the first reference location to REF in LOOP.  */
1811 
1812 static mem_ref_loc *
1813 first_mem_ref_loc (class loop *loop, im_mem_ref *ref)
1814 {
1815   mem_ref_loc *locp = NULL;
1816   for_all_locs_in_loop (loop, ref, first_mem_ref_loc_1 (&locp));
1817   return locp;
1818 }
1819 
1820 struct prev_flag_edges {
1821   /* Edge to insert new flag comparison code.  */
1822   edge append_cond_position;
1823 
1824   /* Edge for fall through from previous flag comparison.  */
1825   edge last_cond_fallthru;
1826 };
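
/* When several store-motion candidates are sunk on the same exit edge, the
   AUX data above chains their conditional stores, roughly (a sketch):

     if (a_flag)
       A = a_lsm;
     if (b_flag)
       B = b_lsm;

   keeping the stores in their original order; see the note about aliasing
   stores in execute_sm_if_changed.  */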
1827 
1828 /* Helper function for execute_sm.  Emit code to store TMP_VAR into
1829    MEM along edge EX.
1830 
1831    The store is only done if MEM has changed.  We do this so no
1832    changes to MEM occur on code paths that did not originally store
1833    into it.
1834 
1835    The common case for execute_sm will transform:
1836 
1837      for (...) {
1838        if (foo)
1839          stuff;
1840        else
1841          MEM = TMP_VAR;
1842      }
1843 
1844    into:
1845 
1846      lsm = MEM;
1847      for (...) {
1848        if (foo)
1849          stuff;
1850        else
1851          lsm = TMP_VAR;
1852      }
1853      MEM = lsm;
1854 
1855   This function will generate:
1856 
1857      lsm = MEM;
1858 
1859      lsm_flag = false;
1860      ...
1861      for (...) {
1862        if (foo)
1863          stuff;
1864        else {
1865          lsm = TMP_VAR;
1866          lsm_flag = true;
1867        }
1868      }
1869      if (lsm_flag)	<--
1870        MEM = lsm;	<--
1871 */
1872 
1873 static void
1874 execute_sm_if_changed (edge ex, tree mem, tree tmp_var, tree flag,
1875 		       edge preheader, hash_set <basic_block> *flag_bbs)
1876 {
1877   basic_block new_bb, then_bb, old_dest;
1878   bool loop_has_only_one_exit;
1879   edge then_old_edge, orig_ex = ex;
1880   gimple_stmt_iterator gsi;
1881   gimple *stmt;
1882   struct prev_flag_edges *prev_edges = (struct prev_flag_edges *) ex->aux;
1883   bool irr = ex->flags & EDGE_IRREDUCIBLE_LOOP;
1884 
1885   profile_count count_sum = profile_count::zero ();
1886   int nbbs = 0, ncount = 0;
1887   profile_probability flag_probability = profile_probability::uninitialized ();
1888 
1889   /* The flag is set in FLAG_BBS.  Determine the probability that the flag
1890      will be true at the loop exit.
1891 
1892      This code may look fancy, but it cannot update the profile very
1893      realistically because we do not know the probability that the flag will
1894      be true at a given loop exit.
1895 
1896      We look for two interesting extremes:
1897        - when the exit is dominated by a block setting the flag, we know the
1898          flag will always be true.  This is a common case.
1899        - when all blocks setting the flag have a very low frequency, we know
1900          it will likely be false.
1901      In all other cases we default to 2/3 for the flag being true.  */
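
  /* A purely illustrative example: with a preheader count of 100, a sum of
     40 for the counts of the flag-setting blocks and no such block
     dominating the exit, the flag probability becomes 40%, which is below
     the 2/3 cap and is used as-is.  */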
1902 
1903   for (hash_set<basic_block>::iterator it = flag_bbs->begin ();
1904        it != flag_bbs->end (); ++it)
1905     {
1906        if ((*it)->count.initialized_p ())
1907          count_sum += (*it)->count, ncount ++;
1908        if (dominated_by_p (CDI_DOMINATORS, ex->src, *it))
1909 	 flag_probability = profile_probability::always ();
1910        nbbs++;
1911     }
1912 
1913   profile_probability cap = profile_probability::always ().apply_scale (2, 3);
1914 
1915   if (flag_probability.initialized_p ())
1916     ;
1917   else if (ncount == nbbs
1918 	   && preheader->count () >= count_sum && preheader->count ().nonzero_p ())
1919     {
1920       flag_probability = count_sum.probability_in (preheader->count ());
1921       if (flag_probability > cap)
1922 	flag_probability = cap;
1923     }
1924 
1925   if (!flag_probability.initialized_p ())
1926     flag_probability = cap;
1927 
1928   /* ?? Insert store after previous store if applicable.  See note
1929      below.  */
1930   if (prev_edges)
1931     ex = prev_edges->append_cond_position;
1932 
1933   loop_has_only_one_exit = single_pred_p (ex->dest);
1934 
1935   if (loop_has_only_one_exit)
1936     ex = split_block_after_labels (ex->dest);
1937   else
1938     {
1939       for (gphi_iterator gpi = gsi_start_phis (ex->dest);
1940 	   !gsi_end_p (gpi); gsi_next (&gpi))
1941 	{
1942 	  gphi *phi = gpi.phi ();
1943 	  if (virtual_operand_p (gimple_phi_result (phi)))
1944 	    continue;
1945 
1946 	  /* When the destination has a non-virtual PHI node with multiple
1947 	     predecessors make sure we preserve the PHI structure by
1948 	     forcing a forwarder block so that hoisting of that PHI will
1949 	     still work.  */
1950 	  split_edge (ex);
1951 	  break;
1952 	}
1953     }
1954 
1955   old_dest = ex->dest;
1956   new_bb = split_edge (ex);
1957   then_bb = create_empty_bb (new_bb);
1958   then_bb->count = new_bb->count.apply_probability (flag_probability);
1959   if (irr)
1960     then_bb->flags = BB_IRREDUCIBLE_LOOP;
1961   add_bb_to_loop (then_bb, new_bb->loop_father);
1962 
1963   gsi = gsi_start_bb (new_bb);
1964   stmt = gimple_build_cond (NE_EXPR, flag, boolean_false_node,
1965 			    NULL_TREE, NULL_TREE);
1966   gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
1967 
1968   gsi = gsi_start_bb (then_bb);
1969   /* Insert actual store.  */
1970   stmt = gimple_build_assign (unshare_expr (mem), tmp_var);
1971   gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
1972 
1973   edge e1 = single_succ_edge (new_bb);
1974   edge e2 = make_edge (new_bb, then_bb,
1975 	               EDGE_TRUE_VALUE | (irr ? EDGE_IRREDUCIBLE_LOOP : 0));
1976   e2->probability = flag_probability;
1977 
1978   e1->flags |= EDGE_FALSE_VALUE | (irr ? EDGE_IRREDUCIBLE_LOOP : 0);
1979   e1->flags &= ~EDGE_FALLTHRU;
1980 
1981   e1->probability = flag_probability.invert ();
1982 
1983   then_old_edge = make_single_succ_edge (then_bb, old_dest,
1984 			     EDGE_FALLTHRU | (irr ? EDGE_IRREDUCIBLE_LOOP : 0));
1985 
1986   set_immediate_dominator (CDI_DOMINATORS, then_bb, new_bb);
1987 
1988   if (prev_edges)
1989     {
1990       basic_block prevbb = prev_edges->last_cond_fallthru->src;
1991       redirect_edge_succ (prev_edges->last_cond_fallthru, new_bb);
1992       set_immediate_dominator (CDI_DOMINATORS, new_bb, prevbb);
1993       set_immediate_dominator (CDI_DOMINATORS, old_dest,
1994 			       recompute_dominator (CDI_DOMINATORS, old_dest));
1995     }
1996 
1997   /* ?? Because stores may alias, they must happen in the exact
1998      sequence they originally happened.  Save the position right after
1999      the (_lsm) store we just created so we can continue appending after
2000      it and maintain the original order.  */
2001   {
2002     struct prev_flag_edges *p;
2003 
2004     if (orig_ex->aux)
2005       orig_ex->aux = NULL;
2006     alloc_aux_for_edge (orig_ex, sizeof (struct prev_flag_edges));
2007     p = (struct prev_flag_edges *) orig_ex->aux;
2008     p->append_cond_position = then_old_edge;
2009     p->last_cond_fallthru = find_edge (new_bb, old_dest);
2010     orig_ex->aux = (void *) p;
2011   }
2012 
2013   if (!loop_has_only_one_exit)
2014     for (gphi_iterator gpi = gsi_start_phis (old_dest);
2015 	 !gsi_end_p (gpi); gsi_next (&gpi))
2016       {
2017 	gphi *phi = gpi.phi ();
2018 	unsigned i;
2019 
2020 	for (i = 0; i < gimple_phi_num_args (phi); i++)
2021 	  if (gimple_phi_arg_edge (phi, i)->src == new_bb)
2022 	    {
2023 	      tree arg = gimple_phi_arg_def (phi, i);
2024 	      add_phi_arg (phi, arg, then_old_edge, UNKNOWN_LOCATION);
2025 	      update_stmt (phi);
2026 	    }
2027       }
2028 }
2029 
2030 /* When REF is set on the location, set a flag indicating the store.  */
2031 
2032 class sm_set_flag_if_changed
2033 {
2034 public:
2035   sm_set_flag_if_changed (tree flag_, hash_set <basic_block> *bbs_)
2036 	 : flag (flag_), bbs (bbs_) {}
2037   bool operator () (mem_ref_loc *loc);
2038   tree flag;
2039   hash_set <basic_block> *bbs;
2040 };
2041 
2042 bool
2043 sm_set_flag_if_changed::operator () (mem_ref_loc *loc)
2044 {
2045   /* Only set the flag for writes.  */
2046   if (is_gimple_assign (loc->stmt)
2047       && gimple_assign_lhs_ptr (loc->stmt) == loc->ref)
2048     {
2049       gimple_stmt_iterator gsi = gsi_for_stmt (loc->stmt);
2050       gimple *stmt = gimple_build_assign (flag, boolean_true_node);
2051       gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
2052       bbs->add (gimple_bb (stmt));
2053     }
2054   return false;
2055 }
2056 
2057 /* Helper function for execute_sm.  On every location where REF is
2058    set, set an appropriate flag indicating the store.  */
2059 
2060 static tree
2061 execute_sm_if_changed_flag_set (class loop *loop, im_mem_ref *ref,
2062 				hash_set <basic_block> *bbs)
2063 {
2064   tree flag;
2065   char *str = get_lsm_tmp_name (ref->mem.ref, ~0, "_flag");
2066   flag = create_tmp_reg (boolean_type_node, str);
2067   for_all_locs_in_loop (loop, ref, sm_set_flag_if_changed (flag, bbs));
2068   return flag;
2069 }
2070 
2071 /* Executes store motion of memory reference REF from LOOP.
2072    Exits from the LOOP are stored in EXITS.  The initialization of the
2073    temporary variable is put in the preheader of the loop, and assignments
2074    to the reference from the temporary variable are emitted to exits.  */
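
/* For the single-threaded model this amounts to the classical store motion
   sketched as

     while (cond)
       MEM = f (MEM);

   becoming

     lsm = MEM;
     while (cond)
       lsm = f (lsm);
     MEM = lsm;		<-- emitted on every exit edge

   The flag-guarded variant used for the multi-threaded model is illustrated
   above execute_sm_if_changed.  */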
2075 
2076 static void
2077 execute_sm (class loop *loop, vec<edge> exits, im_mem_ref *ref)
2078 {
2079   tree tmp_var, store_flag = NULL_TREE;
2080   unsigned i;
2081   gassign *load;
2082   struct fmt_data fmt_data;
2083   edge ex;
2084   struct lim_aux_data *lim_data;
2085   bool multi_threaded_model_p = false;
2086   gimple_stmt_iterator gsi;
2087   hash_set<basic_block> flag_bbs;
2088 
2089   if (dump_file && (dump_flags & TDF_DETAILS))
2090     {
2091       fprintf (dump_file, "Executing store motion of ");
2092       print_generic_expr (dump_file, ref->mem.ref);
2093       fprintf (dump_file, " from loop %d\n", loop->num);
2094     }
2095 
2096   tmp_var = create_tmp_reg (TREE_TYPE (ref->mem.ref),
2097 			    get_lsm_tmp_name (ref->mem.ref, ~0));
2098 
2099   fmt_data.loop = loop;
2100   fmt_data.orig_loop = loop;
2101   for_each_index (&ref->mem.ref, force_move_till, &fmt_data);
2102 
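  /* Use the flag-guarded store (the "multi-threaded model") when the
     preheader is part of a transaction, or when store data races must be
     avoided and REF is not provably stored on every iteration of LOOP.  */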
2103   if (bb_in_transaction (loop_preheader_edge (loop)->src)
2104       || (! flag_store_data_races
2105 	  && ! ref_always_accessed_p (loop, ref, true)))
2106     multi_threaded_model_p = true;
2107 
2108   if (multi_threaded_model_p)
2109     store_flag = execute_sm_if_changed_flag_set (loop, ref, &flag_bbs);
2110 
2111   rewrite_mem_refs (loop, ref, tmp_var);
2112 
2113   /* Emit the load code right before the first location of REF inside
2114      the loop, so that we are sure it will be processed by
2115      move_computations after all of its dependencies.  */
2116   gsi = gsi_for_stmt (first_mem_ref_loc (loop, ref)->stmt);
2117 
2118   /* FIXME/TODO: For the multi-threaded variant, we could avoid this
2119      load altogether, since the store is predicated by a flag.  We could
2120      do the load only if it was originally in the loop.  */
2121   load = gimple_build_assign (tmp_var, unshare_expr (ref->mem.ref));
2122   lim_data = init_lim_data (load);
2123   lim_data->max_loop = loop;
2124   lim_data->tgt_loop = loop;
2125   gsi_insert_before (&gsi, load, GSI_SAME_STMT);
2126 
2127   if (multi_threaded_model_p)
2128     {
2129       load = gimple_build_assign (store_flag, boolean_false_node);
2130       lim_data = init_lim_data (load);
2131       lim_data->max_loop = loop;
2132       lim_data->tgt_loop = loop;
2133       gsi_insert_before (&gsi, load, GSI_SAME_STMT);
2134     }
2135 
2136   /* Sink the store to every exit from the loop.  */
2137   FOR_EACH_VEC_ELT (exits, i, ex)
2138     if (!multi_threaded_model_p)
2139       {
2140 	gassign *store;
2141 	store = gimple_build_assign (unshare_expr (ref->mem.ref), tmp_var);
2142 	gsi_insert_on_edge (ex, store);
2143       }
2144     else
2145       execute_sm_if_changed (ex, ref->mem.ref, tmp_var, store_flag,
2146 			     loop_preheader_edge (loop), &flag_bbs);
2147 }
2148 
2149 /* Hoists memory references MEM_REFS out of LOOP.  EXITS is the list of exit
2150    edges of the LOOP.  */
2151 
2152 static void
2153 hoist_memory_references (class loop *loop, bitmap mem_refs,
2154 			 vec<edge> exits)
2155 {
2156   im_mem_ref *ref;
2157   unsigned  i;
2158   bitmap_iterator bi;
2159 
2160   EXECUTE_IF_SET_IN_BITMAP (mem_refs, 0, i, bi)
2161     {
2162       ref = memory_accesses.refs_list[i];
2163       execute_sm (loop, exits, ref);
2164     }
2165 }
2166 
2167 class ref_always_accessed
2168 {
2169 public:
2170   ref_always_accessed (class loop *loop_, bool stored_p_)
2171       : loop (loop_), stored_p (stored_p_) {}
2172   bool operator () (mem_ref_loc *loc);
2173   class loop *loop;
2174   bool stored_p;
2175 };
2176 
2177 bool
2178 ref_always_accessed::operator () (mem_ref_loc *loc)
2179 {
2180   class loop *must_exec;
2181 
2182   struct lim_aux_data *lim_data = get_lim_data (loc->stmt);
2183   if (!lim_data)
2184     return false;
2185 
2186   /* If we require an always executed store make sure the statement
2187      is a store.  */
2188   if (stored_p)
2189     {
2190       tree lhs = gimple_get_lhs (loc->stmt);
2191       if (!lhs
2192 	  || !(DECL_P (lhs) || REFERENCE_CLASS_P (lhs)))
2193 	return false;
2194     }
2195 
2196   must_exec = lim_data->always_executed_in;
2197   if (!must_exec)
2198     return false;
2199 
2200   if (must_exec == loop
2201       || flow_loop_nested_p (must_exec, loop))
2202     return true;
2203 
2204   return false;
2205 }
2206 
2207 /* Returns true if REF is always accessed in LOOP.  If STORED_P is true
2208    make sure REF is always stored to in LOOP.  */
2209 
2210 static bool
2211 ref_always_accessed_p (class loop *loop, im_mem_ref *ref, bool stored_p)
2212 {
2213   return for_all_locs_in_loop (loop, ref,
2214 			       ref_always_accessed (loop, stored_p));
2215 }
2216 
2217 /* Returns true if REF1 and REF2 are independent.  */
2218 
2219 static bool
2220 refs_independent_p (im_mem_ref *ref1, im_mem_ref *ref2)
2221 {
2222   if (ref1 == ref2)
2223     return true;
2224 
2225   if (dump_file && (dump_flags & TDF_DETAILS))
2226     fprintf (dump_file, "Querying dependency of refs %u and %u: ",
2227 	     ref1->id, ref2->id);
2228 
2229   if (mem_refs_may_alias_p (ref1, ref2, &memory_accesses.ttae_cache))
2230     {
2231       if (dump_file && (dump_flags & TDF_DETAILS))
2232 	fprintf (dump_file, "dependent.\n");
2233       return false;
2234     }
2235   else
2236     {
2237       if (dump_file && (dump_flags & TDF_DETAILS))
2238 	fprintf (dump_file, "independent.\n");
2239       return true;
2240     }
2241 }
2242 
2243 /* Mark REF dependent on stores or loads (according to STORED_P) in LOOP
2244    and its super-loops.  */
2245 
2246 static void
2247 record_dep_loop (class loop *loop, im_mem_ref *ref, bool stored_p)
2248 {
2249   /* We can propagate dependent-in-loop bits up the loop
2250      hierarchy to all outer loops.  */
2251   while (loop != current_loops->tree_root
2252 	 && bitmap_set_bit (&ref->dep_loop, LOOP_DEP_BIT (loop->num, stored_p)))
2253     loop = loop_outer (loop);
2254 }
2255 
2256 /* Returns true if REF is independent of all other memory
2257    references in LOOP.  */
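
/* Unless REF itself is stored in LOOP it suffices to check it against the
   stored references in LOOP; the (loop, stored_p) results are cached in the
   indep_loop/dep_loop bitmaps of REF so that repeated queries are cheap.  */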
2258 
2259 static bool
2260 ref_indep_loop_p_1 (class loop *loop, im_mem_ref *ref, bool stored_p)
2261 {
2262   stored_p |= (ref->stored && bitmap_bit_p (ref->stored, loop->num));
2263 
2264   bool indep_p = true;
2265   bitmap refs_to_check;
2266 
2267   if (stored_p)
2268     refs_to_check = &memory_accesses.refs_in_loop[loop->num];
2269   else
2270     refs_to_check = &memory_accesses.refs_stored_in_loop[loop->num];
2271 
2272   if (bitmap_bit_p (refs_to_check, UNANALYZABLE_MEM_ID))
2273     indep_p = false;
2274   else
2275     {
2276       if (bitmap_bit_p (&ref->indep_loop, LOOP_DEP_BIT (loop->num, stored_p)))
2277 	return true;
2278       if (bitmap_bit_p (&ref->dep_loop, LOOP_DEP_BIT (loop->num, stored_p)))
2279 	return false;
2280 
2281       class loop *inner = loop->inner;
2282       while (inner)
2283 	{
2284 	  if (!ref_indep_loop_p_1 (inner, ref, stored_p))
2285 	    {
2286 	      indep_p = false;
2287 	      break;
2288 	    }
2289 	  inner = inner->next;
2290 	}
2291 
2292       if (indep_p)
2293 	{
2294 	  unsigned i;
2295 	  bitmap_iterator bi;
2296 	  EXECUTE_IF_SET_IN_BITMAP (refs_to_check, 0, i, bi)
2297 	    {
2298 	      im_mem_ref *aref = memory_accesses.refs_list[i];
2299 	      if (!refs_independent_p (ref, aref))
2300 		{
2301 		  indep_p = false;
2302 		  break;
2303 		}
2304 	    }
2305 	}
2306     }
2307 
2308   if (dump_file && (dump_flags & TDF_DETAILS))
2309     fprintf (dump_file, "Querying dependencies of ref %u in loop %d: %s\n",
2310 	     ref->id, loop->num, indep_p ? "independent" : "dependent");
2311 
2312   /* Record the computed result in the cache.  */
2313   if (indep_p)
2314     {
2315       if (bitmap_set_bit (&ref->indep_loop, LOOP_DEP_BIT (loop->num, stored_p))
2316 	  && stored_p)
2317 	{
2318 	  /* If it's independent of all refs then it's independent of
2319 	     stores, too.  */
2320 	  bitmap_set_bit (&ref->indep_loop, LOOP_DEP_BIT (loop->num, false));
2321 	}
2322     }
2323   else
2324     {
2325       record_dep_loop (loop, ref, stored_p);
2326       if (!stored_p)
2327 	{
2328 	  /* If it's dependent on stores it's dependent on all
2329 	     refs, too.  */
2330 	  record_dep_loop (loop, ref, true);
2331 	}
2332     }
2333 
2334   return indep_p;
2335 }
2336 
2337 /* Returns true if REF is independent of all other memory references in
2338    LOOP.  */
2339 
2340 static bool
2341 ref_indep_loop_p (class loop *loop, im_mem_ref *ref)
2342 {
2343   gcc_checking_assert (MEM_ANALYZABLE (ref));
2344 
2345   return ref_indep_loop_p_1 (loop, ref, false);
2346 }
2347 
2348 /* Returns true if we can perform store motion of REF from LOOP.  */
2349 
2350 static bool
2351 can_sm_ref_p (class loop *loop, im_mem_ref *ref)
2352 {
2353   tree base;
2354 
2355   /* Can't hoist unanalyzable refs.  */
2356   if (!MEM_ANALYZABLE (ref))
2357     return false;
2358 
2359   /* It should be movable.  */
2360   if (!is_gimple_reg_type (TREE_TYPE (ref->mem.ref))
2361       || TREE_THIS_VOLATILE (ref->mem.ref)
2362       || !for_each_index (&ref->mem.ref, may_move_till, loop))
2363     return false;
2364 
2365   /* If it can throw, fail; we do not properly update EH info.  */
2366   if (tree_could_throw_p (ref->mem.ref))
2367     return false;
2368 
2369   /* If it can trap, it must always be executed in LOOP.
2370      Readonly memory locations may trap when storing to them, but
2371      tree_could_trap_p is a predicate for rvalues, so check that
2372      explicitly.  */
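  /* For example, a store to a read-only or potentially trapping location
     that is only executed conditionally inside LOOP must not be turned into
     an unconditional store on the loop exits.  */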
2373   base = get_base_address (ref->mem.ref);
2374   if ((tree_could_trap_p (ref->mem.ref)
2375        || (DECL_P (base) && TREE_READONLY (base)))
2376       && !ref_always_accessed_p (loop, ref, true))
2377     return false;
2378 
2379   /* And it must be independent of all other memory references
2380      in LOOP.  */
2381   if (!ref_indep_loop_p (loop, ref))
2382     return false;
2383 
2384   return true;
2385 }
2386 
2387 /* Marks in REFS_TO_SM the references in LOOP for which store motion should
2388    be performed.  SM_EXECUTED is the set of references for which store
2389    motion was already performed in one of the outer loops.  */
2390 
2391 static void
2392 find_refs_for_sm (class loop *loop, bitmap sm_executed, bitmap refs_to_sm)
2393 {
2394   bitmap refs = &memory_accesses.all_refs_stored_in_loop[loop->num];
2395   unsigned i;
2396   bitmap_iterator bi;
2397   im_mem_ref *ref;
2398 
2399   EXECUTE_IF_AND_COMPL_IN_BITMAP (refs, sm_executed, 0, i, bi)
2400     {
2401       ref = memory_accesses.refs_list[i];
2402       if (can_sm_ref_p (loop, ref))
2403 	bitmap_set_bit (refs_to_sm, i);
2404     }
2405 }
2406 
2407 /* Checks whether LOOP (with exits stored in EXITS array) is suitable
2408    for a store motion optimization (i.e. whether we can insert statements
2409    on its exits).  */
2410 
2411 static bool
2412 loop_suitable_for_sm (class loop *loop ATTRIBUTE_UNUSED,
2413 		      vec<edge> exits)
2414 {
2415   unsigned i;
2416   edge ex;
2417 
2418   FOR_EACH_VEC_ELT (exits, i, ex)
2419     if (ex->flags & (EDGE_ABNORMAL | EDGE_EH))
2420       return false;
2421 
2422   return true;
2423 }
2424 
2425 /* Try to perform store motion for all memory references modified inside
2426    LOOP.  SM_EXECUTED is the bitmap of the memory references for which
2427    store motion was already executed in one of the outer loops.  */
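
/* Note that SM_IN_LOOP is merged into SM_EXECUTED before recursing into the
   subloops and removed again afterwards, so a reference whose store was
   already moved out of this loop is not considered again in its subloops.  */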
2428 
2429 static void
2430 store_motion_loop (class loop *loop, bitmap sm_executed)
2431 {
2432   vec<edge> exits = get_loop_exit_edges (loop);
2433   class loop *subloop;
2434   bitmap sm_in_loop = BITMAP_ALLOC (&lim_bitmap_obstack);
2435 
2436   if (loop_suitable_for_sm (loop, exits))
2437     {
2438       find_refs_for_sm (loop, sm_executed, sm_in_loop);
2439       hoist_memory_references (loop, sm_in_loop, exits);
2440     }
2441   exits.release ();
2442 
2443   bitmap_ior_into (sm_executed, sm_in_loop);
2444   for (subloop = loop->inner; subloop != NULL; subloop = subloop->next)
2445     store_motion_loop (subloop, sm_executed);
2446   bitmap_and_compl_into (sm_executed, sm_in_loop);
2447   BITMAP_FREE (sm_in_loop);
2448 }
2449 
2450 /* Try to perform store motion for all memory references modified inside
2451    loops.  */
2452 
2453 static void
2454 store_motion (void)
2455 {
2456   class loop *loop;
2457   bitmap sm_executed = BITMAP_ALLOC (&lim_bitmap_obstack);
2458 
2459   for (loop = current_loops->tree_root->inner; loop != NULL; loop = loop->next)
2460     store_motion_loop (loop, sm_executed);
2461 
2462   BITMAP_FREE (sm_executed);
2463   gsi_commit_edge_inserts ();
2464 }
2465 
2466 /* Fills ALWAYS_EXECUTED_IN information for basic blocks of LOOP, i.e.
2467    for each such basic block bb records the outermost loop for which execution
2468    of its header implies execution of bb.  CONTAINS_CALL is the bitmap of
2469    blocks that contain a nonpure call.  */
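
/* For example (a sketch, assuming no block contains a nonpure call):

     while (1)
       {
	 a;
	 if (cond)
	   b;
	 c;
       }

   Here the loop header and the block containing "c" are executed whenever
   the header is entered, so they get the loop recorded as their
   ALWAYS_EXECUTED_IN, while the conditional block containing "b" does not.  */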
2470 
2471 static void
2472 fill_always_executed_in_1 (class loop *loop, sbitmap contains_call)
2473 {
2474   basic_block bb = NULL, *bbs, last = NULL;
2475   unsigned i;
2476   edge e;
2477   class loop *inn_loop = loop;
2478 
2479   if (ALWAYS_EXECUTED_IN (loop->header) == NULL)
2480     {
2481       bbs = get_loop_body_in_dom_order (loop);
2482 
2483       for (i = 0; i < loop->num_nodes; i++)
2484 	{
2485 	  edge_iterator ei;
2486 	  bb = bbs[i];
2487 
2488 	  if (dominated_by_p (CDI_DOMINATORS, loop->latch, bb))
2489 	    last = bb;
2490 
2491 	  if (bitmap_bit_p (contains_call, bb->index))
2492 	    break;
2493 
2494 	  FOR_EACH_EDGE (e, ei, bb->succs)
2495 	    {
2496 	      /* If there is an exit from this BB.  */
2497 	      if (!flow_bb_inside_loop_p (loop, e->dest))
2498 		break;
2499 	      /* Or we enter a possibly non-finite loop.  */
2500 	      if (flow_loop_nested_p (bb->loop_father,
2501 				      e->dest->loop_father)
2502 		  && ! finite_loop_p (e->dest->loop_father))
2503 		break;
2504 	    }
2505 	  if (e)
2506 	    break;
2507 
2508 	  /* A loop might be infinite (TODO use simple loop analysis
2509 	     to disprove this if possible).  */
2510 	  if (bb->flags & BB_IRREDUCIBLE_LOOP)
2511 	    break;
2512 
2513 	  if (!flow_bb_inside_loop_p (inn_loop, bb))
2514 	    break;
2515 
2516 	  if (bb->loop_father->header == bb)
2517 	    {
2518 	      if (!dominated_by_p (CDI_DOMINATORS, loop->latch, bb))
2519 		break;
2520 
2521 	      /* In a loop that is always entered we may proceed anyway.
2522 		 But record that we entered it and stop once we leave it.  */
2523 	      inn_loop = bb->loop_father;
2524 	    }
2525 	}
2526 
2527       while (1)
2528 	{
2529 	  SET_ALWAYS_EXECUTED_IN (last, loop);
2530 	  if (last == loop->header)
2531 	    break;
2532 	  last = get_immediate_dominator (CDI_DOMINATORS, last);
2533 	}
2534 
2535       free (bbs);
2536     }
2537 
2538   for (loop = loop->inner; loop; loop = loop->next)
2539     fill_always_executed_in_1 (loop, contains_call);
2540 }
2541 
2542 /* Fills ALWAYS_EXECUTED_IN information for basic blocks, i.e.
2543    for each such basic block bb records the outermost loop for which execution
2544    of its header implies execution of bb.  */
2545 
2546 static void
2547 fill_always_executed_in (void)
2548 {
2549   basic_block bb;
2550   class loop *loop;
2551 
2552   auto_sbitmap contains_call (last_basic_block_for_fn (cfun));
2553   bitmap_clear (contains_call);
2554   FOR_EACH_BB_FN (bb, cfun)
2555     {
2556       gimple_stmt_iterator gsi;
2557       for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2558 	{
2559 	  if (nonpure_call_p (gsi_stmt (gsi)))
2560 	    break;
2561 	}
2562 
2563       if (!gsi_end_p (gsi))
2564 	bitmap_set_bit (contains_call, bb->index);
2565     }
2566 
2567   for (loop = current_loops->tree_root->inner; loop; loop = loop->next)
2568     fill_always_executed_in_1 (loop, contains_call);
2569 }
2570 
2571 
2572 /* Compute the global information needed by the loop invariant motion pass.  */
2573 
2574 static void
2575 tree_ssa_lim_initialize (void)
2576 {
2577   class loop *loop;
2578   unsigned i;
2579 
2580   bitmap_obstack_initialize (&lim_bitmap_obstack);
2581   gcc_obstack_init (&mem_ref_obstack);
2582   lim_aux_data_map = new hash_map<gimple *, lim_aux_data *>;
2583 
2584   if (flag_tm)
2585     compute_transaction_bits ();
2586 
2587   alloc_aux_for_edges (0);
2588 
2589   memory_accesses.refs = new hash_table<mem_ref_hasher> (100);
2590   memory_accesses.refs_list.create (100);
2591   /* Allocate a special, unanalyzable mem-ref with ID zero.  */
2592   memory_accesses.refs_list.quick_push
2593     (mem_ref_alloc (NULL, 0, UNANALYZABLE_MEM_ID));
2594 
2595   memory_accesses.refs_in_loop.create (number_of_loops (cfun));
2596   memory_accesses.refs_in_loop.quick_grow (number_of_loops (cfun));
2597   memory_accesses.refs_stored_in_loop.create (number_of_loops (cfun));
2598   memory_accesses.refs_stored_in_loop.quick_grow (number_of_loops (cfun));
2599   memory_accesses.all_refs_stored_in_loop.create (number_of_loops (cfun));
2600   memory_accesses.all_refs_stored_in_loop.quick_grow (number_of_loops (cfun));
2601 
2602   for (i = 0; i < number_of_loops (cfun); i++)
2603     {
2604       bitmap_initialize (&memory_accesses.refs_in_loop[i],
2605 			 &lim_bitmap_obstack);
2606       bitmap_initialize (&memory_accesses.refs_stored_in_loop[i],
2607 			 &lim_bitmap_obstack);
2608       bitmap_initialize (&memory_accesses.all_refs_stored_in_loop[i],
2609 			 &lim_bitmap_obstack);
2610     }
2611 
2612   memory_accesses.ttae_cache = NULL;
2613 
2614   /* Initialize bb_loop_postorder with a mapping from loop->num to
2615      its postorder index.  */
2616   i = 0;
2617   bb_loop_postorder = XNEWVEC (unsigned, number_of_loops (cfun));
2618   FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
2619     bb_loop_postorder[loop->num] = i++;
2620 }
2621 
2622 /* Cleans up after the invariant motion pass.  */
2623 
2624 static void
2625 tree_ssa_lim_finalize (void)
2626 {
2627   basic_block bb;
2628   unsigned i;
2629   im_mem_ref *ref;
2630 
2631   free_aux_for_edges ();
2632 
2633   FOR_EACH_BB_FN (bb, cfun)
2634     SET_ALWAYS_EXECUTED_IN (bb, NULL);
2635 
2636   bitmap_obstack_release (&lim_bitmap_obstack);
2637   delete lim_aux_data_map;
2638 
2639   delete memory_accesses.refs;
2640   memory_accesses.refs = NULL;
2641 
2642   FOR_EACH_VEC_ELT (memory_accesses.refs_list, i, ref)
2643     memref_free (ref);
2644   memory_accesses.refs_list.release ();
2645   obstack_free (&mem_ref_obstack, NULL);
2646 
2647   memory_accesses.refs_in_loop.release ();
2648   memory_accesses.refs_stored_in_loop.release ();
2649   memory_accesses.all_refs_stored_in_loop.release ();
2650 
2651   if (memory_accesses.ttae_cache)
2652     free_affine_expand_cache (&memory_accesses.ttae_cache);
2653 
2654   free (bb_loop_postorder);
2655 }
2656 
2657 /* Moves invariants from loops.  Only "expensive" invariants are moved out --
2658    i.e. those that are likely to be a win regardless of the register pressure.  */
2659 
2660 static unsigned int
2661 tree_ssa_lim (void)
2662 {
2663   unsigned int todo;
2664 
2665   tree_ssa_lim_initialize ();
2666 
2667   /* Gathers information about memory accesses in the loops.  */
2668   analyze_memory_references ();
2669 
2670   /* Fills ALWAYS_EXECUTED_IN information for basic blocks.  */
2671   fill_always_executed_in ();
2672 
2673   /* For each statement determine the outermost loop in which it is
2674      invariant and the cost of computing the invariant.  */
2675   invariantness_dom_walker (CDI_DOMINATORS)
2676     .walk (cfun->cfg->x_entry_block_ptr);
2677 
2678   /* Execute store motion.  Force the necessary invariants to be moved
2679      out of the loops as well.  */
2680   store_motion ();
2681 
2682   /* Move the expressions that are expensive enough.  */
2683   todo = move_computations ();
2684 
2685   tree_ssa_lim_finalize ();
2686 
2687   return todo;
2688 }
2689 
2690 /* Loop invariant motion pass.  */
2691 
2692 namespace {
2693 
2694 const pass_data pass_data_lim =
2695 {
2696   GIMPLE_PASS, /* type */
2697   "lim", /* name */
2698   OPTGROUP_LOOP, /* optinfo_flags */
2699   TV_LIM, /* tv_id */
2700   PROP_cfg, /* properties_required */
2701   0, /* properties_provided */
2702   0, /* properties_destroyed */
2703   0, /* todo_flags_start */
2704   0, /* todo_flags_finish */
2705 };
2706 
2707 class pass_lim : public gimple_opt_pass
2708 {
2709 public:
2710   pass_lim (gcc::context *ctxt)
2711     : gimple_opt_pass (pass_data_lim, ctxt)
2712   {}
2713 
2714   /* opt_pass methods: */
2715   opt_pass * clone () { return new pass_lim (m_ctxt); }
2716   virtual bool gate (function *) { return flag_tree_loop_im != 0; }
2717   virtual unsigned int execute (function *);
2718 
2719 }; // class pass_lim
2720 
2721 unsigned int
2722 pass_lim::execute (function *fun)
2723 {
2724   bool in_loop_pipeline = scev_initialized_p ();
2725   if (!in_loop_pipeline)
2726     loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);
2727 
2728   if (number_of_loops (fun) <= 1)
2729     return 0;
2730   unsigned int todo = tree_ssa_lim ();
2731 
2732   if (!in_loop_pipeline)
2733     loop_optimizer_finalize ();
2734   else
2735     scev_reset ();
2736   return todo;
2737 }
2738 
2739 } // anon namespace
2740 
2741 gimple_opt_pass *
2742 make_pass_lim (gcc::context *ctxt)
2743 {
2744   return new pass_lim (ctxt);
2745 }
2746 
2747 
2748