/* Loop invariant motion.
   Copyright (C) 2003-2019 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "tree.h"
#include "gimple.h"
#include "cfghooks.h"
#include "tree-pass.h"
#include "ssa.h"
#include "gimple-pretty-print.h"
#include "fold-const.h"
#include "cfganal.h"
#include "tree-eh.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "tree-cfg.h"
#include "tree-ssa-loop-manip.h"
#include "tree-ssa-loop.h"
#include "tree-into-ssa.h"
#include "cfgloop.h"
#include "domwalk.h"
#include "params.h"
#include "tree-affine.h"
#include "tree-ssa-propagate.h"
#include "trans-mem.h"
#include "gimple-fold.h"
#include "tree-scalar-evolution.h"
#include "tree-ssa-loop-niter.h"
#include "alias.h"
#include "builtins.h"
#include "tree-dfa.h"

/* TODO:  Support for predicated code motion.  I.e.

   while (1)
     {
       if (cond)
	 {
	   a = inv;
	   something;
	 }
     }
   Where COND and INV are invariants, but evaluating INV may trap or be
   invalid for some other reason if !COND.  This may be transformed to

   if (cond)
     a = inv;
   while (1)
     {
       if (cond)
	 something;
     }  */
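
/* A purely illustrative sketch of the unpredicated motion the pass does
   perform: a computation that is invariant in a loop and may be evaluated
   unconditionally, as in

   while (1)
     {
       a = b + c;
       something;
     }

   is hoisted into the loop preheader,

   t = b + c;
   while (1)
     {
       a = t;
       something;
     }

   where B and C are not modified inside the loop.  */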

/* The auxiliary data kept for each statement.  */

struct lim_aux_data
{
  struct loop *max_loop;	/* The outermost loop in which the statement
				   is invariant.  */

  struct loop *tgt_loop;	/* The loop out of which we want to move the
				   invariant.  */

  struct loop *always_executed_in;
				/* The outermost loop for which we are sure
				   the statement is executed if the loop
				   is entered.  */

  unsigned cost;		/* Cost of the computation performed by the
				   statement.  */

  unsigned ref;			/* The simple_mem_ref in this stmt or 0.  */

  vec<gimple *> depends;	/* Vector of statements that must also be
				   hoisted out of the loop when this statement
				   is hoisted; i.e. those that define the
				   operands of the statement and are inside of
				   the MAX_LOOP loop.  */
};

/* Maps statements to their lim_aux_data.  */

static hash_map<gimple *, lim_aux_data *> *lim_aux_data_map;

/* Description of a memory reference location.  */

struct mem_ref_loc
{
  tree *ref;			/* The reference itself.  */
  gimple *stmt;			/* The statement in which it occurs.  */
};


/* Description of a memory reference.  */

struct im_mem_ref
{
  unsigned id : 30;		/* ID assigned to the memory reference
				   (its index in memory_accesses.refs_list)  */
  unsigned ref_canonical : 1;   /* Whether mem.ref was canonicalized.  */
  unsigned ref_decomposed : 1;  /* Whether the ref was hashed from mem.  */
  hashval_t hash;		/* Its hash value.  */

  /* The memory access itself and associated caching of alias-oracle
     query meta-data.  */
  ao_ref mem;

  bitmap stored;		/* The set of loops in which this memory
				   location is stored to.  */
  vec<mem_ref_loc>		accesses_in_loop;
				/* The locations of the accesses.  Vector
				   indexed by the loop number.  */

  /* The following sets are computed on demand.  We keep both the set and
     its complement, so that we know whether the information was
     already computed or not.  */
  bitmap_head indep_loop;	/* The set of loops in which the memory
				   reference is independent, meaning:
				   If it is stored in the loop, this store
				     is independent of all other loads and
				     stores.
				   If it is only loaded, then it is independent
				     of all stores in the loop.  */
  bitmap_head dep_loop;		/* The complement of INDEP_LOOP.  */
};

/* We use two bits per loop in the ref->{in,}dep_loop bitmaps, the first
   to record (in)dependence against stores in the loop and its subloops, the
   second to record (in)dependence against all references in the loop
   and its subloops.  */
#define LOOP_DEP_BIT(loopnum, storedp) (2 * (loopnum) + (storedp ? 1 : 0))
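
/* Illustration: the two bits for loop number N sit at positions 2*N and
   2*N + 1; e.g. for loop 3, LOOP_DEP_BIT (3, false) == 6 is the bit for
   (in)dependence against stores and LOOP_DEP_BIT (3, true) == 7 the bit
   for (in)dependence against all references.  */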

/* Mem_ref hashtable helpers.  */

struct mem_ref_hasher : nofree_ptr_hash <im_mem_ref>
{
  typedef ao_ref *compare_type;
  static inline hashval_t hash (const im_mem_ref *);
  static inline bool equal (const im_mem_ref *, const ao_ref *);
};

/* A hash function for struct im_mem_ref object MEM.  */

inline hashval_t
mem_ref_hasher::hash (const im_mem_ref *mem)
{
  return mem->hash;
}

/* An equality function for struct im_mem_ref object MEM1 with
   memory reference OBJ2.  */

inline bool
mem_ref_hasher::equal (const im_mem_ref *mem1, const ao_ref *obj2)
{
  if (obj2->max_size_known_p ())
    return (mem1->ref_decomposed
	    && operand_equal_p (mem1->mem.base, obj2->base, 0)
	    && known_eq (mem1->mem.offset, obj2->offset)
	    && known_eq (mem1->mem.size, obj2->size)
	    && known_eq (mem1->mem.max_size, obj2->max_size)
	    && mem1->mem.volatile_p == obj2->volatile_p
	    && (mem1->mem.ref_alias_set == obj2->ref_alias_set
		/* We are not canonicalizing alias-sets but for the
		   special case where we didn't canonicalize yet and
		   the incoming ref is an alias-set zero MEM we pick
		   the correct one already.  */
		|| (!mem1->ref_canonical
		    && (TREE_CODE (obj2->ref) == MEM_REF
			|| TREE_CODE (obj2->ref) == TARGET_MEM_REF)
		    && obj2->ref_alias_set == 0)
		/* Likewise if there's a canonical ref with alias-set zero.  */
		|| (mem1->ref_canonical && mem1->mem.ref_alias_set == 0))
	    && types_compatible_p (TREE_TYPE (mem1->mem.ref),
				   TREE_TYPE (obj2->ref)));
  else
    return operand_equal_p (mem1->mem.ref, obj2->ref, 0);
}


/* Description of memory accesses in loops.  */

static struct
{
  /* The hash table of memory references accessed in loops.  */
  hash_table<mem_ref_hasher> *refs;

  /* The list of memory references.  */
  vec<im_mem_ref *> refs_list;

  /* The set of memory references accessed in each loop.  */
  vec<bitmap_head> refs_in_loop;

  /* The set of memory references stored in each loop.  */
  vec<bitmap_head> refs_stored_in_loop;

  /* The set of memory references stored in each loop, including subloops.  */
  vec<bitmap_head> all_refs_stored_in_loop;

  /* Cache for expanding memory addresses.  */
  hash_map<tree, name_expansion *> *ttae_cache;
} memory_accesses;

/* Obstack for the bitmaps in the above data structures.  */
static bitmap_obstack lim_bitmap_obstack;
static obstack mem_ref_obstack;

static bool ref_indep_loop_p (struct loop *, im_mem_ref *);
static bool ref_always_accessed_p (struct loop *, im_mem_ref *, bool);

/* Minimum cost of an expensive expression.  */
#define LIM_EXPENSIVE ((unsigned) PARAM_VALUE (PARAM_LIM_EXPENSIVE))

/* The outermost loop for which execution of the header guarantees that the
   block will be executed.  */
#define ALWAYS_EXECUTED_IN(BB) ((struct loop *) (BB)->aux)
#define SET_ALWAYS_EXECUTED_IN(BB, VAL) ((BB)->aux = (void *) (VAL))

/* ID of the shared unanalyzable mem.  */
#define UNANALYZABLE_MEM_ID 0

/* Whether the reference was analyzable.  */
#define MEM_ANALYZABLE(REF) ((REF)->id != UNANALYZABLE_MEM_ID)
static struct lim_aux_data *
init_lim_data (gimple *stmt)
{
  lim_aux_data *p = XCNEW (struct lim_aux_data);
  lim_aux_data_map->put (stmt, p);

  return p;
}

static struct lim_aux_data *
get_lim_data (gimple *stmt)
{
  lim_aux_data **p = lim_aux_data_map->get (stmt);
  if (!p)
    return NULL;

  return *p;
}

/* Releases the memory occupied by DATA.  */

static void
free_lim_aux_data (struct lim_aux_data *data)
{
  data->depends.release ();
  free (data);
}

static void
clear_lim_data (gimple *stmt)
{
  lim_aux_data **p = lim_aux_data_map->get (stmt);
  if (!p)
    return;

  free_lim_aux_data (*p);
  *p = NULL;
}


/* The possibilities of statement movement.  */
enum move_pos
  {
    MOVE_IMPOSSIBLE,		/* No movement -- side effect expression.  */
    MOVE_PRESERVE_EXECUTION,	/* Must not cause the non-executed statement
				   to become executed -- memory accesses, ... */
    MOVE_POSSIBLE		/* Unlimited movement.  */
  };
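
/* Illustration of the classification done by movement_possibility below:
   a store or any other statement with a virtual definition or volatile
   operands yields MOVE_IMPOSSIBLE; a pure call such as t = strlen (s) or
   a possibly trapping computation yields MOVE_PRESERVE_EXECUTION; a plain
   arithmetic statement like a = b + c with an SSA name LHS yields
   MOVE_POSSIBLE.  */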


/* If it is possible to hoist the statement STMT unconditionally,
   returns MOVE_POSSIBLE.
   If it is possible to hoist the statement STMT, but we must avoid making
   it executed if it would not be executed in the original program (e.g.
   because it may trap), return MOVE_PRESERVE_EXECUTION.
   Otherwise return MOVE_IMPOSSIBLE.  */

enum move_pos
movement_possibility (gimple *stmt)
{
  tree lhs;
  enum move_pos ret = MOVE_POSSIBLE;

  if (flag_unswitch_loops
      && gimple_code (stmt) == GIMPLE_COND)
    {
      /* If we perform unswitching, force the operands of the invariant
	 condition to be moved out of the loop.  */
      return MOVE_POSSIBLE;
    }

  if (gimple_code (stmt) == GIMPLE_PHI
      && gimple_phi_num_args (stmt) <= 2
      && !virtual_operand_p (gimple_phi_result (stmt))
      && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (gimple_phi_result (stmt)))
    return MOVE_POSSIBLE;

  if (gimple_get_lhs (stmt) == NULL_TREE)
    return MOVE_IMPOSSIBLE;

  if (gimple_vdef (stmt))
    return MOVE_IMPOSSIBLE;

  if (stmt_ends_bb_p (stmt)
      || gimple_has_volatile_ops (stmt)
      || gimple_has_side_effects (stmt)
      || stmt_could_throw_p (cfun, stmt))
    return MOVE_IMPOSSIBLE;

  if (is_gimple_call (stmt))
    {
      /* While a pure or const call is guaranteed to have no side effects, we
	 cannot move it arbitrarily.  Consider code like

	 char *s = something ();

	 while (1)
	   {
	     if (s)
	       t = strlen (s);
	     else
	       t = 0;
	   }

	 Here the strlen call cannot be moved out of the loop, even though
	 s is invariant.  In addition to possibly creating a call with
	 invalid arguments, moving out a function call that is not executed
	 may cause performance regressions in case the call is costly and
	 not executed at all.  */
      ret = MOVE_PRESERVE_EXECUTION;
      lhs = gimple_call_lhs (stmt);
    }
  else if (is_gimple_assign (stmt))
    lhs = gimple_assign_lhs (stmt);
  else
    return MOVE_IMPOSSIBLE;

  if (TREE_CODE (lhs) == SSA_NAME
      && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs))
    return MOVE_IMPOSSIBLE;

  if (TREE_CODE (lhs) != SSA_NAME
      || gimple_could_trap_p (stmt))
    return MOVE_PRESERVE_EXECUTION;

  /* Non local loads in a transaction cannot be hoisted out.  Well,
     unless the load happens on every path out of the loop, but we
     don't take this into account yet.  */
  if (flag_tm
      && gimple_in_transaction (stmt)
      && gimple_assign_single_p (stmt))
    {
      tree rhs = gimple_assign_rhs1 (stmt);
      if (DECL_P (rhs) && is_global_var (rhs))
	{
	  if (dump_file)
	    {
	      fprintf (dump_file, "Cannot hoist conditional load of ");
	      print_generic_expr (dump_file, rhs, TDF_SLIM);
	      fprintf (dump_file, " because it is in a transaction.\n");
	    }
	  return MOVE_IMPOSSIBLE;
	}
    }

  return ret;
}

/* Suppose that operand DEF is used inside the LOOP.  Returns the outermost
   loop to which we could move the expression using DEF if it did not have
   other operands, i.e. the outermost loop enclosing LOOP in which the value
   of DEF is invariant.  */

static struct loop *
outermost_invariant_loop (tree def, struct loop *loop)
{
  gimple *def_stmt;
  basic_block def_bb;
  struct loop *max_loop;
  struct lim_aux_data *lim_data;

  if (!def)
    return superloop_at_depth (loop, 1);

  if (TREE_CODE (def) != SSA_NAME)
    {
      gcc_assert (is_gimple_min_invariant (def));
      return superloop_at_depth (loop, 1);
    }

  def_stmt = SSA_NAME_DEF_STMT (def);
  def_bb = gimple_bb (def_stmt);
  if (!def_bb)
    return superloop_at_depth (loop, 1);

  max_loop = find_common_loop (loop, def_bb->loop_father);

  lim_data = get_lim_data (def_stmt);
  if (lim_data != NULL && lim_data->max_loop != NULL)
    max_loop = find_common_loop (max_loop,
				 loop_outer (lim_data->max_loop));
  if (max_loop == loop)
    return NULL;
  max_loop = superloop_at_depth (loop, loop_depth (max_loop) + 1);

  return max_loop;
}

/* DATA is a structure containing information associated with a statement
   inside LOOP.  DEF is one of the operands of this statement.

   Find the outermost loop enclosing LOOP in which the value of DEF is
   invariant and record this in the DATA->max_loop field.  If DEF itself is
   defined inside this loop as well (i.e. we need to hoist it out of the
   loop if we want to hoist the statement represented by DATA), record the
   statement in which DEF is defined to the DATA->depends list.
   Additionally if ADD_COST is true, add the cost of the computation of DEF
   to the DATA->cost.

   If DEF is not invariant in LOOP, return false.  Otherwise return true.  */

static bool
add_dependency (tree def, struct lim_aux_data *data, struct loop *loop,
		bool add_cost)
{
  gimple *def_stmt = SSA_NAME_DEF_STMT (def);
  basic_block def_bb = gimple_bb (def_stmt);
  struct loop *max_loop;
  struct lim_aux_data *def_data;

  if (!def_bb)
    return true;

  max_loop = outermost_invariant_loop (def, loop);
  if (!max_loop)
    return false;

  if (flow_loop_nested_p (data->max_loop, max_loop))
    data->max_loop = max_loop;

  def_data = get_lim_data (def_stmt);
  if (!def_data)
    return true;

  if (add_cost
      /* Only add the cost if the statement defining DEF is inside LOOP,
	 i.e. if it is likely that by moving the invariants dependent
	 on it, we will be able to avoid creating a new register for
	 it (since it will be only used in these dependent invariants).  */
      && def_bb->loop_father == loop)
    data->cost += def_data->cost;

  data->depends.safe_push (def_stmt);

  return true;
}

/* Returns an estimate for a cost of statement STMT.  The values here
   are just ad-hoc constants, similar to costs for inlining.  */

static unsigned
stmt_cost (gimple *stmt)
{
  /* Always try to create possibilities for unswitching.  */
  if (gimple_code (stmt) == GIMPLE_COND
      || gimple_code (stmt) == GIMPLE_PHI)
    return LIM_EXPENSIVE;

  /* We should be hoisting calls if possible.  */
  if (is_gimple_call (stmt))
    {
      tree fndecl;

      /* Unless the call is a builtin_constant_p; this always folds to a
	 constant, so moving it is useless.  */
      fndecl = gimple_call_fndecl (stmt);
      if (fndecl && fndecl_built_in_p (fndecl, BUILT_IN_CONSTANT_P))
	return 0;

      return LIM_EXPENSIVE;
    }

  /* Hoisting memory references out should almost surely be a win.  */
  if (gimple_references_memory_p (stmt))
    return LIM_EXPENSIVE;

  if (gimple_code (stmt) != GIMPLE_ASSIGN)
    return 1;

  switch (gimple_assign_rhs_code (stmt))
    {
    case MULT_EXPR:
    case WIDEN_MULT_EXPR:
    case WIDEN_MULT_PLUS_EXPR:
    case WIDEN_MULT_MINUS_EXPR:
    case DOT_PROD_EXPR:
    case TRUNC_DIV_EXPR:
    case CEIL_DIV_EXPR:
    case FLOOR_DIV_EXPR:
    case ROUND_DIV_EXPR:
    case EXACT_DIV_EXPR:
    case CEIL_MOD_EXPR:
    case FLOOR_MOD_EXPR:
    case ROUND_MOD_EXPR:
    case TRUNC_MOD_EXPR:
    case RDIV_EXPR:
      /* Division and multiplication are usually expensive.  */
      return LIM_EXPENSIVE;

    case LSHIFT_EXPR:
    case RSHIFT_EXPR:
    case WIDEN_LSHIFT_EXPR:
    case LROTATE_EXPR:
    case RROTATE_EXPR:
      /* Shifts and rotates are usually expensive.  */
      return LIM_EXPENSIVE;

    case CONSTRUCTOR:
      /* Make vector construction cost proportional to the number
         of elements.  */
      return CONSTRUCTOR_NELTS (gimple_assign_rhs1 (stmt));

    case SSA_NAME:
    case PAREN_EXPR:
      /* Whether or not something is wrapped inside a PAREN_EXPR
         should not change move cost.  Nor should an intermediate
	 unpropagated SSA name copy.  */
      return 0;

    default:
      return 1;
    }
}
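
/* Illustrative examples of the resulting costs: with the default value of
   --param lim-expensive (20 as of this writing), a multiplication,
   division, shift, memory reference or call (other than
   __builtin_constant_p) is costed at 20, a simple addition at 1, an SSA
   name copy or PAREN_EXPR at 0, and a four-element vector CONSTRUCTOR
   at 4.  */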

/* Finds the outermost loop between OUTER and LOOP in which the memory
   reference REF is independent.  If REF is not independent in LOOP, NULL
   is returned instead.  */

static struct loop *
outermost_indep_loop (struct loop *outer, struct loop *loop, im_mem_ref *ref)
{
  struct loop *aloop;

  if (ref->stored && bitmap_bit_p (ref->stored, loop->num))
    return NULL;

  for (aloop = outer;
       aloop != loop;
       aloop = superloop_at_depth (loop, loop_depth (aloop) + 1))
    if ((!ref->stored || !bitmap_bit_p (ref->stored, aloop->num))
	&& ref_indep_loop_p (aloop, ref))
      return aloop;

  if (ref_indep_loop_p (loop, ref))
    return loop;
  else
    return NULL;
}

/* If there is a simple load or store to a memory reference in STMT, returns
   the location of the memory reference, and sets IS_STORE according to whether
   it is a store or load.  Otherwise, returns NULL.  */

static tree *
simple_mem_ref_in_stmt (gimple *stmt, bool *is_store)
{
  tree *lhs, *rhs;

  /* Recognize SSA_NAME = MEM and MEM = (SSA_NAME | invariant) patterns.  */
  if (!gimple_assign_single_p (stmt))
    return NULL;

  lhs = gimple_assign_lhs_ptr (stmt);
  rhs = gimple_assign_rhs1_ptr (stmt);

  if (TREE_CODE (*lhs) == SSA_NAME && gimple_vuse (stmt))
    {
      *is_store = false;
      return rhs;
    }
  else if (gimple_vdef (stmt)
	   && (TREE_CODE (*rhs) == SSA_NAME || is_gimple_min_invariant (*rhs)))
    {
      *is_store = true;
      return lhs;
    }
  else
    return NULL;
}
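
/* For example (illustrative): for a load x_1 = a[i_2] this returns the
   address of the RHS operand and sets *IS_STORE to false; for a store
   a[i_2] = x_1 it returns the address of the LHS operand and sets
   *IS_STORE to true; for a register copy x_1 = y_2, which carries no
   virtual operands, it returns NULL.  */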

/* From a controlling predicate in DOM determine the arguments from
   the PHI node PHI that are chosen if the predicate evaluates to
   true and false and store them to *TRUE_ARG_P and *FALSE_ARG_P if
   they are non-NULL.  Returns true if the arguments can be determined,
   else returns false.  */

static bool
extract_true_false_args_from_phi (basic_block dom, gphi *phi,
				  tree *true_arg_p, tree *false_arg_p)
{
  edge te, fe;
  if (! extract_true_false_controlled_edges (dom, gimple_bb (phi),
					     &te, &fe))
    return false;

  if (true_arg_p)
    *true_arg_p = PHI_ARG_DEF (phi, te->dest_idx);
  if (false_arg_p)
    *false_arg_p = PHI_ARG_DEF (phi, fe->dest_idx);

  return true;
}

/* Determine the outermost loop to which it is possible to hoist a statement
   STMT and store it to LIM_DATA (STMT)->max_loop.  To do this we determine
   the outermost loop in which the value computed by STMT is invariant.
   If MUST_PRESERVE_EXEC is true, additionally choose the loop so that
   we preserve the fact whether STMT is executed.  It also fills other
   related information to LIM_DATA (STMT).

   The function returns false if STMT cannot be hoisted outside of the loop it
   is defined in, and true otherwise.  */

static bool
determine_max_movement (gimple *stmt, bool must_preserve_exec)
{
  basic_block bb = gimple_bb (stmt);
  struct loop *loop = bb->loop_father;
  struct loop *level;
  struct lim_aux_data *lim_data = get_lim_data (stmt);
  tree val;
  ssa_op_iter iter;

  if (must_preserve_exec)
    level = ALWAYS_EXECUTED_IN (bb);
  else
    level = superloop_at_depth (loop, 1);
  lim_data->max_loop = level;

  if (gphi *phi = dyn_cast <gphi *> (stmt))
    {
      use_operand_p use_p;
      unsigned min_cost = UINT_MAX;
      unsigned total_cost = 0;
      struct lim_aux_data *def_data;

      /* We will end up promoting dependencies to be unconditionally
	 evaluated.  For this reason the PHI cost (and thus the
	 cost we remove from the loop by doing the invariant motion)
	 is that of the cheapest PHI argument dependency chain.  */
      FOR_EACH_PHI_ARG (use_p, phi, iter, SSA_OP_USE)
	{
	  val = USE_FROM_PTR (use_p);

	  if (TREE_CODE (val) != SSA_NAME)
	    {
	      /* Assign cost 1 to constants.  */
	      min_cost = MIN (min_cost, 1);
	      total_cost += 1;
	      continue;
	    }
	  if (!add_dependency (val, lim_data, loop, false))
	    return false;

	  gimple *def_stmt = SSA_NAME_DEF_STMT (val);
	  if (gimple_bb (def_stmt)
	      && gimple_bb (def_stmt)->loop_father == loop)
	    {
	      def_data = get_lim_data (def_stmt);
	      if (def_data)
		{
		  min_cost = MIN (min_cost, def_data->cost);
		  total_cost += def_data->cost;
		}
	    }
	}

      min_cost = MIN (min_cost, total_cost);
      lim_data->cost += min_cost;

      if (gimple_phi_num_args (phi) > 1)
	{
	  basic_block dom = get_immediate_dominator (CDI_DOMINATORS, bb);
	  gimple *cond;
	  if (gsi_end_p (gsi_last_bb (dom)))
	    return false;
	  cond = gsi_stmt (gsi_last_bb (dom));
	  if (gimple_code (cond) != GIMPLE_COND)
	    return false;
	  /* Verify that this is an extended form of a diamond and
	     the PHI arguments are completely controlled by the
	     predicate in DOM.  */
	  if (!extract_true_false_args_from_phi (dom, phi, NULL, NULL))
	    return false;

	  /* Fold in dependencies and cost of the condition.  */
	  FOR_EACH_SSA_TREE_OPERAND (val, cond, iter, SSA_OP_USE)
	    {
	      if (!add_dependency (val, lim_data, loop, false))
		return false;
	      def_data = get_lim_data (SSA_NAME_DEF_STMT (val));
	      if (def_data)
		lim_data->cost += def_data->cost;
	    }

	  /* We want to avoid unconditionally executing very expensive
	     operations.  As costs for our dependencies cannot be
	     negative, just claim we are not invariant for this case.
	     We also are not sure whether the control-flow inside the
	     loop will vanish.  */
	  if (total_cost - min_cost >= 2 * LIM_EXPENSIVE
	      && !(min_cost != 0
		   && total_cost / min_cost <= 2))
	    return false;

	  /* Assume that the control-flow in the loop will vanish.
	     ???  We should verify this and not artificially increase
	     the cost if that is not the case.  */
	  lim_data->cost += stmt_cost (stmt);
	}

      return true;
    }
  else
    FOR_EACH_SSA_TREE_OPERAND (val, stmt, iter, SSA_OP_USE)
      if (!add_dependency (val, lim_data, loop, true))
	return false;

  if (gimple_vuse (stmt))
    {
      im_mem_ref *ref
	= lim_data ? memory_accesses.refs_list[lim_data->ref] : NULL;
      if (ref
	  && MEM_ANALYZABLE (ref))
	{
	  lim_data->max_loop = outermost_indep_loop (lim_data->max_loop,
						     loop, ref);
	  if (!lim_data->max_loop)
	    return false;
	}
      else if (! add_dependency (gimple_vuse (stmt), lim_data, loop, false))
	return false;
    }

  lim_data->cost += stmt_cost (stmt);

  return true;
}

/* Suppose that some statement in ORIG_LOOP is hoisted to the loop LEVEL,
   and that one of the operands of this statement is computed by STMT.
   Ensure that STMT (together with all the statements that define its
   operands) is hoisted at least out of the loop LEVEL.  */

static void
set_level (gimple *stmt, struct loop *orig_loop, struct loop *level)
{
  struct loop *stmt_loop = gimple_bb (stmt)->loop_father;
  struct lim_aux_data *lim_data;
  gimple *dep_stmt;
  unsigned i;

  stmt_loop = find_common_loop (orig_loop, stmt_loop);
  lim_data = get_lim_data (stmt);
  if (lim_data != NULL && lim_data->tgt_loop != NULL)
    stmt_loop = find_common_loop (stmt_loop,
				  loop_outer (lim_data->tgt_loop));
  if (flow_loop_nested_p (stmt_loop, level))
    return;

  gcc_assert (level == lim_data->max_loop
	      || flow_loop_nested_p (lim_data->max_loop, level));

  lim_data->tgt_loop = level;
  FOR_EACH_VEC_ELT (lim_data->depends, i, dep_stmt)
    set_level (dep_stmt, orig_loop, level);
}

/* Determines an outermost loop from which we want to hoist the statement
   STMT.  For now we choose the outermost possible loop.  TODO -- use
   profiling information to set it more sanely.  */

static void
set_profitable_level (gimple *stmt)
{
  set_level (stmt, gimple_bb (stmt)->loop_father, get_lim_data (stmt)->max_loop);
}

/* Returns true if STMT is a call that has side effects.  */

static bool
nonpure_call_p (gimple *stmt)
{
  if (gimple_code (stmt) != GIMPLE_CALL)
    return false;

  return gimple_has_side_effects (stmt);
}

/* Rewrite a/b to a*(1/b).  Return the invariant stmt to process.  */

static gimple *
rewrite_reciprocal (gimple_stmt_iterator *bsi)
{
  gassign *stmt, *stmt1, *stmt2;
  tree name, lhs, type;
  tree real_one;
  gimple_stmt_iterator gsi;

  stmt = as_a <gassign *> (gsi_stmt (*bsi));
  lhs = gimple_assign_lhs (stmt);
  type = TREE_TYPE (lhs);

  real_one = build_one_cst (type);

  name = make_temp_ssa_name (type, NULL, "reciptmp");
  stmt1 = gimple_build_assign (name, RDIV_EXPR, real_one,
			       gimple_assign_rhs2 (stmt));
  stmt2 = gimple_build_assign (lhs, MULT_EXPR, name,
			       gimple_assign_rhs1 (stmt));

  /* Replace division stmt with reciprocal and multiply stmts.
     The multiply stmt is not invariant, so update iterator
     and avoid rescanning.  */
  gsi = *bsi;
  gsi_insert_before (bsi, stmt1, GSI_NEW_STMT);
  gsi_replace (&gsi, stmt2, true);

  /* Continue processing with invariant reciprocal statement.  */
  return stmt1;
}
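
/* For example (illustrative), with a loop-invariant divisor b_2,

     x_1 = a_3 / b_2;

   becomes

     reciptmp_4 = 1.0 / b_2;
     x_1 = a_3 * reciptmp_4;

   so the reciprocal can be hoisted while the multiplication stays in
   the loop.  */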

/* Check if the pattern at *BSI is a bittest of the form
   (A >> B) & 1 != 0 and in this case rewrite it to A & (1 << B) != 0.  */

static gimple *
rewrite_bittest (gimple_stmt_iterator *bsi)
{
  gassign *stmt;
  gimple *stmt1;
  gassign *stmt2;
  gimple *use_stmt;
  gcond *cond_stmt;
  tree lhs, name, t, a, b;
  use_operand_p use;

  stmt = as_a <gassign *> (gsi_stmt (*bsi));
  lhs = gimple_assign_lhs (stmt);

  /* Verify that the single use of lhs is a comparison against zero.  */
  if (TREE_CODE (lhs) != SSA_NAME
      || !single_imm_use (lhs, &use, &use_stmt))
    return stmt;
  cond_stmt = dyn_cast <gcond *> (use_stmt);
  if (!cond_stmt)
    return stmt;
  if (gimple_cond_lhs (cond_stmt) != lhs
      || (gimple_cond_code (cond_stmt) != NE_EXPR
	  && gimple_cond_code (cond_stmt) != EQ_EXPR)
      || !integer_zerop (gimple_cond_rhs (cond_stmt)))
    return stmt;

  /* Get at the operands of the shift.  The rhs is TMP1 & 1.  */
  stmt1 = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
  if (gimple_code (stmt1) != GIMPLE_ASSIGN)
    return stmt;

  /* There is a conversion in between possibly inserted by fold.  */
  if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (stmt1)))
    {
      t = gimple_assign_rhs1 (stmt1);
      if (TREE_CODE (t) != SSA_NAME
	  || !has_single_use (t))
	return stmt;
      stmt1 = SSA_NAME_DEF_STMT (t);
      if (gimple_code (stmt1) != GIMPLE_ASSIGN)
	return stmt;
    }

  /* Verify that B is loop invariant but A is not.  Verify that with
     all the stmt walking we are still in the same loop.  */
  if (gimple_assign_rhs_code (stmt1) != RSHIFT_EXPR
      || loop_containing_stmt (stmt1) != loop_containing_stmt (stmt))
    return stmt;

  a = gimple_assign_rhs1 (stmt1);
  b = gimple_assign_rhs2 (stmt1);

  if (outermost_invariant_loop (b, loop_containing_stmt (stmt1)) != NULL
      && outermost_invariant_loop (a, loop_containing_stmt (stmt1)) == NULL)
    {
      gimple_stmt_iterator rsi;

      /* 1 << B */
      t = fold_build2 (LSHIFT_EXPR, TREE_TYPE (a),
		       build_int_cst (TREE_TYPE (a), 1), b);
      name = make_temp_ssa_name (TREE_TYPE (a), NULL, "shifttmp");
      stmt1 = gimple_build_assign (name, t);

      /* A & (1 << B) */
      t = fold_build2 (BIT_AND_EXPR, TREE_TYPE (a), a, name);
      name = make_temp_ssa_name (TREE_TYPE (a), NULL, "shifttmp");
      stmt2 = gimple_build_assign (name, t);

      /* Replace the SSA_NAME we compare against zero.  Adjust
	 the type of zero accordingly.  */
      SET_USE (use, name);
      gimple_cond_set_rhs (cond_stmt,
			   build_int_cst_type (TREE_TYPE (name),
					       0));

      /* Don't use gsi_replace here, none of the new assignments sets
	 the variable originally set in stmt.  Move bsi to stmt1, and
	 then remove the original stmt, so that we get a chance to
	 retain debug info for it.  */
      rsi = *bsi;
      gsi_insert_before (bsi, stmt1, GSI_NEW_STMT);
      gsi_insert_before (&rsi, stmt2, GSI_SAME_STMT);
      gimple *to_release = gsi_stmt (rsi);
      gsi_remove (&rsi, true);
      release_defs (to_release);

      return stmt1;
    }

  return stmt;
}
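
/* For example (illustrative), with a loop-invariant shift count b_2,

     t_3 = a_1 >> b_2;
     t_4 = t_3 & 1;
     if (t_4 != 0)

   becomes

     shifttmp_5 = 1 << b_2;
     shifttmp_6 = a_1 & shifttmp_5;
     if (shifttmp_6 != 0)

   so the mask 1 << b_2 can be hoisted out of the loop.  */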

/* For each statement determines the outermost loop in which it is invariant,
   the statements on whose motion it depends and the cost of the computation.
   This information is stored to the LIM_DATA structure associated with
   each statement.  */
class invariantness_dom_walker : public dom_walker
{
public:
  invariantness_dom_walker (cdi_direction direction)
    : dom_walker (direction) {}

  virtual edge before_dom_children (basic_block);
};

/* Determine the outermost loops in which statements in basic block BB are
   invariant, and record them to the LIM_DATA associated with the statements.
   Callback for dom_walker.  */

edge
invariantness_dom_walker::before_dom_children (basic_block bb)
{
  enum move_pos pos;
  gimple_stmt_iterator bsi;
  gimple *stmt;
  bool maybe_never = ALWAYS_EXECUTED_IN (bb) == NULL;
  struct loop *outermost = ALWAYS_EXECUTED_IN (bb);
  struct lim_aux_data *lim_data;

  if (!loop_outer (bb->loop_father))
    return NULL;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "Basic block %d (loop %d -- depth %d):\n\n",
	     bb->index, bb->loop_father->num, loop_depth (bb->loop_father));

  /* Look at PHI nodes, but only if there are at most two.
     ???  We could relax this further by post-processing the inserted
     code and transforming adjacent cond-exprs with the same predicate
     to control flow again.  */
  bsi = gsi_start_phis (bb);
  if (!gsi_end_p (bsi)
      && ((gsi_next (&bsi), gsi_end_p (bsi))
	  || (gsi_next (&bsi), gsi_end_p (bsi))))
    for (bsi = gsi_start_phis (bb); !gsi_end_p (bsi); gsi_next (&bsi))
      {
	stmt = gsi_stmt (bsi);

	pos = movement_possibility (stmt);
	if (pos == MOVE_IMPOSSIBLE)
	  continue;

	lim_data = get_lim_data (stmt);
	if (! lim_data)
	  lim_data = init_lim_data (stmt);
	lim_data->always_executed_in = outermost;

	if (!determine_max_movement (stmt, false))
	  {
	    lim_data->max_loop = NULL;
	    continue;
	  }

	if (dump_file && (dump_flags & TDF_DETAILS))
	  {
	    print_gimple_stmt (dump_file, stmt, 2);
	    fprintf (dump_file, "  invariant up to level %d, cost %d.\n\n",
		     loop_depth (lim_data->max_loop),
		     lim_data->cost);
	  }

	if (lim_data->cost >= LIM_EXPENSIVE)
	  set_profitable_level (stmt);
      }

  for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
    {
      stmt = gsi_stmt (bsi);

      pos = movement_possibility (stmt);
      if (pos == MOVE_IMPOSSIBLE)
	{
	  if (nonpure_call_p (stmt))
	    {
	      maybe_never = true;
	      outermost = NULL;
	    }
	  /* Make sure to note always_executed_in for stores to make
	     store-motion work.  */
	  else if (stmt_makes_single_store (stmt))
	    {
	      struct lim_aux_data *lim_data = get_lim_data (stmt);
	      if (! lim_data)
		lim_data = init_lim_data (stmt);
	      lim_data->always_executed_in = outermost;
	    }
	  continue;
	}

      if (is_gimple_assign (stmt)
	  && (get_gimple_rhs_class (gimple_assign_rhs_code (stmt))
	      == GIMPLE_BINARY_RHS))
	{
	  tree op0 = gimple_assign_rhs1 (stmt);
	  tree op1 = gimple_assign_rhs2 (stmt);
	  struct loop *ol1 = outermost_invariant_loop (op1,
					loop_containing_stmt (stmt));

	  /* If divisor is invariant, convert a/b to a*(1/b), allowing reciprocal
	     to be hoisted out of loop, saving expensive divide.  */
	  if (pos == MOVE_POSSIBLE
	      && gimple_assign_rhs_code (stmt) == RDIV_EXPR
	      && flag_unsafe_math_optimizations
	      && !flag_trapping_math
	      && ol1 != NULL
	      && outermost_invariant_loop (op0, ol1) == NULL)
	    stmt = rewrite_reciprocal (&bsi);

	  /* If the shift count is invariant, convert (A >> B) & 1 to
	     A & (1 << B) allowing the bit mask to be hoisted out of the loop
	     saving an expensive shift.  */
	  if (pos == MOVE_POSSIBLE
	      && gimple_assign_rhs_code (stmt) == BIT_AND_EXPR
	      && integer_onep (op1)
	      && TREE_CODE (op0) == SSA_NAME
	      && has_single_use (op0))
	    stmt = rewrite_bittest (&bsi);
	}

      lim_data = get_lim_data (stmt);
      if (! lim_data)
	lim_data = init_lim_data (stmt);
      lim_data->always_executed_in = outermost;

      if (maybe_never && pos == MOVE_PRESERVE_EXECUTION)
	continue;

      if (!determine_max_movement (stmt, pos == MOVE_PRESERVE_EXECUTION))
	{
	  lim_data->max_loop = NULL;
	  continue;
	}

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  print_gimple_stmt (dump_file, stmt, 2);
	  fprintf (dump_file, "  invariant up to level %d, cost %d.\n\n",
		   loop_depth (lim_data->max_loop),
		   lim_data->cost);
	}

      if (lim_data->cost >= LIM_EXPENSIVE)
	set_profitable_level (stmt);
    }
  return NULL;
}

/* Hoist the statements in basic block BB out of the loops prescribed by
   data stored in LIM_DATA structures associated with each statement.  Callback
   for walk_dominator_tree.  */

unsigned int
move_computations_worker (basic_block bb)
{
  struct loop *level;
  unsigned cost = 0;
  struct lim_aux_data *lim_data;
  unsigned int todo = 0;

  if (!loop_outer (bb->loop_father))
    return todo;

  for (gphi_iterator bsi = gsi_start_phis (bb); !gsi_end_p (bsi); )
    {
      gassign *new_stmt;
      gphi *stmt = bsi.phi ();

      lim_data = get_lim_data (stmt);
      if (lim_data == NULL)
	{
	  gsi_next (&bsi);
	  continue;
	}

      cost = lim_data->cost;
      level = lim_data->tgt_loop;
      clear_lim_data (stmt);

      if (!level)
	{
	  gsi_next (&bsi);
	  continue;
	}

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "Moving PHI node\n");
	  print_gimple_stmt (dump_file, stmt, 0);
	  fprintf (dump_file, "(cost %u) out of loop %d.\n\n",
		   cost, level->num);
	}

      if (gimple_phi_num_args (stmt) == 1)
	{
	  tree arg = PHI_ARG_DEF (stmt, 0);
	  new_stmt = gimple_build_assign (gimple_phi_result (stmt),
					  TREE_CODE (arg), arg);
	}
      else
	{
	  basic_block dom = get_immediate_dominator (CDI_DOMINATORS, bb);
	  gimple *cond = gsi_stmt (gsi_last_bb (dom));
	  tree arg0 = NULL_TREE, arg1 = NULL_TREE, t;
	  /* Get the PHI arguments corresponding to the true and false
	     edges of COND.  */
	  extract_true_false_args_from_phi (dom, stmt, &arg0, &arg1);
	  gcc_assert (arg0 && arg1);
	  t = build2 (gimple_cond_code (cond), boolean_type_node,
		      gimple_cond_lhs (cond), gimple_cond_rhs (cond));
	  new_stmt = gimple_build_assign (gimple_phi_result (stmt),
					  COND_EXPR, t, arg0, arg1);
	  todo |= TODO_cleanup_cfg;
	}
      if (INTEGRAL_TYPE_P (TREE_TYPE (gimple_assign_lhs (new_stmt)))
	  && (!ALWAYS_EXECUTED_IN (bb)
	      || (ALWAYS_EXECUTED_IN (bb) != level
		  && !flow_loop_nested_p (ALWAYS_EXECUTED_IN (bb), level))))
	{
	  tree lhs = gimple_assign_lhs (new_stmt);
	  SSA_NAME_RANGE_INFO (lhs) = NULL;
	}
      gsi_insert_on_edge (loop_preheader_edge (level), new_stmt);
      remove_phi_node (&bsi, false);
    }

  for (gimple_stmt_iterator bsi = gsi_start_bb (bb); !gsi_end_p (bsi); )
    {
      edge e;

      gimple *stmt = gsi_stmt (bsi);

      lim_data = get_lim_data (stmt);
      if (lim_data == NULL)
	{
	  gsi_next (&bsi);
	  continue;
	}

      cost = lim_data->cost;
      level = lim_data->tgt_loop;
      clear_lim_data (stmt);

      if (!level)
	{
	  gsi_next (&bsi);
	  continue;
	}

      /* We do not really want to move conditionals out of the loop; we only
	 place them here to force their operands to be moved if necessary.  */
      if (gimple_code (stmt) == GIMPLE_COND)
	continue;

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "Moving statement\n");
	  print_gimple_stmt (dump_file, stmt, 0);
	  fprintf (dump_file, "(cost %u) out of loop %d.\n\n",
		   cost, level->num);
	}

      e = loop_preheader_edge (level);
      gcc_assert (!gimple_vdef (stmt));
      if (gimple_vuse (stmt))
	{
	  /* The new VUSE is the one from the virtual PHI in the loop
	     header or the one already present.  */
	  gphi_iterator gsi2;
	  for (gsi2 = gsi_start_phis (e->dest);
	       !gsi_end_p (gsi2); gsi_next (&gsi2))
	    {
	      gphi *phi = gsi2.phi ();
	      if (virtual_operand_p (gimple_phi_result (phi)))
		{
		  gimple_set_vuse (stmt, PHI_ARG_DEF_FROM_EDGE (phi, e));
		  break;
		}
	    }
	}
      gsi_remove (&bsi, false);
      if (gimple_has_lhs (stmt)
	  && TREE_CODE (gimple_get_lhs (stmt)) == SSA_NAME
	  && INTEGRAL_TYPE_P (TREE_TYPE (gimple_get_lhs (stmt)))
	  && (!ALWAYS_EXECUTED_IN (bb)
	      || !(ALWAYS_EXECUTED_IN (bb) == level
		   || flow_loop_nested_p (ALWAYS_EXECUTED_IN (bb), level))))
	{
	  tree lhs = gimple_get_lhs (stmt);
	  SSA_NAME_RANGE_INFO (lhs) = NULL;
	}
      /* In case this is a stmt that is not unconditionally executed
         when the target loop header is executed and the stmt may
	 invoke undefined integer or pointer overflow rewrite it to
	 unsigned arithmetic.  */
      if (is_gimple_assign (stmt)
	  && INTEGRAL_TYPE_P (TREE_TYPE (gimple_assign_lhs (stmt)))
	  && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (gimple_assign_lhs (stmt)))
	  && arith_code_with_undefined_signed_overflow
	       (gimple_assign_rhs_code (stmt))
	  && (!ALWAYS_EXECUTED_IN (bb)
	      || !(ALWAYS_EXECUTED_IN (bb) == level
		   || flow_loop_nested_p (ALWAYS_EXECUTED_IN (bb), level))))
	gsi_insert_seq_on_edge (e, rewrite_to_defined_overflow (stmt));
      else
	gsi_insert_on_edge (e, stmt);
    }

  return todo;
}

/* Hoist the statements out of the loops prescribed by data stored in
   LIM_DATA structures associated with each statement.  */

static unsigned int
move_computations (void)
{
  int *rpo = XNEWVEC (int, last_basic_block_for_fn (cfun));
  int n = pre_and_rev_post_order_compute_fn (cfun, NULL, rpo, false);
  unsigned todo = 0;

  for (int i = 0; i < n; ++i)
    todo |= move_computations_worker (BASIC_BLOCK_FOR_FN (cfun, rpo[i]));

  free (rpo);

  gsi_commit_edge_inserts ();
  if (need_ssa_update_p (cfun))
    rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);

  return todo;
}

/* Checks whether the statement defining variable *INDEX can be hoisted
   out of the loop passed in DATA.  Callback for for_each_index.  */

static bool
may_move_till (tree ref, tree *index, void *data)
{
  struct loop *loop = (struct loop *) data, *max_loop;

  /* If REF is an array reference, check also that the step and the lower
     bound are invariant in LOOP.  */
  if (TREE_CODE (ref) == ARRAY_REF)
    {
      tree step = TREE_OPERAND (ref, 3);
      tree lbound = TREE_OPERAND (ref, 2);

      max_loop = outermost_invariant_loop (step, loop);
      if (!max_loop)
	return false;

      max_loop = outermost_invariant_loop (lbound, loop);
      if (!max_loop)
	return false;
    }

  max_loop = outermost_invariant_loop (*index, loop);
  if (!max_loop)
    return false;

  return true;
}

/* If OP is an SSA NAME, force the statement that defines it to be
   moved out of the LOOP.  ORIG_LOOP is the loop in which OP is used.  */

static void
force_move_till_op (tree op, struct loop *orig_loop, struct loop *loop)
{
  gimple *stmt;

  if (!op
      || is_gimple_min_invariant (op))
    return;

  gcc_assert (TREE_CODE (op) == SSA_NAME);

  stmt = SSA_NAME_DEF_STMT (op);
  if (gimple_nop_p (stmt))
    return;

  set_level (stmt, orig_loop, loop);
}

/* Forces statements defining invariants in REF (and *INDEX) to be moved out
   of the LOOP.  The reference REF is used in the loop ORIG_LOOP.  Callback
   for for_each_index.  */

struct fmt_data
{
  struct loop *loop;
  struct loop *orig_loop;
};

static bool
force_move_till (tree ref, tree *index, void *data)
{
  struct fmt_data *fmt_data = (struct fmt_data *) data;

  if (TREE_CODE (ref) == ARRAY_REF)
    {
      tree step = TREE_OPERAND (ref, 3);
      tree lbound = TREE_OPERAND (ref, 2);

      force_move_till_op (step, fmt_data->orig_loop, fmt_data->loop);
      force_move_till_op (lbound, fmt_data->orig_loop, fmt_data->loop);
    }

  force_move_till_op (*index, fmt_data->orig_loop, fmt_data->loop);

  return true;
}

/* A function to free the mem_ref object MEM.  */

static void
memref_free (struct im_mem_ref *mem)
{
  mem->accesses_in_loop.release ();
}

/* Allocates and returns a memory reference description for MEM whose hash
   value is HASH and id is ID.  */

static im_mem_ref *
mem_ref_alloc (ao_ref *mem, unsigned hash, unsigned id)
{
  im_mem_ref *ref = XOBNEW (&mem_ref_obstack, struct im_mem_ref);
  if (mem)
    ref->mem = *mem;
  else
    ao_ref_init (&ref->mem, error_mark_node);
  ref->id = id;
  ref->ref_canonical = false;
  ref->ref_decomposed = false;
  ref->hash = hash;
  ref->stored = NULL;
  bitmap_initialize (&ref->indep_loop, &lim_bitmap_obstack);
  bitmap_initialize (&ref->dep_loop, &lim_bitmap_obstack);
  ref->accesses_in_loop.create (1);

  return ref;
}

/* Records memory reference location *LOC to the memory reference
   description REF.  The reference occurs in statement STMT.  */

static void
record_mem_ref_loc (im_mem_ref *ref, gimple *stmt, tree *loc)
{
  mem_ref_loc aref;
  aref.stmt = stmt;
  aref.ref = loc;
  ref->accesses_in_loop.safe_push (aref);
}

/* Set the LOOP bit in REF's stored bitmap, allocating it if
   necessary.  Return whether a bit was changed.  */

static bool
set_ref_stored_in_loop (im_mem_ref *ref, struct loop *loop)
{
  if (!ref->stored)
    ref->stored = BITMAP_ALLOC (&lim_bitmap_obstack);
  return bitmap_set_bit (ref->stored, loop->num);
}

/* Marks reference REF as stored in LOOP.  */

static void
mark_ref_stored (im_mem_ref *ref, struct loop *loop)
{
  while (loop != current_loops->tree_root
	 && set_ref_stored_in_loop (ref, loop))
    loop = loop_outer (loop);
}

/* Gathers memory references in statement STMT in LOOP, storing the
   information about them in the memory_accesses structure.  Marks
   the vops accessed through unrecognized statements there as
   well.  */

static void
gather_mem_refs_stmt (struct loop *loop, gimple *stmt)
{
  tree *mem = NULL;
  hashval_t hash;
  im_mem_ref **slot;
  im_mem_ref *ref;
  bool is_stored;
  unsigned id;

  if (!gimple_vuse (stmt))
    return;

  mem = simple_mem_ref_in_stmt (stmt, &is_stored);
  if (!mem)
    {
      /* We use the shared mem_ref for all unanalyzable refs.  */
      id = UNANALYZABLE_MEM_ID;
      ref = memory_accesses.refs_list[id];
      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "Unanalyzed memory reference %u: ", id);
	  print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
	}
      is_stored = gimple_vdef (stmt);
    }
  else
    {
      /* We are looking for equal refs that might differ in structure
         such as a.b vs. MEM[&a + 4].  So we key off the ao_ref but
	 make sure we can canonicalize the ref in the hashtable if
	 non-operand_equal_p refs are found.  For the lookup we mark
	 the case we want strict equality with aor.max_size == -1.  */
      ao_ref aor;
      ao_ref_init (&aor, *mem);
      ao_ref_base (&aor);
      ao_ref_alias_set (&aor);
      HOST_WIDE_INT offset, size, max_size;
      poly_int64 saved_maxsize = aor.max_size, mem_off;
      tree mem_base;
      bool ref_decomposed;
      if (aor.max_size_known_p ()
	  && aor.offset.is_constant (&offset)
	  && aor.size.is_constant (&size)
	  && aor.max_size.is_constant (&max_size)
	  && size == max_size
	  && (size % BITS_PER_UNIT) == 0
	  /* We're canonicalizing to a MEM where TYPE_SIZE specifies the
	     size.  Make sure this is consistent with the extraction.  */
	  && poly_int_tree_p (TYPE_SIZE (TREE_TYPE (*mem)))
	  && known_eq (wi::to_poly_offset (TYPE_SIZE (TREE_TYPE (*mem))),
		       aor.size)
	  && (mem_base = get_addr_base_and_unit_offset (aor.ref, &mem_off)))
	{
	  ref_decomposed = true;
	  hash = iterative_hash_expr (ao_ref_base (&aor), 0);
	  hash = iterative_hash_host_wide_int (offset, hash);
	  hash = iterative_hash_host_wide_int (size, hash);
	}
      else
	{
	  ref_decomposed = false;
	  hash = iterative_hash_expr (aor.ref, 0);
	  aor.max_size = -1;
	}
      slot = memory_accesses.refs->find_slot_with_hash (&aor, hash, INSERT);
      aor.max_size = saved_maxsize;
      if (*slot)
	{
	  if (!(*slot)->ref_canonical
	      && !operand_equal_p (*mem, (*slot)->mem.ref, 0))
	    {
	      /* If we didn't yet canonicalize the hashtable ref (which
	         we'll end up using for code insertion) and hit a second
		 equal ref that is not structurally equivalent create
		 a canonical ref which is a bare MEM_REF.  */
	      if (TREE_CODE (*mem) == MEM_REF
		  || TREE_CODE (*mem) == TARGET_MEM_REF)
		{
		  (*slot)->mem.ref = *mem;
		  (*slot)->mem.base_alias_set = ao_ref_base_alias_set (&aor);
		}
	      else
		{
		  tree ref_alias_type = reference_alias_ptr_type (*mem);
		  unsigned int ref_align = get_object_alignment (*mem);
		  tree ref_type = TREE_TYPE (*mem);
		  tree tmp = build_fold_addr_expr (unshare_expr (mem_base));
		  if (TYPE_ALIGN (ref_type) != ref_align)
		    ref_type = build_aligned_type (ref_type, ref_align);
		  (*slot)->mem.ref
		    = fold_build2 (MEM_REF, ref_type, tmp,
				   build_int_cst (ref_alias_type, mem_off));
		  if ((*slot)->mem.volatile_p)
		    TREE_THIS_VOLATILE ((*slot)->mem.ref) = 1;
		  gcc_checking_assert (TREE_CODE ((*slot)->mem.ref) == MEM_REF
				       && is_gimple_mem_ref_addr
				            (TREE_OPERAND ((*slot)->mem.ref,
							   0)));
		  (*slot)->mem.base_alias_set = (*slot)->mem.ref_alias_set;
		}
	      (*slot)->ref_canonical = true;
	    }
	  ref = *slot;
	  id = ref->id;
	}
      else
	{
	  id = memory_accesses.refs_list.length ();
	  ref = mem_ref_alloc (&aor, hash, id);
	  ref->ref_decomposed = ref_decomposed;
	  memory_accesses.refs_list.safe_push (ref);
	  *slot = ref;

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "Memory reference %u: ", id);
	      print_generic_expr (dump_file, ref->mem.ref, TDF_SLIM);
	      fprintf (dump_file, "\n");
	    }
	}

      record_mem_ref_loc (ref, stmt, mem);
    }
  bitmap_set_bit (&memory_accesses.refs_in_loop[loop->num], ref->id);
  if (is_stored)
    {
      bitmap_set_bit (&memory_accesses.refs_stored_in_loop[loop->num], ref->id);
      mark_ref_stored (ref, loop);
    }
  init_lim_data (stmt)->ref = ref->id;
  return;
}

static unsigned *bb_loop_postorder;

/* Comparison function for qsort, sorting blocks by their loop father's
   postorder number.  */

static int
sort_bbs_in_loop_postorder_cmp (const void *bb1_, const void *bb2_)
{
  basic_block bb1 = *(basic_block *)const_cast<void *>(bb1_);
  basic_block bb2 = *(basic_block *)const_cast<void *>(bb2_);
  struct loop *loop1 = bb1->loop_father;
  struct loop *loop2 = bb2->loop_father;
  if (loop1->num == loop2->num)
    return bb1->index - bb2->index;
  return bb_loop_postorder[loop1->num] < bb_loop_postorder[loop2->num] ? -1 : 1;
}

/* Comparison function for qsort, sorting reference locations by their loop
   father's postorder number.  */

static int
sort_locs_in_loop_postorder_cmp (const void *loc1_, const void *loc2_)
{
  mem_ref_loc *loc1 = (mem_ref_loc *)const_cast<void *>(loc1_);
  mem_ref_loc *loc2 = (mem_ref_loc *)const_cast<void *>(loc2_);
  struct loop *loop1 = gimple_bb (loc1->stmt)->loop_father;
  struct loop *loop2 = gimple_bb (loc2->stmt)->loop_father;
  if (loop1->num == loop2->num)
    return 0;
  return bb_loop_postorder[loop1->num] < bb_loop_postorder[loop2->num] ? -1 : 1;
}

/* Gathers memory references in loops.  */

static void
analyze_memory_references (void)
{
  gimple_stmt_iterator bsi;
  basic_block bb, *bbs;
  struct loop *loop, *outer;
  unsigned i, n;

  /* Collect all basic-blocks in loops and sort them by their loop's
     postorder number.  */
  i = 0;
  bbs = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS);
  FOR_EACH_BB_FN (bb, cfun)
    if (bb->loop_father != current_loops->tree_root)
      bbs[i++] = bb;
  n = i;
  qsort (bbs, n, sizeof (basic_block), sort_bbs_in_loop_postorder_cmp);

  /* Visit blocks in loop postorder and assign mem-ref IDs in that order.
     That results in better locality for all the bitmaps.  */
  for (i = 0; i < n; ++i)
    {
      basic_block bb = bbs[i];
      for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
	gather_mem_refs_stmt (bb->loop_father, gsi_stmt (bsi));
    }

  /* Sort the location list of gathered memory references by their
     loop postorder number.  */
  im_mem_ref *ref;
  FOR_EACH_VEC_ELT (memory_accesses.refs_list, i, ref)
    ref->accesses_in_loop.qsort (sort_locs_in_loop_postorder_cmp);

  free (bbs);
  /* Do not free bb_loop_postorder here; it is still used by
     find_ref_loc_in_loop_cmp during dependence analysis and is
     released in tree_ssa_lim_finalize.  */

  /* Propagate the information about accessed memory references up
     the loop hierarchy.  */
  FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
    {
      /* Finalize the overall touched references (including subloops).  */
      bitmap_ior_into (&memory_accesses.all_refs_stored_in_loop[loop->num],
		       &memory_accesses.refs_stored_in_loop[loop->num]);

      /* Propagate the information about accessed memory references up
	 the loop hierarchy.  */
      outer = loop_outer (loop);
      if (outer == current_loops->tree_root)
	continue;

      bitmap_ior_into (&memory_accesses.all_refs_stored_in_loop[outer->num],
		       &memory_accesses.all_refs_stored_in_loop[loop->num]);
    }
}
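
/* For instance, for

     for (i ...)	<- loop 1
       for (j ...)	<- loop 2
	 a[j] = ...;

   the store to a[j] is recorded in refs_stored_in_loop[2], and the
   innermost-first propagation above makes it appear in
   all_refs_stored_in_loop[1] as well.  */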

/* Returns true if MEM1 and MEM2 may alias.  TTAE_CACHE is used as a cache in
   tree_to_aff_combination_expand.  */

static bool
mem_refs_may_alias_p (im_mem_ref *mem1, im_mem_ref *mem2,
		      hash_map<tree, name_expansion *> **ttae_cache)
{
  /* Perform BASE + OFFSET analysis -- if MEM1 and MEM2 are based on the same
     object and their offsets differ in such a way that the locations cannot
     overlap, then they cannot alias.  */
  poly_widest_int size1, size2;
  aff_tree off1, off2;

  /* Perform basic offset and type-based disambiguation.  */
  if (!refs_may_alias_p_1 (&mem1->mem, &mem2->mem, true))
    return false;

  /* The expansion of addresses may be a bit expensive, thus we only do
     the check at -O2 and higher optimization levels.  */
  if (optimize < 2)
    return true;

  get_inner_reference_aff (mem1->mem.ref, &off1, &size1);
  get_inner_reference_aff (mem2->mem.ref, &off2, &size2);
  aff_combination_expand (&off1, ttae_cache);
  aff_combination_expand (&off2, ttae_cache);
  aff_combination_scale (&off1, -1);
  aff_combination_add (&off2, &off1);

  if (aff_comb_cannot_overlap_p (&off2, size1, size2))
    return false;

  return true;
}
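
/* E.g. for MEM1 = a[i] and MEM2 = a[i + 1] the difference of the
   expanded affine offsets is exactly one element size, so
   aff_comb_cannot_overlap_p concludes that accesses of that size
   cannot overlap and the two references do not alias.  */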

/* Compare function for bsearch searching for reference locations
   in a loop.  */

static int
find_ref_loc_in_loop_cmp (const void *loop_, const void *loc_)
{
  struct loop *loop = (struct loop *)const_cast<void *>(loop_);
  mem_ref_loc *loc = (mem_ref_loc *)const_cast<void *>(loc_);
  struct loop *loc_loop = gimple_bb (loc->stmt)->loop_father;
  if (loop->num == loc_loop->num
      || flow_loop_nested_p (loop, loc_loop))
    return 0;
  return (bb_loop_postorder[loop->num] < bb_loop_postorder[loc_loop->num]
	  ? -1 : 1);
}

/* Iterates over all locations of REF in LOOP and its subloops, calling
   fn.operator() with the location as argument.  When that operator
   returns true the iteration is stopped and true is returned.
   Otherwise false is returned.  */

template <typename FN>
static bool
for_all_locs_in_loop (struct loop *loop, im_mem_ref *ref, FN fn)
{
  unsigned i;
  mem_ref_loc *loc;

  /* Search for the cluster of locs in the accesses_in_loop vector
     which is sorted by postorder index of the loop father.  */
  loc = ref->accesses_in_loop.bsearch (loop, find_ref_loc_in_loop_cmp);
  if (!loc)
    return false;

  /* We have found one location inside loop or its sub-loops.  Iterate
     both forward and backward to cover the whole cluster.  */
  i = loc - ref->accesses_in_loop.address ();
  while (i > 0)
    {
      --i;
      mem_ref_loc *l = &ref->accesses_in_loop[i];
      if (!flow_bb_inside_loop_p (loop, gimple_bb (l->stmt)))
	break;
      if (fn (l))
	return true;
    }
  for (i = loc - ref->accesses_in_loop.address ();
       i < ref->accesses_in_loop.length (); ++i)
    {
      mem_ref_loc *l = &ref->accesses_in_loop[i];
      if (!flow_bb_inside_loop_p (loop, gimple_bb (l->stmt)))
	break;
      if (fn (l))
	return true;
    }

  return false;
}

/* Rewrites location LOC by TMP_VAR.  */

struct rewrite_mem_ref_loc
{
  rewrite_mem_ref_loc (tree tmp_var_) : tmp_var (tmp_var_) {}
  bool operator () (mem_ref_loc *loc);
  tree tmp_var;
};

bool
rewrite_mem_ref_loc::operator () (mem_ref_loc *loc)
{
  *loc->ref = tmp_var;
  update_stmt (loc->stmt);
  return false;
}

/* Rewrites all references to REF in LOOP by variable TMP_VAR.  */

static void
rewrite_mem_refs (struct loop *loop, im_mem_ref *ref, tree tmp_var)
{
  for_all_locs_in_loop (loop, ref, rewrite_mem_ref_loc (tmp_var));
}

/* Stores the first reference location in LOCP.  */

struct first_mem_ref_loc_1
{
  first_mem_ref_loc_1 (mem_ref_loc **locp_) : locp (locp_) {}
  bool operator () (mem_ref_loc *loc);
  mem_ref_loc **locp;
};

bool
first_mem_ref_loc_1::operator () (mem_ref_loc *loc)
{
  *locp = loc;
  return true;
}

/* Returns the first reference location to REF in LOOP.  */

static mem_ref_loc *
first_mem_ref_loc (struct loop *loop, im_mem_ref *ref)
{
  mem_ref_loc *locp = NULL;
  for_all_locs_in_loop (loop, ref, first_mem_ref_loc_1 (&locp));
  return locp;
}

struct prev_flag_edges {
  /* Edge to insert new flag comparison code.  */
  edge append_cond_position;

  /* Edge for fall through from previous flag comparison.  */
  edge last_cond_fallthru;
};

/* Helper function for execute_sm.  Emit code to store TMP_VAR into
   MEM along edge EX.

   The store is only done if MEM has changed.  We do this so no
   changes to MEM occur on code paths that did not originally store
   into it.

   The common case for execute_sm will transform:

     for (...) {
       if (foo)
         stuff;
       else
         MEM = TMP_VAR;
     }

   into:

     lsm = MEM;
     for (...) {
       if (foo)
         stuff;
       else
         lsm = TMP_VAR;
     }
     MEM = lsm;

   This function will generate:

     lsm = MEM;

     lsm_flag = false;
     ...
     for (...) {
       if (foo)
         stuff;
       else {
         lsm = TMP_VAR;
         lsm_flag = true;
       }
     }
     if (lsm_flag)	<--
       MEM = lsm;	<--
*/

static void
execute_sm_if_changed (edge ex, tree mem, tree tmp_var, tree flag,
		       edge preheader, hash_set <basic_block> *flag_bbs)
{
  basic_block new_bb, then_bb, old_dest;
  bool loop_has_only_one_exit;
  edge then_old_edge, orig_ex = ex;
  gimple_stmt_iterator gsi;
  gimple *stmt;
  struct prev_flag_edges *prev_edges = (struct prev_flag_edges *) ex->aux;
  bool irr = ex->flags & EDGE_IRREDUCIBLE_LOOP;

  profile_count count_sum = profile_count::zero ();
  int nbbs = 0, ncount = 0;
  profile_probability flag_probability = profile_probability::uninitialized ();

  /* Flag is set in FLAG_BBS.  Determine probability that flag will be true
     at loop exit.

     This code may look fancy, but it cannot update profile very realistically
     because we do not know the probability that flag will be true at given
     loop exit.

     We look for two interesting extremes
       - when exit is dominated by block setting the flag, we know it will
	 always be true.  This is a common case.
       - when all blocks setting the flag have very low frequency we know
	 it will likely be false.
     In all other cases we default to 2/3 for flag being true.  */

  for (hash_set<basic_block>::iterator it = flag_bbs->begin ();
       it != flag_bbs->end (); ++it)
    {
      if ((*it)->count.initialized_p ())
	{
	  count_sum += (*it)->count;
	  ncount++;
	}
      if (dominated_by_p (CDI_DOMINATORS, ex->src, *it))
	flag_probability = profile_probability::always ();
      nbbs++;
    }

  profile_probability cap = profile_probability::always ().apply_scale (2, 3);

  if (flag_probability.initialized_p ())
    ;
  else if (ncount == nbbs
	   && preheader->count () >= count_sum && preheader->count ().nonzero_p ())
    {
      flag_probability = count_sum.probability_in (preheader->count ());
      if (flag_probability > cap)
	flag_probability = cap;
    }

  if (!flag_probability.initialized_p ())
    flag_probability = cap;

  /* ?? Insert store after previous store if applicable.  See note
     below.  */
  if (prev_edges)
    ex = prev_edges->append_cond_position;

  loop_has_only_one_exit = single_pred_p (ex->dest);

  if (loop_has_only_one_exit)
    ex = split_block_after_labels (ex->dest);
  else
    {
      for (gphi_iterator gpi = gsi_start_phis (ex->dest);
	   !gsi_end_p (gpi); gsi_next (&gpi))
	{
	  gphi *phi = gpi.phi ();
	  if (virtual_operand_p (gimple_phi_result (phi)))
	    continue;

	  /* When the destination has a non-virtual PHI node with multiple
	     predecessors make sure we preserve the PHI structure by
	     forcing a forwarder block so that hoisting of that PHI will
	     still work.  */
	  split_edge (ex);
	  break;
	}
    }

  old_dest = ex->dest;
  new_bb = split_edge (ex);
  then_bb = create_empty_bb (new_bb);
  then_bb->count = new_bb->count.apply_probability (flag_probability);
  if (irr)
    then_bb->flags = BB_IRREDUCIBLE_LOOP;
  add_bb_to_loop (then_bb, new_bb->loop_father);

  gsi = gsi_start_bb (new_bb);
  stmt = gimple_build_cond (NE_EXPR, flag, boolean_false_node,
			    NULL_TREE, NULL_TREE);
  gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);

  gsi = gsi_start_bb (then_bb);
  /* Insert actual store.  */
  stmt = gimple_build_assign (unshare_expr (mem), tmp_var);
  gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);

  edge e1 = single_succ_edge (new_bb);
  edge e2 = make_edge (new_bb, then_bb,
		       EDGE_TRUE_VALUE | (irr ? EDGE_IRREDUCIBLE_LOOP : 0));
  e2->probability = flag_probability;

  e1->flags |= EDGE_FALSE_VALUE | (irr ? EDGE_IRREDUCIBLE_LOOP : 0);
  e1->flags &= ~EDGE_FALLTHRU;

  e1->probability = flag_probability.invert ();

  then_old_edge = make_single_succ_edge (then_bb, old_dest,
			     EDGE_FALLTHRU | (irr ? EDGE_IRREDUCIBLE_LOOP : 0));

  set_immediate_dominator (CDI_DOMINATORS, then_bb, new_bb);

  if (prev_edges)
    {
      basic_block prevbb = prev_edges->last_cond_fallthru->src;
      redirect_edge_succ (prev_edges->last_cond_fallthru, new_bb);
      set_immediate_dominator (CDI_DOMINATORS, new_bb, prevbb);
      set_immediate_dominator (CDI_DOMINATORS, old_dest,
			       recompute_dominator (CDI_DOMINATORS, old_dest));
    }

  /* ?? Because stores may alias, they must happen in the exact
     sequence they originally happened.  Save the position right after
     the (_lsm) store we just created so we can continue appending after
     it and maintain the original order.  */
  {
    struct prev_flag_edges *p;

    if (orig_ex->aux)
      orig_ex->aux = NULL;
    alloc_aux_for_edge (orig_ex, sizeof (struct prev_flag_edges));
    p = (struct prev_flag_edges *) orig_ex->aux;
    p->append_cond_position = then_old_edge;
    p->last_cond_fallthru = find_edge (new_bb, old_dest);
    orig_ex->aux = (void *) p;
  }

  if (!loop_has_only_one_exit)
    for (gphi_iterator gpi = gsi_start_phis (old_dest);
	 !gsi_end_p (gpi); gsi_next (&gpi))
      {
	gphi *phi = gpi.phi ();
	unsigned i;

	for (i = 0; i < gimple_phi_num_args (phi); i++)
	  if (gimple_phi_arg_edge (phi, i)->src == new_bb)
	    {
	      tree arg = gimple_phi_arg_def (phi, i);
	      add_phi_arg (phi, arg, then_old_edge, UNKNOWN_LOCATION);
	      update_stmt (phi);
	    }
      }
}

/* When the statement at LOC stores to REF, emit code setting FLAG to
   indicate the store.  */

struct sm_set_flag_if_changed
{
  sm_set_flag_if_changed (tree flag_, hash_set <basic_block> *bbs_)
	 : flag (flag_), bbs (bbs_) {}
  bool operator () (mem_ref_loc *loc);
  tree flag;
  hash_set <basic_block> *bbs;
};

bool
sm_set_flag_if_changed::operator () (mem_ref_loc *loc)
{
  /* Only set the flag for writes.  */
  if (is_gimple_assign (loc->stmt)
      && gimple_assign_lhs_ptr (loc->stmt) == loc->ref)
    {
      gimple_stmt_iterator gsi = gsi_for_stmt (loc->stmt);
      gimple *stmt = gimple_build_assign (flag, boolean_true_node);
      gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
      bbs->add (gimple_bb (stmt));
    }
  return false;
}

/* Helper function for execute_sm.  On every location where REF is
   set, set an appropriate flag indicating the store.  */

static tree
execute_sm_if_changed_flag_set (struct loop *loop, im_mem_ref *ref,
				hash_set <basic_block> *bbs)
{
  tree flag;
  char *str = get_lsm_tmp_name (ref->mem.ref, ~0, "_flag");
  flag = create_tmp_reg (boolean_type_node, str);
  for_all_locs_in_loop (loop, ref, sm_set_flag_if_changed (flag, bbs));
  return flag;
}

/* Executes store motion of memory reference REF from LOOP.
   Exits from the LOOP are stored in EXITS.  The initialization of the
   temporary variable is put in the preheader of the loop, and assignments
   to the reference from the temporary variable are emitted to exits.  */
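
/* When the multi-threaded model is not needed this transforms

     for (...)
       MEM = ...;

   into

     lsm = MEM;
     for (...)
       lsm = ...;
     MEM = lsm;		(on every exit edge)

   otherwise the conditional-store scheme documented above
   execute_sm_if_changed is used.  */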

static void
execute_sm (struct loop *loop, vec<edge> exits, im_mem_ref *ref)
{
  tree tmp_var, store_flag = NULL_TREE;
  unsigned i;
  gassign *load;
  struct fmt_data fmt_data;
  edge ex;
  struct lim_aux_data *lim_data;
  bool multi_threaded_model_p = false;
  gimple_stmt_iterator gsi;
  hash_set<basic_block> flag_bbs;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Executing store motion of ");
      print_generic_expr (dump_file, ref->mem.ref);
      fprintf (dump_file, " from loop %d\n", loop->num);
    }

  tmp_var = create_tmp_reg (TREE_TYPE (ref->mem.ref),
			    get_lsm_tmp_name (ref->mem.ref, ~0));

  fmt_data.loop = loop;
  fmt_data.orig_loop = loop;
  for_each_index (&ref->mem.ref, force_move_till, &fmt_data);

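  /* Use the flag-based (multi-threaded) variant when an unconditional
     store on the exits might introduce a store that did not happen in
     the original program: within a transaction, or when store data
     races are disallowed and REF is not always stored in LOOP
     anyway.  */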
  if (bb_in_transaction (loop_preheader_edge (loop)->src)
      || (! PARAM_VALUE (PARAM_ALLOW_STORE_DATA_RACES)
	  && ! ref_always_accessed_p (loop, ref, true)))
    multi_threaded_model_p = true;

  if (multi_threaded_model_p)
    store_flag = execute_sm_if_changed_flag_set (loop, ref, &flag_bbs);

  rewrite_mem_refs (loop, ref, tmp_var);

  /* Emit the load code before the first location of REF in LOOP, so
     that we are sure it will be processed by move_computations after
     all dependencies.  */
  gsi = gsi_for_stmt (first_mem_ref_loc (loop, ref)->stmt);

  /* FIXME/TODO: For the multi-threaded variant, we could avoid this
     load altogether, since the store is predicated by a flag.  We
     could do the load only if it was originally in the loop.  */
  load = gimple_build_assign (tmp_var, unshare_expr (ref->mem.ref));
  lim_data = init_lim_data (load);
  lim_data->max_loop = loop;
  lim_data->tgt_loop = loop;
  gsi_insert_before (&gsi, load, GSI_SAME_STMT);

  if (multi_threaded_model_p)
    {
      load = gimple_build_assign (store_flag, boolean_false_node);
      lim_data = init_lim_data (load);
      lim_data->max_loop = loop;
      lim_data->tgt_loop = loop;
      gsi_insert_before (&gsi, load, GSI_SAME_STMT);
    }

  /* Sink the store to every exit from the loop.  */
  FOR_EACH_VEC_ELT (exits, i, ex)
    if (!multi_threaded_model_p)
      {
	gassign *store;
	store = gimple_build_assign (unshare_expr (ref->mem.ref), tmp_var);
	gsi_insert_on_edge (ex, store);
      }
    else
      execute_sm_if_changed (ex, ref->mem.ref, tmp_var, store_flag,
			     loop_preheader_edge (loop), &flag_bbs);
}

/* Hoists memory references MEM_REFS out of LOOP.  EXITS is the list of exit
   edges of the LOOP.  */

static void
hoist_memory_references (struct loop *loop, bitmap mem_refs,
			 vec<edge> exits)
{
  im_mem_ref *ref;
  unsigned i;
  bitmap_iterator bi;

  EXECUTE_IF_SET_IN_BITMAP (mem_refs, 0, i, bi)
    {
      ref = memory_accesses.refs_list[i];
      execute_sm (loop, exits, ref);
    }
}

struct ref_always_accessed
{
  ref_always_accessed (struct loop *loop_, bool stored_p_)
      : loop (loop_), stored_p (stored_p_) {}
  bool operator () (mem_ref_loc *loc);
  struct loop *loop;
  bool stored_p;
};

bool
ref_always_accessed::operator () (mem_ref_loc *loc)
{
  struct loop *must_exec;

  if (!get_lim_data (loc->stmt))
    return false;

  /* If we require an always executed store, make sure the statement
     stores to the reference.  */
  if (stored_p)
    {
      tree lhs = gimple_get_lhs (loc->stmt);
      if (!lhs
	  || lhs != *loc->ref)
	return false;
    }

  must_exec = get_lim_data (loc->stmt)->always_executed_in;
  if (!must_exec)
    return false;

  if (must_exec == loop
      || flow_loop_nested_p (must_exec, loop))
    return true;

  return false;
}

/* Returns true if REF is always accessed in LOOP.  If STORED_P is true,
   make sure REF is always stored to in LOOP.  */

static bool
ref_always_accessed_p (struct loop *loop, im_mem_ref *ref, bool stored_p)
{
  return for_all_locs_in_loop (loop, ref,
			       ref_always_accessed (loop, stored_p));
}

/* Returns true if REF1 and REF2 are independent.  */

static bool
refs_independent_p (im_mem_ref *ref1, im_mem_ref *ref2)
{
  if (ref1 == ref2)
    return true;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "Querying dependency of refs %u and %u: ",
	     ref1->id, ref2->id);

  if (mem_refs_may_alias_p (ref1, ref2, &memory_accesses.ttae_cache))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "dependent.\n");
      return false;
    }
  else
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "independent.\n");
      return true;
    }
}

/* Mark REF dependent on stores or loads (according to STORED_P) in LOOP
   and its super-loops.  */

static void
record_dep_loop (struct loop *loop, im_mem_ref *ref, bool stored_p)
{
  /* We can propagate dependent-in-loop bits up the loop
     hierarchy to all outer loops.  */
  while (loop != current_loops->tree_root
	 && bitmap_set_bit (&ref->dep_loop, LOOP_DEP_BIT (loop->num, stored_p)))
    loop = loop_outer (loop);
}

/* Returns true if REF is independent of all other memory
   references in LOOP.  */

static bool
ref_indep_loop_p_1 (struct loop *loop, im_mem_ref *ref, bool stored_p)
{
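  /* If REF itself is stored in LOOP, upgrade the query: a stored
     reference has to be independent of all other references in LOOP,
     not just of the stores.  */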
  stored_p |= (ref->stored && bitmap_bit_p (ref->stored, loop->num));

  bool indep_p = true;
  bitmap refs_to_check;

  if (stored_p)
    refs_to_check = &memory_accesses.refs_in_loop[loop->num];
  else
    refs_to_check = &memory_accesses.refs_stored_in_loop[loop->num];

  if (bitmap_bit_p (refs_to_check, UNANALYZABLE_MEM_ID))
    indep_p = false;
  else
    {
      if (bitmap_bit_p (&ref->indep_loop, LOOP_DEP_BIT (loop->num, stored_p)))
	return true;
      if (bitmap_bit_p (&ref->dep_loop, LOOP_DEP_BIT (loop->num, stored_p)))
	return false;

      struct loop *inner = loop->inner;
      while (inner)
	{
	  if (!ref_indep_loop_p_1 (inner, ref, stored_p))
	    {
	      indep_p = false;
	      break;
	    }
	  inner = inner->next;
	}

      if (indep_p)
	{
	  unsigned i;
	  bitmap_iterator bi;
	  EXECUTE_IF_SET_IN_BITMAP (refs_to_check, 0, i, bi)
	    {
	      im_mem_ref *aref = memory_accesses.refs_list[i];
	      if (!refs_independent_p (ref, aref))
		{
		  indep_p = false;
		  break;
		}
	    }
	}
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "Querying dependencies of ref %u in loop %d: %s\n",
	     ref->id, loop->num, indep_p ? "independent" : "dependent");

  /* Record the computed result in the cache.  */
  if (indep_p)
    {
      if (bitmap_set_bit (&ref->indep_loop, LOOP_DEP_BIT (loop->num, stored_p))
	  && stored_p)
	{
	  /* If it's independent of all refs then it's independent
	     of stores, too.  */
	  bitmap_set_bit (&ref->indep_loop, LOOP_DEP_BIT (loop->num, false));
	}
    }
  else
    {
      record_dep_loop (loop, ref, stored_p);
      if (!stored_p)
	{
	  /* If it's dependent on stores it's dependent on
	     all refs, too.  */
	  record_dep_loop (loop, ref, true);
	}
    }

  return indep_p;
}

/* Returns true if REF is independent of all other memory references in
   LOOP.  */

static bool
ref_indep_loop_p (struct loop *loop, im_mem_ref *ref)
{
  gcc_checking_assert (MEM_ANALYZABLE (ref));

  return ref_indep_loop_p_1 (loop, ref, false);
}

/* Returns true if we can perform store motion of REF from LOOP.  */

static bool
can_sm_ref_p (struct loop *loop, im_mem_ref *ref)
{
  tree base;

  /* Can't hoist unanalyzable refs.  */
  if (!MEM_ANALYZABLE (ref))
    return false;

  /* It should be movable.  */
  if (!is_gimple_reg_type (TREE_TYPE (ref->mem.ref))
      || TREE_THIS_VOLATILE (ref->mem.ref)
      || !for_each_index (&ref->mem.ref, may_move_till, loop))
    return false;

  /* If it can throw, fail; we do not properly update EH info.  */
  if (tree_could_throw_p (ref->mem.ref))
    return false;

  /* If it can trap, it must be always executed in LOOP.
     Readonly memory locations may trap when storing to them, but
     tree_could_trap_p is a predicate for rvalues, so check that
     explicitly.  */
  base = get_base_address (ref->mem.ref);
  if ((tree_could_trap_p (ref->mem.ref)
       || (DECL_P (base) && TREE_READONLY (base)))
      && !ref_always_accessed_p (loop, ref, true))
    return false;

  /* And it must be independent of all other memory references
     in LOOP.  */
  if (!ref_indep_loop_p (loop, ref))
    return false;

  return true;
}

/* Marks in REFS_TO_SM the references in LOOP for which store motion
   should be performed.  SM_EXECUTED is the set of references for which
   store motion was performed in one of the outer loops.  */

static void
find_refs_for_sm (struct loop *loop, bitmap sm_executed, bitmap refs_to_sm)
{
  bitmap refs = &memory_accesses.all_refs_stored_in_loop[loop->num];
  unsigned i;
  bitmap_iterator bi;
  im_mem_ref *ref;

  EXECUTE_IF_AND_COMPL_IN_BITMAP (refs, sm_executed, 0, i, bi)
    {
      ref = memory_accesses.refs_list[i];
      if (can_sm_ref_p (loop, ref))
	bitmap_set_bit (refs_to_sm, i);
    }
}

/* Checks whether LOOP (with exits stored in the EXITS array) is suitable
   for a store motion optimization (i.e. whether we can insert statements
   on its exits).  */

static bool
loop_suitable_for_sm (struct loop *loop ATTRIBUTE_UNUSED,
		      vec<edge> exits)
{
  unsigned i;
  edge ex;

  FOR_EACH_VEC_ELT (exits, i, ex)
    if (ex->flags & (EDGE_ABNORMAL | EDGE_EH))
      return false;

  return true;
}

/* Try to perform store motion for all memory references modified inside
   LOOP.  SM_EXECUTED is the bitmap of the memory references for which
   store motion was executed in one of the outer loops.  */

static void
store_motion_loop (struct loop *loop, bitmap sm_executed)
{
  vec<edge> exits = get_loop_exit_edges (loop);
  struct loop *subloop;
  bitmap sm_in_loop = BITMAP_ALLOC (&lim_bitmap_obstack);

  if (loop_suitable_for_sm (loop, exits))
    {
      find_refs_for_sm (loop, sm_executed, sm_in_loop);
      hoist_memory_references (loop, sm_in_loop, exits);
    }
  exits.release ();

  bitmap_ior_into (sm_executed, sm_in_loop);
  for (subloop = loop->inner; subloop != NULL; subloop = subloop->next)
    store_motion_loop (subloop, sm_executed);
  bitmap_and_compl_into (sm_executed, sm_in_loop);
  BITMAP_FREE (sm_in_loop);
}
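
/* Note that SM_EXECUTED keeps subloops from redoing store motion
   already performed in an enclosing loop: the references moved out of
   LOOP are added to SM_EXECUTED around the recursion into the subloops
   and removed again afterwards, so sibling loops are not affected.  */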

/* Try to perform store motion for all memory references modified inside
   loops.  */

static void
store_motion (void)
{
  struct loop *loop;
  bitmap sm_executed = BITMAP_ALLOC (&lim_bitmap_obstack);

  for (loop = current_loops->tree_root->inner; loop != NULL; loop = loop->next)
    store_motion_loop (loop, sm_executed);

  BITMAP_FREE (sm_executed);
  gsi_commit_edge_inserts ();
}

/* Fills ALWAYS_EXECUTED_IN information for basic blocks of LOOP, i.e.
   for each such basic block bb records the outermost loop for which
   execution of its header implies execution of bb.  CONTAINS_CALL is the
   bitmap of blocks that contain a nonpure call.  */
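
/* For example, in

     for (;;)		<- loop L
       {
	 a = ...;	<- always executed in L
	 if (cond)
	   b = ...;	<- not always executed in L
       }

   the block computing a gets ALWAYS_EXECUTED_IN set to L (or to an
   enclosing loop if it qualifies for one), while the conditional block
   does not.  */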

static void
fill_always_executed_in_1 (struct loop *loop, sbitmap contains_call)
{
  basic_block bb = NULL, *bbs, last = NULL;
  unsigned i;
  edge e;
  struct loop *inn_loop = loop;

  if (ALWAYS_EXECUTED_IN (loop->header) == NULL)
    {
      bbs = get_loop_body_in_dom_order (loop);

      for (i = 0; i < loop->num_nodes; i++)
	{
	  edge_iterator ei;
	  bb = bbs[i];

	  if (dominated_by_p (CDI_DOMINATORS, loop->latch, bb))
	    last = bb;

	  if (bitmap_bit_p (contains_call, bb->index))
	    break;

	  FOR_EACH_EDGE (e, ei, bb->succs)
	    {
	      /* If there is an exit from this BB.  */
	      if (!flow_bb_inside_loop_p (loop, e->dest))
		break;
	      /* Or we enter a possibly non-finite loop.  */
	      if (flow_loop_nested_p (bb->loop_father,
				      e->dest->loop_father)
		  && ! finite_loop_p (e->dest->loop_father))
		break;
	    }
	  if (e)
	    break;

	  /* A loop might be infinite (TODO use simple loop analysis
	     to disprove this if possible).  */
	  if (bb->flags & BB_IRREDUCIBLE_LOOP)
	    break;

	  if (!flow_bb_inside_loop_p (inn_loop, bb))
	    break;

	  if (bb->loop_father->header == bb)
	    {
	      if (!dominated_by_p (CDI_DOMINATORS, loop->latch, bb))
		break;

	      /* In a loop that is always entered we may proceed anyway.
		 But record that we entered it and stop once we leave it.  */
	      inn_loop = bb->loop_father;
	    }
	}

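      /* LAST is now the last block (in dominator order) known to be
	 always executed; every block dominating it, up to and including
	 the loop header, is always executed as well, so mark the whole
	 dominator chain.  */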
      while (1)
	{
	  SET_ALWAYS_EXECUTED_IN (last, loop);
	  if (last == loop->header)
	    break;
	  last = get_immediate_dominator (CDI_DOMINATORS, last);
	}

      free (bbs);
    }

  for (loop = loop->inner; loop; loop = loop->next)
    fill_always_executed_in_1 (loop, contains_call);
}

/* Fills ALWAYS_EXECUTED_IN information for basic blocks, i.e.
   for each such basic block bb records the outermost loop for which
   execution of its header implies execution of bb.  */

static void
fill_always_executed_in (void)
{
  basic_block bb;
  struct loop *loop;

  auto_sbitmap contains_call (last_basic_block_for_fn (cfun));
  bitmap_clear (contains_call);
  FOR_EACH_BB_FN (bb, cfun)
    {
      gimple_stmt_iterator gsi;
      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  if (nonpure_call_p (gsi_stmt (gsi)))
	    break;
	}

      if (!gsi_end_p (gsi))
	bitmap_set_bit (contains_call, bb->index);
    }

  for (loop = current_loops->tree_root->inner; loop; loop = loop->next)
    fill_always_executed_in_1 (loop, contains_call);
}


/* Compute the global information needed by the loop invariant motion pass.  */

static void
tree_ssa_lim_initialize (void)
{
  struct loop *loop;
  unsigned i;

  bitmap_obstack_initialize (&lim_bitmap_obstack);
  gcc_obstack_init (&mem_ref_obstack);
  lim_aux_data_map = new hash_map<gimple *, lim_aux_data *>;

  if (flag_tm)
    compute_transaction_bits ();

  alloc_aux_for_edges (0);

  memory_accesses.refs = new hash_table<mem_ref_hasher> (100);
  memory_accesses.refs_list.create (100);
  /* Allocate a special, unanalyzable mem-ref with ID zero.  */
  memory_accesses.refs_list.quick_push
    (mem_ref_alloc (NULL, 0, UNANALYZABLE_MEM_ID));

  memory_accesses.refs_in_loop.create (number_of_loops (cfun));
  memory_accesses.refs_in_loop.quick_grow (number_of_loops (cfun));
  memory_accesses.refs_stored_in_loop.create (number_of_loops (cfun));
  memory_accesses.refs_stored_in_loop.quick_grow (number_of_loops (cfun));
  memory_accesses.all_refs_stored_in_loop.create (number_of_loops (cfun));
  memory_accesses.all_refs_stored_in_loop.quick_grow (number_of_loops (cfun));

  for (i = 0; i < number_of_loops (cfun); i++)
    {
      bitmap_initialize (&memory_accesses.refs_in_loop[i],
			 &lim_bitmap_obstack);
      bitmap_initialize (&memory_accesses.refs_stored_in_loop[i],
			 &lim_bitmap_obstack);
      bitmap_initialize (&memory_accesses.all_refs_stored_in_loop[i],
			 &lim_bitmap_obstack);
    }

  memory_accesses.ttae_cache = NULL;

  /* Initialize bb_loop_postorder with a mapping from loop->num to
     its postorder index.  Since LI_FROM_INNERMOST visits a loop only
     after all of its subloops, an inner loop always receives a smaller
     index than any loop containing it.  */
  i = 0;
  bb_loop_postorder = XNEWVEC (unsigned, number_of_loops (cfun));
  FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
    bb_loop_postorder[loop->num] = i++;
}

/* Cleans up after the invariant motion pass.  */

static void
tree_ssa_lim_finalize (void)
{
  basic_block bb;
  unsigned i;
  im_mem_ref *ref;

  free_aux_for_edges ();

  FOR_EACH_BB_FN (bb, cfun)
    SET_ALWAYS_EXECUTED_IN (bb, NULL);

  bitmap_obstack_release (&lim_bitmap_obstack);
  delete lim_aux_data_map;

  delete memory_accesses.refs;
  memory_accesses.refs = NULL;

  FOR_EACH_VEC_ELT (memory_accesses.refs_list, i, ref)
    memref_free (ref);
  memory_accesses.refs_list.release ();
  obstack_free (&mem_ref_obstack, NULL);

  memory_accesses.refs_in_loop.release ();
  memory_accesses.refs_stored_in_loop.release ();
  memory_accesses.all_refs_stored_in_loop.release ();

  if (memory_accesses.ttae_cache)
    free_affine_expand_cache (&memory_accesses.ttae_cache);

  free (bb_loop_postorder);
}

/* Moves invariants from loops.  Only "expensive" invariants are moved out --
   i.e. those that are likely to be a win regardless of the register
   pressure.  */

static unsigned int
tree_ssa_lim (void)
{
  unsigned int todo;

  tree_ssa_lim_initialize ();

  /* Gathers information about memory accesses in the loops.  */
  analyze_memory_references ();

  /* Fills ALWAYS_EXECUTED_IN information for basic blocks.  */
  fill_always_executed_in ();

  /* For each statement determine the outermost loop in which it is
     invariant and the cost of computing the invariant.  */
  invariantness_dom_walker (CDI_DOMINATORS)
    .walk (cfun->cfg->x_entry_block_ptr);

  /* Execute store motion.  Force the necessary invariants to be moved
     out of the loops as well.  */
  store_motion ();

  /* Move the expressions that are expensive enough.  */
  todo = move_computations ();

  tree_ssa_lim_finalize ();

  return todo;
}

/* Loop invariant motion pass.  */

namespace {

const pass_data pass_data_lim =
{
  GIMPLE_PASS, /* type */
  "lim", /* name */
  OPTGROUP_LOOP, /* optinfo_flags */
  TV_LIM, /* tv_id */
  PROP_cfg, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_lim : public gimple_opt_pass
{
public:
  pass_lim (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_lim, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_lim (m_ctxt); }
  virtual bool gate (function *) { return flag_tree_loop_im != 0; }
  virtual unsigned int execute (function *);

}; // class pass_lim

unsigned int
pass_lim::execute (function *fun)
{
  bool in_loop_pipeline = scev_initialized_p ();
  if (!in_loop_pipeline)
    loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);

  if (number_of_loops (fun) <= 1)
    return 0;
  unsigned int todo = tree_ssa_lim ();

  if (!in_loop_pipeline)
    loop_optimizer_finalize ();
  else
    scev_reset ();
  return todo;
}

} // anon namespace

gimple_opt_pass *
make_pass_lim (gcc::context *ctxt)
{
  return new pass_lim (ctxt);
}
