1 /* Natural loop functions
2    Copyright (C) 1987-2016 Free Software Foundation, Inc.
3 
4 This file is part of GCC.
5 
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10 
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
14 for more details.
15 
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3.  If not see
18 <http://www.gnu.org/licenses/>.  */
19 
20 #ifndef GCC_CFGLOOP_H
21 #define GCC_CFGLOOP_H
22 
23 #include "cfgloopmanip.h"
24 
/* Decision about unrolling/peeling a loop (see struct lpt_decision).  */
enum lpt_dec
{
  LPT_NONE,		/* Leave the loop as it is.  */
  LPT_UNROLL_CONSTANT,	/* Unroll; iteration count is a known constant.  */
  LPT_UNROLL_RUNTIME,	/* Unroll; iteration count known only at runtime.  */
  LPT_UNROLL_STUPID	/* Unroll without iteration-count information.  */
};
33 
/* The unrolling/peeling decision recorded for one loop.  */
struct GTY (()) lpt_decision {
  /* Which transformation (if any) to apply.  */
  enum lpt_dec decision;
  /* The unroll factor for the LPT_UNROLL_* decisions.  */
  unsigned times;
};
38 
/* The type of extend applied to an IV (see struct rtx_iv below).  */
enum iv_extend_code
{
  IV_SIGN_EXTEND,	/* The value is sign-extended to the wider mode.  */
  IV_ZERO_EXTEND,	/* The value is zero-extended to the wider mode.  */
  IV_UNKNOWN_EXTEND	/* No, or unanalyzable, extension is applied.  */
};
46 
/* The structure describing a bound on number of iterations of a loop.  */

struct GTY ((chain_next ("%h.next"))) nb_iter_bound {
  /* The statement STMT is executed at most ...  */
  gimple *stmt;

  /* ... BOUND + 1 times (BOUND must be an unsigned constant).
     The + 1 is added for the following reasons:

     a) 0 would otherwise be unused, while we would need to care more about
        overflows (as MAX + 1 is sometimes produced as the estimate on number
	of executions of STMT).
     b) it is consistent with the result of number_of_iterations_exit.  */
  widest_int bound;

  /* True if the statement will cause the loop to be left the (at most)
     BOUND + 1-st time it is executed, that is, all the statements after it
     are executed at most BOUND times.  */
  bool is_exit;

  /* The next bound in the list.  */
  struct nb_iter_bound *next;
};
70 
/* Description of the loop exit.  */

struct GTY ((for_user)) loop_exit {
  /* The exit edge.  */
  edge e;

  /* Previous and next exit in the list of the exits of the loop.  */
  struct loop_exit *prev;
  struct loop_exit *next;

  /* Next element in the list of loops that edge E exits.  */
  struct loop_exit *next_e;
};
84 
/* Hasher for looking up loop_exit records by their exit edge in the
   loops->exits hash table (see struct loops below).  */
struct loop_exit_hasher : ggc_ptr_hash<loop_exit>
{
  /* Lookups are keyed by the exit edge rather than the loop_exit itself.  */
  typedef edge compare_type;

  static hashval_t hash (loop_exit *);
  static bool equal (loop_exit *, edge);
  static void remove (loop_exit *);
};
93 
94 typedef struct loop *loop_p;
95 
/* An integer estimation of the number of iterations.  Estimate_state
   describes what is the state of the estimation.  */
enum loop_estimation
{
  /* Estimate was not computed yet.  */
  EST_NOT_COMPUTED,
  /* Estimate is ready.  */
  EST_AVAILABLE,
  /* Sentinel: the number of estimation states, not a valid state.  */
  EST_LAST
};
106 
/* The structure describing non-overflow control induction variable for
   loop's exit edge.  */
struct GTY ((chain_next ("%h.next"))) control_iv {
  /* Base and step of the induction variable.  */
  tree base;
  tree step;
  /* The next control IV in the loop's list.  */
  struct control_iv *next;
};
114 
/* Structure to hold information for each natural loop.  */
struct GTY ((chain_next ("%h.next"))) loop {
  /* Index into loops array.  */
  int num;

  /* Number of loop insns.  */
  unsigned ninsns;

  /* Basic block of loop header.  */
  basic_block header;

  /* Basic block of loop latch.  */
  basic_block latch;

  /* For loop unrolling/peeling decision.  */
  struct lpt_decision lpt_decision;

  /* Average number of executed insns per iteration.  */
  unsigned av_ninsns;

  /* Number of blocks contained within the loop.  */
  unsigned num_nodes;

  /* Superloops of the loop, starting with the outermost loop.  */
  vec<loop_p, va_gc> *superloops;

  /* The first inner (child) loop or NULL if innermost loop.  */
  struct loop *inner;

  /* Link to the next (sibling) loop.  */
  struct loop *next;

  /* Auxiliary info specific to a pass.  */
  PTR GTY ((skip (""))) aux;

  /* The number of times the latch of the loop is executed.  This can be an
     INTEGER_CST, or a symbolic expression representing the number of
     iterations like "N - 1", or a COND_EXPR containing the runtime
     conditions under which the number of iterations is non zero.

     Don't access this field directly: number_of_latch_executions
     computes and caches the computed information in this field.  */
  tree nb_iterations;

  /* An integer guaranteed to be greater or equal to nb_iterations.  Only
     valid if any_upper_bound is true.  */
  widest_int nb_iterations_upper_bound;

  /* An integer giving an estimate on nb_iterations.  Unlike
     nb_iterations_upper_bound, there is no guarantee that it is at least
     nb_iterations.  Only valid if any_estimate is true.  */
  widest_int nb_iterations_estimate;

  /* True if NB_ITERATIONS_UPPER_BOUND holds a valid bound.  */
  bool any_upper_bound;
  /* True if NB_ITERATIONS_ESTIMATE holds a valid estimate.  */
  bool any_estimate;

  /* True if the loop can be parallel.  */
  bool can_be_parallel;

  /* True if -Waggressive-loop-optimizations warned about this loop
     already.  */
  bool warned_aggressive_loop_optimizations;

  /* An integer estimation of the number of iterations.  Estimate_state
     describes what is the state of the estimation.  */
  enum loop_estimation estimate_state;

  /* If > 0, an integer, where the user asserted that for any
     I in [ 0, nb_iterations ) and for any J in
     [ I, min ( I + safelen, nb_iterations ) ), the Ith and Jth iterations
     of the loop can be safely evaluated concurrently.  */
  int safelen;

  /* True if this loop should never be vectorized.  */
  bool dont_vectorize;

  /* True if we should try harder to vectorize this loop.  */
  bool force_vectorize;

  /* True if the loop is part of an oacc kernels region.  */
  bool in_oacc_kernels_region;

  /* For SIMD loops, this is a unique identifier of the loop, referenced
     by IFN_GOMP_SIMD_VF, IFN_GOMP_SIMD_LANE and IFN_GOMP_SIMD_LAST_LANE
     builtins.  */
  tree simduid;

  /* Upper bound on number of iterations of a loop.  */
  struct nb_iter_bound *bounds;

  /* Non-overflow control ivs of a loop.  */
  struct control_iv *control_ivs;

  /* Head of the cyclic list of the exits of the loop.  */
  struct loop_exit *exits;

  /* Number of iteration analysis data for RTL.  */
  struct niter_desc *simple_loop_desc;

  /* For sanity checking during loop fixup we record here the former
     loop header for loops marked for removal.  Note that this prevents
     the basic-block from being collected but its index can still be
     reused.  */
  basic_block former_header;
};
220 
/* Flags for state of loop structure.  */
enum
{
  /* Each loop has a preheader block.  */
  LOOPS_HAVE_PREHEADERS = 1,
  /* Each loop latch has a single successor (the header).  */
  LOOPS_HAVE_SIMPLE_LATCHES = 2,
  /* Irreducible regions are marked (see mark_irreducible_loops).  */
  LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS = 4,
  /* Loop exits are recorded in the exits hash table
     (see record_loop_exits).  */
  LOOPS_HAVE_RECORDED_EXITS = 8,
  /* Loops may have more than one latch edge
     (see disambiguate_loops_with_multiple_latches).  */
  LOOPS_MAY_HAVE_MULTIPLE_LATCHES = 16,
  /* The function is in loop-closed SSA form.  */
  LOOP_CLOSED_SSA = 32,
  /* The loop structures need fixing up (see fix_loop_structure).  */
  LOOPS_NEED_FIXUP = 64,
  /* Preheaders fall through to the loop header.  */
  LOOPS_HAVE_FALLTHRU_PREHEADERS = 128
};
233 
234 #define LOOPS_NORMAL (LOOPS_HAVE_PREHEADERS | LOOPS_HAVE_SIMPLE_LATCHES \
235 		      | LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS)
236 #define AVOID_CFG_MODIFICATIONS (LOOPS_MAY_HAVE_MULTIPLE_LATCHES)
237 
/* Structure to hold CFG information about natural loops within a function.  */
struct GTY (()) loops {
  /* State of loops, a mask of the LOOPS_* flags above.  */
  int state;

  /* Array of the loops, indexed by loop->num.  Entries may be NULL for
     removed loops.  */
  vec<loop_p, va_gc> *larray;

  /* Maps edges to the list of their descriptions as loop exits.  Edges
     whose sources or destinations have loop_father == NULL (which may
     happen during the cfg manipulations) should not appear in EXITS.  */
  hash_table<loop_exit_hasher> *GTY(()) exits;

  /* Pointer to root of loop hierarchy tree.  */
  struct loop *tree_root;
};
254 
255 /* Loop recognition.  */
256 bool bb_loop_header_p (basic_block);
257 void init_loops_structure (struct function *, struct loops *, unsigned);
258 extern struct loops *flow_loops_find (struct loops *);
259 extern void disambiguate_loops_with_multiple_latches (void);
260 extern void flow_loops_free (struct loops *);
261 extern void flow_loops_dump (FILE *,
262 			     void (*)(const struct loop *, FILE *, int), int);
263 extern void flow_loop_dump (const struct loop *, FILE *,
264 			    void (*)(const struct loop *, FILE *, int), int);
265 struct loop *alloc_loop (void);
266 extern void flow_loop_free (struct loop *);
267 int flow_loop_nodes_find (basic_block, struct loop *);
268 unsigned fix_loop_structure (bitmap changed_bbs);
269 bool mark_irreducible_loops (void);
270 void release_recorded_exits (function *);
271 void record_loop_exits (void);
272 void rescan_loop_exit (edge, bool, bool);
273 
274 /* Loop data structure manipulation/querying.  */
275 extern void flow_loop_tree_node_add (struct loop *, struct loop *);
276 extern void flow_loop_tree_node_remove (struct loop *);
277 extern bool flow_loop_nested_p	(const struct loop *, const struct loop *);
278 extern bool flow_bb_inside_loop_p (const struct loop *, const_basic_block);
279 extern struct loop * find_common_loop (struct loop *, struct loop *);
280 struct loop *superloop_at_depth (struct loop *, unsigned);
281 struct eni_weights;
282 extern int num_loop_insns (const struct loop *);
283 extern int average_num_loop_insns (const struct loop *);
284 extern unsigned get_loop_level (const struct loop *);
285 extern bool loop_exit_edge_p (const struct loop *, const_edge);
286 extern bool loop_exits_to_bb_p (struct loop *, basic_block);
287 extern bool loop_exits_from_bb_p (struct loop *, basic_block);
288 extern void mark_loop_exit_edges (void);
289 extern location_t get_loop_location (struct loop *loop);
290 
291 /* Loops & cfg manipulation.  */
292 extern basic_block *get_loop_body (const struct loop *);
293 extern unsigned get_loop_body_with_size (const struct loop *, basic_block *,
294 					 unsigned);
295 extern basic_block *get_loop_body_in_dom_order (const struct loop *);
296 extern basic_block *get_loop_body_in_bfs_order (const struct loop *);
297 extern basic_block *get_loop_body_in_custom_order (const struct loop *,
298 			       int (*) (const void *, const void *));
299 
300 extern vec<edge> get_loop_exit_edges (const struct loop *);
301 extern edge single_exit (const struct loop *);
302 extern edge single_likely_exit (struct loop *loop);
303 extern unsigned num_loop_branches (const struct loop *);
304 
305 extern edge loop_preheader_edge (const struct loop *);
306 extern edge loop_latch_edge (const struct loop *);
307 
308 extern void add_bb_to_loop (basic_block, struct loop *);
309 extern void remove_bb_from_loops (basic_block);
310 
311 extern void cancel_loop_tree (struct loop *);
312 extern void delete_loop (struct loop *);
313 
314 
315 extern void verify_loop_structure (void);
316 
317 /* Loop analysis.  */
318 extern bool just_once_each_iteration_p (const struct loop *, const_basic_block);
319 gcov_type expected_loop_iterations_unbounded (const struct loop *);
320 extern unsigned expected_loop_iterations (const struct loop *);
321 extern rtx doloop_condition_get (rtx);
322 
323 void mark_loop_for_removal (loop_p);
324 
325 /* Induction variable analysis.  */
326 
327 /* The description of induction variable.  The things are a bit complicated
328    due to need to handle subregs and extends.  The value of the object described
329    by it can be obtained as follows (all computations are done in extend_mode):
330 
331    Value in i-th iteration is
332      delta + mult * extend_{extend_mode} (subreg_{mode} (base + i * step)).
333 
334    If first_special is true, the value in the first iteration is
335      delta + mult * base
336 
337    If extend = UNKNOWN, first_special must be false, delta 0, mult 1 and value is
338      subreg_{mode} (base + i * step)
339 
340    The get_iv_value function can be used to obtain these expressions.
341 
342    ??? Add a third mode field that would specify the mode in that inner
343    computation is done, which would enable it to be different from the
344    outer one?  */
345 
/* The description of an RTL induction variable; see the big comment
   above for how the fields combine to give the IV's value.  */
struct rtx_iv
{
  /* Its base and step (mode of base and step is supposed to be extend_mode,
     see the description above).  */
  rtx base, step;

  /* The type of extend applied to it (IV_SIGN_EXTEND, IV_ZERO_EXTEND,
     or IV_UNKNOWN_EXTEND).  */
  enum iv_extend_code extend;

  /* Operations applied in the extended mode.  */
  rtx delta, mult;

  /* The mode it is extended to.  */
  machine_mode extend_mode;

  /* The mode the variable iterates in.  */
  machine_mode mode;

  /* Whether the first iteration needs to be handled specially.  */
  unsigned first_special : 1;
};
368 
/* The description of an exit from the loop and of the number of iterations
   till we take the exit.  */

struct GTY(()) niter_desc
{
  /* The edge out of the loop.  */
  edge out_edge;

  /* The other edge leading from the condition.  */
  edge in_edge;

  /* True if we are able to say anything about number of iterations of the
     loop.  */
  bool simple_p;

  /* True if the loop iterates the constant number of times.  */
  bool const_iter;

  /* Number of iterations if constant.  */
  uint64_t niter;

  /* Assumptions under which the rest of the information is valid.  */
  rtx assumptions;

  /* Assumptions under which the loop ends before reaching the latch,
     even if value of niter_expr says otherwise.  */
  rtx noloop_assumptions;

  /* Condition under which the loop is infinite.  */
  rtx infinite;

  /* Whether the comparison is signed.  */
  bool signed_p;

  /* The mode in which niter_expr should be computed.  */
  machine_mode mode;

  /* The number of iterations of the loop.  */
  rtx niter_expr;
};
409 
410 extern void iv_analysis_loop_init (struct loop *);
411 extern bool iv_analyze (rtx_insn *, rtx, struct rtx_iv *);
412 extern bool iv_analyze_result (rtx_insn *, rtx, struct rtx_iv *);
413 extern bool iv_analyze_expr (rtx_insn *, rtx, machine_mode,
414 			     struct rtx_iv *);
415 extern rtx get_iv_value (struct rtx_iv *, rtx);
416 extern bool biv_p (rtx_insn *, rtx);
417 extern void find_simple_exit (struct loop *, struct niter_desc *);
418 extern void iv_analysis_done (void);
419 
420 extern struct niter_desc *get_simple_loop_desc (struct loop *loop);
421 extern void free_simple_loop_desc (struct loop *loop);
422 
423 static inline struct niter_desc *
simple_loop_desc(struct loop * loop)424 simple_loop_desc (struct loop *loop)
425 {
426   return loop->simple_loop_desc;
427 }
428 
429 /* Accessors for the loop structures.  */
430 
431 /* Returns the loop with index NUM from FNs loop tree.  */
432 
433 static inline struct loop *
get_loop(struct function * fn,unsigned num)434 get_loop (struct function *fn, unsigned num)
435 {
436   return (*loops_for_fn (fn)->larray)[num];
437 }
438 
/* Returns the number of superloops of LOOP, i.e. its depth in the loop
   tree (the fake root has depth 0).  */

static inline unsigned
loop_depth (const struct loop *loop)
{
  return vec_safe_length (loop->superloops);
}
446 
447 /* Returns the immediate superloop of LOOP, or NULL if LOOP is the outermost
448    loop.  */
449 
450 static inline struct loop *
loop_outer(const struct loop * loop)451 loop_outer (const struct loop *loop)
452 {
453   unsigned n = vec_safe_length (loop->superloops);
454 
455   if (n == 0)
456     return NULL;
457 
458   return (*loop->superloops)[n - 1];
459 }
460 
/* Returns true if LOOP has at least one exit edge.  */

static inline bool
loop_has_exit_edges (const struct loop *loop)
{
  /* EXITS heads a cyclic list whose head element has E == NULL; a non-NULL
     edge on the first real element means at least one exit is recorded.  */
  return loop->exits->next->e != NULL;
}
468 
469 /* Returns the list of loops in FN.  */
470 
471 inline vec<loop_p, va_gc> *
get_loops(struct function * fn)472 get_loops (struct function *fn)
473 {
474   struct loops *loops = loops_for_fn (fn);
475   if (!loops)
476     return NULL;
477 
478   return loops->larray;
479 }
480 
481 /* Returns the number of loops in FN (including the removed
482    ones and the fake loop that forms the root of the loop tree).  */
483 
484 static inline unsigned
number_of_loops(struct function * fn)485 number_of_loops (struct function *fn)
486 {
487   struct loops *loops = loops_for_fn (fn);
488   if (!loops)
489     return 0;
490 
491   return vec_safe_length (loops->larray);
492 }
493 
494 /* Returns true if state of the loops satisfies all properties
495    described by FLAGS.  */
496 
497 static inline bool
loops_state_satisfies_p(function * fn,unsigned flags)498 loops_state_satisfies_p (function *fn, unsigned flags)
499 {
500   return (loops_for_fn (fn)->state & flags) == flags;
501 }
502 
/* As above, for the current function.  */
static inline bool
loops_state_satisfies_p (unsigned flags)
{
  return loops_state_satisfies_p (cfun, flags);
}
508 
/* Adds FLAGS to the state of the loops of FN.  */

static inline void
loops_state_set (function *fn, unsigned flags)
{
  loops_for_fn (fn)->state |= flags;
}
516 
/* As above, for the current function.  */
static inline void
loops_state_set (unsigned flags)
{
  loops_state_set (cfun, flags);
}
522 
/* Removes FLAGS from the state of the loops of FN.  */

static inline void
loops_state_clear (function *fn, unsigned flags)
{
  loops_for_fn (fn)->state &= ~flags;
}
530 
/* As above, for the current function.  Unlike the other cfun wrappers here,
   this one is a no-op when no loop structures exist (the guard tests
   current_loops before dispatching to the FN overload).  */
static inline void
loops_state_clear (unsigned flags)
{
  if (!current_loops)
    return;
  loops_state_clear (cfun, flags);
}
538 
/* Check loop structure invariants, if internal consistency checks are
   enabled.  Note that LOOPS_NEED_FIXUP is cleared even when checking
   is disabled.  */

static inline void
checking_verify_loop_structure (void)
{
  /* VERIFY_LOOP_STRUCTURE essentially asserts that no loops need fixups.

     The loop optimizers should never make changes to the CFG which
     require loop fixups.  But the low level CFG manipulation code may
     set the flag conservatively.

     Go ahead and clear the flag here.  That avoids the assert inside
     VERIFY_LOOP_STRUCTURE, and if there is an inconsistency in the loop
     structures VERIFY_LOOP_STRUCTURE will detect it.

     This also avoid the compile time cost of excessive fixups.  */
  loops_state_clear (LOOPS_NEED_FIXUP);
  if (flag_checking)
    verify_loop_structure ();
}
560 
561 /* Loop iterators.  */
562 
/* Flags controlling which loops a loop_iterator visits and in what
   order.  */

enum li_flags
{
  LI_INCLUDE_ROOT = 1,		/* Include the fake root of the loop tree.  */
  LI_FROM_INNERMOST = 2,	/* Iterate over the loops in the reverse order,
				   starting from innermost ones.  */
  LI_ONLY_INNERMOST = 4		/* Iterate only over innermost loops.  */
};
572 
/* The iterator for loops.  Collects the loop numbers to visit up front
   in the constructor; use via FOR_EACH_LOOP / FOR_EACH_LOOP_FN.  */

struct loop_iterator
{
  loop_iterator (function *fn, loop_p *loop, unsigned flags);
  ~loop_iterator ();

  inline loop_p next ();

  /* The function we are visiting.  */
  function *fn;

  /* The list of loop numbers (loop->num) to visit, in visit order.  */
  vec<int> to_visit;

  /* The index into TO_VISIT of the next loop to visit.  */
  unsigned idx;
};
591 
592 inline loop_p
next()593 loop_iterator::next ()
594 {
595   int anum;
596 
597   while (this->to_visit.iterate (this->idx, &anum))
598     {
599       this->idx++;
600       loop_p loop = get_loop (fn, anum);
601       if (loop)
602 	return loop;
603     }
604 
605   return NULL;
606 }
607 
/* Construct an iterator over the loops of FN in the order requested by
   FLAGS (see enum li_flags) and store the first loop to visit (or NULL)
   in *LOOP.  */
inline
loop_iterator::loop_iterator (function *fn, loop_p *loop, unsigned flags)
{
  struct loop *aloop;
  unsigned i;
  int mn;

  this->idx = 0;
  this->fn = fn;
  /* Nothing to visit when FN has no loop structures built.  */
  if (!loops_for_fn (fn))
    {
      this->to_visit.create (0);
      *loop = NULL;
      return;
    }

  this->to_visit.create (number_of_loops (fn));
  /* MN is the smallest loop number to visit; the fake root has number 0
     and is included only with LI_INCLUDE_ROOT.  */
  mn = (flags & LI_INCLUDE_ROOT) ? 0 : 1;

  if (flags & LI_ONLY_INNERMOST)
    {
      /* Collect childless loops in index order.  */
      for (i = 0; vec_safe_iterate (loops_for_fn (fn)->larray, i, &aloop); i++)
	if (aloop != NULL
	    && aloop->inner == NULL
	    && aloop->num >= mn)
	  this->to_visit.quick_push (aloop->num);
    }
  else if (flags & LI_FROM_INNERMOST)
    {
      /* Push the loops to LI->TO_VISIT in postorder.  Start at the
	 innermost loop of the leftmost chain.  */
      for (aloop = loops_for_fn (fn)->tree_root;
	   aloop->inner != NULL;
	   aloop = aloop->inner)
	continue;

      while (1)
	{
	  if (aloop->num >= mn)
	    this->to_visit.quick_push (aloop->num);

	  /* Descend to the innermost loop of the next sibling, or climb
	     to the parent when there is no sibling; the root terminates
	     the walk.  */
	  if (aloop->next)
	    {
	      for (aloop = aloop->next;
		   aloop->inner != NULL;
		   aloop = aloop->inner)
		continue;
	    }
	  else if (!loop_outer (aloop))
	    break;
	  else
	    aloop = loop_outer (aloop);
	}
    }
  else
    {
      /* Push the loops to LI->TO_VISIT in preorder.  */
      aloop = loops_for_fn (fn)->tree_root;
      while (1)
	{
	  if (aloop->num >= mn)
	    this->to_visit.quick_push (aloop->num);

	  if (aloop->inner != NULL)
	    aloop = aloop->inner;
	  else
	    {
	      /* Climb until a sibling exists; reaching NULL above the
		 root ends the walk.  */
	      while (aloop != NULL && aloop->next == NULL)
		aloop = loop_outer (aloop);
	      if (aloop == NULL)
		break;
	      aloop = aloop->next;
	    }
	}
    }

  /* Hand the first loop back to the caller.  */
  *loop = this->next ();
}
685 
686 inline
~loop_iterator()687 loop_iterator::~loop_iterator ()
688 {
689   this->to_visit.release ();
690 }
691 
/* Iterate over the loops of the current function, setting LOOP to each
   one in the order selected by FLAGS (see enum li_flags).  */
#define FOR_EACH_LOOP(LOOP, FLAGS) \
  for (loop_iterator li (cfun, &(LOOP), FLAGS); \
       (LOOP); \
       (LOOP) = li.next ())

/* As FOR_EACH_LOOP, but iterating over the loops of function FN.
   Bug fix: the expansion previously referred to a lowercase `fn',
   silently ignoring the FN argument and only compiling where a variable
   literally named `fn' happened to be in scope.  */
#define FOR_EACH_LOOP_FN(FN, LOOP, FLAGS) \
  for (loop_iterator li (FN, &(LOOP), FLAGS); \
       (LOOP); \
       (LOOP) = li.next ())
701 
/* The properties of the target, used by the register-pressure heuristics
   of the loop optimizers.  Access the fields through the target_* macros
   below.  */
struct target_cfgloop {
  /* Number of available registers.  */
  unsigned x_target_avail_regs;

  /* Number of available registers that are call-clobbered.  */
  unsigned x_target_clobbered_regs;

  /* Number of registers reserved for temporary expressions.  */
  unsigned x_target_res_regs;

  /* The cost for register when there still is some reserve, but we are
     approaching the number of available registers.  */
  unsigned x_target_reg_cost[2];

  /* The cost for register when we need to spill.  */
  unsigned x_target_spill_cost[2];
};
720 
721 extern struct target_cfgloop default_target_cfgloop;
722 #if SWITCHABLE_TARGET
723 extern struct target_cfgloop *this_target_cfgloop;
724 #else
725 #define this_target_cfgloop (&default_target_cfgloop)
726 #endif
727 
728 #define target_avail_regs \
729   (this_target_cfgloop->x_target_avail_regs)
730 #define target_clobbered_regs \
731   (this_target_cfgloop->x_target_clobbered_regs)
732 #define target_res_regs \
733   (this_target_cfgloop->x_target_res_regs)
734 #define target_reg_cost \
735   (this_target_cfgloop->x_target_reg_cost)
736 #define target_spill_cost \
737   (this_target_cfgloop->x_target_spill_cost)
738 
739 /* Register pressure estimation for induction variable optimizations & loop
740    invariant motion.  */
741 extern unsigned estimate_reg_pressure_cost (unsigned, unsigned, bool, bool);
742 extern void init_set_costs (void);
743 
744 /* Loop optimizer initialization.  */
745 extern void loop_optimizer_init (unsigned);
746 extern void loop_optimizer_finalize (function *);
/* Finalize the loop optimizer for the current function.  */
inline void
loop_optimizer_finalize ()
{
  loop_optimizer_finalize (cfun);
}
752 
/* Optimization passes.  Flags passed to the RTL unrolling pass;
   presumably combined as a mask — TODO confirm against callers.  */
enum
{
  UAP_UNROLL = 1,	/* Enables unrolling of loops if it seems profitable.  */
  UAP_UNROLL_ALL = 2	/* Enables unrolling of all loops.  */
};
759 
760 extern void doloop_optimize_loops (void);
761 extern void move_loop_invariants (void);
762 extern vec<basic_block> get_loop_hot_path (const struct loop *loop);
763 
764 /* Returns the outermost loop of the loop nest that contains LOOP.*/
765 static inline struct loop *
loop_outermost(struct loop * loop)766 loop_outermost (struct loop *loop)
767 {
768   unsigned n = vec_safe_length (loop->superloops);
769 
770   if (n <= 1)
771     return loop;
772 
773   return (*loop->superloops)[1];
774 }
775 
776 extern void record_niter_bound (struct loop *, const widest_int &, bool, bool);
777 extern HOST_WIDE_INT get_estimated_loop_iterations_int (struct loop *);
778 extern HOST_WIDE_INT get_max_loop_iterations_int (struct loop *);
779 extern bool get_estimated_loop_iterations (struct loop *loop, widest_int *nit);
780 extern bool get_max_loop_iterations (struct loop *loop, widest_int *nit);
781 extern int bb_loop_depth (const_basic_block);
782 
/* Converts VAL to widest_int, preserving the value regardless of the
   relative widths of gcov_type and HOST_WIDE_INT.  */

static inline widest_int
gcov_type_to_wide_int (gcov_type val)
{
  /* Split VAL into a low and a high HOST_WIDE_INT half.  */
  HOST_WIDE_INT a[2];

  a[0] = (unsigned HOST_WIDE_INT) val;
  /* If HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_WIDEST_INT, avoid shifting by
     the size of type.  The shift is split in two so that we never shift by
     the full bit-width of VAL, which would be undefined behavior.  */
  val >>= HOST_BITS_PER_WIDE_INT - 1;
  val >>= 1;
  a[1] = (unsigned HOST_WIDE_INT) val;

  return widest_int::from_array (a, 2);
}
799 #endif /* GCC_CFGLOOP_H */
800