/* Perform doloop optimizations
   Copyright (C) 2004, 2005, 2006, 2007, 2008, 2010
   Free Software Foundation, Inc.
   Based on code by Michael P. Hayes (m.hayes@elec.canterbury.ac.nz)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "flags.h"
#include "expr.h"
#include "hard-reg-set.h"
#include "basic-block.h"
#include "diagnostic-core.h"
#include "tm_p.h"
#include "cfgloop.h"
#include "output.h"
#include "params.h"
#include "target.h"

/* This module is used to modify loops with a determinable number of
   iterations to use special low-overhead looping instructions.

   It first validates whether the loop is well behaved and has a
   determinable number of iterations (either at compile or run-time).
   It then modifies the loop to use a low-overhead looping pattern as
   follows:

   1. A pseudo register is allocated as the loop iteration counter.

   2. The number of loop iterations is calculated and is stored
      in the loop counter.

   3. At the end of the loop, the jump insn is replaced by the
      doloop_end pattern.  The compare must remain because it might be
      used elsewhere.  If the loop-variable or condition register are
      used elsewhere, they will be eliminated by flow.

   4. An optional doloop_begin pattern is inserted at the top of the
      loop.  (An illustrative sketch of the overall transformation
      follows this comment.)

   TODO: The optimization should only be performed when either the biv used
   for the exit condition is not used at all except in the exit test, or when
   we do not have to change its value, since otherwise we have to add a new
   induction variable, which usually does not pay off (unless the cost of the
   doloop pattern is much lower than the cost of the compare & jump, or
   unless the bct register cannot be used for anything else but doloop --
   ??? detect these cases).  */
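
/* Purely as an illustration (a hypothetical source-level sketch, not
   something this pass emits literally), a counted loop such as

       for (i = 0; i < n; i++)
         body ();

   is conceptually turned into

       count = n;                       -- step 2: compute iteration count
       doloop_begin (count);            -- step 4: optional setup pattern
     top:
       body ();
       if (--count != 0) goto top;      -- step 3: the doloop_end pattern does
                                           the decrement, the test and the
                                           branch in one low-overhead insn

   where "count", "body" and "doloop_begin ()" are placeholders used only
   for this sketch.  */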

#ifdef HAVE_doloop_end

/* Return the loop termination condition for DOLOOP_PAT or zero
   if it is not a decrement and branch jump insn.  */

rtx
doloop_condition_get (rtx doloop_pat)
{
  rtx cmp;
  rtx inc;
  rtx reg;
  rtx inc_src;
  rtx condition;
  rtx pattern;
  rtx cc_reg = NULL_RTX;
  rtx reg_orig = NULL_RTX;

  /* The canonical doloop pattern we expect has one of the following
     forms:

     1)  (parallel [(set (pc) (if_then_else (condition)
                                            (label_ref (label))
                                            (pc)))
                    (set (reg) (plus (reg) (const_int -1)))
                    (additional clobbers and uses)])

     The branch must be the first entry of the parallel (also required
     by jump.c), and the second entry of the parallel must be a set of
     the loop counter register.  Some targets (IA-64) wrap the set of
     the loop counter in an if_then_else too.

     2)  (set (reg) (plus (reg) (const_int -1)))
         (set (pc) (if_then_else (reg != 0)
                                 (label_ref (label))
                                 (pc))).

     Some targets (ARM) do the comparison before the branch, as in the
     following form:

     3)  (parallel [(set (cc) (compare (plus (reg) (const_int -1)) 0))
                    (set (reg) (plus (reg) (const_int -1)))])
         (set (pc) (if_then_else (cc == NE)
                                 (label_ref (label))
                                 (pc)))  */

  pattern = PATTERN (doloop_pat);

  if (GET_CODE (pattern) != PARALLEL)
    {
      rtx cond;
      rtx prev_insn = prev_nondebug_insn (doloop_pat);
      rtx cmp_arg1, cmp_arg2;
      rtx cmp_orig;

      /* If the pattern is not a PARALLEL we expect two forms of doloop,
         corresponding to cases 2) and 3) above: in case 2) the decrement
         immediately precedes the branch, while in case 3) the compare and
         decrement instructions immediately precede the branch.  */

      if (prev_insn == NULL_RTX || !INSN_P (prev_insn))
        return 0;

      cmp = pattern;
      if (GET_CODE (PATTERN (prev_insn)) == PARALLEL)
        {
          /* The third case: the compare and decrement instructions
             immediately precede the branch.  */
          cmp_orig = XVECEXP (PATTERN (prev_insn), 0, 0);
          if (GET_CODE (cmp_orig) != SET)
            return 0;
          if (GET_CODE (SET_SRC (cmp_orig)) != COMPARE)
            return 0;
          cmp_arg1 = XEXP (SET_SRC (cmp_orig), 0);
          cmp_arg2 = XEXP (SET_SRC (cmp_orig), 1);
          if (cmp_arg2 != const0_rtx
              || GET_CODE (cmp_arg1) != PLUS)
            return 0;
          reg_orig = XEXP (cmp_arg1, 0);
          if (XEXP (cmp_arg1, 1) != GEN_INT (-1)
              || !REG_P (reg_orig))
            return 0;
          cc_reg = SET_DEST (cmp_orig);

          inc = XVECEXP (PATTERN (prev_insn), 0, 1);
        }
      else
        inc = PATTERN (prev_insn);
      /* We expect the condition to be of the form (reg != 0).  */
      cond = XEXP (SET_SRC (cmp), 0);
      if (GET_CODE (cond) != NE || XEXP (cond, 1) != const0_rtx)
        return 0;
    }
  else
    {
      cmp = XVECEXP (pattern, 0, 0);
      inc = XVECEXP (pattern, 0, 1);
    }

  /* Check for (set (reg) (something)).  */
  if (GET_CODE (inc) != SET)
    return 0;
  reg = SET_DEST (inc);
  if (! REG_P (reg))
    return 0;

  /* Check if something = (plus (reg) (const_int -1)).
     On IA-64, this decrement is wrapped in an if_then_else.  */
  inc_src = SET_SRC (inc);
  if (GET_CODE (inc_src) == IF_THEN_ELSE)
    inc_src = XEXP (inc_src, 1);
  if (GET_CODE (inc_src) != PLUS
      || XEXP (inc_src, 0) != reg
      || XEXP (inc_src, 1) != constm1_rtx)
    return 0;

  /* Check for (set (pc) (if_then_else (condition)
                                       (label_ref (label))
                                       (pc))).  */
  if (GET_CODE (cmp) != SET
      || SET_DEST (cmp) != pc_rtx
      || GET_CODE (SET_SRC (cmp)) != IF_THEN_ELSE
      || GET_CODE (XEXP (SET_SRC (cmp), 1)) != LABEL_REF
      || XEXP (SET_SRC (cmp), 2) != pc_rtx)
    return 0;

  /* Extract loop termination condition.  */
  condition = XEXP (SET_SRC (cmp), 0);

  /* We expect a GE or NE comparison with 0 or 1.  */
  if ((GET_CODE (condition) != GE
       && GET_CODE (condition) != NE)
      || (XEXP (condition, 1) != const0_rtx
          && XEXP (condition, 1) != const1_rtx))
    return 0;

  if ((XEXP (condition, 0) == reg)
      /* For the third case:  */
      || ((cc_reg != NULL_RTX)
          && (XEXP (condition, 0) == cc_reg)
          && (reg_orig == reg))
      || (GET_CODE (XEXP (condition, 0)) == PLUS
          && XEXP (XEXP (condition, 0), 0) == reg))
   {
     if (GET_CODE (pattern) != PARALLEL)
     /*  For the second form we expect:

         (set (reg) (plus (reg) (const_int -1)))
         (set (pc) (if_then_else (reg != 0)
                                 (label_ref (label))
                                 (pc)))

         which is equivalent to the following:

         (parallel [(set (pc) (if_then_else (reg != 1)
                                            (label_ref (label))
                                            (pc)))
                    (set (reg) (plus (reg) (const_int -1)))
                    (additional clobbers and uses)])

         For the third form we expect:

         (parallel [(set (cc) (compare (plus (reg) (const_int -1)) 0))
                    (set (reg) (plus (reg) (const_int -1)))])
         (set (pc) (if_then_else (cc == NE)
                                 (label_ref (label))
                                 (pc)))

         which is equivalent to the following:

         (parallel [(set (cc) (compare (reg) 1))
                    (set (reg) (plus (reg) (const_int -1)))
                    (set (pc) (if_then_else (cc == NE)
                                            (label_ref (label))
                                            (pc)))])

         So in both cases we return a condition of the second, canonical
         form.  */
        condition = gen_rtx_fmt_ee (NE, VOIDmode, inc_src, const1_rtx);

    return condition;
   }

  /* ??? If a machine uses a funny comparison, we could return a
     canonicalized form here.  */

  return 0;
}

/* Return nonzero if the loop specified by LOOP is suitable for
   the use of special low-overhead looping instructions.  DESC
   describes the number of iterations of the loop.  */

static bool
doloop_valid_p (struct loop *loop, struct niter_desc *desc)
{
  basic_block *body = get_loop_body (loop), bb;
  rtx insn;
  unsigned i;
  bool result = true;

  /* Check for loops that may not terminate under special conditions.  */
  if (!desc->simple_p
      || desc->assumptions
      || desc->infinite)
    {
      /* There are some cases that would require special attention.
         For example if the comparison is LEU and the comparison value
         is UINT_MAX then the loop will not terminate.  Similarly, if the
         comparison code is GEU and the comparison value is 0, the
         loop will not terminate.
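
         (Purely illustrative example of the LEU case, with a hypothetical
         unsigned induction variable i and bound n:

             for (i = 0; i <= n; i++)

         never terminates when n == UINT_MAX, since i <= UINT_MAX always
         holds.)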

         If the absolute increment is not 1, the loop can be infinite
         even with LTU/GTU, e.g. for (i = 3; i > 0; i -= 2)

         ??? We could compute these conditions at run-time and add an
         additional jump around the loop to ensure an infinite loop.
         However, it is very unlikely that this is the intended
         behavior of the loop and checking for these rare boundary
         conditions would pessimize all other code.

         If the loop is executed only a few times, an extra check to
         restart the loop could use up most of the benefits of using a
         count register loop.  Note, however, that normally this
         restart branch would never execute, so it could be predicted
         well by the CPU.  We should generate the pessimistic code by
         default, and have an option, e.g. -funsafe-loops, that would
         enable count-register loops in this case.  */
      if (dump_file)
        fprintf (dump_file, "Doloop: Possible infinite iteration case.\n");
      result = false;
      goto cleanup;
    }

  for (i = 0; i < loop->num_nodes; i++)
    {
      bb = body[i];

      for (insn = BB_HEAD (bb);
           insn != NEXT_INSN (BB_END (bb));
           insn = NEXT_INSN (insn))
        {
          /* Different targets have different requirements for low-overhead
             looping.  Call the back end for each instruction within the loop
             to let it decide whether the insn prohibits a low-overhead loop.
             If it does, it returns the reason, which we emit to the dump
             file.  */
          const char * invalid = targetm.invalid_within_doloop (insn);
          if (invalid)
            {
              if (dump_file)
                fprintf (dump_file, "Doloop: %s\n", invalid);
              result = false;
              goto cleanup;
            }
        }
    }
  result = true;

cleanup:
  free (body);

  return result;
}

/* Add a test of COND jumping to DEST on edge *E and set *E to the new
   fallthru edge.  If the condition is always false, do not do anything.
   If it is always true, redirect *E to DEST and return false.  In all
   other cases, return true.  */

static bool
add_test (rtx cond, edge *e, basic_block dest)
{
  rtx seq, jump, label;
  enum machine_mode mode;
  rtx op0 = XEXP (cond, 0), op1 = XEXP (cond, 1);
  enum rtx_code code = GET_CODE (cond);
  basic_block bb;

  mode = GET_MODE (XEXP (cond, 0));
  if (mode == VOIDmode)
    mode = GET_MODE (XEXP (cond, 1));

  start_sequence ();
  op0 = force_operand (op0, NULL_RTX);
  op1 = force_operand (op1, NULL_RTX);
  label = block_label (dest);
  do_compare_rtx_and_jump (op0, op1, code, 0, mode, NULL_RTX,
                           NULL_RTX, label, -1);

  jump = get_last_insn ();
  if (!jump || !JUMP_P (jump))
    {
      /* The condition is always false and the jump was optimized out.  */
      end_sequence ();
      return true;
    }

  seq = get_insns ();
  end_sequence ();

  /* There always is at least the jump insn in the sequence.  */
  gcc_assert (seq != NULL_RTX);

  bb = split_edge_and_insert (*e, seq);
  *e = single_succ_edge (bb);

  if (any_uncondjump_p (jump))
    {
      /* The condition is always true.  */
      delete_insn (jump);
      redirect_edge_and_branch_force (*e, dest);
      return false;
    }

  JUMP_LABEL (jump) = label;

  /* The jump is supposed to handle an unlikely special case.  */
  add_reg_note (jump, REG_BR_PROB, const0_rtx);

  LABEL_NUSES (label)++;

  make_edge (bb, dest, (*e)->flags & ~EDGE_FALLTHRU);
  return true;
}

/* Modify the loop to use the low-overhead looping insn where LOOP
   describes the loop, DESC describes the number of iterations of the
   loop, and DOLOOP_SEQ is the low-overhead looping sequence to emit at
   the end of the loop.  CONDITION is the condition separated from the
   DOLOOP_SEQ.  COUNT is the number of iterations of the LOOP.  */
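
/* Illustration only (a conceptual sketch, not RTL this pass emits
   verbatim): after doloop_modify the loop looks roughly like

       preheader:  counter_reg = count;     initialize the iteration count
                   (optional doloop_begin)
       top:        ... loop body ...
       latch:      doloop_end               decrement counter_reg and branch
                                            back to top while it is nonzero

   with the original compare-and-jump insn at the end of the loop
   deleted.  */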

static void
doloop_modify (struct loop *loop, struct niter_desc *desc,
               rtx doloop_seq, rtx condition, rtx count)
{
  rtx counter_reg;
  rtx tmp, noloop = NULL_RTX;
  rtx sequence;
  rtx jump_insn;
  rtx jump_label;
  int nonneg = 0;
  bool increment_count;
  basic_block loop_end = desc->out_edge->src;
  enum machine_mode mode;
  rtx true_prob_val;

  jump_insn = BB_END (loop_end);

  if (dump_file)
    {
      fprintf (dump_file, "Doloop: Inserting doloop pattern (");
      if (desc->const_iter)
        fprintf (dump_file, HOST_WIDEST_INT_PRINT_DEC, desc->niter);
      else
        fputs ("runtime", dump_file);
      fputs (" iterations).\n", dump_file);
    }

  /* Get the probability of the original branch.  If it exists, we would
     need to update REG_BR_PROB of the new jump_insn.  */
  true_prob_val = find_reg_note (jump_insn, REG_BR_PROB, NULL_RTX);

  /* Discard the original jump that continues the loop.  The original
     compare result may still be live, so it cannot be discarded
     explicitly.  */
  delete_insn (jump_insn);

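  /* The condition returned by doloop_condition_get tests either the
     counter register itself or (plus (reg) (const_int -1)); in the
     latter case strip the PLUS to recover the counter register.  */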
  counter_reg = XEXP (condition, 0);
  if (GET_CODE (counter_reg) == PLUS)
    counter_reg = XEXP (counter_reg, 0);
  mode = GET_MODE (counter_reg);

  increment_count = false;
  switch (GET_CODE (condition))
    {
    case NE:
      /* Currently only NE tests against zero and one are supported.  */
      noloop = XEXP (condition, 1);
      if (noloop != const0_rtx)
        {
          gcc_assert (noloop == const1_rtx);
          increment_count = true;
        }
      break;

    case GE:
      /* Currently only GE tests against zero are supported.  */
      gcc_assert (XEXP (condition, 1) == const0_rtx);

      noloop = constm1_rtx;

      /* The iteration count does not need incrementing for a GE test.  */
      increment_count = false;

      /* Determine if the iteration counter will be non-negative.
         Note that the maximum value loaded is iterations_max - 1.  */
      if (desc->niter_max
          <= ((unsigned HOST_WIDEST_INT) 1
              << (GET_MODE_PRECISION (mode) - 1)))
        nonneg = 1;
      break;

      /* Abort if an invalid doloop pattern has been generated.  */
    default:
      gcc_unreachable ();
    }

  if (increment_count)
    count = simplify_gen_binary (PLUS, mode, count, const1_rtx);

  /* Insert initialization of the count register into the loop header.  */
  start_sequence ();
  tmp = force_operand (count, counter_reg);
  convert_move (counter_reg, tmp, 1);
  sequence = get_insns ();
  end_sequence ();
  emit_insn_after (sequence, BB_END (loop_preheader_edge (loop)->src));

  if (desc->noloop_assumptions)
    {
      rtx ass = copy_rtx (desc->noloop_assumptions);
      basic_block preheader = loop_preheader_edge (loop)->src;
      basic_block set_zero
        = split_edge (loop_preheader_edge (loop));
      basic_block new_preheader
        = split_edge (loop_preheader_edge (loop));
      edge te;

      /* Expand the condition testing the assumptions and if it does not pass,
         reset the count register to 0.  */
      redirect_edge_and_branch_force (single_succ_edge (preheader), new_preheader);
      set_immediate_dominator (CDI_DOMINATORS, new_preheader, preheader);

      set_zero->count = 0;
      set_zero->frequency = 0;

      te = single_succ_edge (preheader);
      for (; ass; ass = XEXP (ass, 1))
        if (!add_test (XEXP (ass, 0), &te, set_zero))
          break;

      if (ass)
        {
          /* We reached a condition that is always true.  This is very hard to
             reproduce (such a loop does not roll, and thus it would most
             likely get optimized out by some of the preceding optimizations).
             In fact, I do not have any testcase for it.  However, it would
             also be very hard to show that it is impossible, so we must
             handle this case.  */
          set_zero->count = preheader->count;
          set_zero->frequency = preheader->frequency;
        }

      if (EDGE_COUNT (set_zero->preds) == 0)
        {
          /* All the conditions were simplified to false, remove the
             unreachable set_zero block.  */
          delete_basic_block (set_zero);
        }
      else
        {
          /* Reset the counter to zero in the set_zero block.  */
          start_sequence ();
          convert_move (counter_reg, noloop, 0);
          sequence = get_insns ();
          end_sequence ();
          emit_insn_after (sequence, BB_END (set_zero));

          set_immediate_dominator (CDI_DOMINATORS, set_zero,
                                   recompute_dominator (CDI_DOMINATORS,
                                                        set_zero));
        }

      set_immediate_dominator (CDI_DOMINATORS, new_preheader,
                               recompute_dominator (CDI_DOMINATORS,
                                                    new_preheader));
    }

  /* Some targets (e.g., C4x) need to initialize special looping
     registers.  */
#ifdef HAVE_doloop_begin
  {
    rtx init;
    unsigned level = get_loop_level (loop) + 1;
    init = gen_doloop_begin (counter_reg,
                             desc->const_iter ? desc->niter_expr : const0_rtx,
                             GEN_INT (desc->niter_max),
                             GEN_INT (level));
    if (init)
      {
        start_sequence ();
        emit_insn (init);
        sequence = get_insns ();
        end_sequence ();
        emit_insn_after (sequence, BB_END (loop_preheader_edge (loop)->src));
      }
  }
#endif

  /* Insert the new low-overhead looping insn.  */
  emit_jump_insn_after (doloop_seq, BB_END (loop_end));
  jump_insn = BB_END (loop_end);
  jump_label = block_label (desc->in_edge->dest);
  JUMP_LABEL (jump_insn) = jump_label;
  LABEL_NUSES (jump_label)++;

  /* Ensure the right fallthru edge is marked, in case we have reversed
     the condition.  */
  desc->in_edge->flags &= ~EDGE_FALLTHRU;
  desc->out_edge->flags |= EDGE_FALLTHRU;

  /* Add a REG_NONNEG note if the actual or estimated maximum number
     of iterations is non-negative.  */
  if (nonneg)
    add_reg_note (jump_insn, REG_NONNEG, NULL_RTX);

  /* Update the REG_BR_PROB note.  */
  if (true_prob_val)
    {
      /* Seems safer to use the branch probability.  */
      add_reg_note (jump_insn, REG_BR_PROB,
                    GEN_INT (desc->in_edge->probability));
    }
}

/* Process the loop described by LOOP, validating that it is suitable for
   conversion to use a low-overhead looping instruction, and replace the
   jump insn where suitable.  Return true if the loop was successfully
   modified.  */

static bool
doloop_optimize (struct loop *loop)
{
  enum machine_mode mode;
  rtx doloop_seq, doloop_pat, doloop_reg;
  rtx iterations, count;
  rtx iterations_max;
  rtx start_label;
  rtx condition;
  unsigned level, est_niter;
  int max_cost;
  struct niter_desc *desc;
  unsigned word_mode_size;
  unsigned HOST_WIDE_INT word_mode_max;

  if (dump_file)
    fprintf (dump_file, "Doloop: Processing loop %d.\n", loop->num);

  iv_analysis_loop_init (loop);

  /* Find the simple exit of LOOP.  */
  desc = get_simple_loop_desc (loop);

  /* Check that loop is a candidate for a low-overhead looping insn.  */
  if (!doloop_valid_p (loop, desc))
    {
      if (dump_file)
        fprintf (dump_file,
                 "Doloop: The loop is not suitable.\n");
      return false;
    }
  mode = desc->mode;

  est_niter = 3;
  if (desc->const_iter)
    est_niter = desc->niter;
  /* If the estimate on number of iterations is reliable (comes from profile
     feedback), use it.  Do not use it normally, since the expected number
     of iterations of an unrolled loop is 2.  */
  if (loop->header->count)
    est_niter = expected_loop_iterations (loop);

  if (est_niter < 3)
    {
      if (dump_file)
        fprintf (dump_file,
                 "Doloop: Too few iterations (%u) to be profitable.\n",
                 est_niter);
      return false;
    }

  max_cost
    = COSTS_N_INSNS (PARAM_VALUE (PARAM_MAX_ITERATIONS_COMPUTATION_COST));
  if (set_src_cost (desc->niter_expr, optimize_loop_for_speed_p (loop))
      > max_cost)
    {
      if (dump_file)
        fprintf (dump_file,
                 "Doloop: number of iterations too costly to compute.\n");
      return false;
    }

  count = copy_rtx (desc->niter_expr);
  iterations = desc->const_iter ? desc->niter_expr : const0_rtx;
  iterations_max = GEN_INT (desc->niter_max);
  level = get_loop_level (loop) + 1;

  /* Generate looping insn.  If the pattern FAILs then give up trying
     to modify the loop since there is some aspect the back-end does
     not like.  */
  start_label = block_label (desc->in_edge->dest);
  doloop_reg = gen_reg_rtx (mode);
  doloop_seq = gen_doloop_end (doloop_reg, iterations, iterations_max,
                               GEN_INT (level), start_label);

  word_mode_size = GET_MODE_PRECISION (word_mode);
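  /* The next statement computes the all-ones value 2^word_mode_size - 1;
     the double shift avoids shifting by the full width of the type, which
     would be undefined behavior when word_mode_size equals that width.  */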
  word_mode_max
    = ((unsigned HOST_WIDE_INT) 1 << (word_mode_size - 1) << 1) - 1;
  if (! doloop_seq
      && mode != word_mode
      /* Before trying a mode different from the one in which the number of
         iterations is computed, we must be sure that the number of
         iterations fits into the new mode.  */
      && (word_mode_size >= GET_MODE_PRECISION (mode)
          || desc->niter_max <= word_mode_max))
    {
      if (word_mode_size > GET_MODE_PRECISION (mode))
        {
          count = simplify_gen_unary (ZERO_EXTEND, word_mode,
                                      count, mode);
          iterations = simplify_gen_unary (ZERO_EXTEND, word_mode,
                                           iterations, mode);
          iterations_max = simplify_gen_unary (ZERO_EXTEND, word_mode,
                                               iterations_max, mode);
        }
      else
        {
          count = lowpart_subreg (word_mode, count, mode);
          iterations = lowpart_subreg (word_mode, iterations, mode);
          iterations_max = lowpart_subreg (word_mode, iterations_max, mode);
        }
      PUT_MODE (doloop_reg, word_mode);
      doloop_seq = gen_doloop_end (doloop_reg, iterations, iterations_max,
                                   GEN_INT (level), start_label);
    }
  if (! doloop_seq)
    {
      if (dump_file)
        fprintf (dump_file,
                 "Doloop: Target unwilling to use doloop pattern!\n");
      return false;
    }

  /* If multiple instructions were created, the last must be the
     jump instruction.  Also, a raw define_insn may yield a plain
     pattern.  */
  doloop_pat = doloop_seq;
  if (INSN_P (doloop_pat))
    {
      while (NEXT_INSN (doloop_pat) != NULL_RTX)
        doloop_pat = NEXT_INSN (doloop_pat);
      if (!JUMP_P (doloop_pat))
        doloop_pat = NULL_RTX;
    }

  if (! doloop_pat
      || ! (condition = doloop_condition_get (doloop_pat)))
    {
      if (dump_file)
        fprintf (dump_file, "Doloop: Unrecognizable doloop pattern!\n");
      return false;
    }

  doloop_modify (loop, desc, doloop_seq, condition, count);
  return true;
}

/* This is the main entry point.  Process all loops using doloop_optimize.  */

void
doloop_optimize_loops (void)
{
  loop_iterator li;
  struct loop *loop;

  FOR_EACH_LOOP (li, loop, 0)
    {
      doloop_optimize (loop);
    }

  iv_analysis_done ();

#ifdef ENABLE_CHECKING
  verify_dominators (CDI_DOMINATORS);
  verify_loop_structure ();
#endif
}
#endif /* HAVE_doloop_end */