/* Thread edges through blocks and update the control flow and SSA graphs.
   Copyright (C) 2004-2019 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "tree.h"
#include "gimple.h"
#include "cfghooks.h"
#include "tree-pass.h"
#include "ssa.h"
#include "fold-const.h"
#include "cfganal.h"
#include "gimple-iterator.h"
#include "tree-ssa.h"
#include "tree-ssa-threadupdate.h"
#include "cfgloop.h"
#include "dbgcnt.h"
#include "tree-cfg.h"
#include "tree-vectorizer.h"

/* Given a block B, update the CFG and SSA graph to reflect redirecting
   one or more in-edges to B to instead reach the destination of an
   out-edge from B while preserving any side effects in B.

   i.e., given A->B and B->C, change A->B to be A->C yet still preserve the
   side effects of executing B.

     1. Make a copy of B (including its outgoing edges and statements).  Call
	the copy B'.  Note B' has no incoming edges or PHIs at this time.

     2. Remove the control statement at the end of B' and all outgoing edges
	except B'->C.

     3. Add a new argument to each PHI in C with the same value as the existing
	argument associated with edge B->C.  Associate the new PHI arguments
	with the edge B'->C.

     4. For each PHI in B, find or create a PHI in B' with an identical
	PHI_RESULT.  Add an argument to the PHI in B' which has the same
	value as the PHI in B associated with the edge A->B.  Associate
	the new argument in the PHI in B' with the edge A->B.

     5. Change the edge A->B to A->B'.

	5a. This automatically deletes any PHI arguments associated with the
	    edge A->B in B.

	5b. This automatically associates each new argument added in step 4
	    with the edge A->B'.

     6. Repeat for other incoming edges into B.

     7. Put the duplicated resources in B and all the B' blocks into SSA form.

   Note that block duplication can be minimized by first collecting the
   set of unique destination blocks that the incoming edges should
   be threaded to.

   We reduce the number of edges and statements we create by not copying all
   the outgoing edges and the control statement in step #1.  We instead create
   a template block without the outgoing edges and duplicate the template.

   Another case this code handles is threading through a "joiner" block.  In
   this case, we do not know the destination of the joiner block, but one
   of the outgoing edges from the joiner block leads to a threadable path.  This
   case largely works as outlined above, except the duplicate of the joiner
   block still contains a full set of outgoing edges and its control statement.
   We just redirect one of its outgoing edges to our jump threading path.  */

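/* As a hedged illustration only (names such as B, B' and C refer to the
   description above and are hypothetical), steps #1, #2 and #5 map
   roughly onto the CFG primitives used later in this file:

     basic_block bprime = duplicate_block (b, NULL, NULL);	   step #1
     remove_ctrl_stmt_and_useless_edges (bprime, c);		   step #2
     redirect_edge_and_branch (a_to_b, bprime);			   step #5

   The PHI bookkeeping in steps #3 and #4 is handled by copy_phi_args
   and copy_phi_arg_into_existing_phi below.  */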

/* Steps #5 and #6 of the above algorithm are best implemented by walking
   all the incoming edges which thread to the same destination edge at
   the same time.  That avoids lots of table lookups to get information
   for the destination edge.

   To realize that implementation we create a list of incoming edges
   which thread to the same outgoing edge.  Thus to implement steps
   #5 and #6 we traverse our hash table of outgoing edge information.
   For each entry we walk the list of incoming edges which thread to
   the current outgoing edge.  */

struct el
{
  edge e;
  struct el *next;
};

/* Main data structure recording information regarding B's duplicate
   blocks.  */

/* We need to efficiently record the unique thread destinations of this
   block and specific information associated with those destinations.  We
   may have many incoming edges threaded to the same outgoing edge.  This
   can be naturally implemented with a hash table.  */

struct redirection_data : free_ptr_hash<redirection_data>
{
  /* We support wiring up two block duplicates in a jump threading path.

     One is a normal block copy where we remove the control statement
     and wire up its single remaining outgoing edge to the thread path.

     The other is a joiner block where we leave the control statement
     in place, but wire one of the outgoing edges to a thread path.

     In theory we could have multiple block duplicates in a jump
     threading path, but I haven't tried that.

     The duplicate blocks appear in this array in the same order in
     which they appear in the jump thread path.  */
  basic_block dup_blocks[2];

  /* The jump threading path.  */
  vec<jump_thread_edge *> *path;

  /* A list of incoming edges which we want to thread to the
     same path.  */
  struct el *incoming_edges;

  /* hash_table support.  */
  static inline hashval_t hash (const redirection_data *);
  static inline int equal (const redirection_data *, const redirection_data *);
};

/* Dump a jump threading path, including annotations about each
   edge in the path.  */

static void
dump_jump_thread_path (FILE *dump_file, vec<jump_thread_edge *> path,
		       bool registering)
{
  fprintf (dump_file,
	   "  %s%s jump thread: (%d, %d) incoming edge; ",
	   (registering ? "Registering" : "Cancelling"),
	   (path[0]->type == EDGE_FSM_THREAD ? " FSM": ""),
	   path[0]->e->src->index, path[0]->e->dest->index);

  for (unsigned int i = 1; i < path.length (); i++)
    {
      /* We can get paths with a NULL edge when the final destination
	 of a jump thread turns out to be a constant address.  We dump
	 those paths when debugging, so we have to be prepared for that
	 possibility here.  */
      if (path[i]->e == NULL)
	continue;

      if (path[i]->type == EDGE_COPY_SRC_JOINER_BLOCK)
	fprintf (dump_file, " (%d, %d) joiner; ",
		 path[i]->e->src->index, path[i]->e->dest->index);
      if (path[i]->type == EDGE_COPY_SRC_BLOCK)
	fprintf (dump_file, " (%d, %d) normal;",
		 path[i]->e->src->index, path[i]->e->dest->index);
      if (path[i]->type == EDGE_NO_COPY_SRC_BLOCK)
	fprintf (dump_file, " (%d, %d) nocopy;",
		 path[i]->e->src->index, path[i]->e->dest->index);
      if (path[0]->type == EDGE_FSM_THREAD)
	fprintf (dump_file, " (%d, %d) ",
		 path[i]->e->src->index, path[i]->e->dest->index);
    }
  fputc ('\n', dump_file);
}
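
/* As a hedged example, with TDF_DETAILS dumping enabled the function
   above emits a line along these lines for a path through a joiner
   (the block numbers are invented):

     Registering jump thread: (2, 4) incoming edge;  (4, 6) joiner;  (6, 8) normal;

   i.e. the incoming edge first, then one annotated entry per edge on
   the registered path.  */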

/* Simple hashing function.  For any given incoming edge E, we're going
   to be most concerned with the final destination of its jump thread
   path.  So hash on the block index of the final edge in the path.  */

inline hashval_t
redirection_data::hash (const redirection_data *p)
{
  vec<jump_thread_edge *> *path = p->path;
  return path->last ()->e->dest->index;
}

/* Given two hash table entries, return true if they have the same
   jump threading path.  */
inline int
redirection_data::equal (const redirection_data *p1, const redirection_data *p2)
{
  vec<jump_thread_edge *> *path1 = p1->path;
  vec<jump_thread_edge *> *path2 = p2->path;

  if (path1->length () != path2->length ())
    return false;

  for (unsigned int i = 1; i < path1->length (); i++)
    {
      if ((*path1)[i]->type != (*path2)[i]->type
	  || (*path1)[i]->e != (*path2)[i]->e)
	return false;
    }

  return true;
}

/* Rather than search all the edges in jump thread paths each time
   DOM is able to simplify a control statement, we build a hash table
   with the deleted edges.  We only care about the address of the edge,
   not its contents.  */
struct removed_edges : nofree_ptr_hash<edge_def>
{
  static hashval_t hash (edge e) { return htab_hash_pointer (e); }
  static bool equal (edge e1, edge e2) { return e1 == e2; }
};

static hash_table<removed_edges> *removed_edges;

/* Data structure of information to pass to hash table traversal routines.  */
struct ssa_local_info_t
{
  /* The current block we are working on.  */
  basic_block bb;

  /* We only create a template block for the first duplicated block in a
     jump threading path as we may need many duplicates of that block.

     The second duplicate block in a path is specific to that path.  Creating
     and sharing a template for that block is considerably more difficult.  */
  basic_block template_block;

  /* Blocks duplicated for the thread.  */
  bitmap duplicate_blocks;

  /* TRUE if we thread one or more jumps, FALSE otherwise.  */
  bool jumps_threaded;

  /* When we have multiple paths through a joiner which reach different
     final destinations, then we may need to correct for potential
     profile insanities.  */
  bool need_profile_correction;
};

/* Passes which use the jump threading code register jump threading
   opportunities as they are discovered.  We keep the registered
   jump threading opportunities in this vector as edge pairs
   (original_edge, target_edge).  */
static vec<vec<jump_thread_edge *> *> paths;

/* When we start updating the CFG for threading, data necessary for jump
   threading is attached to the AUX field for the incoming edge.  Use these
   macros to access the underlying structure attached to the AUX field.  */
#define THREAD_PATH(E) ((vec<jump_thread_edge *> *)(E)->aux)
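
/* A hedged usage sketch: once a client pass has attached a path to
   E->aux, code in this file retrieves it and inspects the final edge
   of the path like so (see e.g. lookup_redirection_data and
   compute_path_counts):

     vec<jump_thread_edge *> *path = THREAD_PATH (e);
     edge final_edge = path->last ()->e;  */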

/* Jump threading statistics.  */

struct thread_stats_d
{
  unsigned long num_threaded_edges;
};

struct thread_stats_d thread_stats;


/* Remove the last statement in block BB if it is a control statement.
   Also remove all outgoing edges except the edge which reaches DEST_BB.
   If DEST_BB is NULL, then remove all outgoing edges.  */

void
remove_ctrl_stmt_and_useless_edges (basic_block bb, basic_block dest_bb)
{
  gimple_stmt_iterator gsi;
  edge e;
  edge_iterator ei;

  gsi = gsi_last_bb (bb);

  /* If the duplicate ends with a control statement, then remove it.

     Note that if we are duplicating the template block rather than the
     original basic block, then the duplicate might not have any real
     statements in it.  */
  if (!gsi_end_p (gsi)
      && gsi_stmt (gsi)
      && (gimple_code (gsi_stmt (gsi)) == GIMPLE_COND
	  || gimple_code (gsi_stmt (gsi)) == GIMPLE_GOTO
	  || gimple_code (gsi_stmt (gsi)) == GIMPLE_SWITCH))
    gsi_remove (&gsi, true);

  for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
    {
      if (e->dest != dest_bb)
	{
	  free_dom_edge_info (e);
	  remove_edge (e);
	}
      else
	{
	  e->probability = profile_probability::always ();
	  ei_next (&ei);
	}
    }

  /* If the remaining edge is a loop exit, there must have been
     a removed edge that was not a loop exit.

     In that case BB and possibly other blocks were previously
     in the loop, but are now outside the loop.  Thus, we need
     to update the loop structures.  */
  if (single_succ_p (bb)
      && loop_outer (bb->loop_father)
      && loop_exit_edge_p (bb->loop_father, single_succ_edge (bb)))
    loops_state_set (LOOPS_NEED_FIXUP);
}

/* Create a duplicate of BB.  Record the duplicate block in an array
   indexed by COUNT stored in RD.  */

static void
create_block_for_threading (basic_block bb,
			    struct redirection_data *rd,
			    unsigned int count,
			    bitmap *duplicate_blocks)
{
  edge_iterator ei;
  edge e;

  /* We can use the generic block duplication code and simply remove
     the stuff we do not need.  */
  rd->dup_blocks[count] = duplicate_block (bb, NULL, NULL);

  FOR_EACH_EDGE (e, ei, rd->dup_blocks[count]->succs)
    {
      e->aux = NULL;

      /* If we duplicate a block with an outgoing edge marked as
	 EDGE_IGNORE, we must clear EDGE_IGNORE so that it doesn't
	 leak out of the current pass.

	 It would be better to simplify switch statements and remove
	 the edges before we get here, but the sequencing is nontrivial.  */
      e->flags &= ~EDGE_IGNORE;
    }

  /* Zero out the profile, since the block is unreachable for now.  */
  rd->dup_blocks[count]->count = profile_count::uninitialized ();
  if (duplicate_blocks)
    bitmap_set_bit (*duplicate_blocks, rd->dup_blocks[count]->index);
}

/* Main data structure to hold information for duplicates of BB.  */

static hash_table<redirection_data> *redirection_data;

/* Given an outgoing edge E, look up and return its entry in our hash table.

   If INSERT is true, then we insert the entry into the hash table if
   it is not already present, and E is added to the list of incoming
   edges associated with the entry.  */

static struct redirection_data *
lookup_redirection_data (edge e, enum insert_option insert)
{
  struct redirection_data **slot;
  struct redirection_data *elt;
  vec<jump_thread_edge *> *path = THREAD_PATH (e);

  /* Build a hash table element so we can see if E is already
     in the table.  */
  elt = XNEW (struct redirection_data);
  elt->path = path;
  elt->dup_blocks[0] = NULL;
  elt->dup_blocks[1] = NULL;
  elt->incoming_edges = NULL;

  slot = redirection_data->find_slot (elt, insert);

  /* This will only happen if INSERT is false and the entry is not
     in the hash table.  */
  if (slot == NULL)
    {
      free (elt);
      return NULL;
    }

  /* This will only happen if E was not in the hash table and
     INSERT is true.  */
  if (*slot == NULL)
    {
      *slot = elt;
      elt->incoming_edges = XNEW (struct el);
      elt->incoming_edges->e = e;
      elt->incoming_edges->next = NULL;
      return elt;
    }
  /* E was in the hash table.  */
  else
    {
      /* Free ELT as we do not need it anymore, we will extract the
	 relevant entry from the hash table itself.  */
      free (elt);

      /* Get the entry stored in the hash table.  */
      elt = *slot;

      /* If insertion was requested, then we need to add E
	 to the list of incoming edges associated with the entry.  */
      if (insert)
	{
	  struct el *el = XNEW (struct el);
	  el->next = elt->incoming_edges;
	  el->e = e;
	  elt->incoming_edges = el;
	}

      return elt;
    }
}
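
/* A hedged example of the calling convention: when thread_block_1
   below encounters an incoming edge E carrying a path it wants to
   thread, it simply does

     lookup_redirection_data (e, INSERT);

   which creates the hash table entry the first time the path is seen
   and prepends E to the entry's incoming edge list on later calls.  */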

/* Similar to copy_phi_args, except that the PHI arg already exists;
   it just does not yet have a value associated with it.  */

static void
copy_phi_arg_into_existing_phi (edge src_e, edge tgt_e)
{
  int src_idx = src_e->dest_idx;
  int tgt_idx = tgt_e->dest_idx;

  /* Iterate over each PHI in SRC_E->dest and the corresponding
     PHI in TGT_E->dest.  */
  for (gphi_iterator gsi = gsi_start_phis (src_e->dest),
			   gsi2 = gsi_start_phis (tgt_e->dest);
       !gsi_end_p (gsi);
       gsi_next (&gsi), gsi_next (&gsi2))
    {
      gphi *src_phi = gsi.phi ();
      gphi *dest_phi = gsi2.phi ();
      tree val = gimple_phi_arg_def (src_phi, src_idx);
      location_t locus = gimple_phi_arg_location (src_phi, src_idx);

      SET_PHI_ARG_DEF (dest_phi, tgt_idx, val);
      gimple_phi_arg_set_location (dest_phi, tgt_idx, locus);
    }
}

/* Given ssa_name DEF, backtrack jump threading PATH from node IDX
   to see if it has a constant value in a flow sensitive manner.  Set
   LOCUS to the location of the constant phi arg and return the value.
   Return DEF directly if either PATH or IDX is zero.  */

static tree
get_value_locus_in_path (tree def, vec<jump_thread_edge *> *path,
			 basic_block bb, int idx, location_t *locus)
{
  tree arg;
  gphi *def_phi;
  basic_block def_bb;

  if (path == NULL || idx == 0)
    return def;

  def_phi = dyn_cast <gphi *> (SSA_NAME_DEF_STMT (def));
  if (!def_phi)
    return def;

  def_bb = gimple_bb (def_phi);
  /* Don't propagate loop invariants into deeper loops.  */
  if (!def_bb || bb_loop_depth (def_bb) < bb_loop_depth (bb))
    return def;

  /* Backtrack jump threading path from IDX to see if def has a constant
     value.  */
  for (int j = idx - 1; j >= 0; j--)
    {
      edge e = (*path)[j]->e;
      if (e->dest == def_bb)
	{
	  arg = gimple_phi_arg_def (def_phi, e->dest_idx);
	  if (is_gimple_min_invariant (arg))
	    {
	      *locus = gimple_phi_arg_location (def_phi, e->dest_idx);
	      return arg;
	    }
	  break;
	}
    }

  return def;
}
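
/* A hedged, concrete example of the backtracking above: suppose the
   path entered DEF's defining block via an edge from block A, and the
   defining statement is

     x_1 = PHI <7(A), y_2(D)>

   Then for DEF == x_1 the loop finds the path edge into the block,
   sees that the corresponding argument 7 is a minimal invariant, and
   returns 7 along with that argument's location instead of x_1.
   (The block names and SSA names here are invented.)  */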

/* For each PHI in BB, copy the argument associated with SRC_E to TGT_E.
   Try to backtrack jump threading PATH from node IDX to see if the arg
   has a constant value; if so, copy the constant value instead of the
   argument itself.  */

static void
copy_phi_args (basic_block bb, edge src_e, edge tgt_e,
	       vec<jump_thread_edge *> *path, int idx)
{
  gphi_iterator gsi;
  int src_indx = src_e->dest_idx;

  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gphi *phi = gsi.phi ();
      tree def = gimple_phi_arg_def (phi, src_indx);
      location_t locus = gimple_phi_arg_location (phi, src_indx);

      if (TREE_CODE (def) == SSA_NAME
	  && !virtual_operand_p (gimple_phi_result (phi)))
	def = get_value_locus_in_path (def, path, bb, idx, &locus);

      add_phi_arg (phi, def, tgt_e, locus);
    }
}

/* We have recently made a copy of ORIG_BB, including its outgoing
   edges.  The copy is NEW_BB.  Every PHI node in every direct successor of
   ORIG_BB has a new argument associated with the edge from NEW_BB to the
   successor.  Initialize the PHI argument so that it is equal to the PHI
   argument associated with the edge from ORIG_BB to the successor.
   PATH and IDX are used to check if the new PHI argument has a constant
   value in a flow sensitive manner.  */

static void
update_destination_phis (basic_block orig_bb, basic_block new_bb,
			 vec<jump_thread_edge *> *path, int idx)
{
  edge_iterator ei;
  edge e;

  FOR_EACH_EDGE (e, ei, orig_bb->succs)
    {
      edge e2 = find_edge (new_bb, e->dest);
      copy_phi_args (e->dest, e, e2, path, idx);
    }
}

/* Given a duplicate block and its single destination (both stored
   in RD), create an edge between the duplicate and its single
   destination.

   Add an additional argument to any PHI nodes at the single
   destination.  IDX is the node in the jump threading path at
   which we start checking whether the new PHI argument has a
   constant value along the jump threading path.  */

static void
create_edge_and_update_destination_phis (struct redirection_data *rd,
					 basic_block bb, int idx)
{
  edge e = make_single_succ_edge (bb, rd->path->last ()->e->dest, EDGE_FALLTHRU);

  rescan_loop_exit (e, true, false);

  /* We used to copy the thread path here.  That was added in 2007
     and dutifully updated through the representation changes in 2013.

     In 2013 we added code to thread from an interior node through
     the backedge to another interior node.  That runs after the code
     to thread through loop headers from outside the loop.

     The latter may delete edges in the CFG, including those
     which appeared in the jump threading path we copied here.  Thus
     we'd end up using a dangling pointer.

     After reviewing the 2007/2011 code, I can't see how anything
     depended on copying the AUX field and clearly copying the jump
     threading path is problematical due to embedded edge pointers.
     It has been removed.  */
  e->aux = NULL;

  /* If there are any PHI nodes at the destination of the outgoing edge
     from the duplicate block, then we will need to add a new argument
     to them.  The argument should have the same value as the argument
     associated with the outgoing edge stored in RD.  */
  copy_phi_args (e->dest, rd->path->last ()->e, e, rd->path, idx);
}

/* Look through PATH beginning at START and return TRUE if there are
   any additional blocks that need to be duplicated.  Otherwise,
   return FALSE.  */
static bool
any_remaining_duplicated_blocks (vec<jump_thread_edge *> *path,
				 unsigned int start)
{
  for (unsigned int i = start + 1; i < path->length (); i++)
    {
      if ((*path)[i]->type == EDGE_COPY_SRC_JOINER_BLOCK
	  || (*path)[i]->type == EDGE_COPY_SRC_BLOCK)
	return true;
    }
  return false;
}


/* Compute the amount of profile count coming into the jump threading
   path stored in RD that we are duplicating, returned in
   PATH_IN_COUNT_PTR, as well as the amount of counts flowing out of the
   duplicated path, returned in PATH_OUT_COUNT_PTR.  LOCAL_INFO is used to
   identify blocks duplicated for jump threading, which have duplicated
   edges that need to be ignored in the analysis.  Return true if the path
   contains a joiner, false otherwise.

   In the non-joiner case, this is straightforward - all the counts
   flowing into the jump threading path should flow through the duplicated
   block and out of the duplicated path.

   In the joiner case, it is very tricky.  Some of the counts flowing into
   the original path go off-path at the joiner.  The problem is that while
   we know how much total count goes off-path in the original control flow,
   we don't know how many of the counts corresponding to just the jump
   threading path go off-path at the joiner.

   For example, assume we have the following control flow and identified
   jump threading paths:

		A     B     C
		 \    |    /
	       Ea \   |Eb / Ec
		   \  |  /
		    v v v
		      J       <-- Joiner
		     / \
		Eoff/   \Eon
		   /     \
		  v       v
		Soff     Son  <--- Normal
			 /\
		      Ed/  \ Ee
		       /    \
		      v     v
		      D      E

	    Jump threading paths: A -> J -> Son -> D (path 1)
				  C -> J -> Son -> E (path 2)

   Note that the control flow could be more complicated:
   - Each jump threading path may have more than one incoming edge.  I.e. A and
   Ea could represent multiple incoming blocks/edges that are included in
   path 1.
   - There could be EDGE_NO_COPY_SRC_BLOCK edges after the joiner (either
   before or after the "normal" copy block).  These are not duplicated onto
   the jump threading path, as they are single-successor.
   - Any of the blocks along the path may have other incoming edges that
   are not part of any jump threading path, but add profile counts along
   the path.

   In the above example, after all jump threading is complete, we will
   end up with the following control flow:

		A	   B	       C
		|	   |	       |
	      Ea|	   |Eb	       |Ec
		|	   |	       |
		v	   v	       v
	       Ja	   J	      Jc
	       / \	  / \Eon'     / \
	  Eona/   \   ---/---\--------   \Eonc
	     /     \ /  /     \		  \
	    v       v  v       v	  v
	   Sona     Soff      Son	Sonc
	     \		       /\	  /
	      \___________    /  \  _____/
			  \  /    \/
			   vv      v
			    D      E

   The main issue to notice here is that when we are processing path 1
   (A->J->Son->D) we need to figure out the outgoing edge weights to
   the duplicated edges Ja->Sona and Ja->Soff, while ensuring that the
   sum of the incoming weights to D remain Ed.  The problem with simply
   assuming that Ja (and Jc when processing path 2) has the same outgoing
   probabilities to its successors as the original block J, is that after
   all paths are processed and other edges/counts removed (e.g. none
   of Ec will reach D after processing path 2), we may end up with not
   enough count flowing along duplicated edge Sona->D.

   Therefore, in the case of a joiner, we keep track of all counts
   coming in along the current path, as well as from predecessors not
   on any jump threading path (Eb in the above example).  While we
   first assume that the duplicated Eona for Ja->Sona has the same
   probability as the original, we later compensate for other jump
   threading paths that may eliminate edges.  We do that by keeping track
   of all counts coming into the original path that are not in a jump
   thread (Eb in the above example, but as noted earlier, there could
   be other predecessors incoming to the path at various points, such
   as at Son).  Call this cumulative non-path count coming into the path
   before D "Enonpath".  We then ensure that the count from Sona->D is
   at least as big as (Ed - Enonpath), but no bigger than the minimum
   weight along the jump threading path.  The probabilities of both the
   original and duplicated joiner block J and Ja will be adjusted
   accordingly after the updates.  */
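
/* To make the constraint above concrete with hedged, invented numbers:
   if Ed is 80, the cumulative non-path count Enonpath reaching D is 30,
   and the minimum count along the duplicated path is 60, then the
   computed path_out_count is clamped into the range [80 - 30, 60]:
   raised to at least 50, but never above 60.  */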

static bool
compute_path_counts (struct redirection_data *rd,
		     ssa_local_info_t *local_info,
		     profile_count *path_in_count_ptr,
		     profile_count *path_out_count_ptr)
{
  edge e = rd->incoming_edges->e;
  vec<jump_thread_edge *> *path = THREAD_PATH (e);
  edge elast = path->last ()->e;
  profile_count nonpath_count = profile_count::zero ();
  bool has_joiner = false;
  profile_count path_in_count = profile_count::zero ();

  /* Start by accumulating incoming edge counts to the path's first bb
     into a couple of buckets:
	path_in_count: total count of incoming edges that flow into the
		  current path.
	nonpath_count: total count of incoming edges that are not
		  flowing along *any* path.  These are the counts
		  that will still flow along the original path after
		  all path duplication is done by potentially multiple
		  calls to this routine.
     (Any other incoming edge counts are for a different jump threading
     path that will be handled by a later call to this routine.)
     To make this easier, start by recording all incoming edges that flow into
     the current path in a bitmap.  We could add up the path's incoming edge
     counts here, but we still need to walk all the first bb's incoming edges
     below to add up the counts of the other edges not included in this jump
     threading path.  */
  struct el *next, *el;
  auto_bitmap in_edge_srcs;
  for (el = rd->incoming_edges; el; el = next)
    {
      next = el->next;
      bitmap_set_bit (in_edge_srcs, el->e->src->index);
    }
  edge ein;
  edge_iterator ei;
  FOR_EACH_EDGE (ein, ei, e->dest->preds)
    {
      vec<jump_thread_edge *> *ein_path = THREAD_PATH (ein);
      /* Simply check the incoming edge src against the set captured above.  */
      if (ein_path
	  && bitmap_bit_p (in_edge_srcs, (*ein_path)[0]->e->src->index))
	{
	  /* It is necessary but not sufficient that the last path edges
	     are identical.  There may be different paths that share the
	     same last path edge in the case where the last edge has a nocopy
	     source block.  */
	  gcc_assert (ein_path->last ()->e == elast);
	  path_in_count += ein->count ();
	}
      else if (!ein_path)
	{
	  /* Keep track of the incoming edges that are not on any jump-threading
	     path.  These counts will still flow out of the original path after
	     all jump threading is complete.  */
	  nonpath_count += ein->count ();
	}
    }

  /* Now compute the fraction of the total count coming into the first
     path bb that is from the current threading path.  */
  profile_count total_count = e->dest->count;
  /* Handle incoming profile insanities.  */
  if (total_count < path_in_count)
    path_in_count = total_count;
  profile_probability onpath_scale = path_in_count.probability_in (total_count);

  /* Walk the entire path to do some more computation in order to estimate
     how much of the path_in_count will flow out of the duplicated threading
     path.  In the non-joiner case this is straightforward (it should be
     the same as path_in_count, although we will handle incoming profile
     insanities by setting it equal to the minimum count along the path).

     In the joiner case, we need to estimate how much of the path_in_count
     will stay on the threading path after the joiner's conditional branch.
     We don't really know for sure how much of the counts
     associated with this path go to each successor of the joiner, but we'll
     estimate based on the fraction of the total count coming into the path's
     first bb that was from the threading paths (computed above in
     onpath_scale).  Afterwards, we will need to do some fixup to account
     for other threading paths and possible profile insanities.

     In order to estimate the joiner case's counts we also need to update
     nonpath_count with any additional counts coming into the path.  Other
     blocks along the path may have additional predecessors from outside
     the path.  */
  profile_count path_out_count = path_in_count;
  profile_count min_path_count = path_in_count;
  for (unsigned int i = 1; i < path->length (); i++)
    {
      edge epath = (*path)[i]->e;
      profile_count cur_count = epath->count ();
      if ((*path)[i]->type == EDGE_COPY_SRC_JOINER_BLOCK)
	{
	  has_joiner = true;
	  cur_count = cur_count.apply_probability (onpath_scale);
	}
      /* In the joiner case we need to update nonpath_count for any edges
	 coming into the path that will contribute to the count flowing
	 into the path successor.  */
      if (has_joiner && epath != elast)
	{
	  /* Look for other incoming edges after joiner.  */
	  FOR_EACH_EDGE (ein, ei, epath->dest->preds)
	    {
	      if (ein != epath
		  /* Ignore in edges from blocks we have duplicated for a
		     threading path, which have duplicated edge counts until
		     they are redirected by an invocation of this routine.  */
		  && !bitmap_bit_p (local_info->duplicate_blocks,
				    ein->src->index))
		nonpath_count += ein->count ();
	    }
	}
      if (cur_count < path_out_count)
	path_out_count = cur_count;
      if (epath->count () < min_path_count)
	min_path_count = epath->count ();
    }

  /* We computed path_out_count above assuming that this path targeted
     the joiner's on-path successor with the same likelihood as it
     reached the joiner.  However, other thread paths through the joiner
     may take a different path through the normal copy source block
     (i.e. they have a different elast), meaning that they do not
     contribute any counts to this path's elast.  As a result, it may
     turn out that this path must have more count flowing to the on-path
     successor of the joiner.  Essentially, all of this path's elast
     count must be contributed by this path and any nonpath counts
     (since any path through the joiner with a different elast will not
     include a copy of this elast in its duplicated path).
     So ensure that this path's path_out_count is at least the
     difference between elast->count () and nonpath_count.  Otherwise the edge
     counts after threading will not be sane.  */
  if (local_info->need_profile_correction
      && has_joiner && path_out_count < elast->count () - nonpath_count)
    {
      path_out_count = elast->count () - nonpath_count;
      /* But neither can we go above the minimum count along the path
	 we are duplicating.  This can be an issue due to profile
	 insanities coming in to this pass.  */
      if (path_out_count > min_path_count)
	path_out_count = min_path_count;
    }

  *path_in_count_ptr = path_in_count;
  *path_out_count_ptr = path_out_count;
  return has_joiner;
}


/* Update the counts for both an original path edge EPATH and its
   duplicate EDUP.  The duplicate source block will get a count of
   PATH_IN_COUNT, and the duplicate edge EDUP will have a count of
   PATH_OUT_COUNT.  */
static void
update_profile (edge epath, edge edup, profile_count path_in_count,
		profile_count path_out_count)
{

  /* First update the duplicated block's count.  */
  if (edup)
    {
      basic_block dup_block = edup->src;

      /* Edup's count is reduced by path_out_count.  We need to redistribute
         probabilities to the remaining edges.  */

      edge esucc;
      edge_iterator ei;
      profile_probability edup_prob
	 = path_out_count.probability_in (path_in_count);

      /* Either scale up or scale down the probabilities of the remaining
	 edges.  Probabilities are always in the range <0,1> and thus we
	 can't do both in the same loop.  */
      if (edup->probability > edup_prob)
	{
	   profile_probability rev_scale
	     = (profile_probability::always () - edup->probability)
	       / (profile_probability::always () - edup_prob);
	   FOR_EACH_EDGE (esucc, ei, dup_block->succs)
	     if (esucc != edup)
	       esucc->probability /= rev_scale;
	}
      else if (edup->probability < edup_prob)
	{
	   profile_probability scale
	     = (profile_probability::always () - edup_prob)
	       / (profile_probability::always () - edup->probability);
	  FOR_EACH_EDGE (esucc, ei, dup_block->succs)
	    if (esucc != edup)
	      esucc->probability *= scale;
	}
      if (edup_prob.initialized_p ())
        edup->probability = edup_prob;

      gcc_assert (!dup_block->count.initialized_p ());
      dup_block->count = path_in_count;
    }

  if (path_in_count == profile_count::zero ())
    return;

  profile_count final_count = epath->count () - path_out_count;

  /* Now update the original block's count in the
     opposite manner - remove the counts that will flow
     into the duplicated block.  Handle underflow due to precision/
     rounding issues.  */
  epath->src->count -= path_in_count;

  /* Next update this path edge's original and duplicated counts.  We know
     that the duplicated path will have path_out_count flowing
     out of it (in the joiner case this is the count along the duplicated path
     out of the duplicated joiner).  This count can then be removed from the
     original path edge.  */

  edge esucc;
  edge_iterator ei;
  profile_probability epath_prob = final_count.probability_in (epath->src->count);

  if (epath->probability > epath_prob)
    {
       profile_probability rev_scale
	 = (profile_probability::always () - epath->probability)
	   / (profile_probability::always () - epath_prob);
       FOR_EACH_EDGE (esucc, ei, epath->src->succs)
	 if (esucc != epath)
	   esucc->probability /= rev_scale;
    }
  else if (epath->probability < epath_prob)
    {
       profile_probability scale
	 = (profile_probability::always () - epath_prob)
	   / (profile_probability::always () - epath->probability);
      FOR_EACH_EDGE (esucc, ei, epath->src->succs)
	if (esucc != epath)
	  esucc->probability *= scale;
    }
  if (epath_prob.initialized_p ())
    epath->probability = epath_prob;
}
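
/* A hedged arithmetic example of the rescaling above: if EPATH's
   probability drops from 0.5 to a computed epath_prob of 0.2, the
   other outgoing probabilities of EPATH->src are each multiplied by
   (1 - 0.2) / (1 - 0.5) = 1.6, so the block's outgoing probabilities
   still sum to 1.  */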

/* Wire up the outgoing edges from the duplicate blocks and
   update any PHIs as needed.  Also update the profile counts
   on the original and duplicate blocks and edges.  */
void
ssa_fix_duplicate_block_edges (struct redirection_data *rd,
			       ssa_local_info_t *local_info)
{
  bool multi_incomings = (rd->incoming_edges->next != NULL);
  edge e = rd->incoming_edges->e;
  vec<jump_thread_edge *> *path = THREAD_PATH (e);
  edge elast = path->last ()->e;
  profile_count path_in_count = profile_count::zero ();
  profile_count path_out_count = profile_count::zero ();

  /* First determine how much profile count to move from the original
     path to the duplicate path.  This is tricky in the presence of
     a joiner (see comments for compute_path_counts), where some portion
     of the path's counts will flow off-path from the joiner.  In the
     non-joiner case the path_in_count and path_out_count should be the
     same.  */
  bool has_joiner = compute_path_counts (rd, local_info,
					 &path_in_count, &path_out_count);

  for (unsigned int count = 0, i = 1; i < path->length (); i++)
    {
      edge epath = (*path)[i]->e;

      /* If we were threading through a joiner block, then we want
	 to keep its control statement and redirect an outgoing edge.
	 Else we want to remove the control statement & edges, then create
	 a new outgoing edge.  In both cases we may need to update PHIs.  */
      if ((*path)[i]->type == EDGE_COPY_SRC_JOINER_BLOCK)
	{
	  edge victim;
	  edge e2;

	  gcc_assert (has_joiner);

	  /* This updates the PHIs at the destination of the duplicate
	     block.  Pass 0 instead of i if we are threading a path which
	     has multiple incoming edges.  */
	  update_destination_phis (local_info->bb, rd->dup_blocks[count],
				   path, multi_incomings ? 0 : i);

	  /* Find the edge from the duplicate block to the block we're
	     threading through.  That's the edge we want to redirect.  */
	  victim = find_edge (rd->dup_blocks[count], (*path)[i]->e->dest);

	  /* If there are no remaining blocks on the path to duplicate,
	     then redirect VICTIM to the final destination of the jump
	     threading path.  */
	  if (!any_remaining_duplicated_blocks (path, i))
	    {
	      e2 = redirect_edge_and_branch (victim, elast->dest);
	      /* If we redirected the edge, then we need to copy PHI arguments
		 at the target.  If the edge already existed (e2 != victim
		 case), then the PHIs in the target already have the correct
		 arguments.  */
	      if (e2 == victim)
		copy_phi_args (e2->dest, elast, e2,
			       path, multi_incomings ? 0 : i);
	    }
	  else
	    {
	      /* Redirect VICTIM to the next duplicated block in the path.  */
	      e2 = redirect_edge_and_branch (victim, rd->dup_blocks[count + 1]);

	      /* We need to update the PHIs in the next duplicated block.  We
		 want the new PHI args to have the same value as they had
		 in the source of the next duplicate block.

		 Thus, we need to know which edge we traversed into the
		 source of the duplicate.  Furthermore, we may have
		 traversed many edges to reach the source of the duplicate.

		 Walk through the path starting at element I until we
		 hit an edge marked with EDGE_COPY_SRC_BLOCK.  We want
		 the edge from the prior element.  */
	      for (unsigned int j = i + 1; j < path->length (); j++)
		{
		  if ((*path)[j]->type == EDGE_COPY_SRC_BLOCK)
		    {
		      copy_phi_arg_into_existing_phi ((*path)[j - 1]->e, e2);
		      break;
		    }
		}
	    }

	  /* Update the counts of both the original block
	     and path edge, and the duplicates.  The path duplicate's
	     incoming count is the total for all edges
	     incoming to this jump threading path, computed earlier.
	     And we know that the duplicated path will have path_out_count
	     flowing out of it (i.e. along the duplicated path out of the
	     duplicated joiner).  */
	  update_profile (epath, e2, path_in_count, path_out_count);
	}
      else if ((*path)[i]->type == EDGE_COPY_SRC_BLOCK)
	{
	  remove_ctrl_stmt_and_useless_edges (rd->dup_blocks[count], NULL);
	  create_edge_and_update_destination_phis (rd, rd->dup_blocks[count],
						   multi_incomings ? 0 : i);
	  if (count == 1)
	    single_succ_edge (rd->dup_blocks[1])->aux = NULL;

	  /* Update the counts of both the original block
	     and path edge, and the duplicates.  Since we are now after
	     any joiner that may have existed on the path, the count
	     flowing along the duplicated threaded path is path_out_count.
	     If we didn't have a joiner, then path_out_count is the same
	     as path_in_count, the total count flowing into the thread
	     path.  If we had a joiner, it was adjusted earlier to the
	     count along the duplicated joiner path edge.  */
	  update_profile (epath, EDGE_SUCC (rd->dup_blocks[count], 0),
			  path_out_count, path_out_count);
	}
      else
	{
	  /* No copy case.  In this case we don't have an equivalent block
	     on the duplicated thread path to update, but we do need
	     to remove the portion of the counts that were moved
	     to the duplicated path from the counts flowing through
	     this block on the original path.  Since all the no-copy edges
	     are after any joiner, the removed count is the same as
	     path_out_count.

	     If we didn't have a joiner, then path_out_count is the same
	     as path_in_count, the total count flowing into the thread
	     path.  If we had a joiner, it was adjusted earlier to the
	     count along the duplicated joiner path edge.  */
	   update_profile (epath, NULL, path_out_count, path_out_count);
	}

      /* Increment the index into the duplicated path when we processed
	 a duplicated block.  */
      if ((*path)[i]->type == EDGE_COPY_SRC_JOINER_BLOCK
	  || (*path)[i]->type == EDGE_COPY_SRC_BLOCK)
	{
	  count++;
	}
    }
}

/* Hash table traversal callback routine to create duplicate blocks.  */

int
ssa_create_duplicates (struct redirection_data **slot,
		       ssa_local_info_t *local_info)
{
  struct redirection_data *rd = *slot;

  /* The second duplicated block in a jump threading path is specific
     to the path.  So it gets stored in RD rather than in LOCAL_DATA.

     Each time we're called, we have to look through the path and see
     if a second block needs to be duplicated.

     Note the search starts with the third edge on the path.  The first
     edge is the incoming edge, the second edge always has its source
     duplicated.  Thus we start our search with the third edge.  */
  vec<jump_thread_edge *> *path = rd->path;
  for (unsigned int i = 2; i < path->length (); i++)
    {
      if ((*path)[i]->type == EDGE_COPY_SRC_BLOCK
	  || (*path)[i]->type == EDGE_COPY_SRC_JOINER_BLOCK)
	{
	  create_block_for_threading ((*path)[i]->e->src, rd, 1,
				      &local_info->duplicate_blocks);
	  break;
	}
    }

  /* Create a template block if we have not done so already.  Otherwise
     use the template to create a new block.  */
  if (local_info->template_block == NULL)
    {
      create_block_for_threading ((*path)[1]->e->src, rd, 0,
				  &local_info->duplicate_blocks);
      local_info->template_block = rd->dup_blocks[0];

      /* We do not create any outgoing edges for the template.  We will
	 take care of that in a later traversal.  That way we do not
	 create edges that are going to just be deleted.  */
    }
  else
    {
      create_block_for_threading (local_info->template_block, rd, 0,
				  &local_info->duplicate_blocks);

      /* Go ahead and wire up outgoing edges and update PHIs for the duplicate
	 block.  */
      ssa_fix_duplicate_block_edges (rd, local_info);
    }

  /* Keep walking the hash table.  */
  return 1;
}

/* We did not create any outgoing edges for the template block during
   block creation.  This hash table traversal callback creates the
   outgoing edge for the template block.  */

inline int
ssa_fixup_template_block (struct redirection_data **slot,
			  ssa_local_info_t *local_info)
{
  struct redirection_data *rd = *slot;

  /* If this is the template block, halt the traversal after updating
     it appropriately.

     If we were threading through a joiner block, then we want
     to keep its control statement and redirect an outgoing edge.
     Else we want to remove the control statement & edges, then create
     a new outgoing edge.  In both cases we may need to update PHIs.  */
  if (rd->dup_blocks[0] && rd->dup_blocks[0] == local_info->template_block)
    {
      ssa_fix_duplicate_block_edges (rd, local_info);
      return 0;
    }

  return 1;
}

/* Hash table traversal callback to redirect each incoming edge
   associated with this hash table element to its new destination.  */

int
ssa_redirect_edges (struct redirection_data **slot,
		    ssa_local_info_t *local_info)
{
  struct redirection_data *rd = *slot;
  struct el *next, *el;

  /* Walk over all the incoming edges associated with this hash table
     entry.  */
  for (el = rd->incoming_edges; el; el = next)
    {
      edge e = el->e;
      vec<jump_thread_edge *> *path = THREAD_PATH (e);

      /* Go ahead and free this element from the list.  Doing this now
	 avoids the need for another list walk when we destroy the hash
	 table.  */
      next = el->next;
      free (el);

      thread_stats.num_threaded_edges++;

      if (rd->dup_blocks[0])
	{
	  edge e2;

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file, "  Threaded jump %d --> %d to %d\n",
		     e->src->index, e->dest->index, rd->dup_blocks[0]->index);

	  /* Redirect the incoming edge (possibly to the joiner block) to the
	     appropriate duplicate block.  */
	  e2 = redirect_edge_and_branch (e, rd->dup_blocks[0]);
	  gcc_assert (e == e2);
	  flush_pending_stmts (e2);
	}

      /* Go ahead and clear E->aux.  It's not needed anymore and failure
	 to clear it will cause all kinds of unpleasant problems later.  */
      delete_jump_thread_path (path);
      e->aux = NULL;
    }

  /* Indicate that we actually threaded one or more jumps.  */
  if (rd->incoming_edges)
    local_info->jumps_threaded = true;

  return 1;
}

/* Return true if this block has no executable statements other than
   a simple control flow instruction.  When the number of outgoing edges
   is one, this is equivalent to a "forwarder" block.  */

static bool
redirection_block_p (basic_block bb)
{
  gimple_stmt_iterator gsi;

  /* Advance to the first executable statement.  */
  gsi = gsi_start_bb (bb);
  while (!gsi_end_p (gsi)
	 && (gimple_code (gsi_stmt (gsi)) == GIMPLE_LABEL
	     || is_gimple_debug (gsi_stmt (gsi))
	     || gimple_nop_p (gsi_stmt (gsi))
	     || gimple_clobber_p (gsi_stmt (gsi))))
    gsi_next (&gsi);

  /* Check if this is an empty block.  */
  if (gsi_end_p (gsi))
    return true;

  /* Test that we've reached the terminating control statement.  */
  return gsi_stmt (gsi)
	 && (gimple_code (gsi_stmt (gsi)) == GIMPLE_COND
	     || gimple_code (gsi_stmt (gsi)) == GIMPLE_GOTO
	     || gimple_code (gsi_stmt (gsi)) == GIMPLE_SWITCH);
}
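
/* As a hedged illustration, a block whose only statements are labels,
   debug statements, clobbers and a terminating conditional such as

     if (x_1 > 10) goto <bb 6>; else goto <bb 7>;

   satisfies redirection_block_p, whereas a block that also contains a
   real side effect (say, a store) does not.  The SSA name and block
   numbers are invented.  */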
1254 
1255 /* BB is a block which ends with a COND_EXPR or SWITCH_EXPR and when BB
1256    is reached via one or more specific incoming edges, we know which
1257    outgoing edge from BB will be traversed.
1258 
1259    We want to redirect those incoming edges to the target of the
1260    appropriate outgoing edge.  Doing so avoids a conditional branch
1261    and may expose new optimization opportunities.  Note that we have
1262    to update dominator tree and SSA graph after such changes.
1263 
1264    The key to keeping the SSA graph update manageable is to duplicate
1265    the side effects occurring in BB so that those side effects still
1266    occur on the paths which bypass BB after redirecting edges.
1267 
1268    We accomplish this by creating duplicates of BB and arranging for
1269    the duplicates to unconditionally pass control to one specific
1270    successor of BB.  We then revector the incoming edges into BB to
1271    the appropriate duplicate of BB.
1272 
1273    If NOLOOP_ONLY is true, we only perform the threading as long as it
1274    does not affect the structure of the loops in a nontrivial way.
1275 
1276    If JOINERS is true, then thread through joiner blocks as well.  */
1277 
1278 static bool
thread_block_1(basic_block bb,bool noloop_only,bool joiners)1279 thread_block_1 (basic_block bb, bool noloop_only, bool joiners)
1280 {
1281   /* E is an incoming edge into BB that we may or may not want to
1282      redirect to a duplicate of BB.  */
1283   edge e, e2;
1284   edge_iterator ei;
1285   ssa_local_info_t local_info;
1286 
1287   local_info.duplicate_blocks = BITMAP_ALLOC (NULL);
1288   local_info.need_profile_correction = false;
1289 
1290   /* To avoid scanning a linear array for the element we need we instead
1291      use a hash table.  For normal code there should be no noticeable
1292      difference.  However, if we have a block with a large number of
1293      incoming and outgoing edges such linear searches can get expensive.  */
1294   redirection_data
1295     = new hash_table<struct redirection_data> (EDGE_COUNT (bb->succs));
1296 
1297   /* Record each unique threaded destination into a hash table for
1298      efficient lookups.  */
1299   edge last = NULL;
1300   FOR_EACH_EDGE (e, ei, bb->preds)
1301     {
1302       if (e->aux == NULL)
1303 	continue;
1304 
1305       vec<jump_thread_edge *> *path = THREAD_PATH (e);
1306 
1307       if (((*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK && !joiners)
1308 	  || ((*path)[1]->type == EDGE_COPY_SRC_BLOCK && joiners))
1309 	continue;
1310 
1311       e2 = path->last ()->e;
1312       if (!e2 || noloop_only)
1313 	{
1314 	  /* If NOLOOP_ONLY is true, we only allow threading through the
1315 	     header of a loop to exit edges.  */
1316 
1317 	  /* One case occurs when there was loop header buried in a jump
1318 	     threading path that crosses loop boundaries.  We do not try
1319 	     and thread this elsewhere, so just cancel the jump threading
1320 	     request by clearing the AUX field now.  */
1321 	  if (bb->loop_father != e2->src->loop_father
1322 	      && (!loop_exit_edge_p (e2->src->loop_father, e2)
1323 		  || flow_loop_nested_p (bb->loop_father,
1324 					 e2->dest->loop_father)))
1325 	    {
1326 	      /* Since this case is not handled by our special code
1327 		 to thread through a loop header, we must explicitly
1328 		 cancel the threading request here.  */
1329 	      delete_jump_thread_path (path);
1330 	      e->aux = NULL;
1331 	      continue;
1332 	    }
1333 
1334 	  /* Another case occurs when trying to thread through our
1335 	     own loop header, possibly from inside the loop.  We will
1336 	     thread these later.  */
1337 	  unsigned int i;
1338 	  for (i = 1; i < path->length (); i++)
1339 	    {
1340 	      if ((*path)[i]->e->src == bb->loop_father->header
1341 		  && (!loop_exit_edge_p (bb->loop_father, e2)
1342 		      || (*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK))
1343 		break;
1344 	    }
1345 
1346 	  if (i != path->length ())
1347 	    continue;
1348 
1349 	  /* Loop parallelization can be confused by the result of
1350 	     threading through the loop exit test back into the loop.
1351 	     However, theading those jumps seems to help other codes.
1352 
1353 	     I have been unable to find anything related to the shape of
1354 	     the CFG, the contents of the affected blocks, etc which would
1355 	     allow a more sensible test than what we're using below which
1356 	     merely avoids the optimization when parallelizing loops.  */
1357 	  if (flag_tree_parallelize_loops > 1)
1358 	    {
1359 	      for (i = 1; i < path->length (); i++)
1360 	        if (bb->loop_father == e2->src->loop_father
1361 		    && loop_exits_from_bb_p (bb->loop_father,
1362 					     (*path)[i]->e->src)
1363 		    && !loop_exit_edge_p (bb->loop_father, e2))
1364 		  break;
1365 
1366 	      if (i != path->length ())
1367 		{
1368 		  delete_jump_thread_path (path);
1369 		  e->aux = NULL;
1370 		  continue;
1371 		}
1372 	    }
1373 	}
1374 
1375       /* Insert the outgoing edge into the hash table if it is not
1376 	 already in the hash table.  */
1377       lookup_redirection_data (e, INSERT);
1378 
1379       /* When we have thread paths through a common joiner with different
1380 	 final destinations, we may need corrections to deal with
1381 	 profile insanities.  See the big comment before compute_path_counts.  */
1382       if ((*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK)
1383 	{
1384 	  if (!last)
1385 	    last = e2;
1386 	  else if (e2 != last)
1387 	    local_info.need_profile_correction = true;
1388 	}
1389     }
1390 
1391   /* We do not update dominance info.  */
1392   free_dominance_info (CDI_DOMINATORS);
1393 
1394   /* We know we only thread through the loop header to loop exits.
1395      Let the basic block duplication hook know we are not creating
1396      a multiple entry loop.  */
1397   if (noloop_only
1398       && bb == bb->loop_father->header)
1399     set_loop_copy (bb->loop_father, loop_outer (bb->loop_father));
1400 
1401   /* Now create duplicates of BB.
1402 
1403      Note that for a block with a high outgoing degree we can waste
1404      a lot of time and memory creating and destroying useless edges.
1405 
1406      So we first duplicate BB and remove the control structure at the
1407      tail of the duplicate as well as all outgoing edges from the
1408      duplicate.  We then use that duplicate block as a template for
1409      the rest of the duplicates.  */
1410   local_info.template_block = NULL;
1411   local_info.bb = bb;
1412   local_info.jumps_threaded = false;
1413   redirection_data->traverse <ssa_local_info_t *, ssa_create_duplicates>
1414 			    (&local_info);
1415 
1416   /* The template does not have an outgoing edge.  Create that outgoing
1417      edge and update PHI nodes at the edge's target as necessary.
1418 
1419      We do this after creating all the duplicates to avoid creating
1420      unnecessary edges.  */
1421   redirection_data->traverse <ssa_local_info_t *, ssa_fixup_template_block>
1422 			    (&local_info);
1423 
1424   /* The hash table traversals above created the duplicate blocks (and the
1425      statements within the duplicate blocks).  This loop creates PHI nodes for
1426      the duplicated blocks and redirects the incoming edges into BB to reach
1427      the duplicates of BB.  */
1428   redirection_data->traverse <ssa_local_info_t *, ssa_redirect_edges>
1429 			    (&local_info);
1430 
1431   /* Done with this block.  Clear REDIRECTION_DATA.  */
1432   delete redirection_data;
1433   redirection_data = NULL;
1434 
1435   if (noloop_only
1436       && bb == bb->loop_father->header)
1437     set_loop_copy (bb->loop_father, NULL);
1438 
1439   BITMAP_FREE (local_info.duplicate_blocks);
1440   local_info.duplicate_blocks = NULL;
1441 
1442   /* Indicate to our caller whether or not any jumps were threaded.  */
1443   return local_info.jumps_threaded;
1444 }
1445 
1446 /* Wrapper for thread_block_1 so that we can first handle jump
1447    thread paths which do not involve copying joiner blocks, then
1448    handle jump thread paths which have joiner blocks.
1449 
1450    By doing things this way we can be as aggressive as possible and
1451    not worry that copying a joiner block will create a jump threading
1452    opportunity.  */
1453 
1454 static bool
1455 thread_block (basic_block bb, bool noloop_only)
1456 {
1457   bool retval;
1458   retval = thread_block_1 (bb, noloop_only, false);
1459   retval |= thread_block_1 (bb, noloop_only, true);
1460   return retval;
1461 }
1462 
1463 /* Callback for dfs_enumerate_from.  Returns true if BB is different
1464    from STOP and DBDS_CE_STOP.  */
1465 
1466 static basic_block dbds_ce_stop;
1467 static bool
1468 dbds_continue_enumeration_p (const_basic_block bb, const void *stop)
1469 {
1470   return (bb != (const_basic_block) stop
1471 	  && bb != dbds_ce_stop);
1472 }
1473 
1474 /* Evaluates the dominance relationship between the latch of LOOP and BB,
1475    and returns the resulting state.  */
1476 
1477 enum bb_dom_status
1478 determine_bb_domination_status (struct loop *loop, basic_block bb)
1479 {
1480   basic_block *bblocks;
1481   unsigned nblocks, i;
1482   bool bb_reachable = false;
1483   edge_iterator ei;
1484   edge e;
1485 
1486   /* This function assumes BB is a successor of LOOP->header.
1487      If that is not the case return DOMST_NONDOMINATING which
1488      is always safe.  */
1489     {
1490       bool ok = false;
1491 
1492       FOR_EACH_EDGE (e, ei, bb->preds)
1493 	{
1494 	  if (e->src == loop->header)
1495 	    {
1496 	      ok = true;
1497 	      break;
1498 	    }
1499 	}
1500 
1501       if (!ok)
1502 	return DOMST_NONDOMINATING;
1503     }
1504 
1505   if (bb == loop->latch)
1506     return DOMST_DOMINATING;
1507 
1508   /* Check that BB dominates LOOP->latch, and that it is back-reachable
1509      from it.  */
1510 
1511   bblocks = XCNEWVEC (basic_block, loop->num_nodes);
1512   dbds_ce_stop = loop->header;
1513   nblocks = dfs_enumerate_from (loop->latch, 1, dbds_continue_enumeration_p,
1514 				bblocks, loop->num_nodes, bb);
1515   for (i = 0; i < nblocks; i++)
1516     FOR_EACH_EDGE (e, ei, bblocks[i]->preds)
1517       {
1518 	if (e->src == loop->header)
1519 	  {
1520 	    free (bblocks);
1521 	    return DOMST_NONDOMINATING;
1522 	  }
1523 	if (e->src == bb)
1524 	  bb_reachable = true;
1525       }
1526 
1527   free (bblocks);
1528   return (bb_reachable ? DOMST_DOMINATING : DOMST_LOOP_BROKEN);
1529 }
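
/* As a hedged illustration (hypothetical blocks, not tied to a testcase):
   if LOOP has header H and latch L, and every path from H to L passes
   through BB, the backward walk above never meets an edge from H into
   the enumerated set and does see BB as a predecessor, so we return
   DOMST_DOMINATING.  If H has another successor that reaches L while
   avoiding BB, some enumerated block has H as a predecessor and we
   return DOMST_NONDOMINATING.  If BB cannot reach L at all, the loop
   has effectively been broken and we return DOMST_LOOP_BROKEN.  */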
1530 
1531 /* Thread jumps through the header of LOOP.  Returns true if cfg changes.
1532    If MAY_PEEL_LOOP_HEADERS is false, we avoid threading from entry edges
1533    to the inside of the loop.  */
1534 
1535 static bool
1536 thread_through_loop_header (struct loop *loop, bool may_peel_loop_headers)
1537 {
1538   basic_block header = loop->header;
1539   edge e, tgt_edge, latch = loop_latch_edge (loop);
1540   edge_iterator ei;
1541   basic_block tgt_bb, atgt_bb;
1542   enum bb_dom_status domst;
1543 
1544   /* We have already threaded through headers to exits, so all the threading
1545      requests now are to the inside of the loop.  We need to avoid creating
1546      irreducible regions (i.e., loops with more than one entry block), and
1547      also loops with several latch edges, or new subloops of the loop (although
1548      there are cases where it might be appropriate, it is difficult to decide,
1549      and doing it wrongly may confuse other optimizers).
1550 
1551      We could handle more general cases here.  However, the intention is to
1552      preserve some information about the loop, which is impossible if its
1553      structure changes significantly in a way that is not well understood.
1554      Thus we only handle a few important special cases, in which updating
1555      the loop-carried information should also be feasible:
1556 
1557      1) Propagation of the latch edge to a block that dominates the latch
1558 	block of the loop.  This aims to handle the following idiom:
1559 
1560 	first = 1;
1561 	while (1)
1562 	  {
1563 	    if (first)
1564 	      initialize;
1565 	    first = 0;
1566 	    body;
1567 	  }
1568 
1569 	After threading the latch edge, this becomes
1570 
1571 	first = 1;
1572 	if (first)
1573 	  initialize;
1574 	while (1)
1575 	  {
1576 	    first = 0;
1577 	    body;
1578 	  }
1579 
1580 	The original header of the loop is moved out of it, and we may thread
1581 	the remaining edges through it without further constraints.
1582 
1583      2) All entry edges are propagated to a single basic block that dominates
1584 	the latch block of the loop.  This aims to handle the following idiom
1585 	(normally created for "for" loops):
1586 
1587 	i = 0;
1588 	while (1)
1589 	  {
1590 	    if (i >= 100)
1591 	      break;
1592 	    body;
1593 	    i++;
1594 	  }
1595 
1596 	This becomes
1597 
1598 	i = 0;
1599 	while (1)
1600 	  {
1601 	    body;
1602 	    i++;
1603 	    if (i >= 100)
1604 	      break;
1605 	  }
1606      */
1607 
1608   /* Threading through the header won't improve the code if the header has just
1609      one successor.  */
1610   if (single_succ_p (header))
1611     goto fail;
1612 
1613   if (!may_peel_loop_headers && !redirection_block_p (loop->header))
1614     goto fail;
1615   else
1616     {
1617       tgt_bb = NULL;
1618       tgt_edge = NULL;
1619       FOR_EACH_EDGE (e, ei, header->preds)
1620 	{
1621 	  if (!e->aux)
1622 	    {
1623 	      if (e == latch)
1624 		continue;
1625 
1626 	      /* If the latch is not threaded, and there is a header
1627 		 edge that is not threaded, we would create a loop
1628 		 with multiple entries.  */
1629 	      goto fail;
1630 	    }
1631 
1632 	  vec<jump_thread_edge *> *path = THREAD_PATH (e);
1633 
1634 	  if ((*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK)
1635 	    goto fail;
1636 	  tgt_edge = (*path)[1]->e;
1637 	  atgt_bb = tgt_edge->dest;
1638 	  if (!tgt_bb)
1639 	    tgt_bb = atgt_bb;
1640 	  /* Two targets of threading would make us create a loop
1641 	     with multiple entries.  */
1642 	  else if (tgt_bb != atgt_bb)
1643 	    goto fail;
1644 	}
1645 
1646       if (!tgt_bb)
1647 	{
1648 	  /* There are no threading requests.  */
1649 	  return false;
1650 	}
1651 
1652       /* Redirecting to empty loop latch is useless.  */
1653       if (tgt_bb == loop->latch
1654 	  && empty_block_p (loop->latch))
1655 	goto fail;
1656     }
1657 
1658   /* The target block must dominate the loop latch, otherwise we would be
1659      creating a subloop.  */
1660   domst = determine_bb_domination_status (loop, tgt_bb);
1661   if (domst == DOMST_NONDOMINATING)
1662     goto fail;
1663   if (domst == DOMST_LOOP_BROKEN)
1664     {
1665       /* If the loop ceased to exist, mark it as such, and thread through its
1666 	 original header.  */
1667       mark_loop_for_removal (loop);
1668       return thread_block (header, false);
1669     }
1670 
1671   if (tgt_bb->loop_father->header == tgt_bb)
1672     {
1673       /* If the target of the threading is a header of a subloop, we need
1674 	 to create a preheader for it, so that the headers of the two loops
1675 	 do not merge.  */
1676       if (EDGE_COUNT (tgt_bb->preds) > 2)
1677 	{
1678 	  tgt_bb = create_preheader (tgt_bb->loop_father, 0);
1679 	  gcc_assert (tgt_bb != NULL);
1680 	}
1681       else
1682 	tgt_bb = split_edge (tgt_edge);
1683     }
1684 
1685   basic_block new_preheader;
1686 
1687   /* Now consider the case where entry edges are redirected to the new
1688      entry block.  Remember one entry edge, so that we can find the new
1689      preheader (its destination after threading).  */
1690   FOR_EACH_EDGE (e, ei, header->preds)
1691     {
1692       if (e->aux)
1693 	break;
1694     }
1695 
1696   /* The duplicate of the header is the new preheader of the loop.  Ensure
1697      that it is placed correctly in the loop hierarchy.  */
1698   set_loop_copy (loop, loop_outer (loop));
1699 
1700   thread_block (header, false);
1701   set_loop_copy (loop, NULL);
1702   new_preheader = e->dest;
1703 
1704   /* Create the new latch block.  This is always necessary, as the latch
1705      must have only a single successor, but the original header had at
1706      least two successors.  */
1707   loop->latch = NULL;
1708   mfb_kj_edge = single_succ_edge (new_preheader);
1709   loop->header = mfb_kj_edge->dest;
1710   latch = make_forwarder_block (tgt_bb, mfb_keep_just, NULL);
1711   loop->header = latch->dest;
1712   loop->latch = latch->src;
1713   return true;
1714 
1715 fail:
1716   /* We failed to thread anything.  Cancel the requests.  */
1717   FOR_EACH_EDGE (e, ei, header->preds)
1718     {
1719       vec<jump_thread_edge *> *path = THREAD_PATH (e);
1720 
1721       if (path)
1722 	{
1723 	  delete_jump_thread_path (path);
1724 	  e->aux = NULL;
1725 	}
1726     }
1727   return false;
1728 }
1729 
1730 /* E1 and E2 are edges into the same basic block.  Return TRUE if the
1731    PHI arguments associated with those edges are equal or there are no
1732    PHI arguments, otherwise return FALSE.  */
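
/* For example (a hypothetical PHI, for illustration only): if edges E1,
   E2 and E3 enter a block containing x_1 = PHI <a_2(E1), a_2(E2), b_3(E3)>,
   then the arguments on E1 and E2 are equal (both a_2) and comparing
   them returns TRUE, whereas comparing E1 against E3 returns FALSE.  */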
1733 
1734 static bool
1735 phi_args_equal_on_edges (edge e1, edge e2)
1736 {
1737   gphi_iterator gsi;
1738   int indx1 = e1->dest_idx;
1739   int indx2 = e2->dest_idx;
1740 
1741   for (gsi = gsi_start_phis (e1->dest); !gsi_end_p (gsi); gsi_next (&gsi))
1742     {
1743       gphi *phi = gsi.phi ();
1744 
1745       if (!operand_equal_p (gimple_phi_arg_def (phi, indx1),
1746 			    gimple_phi_arg_def (phi, indx2), 0))
1747 	return false;
1748     }
1749   return true;
1750 }
1751 
1752 /* Return the number of non-debug statements and non-virtual PHIs in a
1753    block.  */
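
/* E.g., for a hypothetical block containing
     x_1 = PHI <a_2(3), b_3(4)>
     .MEM_6 = PHI <.MEM_4(3), .MEM_5(4)>
     c_7 = x_1 + 1;
     if (c_7 != 0)
   this returns 3: the virtual PHI is skipped, while the real PHI, the
   addition and the conditional each count.  */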
1754 
1755 static unsigned int
1756 count_stmts_and_phis_in_block (basic_block bb)
1757 {
1758   unsigned int num_stmts = 0;
1759 
1760   gphi_iterator gpi;
1761   for (gpi = gsi_start_phis (bb); !gsi_end_p (gpi); gsi_next (&gpi))
1762     if (!virtual_operand_p (PHI_RESULT (gpi.phi ())))
1763       num_stmts++;
1764 
1765   gimple_stmt_iterator gsi;
1766   for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1767     {
1768       gimple *stmt = gsi_stmt (gsi);
1769       if (!is_gimple_debug (stmt))
1770         num_stmts++;
1771     }
1772 
1773   return num_stmts;
1774 }
1775 
1776 
1777 /* Walk through the registered jump threads and convert them into a
1778    form convenient for this pass.
1779 
1780    Any block which has incoming edges threaded to outgoing edges
1781    will have its entry in THREADED_BLOCK set.
1782 
1783    Any threaded edge will have its new outgoing edge stored in the
1784    original edge's AUX field.
1785 
1786    This form avoids the need to walk all the edges in the CFG to
1787    discover blocks which need processing and avoids unnecessary
1788    hash table lookups to map from threaded edge to new target.  */
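
/* A sketch of the resulting representation (hypothetical blocks, assuming
   the usual THREAD_PATH encoding): for a request (A, B), (B, C) the vector
   of jump_thread_edges is stored in the AUX field of edge A->B, and bit B
   is set in THREADED_BLOCKS, since B is the block whose incoming edge is
   being threaded.  */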
1789 
1790 static void
1791 mark_threaded_blocks (bitmap threaded_blocks)
1792 {
1793   unsigned int i;
1794   bitmap_iterator bi;
1795   auto_bitmap tmp;
1796   basic_block bb;
1797   edge e;
1798   edge_iterator ei;
1799 
1800   /* It is possible to have jump threads in which one is a subpath
1801      of the other.  I.e., (A, B), (B, C), (C, D) where B is a joiner
1802      block and (B, C), (C, D) where no joiner block exists.
1803 
1804      When this occurs, ignore the jump thread request with the joiner
1805      block.  It is totally subsumed by the simpler jump thread request.
1806 
1807      This results in less block copying and simpler CFGs.  More importantly,
1808      when we duplicate the joiner block, B, in this case we will create
1809      a new threading opportunity that we wouldn't be able to optimize
1810      until the next jump threading iteration.
1811 
1812      So first convert the jump thread requests which do not require a
1813      joiner block.  */
1814   for (i = 0; i < paths.length (); i++)
1815     {
1816       vec<jump_thread_edge *> *path = paths[i];
1817 
1818       if (path->length () > 1
1819 	  && (*path)[1]->type != EDGE_COPY_SRC_JOINER_BLOCK)
1820 	{
1821 	  edge e = (*path)[0]->e;
1822 	  e->aux = (void *)path;
1823 	  bitmap_set_bit (tmp, e->dest->index);
1824 	}
1825     }
1826 
1827   /* Now iterate again, converting cases where we want to thread
1828      through a joiner block, but only if no other edge on the path
1829      already has a jump thread attached to it.  We do this in two passes,
1830      to avoid situations where the order in the paths vec can hide overlapping
1831      threads (the path is recorded on the incoming edge, so we would miss
1832      cases where the second path starts at a downstream edge on the same
1833      path).  First record all joiner paths, deleting any in the unexpected
1834      case where there is already a path for that incoming edge.  */
1835   for (i = 0; i < paths.length ();)
1836     {
1837       vec<jump_thread_edge *> *path = paths[i];
1838 
1839       if (path->length () > 1
1840 	  && (*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK)
1841 	{
1842 	  /* Attach the path to the starting edge if none is yet recorded.  */
1843 	  if ((*path)[0]->e->aux == NULL)
1844 	    {
1845 	      (*path)[0]->e->aux = path;
1846 	      i++;
1847 	    }
1848 	  else
1849 	    {
1850 	      paths.unordered_remove (i);
1851 	      if (dump_file && (dump_flags & TDF_DETAILS))
1852 		dump_jump_thread_path (dump_file, *path, false);
1853 	      delete_jump_thread_path (path);
1854 	    }
1855 	}
1856       else
1857 	{
1858 	  i++;
1859 	}
1860     }
1861 
1862   /* Second, look for paths that have any other jump thread attached to
1863      them, and either finish converting them or cancel them.  */
1864   for (i = 0; i < paths.length ();)
1865     {
1866       vec<jump_thread_edge *> *path = paths[i];
1867       edge e = (*path)[0]->e;
1868 
1869       if (path->length () > 1
1870 	  && (*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK && e->aux == path)
1871 	{
1872 	  unsigned int j;
1873 	  for (j = 1; j < path->length (); j++)
1874 	    if ((*path)[j]->e->aux != NULL)
1875 	      break;
1876 
1877 	  /* If we iterated through the entire path without exiting the loop,
1878 	     then we are good to go; record it.  */
1879 	  if (j == path->length ())
1880 	    {
1881 	      bitmap_set_bit (tmp, e->dest->index);
1882 	      i++;
1883 	    }
1884 	  else
1885 	    {
1886 	      e->aux = NULL;
1887 	      paths.unordered_remove (i);
1888 	      if (dump_file && (dump_flags & TDF_DETAILS))
1889 		dump_jump_thread_path (dump_file, *path, false);
1890 	      delete_jump_thread_path (path);
1891 	    }
1892 	}
1893       else
1894 	{
1895 	  i++;
1896 	}
1897     }
1898 
1899   /* When optimizing for size, prune all thread paths where statement
1900      duplication is necessary.
1901 
1902      We walk the jump thread path looking for copied blocks.  There are
1903      two types of copied blocks.
1904 
1905        EDGE_COPY_SRC_JOINER_BLOCK is always copied and thus we will
1906        cancel the jump threading request when optimizing for size.
1907 
1908        EDGE_COPY_SRC_BLOCK which is copied, but some of its statements
1909        will be killed by threading.  If threading does not kill all of
1910        its statements, then we should cancel the jump threading request
1911        when optimizing for size.  */
1912   if (optimize_function_for_size_p (cfun))
1913     {
1914       EXECUTE_IF_SET_IN_BITMAP (tmp, 0, i, bi)
1915 	{
1916 	  FOR_EACH_EDGE (e, ei, BASIC_BLOCK_FOR_FN (cfun, i)->preds)
1917 	    if (e->aux)
1918 	      {
1919 		vec<jump_thread_edge *> *path = THREAD_PATH (e);
1920 
1921 		unsigned int j;
1922 		for (j = 1; j < path->length (); j++)
1923 		  {
1924 		    bb = (*path)[j]->e->src;
1925 		    if (redirection_block_p (bb))
1926 		      ;
1927 		    else if ((*path)[j]->type == EDGE_COPY_SRC_JOINER_BLOCK
1928 			     || ((*path)[j]->type == EDGE_COPY_SRC_BLOCK
1929 			         && (count_stmts_and_phis_in_block (bb)
1930 				     != estimate_threading_killed_stmts (bb))))
1931 		      break;
1932 		  }
1933 
1934 		if (j != path->length ())
1935 		  {
1936 		    if (dump_file && (dump_flags & TDF_DETAILS))
1937 		      dump_jump_thread_path (dump_file, *path, 0);
1938 		    delete_jump_thread_path (path);
1939 		    e->aux = NULL;
1940 		  }
1941 		else
1942 		  bitmap_set_bit (threaded_blocks, i);
1943 	      }
1944 	}
1945     }
1946   else
1947     bitmap_copy (threaded_blocks, tmp);
1948 
1949   /* If we have a joiner block (J) which has two successors S1 and S2 and
1950      we are threading though S1 and the final destination of the thread
1951      is S2, then we must verify that any PHI nodes in S2 have the same
1952      PHI arguments for the edge J->S2 and J->S1->...->S2.
1953 
1954      We used to detect this prior to registering the jump thread, but
1955      that prohibits propagation of edge equivalences into non-dominated
1956      PHI nodes as the equivalency test might occur before propagation.
1957 
1958      This must also occur after we truncate any jump threading paths
1959      as this scenario may only show up after truncation.
1960 
1961      This works for now, but will need improvement as part of the FSA
1962      optimization.
1963 
1964      Note since we've moved the thread request data to the edges,
1965      we have to iterate on those rather than the threaded_edges vector.  */
1966   EXECUTE_IF_SET_IN_BITMAP (tmp, 0, i, bi)
1967     {
1968       bb = BASIC_BLOCK_FOR_FN (cfun, i);
1969       FOR_EACH_EDGE (e, ei, bb->preds)
1970 	{
1971 	  if (e->aux)
1972 	    {
1973 	      vec<jump_thread_edge *> *path = THREAD_PATH (e);
1974 	      bool have_joiner = ((*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK);
1975 
1976 	      if (have_joiner)
1977 		{
1978 		  basic_block joiner = e->dest;
1979 		  edge final_edge = path->last ()->e;
1980 		  basic_block final_dest = final_edge->dest;
1981 		  edge e2 = find_edge (joiner, final_dest);
1982 
1983 		  if (e2 && !phi_args_equal_on_edges (e2, final_edge))
1984 		    {
1985 		      delete_jump_thread_path (path);
1986 		      e->aux = NULL;
1987 		    }
1988 		}
1989 	    }
1990 	}
1991     }
1992 
1993   /* Look for jump threading paths which cross multiple loop headers.
1994 
1995      The code to thread through loop headers will change the CFG in ways
1996      that invalidate the cached loop iteration information.  So we must
1997      detect that case and wipe the cached information.  */
1998   EXECUTE_IF_SET_IN_BITMAP (tmp, 0, i, bi)
1999     {
2000       basic_block bb = BASIC_BLOCK_FOR_FN (cfun, i);
2001       FOR_EACH_EDGE (e, ei, bb->preds)
2002 	{
2003 	  if (e->aux)
2004 	    {
2005 	      vec<jump_thread_edge *> *path = THREAD_PATH (e);
2006 
2007 	      for (unsigned int i = 0, crossed_headers = 0;
2008 		   i < path->length ();
2009 		   i++)
2010 		{
2011 		  basic_block dest = (*path)[i]->e->dest;
2012 		  basic_block src = (*path)[i]->e->src;
2013 		  /* If we enter a loop.  */
2014 		  if (flow_loop_nested_p (src->loop_father, dest->loop_father))
2015 		    ++crossed_headers;
2016 		  /* If we step from a block outside an irreducible region
2017 		     to a block inside an irreducible region, then we have
2018 		     crossed into a loop.  */
2019 		  else if (! (src->flags & BB_IRREDUCIBLE_LOOP)
2020 			   && (dest->flags & BB_IRREDUCIBLE_LOOP))
2021 		      ++crossed_headers;
2022 		  if (crossed_headers > 1)
2023 		    {
2024 		      vect_free_loop_info_assumptions
2025 			((*path)[path->length () - 1]->e->dest->loop_father);
2026 		      break;
2027 		    }
2028 		}
2029 	    }
2030 	}
2031     }
2032 }
2033 
2034 
2035 /* Verify that the REGION is a valid jump thread.  A jump thread is a special
2036    case of a SEME (Single Entry Multiple Exits) region in which all nodes in
2037    the REGION have exactly one incoming edge.  The only exception is the first
2038    block, which may not have been connected to the rest of the cfg yet.  */
2039 
2040 DEBUG_FUNCTION void
2041 verify_jump_thread (basic_block *region, unsigned n_region)
2042 {
2043   for (unsigned i = 0; i < n_region; i++)
2044     gcc_assert (EDGE_COUNT (region[i]->preds) <= 1);
2045 }
2046 
2047 /* Return true when BB is one of the first N items in BBS.  */
2048 
2049 static inline bool
2050 bb_in_bbs (basic_block bb, basic_block *bbs, int n)
2051 {
2052   for (int i = 0; i < n; i++)
2053     if (bb == bbs[i])
2054       return true;
2055 
2056   return false;
2057 }
2058 
2059 DEBUG_FUNCTION void
2060 debug_path (FILE *dump_file, int pathno)
2061 {
2062   vec<jump_thread_edge *> *p = paths[pathno];
2063   fprintf (dump_file, "path: ");
2064   for (unsigned i = 0; i < p->length (); ++i)
2065     fprintf (dump_file, "%d -> %d, ",
2066 	     (*p)[i]->e->src->index, (*p)[i]->e->dest->index);
2067   fprintf (dump_file, "\n");
2068 }
2069 
2070 DEBUG_FUNCTION void
2071 debug_all_paths ()
2072 {
2073   for (unsigned i = 0; i < paths.length (); ++i)
2074     debug_path (stderr, i);
2075 }
2076 
2077 /* Rewire a jump_thread_edge so that the source block is now a
2078    threaded source block.
2079 
2080    PATH_NUM is an index into the global path table PATHS.
2081    EDGE_NUM is the jump thread edge number into said path.
2082 
2083    Returns TRUE if we were able to successfully rewire the edge.  */
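
/* For instance (hypothetical block numbers): if block 6 was duplicated
   as 6' while realizing an earlier path, a remaining path containing
   the edge 6 -> 8 is rewired to use 6' -> 8, provided such an edge
   exists in the updated CFG; otherwise we give up on the candidate.  */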
2084 
2085 static bool
2086 rewire_first_differing_edge (unsigned path_num, unsigned edge_num)
2087 {
2088   vec<jump_thread_edge *> *path = paths[path_num];
2089   edge &e = (*path)[edge_num]->e;
2090   if (dump_file && (dump_flags & TDF_DETAILS))
2091     fprintf (dump_file, "rewiring edge candidate: %d -> %d\n",
2092 	     e->src->index, e->dest->index);
2093   basic_block src_copy = get_bb_copy (e->src);
2094   if (src_copy == NULL)
2095     {
2096       if (dump_file && (dump_flags & TDF_DETAILS))
2097 	fprintf (dump_file, "ignoring candidate: there is no src COPY\n");
2098       return false;
2099     }
2100   edge new_edge = find_edge (src_copy, e->dest);
2101   /* If the previously threaded paths created a flow graph where we
2102      can no longer figure out where to go, give up.  */
2103   if (new_edge == NULL)
2104     {
2105       if (dump_file && (dump_flags & TDF_DETAILS))
2106 	fprintf (dump_file, "ignoring candidate: we lost our way\n");
2107       return false;
2108     }
2109   e = new_edge;
2110   return true;
2111 }
2112 
2113 /* After an FSM path has been jump threaded, adjust the remaining FSM
2114    paths that are subsets of this path, so these paths can be safely
2115    threaded within the context of the new threaded path.
2116 
2117    For example, suppose we have just threaded:
2118 
2119    5 -> 6 -> 7 -> 8 -> 12	=>	5 -> 6' -> 7' -> 8' -> 12'
2120 
2121    And we have an upcoming threading candidate:
2122    5 -> 6 -> 7 -> 8 -> 15 -> 20
2123 
2124    This function adjusts the upcoming path into:
2125    8' -> 15 -> 20
2126 
2127    CURR_PATH_NUM is an index into the global paths table.  It
2128    specifies the path that was just threaded.  */
2129 
2130 static void
2131 adjust_paths_after_duplication (unsigned curr_path_num)
2132 {
2133   vec<jump_thread_edge *> *curr_path = paths[curr_path_num];
2134   gcc_assert ((*curr_path)[0]->type == EDGE_FSM_THREAD);
2135 
2136   if (dump_file && (dump_flags & TDF_DETAILS))
2137     {
2138       fprintf (dump_file, "just threaded: ");
2139       debug_path (dump_file, curr_path_num);
2140     }
2141 
2142   /* Iterate through all the other paths and adjust them.  */
2143   for (unsigned cand_path_num = 0; cand_path_num < paths.length (); )
2144     {
2145       if (cand_path_num == curr_path_num)
2146 	{
2147 	  ++cand_path_num;
2148 	  continue;
2149 	}
2150       /* Make sure the candidate to adjust starts with the same path
2151 	 as the recently threaded path and is an FSM thread.  */
2152       vec<jump_thread_edge *> *cand_path = paths[cand_path_num];
2153       if ((*cand_path)[0]->type != EDGE_FSM_THREAD
2154 	  || (*cand_path)[0]->e != (*curr_path)[0]->e)
2155 	{
2156 	  ++cand_path_num;
2157 	  continue;
2158 	}
2159       if (dump_file && (dump_flags & TDF_DETAILS))
2160 	{
2161 	  fprintf (dump_file, "adjusting candidate: ");
2162 	  debug_path (dump_file, cand_path_num);
2163 	}
2164 
2165       /* Chop off from the candidate path any prefix it shares with
2166 	 the recently threaded path.  */
2167       unsigned minlength = MIN (curr_path->length (), cand_path->length ());
2168       unsigned j;
2169       for (j = 0; j < minlength; ++j)
2170 	{
2171 	  edge cand_edge = (*cand_path)[j]->e;
2172 	  edge curr_edge = (*curr_path)[j]->e;
2173 
2174 	  /* Once the prefix no longer matches, adjust the first
2175 	     non-matching edge to point from an adjusted edge to
2176 	     wherever it was going.  */
2177 	  if (cand_edge != curr_edge)
2178 	    {
2179 	      gcc_assert (cand_edge->src == curr_edge->src);
2180 	      if (!rewire_first_differing_edge (cand_path_num, j))
2181 		goto remove_candidate_from_list;
2182 	      break;
2183 	    }
2184 	}
2185       if (j == minlength)
2186 	{
2187 	  /* If we walked the entire shared prefix without finding any
2188 	     differing edges, then the first edge to adjust is the one
2189 	     just after MINLENGTH.  */
2190 	  if (cand_path->length () > minlength)
2191 	    {
2192 	      if (!rewire_first_differing_edge (cand_path_num, j))
2193 		goto remove_candidate_from_list;
2194 	    }
2195 	  else if (dump_file && (dump_flags & TDF_DETAILS))
2196 	    fprintf (dump_file, "adjusting first edge after MINLENGTH.\n");
2197 	}
2198       if (j > 0)
2199 	{
2200 	  /* If we are removing everything, delete the entire candidate.  */
2201 	  if (j == cand_path->length ())
2202 	    {
2203 	    remove_candidate_from_list:
2204 	      if (dump_file && (dump_flags & TDF_DETAILS))
2205 		fprintf (dump_file, "adjusted candidate: [EMPTY]\n");
2206 	      delete_jump_thread_path (cand_path);
2207 	      paths.unordered_remove (cand_path_num);
2208 	      continue;
2209 	    }
2210 	  /* Otherwise, just remove the redundant sub-path.  */
2211 	  cand_path->block_remove (0, j);
2212 	}
2213       if (dump_file && (dump_flags & TDF_DETAILS))
2214 	{
2215 	  fprintf (dump_file, "adjusted candidate: ");
2216 	  debug_path (dump_file, cand_path_num);
2217 	}
2218       ++cand_path_num;
2219     }
2220 }
2221 
2222 /* Duplicates a jump-thread path of N_REGION basic blocks.
2223    The ENTRY edge is redirected to the duplicate of the region.
2224 
2225    Remove the last conditional statement in the last basic block in the REGION,
2226    and create a single fallthru edge pointing to the same destination as the
2227    EXIT edge.
2228 
2229    CURRENT_PATH_NO is an index into the global paths[] table
2230    specifying the jump-thread path.
2231 
2232    Returns false if it is unable to copy the region, true otherwise.  */
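
/* As a hedged illustration (hypothetical block numbers): for a path
   5 -> 6 -> 7 -> 8 -> 12 the caller passes ENTRY = 5->6, EXIT = 8->12
   and REGION = {6, 7, 8} (see thread_through_all_blocks), and on
   success the CFG gains copies 6', 7', 8' wired as
   5 -> 6' -> 7' -> 8' -> 12.  */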
2233 
2234 static bool
2235 duplicate_thread_path (edge entry, edge exit, basic_block *region,
2236 		       unsigned n_region, unsigned current_path_no)
2237 {
2238   unsigned i;
2239   struct loop *loop = entry->dest->loop_father;
2240   edge exit_copy;
2241   edge redirected;
2242   profile_count curr_count;
2243 
2244   if (!can_copy_bbs_p (region, n_region))
2245     return false;
2246 
2247   if (dump_file && (dump_flags & TDF_DETAILS))
2248     {
2249       fprintf (dump_file, "\nabout to thread: ");
2250       debug_path (dump_file, current_path_no);
2251     }
2252 
2253   /* Some sanity checking.  Note that we do not check for all possible
2254      misuses of the function.  I.e., if you ask to copy something weird,
2255      it will work, but the state of the structures probably will not be
2256      correct.  */
2257   for (i = 0; i < n_region; i++)
2258     {
2259       /* We do not handle subloops, i.e. all the blocks must belong to the
2260 	 same loop.  */
2261       if (region[i]->loop_father != loop)
2262 	return false;
2263     }
2264 
2265   initialize_original_copy_tables ();
2266 
2267   set_loop_copy (loop, loop);
2268 
2269   basic_block *region_copy = XNEWVEC (basic_block, n_region);
2270   copy_bbs (region, n_region, region_copy, &exit, 1, &exit_copy, loop,
2271 	    split_edge_bb_loc (entry), false);
2272 
2273   /* Fix up: copy_bbs redirects all edges pointing to copied blocks.  The
2274      following code ensures that all the edges exiting the jump-thread path are
2275      redirected back to the original code: these edges are exceptions
2276      invalidating the property that is propagated by executing all the blocks of
2277      the jump-thread path in order.  */
2278 
2279   curr_count = entry->count ();
2280 
2281   for (i = 0; i < n_region; i++)
2282     {
2283       edge e;
2284       edge_iterator ei;
2285       basic_block bb = region_copy[i];
2286 
2287       /* Watch for an inconsistent profile.  */
2288       if (curr_count > region[i]->count)
2289 	curr_count = region[i]->count;
2290       /* Scale current BB.  */
2291       if (region[i]->count.nonzero_p () && curr_count.initialized_p ())
2292 	{
2293 	  /* In the middle of the path we only scale the frequencies.
2294 	     In the last BB we need to update the probabilities of outgoing
2295 	     edges because we know which one is taken along the threaded path.  */
2296 	  if (i + 1 != n_region)
2297 	    scale_bbs_frequencies_profile_count (region + i, 1,
2298 					         region[i]->count - curr_count,
2299 					         region[i]->count);
2300 	  else
2301 	    update_bb_profile_for_threading (region[i],
2302 					     curr_count,
2303 					     exit);
2304 	  scale_bbs_frequencies_profile_count (region_copy + i, 1, curr_count,
2305 					       region_copy[i]->count);
2306 	}
2307 
2308       if (single_succ_p (bb))
2309 	{
2310 	  /* Make sure the successor is the next node in the path.  */
2311 	  gcc_assert (i + 1 == n_region
2312 		      || region_copy[i + 1] == single_succ_edge (bb)->dest);
2313 	  if (i + 1 != n_region)
2314 	    {
2315 	      curr_count = single_succ_edge (bb)->count ();
2316 	    }
2317 	  continue;
2318 	}
2319 
2320       /* Special case the last block on the path: make sure that it does not
2321 	 jump back on the copied path, including back to itself.  */
2322       if (i + 1 == n_region)
2323 	{
2324 	  FOR_EACH_EDGE (e, ei, bb->succs)
2325 	    if (bb_in_bbs (e->dest, region_copy, n_region))
2326 	      {
2327 		basic_block orig = get_bb_original (e->dest);
2328 		if (orig)
2329 		  redirect_edge_and_branch_force (e, orig);
2330 	      }
2331 	  continue;
2332 	}
2333 
2334       /* Redirect all other edges jumping to non-adjacent blocks back to the
2335 	 original code.  */
2336       FOR_EACH_EDGE (e, ei, bb->succs)
2337 	if (region_copy[i + 1] != e->dest)
2338 	  {
2339 	    basic_block orig = get_bb_original (e->dest);
2340 	    if (orig)
2341 	      redirect_edge_and_branch_force (e, orig);
2342 	  }
2343 	else
2344 	  {
2345 	    curr_count = e->count ();
2346 	  }
2347     }
2348 
2349 
2350   if (flag_checking)
2351     verify_jump_thread (region_copy, n_region);
2352 
2353   /* Remove the last branch in the jump thread path.  */
2354   remove_ctrl_stmt_and_useless_edges (region_copy[n_region - 1], exit->dest);
2355 
2356   /* And fixup the flags on the single remaining edge.  */
2357   edge fix_e = find_edge (region_copy[n_region - 1], exit->dest);
2358   fix_e->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE | EDGE_ABNORMAL);
2359   fix_e->flags |= EDGE_FALLTHRU;
2360 
2361   edge e = make_edge (region_copy[n_region - 1], exit->dest, EDGE_FALLTHRU);
2362 
2363   if (e)
2364     {
2365       rescan_loop_exit (e, true, false);
2366       e->probability = profile_probability::always ();
2367     }
2368 
2369   /* Redirect the entry and add the phi node arguments.  */
2370   if (entry->dest == loop->header)
2371     mark_loop_for_removal (loop);
2372   redirected = redirect_edge_and_branch (entry, get_bb_copy (entry->dest));
2373   gcc_assert (redirected != NULL);
2374   flush_pending_stmts (entry);
2375 
2376   /* Add the other PHI node arguments.  */
2377   add_phi_args_after_copy (region_copy, n_region, NULL);
2378 
2379   free (region_copy);
2380 
2381   adjust_paths_after_duplication (current_path_no);
2382 
2383   free_original_copy_tables ();
2384   return true;
2385 }
2386 
2387 /* Return true when PATH is a valid jump-thread path.  */
2388 
2389 static bool
2390 valid_jump_thread_path (vec<jump_thread_edge *> *path)
2391 {
2392   unsigned len = path->length ();
2393 
2394   /* Check that the path is connected.  */
2395   for (unsigned int j = 0; j < len - 1; j++)
2396     {
2397       edge e = (*path)[j]->e;
2398       if (e->dest != (*path)[j+1]->e->src)
2399 	return false;
2400     }
2401   return true;
2402 }
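
/* E.g., the edge sequence 5->6, 6->7, 7->9 is connected and thus valid,
   whereas 5->6, 7->9 is not, because the destination of the first edge
   (6) is not the source of the second (7).  */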
2403 
2404 /* Remove any queued jump threads that include edge E.
2405 
2406    We don't actually remove them here, just record the edges into a
2407    hash table.  That way we can do the search once per iteration of
2408    DOM/VRP rather than for every case where DOM optimizes away a COND_EXPR.  */
2409 
2410 void
2411 remove_jump_threads_including (edge_def *e)
2412 {
2413   if (!paths.exists ())
2414     return;
2415 
2416   if (!removed_edges)
2417     removed_edges = new hash_table<struct removed_edges> (17);
2418 
2419   edge *slot = removed_edges->find_slot (e, INSERT);
2420   *slot = e;
2421 }
2422 
2423 /* Walk through all blocks and thread incoming edges to the appropriate
2424    outgoing edge for each edge pair recorded in THREADED_EDGES.
2425 
2426    It is the caller's responsibility to fix the dominance information
2427    and rewrite duplicated SSA_NAMEs back into SSA form.
2428 
2429    If MAY_PEEL_LOOP_HEADERS is false, we avoid threading edges through
2430    loop headers if it does not simplify the loop.
2431 
2432    Returns true if one or more edges were threaded, false otherwise.  */
2433 
2434 bool
2435 thread_through_all_blocks (bool may_peel_loop_headers)
2436 {
2437   bool retval = false;
2438   unsigned int i;
2439   struct loop *loop;
2440   auto_bitmap threaded_blocks;
2441   hash_set<edge> visited_starting_edges;
2442 
2443   if (!paths.exists ())
2444     {
2445       retval = false;
2446       goto out;
2447     }
2448 
2449   memset (&thread_stats, 0, sizeof (thread_stats));
2450 
2451   /* Remove any paths that referenced removed edges.  */
2452   if (removed_edges)
2453     for (i = 0; i < paths.length (); )
2454       {
2455 	unsigned int j;
2456 	vec<jump_thread_edge *> *path = paths[i];
2457 
2458 	for (j = 0; j < path->length (); j++)
2459 	  {
2460 	    edge e = (*path)[j]->e;
2461 	    if (removed_edges->find_slot (e, NO_INSERT))
2462 	      break;
2463 	  }
2464 
2465 	if (j != path->length ())
2466 	  {
2467 	    delete_jump_thread_path (path);
2468 	    paths.unordered_remove (i);
2469 	    continue;
2470 	  }
2471 	i++;
2472       }
2473 
2474   /* Jump-thread all FSM threads before other jump-threads.  */
2475   for (i = 0; i < paths.length ();)
2476     {
2477       vec<jump_thread_edge *> *path = paths[i];
2478       edge entry = (*path)[0]->e;
2479 
2480       /* Only code-generate FSM jump-threads in this loop.  */
2481       if ((*path)[0]->type != EDGE_FSM_THREAD)
2482 	{
2483 	  i++;
2484 	  continue;
2485 	}
2486 
2487       /* Do not jump-thread twice from the same starting edge.
2488 
2489 	 Previously we only checked that we weren't threading twice
2490 	 from the same BB, but that was too restrictive.  Imagine a
2491 	 path that starts from GIMPLE_COND(x_123 == 0,...), where both
2492 	 edges out of this conditional yield paths that can be
2493 	 threaded (for example, both lead to an x_123==0 or x_123!=0
	 conditional further down the line).  */
2495       if (visited_starting_edges.contains (entry)
2496 	  /* We may not want to realize this jump thread path for
2497 	     various reasons.  So check it first.  */
2498 	  || !valid_jump_thread_path (path))
2499 	{
2500 	  /* Remove invalid FSM jump-thread paths.  */
2501 	  delete_jump_thread_path (path);
2502 	  paths.unordered_remove (i);
2503 	  continue;
2504 	}
2505 
2506       unsigned len = path->length ();
2507       edge exit = (*path)[len - 1]->e;
2508       basic_block *region = XNEWVEC (basic_block, len - 1);
2509 
2510       for (unsigned int j = 0; j < len - 1; j++)
2511 	region[j] = (*path)[j]->e->dest;
2512 
2513       if (duplicate_thread_path (entry, exit, region, len - 1, i))
2514 	{
2515 	  /* We do not update dominance info.  */
2516 	  free_dominance_info (CDI_DOMINATORS);
2517 	  visited_starting_edges.add (entry);
2518 	  retval = true;
2519 	  thread_stats.num_threaded_edges++;
2520 	}
2521 
2522       delete_jump_thread_path (path);
2523       paths.unordered_remove (i);
2524       free (region);
2525     }
2526 
2527   /* Remove from PATHS all the jump-threads starting with an edge already
2528      jump-threaded.  */
2529   for (i = 0; i < paths.length ();)
2530     {
2531       vec<jump_thread_edge *> *path = paths[i];
2532       edge entry = (*path)[0]->e;
2533 
2534       /* Do not jump-thread twice from the same block.  */
2535       if (visited_starting_edges.contains (entry))
2536 	{
2537 	  delete_jump_thread_path (path);
2538 	  paths.unordered_remove (i);
2539 	}
2540       else
2541 	i++;
2542     }
2543 
2544   mark_threaded_blocks (threaded_blocks);
2545 
2546   initialize_original_copy_tables ();
2547 
2548   /* The order in which we process jump threads can be important.
2549 
2550      Consider if we have two jump threading paths A and B.  If the
2551      target edge of A is the starting edge of B and we thread path A
2552      first, then we create an additional incoming edge into B->dest that
2553      we cannot discover as a jump threading path on this iteration.
2554 
2555      If we instead thread B first, then the edge into B->dest will have
2556      already been redirected before we process path A and path A will
2557      naturally, with no further work, target the redirected path for B.
2558 
2559      A post-order is sufficient here.  Compute the ordering first, then
2560      process the blocks.  */
2561   if (!bitmap_empty_p (threaded_blocks))
2562     {
2563       int *postorder = XNEWVEC (int, n_basic_blocks_for_fn (cfun));
2564       unsigned int postorder_num = post_order_compute (postorder, false, false);
2565       for (unsigned int i = 0; i < postorder_num; i++)
2566 	{
2567 	  unsigned int indx = postorder[i];
2568 	  if (bitmap_bit_p (threaded_blocks, indx))
2569 	    {
2570 	      basic_block bb = BASIC_BLOCK_FOR_FN (cfun, indx);
2571 	      retval |= thread_block (bb, true);
2572 	    }
2573 	}
2574       free (postorder);
2575     }
2576 
2577   /* Then perform the threading through loop headers.  We start with the
2578      innermost loop, so that the changes in cfg we perform won't affect
2579      further threading.  */
2580   FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
2581     {
2582       if (!loop->header
2583 	  || !bitmap_bit_p (threaded_blocks, loop->header->index))
2584 	continue;
2585 
2586       retval |= thread_through_loop_header (loop, may_peel_loop_headers);
2587     }
2588 
2589   /* All jump threading paths should have been resolved at this
2590      point.  Verify that is the case.  */
2591   basic_block bb;
2592   FOR_EACH_BB_FN (bb, cfun)
2593     {
2594       edge_iterator ei;
2595       edge e;
2596       FOR_EACH_EDGE (e, ei, bb->preds)
2597 	gcc_assert (e->aux == NULL);
2598     }
2599 
2600   statistics_counter_event (cfun, "Jumps threaded",
2601 			    thread_stats.num_threaded_edges);
2602 
2603   free_original_copy_tables ();
2604 
2605   paths.release ();
2606 
2607   if (retval)
2608     loops_state_set (LOOPS_NEED_FIXUP);
2609 
2610  out:
2611   delete removed_edges;
2612   removed_edges = NULL;
2613   return retval;
2614 }
2615 
2616 /* Delete the jump threading path PATH.  We have to explicitly delete
2617    each entry in the vector, then the container.  */
2618 
2619 void
2620 delete_jump_thread_path (vec<jump_thread_edge *> *path)
2621 {
2622   for (unsigned int i = 0; i < path->length (); i++)
2623     delete (*path)[i];
2624   path->release ();
2625   delete path;
2626 }
2627 
2628 /* Register a jump threading opportunity.  We queue up all the jump
2629    threading opportunities discovered by a pass and update the CFG
2630    and SSA form all at once.
2631 
2632    PATH describes the thread: its first edge is the edge we can thread
2633    and its last edge gives the final target, i.e., the first edge's dest
2634    can be changed to the last edge's dest after fixing the SSA graph.  */
2635 
2636 void
2637 register_jump_thread (vec<jump_thread_edge *> *path)
2638 {
2639   if (!dbg_cnt (registered_jump_thread))
2640     {
2641       delete_jump_thread_path (path);
2642       return;
2643     }
2644 
2645   /* First make sure there are no NULL outgoing edges on the jump threading
2646      path.  That can happen for jumping to a constant address.  */
2647   for (unsigned int i = 0; i < path->length (); i++)
2648     {
2649       if ((*path)[i]->e == NULL)
2650 	{
2651 	  if (dump_file && (dump_flags & TDF_DETAILS))
2652 	    {
2653 	      fprintf (dump_file,
2654 		       "Found NULL edge in jump threading path.  Cancelling jump thread:\n");
2655 	      dump_jump_thread_path (dump_file, *path, false);
2656 	    }
2657 
2658 	  delete_jump_thread_path (path);
2659 	  return;
2660 	}
2661 
2662       /* Only the FSM threader is allowed to thread across
2663 	 backedges in the CFG.  */
2664       if (flag_checking
2665 	  && (*path)[0]->type != EDGE_FSM_THREAD)
2666 	gcc_assert (((*path)[i]->e->flags & EDGE_DFS_BACK) == 0);
2667     }
2668 
2669   if (dump_file && (dump_flags & TDF_DETAILS))
2670     dump_jump_thread_path (dump_file, *path, true);
2671 
2672   if (!paths.exists ())
2673     paths.create (5);
2674 
2675   paths.safe_push (path);
2676 }
2677 
2678 /* Return how many uses of T there are within BB, as long as there
2679    aren't any uses outside BB.  If there are any uses outside BB,
2680    return -1 if there's at most one use within BB, or -2 if there is
2681    more than one use within BB.  */
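
/* Hypothetical examples: if t_1 has two uses inside BB and none outside,
   this returns 2.  If t_1 is also used outside BB, it returns -1 when BB
   contains at most one use and -2 when BB contains more than one.  */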
2682 
2683 static int
2684 uses_in_bb (tree t, basic_block bb)
2685 {
2686   int uses = 0;
2687   bool outside_bb = false;
2688 
2689   imm_use_iterator iter;
2690   use_operand_p use_p;
2691   FOR_EACH_IMM_USE_FAST (use_p, iter, t)
2692     {
2693       if (is_gimple_debug (USE_STMT (use_p)))
2694 	continue;
2695 
2696       if (gimple_bb (USE_STMT (use_p)) != bb)
2697 	outside_bb = true;
2698       else
2699 	uses++;
2700 
2701       if (outside_bb && uses > 1)
2702 	return -2;
2703     }
2704 
2705   if (outside_bb)
2706     return -1;
2707 
2708   return uses;
2709 }
2710 
2711 /* Starting from the final control flow stmt in BB, assuming it will
2712    be removed, follow uses in to-be-removed stmts back to their defs
2713    and count how many defs are to become dead and be removed as
2714    well.  */
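
/* A small hedged example: for a block ending in
     a_1 = x_2 + 1;
     b_3 = a_1 * 2;
     if (b_3 != 0)
   where a_1 and b_3 have no uses outside the block, the conditional and
   both definitions become dead once the jump is threaded, so this
   returns 3 (plus one per non-virtual PHI when the block has exactly
   two predecessors).  */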
2715 
2716 unsigned int
2717 estimate_threading_killed_stmts (basic_block bb)
2718 {
2719   int killed_stmts = 0;
2720   hash_map<tree, int> ssa_remaining_uses;
2721   auto_vec<gimple *, 4> dead_worklist;
2722 
2723   /* If the block has only two predecessors, threading will turn each
2724      PHI dst into one of its srcs, so count the PHIs as dead stmts.  */
2725   bool drop_all_phis = EDGE_COUNT (bb->preds) == 2;
2726 
2727   if (drop_all_phis)
2728     for (gphi_iterator gsi = gsi_start_phis (bb);
2729 	 !gsi_end_p (gsi); gsi_next (&gsi))
2730       {
2731 	gphi *phi = gsi.phi ();
2732 	tree dst = gimple_phi_result (phi);
2733 
2734 	/* We don't count virtual PHIs as stmts in
2735 	   record_temporary_equivalences_from_phis.  */
2736 	if (virtual_operand_p (dst))
2737 	  continue;
2738 
2739 	killed_stmts++;
2740       }
2741 
2742   if (gsi_end_p (gsi_last_bb (bb)))
2743     return killed_stmts;
2744 
2745   gimple *stmt = gsi_stmt (gsi_last_bb (bb));
2746   if (gimple_code (stmt) != GIMPLE_COND
2747       && gimple_code (stmt) != GIMPLE_GOTO
2748       && gimple_code (stmt) != GIMPLE_SWITCH)
2749     return killed_stmts;
2750 
2751   /* The control statement is always dead.  */
2752   killed_stmts++;
2753   dead_worklist.quick_push (stmt);
2754   while (!dead_worklist.is_empty ())
2755     {
2756       stmt = dead_worklist.pop ();
2757 
2758       ssa_op_iter iter;
2759       use_operand_p use_p;
2760       FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_USE)
2761 	{
2762 	  tree t = USE_FROM_PTR (use_p);
2763 	  gimple *def = SSA_NAME_DEF_STMT (t);
2764 
2765 	  if (gimple_bb (def) == bb
2766 	      && (gimple_code (def) != GIMPLE_PHI
2767 		  || !drop_all_phis)
2768 	      && !gimple_has_side_effects (def))
2769 	    {
2770 	      int *usesp = ssa_remaining_uses.get (t);
2771 	      int uses;
2772 
2773 	      if (usesp)
2774 		uses = *usesp;
2775 	      else
2776 		uses = uses_in_bb (t, bb);
2777 
2778 	      gcc_assert (uses);
2779 
2780 	      /* Don't bother recording the expected use count if we
2781 		 won't find any further uses within BB.  */
2782 	      if (!usesp && (uses < -1 || uses > 1))
2783 		{
2784 		  usesp = &ssa_remaining_uses.get_or_insert (t);
2785 		  *usesp = uses;
2786 		}
2787 
2788 	      if (uses < 0)
2789 		continue;
2790 
2791 	      --uses;
2792 	      if (usesp)
2793 		*usesp = uses;
2794 
2795 	      if (!uses)
2796 		{
2797 		  killed_stmts++;
2798 		  if (usesp)
2799 		    ssa_remaining_uses.remove (t);
2800 		  if (gimple_code (def) != GIMPLE_PHI)
2801 		    dead_worklist.safe_push (def);
2802 		}
2803 	    }
2804 	}
2805     }
2806 
2807   if (dump_file)
2808     fprintf (dump_file, "threading bb %i kills %i stmts\n",
2809 	     bb->index, killed_stmts);
2810 
2811   return killed_stmts;
2812 }
2813