1 /* Allocation for dataflow support routines.
2    Copyright (C) 1999-2020 Free Software Foundation, Inc.
3    Originally contributed by Michael P. Hayes
4              (m.hayes@elec.canterbury.ac.nz, mhayes@redhat.com)
5    Major rewrite contributed by Danny Berlin (dberlin@dberlin.org)
6              and Kenneth Zadeck (zadeck@naturalbridge.com).
7 
8 This file is part of GCC.
9 
10 GCC is free software; you can redistribute it and/or modify it under
11 the terms of the GNU General Public License as published by the Free
12 Software Foundation; either version 3, or (at your option) any later
13 version.
14 
15 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
16 WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
18 for more details.
19 
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING3.  If not see
22 <http://www.gnu.org/licenses/>.  */
23 
24 /*
25 OVERVIEW:
26 
27 The files in this collection (df*.c,df.h) provide a general framework
28 for solving dataflow problems.  The global dataflow is performed using
29 a good implementation of iterative dataflow analysis.
30 
31 The file df-problems.c provides problem instances for the most common
32 dataflow problems: reaching defs, upward exposed uses, live variables,
33 uninitialized variables, def-use chains, and use-def chains.  However,
34 the interface allows other dataflow problems to be defined as well.
35 
36 Dataflow analysis is available in most of the rtl backend (the parts
37 between pass_df_initialize and pass_df_finish).  It is quite likely
38 that these boundaries will be expanded in the future.  The only
39 requirement is that there be a correct control flow graph.
40 
41 There are three variations of the live variable problem that are
42 available whenever dataflow is available.  The LR problem finds the
43 areas that can reach a use of a variable; the UR problem finds the
44 areas that can be reached from a definition of a variable.  The LIVE
45 problem finds the intersection of these two areas.
46 
47 There are several optional problems.  These can be enabled when they
48 are needed and disabled when they are not needed.
49 
50 Dataflow problems are generally solved in three layers.  The bottom
51 layer is called scanning where a data structure is built for each rtl
52 insn that describes the set of defs and uses of that insn.  Scanning
53 is generally kept up to date, i.e. as an insn changes, the scanned
54 version of that insn changes also.  There are various mechanisms for
55 making this happen; they are described in the INCREMENTAL SCANNING
56 section.
57 
58 In the middle layer, basic blocks are scanned to produce transfer
59 functions which describe the effects of that block on the global
60 dataflow solution.  The transfer functions are only rebuilt if
61 some instruction within the block has changed.
62 
63 The top layer is the dataflow solution itself.  The dataflow solution
64 is computed by using an efficient iterative solver and the transfer
65 functions.  The dataflow solution must be recomputed whenever the
66 control flow graph changes or one of the transfer functions changes.
67 
68 
69 USAGE:
70 
71 Here is an example of using the dataflow routines.
72 
73       df_[chain,live,note,rd]_add_problem (flags);
74 
75       df_set_blocks (blocks);
76 
77       df_analyze ();
78 
79       df_dump (stderr);
80 
81       df_finish_pass (false);
82 
83 DF_[chain,live,note,rd]_ADD_PROBLEM adds a problem, defined by an
84 instance of struct df_problem, to the set of problems solved in this
85 instance of df.  All calls to add a problem for a given instance of df
86 must occur before the first call to DF_ANALYZE.
87 
88 Problems can be dependent on other problems.  For instance, solving
89 def-use or use-def chains is dependent on solving reaching
90 definitions. As long as these dependencies are listed in the problem
91 definition, the order of adding the problems is not material.
92 Otherwise, the problems will be solved in the order of calls to
93 df_add_problem.  Note that it is not necessary to have a problem.  In
94 that case, df will just be used to do the scanning.
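
For example, a hypothetical pass that wants both def-use and use-def
chains might do the following (a sketch; the particular problem and
flag combination shown are only illustrative):

      df_chain_add_problem (DF_DU_CHAIN + DF_UD_CHAIN);
      df_analyze ();
      ... walk DF_REF_CHAIN of the refs of interest ...
      df_finish_pass (false);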
95 
96 
97 
98 DF_SET_BLOCKS is an optional call used to define a region of the
99 function on which the analysis will be performed.  The normal case is
100 to analyze the entire function and no call to df_set_blocks is made.
101 DF_SET_BLOCKS only affects which blocks are considered when computing
102 the transfer functions and final solution.  The insn level information
103 is always kept up to date.
104 
105 When a subset is given, the analysis behaves as if the function only
106 contains those blocks and any edges that occur directly between the
107 blocks in the set.  Care should be taken to call df_set_blocks right
108 before the call to analyze in order to eliminate the possibility that
109 optimizations that reorder blocks invalidate the bitvector.
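
For instance, to focus the analysis on the body of a single loop
(a sketch; LOOP is assumed to be a class loop * obtained from the
loop infrastructure):

      bitmap blocks = BITMAP_ALLOC (&df_bitmap_obstack);
      basic_block *body = get_loop_body (loop);
      for (unsigned int i = 0; i < loop->num_nodes; i++)
        bitmap_set_bit (blocks, body[i]->index);
      free (body);
      df_set_blocks (blocks);
      df_analyze ();
      BITMAP_FREE (blocks);

df_set_blocks copies the bitmap, so the local copy can be freed once
the call has been made.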
110 
111 DF_ANALYZE causes all of the defined problems to be (re)solved.  When
112 DF_ANALYZE completes, the IN and OUT sets for each basic block
113 contain the computed information.  The DF_*_BB_INFO macros can be used
114 to access these bitvectors.  All deferred rescans are done before
115 the transfer functions are recomputed.
116 
117 DF_DUMP can then be called to dump the information produced to some
118 file.  This calls DF_DUMP_START, to print the information that is not
119 basic block specific, and then calls DF_DUMP_TOP and DF_DUMP_BOTTOM
120 for each block to print the basic block specific information.  These parts
121 can all be called separately as part of a larger dump function.
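
For example, a larger dump routine might interleave its own per-insn
output with the block specific information roughly as follows (a
sketch; FILE is assumed to be an open stdio stream):

      basic_block bb;

      df_dump_start (file);
      FOR_ALL_BB_FN (bb, cfun)
        {
          df_dump_top (bb, file);
          ... dump the insns of BB here ...
          df_dump_bottom (bb, file);
        }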
122 
123 
124 DF_FINISH_PASS causes df_remove_problem to be called on all of the
125 optional problems.  It also causes any insns whose scanning has been
126 deferred to be rescanned, and it clears all of the changeable flags.
127 Setting the pass manager TODO_df_finish flag causes this function to
128 be run.  However, the pass manager will call df_finish_pass AFTER the
129 pass dumping has been done, so if you want to see the results of the
130 optional problems in the pass dumps, use the TODO flag rather than
131 calling the function yourself.
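
A hypothetical pass that wants this behavior simply sets the flag in
its pass_data, mirroring the pass descriptors later in this file (a
sketch; "my_pass" is an invented name and only the final initializer,
todo_flags_finish, matters here):

      const pass_data pass_data_my_pass =
      {
        RTL_PASS, "my_pass", OPTGROUP_NONE, TV_NONE,
        0, 0, 0, 0,
        TODO_df_finish
      };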
132 
133 INCREMENTAL SCANNING
134 
135 There are four ways of doing the incremental scanning:
136 
137 1) Immediate rescanning - Calls to df_insn_rescan, df_notes_rescan,
138    df_bb_delete, df_insn_change_bb have been added to most of
139    the low level service functions that maintain the cfg and change
140    rtl.  Calling any of these routines may cause some number of insns
141    to be rescanned.
142 
143    For most modern rtl passes, this is certainly the easiest way to
144    manage rescanning the insns.  This technique also has the advantage
145    that the scanning information is always correct and can be relied
146    upon even after changes have been made to the instructions.  This
147    technique is contraindicated in several cases:
148 
149    a) If def-use chains OR use-def chains (but not both) are built,
150       using this is SIMPLY WRONG.  The problem is that when a ref is
151       deleted that is the target of an edge, there is not enough
152       information to efficiently find the source of the edge and
153       delete the edge.  This leaves a dangling reference that may
154       cause problems.
155 
156    b) If def-use chains AND use-def chains are built, this may
157       produce unexpected results.  The problem is that the incremental
158       scanning of an insn does not know how to repair the chains that
159       point into an insn when the insn changes.  So the incremental
160       scanning just deletes the chains that enter and exit the insn
161       being changed.  The dangling reference issue in (a) is not a
162       problem here, but if the pass is depending on the chains being
163       maintained after insns have been modified, this technique will
164       not do the correct thing.
165 
166    c) If the pass modifies insns several times, this incremental
167       updating may be expensive.
168 
169    d) If the pass modifies all of the insns, as does register
170       allocation, it is simply better to rescan the entire function.
171 
172 2) Deferred rescanning - Calls to df_insn_rescan, df_notes_rescan, and
173    df_insn_delete do not immediately change the insn but instead make
174    a note that the insn needs to be rescanned.  The next call to
175    df_analyze, df_finish_pass, or df_process_deferred_rescans will
176    cause all of the pending rescans to be processed.
177 
178    This is the technique of choice if any of 1a, 1b, or 1c is an issue
179    in the pass.  In the case of 1a or 1b, a call to df_finish_pass
180    (either manually or via TODO_df_finish) should be made before the
181    next call to df_analyze or df_process_deferred_rescans.
182 
183    This mode is also used by a few passes that still rely on note_uses,
184    note_stores and rtx iterators instead of using the DF data.  This
185    can be said to fall under case 1c.
186 
187    To enable this mode, call df_set_flags (DF_DEFER_INSN_RESCAN); see
188    the sketch after this list.  (This mode can be cleared by calling
189    df_clear_flags (DF_DEFER_INSN_RESCAN), but this does not cause the
190    deferred insns to be rescanned.)
191 
192 3) Total rescanning - In this mode the rescanning is disabled.
193    Only when insns are deleted is the df information associated with
194    them also deleted.  At the end of the pass, a call must be made to
195    df_insn_rescan_all.  This method is used by the register allocator
196    since it generally changes each insn multiple times (once for each ref)
197    and does not need to make use of the updated scanning information.
198 
199 4) Do it yourself - In this mechanism, the pass updates the insns
200    itself using the low level df primitives.  Currently no pass does
201    this, but it has the advantage that it is quite efficient given
202    that the pass generally has exact knowledge of what it is changing.
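
As an illustration of the deferred mode (2 above), a pass might be
structured roughly as follows (a sketch; INSN stands for whatever
insn the pass has just modified):

      df_set_flags (DF_DEFER_INSN_RESCAN);
      ... modify an insn; the call below only queues it ...
      df_insn_rescan (insn);
      ... further modifications and queued rescans ...
      df_process_deferred_rescans ();
      df_clear_flags (DF_DEFER_INSN_RESCAN);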
203 
204 DATA STRUCTURES
205 
206 Scanning produces a `struct df_ref' data structure (ref) for every
207 register reference (def or use); this records the insn and bb the
208 ref is found within.  The refs are linked together in
209 chains of uses and defs for each insn and for each register.  Each ref
210 also has a chain field that links all the use refs for a def or all
211 the def refs for a use.  This is used to create use-def or def-use
212 chains.
213 
214 Different optimizations have different needs.  Ultimately, only
215 register allocation and schedulers should be using the bitmaps
216 produced for the live register and uninitialized register problems.
217 The rest of the backend should be upgraded to use and maintain
218 the linked information such as def-use or use-def chains.
219 
220 
221 PHILOSOPHY:
222 
223 While incremental bitmaps are not worthwhile to maintain, incremental
224 chains may be perfectly reasonable.  The fastest way to build chains
225 from scratch or after significant modifications is to build reaching
226 definitions (RD) and build the chains from this.
227 
228 However, general algorithms for maintaining use-def or def-use chains
229 are not practical.  The amount of work needed to recompute any
230 chain after an arbitrary change is large.  However, with a modest
231 amount of work it is generally possible to have the application that
232 uses the chains keep them up to date.  The high level knowledge of
233 what is really happening is essential to crafting efficient
234 incremental algorithms.
235 
236 As for the bit vector problems, there is no interface to give a set of
237 blocks over which to restart the iteration.  In general, restarting a
238 dataflow iteration is difficult and expensive.  Again, the best way to
239 keep the dataflow information up to date (if this is really what is
240 needed) is to formulate a problem-specific solution.
241 
242 There are fine grained calls for creating and deleting references from
243 instructions in df-scan.c.  However, these are not currently connected
244 to the engine that resolves the dataflow equations.
245 
246 
247 DATA STRUCTURES:
248 
249 The basic object is a DF_REF (reference) and this may either be a
250 DEF (definition) or a USE of a register.
251 
252 These are linked into a variety of lists; namely reg-def, reg-use,
253 insn-def, insn-use, def-use, and use-def lists.  For example, the
254 reg-def lists contain all the locations that define a given register,
255 while the insn-use lists contain all the uses that occur within a
256 given insn.
257 
258 Note that the reg-def and reg-use chains are generally short for
259 pseudos and long for the hard registers.
260 
261 ACCESSING INSNS:
262 
263 1) The df insn information is kept in an array of DF_INSN_INFO objects.
264    The array is indexed by insn uid, and every DF_REF points to the
265    DF_INSN_INFO object of the insn that contains the reference.
266 
267 2) Each insn has three sets of refs, which are linked into one of three
268    lists: The insn's defs list (accessed by the DF_INSN_INFO_DEFS,
269    DF_INSN_DEFS, or DF_INSN_UID_DEFS macros), the insn's uses list
270    (accessed by the DF_INSN_INFO_USES, DF_INSN_USES, or
271    DF_INSN_UID_USES macros) or the insn's eq_uses list (accessed by the
272    DF_INSN_INFO_EQ_USES, DF_INSN_EQ_USES or DF_INSN_UID_EQ_USES macros).
273    The latter is the list of references found in REG_EQUAL or REG_EQUIV
274    notes.  These macros produce a ref (or NULL); the rest of the list
275    can be obtained by traversal of the NEXT_REF field (accessed by the
276    DF_REF_NEXT_REF macro); see the sketch after this list.  There is no
277    significance to the ordering of the uses or refs in an instruction.
278 
279 3) Each insn has a logical uid field (LUID) which is stored in the
280    DF_INSN_INFO object for the insn.  The LUID field is accessed by
281    the DF_INSN_INFO_LUID, DF_INSN_LUID, and DF_INSN_UID_LUID macros.
282    When properly set, the LUID is an integer that numbers each insn in
283    the basic block, in order from the start of the block.
284    The numbers are only correct after a call to df_analyze.  They will
285    rot after insns are added, deleted or moved around.
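
   For example, to walk all of the defs of an insn using the macros
   from (2) above (a sketch; INSN is assumed to be an rtx_insn * that
   has already been scanned):

      df_ref def;

      for (def = DF_INSN_DEFS (insn); def; def = DF_REF_NEXT_REF (def))
        {
          unsigned int regno = DF_REF_REGNO (def);
          ... use REGNO ...
        }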
286 
287 ACCESSING REFS:
288 
289 There are 4 ways to obtain access to refs:
290 
291 1) References are divided into two categories, REAL and ARTIFICIAL.
292 
293    REAL refs are associated with instructions.
294 
295    ARTIFICIAL refs are associated with basic blocks.  The heads of
296    these lists can be accessed by calling df_get_artificial_defs or
297    df_get_artificial_uses for the particular basic block.
298 
299    Artificial defs and uses occur both at the beginnings and ends of blocks.
300 
301      For blocks that are at the destination of eh edges, the
302      artificial uses and defs occur at the beginning.  The defs relate
303      to the registers specified in EH_RETURN_DATA_REGNO and the uses
304      relate to the registers specified in EH_USES.  Logically these
305      defs and uses should really occur along the eh edge, but there is
306      no convenient way to do this.  Artificial defs that occur at the
307      beginning of the block have the DF_REF_AT_TOP flag set.
308 
309      Artificial uses occur at the end of all blocks.  These arise from
310      the hard registers that are always live, such as the stack
311      register, and are put there to keep the code from forgetting about
312      them.
313 
314      Artificial defs occur at the end of the entry block.  These arise
315      from registers that are live at entry to the function.
316 
317 2) There are three types of refs: defs, uses and eq_uses.  (Eq_uses are
318    uses that appear inside a REG_EQUAL or REG_EQUIV note.)
319 
320    All of the eq_uses, uses and defs associated with each pseudo or
321    hard register may be linked in a bidirectional chain.  These are
322    called reg-use or reg-def chains.  If the changeable flag
323    DF_EQ_NOTES is set when the chains are built, the eq_uses will be
324    treated like uses.  If it is not set they are ignored.
325 
326    The first use, eq_use or def for a register can be obtained using
327    the DF_REG_USE_CHAIN, DF_REG_EQ_USE_CHAIN or DF_REG_DEF_CHAIN
328    macros.  Subsequent uses for the same regno can be obtained by
329    following the next_reg field of the ref.  The number of elements in
330    each of the chains can be found by using the DF_REG_USE_COUNT,
331    DF_REG_EQ_USE_COUNT or DF_REG_DEF_COUNT macros.
332 
333    In previous versions of this code, these chains were ordered.  It
334    has not been practical to continue this practice.
335 
336 3) If def-use or use-def chains are built, these can be traversed to
337    get to other refs.  If the flag DF_EQ_NOTES has been set, the chains
338    include the eq_uses.  Otherwise these are ignored when building the
339    chains.
340 
341 4) An array of all of the uses (and an array of all of the defs) can
342    be built.  These arrays are indexed by the value in the id field
343    of the ref.  These arrays are only lazily kept up to date, and that
344    process can be expensive.  To have these arrays built, call
345    df_reorganize_defs or df_reorganize_uses.  If the flag DF_EQ_NOTES
346    has been set the array will contain the eq_uses.  Otherwise these
347    are ignored when building the array and assigning the ids.  Note
348    that the values in the id field of a ref may change across calls to
349    df_analyze or df_reorganize_defs or df_reorganize_uses.
350 
351    If the only use of this array is to find all of the refs, it is
352    better to traverse all of the registers and then traverse all of
353    reg-use or reg-def chains.
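
   For example, to visit every def of a register via its reg-def
   chain (a sketch; REGNO is assumed to be the register number of
   interest, and artificial defs, which have no containing insn, are
   skipped):

      df_ref def;

      for (def = DF_REG_DEF_CHAIN (regno); def; def = DF_REF_NEXT_REG (def))
        {
          if (DF_REF_IS_ARTIFICIAL (def))
            continue;
          rtx_insn *insn = DF_REF_INSN (def);
          ... examine the def in INSN ...
        }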
354 
355 NOTES:
356 
357 Embedded addressing side-effects, such as POST_INC or PRE_INC, generate
358 both a use and a def.  These are both marked read/write to show that they
359 are dependent. For example, (set (reg 40) (mem (post_inc (reg 42))))
360 will generate a use of reg 42 followed by a def of reg 42 (both marked
361 read/write).  Similarly, (set (reg 40) (mem (pre_dec (reg 41))))
362 generates a use of reg 41 then a def of reg 41 (both marked read/write),
363 even though reg 41 is decremented before it is used for the memory
364 address in this second example.
365 
366 A set to a REG inside a ZERO_EXTRACT, or a set to a non-paradoxical SUBREG
367 for which the number of word_mode units covered by the outer mode is
368 smaller than that covered by the inner mode, invokes a read-modify-write
369 operation.  We generate both a use and a def and again mark them
370 read/write.
371 
372 Paradoxical subreg writes do not leave a trace of the old content, so they
373 are write-only operations.
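
So a consumer that must handle such partial or auto-modified writes
conservatively can test the flag on each ref, for instance (a sketch;
REF is assumed to be a df_ref obtained as described above):

      if (DF_REF_FLAGS (ref) & DF_REF_READ_WRITE)
        ... this def does not fully kill the old value of the register ...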
374 */
375 
376 
377 #include "config.h"
378 #include "system.h"
379 #include "coretypes.h"
380 #include "backend.h"
381 #include "rtl.h"
382 #include "df.h"
383 #include "memmodel.h"
384 #include "emit-rtl.h"
385 #include "cfganal.h"
386 #include "tree-pass.h"
387 #include "cfgloop.h"
388 
389 static void *df_get_bb_info (struct dataflow *, unsigned int);
390 static void df_set_bb_info (struct dataflow *, unsigned int, void *);
391 static void df_clear_bb_info (struct dataflow *, unsigned int);
392 #ifdef DF_DEBUG_CFG
393 static void df_set_clean_cfg (void);
394 #endif
395 
396 /* The obstack on which regsets are allocated.  */
397 struct bitmap_obstack reg_obstack;
398 
399 /* An obstack for bitmaps not related to specific dataflow problems.
400    This obstack should e.g. be used for bitmaps with a short lifetime,
401    such as temporary bitmaps.  */
402 
403 bitmap_obstack df_bitmap_obstack;
404 
405 
406 /*----------------------------------------------------------------------------
407   Functions to create, destroy and manipulate an instance of df.
408 ----------------------------------------------------------------------------*/
409 
410 class df_d *df;
411 
412 /* Add PROBLEM (and any dependent problems) to the DF instance.  */
413 
414 void
415 df_add_problem (const struct df_problem *problem)
416 {
417   struct dataflow *dflow;
418   int i;
419 
420   /* First try to add the dependent problem. */
421   if (problem->dependent_problem)
422     df_add_problem (problem->dependent_problem);
423 
424   /* Check to see if this problem has already been defined.  If it
425      has, just return that instance; if not, add it to the end of the
426      vector.  */
427   dflow = df->problems_by_index[problem->id];
428   if (dflow)
429     return;
430 
431   /* Make a new one and add it to the end.  */
432   dflow = XCNEW (struct dataflow);
433   dflow->problem = problem;
434   dflow->computed = false;
435   dflow->solutions_dirty = true;
436   df->problems_by_index[dflow->problem->id] = dflow;
437 
438   /* Keep the defined problems ordered by index.  This solves the
439      problem that RI will use the information from UREC if UREC has
440      been defined, or from LIVE if LIVE is defined and otherwise LR.
441      However, for this to work, the computation of RI must be pushed
442      after whichever of those problems is defined, but we do not
443      require any of those except for LR to have actually been
444      defined.  */
445   df->num_problems_defined++;
446   for (i = df->num_problems_defined - 2; i >= 0; i--)
447     {
448       if (problem->id < df->problems_in_order[i]->problem->id)
449 	df->problems_in_order[i+1] = df->problems_in_order[i];
450       else
451 	{
452 	  df->problems_in_order[i+1] = dflow;
453 	  return;
454 	}
455     }
456   df->problems_in_order[0] = dflow;
457 }
458 
459 
460 /* Set the CHANGEABLE_FLAGS bits in the df instance.  The old flags are
461    returned.  If a flag is not allowed to be changed, this will fail if
462    checking is enabled.  */
463 int
464 df_set_flags (int changeable_flags)
465 {
466   int old_flags = df->changeable_flags;
467   df->changeable_flags |= changeable_flags;
468   return old_flags;
469 }
470 
471 
472 /* Clear the CHANGEABLE_FLAGS bits in the df instance.  The old flags are
473    returned.  If a flag is not allowed to be changed, this will fail if
474    checking is enabled.  */
475 int
476 df_clear_flags (int changeable_flags)
477 {
478   int old_flags = df->changeable_flags;
479   df->changeable_flags &= ~changeable_flags;
480   return old_flags;
481 }
482 
483 
484 /* Set the blocks that are to be considered for analysis.  If this is
485    not called or is called with null, the entire function is
486    analyzed.  */
487 
488 void
489 df_set_blocks (bitmap blocks)
490 {
491   if (blocks)
492     {
493       if (dump_file)
494 	bitmap_print (dump_file, blocks, "setting blocks to analyze ", "\n");
495       if (df->blocks_to_analyze)
496 	{
497 	  /* This block is called to change the focus from one subset
498 	     to another.  */
499 	  int p;
500 	  auto_bitmap diff (&df_bitmap_obstack);
501 	  bitmap_and_compl (diff, df->blocks_to_analyze, blocks);
502 	  for (p = 0; p < df->num_problems_defined; p++)
503 	    {
504 	      struct dataflow *dflow = df->problems_in_order[p];
505 	      if (dflow->optional_p && dflow->problem->reset_fun)
506 		dflow->problem->reset_fun (df->blocks_to_analyze);
507 	      else if (dflow->problem->free_blocks_on_set_blocks)
508 		{
509 		  bitmap_iterator bi;
510 		  unsigned int bb_index;
511 
512 		  EXECUTE_IF_SET_IN_BITMAP (diff, 0, bb_index, bi)
513 		    {
514 		      basic_block bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
515 		      if (bb)
516 			{
517 			  void *bb_info = df_get_bb_info (dflow, bb_index);
518 			  dflow->problem->free_bb_fun (bb, bb_info);
519 			  df_clear_bb_info (dflow, bb_index);
520 			}
521 		    }
522 		}
523 	    }
524 	}
525       else
526 	{
527 	  /* This block of code is executed to change the focus from
528 	     the entire function to a subset.  */
529 	  bitmap_head blocks_to_reset;
530 	  bool initialized = false;
531 	  int p;
532 	  for (p = 0; p < df->num_problems_defined; p++)
533 	    {
534 	      struct dataflow *dflow = df->problems_in_order[p];
535 	      if (dflow->optional_p && dflow->problem->reset_fun)
536 		{
537 		  if (!initialized)
538 		    {
539 		      basic_block bb;
540 		      bitmap_initialize (&blocks_to_reset, &df_bitmap_obstack);
541 		      FOR_ALL_BB_FN (bb, cfun)
542 			{
543 			  bitmap_set_bit (&blocks_to_reset, bb->index);
544 			}
545 		    }
546 		  dflow->problem->reset_fun (&blocks_to_reset);
547 		}
548 	    }
549 	  if (initialized)
550 	    bitmap_clear (&blocks_to_reset);
551 
552 	  df->blocks_to_analyze = BITMAP_ALLOC (&df_bitmap_obstack);
553 	}
554       bitmap_copy (df->blocks_to_analyze, blocks);
555       df->analyze_subset = true;
556     }
557   else
558     {
559       /* This block is executed to reset the focus to the entire
560 	 function.  */
561       if (dump_file)
562 	fprintf (dump_file, "clearing blocks_to_analyze\n");
563       if (df->blocks_to_analyze)
564 	{
565 	  BITMAP_FREE (df->blocks_to_analyze);
566 	  df->blocks_to_analyze = NULL;
567 	}
568       df->analyze_subset = false;
569     }
570 
571   /* Setting the blocks causes the refs to be unorganized since only
572      the refs in the blocks are seen.  */
573   df_maybe_reorganize_def_refs (DF_REF_ORDER_NO_TABLE);
574   df_maybe_reorganize_use_refs (DF_REF_ORDER_NO_TABLE);
575   df_mark_solutions_dirty ();
576 }
577 
578 
579 /* Delete a DFLOW problem (and any problems that depend on this
580    problem).  */
581 
582 void
583 df_remove_problem (struct dataflow *dflow)
584 {
585   const struct df_problem *problem;
586   int i;
587 
588   if (!dflow)
589     return;
590 
591   problem = dflow->problem;
592   gcc_assert (problem->remove_problem_fun);
593 
594   /* Delete any problems that depended on this problem first.  */
595   for (i = 0; i < df->num_problems_defined; i++)
596     if (df->problems_in_order[i]->problem->dependent_problem == problem)
597       df_remove_problem (df->problems_in_order[i]);
598 
599   /* Now remove this problem.  */
600   for (i = 0; i < df->num_problems_defined; i++)
601     if (df->problems_in_order[i] == dflow)
602       {
603 	int j;
604 	for (j = i + 1; j < df->num_problems_defined; j++)
605 	  df->problems_in_order[j-1] = df->problems_in_order[j];
606 	df->problems_in_order[j-1] = NULL;
607 	df->num_problems_defined--;
608 	break;
609       }
610 
611   (problem->remove_problem_fun) ();
612   df->problems_by_index[problem->id] = NULL;
613 }
614 
615 
616 /* Remove all of the problems that are not permanent.  Scanning, LR
617    and (at -O2 or higher) LIVE are permanent; the rest are removable.
618    Also clear all of the changeable_flags.  */
619 
620 void
621 df_finish_pass (bool verify ATTRIBUTE_UNUSED)
622 {
623   int i;
624 
625 #ifdef ENABLE_DF_CHECKING
626   int saved_flags;
627 #endif
628 
629   if (!df)
630     return;
631 
632   df_maybe_reorganize_def_refs (DF_REF_ORDER_NO_TABLE);
633   df_maybe_reorganize_use_refs (DF_REF_ORDER_NO_TABLE);
634 
635 #ifdef ENABLE_DF_CHECKING
636   saved_flags = df->changeable_flags;
637 #endif
638 
639   /* We iterate over problems by index as each problem removed will
640      cause problems_in_order to be reordered.  */
641   for (i = 0; i < DF_LAST_PROBLEM_PLUS1; i++)
642     {
643       struct dataflow *dflow = df->problems_by_index[i];
644 
645       if (dflow && dflow->optional_p)
646 	df_remove_problem (dflow);
647     }
648 
649   /* Clear all of the flags.  */
650   df->changeable_flags = 0;
651   df_process_deferred_rescans ();
652 
653   /* Set the focus back to the whole function.  */
654   if (df->blocks_to_analyze)
655     {
656       BITMAP_FREE (df->blocks_to_analyze);
657       df->blocks_to_analyze = NULL;
658       df_mark_solutions_dirty ();
659       df->analyze_subset = false;
660     }
661 
662 #ifdef ENABLE_DF_CHECKING
663   /* Verification will fail in DF_NO_INSN_RESCAN.  */
664   if (!(saved_flags & DF_NO_INSN_RESCAN))
665     {
666       df_lr_verify_transfer_functions ();
667       if (df_live)
668 	df_live_verify_transfer_functions ();
669     }
670 
671 #ifdef DF_DEBUG_CFG
672   df_set_clean_cfg ();
673 #endif
674 #endif
675 
676   if (flag_checking && verify)
677     df->changeable_flags |= DF_VERIFY_SCHEDULED;
678 }
679 
680 
681 /* Set up the dataflow instance for the entire back end.  */
682 
683 static unsigned int
684 rest_of_handle_df_initialize (void)
685 {
686   gcc_assert (!df);
687   df = XCNEW (class df_d);
688   df->changeable_flags = 0;
689 
690   bitmap_obstack_initialize (&df_bitmap_obstack);
691 
692   /* Set this to a conservative value.  Stack_ptr_mod will compute it
693      correctly later.  */
694   crtl->sp_is_unchanging = 0;
695 
696   df_scan_add_problem ();
697   df_scan_alloc (NULL);
698 
699   /* These three problems are permanent.  */
700   df_lr_add_problem ();
701   if (optimize > 1)
702     df_live_add_problem ();
703 
704   df->postorder = XNEWVEC (int, last_basic_block_for_fn (cfun));
705   df->n_blocks = post_order_compute (df->postorder, true, true);
706   inverted_post_order_compute (&df->postorder_inverted);
707   gcc_assert ((unsigned) df->n_blocks == df->postorder_inverted.length ());
708 
709   df->hard_regs_live_count = XCNEWVEC (unsigned int, FIRST_PSEUDO_REGISTER);
710 
711   df_hard_reg_init ();
712   /* After reload, some ports add certain bits to regs_ever_live so
713      this cannot be reset.  */
714   df_compute_regs_ever_live (true);
715   df_scan_blocks ();
716   df_compute_regs_ever_live (false);
717   return 0;
718 }
719 
720 
721 namespace {
722 
723 const pass_data pass_data_df_initialize_opt =
724 {
725   RTL_PASS, /* type */
726   "dfinit", /* name */
727   OPTGROUP_NONE, /* optinfo_flags */
728   TV_DF_SCAN, /* tv_id */
729   0, /* properties_required */
730   0, /* properties_provided */
731   0, /* properties_destroyed */
732   0, /* todo_flags_start */
733   0, /* todo_flags_finish */
734 };
735 
736 class pass_df_initialize_opt : public rtl_opt_pass
737 {
738 public:
739   pass_df_initialize_opt (gcc::context *ctxt)
740     : rtl_opt_pass (pass_data_df_initialize_opt, ctxt)
741   {}
742 
743   /* opt_pass methods: */
744   virtual bool gate (function *) { return optimize > 0; }
745   virtual unsigned int execute (function *)
746     {
747       return rest_of_handle_df_initialize ();
748     }
749 
750 }; // class pass_df_initialize_opt
751 
752 } // anon namespace
753 
754 rtl_opt_pass *
755 make_pass_df_initialize_opt (gcc::context *ctxt)
756 {
757   return new pass_df_initialize_opt (ctxt);
758 }
759 
760 
761 namespace {
762 
763 const pass_data pass_data_df_initialize_no_opt =
764 {
765   RTL_PASS, /* type */
766   "no-opt dfinit", /* name */
767   OPTGROUP_NONE, /* optinfo_flags */
768   TV_DF_SCAN, /* tv_id */
769   0, /* properties_required */
770   0, /* properties_provided */
771   0, /* properties_destroyed */
772   0, /* todo_flags_start */
773   0, /* todo_flags_finish */
774 };
775 
776 class pass_df_initialize_no_opt : public rtl_opt_pass
777 {
778 public:
779   pass_df_initialize_no_opt (gcc::context *ctxt)
780     : rtl_opt_pass (pass_data_df_initialize_no_opt, ctxt)
781   {}
782 
783   /* opt_pass methods: */
784   virtual bool gate (function *) { return optimize == 0; }
785   virtual unsigned int execute (function *)
786     {
787       return rest_of_handle_df_initialize ();
788     }
789 
790 }; // class pass_df_initialize_no_opt
791 
792 } // anon namespace
793 
794 rtl_opt_pass *
795 make_pass_df_initialize_no_opt (gcc::context *ctxt)
796 {
797   return new pass_df_initialize_no_opt (ctxt);
798 }
799 
800 
801 /* Free all the dataflow info and the DF structure.  This should be
802    called from the df_finish macro which also NULLs the parm.  */
803 
804 static unsigned int
805 rest_of_handle_df_finish (void)
806 {
807   int i;
808 
809   gcc_assert (df);
810 
811   for (i = 0; i < df->num_problems_defined; i++)
812     {
813       struct dataflow *dflow = df->problems_in_order[i];
814       dflow->problem->free_fun ();
815     }
816 
817   free (df->postorder);
818   df->postorder_inverted.release ();
819   free (df->hard_regs_live_count);
820   free (df);
821   df = NULL;
822 
823   bitmap_obstack_release (&df_bitmap_obstack);
824   return 0;
825 }
826 
827 
828 namespace {
829 
830 const pass_data pass_data_df_finish =
831 {
832   RTL_PASS, /* type */
833   "dfinish", /* name */
834   OPTGROUP_NONE, /* optinfo_flags */
835   TV_NONE, /* tv_id */
836   0, /* properties_required */
837   0, /* properties_provided */
838   0, /* properties_destroyed */
839   0, /* todo_flags_start */
840   0, /* todo_flags_finish */
841 };
842 
843 class pass_df_finish : public rtl_opt_pass
844 {
845 public:
846   pass_df_finish (gcc::context *ctxt)
847     : rtl_opt_pass (pass_data_df_finish, ctxt)
848   {}
849 
850   /* opt_pass methods: */
851   virtual unsigned int execute (function *)
852     {
853       return rest_of_handle_df_finish ();
854     }
855 
856 }; // class pass_df_finish
857 
858 } // anon namespace
859 
860 rtl_opt_pass *
861 make_pass_df_finish (gcc::context *ctxt)
862 {
863   return new pass_df_finish (ctxt);
864 }
865 
866 
867 
868 
869 
870 /*----------------------------------------------------------------------------
871    The general data flow analysis engine.
872 ----------------------------------------------------------------------------*/
873 
874 /* Helper function for df_worklist_dataflow.
875    Propagate the dataflow forward.
876    Given a BB_INDEX, do the dataflow propagation
877    and set bits on for successors in PENDING
878    if the out set of the dataflow has changed.
879 
880    AGE specifies the time when BB was last visited.
881    An AGE of 0 means we are visiting for the first time and need to
882    compute the transfer function to initialize data structures.
883    Otherwise we re-do the transfer function only if something changed
884    while computing the confluence functions.
885    We need to compute the confluence functions only for basic blocks
886    that are younger than the last visit of the BB.
887 
888    Return true if BB info has changed.  This is always the case
889    in the first visit.  */
890 
891 static bool
892 df_worklist_propagate_forward (struct dataflow *dataflow,
893                                unsigned bb_index,
894                                unsigned *bbindex_to_postorder,
895                                bitmap pending,
896                                sbitmap considered,
897 			       vec<int> &last_change_age,
898 			       int age)
899 {
900   edge e;
901   edge_iterator ei;
902   basic_block bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
903   bool changed = !age;
904 
905   /*  Calculate <conf_op> of incoming edges.  */
906   if (EDGE_COUNT (bb->preds) > 0)
907     FOR_EACH_EDGE (e, ei, bb->preds)
908       {
909 	if (bbindex_to_postorder[e->src->index] < last_change_age.length ()
910 	    && age <= last_change_age[bbindex_to_postorder[e->src->index]]
911 	    && bitmap_bit_p (considered, e->src->index))
912           changed |= dataflow->problem->con_fun_n (e);
913       }
914   else if (dataflow->problem->con_fun_0)
915     dataflow->problem->con_fun_0 (bb);
916 
917   if (changed
918       && dataflow->problem->trans_fun (bb_index))
919     {
920       /* The out set of this block has changed.
921          Propagate to the outgoing blocks.  */
922       FOR_EACH_EDGE (e, ei, bb->succs)
923         {
924           unsigned ob_index = e->dest->index;
925 
926           if (bitmap_bit_p (considered, ob_index))
927             bitmap_set_bit (pending, bbindex_to_postorder[ob_index]);
928         }
929       return true;
930     }
931   return false;
932 }
933 
934 
935 /* Helper function for df_worklist_dataflow.
936    Propagate the dataflow backward.  */
937 
938 static bool
939 df_worklist_propagate_backward (struct dataflow *dataflow,
940                                 unsigned bb_index,
941                                 unsigned *bbindex_to_postorder,
942                                 bitmap pending,
943                                 sbitmap considered,
944 				vec<int> &last_change_age,
945 				int age)
946 {
947   edge e;
948   edge_iterator ei;
949   basic_block bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
950   bool changed = !age;
951 
952   /*  Calculate <conf_op> of incoming edges.  */
953   if (EDGE_COUNT (bb->succs) > 0)
954     FOR_EACH_EDGE (e, ei, bb->succs)
955       {
956 	if (bbindex_to_postorder[e->dest->index] < last_change_age.length ()
957 	    && age <= last_change_age[bbindex_to_postorder[e->dest->index]]
958 	    && bitmap_bit_p (considered, e->dest->index))
959           changed |= dataflow->problem->con_fun_n (e);
960       }
961   else if (dataflow->problem->con_fun_0)
962     dataflow->problem->con_fun_0 (bb);
963 
964   if (changed
965       && dataflow->problem->trans_fun (bb_index))
966     {
967       /* The out set of this block has changed.
968          Propagate to the outgoing blocks.  */
969       FOR_EACH_EDGE (e, ei, bb->preds)
970         {
971           unsigned ob_index = e->src->index;
972 
973           if (bitmap_bit_p (considered, ob_index))
974             bitmap_set_bit (pending, bbindex_to_postorder[ob_index]);
975         }
976       return true;
977     }
978   return false;
979 }
980 
981 /* Main dataflow solver loop.
982 
983    DATAFLOW is the problem we are solving, PENDING is the worklist of basic
984    blocks we need to visit.
984    need to visit.
985    BLOCKS_IN_POSTORDER is an array of size N_BLOCKS specifying the postorder
986    of the BBs and BBINDEX_TO_POSTORDER is an array mapping a BB->index back
987    to its postorder position.
987    PENDING will be freed.
988 
989    The worklists are bitmaps indexed by postorder positions.
990 
991    The function implements the standard algorithm for dataflow solving with two
992    worklists (we are processing WORKLIST and storing new BBs to visit in
993    PENDING).
994 
995    As an optimization we maintain ages when BB was changed (stored in
996    last_change_age) and when it was last visited (stored in last_visit_age).
997    This avoids the need to re-do the confluence function for edges to basic
998    blocks whose source did not change since the destination was last visited.  */
999 
1000 static void
1001 df_worklist_dataflow_doublequeue (struct dataflow *dataflow,
1002 			  	  bitmap pending,
1003                                   sbitmap considered,
1004                                   int *blocks_in_postorder,
1005 				  unsigned *bbindex_to_postorder,
1006 				  int n_blocks)
1007 {
1008   enum df_flow_dir dir = dataflow->problem->dir;
1009   int dcount = 0;
1010   bitmap worklist = BITMAP_ALLOC (&df_bitmap_obstack);
1011   int age = 0;
1012   bool changed;
1013   vec<int> last_visit_age = vNULL;
1014   vec<int> last_change_age = vNULL;
1015   int prev_age;
1016 
1017   last_visit_age.safe_grow_cleared (n_blocks);
1018   last_change_age.safe_grow_cleared (n_blocks);
1019 
1020   /* Double-queueing. Worklist is for the current iteration,
1021      and pending is for the next. */
1022   while (!bitmap_empty_p (pending))
1023     {
1024       bitmap_iterator bi;
1025       unsigned int index;
1026 
1027       std::swap (pending, worklist);
1028 
1029       EXECUTE_IF_SET_IN_BITMAP (worklist, 0, index, bi)
1030 	{
1031 	  unsigned bb_index;
1032 	  dcount++;
1033 
1034 	  bitmap_clear_bit (pending, index);
1035 	  bb_index = blocks_in_postorder[index];
1036 	  prev_age = last_visit_age[index];
1037 	  if (dir == DF_FORWARD)
1038 	    changed = df_worklist_propagate_forward (dataflow, bb_index,
1039 						     bbindex_to_postorder,
1040 						     pending, considered,
1041 						     last_change_age,
1042 						     prev_age);
1043 	  else
1044 	    changed = df_worklist_propagate_backward (dataflow, bb_index,
1045 						      bbindex_to_postorder,
1046 						      pending, considered,
1047 						      last_change_age,
1048 						      prev_age);
1049 	  last_visit_age[index] = ++age;
1050 	  if (changed)
1051 	    last_change_age[index] = age;
1052 	}
1053       bitmap_clear (worklist);
1054     }
1055 
1056   BITMAP_FREE (worklist);
1057   BITMAP_FREE (pending);
1058   last_visit_age.release ();
1059   last_change_age.release ();
1060 
1061   /* Dump statistics. */
1062   if (dump_file)
1063     fprintf (dump_file, "df_worklist_dataflow_doublequeue:"
1064 	     " n_basic_blocks %d n_edges %d"
1065 	     " count %d (%5.2g)\n",
1066 	     n_basic_blocks_for_fn (cfun), n_edges_for_fn (cfun),
1067 	     dcount, dcount / (double)n_basic_blocks_for_fn (cfun));
1068 }
1069 
1070 /* Worklist-based dataflow solver. It uses sbitmap as a worklist,
1071    with "n"-th bit representing the n-th block in the reverse-postorder order.
1072    The solver is a double-queue algorithm similar to the "double stack" solver
1073    from Cooper, Harvey and Kennedy, "Iterative data-flow analysis, Revisited".
1074    The only significant difference is that the worklist in this implementation
1075    is always sorted in RPO of the CFG visiting direction.  */
1076 
1077 void
1078 df_worklist_dataflow (struct dataflow *dataflow,
1079                       bitmap blocks_to_consider,
1080                       int *blocks_in_postorder,
1081                       int n_blocks)
1082 {
1083   bitmap pending = BITMAP_ALLOC (&df_bitmap_obstack);
1084   bitmap_iterator bi;
1085   unsigned int *bbindex_to_postorder;
1086   int i;
1087   unsigned int index;
1088   enum df_flow_dir dir = dataflow->problem->dir;
1089 
1090   gcc_assert (dir != DF_NONE);
1091 
1092   /* BBINDEX_TO_POSTORDER maps the bb->index to the reverse postorder.  */
1093   bbindex_to_postorder = XNEWVEC (unsigned int,
1094 				  last_basic_block_for_fn (cfun));
1095 
1096   /* Initialize the array to an out-of-bound value.  */
1097   for (i = 0; i < last_basic_block_for_fn (cfun); i++)
1098     bbindex_to_postorder[i] = last_basic_block_for_fn (cfun);
1099 
1100   /* Initialize the considered map.  */
1101   auto_sbitmap considered (last_basic_block_for_fn (cfun));
1102   bitmap_clear (considered);
1103   EXECUTE_IF_SET_IN_BITMAP (blocks_to_consider, 0, index, bi)
1104     {
1105       bitmap_set_bit (considered, index);
1106     }
1107 
1108   /* Initialize the mapping of block index to postorder.  */
1109   for (i = 0; i < n_blocks; i++)
1110     {
1111       bbindex_to_postorder[blocks_in_postorder[i]] = i;
1112       /* Add all blocks to the worklist.  */
1113       bitmap_set_bit (pending, i);
1114     }
1115 
1116   /* Initialize the problem. */
1117   if (dataflow->problem->init_fun)
1118     dataflow->problem->init_fun (blocks_to_consider);
1119 
1120   /* Solve it.  */
1121   df_worklist_dataflow_doublequeue (dataflow, pending, considered,
1122 				    blocks_in_postorder,
1123 				    bbindex_to_postorder,
1124 				    n_blocks);
1125   free (bbindex_to_postorder);
1126 }
1127 
1128 
1129 /* Remove the entries not in BLOCKS from the LIST of length LEN, preserving
1130    the order of the remaining entries.  Returns the length of the resulting
1131    list.  */
1132 
1133 static unsigned
1134 df_prune_to_subcfg (int list[], unsigned len, bitmap blocks)
1135 {
1136   unsigned act, last;
1137 
1138   for (act = 0, last = 0; act < len; act++)
1139     if (bitmap_bit_p (blocks, list[act]))
1140       list[last++] = list[act];
1141 
1142   return last;
1143 }
1144 
1145 
1146 /* Execute dataflow analysis on a single dataflow problem.
1147 
1148    BLOCKS_TO_CONSIDER are the blocks whose solution can either be
1149    examined or will be computed.  For calls from DF_ANALYZE, this is
1150    the set of blocks that has been passed to DF_SET_BLOCKS.
1151 */
1152 
1153 void
1154 df_analyze_problem (struct dataflow *dflow,
1155 		    bitmap blocks_to_consider,
1156 		    int *postorder, int n_blocks)
1157 {
1158   timevar_push (dflow->problem->tv_id);
1159 
1160   /* (Re)Allocate the datastructures necessary to solve the problem.  */
1161   if (dflow->problem->alloc_fun)
1162     dflow->problem->alloc_fun (blocks_to_consider);
1163 
1164 #ifdef ENABLE_DF_CHECKING
1165   if (dflow->problem->verify_start_fun)
1166     dflow->problem->verify_start_fun ();
1167 #endif
1168 
1169   /* Set up the problem and compute the local information.  */
1170   if (dflow->problem->local_compute_fun)
1171     dflow->problem->local_compute_fun (blocks_to_consider);
1172 
1173   /* Solve the equations.  */
1174   if (dflow->problem->dataflow_fun)
1175     dflow->problem->dataflow_fun (dflow, blocks_to_consider,
1176 				  postorder, n_blocks);
1177 
1178   /* Massage the solution.  */
1179   if (dflow->problem->finalize_fun)
1180     dflow->problem->finalize_fun (blocks_to_consider);
1181 
1182 #ifdef ENABLE_DF_CHECKING
1183   if (dflow->problem->verify_end_fun)
1184     dflow->problem->verify_end_fun ();
1185 #endif
1186 
1187   timevar_pop (dflow->problem->tv_id);
1188 
1189   dflow->computed = true;
1190 }
1191 
1192 
1193 /* Analyze dataflow info.  */
1194 
1195 static void
1196 df_analyze_1 (void)
1197 {
1198   int i;
1199 
1200   /* These should be the same.  */
1201   gcc_assert ((unsigned) df->n_blocks == df->postorder_inverted.length ());
1202 
1203   /* We need to do this before df_verify because this is
1204      not kept incrementally up to date.  */
1205   df_compute_regs_ever_live (false);
1206   df_process_deferred_rescans ();
1207 
1208   if (dump_file)
1209     fprintf (dump_file, "df_analyze called\n");
1210 
1211 #ifndef ENABLE_DF_CHECKING
1212   if (df->changeable_flags & DF_VERIFY_SCHEDULED)
1213 #endif
1214     df_verify ();
1215 
1216   /* Skip over the DF_SCAN problem. */
1217   for (i = 1; i < df->num_problems_defined; i++)
1218     {
1219       struct dataflow *dflow = df->problems_in_order[i];
1220       if (dflow->solutions_dirty)
1221         {
1222           if (dflow->problem->dir == DF_FORWARD)
1223             df_analyze_problem (dflow,
1224                                 df->blocks_to_analyze,
1225 				df->postorder_inverted.address (),
1226 				df->postorder_inverted.length ());
1227           else
1228             df_analyze_problem (dflow,
1229                                 df->blocks_to_analyze,
1230                                 df->postorder,
1231                                 df->n_blocks);
1232         }
1233     }
1234 
1235   if (!df->analyze_subset)
1236     {
1237       BITMAP_FREE (df->blocks_to_analyze);
1238       df->blocks_to_analyze = NULL;
1239     }
1240 
1241 #ifdef DF_DEBUG_CFG
1242   df_set_clean_cfg ();
1243 #endif
1244 }
1245 
1246 /* Analyze dataflow info.  */
1247 
1248 void
1249 df_analyze (void)
1250 {
1251   bitmap current_all_blocks = BITMAP_ALLOC (&df_bitmap_obstack);
1252 
1253   free (df->postorder);
1254   df->postorder = XNEWVEC (int, last_basic_block_for_fn (cfun));
1255   df->n_blocks = post_order_compute (df->postorder, true, true);
1256   df->postorder_inverted.truncate (0);
1257   inverted_post_order_compute (&df->postorder_inverted);
1258 
1259   for (int i = 0; i < df->n_blocks; i++)
1260     bitmap_set_bit (current_all_blocks, df->postorder[i]);
1261 
1262   if (flag_checking)
1263     {
1264       /* Verify that POSTORDER_INVERTED only contains blocks reachable from
1265 	 the ENTRY block.  */
1266       for (unsigned int i = 0; i < df->postorder_inverted.length (); i++)
1267 	gcc_assert (bitmap_bit_p (current_all_blocks,
1268 				  df->postorder_inverted[i]));
1269     }
1270 
1271   /* Make sure that we have pruned any unreachable blocks from these
1272      sets.  */
1273   if (df->analyze_subset)
1274     {
1275       bitmap_and_into (df->blocks_to_analyze, current_all_blocks);
1276       df->n_blocks = df_prune_to_subcfg (df->postorder,
1277 					 df->n_blocks, df->blocks_to_analyze);
1278       unsigned int newlen = df_prune_to_subcfg (df->postorder_inverted.address (),
1279 						df->postorder_inverted.length (),
1280 						  df->blocks_to_analyze);
1281       df->postorder_inverted.truncate (newlen);
1282       BITMAP_FREE (current_all_blocks);
1283     }
1284   else
1285     {
1286       df->blocks_to_analyze = current_all_blocks;
1287       current_all_blocks = NULL;
1288     }
1289 
1290   df_analyze_1 ();
1291 }
1292 
1293 /* Compute the reverse top sort order of the sub-CFG specified by LOOP.
1294    Returns the number of blocks which is always loop->num_nodes.  */
1295 
1296 static int
1297 loop_post_order_compute (int *post_order, class loop *loop)
1298 {
1299   edge_iterator *stack;
1300   int sp;
1301   int post_order_num = 0;
1302 
1303   /* Allocate stack for back-tracking up CFG.  */
1304   stack = XNEWVEC (edge_iterator, loop->num_nodes + 1);
1305   sp = 0;
1306 
1307   /* Allocate bitmap to track nodes that have been visited.  */
1308   auto_bitmap visited;
1309 
1310   /* Push the first edge on to the stack.  */
1311   stack[sp++] = ei_start (loop_preheader_edge (loop)->src->succs);
1312 
1313   while (sp)
1314     {
1315       edge_iterator ei;
1316       basic_block src;
1317       basic_block dest;
1318 
1319       /* Look at the edge on the top of the stack.  */
1320       ei = stack[sp - 1];
1321       src = ei_edge (ei)->src;
1322       dest = ei_edge (ei)->dest;
1323 
1324       /* Check if the edge destination has been visited yet and mark it
1325          if not so.  */
1326       if (flow_bb_inside_loop_p (loop, dest)
1327 	  && bitmap_set_bit (visited, dest->index))
1328 	{
1329 	  if (EDGE_COUNT (dest->succs) > 0)
1330 	    /* Since the DEST node has been visited for the first
1331 	       time, check its successors.  */
1332 	    stack[sp++] = ei_start (dest->succs);
1333 	  else
1334 	    post_order[post_order_num++] = dest->index;
1335 	}
1336       else
1337 	{
1338 	  if (ei_one_before_end_p (ei)
1339 	      && src != loop_preheader_edge (loop)->src)
1340 	    post_order[post_order_num++] = src->index;
1341 
1342 	  if (!ei_one_before_end_p (ei))
1343 	    ei_next (&stack[sp - 1]);
1344 	  else
1345 	    sp--;
1346 	}
1347     }
1348 
1349   free (stack);
1350 
1351   return post_order_num;
1352 }
1353 
1354 /* Compute the reverse top sort order of the inverted sub-CFG specified
1355    by LOOP.  The number of blocks pushed onto POST_ORDER is always loop->num_nodes.  */
1356 
1357 static void
1358 loop_inverted_post_order_compute (vec<int> *post_order, class loop *loop)
1359 {
1360   basic_block bb;
1361   edge_iterator *stack;
1362   int sp;
1363 
1364   post_order->reserve_exact (loop->num_nodes);
1365 
1366   /* Allocate stack for back-tracking up CFG.  */
1367   stack = XNEWVEC (edge_iterator, loop->num_nodes + 1);
1368   sp = 0;
1369 
1370   /* Allocate bitmap to track nodes that have been visited.  */
1371   auto_bitmap visited;
1372 
1373   /* Put all latches into the initial work list.  In theory we'd want
1374      to start from loop exits but then we'd have the special case of
1375      endless loops.  It doesn't really matter for DF iteration order and
1376      handling latches last is probably even better.  */
1377   stack[sp++] = ei_start (loop->header->preds);
1378   bitmap_set_bit (visited, loop->header->index);
1379 
1380   /* The inverted traversal loop. */
1381   while (sp)
1382     {
1383       edge_iterator ei;
1384       basic_block pred;
1385 
1386       /* Look at the edge on the top of the stack.  */
1387       ei = stack[sp - 1];
1388       bb = ei_edge (ei)->dest;
1389       pred = ei_edge (ei)->src;
1390 
1391       /* Check if the predecessor has been visited yet and mark it
1392 	 if not so.  */
1393       if (flow_bb_inside_loop_p (loop, pred)
1394 	  && bitmap_set_bit (visited, pred->index))
1395 	{
1396 	  if (EDGE_COUNT (pred->preds) > 0)
1397 	    /* Since the predecessor node has been visited for the first
1398 	       time, check its predecessors.  */
1399 	    stack[sp++] = ei_start (pred->preds);
1400 	  else
1401 	    post_order->quick_push (pred->index);
1402 	}
1403       else
1404 	{
1405 	  if (flow_bb_inside_loop_p (loop, bb)
1406 	      && ei_one_before_end_p (ei))
1407 	    post_order->quick_push (bb->index);
1408 
1409 	  if (!ei_one_before_end_p (ei))
1410 	    ei_next (&stack[sp - 1]);
1411 	  else
1412 	    sp--;
1413 	}
1414     }
1415 
1416   free (stack);
1417 }
1418 
1419 
1420 /* Analyze dataflow info for the basic blocks contained in LOOP.  */
1421 
1422 void
1423 df_analyze_loop (class loop *loop)
1424 {
1425   free (df->postorder);
1426 
1427   df->postorder = XNEWVEC (int, loop->num_nodes);
1428   df->postorder_inverted.truncate (0);
1429   df->n_blocks = loop_post_order_compute (df->postorder, loop);
1430   loop_inverted_post_order_compute (&df->postorder_inverted, loop);
1431   gcc_assert ((unsigned) df->n_blocks == loop->num_nodes);
1432   gcc_assert (df->postorder_inverted.length () == loop->num_nodes);
1433 
1434   bitmap blocks = BITMAP_ALLOC (&df_bitmap_obstack);
1435   for (int i = 0; i < df->n_blocks; ++i)
1436     bitmap_set_bit (blocks, df->postorder[i]);
1437   df_set_blocks (blocks);
1438   BITMAP_FREE (blocks);
1439 
1440   df_analyze_1 ();
1441 }
1442 
1443 
1444 /* Return the number of basic blocks from the last call to df_analyze.  */
1445 
1446 int
1447 df_get_n_blocks (enum df_flow_dir dir)
1448 {
1449   gcc_assert (dir != DF_NONE);
1450 
1451   if (dir == DF_FORWARD)
1452     {
1453       gcc_assert (df->postorder_inverted.length ());
1454       return df->postorder_inverted.length ();
1455     }
1456 
1457   gcc_assert (df->postorder);
1458   return df->n_blocks;
1459 }
1460 
1461 
1462 /* Return a pointer to the array of basic blocks in the reverse postorder.
1463    Depending on the direction of the dataflow problem,
1464    it returns either the usual reverse postorder array
1465    or the reverse postorder of inverted traversal. */
1466 int *
1467 df_get_postorder (enum df_flow_dir dir)
1468 {
1469   gcc_assert (dir != DF_NONE);
1470 
1471   if (dir == DF_FORWARD)
1472     {
1473       gcc_assert (df->postorder_inverted.length ());
1474       return df->postorder_inverted.address ();
1475     }
1476   gcc_assert (df->postorder);
1477   return df->postorder;
1478 }
1479 
1480 static struct df_problem user_problem;
1481 static struct dataflow user_dflow;
1482 
1483 /* Interface for calling iterative dataflow with user defined
1484    confluence and transfer functions.  All that is necessary is to
1485    supply DIR, a direction, INIT_FUN, an initialization function (or NULL),
1486    CONF_FUN_0, a confluence function for blocks with no logical preds (or
1487    NULL), CONF_FUN_N, the normal confluence function, TRANS_FUN, the basic
1488    block transfer function, BLOCKS, the set of blocks to examine, POSTORDER,
1489    the blocks in postorder, and N_BLOCKS, the number of blocks in POSTORDER.  */
1490 
1491 void
1492 df_simple_dataflow (enum df_flow_dir dir,
1493 		    df_init_function init_fun,
1494 		    df_confluence_function_0 con_fun_0,
1495 		    df_confluence_function_n con_fun_n,
1496 		    df_transfer_function trans_fun,
1497 		    bitmap blocks, int * postorder, int n_blocks)
1498 {
1499   memset (&user_problem, 0, sizeof (struct df_problem));
1500   user_problem.dir = dir;
1501   user_problem.init_fun = init_fun;
1502   user_problem.con_fun_0 = con_fun_0;
1503   user_problem.con_fun_n = con_fun_n;
1504   user_problem.trans_fun = trans_fun;
1505   user_dflow.problem = &user_problem;
1506   df_worklist_dataflow (&user_dflow, blocks, postorder, n_blocks);
1507 }
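
/* A hedged sketch of a caller (my_init, my_con_n, my_trans and blocks
   are hypothetical; their types must match the df_init_function,
   df_confluence_function_n and df_transfer_function typedefs in df.h):

     df_simple_dataflow (DF_BACKWARD, my_init, NULL, my_con_n,
                         my_trans, blocks,
                         df_get_postorder (DF_BACKWARD),
                         df_get_n_blocks (DF_BACKWARD));

   Passing NULL for CON_FUN_0 is allowed, as noted above.  The caller
   owns whatever per-block solution sets its functions maintain; this
   interface only drives the worklist iteration over them.  */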
1508 
1509 
1510 
1511 /*----------------------------------------------------------------------------
1512    Functions to support limited incremental change.
1513 ----------------------------------------------------------------------------*/
1514 
1515 
1516 /* Get basic block info.  */
1517 
1518 static void *
1519 df_get_bb_info (struct dataflow *dflow, unsigned int index)
1520 {
1521   if (dflow->block_info == NULL)
1522     return NULL;
1523   if (index >= dflow->block_info_size)
1524     return NULL;
1525   return (void *)((char *)dflow->block_info
1526 		  + index * dflow->problem->block_info_elt_size);
1527 }
1528 
1529 
1530 /* Set basic block info.  */
1531 
1532 static void
1533 df_set_bb_info (struct dataflow *dflow, unsigned int index,
1534 		void *bb_info)
1535 {
1536   gcc_assert (dflow->block_info);
1537   memcpy ((char *)dflow->block_info
1538 	  + index * dflow->problem->block_info_elt_size,
1539 	  bb_info, dflow->problem->block_info_elt_size);
1540 }
1541 
1542 
1543 /* Clear basic block info.  */
1544 
1545 static void
1546 df_clear_bb_info (struct dataflow *dflow, unsigned int index)
1547 {
1548   gcc_assert (dflow->block_info);
1549   gcc_assert (dflow->block_info_size > index);
1550   memset ((char *)dflow->block_info
1551 	  + index * dflow->problem->block_info_elt_size,
1552 	  0, dflow->problem->block_info_elt_size);
1553 }
1554 
1555 
1556 /* Mark the solutions as being out of date.  */
1557 
1558 void
1559 df_mark_solutions_dirty (void)
1560 {
1561   if (df)
1562     {
1563       int p;
1564       for (p = 1; p < df->num_problems_defined; p++)
1565 	df->problems_in_order[p]->solutions_dirty = true;
1566     }
1567 }
1568 
1569 
1570 /* Return true if BB needs its transfer functions recomputed.  */
1571 
1572 bool
1573 df_get_bb_dirty (basic_block bb)
1574 {
1575   return bitmap_bit_p ((df_live
1576 			? df_live : df_lr)->out_of_date_transfer_functions,
1577 		       bb->index);
1578 }
1579 
1580 
1581 /* Mark BB as needing its transfer functions recomputed, i.e. as
1582    out of date.  */
1583 
1584 void
1585 df_set_bb_dirty (basic_block bb)
1586 {
1587   bb->flags |= BB_MODIFIED;
1588   if (df)
1589     {
1590       int p;
1591       for (p = 1; p < df->num_problems_defined; p++)
1592 	{
1593 	  struct dataflow *dflow = df->problems_in_order[p];
1594 	  if (dflow->out_of_date_transfer_functions)
1595 	    bitmap_set_bit (dflow->out_of_date_transfer_functions, bb->index);
1596 	}
1597       df_mark_solutions_dirty ();
1598     }
1599 }
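
/* A hedged sketch of the intended use: code that changes a block in a
   way the incremental scanning does not track can mark the block dirty
   by hand and let the next analysis rebuild only what is out of date:

     ... modify BB or the edges around it ...
     df_set_bb_dirty (bb);
     ...
     df_analyze ();

   Only the transfer functions of dirty blocks are recomputed.  */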
1600 
1601 
1602 /* Grow the bb_info array.  */
1603 
1604 void
1605 df_grow_bb_info (struct dataflow *dflow)
1606 {
1607   unsigned int new_size = last_basic_block_for_fn (cfun) + 1;
1608   if (dflow->block_info_size < new_size)
1609     {
1610       new_size += new_size / 4;
1611       dflow->block_info
1612          = (void *)XRESIZEVEC (char, (char *)dflow->block_info,
1613 			       new_size
1614 			       * dflow->problem->block_info_elt_size);
1615       memset ((char *)dflow->block_info
1616 	      + dflow->block_info_size
1617 	      * dflow->problem->block_info_elt_size,
1618 	      0,
1619 	      (new_size - dflow->block_info_size)
1620 	      * dflow->problem->block_info_elt_size);
1621       dflow->block_info_size = new_size;
1622     }
1623 }
1624 
1625 
1626 /* Clear the dirty bits.  This is called from places that delete
1627    blocks.  */
1628 static void
1629 df_clear_bb_dirty (basic_block bb)
1630 {
1631   int p;
1632   for (p = 1; p < df->num_problems_defined; p++)
1633     {
1634       struct dataflow *dflow = df->problems_in_order[p];
1635       if (dflow->out_of_date_transfer_functions)
1636 	bitmap_clear_bit (dflow->out_of_date_transfer_functions, bb->index);
1637     }
1638 }
1639 
1640 /* Called from rtl_compact_blocks to reorganize the problems' basic
1641    block info.  */
1642 
1643 void
1644 df_compact_blocks (void)
1645 {
1646   int i, p;
1647   basic_block bb;
1648   void *problem_temps;
1649 
1650   auto_bitmap tmp (&df_bitmap_obstack);
1651   for (p = 0; p < df->num_problems_defined; p++)
1652     {
1653       struct dataflow *dflow = df->problems_in_order[p];
1654 
1655       /* Need to reorganize the out_of_date_transfer_functions for the
1656 	 dflow problem.  */
1657       if (dflow->out_of_date_transfer_functions)
1658 	{
1659 	  bitmap_copy (tmp, dflow->out_of_date_transfer_functions);
1660 	  bitmap_clear (dflow->out_of_date_transfer_functions);
1661 	  if (bitmap_bit_p (tmp, ENTRY_BLOCK))
1662 	    bitmap_set_bit (dflow->out_of_date_transfer_functions, ENTRY_BLOCK);
1663 	  if (bitmap_bit_p (tmp, EXIT_BLOCK))
1664 	    bitmap_set_bit (dflow->out_of_date_transfer_functions, EXIT_BLOCK);
1665 
1666 	  i = NUM_FIXED_BLOCKS;
1667 	  FOR_EACH_BB_FN (bb, cfun)
1668 	    {
1669 	      if (bitmap_bit_p (tmp, bb->index))
1670 		bitmap_set_bit (dflow->out_of_date_transfer_functions, i);
1671 	      i++;
1672 	    }
1673 	}
1674 
1675       /* Now shuffle the block info for the problem.  */
1676       if (dflow->problem->free_bb_fun)
1677 	{
1678 	  int size = (last_basic_block_for_fn (cfun)
1679 		      * dflow->problem->block_info_elt_size);
1680 	  problem_temps = XNEWVAR (char, size);
1681 	  df_grow_bb_info (dflow);
1682 	  memcpy (problem_temps, dflow->block_info, size);
1683 
1684 	  /* Copy the bb info from the problem tmps to the proper
1685 	     place in the block_info vector.  Null out the copied
1686 	     item.  The entry and exit blocks never move.  */
1687 	  i = NUM_FIXED_BLOCKS;
1688 	  FOR_EACH_BB_FN (bb, cfun)
1689 	    {
1690 	      df_set_bb_info (dflow, i,
1691 			      (char *)problem_temps
1692 			      + bb->index * dflow->problem->block_info_elt_size);
1693 	      i++;
1694 	    }
1695 	  memset ((char *)dflow->block_info
1696 		  + i * dflow->problem->block_info_elt_size, 0,
1697 		  (last_basic_block_for_fn (cfun) - i)
1698 		  * dflow->problem->block_info_elt_size);
1699 	  free (problem_temps);
1700 	}
1701     }
1702 
1703   /* Shuffle the bits in the basic_block indexed arrays.  */
1704 
1705   if (df->blocks_to_analyze)
1706     {
1707       if (bitmap_bit_p (tmp, ENTRY_BLOCK))
1708 	bitmap_set_bit (df->blocks_to_analyze, ENTRY_BLOCK);
1709       if (bitmap_bit_p (tmp, EXIT_BLOCK))
1710 	bitmap_set_bit (df->blocks_to_analyze, EXIT_BLOCK);
1711       bitmap_copy (tmp, df->blocks_to_analyze);
1712       bitmap_clear (df->blocks_to_analyze);
1713       i = NUM_FIXED_BLOCKS;
1714       FOR_EACH_BB_FN (bb, cfun)
1715 	{
1716 	  if (bitmap_bit_p (tmp, bb->index))
1717 	    bitmap_set_bit (df->blocks_to_analyze, i);
1718 	  i++;
1719 	}
1720     }
1721 
1722   i = NUM_FIXED_BLOCKS;
1723   FOR_EACH_BB_FN (bb, cfun)
1724     {
1725       SET_BASIC_BLOCK_FOR_FN (cfun, i, bb);
1726       bb->index = i;
1727       i++;
1728     }
1729 
1730   gcc_assert (i == n_basic_blocks_for_fn (cfun));
1731 
1732   for (; i < last_basic_block_for_fn (cfun); i++)
1733     SET_BASIC_BLOCK_FOR_FN (cfun, i, NULL);
1734 
1735 #ifdef DF_DEBUG_CFG
1736   if (!df_lr->solutions_dirty)
1737     df_set_clean_cfg ();
1738 #endif
1739 }
1740 
1741 
1742 /* Shove NEW_BLOCK in at OLD_INDEX.  Called from ifcvt to hack a
1743    block.  There is no excuse for people to do this kind of thing.  */
1744 
1745 void
1746 df_bb_replace (int old_index, basic_block new_block)
1747 {
1748   int new_block_index = new_block->index;
1749   int p;
1750 
1751   if (dump_file)
1752     fprintf (dump_file, "shoving block %d into %d\n", new_block_index, old_index);
1753 
1754   gcc_assert (df);
1755   gcc_assert (BASIC_BLOCK_FOR_FN (cfun, old_index) == NULL);
1756 
1757   for (p = 0; p < df->num_problems_defined; p++)
1758     {
1759       struct dataflow *dflow = df->problems_in_order[p];
1760       if (dflow->block_info)
1761 	{
1762 	  df_grow_bb_info (dflow);
1763 	  df_set_bb_info (dflow, old_index,
1764 			  df_get_bb_info (dflow, new_block_index));
1765 	}
1766     }
1767 
1768   df_clear_bb_dirty (new_block);
1769   SET_BASIC_BLOCK_FOR_FN (cfun, old_index, new_block);
1770   new_block->index = old_index;
1771   df_set_bb_dirty (BASIC_BLOCK_FOR_FN (cfun, old_index));
1772   SET_BASIC_BLOCK_FOR_FN (cfun, new_block_index, NULL);
1773 }
1774 
1775 
1776 /* Free all of the per basic block dataflow from all of the problems.
1777    This is typically called before a basic block is deleted and the
1778    problem will be reanalyzed.  */
1779 
1780 void
1781 df_bb_delete (int bb_index)
1782 {
1783   basic_block bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
1784   int i;
1785 
1786   if (!df)
1787     return;
1788 
1789   for (i = 0; i < df->num_problems_defined; i++)
1790     {
1791       struct dataflow *dflow = df->problems_in_order[i];
1792       if (dflow->problem->free_bb_fun)
1793 	{
1794 	  void *bb_info = df_get_bb_info (dflow, bb_index);
1795 	  if (bb_info)
1796 	    {
1797 	      dflow->problem->free_bb_fun (bb, bb_info);
1798 	      df_clear_bb_info (dflow, bb_index);
1799 	    }
1800 	}
1801     }
1802   df_clear_bb_dirty (bb);
1803   df_mark_solutions_dirty ();
1804 }
1805 
1806 
1807 /* Verify that there is a place for everything and everything is in
1808    its place.  This is too expensive to run after every pass in the
1809    mainline.  However this is an excellent debugging tool if the
1810    dataflow information is not being updated properly.  You can just
1811    sprinkle calls in until you find the place that is changing an
1812    underlying structure without calling the proper updating
1813    routine.  */
1814 
1815 void
1816 df_verify (void)
1817 {
1818   df_scan_verify ();
1819 #ifdef ENABLE_DF_CHECKING
1820   df_lr_verify_transfer_functions ();
1821   if (df_live)
1822     df_live_verify_transfer_functions ();
1823 #endif
1824   df->changeable_flags &= ~DF_VERIFY_SCHEDULED;
1825 }
1826 
1827 #ifdef DF_DEBUG_CFG
1828 
1829 /* Compute an array of ints that describes the cfg.  This can be used
1830    to discover places where the cfg is modified but the appropriate
1831    calls have not been made to keep df informed.  The internals of
1832    this are unexciting; the key is that two instances of the image
1833    can be compared to see if any changes have been made to the cfg.  */
1834 
1835 static int *
1836 df_compute_cfg_image (void)
1837 {
1838   basic_block bb;
1839   int size = 2 + (2 * n_basic_blocks_for_fn (cfun));
1840   int i;
1841   int * map;
1842 
1843   FOR_ALL_BB_FN (bb, cfun)
1844     {
1845       size += EDGE_COUNT (bb->succs);
1846     }
1847 
1848   map = XNEWVEC (int, size);
1849   map[0] = size;
1850   i = 1;
1851   FOR_ALL_BB_FN (bb, cfun)
1852     {
1853       edge_iterator ei;
1854       edge e;
1855 
1856       map[i++] = bb->index;
1857       FOR_EACH_EDGE (e, ei, bb->succs)
1858 	map[i++] = e->dest->index;
1859       map[i++] = -1;
1860     }
1861   map[i] = -1;
1862   return map;
1863 }
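
/* As an illustration, for a hypothetical function whose only real
   blocks are 2 and 3, with edges entry->2, 2->3 and 3->exit, the image
   built above would be

     { 13,               total size in ints
       0, 2, -1,         entry block (index 0) and its successor
       2, 3, -1,         block 2 and its successor
       3, 1, -1,         block 3 and its successor (the exit block)
       1, -1,            exit block (index 1), no successors
       -1 }

   so df_check_cfg_clean below can detect any reordering or edge change
   with a single memcmp.  */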
1864 
1865 static int *saved_cfg = NULL;
1866 
1867 
1868 /* This function compares the saved version of the cfg with the
1869    current cfg and aborts if the two differ.  The function silently
1870    returns if the cfg has been marked as dirty or if no image has
1871    been saved.  */
1872 
1873 void
1874 df_check_cfg_clean (void)
1875 {
1876   int *new_map;
1877 
1878   if (!df)
1879     return;
1880 
1881   if (df_lr->solutions_dirty)
1882     return;
1883 
1884   if (saved_cfg == NULL)
1885     return;
1886 
1887   new_map = df_compute_cfg_image ();
1888   gcc_assert (memcmp (saved_cfg, new_map, saved_cfg[0] * sizeof (int)) == 0);
1889   free (new_map);
1890 }
1891 
1892 
1893 /* This function builds a cfg fingerprint and squirrels it away in
1894    saved_cfg.  */
1895 
1896 static void
1897 df_set_clean_cfg (void)
1898 {
1899   free (saved_cfg);
1900   saved_cfg = df_compute_cfg_image ();
1901 }
1902 
1903 #endif /* DF_DEBUG_CFG  */
1904 /*----------------------------------------------------------------------------
1905    PUBLIC INTERFACES TO QUERY INFORMATION.
1906 ----------------------------------------------------------------------------*/
1907 
1908 
1909 /* Return first def of REGNO within BB.  */
1910 
1911 df_ref
1912 df_bb_regno_first_def_find (basic_block bb, unsigned int regno)
1913 {
1914   rtx_insn *insn;
1915   df_ref def;
1916 
1917   FOR_BB_INSNS (bb, insn)
1918     {
1919       if (!INSN_P (insn))
1920 	continue;
1921 
1922       FOR_EACH_INSN_DEF (def, insn)
1923 	if (DF_REF_REGNO (def) == regno)
1924 	  return def;
1925     }
1926   return NULL;
1927 }
1928 
1929 
1930 /* Return last def of REGNO within BB.  */
1931 
1932 df_ref
1933 df_bb_regno_last_def_find (basic_block bb, unsigned int regno)
1934 {
1935   rtx_insn *insn;
1936   df_ref def;
1937 
1938   FOR_BB_INSNS_REVERSE (bb, insn)
1939     {
1940       if (!INSN_P (insn))
1941 	continue;
1942 
1943       FOR_EACH_INSN_DEF (def, insn)
1944 	if (DF_REF_REGNO (def) == regno)
1945 	  return def;
1946     }
1947 
1948   return NULL;
1949 }
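
/* A hedged sketch of a typical query (REGNO is a hypothetical register
   number a pass is tracking):

     df_ref def = df_bb_regno_last_def_find (bb, regno);
     if (def && !(DF_REF_FLAGS (def) & DF_REF_PARTIAL))
       {
         rtx_insn *last_set = DF_REF_INSN (def);
         ... LAST_SET is the last insn in BB that defines REGNO, and
             that definition is not a partial one ...
       }

   Both functions simply walk the scanned def records of BB's insns, so
   they are linear in the number of insns in the block.  */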
1950 
1951 /* Find the reference corresponding to the definition of REG in INSN,
1952    or NULL if there is none.  */
1953 
1954 df_ref
1955 df_find_def (rtx_insn *insn, rtx reg)
1956 {
1957   df_ref def;
1958 
1959   if (GET_CODE (reg) == SUBREG)
1960     reg = SUBREG_REG (reg);
1961   gcc_assert (REG_P (reg));
1962 
1963   FOR_EACH_INSN_DEF (def, insn)
1964     if (DF_REF_REGNO (def) == REGNO (reg))
1965       return def;
1966 
1967   return NULL;
1968 }
1969 
1970 
1971 /* Return true if REG is defined in INSN, false otherwise.  */
1972 
1973 bool
1974 df_reg_defined (rtx_insn *insn, rtx reg)
1975 {
1976   return df_find_def (insn, reg) != NULL;
1977 }
1978 
1979 
1980 /* Find the reference corresponding to the use of REG in INSN, or
1981    NULL if there is none.  */
1982 
1983 df_ref
1984 df_find_use (rtx_insn *insn, rtx reg)
1985 {
1986   df_ref use;
1987 
1988   if (GET_CODE (reg) == SUBREG)
1989     reg = SUBREG_REG (reg);
1990   gcc_assert (REG_P (reg));
1991 
1992   df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
1993   FOR_EACH_INSN_INFO_USE (use, insn_info)
1994     if (DF_REF_REGNO (use) == REGNO (reg))
1995       return use;
1996   if (df->changeable_flags & DF_EQ_NOTES)
1997     FOR_EACH_INSN_INFO_EQ_USE (use, insn_info)
1998       if (DF_REF_REGNO (use) == REGNO (reg))
1999 	return use;
2000   return NULL;
2001 }
2002 
2003 
2004 /* Return true if REG is used in INSN, false otherwise.  */
2005 
2006 bool
2007 df_reg_used (rtx_insn *insn, rtx reg)
2008 {
2009   return df_find_use (insn, reg) != NULL;
2010 }
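
/* A hedged sketch of these per-insn queries (INSN and REG are
   hypothetical values a pass already has):

     if (df_reg_used (insn, reg) && !df_reg_defined (insn, reg))
       {
         df_ref use = df_find_use (insn, reg);
         ... INSN reads REG without writing it; USE is the first
             matching use record ...
       }

   As the code above shows, df_find_use also searches the uses recorded
   for REG_EQUAL/REG_EQUIV notes when DF_EQ_NOTES is set in
   df->changeable_flags.  */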
2011 
2012 
2013 /*----------------------------------------------------------------------------
2014    Debugging and printing functions.
2015 ----------------------------------------------------------------------------*/
2016 
2017 /* Write the registers in regset R to OUTF.  This is part of making
2018    a debugging dump.  */
2019 
2020 void
2021 dump_regset (regset r, FILE *outf)
2022 {
2023   unsigned i;
2024   reg_set_iterator rsi;
2025 
2026   if (r == NULL)
2027     {
2028       fputs (" (nil)", outf);
2029       return;
2030     }
2031 
2032   EXECUTE_IF_SET_IN_REG_SET (r, 0, i, rsi)
2033     {
2034       fprintf (outf, " %d", i);
2035       if (i < FIRST_PSEUDO_REGISTER)
2036 	fprintf (outf, " [%s]",
2037 		 reg_names[i]);
2038     }
2039 }
2040 
2041 /* Print a human-readable representation of R on the standard error
2042    stream.  This function is designed to be used from within the
2043    debugger.  */
2044 extern void debug_regset (regset);
2045 DEBUG_FUNCTION void
2046 debug_regset (regset r)
2047 {
2048   dump_regset (r, stderr);
2049   putc ('\n', stderr);
2050 }
2051 
2052 /* Write the registers in bitmap R to FILE.  This is part of making
2053    a debugging dump.  */
2054 
2055 void
2056 df_print_regset (FILE *file, const_bitmap r)
2057 {
2058   unsigned int i;
2059   bitmap_iterator bi;
2060 
2061   if (r == NULL)
2062     fputs (" (nil)", file);
2063   else
2064     {
2065       EXECUTE_IF_SET_IN_BITMAP (r, 0, i, bi)
2066 	{
2067 	  fprintf (file, " %d", i);
2068 	  if (i < FIRST_PSEUDO_REGISTER)
2069 	    fprintf (file, " [%s]", reg_names[i]);
2070 	}
2071     }
2072   fprintf (file, "\n");
2073 }
2074 
2075 
2076 /* Write the registers in bitmap R to FILE.  The bitmap is in the
2077    two-bits-per-register form used by the word-level LR problem
2078    (df_word_lr).  This is part of making a debugging dump.  */
2079 
2080 void
2081 df_print_word_regset (FILE *file, const_bitmap r)
2082 {
2083   unsigned int max_reg = max_reg_num ();
2084 
2085   if (r == NULL)
2086     fputs (" (nil)", file);
2087   else
2088     {
2089       unsigned int i;
2090       for (i = FIRST_PSEUDO_REGISTER; i < max_reg; i++)
2091 	{
2092 	  bool found = (bitmap_bit_p (r, 2 * i)
2093 			|| bitmap_bit_p (r, 2 * i + 1));
2094 	  if (found)
2095 	    {
2096 	      int word;
2097 	      const char * sep = "";
2098 	      fprintf (file, " %d", i);
2099 	      fprintf (file, "(");
2100 	      for (word = 0; word < 2; word++)
2101 		if (bitmap_bit_p (r, 2 * i + word))
2102 		  {
2103 		    fprintf (file, "%s%d", sep, word);
2104 		    sep = ", ";
2105 		  }
2106 	      fprintf (file, ")");
2107 	    }
2108 	}
2109     }
2110   fprintf (file, "\n");
2111 }
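
/* For example, output such as " 130(0) 131(0, 1)" from the function
   above means that word 0 of pseudo 130 and both words of pseudo 131
   are members of the set.  */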
2112 
2113 
2114 /* Dump dataflow info.  */
2115 
2116 void
2117 df_dump (FILE *file)
2118 {
2119   basic_block bb;
2120   df_dump_start (file);
2121 
2122   FOR_ALL_BB_FN (bb, cfun)
2123     {
2124       df_print_bb_index (bb, file);
2125       df_dump_top (bb, file);
2126       df_dump_bottom (bb, file);
2127     }
2128 
2129   fprintf (file, "\n");
2130 }
2131 
2132 
2133 /* Dump dataflow info for df->blocks_to_analyze.  */
2134 
2135 void
2136 df_dump_region (FILE *file)
2137 {
2138   if (df->blocks_to_analyze)
2139     {
2140       bitmap_iterator bi;
2141       unsigned int bb_index;
2142 
2143       fprintf (file, "\n\nstarting region dump\n");
2144       df_dump_start (file);
2145 
2146       EXECUTE_IF_SET_IN_BITMAP (df->blocks_to_analyze, 0, bb_index, bi)
2147 	{
2148 	  basic_block bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
2149 	  dump_bb (file, bb, 0, TDF_DETAILS);
2150 	}
2151       fprintf (file, "\n");
2152     }
2153   else
2154     df_dump (file);
2155 }
2156 
2157 
2158 /* Dump the introductory information for each problem defined.  */
2159 
2160 void
2161 df_dump_start (FILE *file)
2162 {
2163   int i;
2164 
2165   if (!df || !file)
2166     return;
2167 
2168   fprintf (file, "\n\n%s\n", current_function_name ());
2169   fprintf (file, "\nDataflow summary:\n");
2170   if (df->blocks_to_analyze)
2171     fprintf (file, "def_info->table_size = %d, use_info->table_size = %d\n",
2172 	     DF_DEFS_TABLE_SIZE (), DF_USES_TABLE_SIZE ());
2173 
2174   for (i = 0; i < df->num_problems_defined; i++)
2175     {
2176       struct dataflow *dflow = df->problems_in_order[i];
2177       if (dflow->computed)
2178 	{
2179 	  df_dump_problem_function fun = dflow->problem->dump_start_fun;
2180 	  if (fun)
2181 	    fun (file);
2182 	}
2183     }
2184 }
2185 
2186 
2187 /* Dump the top or bottom of the block information for BB.  */
2188 static void
2189 df_dump_bb_problem_data (basic_block bb, FILE *file, bool top)
2190 {
2191   int i;
2192 
2193   if (!df || !file)
2194     return;
2195 
2196   for (i = 0; i < df->num_problems_defined; i++)
2197     {
2198       struct dataflow *dflow = df->problems_in_order[i];
2199       if (dflow->computed)
2200 	{
2201 	  df_dump_bb_problem_function bbfun;
2202 
2203 	  if (top)
2204 	    bbfun = dflow->problem->dump_top_fun;
2205 	  else
2206 	    bbfun = dflow->problem->dump_bottom_fun;
2207 
2208 	  if (bbfun)
2209 	    bbfun (bb, file);
2210 	}
2211     }
2212 }
2213 
2214 /* Dump the top of the block information for BB.  */
2215 
2216 void
2217 df_dump_top (basic_block bb, FILE *file)
2218 {
2219   df_dump_bb_problem_data (bb, file, /*top=*/true);
2220 }
2221 
2222 /* Dump the bottom of the block information for BB.  */
2223 
2224 void
2225 df_dump_bottom (basic_block bb, FILE *file)
2226 {
2227   df_dump_bb_problem_data (bb, file, /*top=*/false);
2228 }
2229 
2230 
2231 /* Dump information about INSN just before or after dumping INSN itself.  */
2232 static void
2233 df_dump_insn_problem_data (const rtx_insn *insn, FILE *file, bool top)
2234 {
2235   int i;
2236 
2237   if (!df || !file)
2238     return;
2239 
2240   for (i = 0; i < df->num_problems_defined; i++)
2241     {
2242       struct dataflow *dflow = df->problems_in_order[i];
2243       if (dflow->computed)
2244 	{
2245 	  df_dump_insn_problem_function insnfun;
2246 
2247 	  if (top)
2248 	    insnfun = dflow->problem->dump_insn_top_fun;
2249 	  else
2250 	    insnfun = dflow->problem->dump_insn_bottom_fun;
2251 
2252 	  if (insnfun)
2253 	    insnfun (insn, file);
2254 	}
2255     }
2256 }
2257 
2258 /* Dump information about INSN before dumping INSN itself.  */
2259 
2260 void
2261 df_dump_insn_top (const rtx_insn *insn, FILE *file)
2262 {
2263   df_dump_insn_problem_data (insn,  file, /*top=*/true);
2264 }
2265 
2266 /* Dump information about INSN after dumping INSN itself.  */
2267 
2268 void
2269 df_dump_insn_bottom (const rtx_insn *insn, FILE *file)
2270 {
2271   df_dump_insn_problem_data (insn,  file, /*top=*/false);
2272 }
2273 
2274 
2275 static void
2276 df_ref_dump (df_ref ref, FILE *file)
2277 {
2278   fprintf (file, "%c%d(%d)",
2279 	   DF_REF_REG_DEF_P (ref)
2280 	   ? 'd'
2281 	   : (DF_REF_FLAGS (ref) & DF_REF_IN_NOTE) ? 'e' : 'u',
2282 	   DF_REF_ID (ref),
2283 	   DF_REF_REGNO (ref));
2284 }
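
/* For example, "d45(7)" printed above denotes def number 45 of
   register 7, "u3(128)" use number 3 of register 128, and an 'e'
   prefix marks a use that appears only in a REG_EQUAL/REG_EQUIV
   note.  */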
2285 
2286 void
2287 df_refs_chain_dump (df_ref ref, bool follow_chain, FILE *file)
2288 {
2289   fprintf (file, "{ ");
2290   for (; ref; ref = DF_REF_NEXT_LOC (ref))
2291     {
2292       df_ref_dump (ref, file);
2293       if (follow_chain)
2294 	df_chain_dump (DF_REF_CHAIN (ref), file);
2295     }
2296   fprintf (file, "}");
2297 }
2298 
2299 
2300 /* Dump either a reg-def or reg-use chain.  */
2301 
2302 void
2303 df_regs_chain_dump (df_ref ref,  FILE *file)
2304 {
2305   fprintf (file, "{ ");
2306   while (ref)
2307     {
2308       df_ref_dump (ref, file);
2309       ref = DF_REF_NEXT_REG (ref);
2310     }
2311   fprintf (file, "}");
2312 }
2313 
2314 
2315 static void
2316 df_mws_dump (struct df_mw_hardreg *mws, FILE *file)
2317 {
2318   for (; mws; mws = DF_MWS_NEXT (mws))
2319     fprintf (file, "mw %c r[%d..%d]\n",
2320 	     DF_MWS_REG_DEF_P (mws) ? 'd' : 'u',
2321 	     mws->start_regno, mws->end_regno);
2322 }
2323 
2324 
2325 static void
2326 df_insn_uid_debug (unsigned int uid,
2327 		   bool follow_chain, FILE *file)
2328 {
2329   fprintf (file, "insn %d luid %d",
2330 	   uid, DF_INSN_UID_LUID (uid));
2331 
2332   if (DF_INSN_UID_DEFS (uid))
2333     {
2334       fprintf (file, " defs ");
2335       df_refs_chain_dump (DF_INSN_UID_DEFS (uid), follow_chain, file);
2336     }
2337 
2338   if (DF_INSN_UID_USES (uid))
2339     {
2340       fprintf (file, " uses ");
2341       df_refs_chain_dump (DF_INSN_UID_USES (uid), follow_chain, file);
2342     }
2343 
2344   if (DF_INSN_UID_EQ_USES (uid))
2345     {
2346       fprintf (file, " eq uses ");
2347       df_refs_chain_dump (DF_INSN_UID_EQ_USES (uid), follow_chain, file);
2348     }
2349 
2350   if (DF_INSN_UID_MWS (uid))
2351     {
2352       fprintf (file, " mws ");
2353       df_mws_dump (DF_INSN_UID_MWS (uid), file);
2354     }
2355   fprintf (file, "\n");
2356 }
2357 
2358 
2359 DEBUG_FUNCTION void
2360 df_insn_debug (rtx_insn *insn, bool follow_chain, FILE *file)
2361 {
2362   df_insn_uid_debug (INSN_UID (insn), follow_chain, file);
2363 }
2364 
2365 DEBUG_FUNCTION void
2366 df_insn_debug_regno (rtx_insn *insn, FILE *file)
2367 {
2368   struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
2369 
2370   fprintf (file, "insn %d bb %d luid %d defs ",
2371 	   INSN_UID (insn), BLOCK_FOR_INSN (insn)->index,
2372 	   DF_INSN_INFO_LUID (insn_info));
2373   df_refs_chain_dump (DF_INSN_INFO_DEFS (insn_info), false, file);
2374 
2375   fprintf (file, " uses ");
2376   df_refs_chain_dump (DF_INSN_INFO_USES (insn_info), false, file);
2377 
2378   fprintf (file, " eq_uses ");
2379   df_refs_chain_dump (DF_INSN_INFO_EQ_USES (insn_info), false, file);
2380   fprintf (file, "\n");
2381 }
2382 
2383 DEBUG_FUNCTION void
2384 df_regno_debug (unsigned int regno, FILE *file)
2385 {
2386   fprintf (file, "reg %d defs ", regno);
2387   df_regs_chain_dump (DF_REG_DEF_CHAIN (regno), file);
2388   fprintf (file, " uses ");
2389   df_regs_chain_dump (DF_REG_USE_CHAIN (regno), file);
2390   fprintf (file, " eq_uses ");
2391   df_regs_chain_dump (DF_REG_EQ_USE_CHAIN (regno), file);
2392   fprintf (file, "\n");
2393 }
2394 
2395 
2396 DEBUG_FUNCTION void
2397 df_ref_debug (df_ref ref, FILE *file)
2398 {
2399   fprintf (file, "%c%d ",
2400 	   DF_REF_REG_DEF_P (ref) ? 'd' : 'u',
2401 	   DF_REF_ID (ref));
2402   fprintf (file, "reg %d bb %d insn %d flag %#x type %#x ",
2403 	   DF_REF_REGNO (ref),
2404 	   DF_REF_BBNO (ref),
2405 	   DF_REF_IS_ARTIFICIAL (ref) ? -1 : DF_REF_INSN_UID (ref),
2406 	   DF_REF_FLAGS (ref),
2407 	   DF_REF_TYPE (ref));
2408   if (DF_REF_LOC (ref))
2409     {
2410       if (flag_dump_noaddr)
2411 	fprintf (file, "loc #(#) chain ");
2412       else
2413 	fprintf (file, "loc %p(%p) chain ", (void *)DF_REF_LOC (ref),
2414 		 (void *)*DF_REF_LOC (ref));
2415     }
2416   else
2417     fprintf (file, "chain ");
2418   df_chain_dump (DF_REF_CHAIN (ref), file);
2419   fprintf (file, "\n");
2420 }
2421 
2422 /* Functions for debugging from GDB.  */
2423 
2424 DEBUG_FUNCTION void
2425 debug_df_insn (rtx_insn *insn)
2426 {
2427   df_insn_debug (insn, true, stderr);
2428   debug_rtx (insn);
2429 }
2430 
2431 
2432 DEBUG_FUNCTION void
2433 debug_df_reg (rtx reg)
2434 {
2435   df_regno_debug (REGNO (reg), stderr);
2436 }
2437 
2438 
2439 DEBUG_FUNCTION void
2440 debug_df_regno (unsigned int regno)
2441 {
2442   df_regno_debug (regno, stderr);
2443 }
2444 
2445 
2446 DEBUG_FUNCTION void
2447 debug_df_ref (df_ref ref)
2448 {
2449   df_ref_debug (ref, stderr);
2450 }
2451 
2452 
2453 DEBUG_FUNCTION void
2454 debug_df_defno (unsigned int defno)
2455 {
2456   df_ref_debug (DF_DEFS_GET (defno), stderr);
2457 }
2458 
2459 
2460 DEBUG_FUNCTION void
2461 debug_df_useno (unsigned int defno)
2462 {
2463   df_ref_debug (DF_USES_GET (defno), stderr);
2464 }
2465 
2466 
2467 DEBUG_FUNCTION void
2468 debug_df_chain (struct df_link *link)
2469 {
2470   df_chain_dump (link, stderr);
2471   fputc ('\n', stderr);
2472 }
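
/* For example, from a gdb session one can print the scanned
   information for an insn or a register number with

     (gdb) call debug_df_insn (insn)
     (gdb) call debug_df_regno (42)

   where insn is a live rtx_insn pointer in the debugged compiler and
   42 is a hypothetical register number.  */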
2473