1 /* Allocation for dataflow support routines.
2    Copyright (C) 1999-2018 Free Software Foundation, Inc.
3    Originally contributed by Michael P. Hayes
4              (m.hayes@elec.canterbury.ac.nz, mhayes@redhat.com)
5    Major rewrite contributed by Danny Berlin (dberlin@dberlin.org)
6              and Kenneth Zadeck (zadeck@naturalbridge.com).
7 
8 This file is part of GCC.
9 
10 GCC is free software; you can redistribute it and/or modify it under
11 the terms of the GNU General Public License as published by the Free
12 Software Foundation; either version 3, or (at your option) any later
13 version.
14 
15 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
16 WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
18 for more details.
19 
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING3.  If not see
22 <http://www.gnu.org/licenses/>.  */
23 
24 /*
25 OVERVIEW:
26 
The files in this collection (df*.c, df.h) provide a general framework
for solving dataflow problems.  The global dataflow analysis is
performed with an efficient iterative solver.
30 
The file df-problems.c provides problem instances for the most common
dataflow problems: reaching defs, upward exposed uses, live variables,
uninitialized variables, def-use chains, and use-def chains.  However,
the interface allows other dataflow problems to be defined as well.
35 
36 Dataflow analysis is available in most of the rtl backend (the parts
37 between pass_df_initialize and pass_df_finish).  It is quite likely
38 that these boundaries will be expanded in the future.  The only
39 requirement is that there be a correct control flow graph.
40 
There are three variations of the live variable problem that are
available whenever dataflow is available.  The LR problem finds the
areas that can reach a use of a variable; the UR problem finds the
areas that can be reached from a definition of a variable.  The LIVE
problem finds the intersection of these two areas.
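
As an illustrative sketch (not code taken from this file), a pass that
has the LR or LIVE problem available can ask whether register REGNO is
live on exit from block BB like this:

      if (bitmap_bit_p (df_get_live_out (bb), regno))
        ...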
46 
47 There are several optional problems.  These can be enabled when they
48 are needed and disabled when they are not needed.
49 
Dataflow problems are generally solved in three layers.  The bottom
layer is called scanning, where a data structure is built for each rtl
insn that describes the set of defs and uses of that insn.  Scanning
is generally kept up to date, i.e. as an insn changes, the scanned
version of that insn changes also.  There are various mechanisms for
making this happen; they are described in the INCREMENTAL SCANNING
section.
57 
In the middle layer, basic blocks are scanned to produce transfer
functions which describe the effects of that block on the global
dataflow solution.  The transfer functions are only rebuilt if some
instruction within the block has changed.
62 
The top layer is the dataflow solution itself.  The dataflow solution
is computed by using an efficient iterative solver and the transfer
functions.  The dataflow solution must be recomputed whenever the
control flow changes or one of the transfer functions changes.
67 
68 
69 USAGE:
70 
71 Here is an example of using the dataflow routines.
72 
73       df_[chain,live,note,rd]_add_problem (flags);
74 
75       df_set_blocks (blocks);
76 
77       df_analyze ();
78 
79       df_dump (stderr);
80 
81       df_finish_pass (false);
82 
DF_[chain,live,note,rd]_ADD_PROBLEM adds a problem, defined by an
instance of struct df_problem, to the set of problems solved in this
instance of df.  All calls to add a problem for a given instance of df
must occur before the first call to DF_ANALYZE.
87 
Problems can be dependent on other problems.  For instance, solving
def-use or use-def chains is dependent on solving reaching
definitions.  As long as these dependencies are listed in the problem
definition, the order of adding the problems is not material.
Otherwise, the problems will be solved in the order of calls to
df_add_problem.  Note that it is not necessary to add any problem at
all; in that case, df will just be used to do the scanning.
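
For example (a sketch only, not code from this file), a pass that
wants def-use chains might do:

      df_chain_add_problem (DF_DU_CHAIN);
      df_analyze ();
      ...
      df_finish_pass (false);

Because the chain problem lists reaching definitions as its dependent
problem, the RD problem is added automatically.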
95 
96 
97 
DF_SET_BLOCKS is an optional call used to define a region of the
function on which the analysis will be performed.  The normal case is
to analyze the entire function, in which case no call to df_set_blocks
is made.  DF_SET_BLOCKS only affects which blocks are considered when
computing the transfer functions and the final solution.  The insn
level information is always kept up to date.
104 
When a subset is given, the analysis behaves as if the function only
contains those blocks and any edges that occur directly between the
blocks in the set.  Care should be taken to call df_set_blocks right
before the call to df_analyze in order to eliminate the possibility
that optimizations that reorder blocks invalidate the bitvector.
110 
DF_ANALYZE causes all of the defined problems to be (re)solved.  When
DF_ANALYZE completes, the IN and OUT sets for each basic block contain
the computed information.  The DF_*_BB_INFO macros can be used to
access these bitvectors.  All deferred rescans are done before the
transfer functions are recomputed.
116 
DF_DUMP can then be called to dump the information produced to some
file.  This calls DF_DUMP_START, to print the information that is not
basic block specific, and then calls DF_DUMP_TOP and DF_DUMP_BOTTOM
for each block to print the basic block specific information.  These
parts can all be called separately as part of a larger dump function.
122 
123 
DF_FINISH_PASS causes df_remove_problem to be called on all of the
optional problems.  It also causes any insns whose scanning has been
deferred to be rescanned, and it clears all of the changeable flags.
Setting the pass manager TODO_df_finish flag causes this function to
be run.  However, the pass manager will call df_finish_pass AFTER the
pass dumping has been done, so if you want to see the results of the
optional problems in the pass dumps, use the TODO flag rather than
calling the function yourself.
132 
133 INCREMENTAL SCANNING
134 
135 There are four ways of doing the incremental scanning:
136 
1) Immediate rescanning - Calls to df_insn_rescan, df_notes_rescan,
   df_bb_delete, df_insn_change_bb have been added to most of
   the low level service functions that maintain the cfg and change
   rtl.  Calling any of these routines may cause some number of insns
   to be rescanned.
142 
   For most modern rtl passes, this is certainly the easiest way to
   manage rescanning the insns.  This technique also has the advantage
   that the scanning information is always correct and can be relied
   upon even after changes have been made to the instructions.  This
   technique is contraindicated in several cases:
148 
149    a) If def-use chains OR use-def chains (but not both) are built,
150       using this is SIMPLY WRONG.  The problem is that when a ref is
151       deleted that is the target of an edge, there is not enough
152       information to efficiently find the source of the edge and
153       delete the edge.  This leaves a dangling reference that may
154       cause problems.
155 
156    b) If def-use chains AND use-def chains are built, this may
157       produce unexpected results.  The problem is that the incremental
158       scanning of an insn does not know how to repair the chains that
159       point into an insn when the insn changes.  So the incremental
160       scanning just deletes the chains that enter and exit the insn
161       being changed.  The dangling reference issue in (a) is not a
162       problem here, but if the pass is depending on the chains being
163       maintained after insns have been modified, this technique will
164       not do the correct thing.
165 
166    c) If the pass modifies insns several times, this incremental
167       updating may be expensive.
168 
169    d) If the pass modifies all of the insns, as does register
170       allocation, it is simply better to rescan the entire function.
171 
172 2) Deferred rescanning - Calls to df_insn_rescan, df_notes_rescan, and
173    df_insn_delete do not immediately change the insn but instead make
174    a note that the insn needs to be rescanned.  The next call to
175    df_analyze, df_finish_pass, or df_process_deferred_rescans will
176    cause all of the pending rescans to be processed.
177 
178    This is the technique of choice if either 1a, 1b, or 1c are issues
179    in the pass.  In the case of 1a or 1b, a call to df_finish_pass
180    (either manually or via TODO_df_finish) should be made before the
181    next call to df_analyze or df_process_deferred_rescans.
182 
183    This mode is also used by a few passes that still rely on note_uses,
184    note_stores and rtx iterators instead of using the DF data.  This
185    can be said to fall under case 1c.
186 
   To enable this mode, call df_set_flags (DF_DEFER_INSN_RESCAN).
   (This mode can be cleared by calling df_clear_flags
   (DF_DEFER_INSN_RESCAN), but this does not cause the deferred insns
   to be rescanned.)  A short sketch of this mode appears after this
   list.
191 
3) Total rescanning - In this mode the rescanning is disabled.
   Only when insns are deleted is the df information associated with
   them also deleted.  At the end of the pass, a call must be made to
   df_insn_rescan_all.  This method is used by the register allocator
   since it generally changes each insn multiple times (once for each ref)
   and does not need to make use of the updated scanning information.
198 
199 4) Do it yourself - In this mechanism, the pass updates the insns
200    itself using the low level df primitives.  Currently no pass does
201    this, but it has the advantage that it is quite efficient given
202    that the pass generally has exact knowledge of what it is changing.
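
As a sketch of the deferred mode (2) above (illustrative only), rescan
requests are queued and processed at the next call to
df_process_deferred_rescans, df_analyze, or df_finish_pass:

      df_set_flags (DF_DEFER_INSN_RESCAN);
      ...
      df_insn_rescan (insn);
      ...
      df_process_deferred_rescans ();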
203 
204 DATA STRUCTURES
205 
Scanning allocates a `struct df_ref' data structure (ref) for every
register reference (def or use); the ref records the insn and bb
within which it is found.  The refs are linked together in chains of
uses and defs for each insn and for each register.  Each ref also has
a chain field that links all the use refs for a def or all the def
refs for a use.  This is used to create use-def or def-use chains.
213 
Different optimizations have different needs.  Ultimately, only
register allocation and schedulers should be using the bitmaps
produced for the live register and uninitialized register problems.
The rest of the backend should be upgraded to using and maintaining
the linked information such as def-use or use-def chains.
219 
220 
221 PHILOSOPHY:
222 
223 While incremental bitmaps are not worthwhile to maintain, incremental
224 chains may be perfectly reasonable.  The fastest way to build chains
225 from scratch or after significant modifications is to build reaching
226 definitions (RD) and build the chains from this.
227 
However, general algorithms for maintaining use-def or def-use chains
are not practical.  The amount of work needed to recompute any chain
after an arbitrary change is large.  However, with a modest amount of
work it is generally possible to have the application that uses the
chains keep them up to date.  The high level knowledge of what is
really happening is essential to crafting efficient incremental
algorithms.
235 
As for the bit vector problems, there is no interface to give a set of
blocks over which to resolve the iteration.  In general, restarting a
dataflow iteration is difficult and expensive.  Again, the best way to
keep the dataflow information up to date (if this is really what is
needed) is to formulate a problem specific solution.
241 
242 There are fine grained calls for creating and deleting references from
243 instructions in df-scan.c.  However, these are not currently connected
244 to the engine that resolves the dataflow equations.
245 
246 
247 DATA STRUCTURES:
248 
249 The basic object is a DF_REF (reference) and this may either be a
250 DEF (definition) or a USE of a register.
251 
252 These are linked into a variety of lists; namely reg-def, reg-use,
253 insn-def, insn-use, def-use, and use-def lists.  For example, the
254 reg-def lists contain all the locations that define a given register
255 while the insn-use lists contain all the locations that use a
256 register.
257 
258 Note that the reg-def and reg-use chains are generally short for
259 pseudos and long for the hard registers.
260 
261 ACCESSING INSNS:
262 
263 1) The df insn information is kept in an array of DF_INSN_INFO objects.
264    The array is indexed by insn uid, and every DF_REF points to the
265    DF_INSN_INFO object of the insn that contains the reference.
266 
267 2) Each insn has three sets of refs, which are linked into one of three
268    lists: The insn's defs list (accessed by the DF_INSN_INFO_DEFS,
269    DF_INSN_DEFS, or DF_INSN_UID_DEFS macros), the insn's uses list
270    (accessed by the DF_INSN_INFO_USES, DF_INSN_USES, or
271    DF_INSN_UID_USES macros) or the insn's eq_uses list (accessed by the
272    DF_INSN_INFO_EQ_USES, DF_INSN_EQ_USES or DF_INSN_UID_EQ_USES macros).
   The latter list holds the references that appear in REG_EQUAL or
   REG_EQUIV notes.  These macros produce a ref (or NULL); the rest of
   the list can be obtained by traversing the NEXT_REF field (accessed
   by the DF_REF_NEXT_REF macro).  There is no significance to the
   ordering of the uses or refs in an instruction.  A traversal sketch
   follows this list.
278 
3) Each insn has a logical uid field (LUID) which is stored in the
   DF_INSN_INFO object for the insn.  The LUID field is accessed by
   the DF_INSN_INFO_LUID, DF_INSN_LUID, and DF_INSN_UID_LUID macros.
   When properly set, the LUID is an integer that numbers each insn in
   the basic block, in order from the start of the block.
   The numbers are only correct after a call to df_analyze.  They
   become stale after insns are added, deleted, or moved around.
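
   As an illustrative sketch, walking all of the defs of an insn and
   reading its LUID might look like this (process_def is hypothetical):

      struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
      int luid = DF_INSN_INFO_LUID (insn_info);
      df_ref def;
      for (def = DF_INSN_INFO_DEFS (insn_info); def;
           def = DF_REF_NEXT_REF (def))
        process_def (luid, DF_REF_REGNO (def));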
286 
287 ACCESSING REFS:
288 
289 There are 4 ways to obtain access to refs:
290 
291 1) References are divided into two categories, REAL and ARTIFICIAL.
292 
293    REAL refs are associated with instructions.
294 
295    ARTIFICIAL refs are associated with basic blocks.  The heads of
296    these lists can be accessed by calling df_get_artificial_defs or
297    df_get_artificial_uses for the particular basic block.
298 
299    Artificial defs and uses occur both at the beginning and ends of blocks.
300 
301      For blocks that are at the destination of eh edges, the
302      artificial uses and defs occur at the beginning.  The defs relate
303      to the registers specified in EH_RETURN_DATA_REGNO and the uses
304      relate to the registers specified in EH_USES.  Logically these
305      defs and uses should really occur along the eh edge, but there is
306      no convenient way to do this.  Artificial defs that occur at the
307      beginning of the block have the DF_REF_AT_TOP flag set.
308 
309      Artificial uses occur at the end of all blocks.  These arise from
310      the hard registers that are always live, such as the stack
311      register and are put there to keep the code from forgetting about
312      them.
313 
314      Artificial defs occur at the end of the entry block.  These arise
315      from registers that are live at entry to the function.
316 
317 2) There are three types of refs: defs, uses and eq_uses.  (Eq_uses are
318    uses that appear inside a REG_EQUAL or REG_EQUIV note.)
319 
320    All of the eq_uses, uses and defs associated with each pseudo or
321    hard register may be linked in a bidirectional chain.  These are
   called reg-use or reg-def chains.  If the changeable flag
323    DF_EQ_NOTES is set when the chains are built, the eq_uses will be
324    treated like uses.  If it is not set they are ignored.
325 
326    The first use, eq_use or def for a register can be obtained using
327    the DF_REG_USE_CHAIN, DF_REG_EQ_USE_CHAIN or DF_REG_DEF_CHAIN
328    macros.  Subsequent uses for the same regno can be obtained by
329    following the next_reg field of the ref.  The number of elements in
330    each of the chains can be found by using the DF_REG_USE_COUNT,
331    DF_REG_EQ_USE_COUNT or DF_REG_DEF_COUNT macros.
332 
333    In previous versions of this code, these chains were ordered.  It
334    has not been practical to continue this practice.
335 
336 3) If def-use or use-def chains are built, these can be traversed to
337    get to other refs.  If the flag DF_EQ_NOTES has been set, the chains
338    include the eq_uses.  Otherwise these are ignored when building the
339    chains.
340 
4) An array of all of the uses (and an array of all of the defs) can
   be built.  These arrays are indexed by the value in the id field of
   each ref.  These arrays are only lazily kept up to date, and that
   process can be expensive.  To have these arrays built, call
   df_reorganize_defs or df_reorganize_uses.  If the flag DF_EQ_NOTES
   has been set, the array will contain the eq_uses; otherwise these
   are ignored when building the array and assigning the ids.  Note
   that the values in the id field of a ref may change across calls to
   df_analyze, df_reorganize_defs, or df_reorganize_uses.

   If the only use of this array is to find all of the refs, it is
   better to traverse all of the registers and then traverse all of the
   reg-use or reg-def chains, as sketched below.
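
   For example (a sketch, not code from this file; visit_def is
   hypothetical), visiting every def of every register:

      for (unsigned int regno = 0;
           regno < (unsigned int) max_reg_num (); regno++)
        for (df_ref def = DF_REG_DEF_CHAIN (regno); def;
             def = DF_REF_NEXT_REG (def))
          visit_def (def);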
354 
355 NOTES:
356 
357 Embedded addressing side-effects, such as POST_INC or PRE_INC, generate
358 both a use and a def.  These are both marked read/write to show that they
359 are dependent. For example, (set (reg 40) (mem (post_inc (reg 42))))
360 will generate a use of reg 42 followed by a def of reg 42 (both marked
361 read/write).  Similarly, (set (reg 40) (mem (pre_dec (reg 41))))
362 generates a use of reg 41 then a def of reg 41 (both marked read/write),
363 even though reg 41 is decremented before it is used for the memory
364 address in this second example.
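
A pass can detect such refs by testing the read/write flag, roughly
like this (sketch only):

      if (DF_REF_FLAGS (ref) & DF_REF_READ_WRITE)
        ...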
365 
366 A set to a REG inside a ZERO_EXTRACT, or a set to a non-paradoxical SUBREG
367 for which the number of word_mode units covered by the outer mode is
368 smaller than that covered by the inner mode, invokes a read-modify-write
369 operation.  We generate both a use and a def and again mark them
370 read/write.
371 
372 Paradoxical subreg writes do not leave a trace of the old content, so they
373 are write-only operations.
374 */
375 
376 
377 #include "config.h"
378 #include "system.h"
379 #include "coretypes.h"
380 #include "backend.h"
381 #include "rtl.h"
382 #include "df.h"
383 #include "memmodel.h"
384 #include "emit-rtl.h"
385 #include "cfganal.h"
386 #include "tree-pass.h"
387 #include "cfgloop.h"
388 
389 static void *df_get_bb_info (struct dataflow *, unsigned int);
390 static void df_set_bb_info (struct dataflow *, unsigned int, void *);
391 static void df_clear_bb_info (struct dataflow *, unsigned int);
392 #ifdef DF_DEBUG_CFG
393 static void df_set_clean_cfg (void);
394 #endif
395 
396 /* The obstack on which regsets are allocated.  */
397 struct bitmap_obstack reg_obstack;
398 
/* An obstack for bitmaps not related to specific dataflow problems.
   This obstack should e.g. be used for bitmaps with a short lifetime
   such as temporary bitmaps.  */
402 
403 bitmap_obstack df_bitmap_obstack;
404 
405 
406 /*----------------------------------------------------------------------------
407   Functions to create, destroy and manipulate an instance of df.
408 ----------------------------------------------------------------------------*/
409 
410 struct df_d *df;
411 
412 /* Add PROBLEM (and any dependent problems) to the DF instance.  */
413 
414 void
df_add_problem (const struct df_problem *problem)
416 {
417   struct dataflow *dflow;
418   int i;
419 
420   /* First try to add the dependent problem. */
421   if (problem->dependent_problem)
422     df_add_problem (problem->dependent_problem);
423 
424   /* Check to see if this problem has already been defined.  If it
425      has, just return that instance, if not, add it to the end of the
426      vector.  */
427   dflow = df->problems_by_index[problem->id];
428   if (dflow)
429     return;
430 
431   /* Make a new one and add it to the end.  */
432   dflow = XCNEW (struct dataflow);
433   dflow->problem = problem;
434   dflow->computed = false;
435   dflow->solutions_dirty = true;
436   df->problems_by_index[dflow->problem->id] = dflow;
437 
438   /* Keep the defined problems ordered by index.  This solves the
439      problem that RI will use the information from UREC if UREC has
440      been defined, or from LIVE if LIVE is defined and otherwise LR.
441      However for this to work, the computation of RI must be pushed
442      after which ever of those problems is defined, but we do not
443      require any of those except for LR to have actually been
444      defined.  */
445   df->num_problems_defined++;
446   for (i = df->num_problems_defined - 2; i >= 0; i--)
447     {
448       if (problem->id < df->problems_in_order[i]->problem->id)
449 	df->problems_in_order[i+1] = df->problems_in_order[i];
450       else
451 	{
452 	  df->problems_in_order[i+1] = dflow;
453 	  return;
454 	}
455     }
456   df->problems_in_order[0] = dflow;
457 }
458 
459 
/* Set the CHANGEABLE_FLAGS bits in the df instance.  The old flags are
   returned.  If a flag is not allowed to be changed this will fail if
   checking is enabled.  */
463 int
df_set_flags (int changeable_flags)
465 {
466   int old_flags = df->changeable_flags;
467   df->changeable_flags |= changeable_flags;
468   return old_flags;
469 }
470 
471 
/* Clear the CHANGEABLE_FLAGS bits in the df instance.  The old flags
   are returned.  If a flag is not allowed to be changed this will fail
   if checking is enabled.  */
475 int
df_clear_flags (int changeable_flags)
477 {
478   int old_flags = df->changeable_flags;
479   df->changeable_flags &= ~changeable_flags;
480   return old_flags;
481 }
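
/* A typical (illustrative) use of the two routines above is to set a
   flag for the duration of a pass and restore the previous state
   afterwards:

      int saved = df_set_flags (DF_NO_INSN_RESCAN);
      ...
      if (!(saved & DF_NO_INSN_RESCAN))
	df_clear_flags (DF_NO_INSN_RESCAN);  */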
482 
483 
/* Set the blocks that are to be considered for analysis.  If this is
   not called or is called with null, the entire function is
   analyzed.  */
487 
488 void
df_set_blocks (bitmap blocks)
490 {
491   if (blocks)
492     {
493       if (dump_file)
494 	bitmap_print (dump_file, blocks, "setting blocks to analyze ", "\n");
495       if (df->blocks_to_analyze)
496 	{
497 	  /* This block is called to change the focus from one subset
498 	     to another.  */
499 	  int p;
500 	  auto_bitmap diff (&df_bitmap_obstack);
501 	  bitmap_and_compl (diff, df->blocks_to_analyze, blocks);
502 	  for (p = 0; p < df->num_problems_defined; p++)
503 	    {
504 	      struct dataflow *dflow = df->problems_in_order[p];
505 	      if (dflow->optional_p && dflow->problem->reset_fun)
506 		dflow->problem->reset_fun (df->blocks_to_analyze);
507 	      else if (dflow->problem->free_blocks_on_set_blocks)
508 		{
509 		  bitmap_iterator bi;
510 		  unsigned int bb_index;
511 
512 		  EXECUTE_IF_SET_IN_BITMAP (diff, 0, bb_index, bi)
513 		    {
514 		      basic_block bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
515 		      if (bb)
516 			{
517 			  void *bb_info = df_get_bb_info (dflow, bb_index);
518 			  dflow->problem->free_bb_fun (bb, bb_info);
519 			  df_clear_bb_info (dflow, bb_index);
520 			}
521 		    }
522 		}
523 	    }
524 	}
525       else
526 	{
527 	  /* This block of code is executed to change the focus from
528 	     the entire function to a subset.  */
529 	  bitmap_head blocks_to_reset;
530 	  bool initialized = false;
531 	  int p;
532 	  for (p = 0; p < df->num_problems_defined; p++)
533 	    {
534 	      struct dataflow *dflow = df->problems_in_order[p];
535 	      if (dflow->optional_p && dflow->problem->reset_fun)
536 		{
537 		  if (!initialized)
538 		    {
539 		      basic_block bb;
540 		      bitmap_initialize (&blocks_to_reset, &df_bitmap_obstack);
		      FOR_ALL_BB_FN (bb, cfun)
			{
			  bitmap_set_bit (&blocks_to_reset, bb->index);
			}
		      initialized = true;
		    }
546 		  dflow->problem->reset_fun (&blocks_to_reset);
547 		}
548 	    }
549 	  if (initialized)
550 	    bitmap_clear (&blocks_to_reset);
551 
552 	  df->blocks_to_analyze = BITMAP_ALLOC (&df_bitmap_obstack);
553 	}
554       bitmap_copy (df->blocks_to_analyze, blocks);
555       df->analyze_subset = true;
556     }
557   else
558     {
559       /* This block is executed to reset the focus to the entire
560 	 function.  */
561       if (dump_file)
562 	fprintf (dump_file, "clearing blocks_to_analyze\n");
563       if (df->blocks_to_analyze)
564 	{
565 	  BITMAP_FREE (df->blocks_to_analyze);
566 	  df->blocks_to_analyze = NULL;
567 	}
568       df->analyze_subset = false;
569     }
570 
571   /* Setting the blocks causes the refs to be unorganized since only
572      the refs in the blocks are seen.  */
573   df_maybe_reorganize_def_refs (DF_REF_ORDER_NO_TABLE);
574   df_maybe_reorganize_use_refs (DF_REF_ORDER_NO_TABLE);
575   df_mark_solutions_dirty ();
576 }
577 
578 
579 /* Delete a DFLOW problem (and any problems that depend on this
580    problem).  */
581 
582 void
df_remove_problem (struct dataflow *dflow)
584 {
585   const struct df_problem *problem;
586   int i;
587 
588   if (!dflow)
589     return;
590 
591   problem = dflow->problem;
592   gcc_assert (problem->remove_problem_fun);
593 
594   /* Delete any problems that depended on this problem first.  */
595   for (i = 0; i < df->num_problems_defined; i++)
596     if (df->problems_in_order[i]->problem->dependent_problem == problem)
597       df_remove_problem (df->problems_in_order[i]);
598 
599   /* Now remove this problem.  */
600   for (i = 0; i < df->num_problems_defined; i++)
601     if (df->problems_in_order[i] == dflow)
602       {
603 	int j;
604 	for (j = i + 1; j < df->num_problems_defined; j++)
605 	  df->problems_in_order[j-1] = df->problems_in_order[j];
606 	df->problems_in_order[j-1] = NULL;
607 	df->num_problems_defined--;
608 	break;
609       }
610 
611   (problem->remove_problem_fun) ();
612   df->problems_by_index[problem->id] = NULL;
613 }
614 
615 
616 /* Remove all of the problems that are not permanent.  Scanning, LR
617    and (at -O2 or higher) LIVE are permanent, the rest are removable.
618    Also clear all of the changeable_flags.  */
619 
620 void
df_finish_pass (bool verify ATTRIBUTE_UNUSED)
622 {
623   int i;
624 
625 #ifdef ENABLE_DF_CHECKING
626   int saved_flags;
627 #endif
628 
629   if (!df)
630     return;
631 
632   df_maybe_reorganize_def_refs (DF_REF_ORDER_NO_TABLE);
633   df_maybe_reorganize_use_refs (DF_REF_ORDER_NO_TABLE);
634 
635 #ifdef ENABLE_DF_CHECKING
636   saved_flags = df->changeable_flags;
637 #endif
638 
  /* We iterate over problems by index as each problem removed will
     cause problems_in_order to be reordered.  */
641   for (i = 0; i < DF_LAST_PROBLEM_PLUS1; i++)
642     {
643       struct dataflow *dflow = df->problems_by_index[i];
644 
645       if (dflow && dflow->optional_p)
646 	df_remove_problem (dflow);
647     }
648 
649   /* Clear all of the flags.  */
650   df->changeable_flags = 0;
651   df_process_deferred_rescans ();
652 
653   /* Set the focus back to the whole function.  */
654   if (df->blocks_to_analyze)
655     {
656       BITMAP_FREE (df->blocks_to_analyze);
657       df->blocks_to_analyze = NULL;
658       df_mark_solutions_dirty ();
659       df->analyze_subset = false;
660     }
661 
662 #ifdef ENABLE_DF_CHECKING
663   /* Verification will fail in DF_NO_INSN_RESCAN.  */
664   if (!(saved_flags & DF_NO_INSN_RESCAN))
665     {
666       df_lr_verify_transfer_functions ();
667       if (df_live)
668 	df_live_verify_transfer_functions ();
669     }
670 
671 #ifdef DF_DEBUG_CFG
672   df_set_clean_cfg ();
673 #endif
674 #endif
675 
676   if (flag_checking && verify)
677     df->changeable_flags |= DF_VERIFY_SCHEDULED;
678 }
679 
680 
681 /* Set up the dataflow instance for the entire back end.  */
682 
683 static unsigned int
rest_of_handle_df_initialize (void)
685 {
686   gcc_assert (!df);
687   df = XCNEW (struct df_d);
688   df->changeable_flags = 0;
689 
690   bitmap_obstack_initialize (&df_bitmap_obstack);
691 
692   /* Set this to a conservative value.  Stack_ptr_mod will compute it
693      correctly later.  */
694   crtl->sp_is_unchanging = 0;
695 
696   df_scan_add_problem ();
697   df_scan_alloc (NULL);
698 
699   /* These three problems are permanent.  */
700   df_lr_add_problem ();
701   if (optimize > 1)
702     df_live_add_problem ();
703 
704   df->postorder = XNEWVEC (int, last_basic_block_for_fn (cfun));
705   df->n_blocks = post_order_compute (df->postorder, true, true);
706   inverted_post_order_compute (&df->postorder_inverted);
707   gcc_assert ((unsigned) df->n_blocks == df->postorder_inverted.length ());
708 
709   df->hard_regs_live_count = XCNEWVEC (unsigned int, FIRST_PSEUDO_REGISTER);
710 
711   df_hard_reg_init ();
712   /* After reload, some ports add certain bits to regs_ever_live so
713      this cannot be reset.  */
714   df_compute_regs_ever_live (true);
715   df_scan_blocks ();
716   df_compute_regs_ever_live (false);
717   return 0;
718 }
719 
720 
721 namespace {
722 
723 const pass_data pass_data_df_initialize_opt =
724 {
725   RTL_PASS, /* type */
726   "dfinit", /* name */
727   OPTGROUP_NONE, /* optinfo_flags */
728   TV_DF_SCAN, /* tv_id */
729   0, /* properties_required */
730   0, /* properties_provided */
731   0, /* properties_destroyed */
732   0, /* todo_flags_start */
733   0, /* todo_flags_finish */
734 };
735 
736 class pass_df_initialize_opt : public rtl_opt_pass
737 {
738 public:
  pass_df_initialize_opt (gcc::context *ctxt)
740     : rtl_opt_pass (pass_data_df_initialize_opt, ctxt)
741   {}
742 
743   /* opt_pass methods: */
  virtual bool gate (function *) { return optimize > 0; }
  virtual unsigned int execute (function *)
746     {
747       return rest_of_handle_df_initialize ();
748     }
749 
750 }; // class pass_df_initialize_opt
751 
752 } // anon namespace
753 
754 rtl_opt_pass *
make_pass_df_initialize_opt (gcc::context *ctxt)
756 {
757   return new pass_df_initialize_opt (ctxt);
758 }
759 
760 
761 namespace {
762 
763 const pass_data pass_data_df_initialize_no_opt =
764 {
765   RTL_PASS, /* type */
766   "no-opt dfinit", /* name */
767   OPTGROUP_NONE, /* optinfo_flags */
768   TV_DF_SCAN, /* tv_id */
769   0, /* properties_required */
770   0, /* properties_provided */
771   0, /* properties_destroyed */
772   0, /* todo_flags_start */
773   0, /* todo_flags_finish */
774 };
775 
776 class pass_df_initialize_no_opt : public rtl_opt_pass
777 {
778 public:
  pass_df_initialize_no_opt (gcc::context *ctxt)
780     : rtl_opt_pass (pass_data_df_initialize_no_opt, ctxt)
781   {}
782 
783   /* opt_pass methods: */
  virtual bool gate (function *) { return optimize == 0; }
  virtual unsigned int execute (function *)
786     {
787       return rest_of_handle_df_initialize ();
788     }
789 
790 }; // class pass_df_initialize_no_opt
791 
792 } // anon namespace
793 
794 rtl_opt_pass *
make_pass_df_initialize_no_opt (gcc::context *ctxt)
796 {
797   return new pass_df_initialize_no_opt (ctxt);
798 }
799 
800 
801 /* Free all the dataflow info and the DF structure.  This should be
802    called from the df_finish macro which also NULLs the parm.  */
803 
804 static unsigned int
rest_of_handle_df_finish (void)
806 {
807   int i;
808 
809   gcc_assert (df);
810 
811   for (i = 0; i < df->num_problems_defined; i++)
812     {
813       struct dataflow *dflow = df->problems_in_order[i];
814       dflow->problem->free_fun ();
815     }
816 
817   free (df->postorder);
818   df->postorder_inverted.release ();
819   free (df->hard_regs_live_count);
820   free (df);
821   df = NULL;
822 
823   bitmap_obstack_release (&df_bitmap_obstack);
824   return 0;
825 }
826 
827 
828 namespace {
829 
830 const pass_data pass_data_df_finish =
831 {
832   RTL_PASS, /* type */
833   "dfinish", /* name */
834   OPTGROUP_NONE, /* optinfo_flags */
835   TV_NONE, /* tv_id */
836   0, /* properties_required */
837   0, /* properties_provided */
838   0, /* properties_destroyed */
839   0, /* todo_flags_start */
840   0, /* todo_flags_finish */
841 };
842 
843 class pass_df_finish : public rtl_opt_pass
844 {
845 public:
  pass_df_finish (gcc::context *ctxt)
847     : rtl_opt_pass (pass_data_df_finish, ctxt)
848   {}
849 
850   /* opt_pass methods: */
  virtual unsigned int execute (function *)
852     {
853       return rest_of_handle_df_finish ();
854     }
855 
856 }; // class pass_df_finish
857 
858 } // anon namespace
859 
860 rtl_opt_pass *
make_pass_df_finish (gcc::context *ctxt)
862 {
863   return new pass_df_finish (ctxt);
864 }
865 
866 
867 
868 
869 
870 /*----------------------------------------------------------------------------
871    The general data flow analysis engine.
872 ----------------------------------------------------------------------------*/
873 
/* Return the time at which BB was last changed.  */
875 #define BB_LAST_CHANGE_AGE(bb) ((ptrdiff_t)(bb)->aux)
876 
877 /* Helper function for df_worklist_dataflow.
878    Propagate the dataflow forward.
879    Given a BB_INDEX, do the dataflow propagation
880    and set bits on for successors in PENDING
881    if the out set of the dataflow has changed.
882 
   AGE specifies the time when BB was last visited.
   An AGE of 0 means we are visiting for the first time and need to
   compute the transfer function to initialize the data structures.
   Otherwise we re-do the transfer function only if something changed
   while computing the confluence functions.
   We need to compute the confluence functions only for basic blocks
   that are younger than the last visit of BB.
890 
891    Return true if BB info has changed.  This is always the case
892    in the first visit.  */
893 
894 static bool
df_worklist_propagate_forward (struct dataflow *dataflow,
                               unsigned bb_index,
                               unsigned *bbindex_to_postorder,
                               bitmap pending,
                               sbitmap considered,
			       ptrdiff_t age)
901 {
902   edge e;
903   edge_iterator ei;
904   basic_block bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
905   bool changed = !age;
906 
907   /*  Calculate <conf_op> of incoming edges.  */
908   if (EDGE_COUNT (bb->preds) > 0)
909     FOR_EACH_EDGE (e, ei, bb->preds)
910       {
911         if (age <= BB_LAST_CHANGE_AGE (e->src)
912 	    && bitmap_bit_p (considered, e->src->index))
913           changed |= dataflow->problem->con_fun_n (e);
914       }
915   else if (dataflow->problem->con_fun_0)
916     dataflow->problem->con_fun_0 (bb);
917 
918   if (changed
919       && dataflow->problem->trans_fun (bb_index))
920     {
921       /* The out set of this block has changed.
922          Propagate to the outgoing blocks.  */
923       FOR_EACH_EDGE (e, ei, bb->succs)
924         {
925           unsigned ob_index = e->dest->index;
926 
927           if (bitmap_bit_p (considered, ob_index))
928             bitmap_set_bit (pending, bbindex_to_postorder[ob_index]);
929         }
930       return true;
931     }
932   return false;
933 }
934 
935 
936 /* Helper function for df_worklist_dataflow.
937    Propagate the dataflow backward.  */
938 
939 static bool
df_worklist_propagate_backward (struct dataflow *dataflow,
                                unsigned bb_index,
                                unsigned *bbindex_to_postorder,
                                bitmap pending,
                                sbitmap considered,
			        ptrdiff_t age)
946 {
947   edge e;
948   edge_iterator ei;
949   basic_block bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
950   bool changed = !age;
951 
952   /*  Calculate <conf_op> of incoming edges.  */
953   if (EDGE_COUNT (bb->succs) > 0)
954     FOR_EACH_EDGE (e, ei, bb->succs)
955       {
956         if (age <= BB_LAST_CHANGE_AGE (e->dest)
957 	    && bitmap_bit_p (considered, e->dest->index))
958           changed |= dataflow->problem->con_fun_n (e);
959       }
960   else if (dataflow->problem->con_fun_0)
961     dataflow->problem->con_fun_0 (bb);
962 
963   if (changed
964       && dataflow->problem->trans_fun (bb_index))
965     {
      /* The in set of this block has changed.
         Propagate to the predecessor blocks.  */
968       FOR_EACH_EDGE (e, ei, bb->preds)
969         {
970           unsigned ob_index = e->src->index;
971 
972           if (bitmap_bit_p (considered, ob_index))
973             bitmap_set_bit (pending, bbindex_to_postorder[ob_index]);
974         }
975       return true;
976     }
977   return false;
978 }
979 
980 /* Main dataflow solver loop.
981 
   DATAFLOW is the problem we are solving, PENDING is the worklist of
   basic blocks we need to visit.
   BLOCKS_IN_POSTORDER is an array of size N_BLOCKS giving the basic
   blocks in postorder, and BBINDEX_TO_POSTORDER is an array mapping a
   BB->index back to its postorder position.
   PENDING will be freed.

   The worklists are bitmaps indexed by postorder positions.

   The function implements the standard algorithm for dataflow solving
   with two worklists (we are processing WORKLIST and storing new BBs to
   visit in PENDING).

   As an optimization we maintain ages when a BB was changed (stored in
   bb->aux) and when it was last visited (stored in last_visit_age).
   This avoids the need to re-do the confluence function for edges into
   basic blocks whose source did not change since the destination was
   last visited.  */
998 
999 static void
df_worklist_dataflow_doublequeue (struct dataflow *dataflow,
				  bitmap pending,
                                  sbitmap considered,
                                  int *blocks_in_postorder,
				  unsigned *bbindex_to_postorder,
				  int n_blocks)
1006 {
1007   enum df_flow_dir dir = dataflow->problem->dir;
1008   int dcount = 0;
1009   bitmap worklist = BITMAP_ALLOC (&df_bitmap_obstack);
1010   int age = 0;
1011   bool changed;
1012   vec<int> last_visit_age = vNULL;
1013   int prev_age;
1014   basic_block bb;
1015   int i;
1016 
1017   last_visit_age.safe_grow_cleared (n_blocks);
1018 
1019   /* Double-queueing. Worklist is for the current iteration,
1020      and pending is for the next. */
1021   while (!bitmap_empty_p (pending))
1022     {
1023       bitmap_iterator bi;
1024       unsigned int index;
1025 
1026       std::swap (pending, worklist);
1027 
1028       EXECUTE_IF_SET_IN_BITMAP (worklist, 0, index, bi)
1029 	{
1030 	  unsigned bb_index;
1031 	  dcount++;
1032 
1033 	  bitmap_clear_bit (pending, index);
1034 	  bb_index = blocks_in_postorder[index];
1035 	  bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
1036 	  prev_age = last_visit_age[index];
1037 	  if (dir == DF_FORWARD)
1038 	    changed = df_worklist_propagate_forward (dataflow, bb_index,
1039 						     bbindex_to_postorder,
1040 						     pending, considered,
1041 						     prev_age);
1042 	  else
1043 	    changed = df_worklist_propagate_backward (dataflow, bb_index,
1044 						      bbindex_to_postorder,
1045 						      pending, considered,
1046 						      prev_age);
1047 	  last_visit_age[index] = ++age;
1048 	  if (changed)
1049 	    bb->aux = (void *)(ptrdiff_t)age;
1050 	}
1051       bitmap_clear (worklist);
1052     }
1053   for (i = 0; i < n_blocks; i++)
1054     BASIC_BLOCK_FOR_FN (cfun, blocks_in_postorder[i])->aux = NULL;
1055 
1056   BITMAP_FREE (worklist);
1057   BITMAP_FREE (pending);
1058   last_visit_age.release ();
1059 
1060   /* Dump statistics. */
1061   if (dump_file)
1062     fprintf (dump_file, "df_worklist_dataflow_doublequeue:"
1063 	     " n_basic_blocks %d n_edges %d"
1064 	     " count %d (%5.2g)\n",
1065 	     n_basic_blocks_for_fn (cfun), n_edges_for_fn (cfun),
1066 	     dcount, dcount / (float)n_basic_blocks_for_fn (cfun));
1067 }
1068 
/* Worklist-based dataflow solver.  It uses a bitmap as a worklist,
   with the n-th bit representing the n-th block in the reverse-postorder
   order.  The solver is a double-queue algorithm similar to the "double
   stack" solver from Cooper, Harvey and Kennedy, "Iterative data-flow
   analysis, Revisited".  The only significant difference is that the
   worklist in this implementation is always sorted in RPO of the CFG
   visiting direction.  */
1075 
1076 void
df_worklist_dataflow (struct dataflow *dataflow,
                      bitmap blocks_to_consider,
                      int *blocks_in_postorder,
                      int n_blocks)
1081 {
1082   bitmap pending = BITMAP_ALLOC (&df_bitmap_obstack);
1083   bitmap_iterator bi;
1084   unsigned int *bbindex_to_postorder;
1085   int i;
1086   unsigned int index;
1087   enum df_flow_dir dir = dataflow->problem->dir;
1088 
1089   gcc_assert (dir != DF_NONE);
1090 
1091   /* BBINDEX_TO_POSTORDER maps the bb->index to the reverse postorder.  */
1092   bbindex_to_postorder = XNEWVEC (unsigned int,
1093 				  last_basic_block_for_fn (cfun));
1094 
1095   /* Initialize the array to an out-of-bound value.  */
1096   for (i = 0; i < last_basic_block_for_fn (cfun); i++)
1097     bbindex_to_postorder[i] = last_basic_block_for_fn (cfun);
1098 
1099   /* Initialize the considered map.  */
1100   auto_sbitmap considered (last_basic_block_for_fn (cfun));
1101   bitmap_clear (considered);
1102   EXECUTE_IF_SET_IN_BITMAP (blocks_to_consider, 0, index, bi)
1103     {
1104       bitmap_set_bit (considered, index);
1105     }
1106 
1107   /* Initialize the mapping of block index to postorder.  */
1108   for (i = 0; i < n_blocks; i++)
1109     {
1110       bbindex_to_postorder[blocks_in_postorder[i]] = i;
1111       /* Add all blocks to the worklist.  */
1112       bitmap_set_bit (pending, i);
1113     }
1114 
1115   /* Initialize the problem. */
1116   if (dataflow->problem->init_fun)
1117     dataflow->problem->init_fun (blocks_to_consider);
1118 
1119   /* Solve it.  */
1120   df_worklist_dataflow_doublequeue (dataflow, pending, considered,
1121 				    blocks_in_postorder,
1122 				    bbindex_to_postorder,
1123 				    n_blocks);
1124   free (bbindex_to_postorder);
1125 }
1126 
1127 
1128 /* Remove the entries not in BLOCKS from the LIST of length LEN, preserving
1129    the order of the remaining entries.  Returns the length of the resulting
1130    list.  */
1131 
1132 static unsigned
df_prune_to_subcfg (int list[], unsigned len, bitmap blocks)
1134 {
1135   unsigned act, last;
1136 
1137   for (act = 0, last = 0; act < len; act++)
1138     if (bitmap_bit_p (blocks, list[act]))
1139       list[last++] = list[act];
1140 
1141   return last;
1142 }
1143 
1144 
1145 /* Execute dataflow analysis on a single dataflow problem.
1146 
1147    BLOCKS_TO_CONSIDER are the blocks whose solution can either be
1148    examined or will be computed.  For calls from DF_ANALYZE, this is
1149    the set of blocks that has been passed to DF_SET_BLOCKS.
1150 */
1151 
1152 void
df_analyze_problem (struct dataflow *dflow,
		    bitmap blocks_to_consider,
		    int *postorder, int n_blocks)
1156 {
1157   timevar_push (dflow->problem->tv_id);
1158 
1159   /* (Re)Allocate the datastructures necessary to solve the problem.  */
1160   if (dflow->problem->alloc_fun)
1161     dflow->problem->alloc_fun (blocks_to_consider);
1162 
1163 #ifdef ENABLE_DF_CHECKING
1164   if (dflow->problem->verify_start_fun)
1165     dflow->problem->verify_start_fun ();
1166 #endif
1167 
1168   /* Set up the problem and compute the local information.  */
1169   if (dflow->problem->local_compute_fun)
1170     dflow->problem->local_compute_fun (blocks_to_consider);
1171 
1172   /* Solve the equations.  */
1173   if (dflow->problem->dataflow_fun)
1174     dflow->problem->dataflow_fun (dflow, blocks_to_consider,
1175 				  postorder, n_blocks);
1176 
1177   /* Massage the solution.  */
1178   if (dflow->problem->finalize_fun)
1179     dflow->problem->finalize_fun (blocks_to_consider);
1180 
1181 #ifdef ENABLE_DF_CHECKING
1182   if (dflow->problem->verify_end_fun)
1183     dflow->problem->verify_end_fun ();
1184 #endif
1185 
1186   timevar_pop (dflow->problem->tv_id);
1187 
1188   dflow->computed = true;
1189 }
1190 
1191 
1192 /* Analyze dataflow info.  */
1193 
1194 static void
df_analyze_1 (void)
1196 {
1197   int i;
1198 
1199   /* These should be the same.  */
1200   gcc_assert ((unsigned) df->n_blocks == df->postorder_inverted.length ());
1201 
1202   /* We need to do this before the df_verify_all because this is
1203      not kept incrementally up to date.  */
1204   df_compute_regs_ever_live (false);
1205   df_process_deferred_rescans ();
1206 
1207   if (dump_file)
1208     fprintf (dump_file, "df_analyze called\n");
1209 
1210 #ifndef ENABLE_DF_CHECKING
1211   if (df->changeable_flags & DF_VERIFY_SCHEDULED)
1212 #endif
1213     df_verify ();
1214 
1215   /* Skip over the DF_SCAN problem. */
1216   for (i = 1; i < df->num_problems_defined; i++)
1217     {
1218       struct dataflow *dflow = df->problems_in_order[i];
1219       if (dflow->solutions_dirty)
1220         {
1221           if (dflow->problem->dir == DF_FORWARD)
1222             df_analyze_problem (dflow,
1223                                 df->blocks_to_analyze,
1224 				df->postorder_inverted.address (),
1225 				df->postorder_inverted.length ());
1226           else
1227             df_analyze_problem (dflow,
1228                                 df->blocks_to_analyze,
1229                                 df->postorder,
1230                                 df->n_blocks);
1231         }
1232     }
1233 
1234   if (!df->analyze_subset)
1235     {
1236       BITMAP_FREE (df->blocks_to_analyze);
1237       df->blocks_to_analyze = NULL;
1238     }
1239 
1240 #ifdef DF_DEBUG_CFG
1241   df_set_clean_cfg ();
1242 #endif
1243 }
1244 
1245 /* Analyze dataflow info.  */
1246 
1247 void
df_analyze (void)
1249 {
1250   bitmap current_all_blocks = BITMAP_ALLOC (&df_bitmap_obstack);
1251 
1252   free (df->postorder);
1253   df->postorder = XNEWVEC (int, last_basic_block_for_fn (cfun));
1254   df->n_blocks = post_order_compute (df->postorder, true, true);
1255   df->postorder_inverted.truncate (0);
1256   inverted_post_order_compute (&df->postorder_inverted);
1257 
1258   for (int i = 0; i < df->n_blocks; i++)
1259     bitmap_set_bit (current_all_blocks, df->postorder[i]);
1260 
1261   if (flag_checking)
1262     {
1263       /* Verify that POSTORDER_INVERTED only contains blocks reachable from
1264 	 the ENTRY block.  */
1265       for (unsigned int i = 0; i < df->postorder_inverted.length (); i++)
1266 	gcc_assert (bitmap_bit_p (current_all_blocks,
1267 				  df->postorder_inverted[i]));
1268     }
1269 
1270   /* Make sure that we have pruned any unreachable blocks from these
1271      sets.  */
1272   if (df->analyze_subset)
1273     {
1274       bitmap_and_into (df->blocks_to_analyze, current_all_blocks);
1275       df->n_blocks = df_prune_to_subcfg (df->postorder,
1276 					 df->n_blocks, df->blocks_to_analyze);
1277       unsigned int newlen = df_prune_to_subcfg (df->postorder_inverted.address (),
1278 						df->postorder_inverted.length (),
1279 						  df->blocks_to_analyze);
1280       df->postorder_inverted.truncate (newlen);
1281       BITMAP_FREE (current_all_blocks);
1282     }
1283   else
1284     {
1285       df->blocks_to_analyze = current_all_blocks;
1286       current_all_blocks = NULL;
1287     }
1288 
1289   df_analyze_1 ();
1290 }
1291 
1292 /* Compute the reverse top sort order of the sub-CFG specified by LOOP.
1293    Returns the number of blocks which is always loop->num_nodes.  */
1294 
1295 static int
loop_post_order_compute (int *post_order, struct loop *loop)
1297 {
1298   edge_iterator *stack;
1299   int sp;
1300   int post_order_num = 0;
1301 
1302   /* Allocate stack for back-tracking up CFG.  */
1303   stack = XNEWVEC (edge_iterator, loop->num_nodes + 1);
1304   sp = 0;
1305 
1306   /* Allocate bitmap to track nodes that have been visited.  */
1307   auto_bitmap visited;
1308 
1309   /* Push the first edge on to the stack.  */
1310   stack[sp++] = ei_start (loop_preheader_edge (loop)->src->succs);
1311 
1312   while (sp)
1313     {
1314       edge_iterator ei;
1315       basic_block src;
1316       basic_block dest;
1317 
1318       /* Look at the edge on the top of the stack.  */
1319       ei = stack[sp - 1];
1320       src = ei_edge (ei)->src;
1321       dest = ei_edge (ei)->dest;
1322 
1323       /* Check if the edge destination has been visited yet and mark it
1324          if not so.  */
1325       if (flow_bb_inside_loop_p (loop, dest)
1326 	  && bitmap_set_bit (visited, dest->index))
1327 	{
1328 	  if (EDGE_COUNT (dest->succs) > 0)
1329 	    /* Since the DEST node has been visited for the first
1330 	       time, check its successors.  */
1331 	    stack[sp++] = ei_start (dest->succs);
1332 	  else
1333 	    post_order[post_order_num++] = dest->index;
1334 	}
1335       else
1336 	{
1337 	  if (ei_one_before_end_p (ei)
1338 	      && src != loop_preheader_edge (loop)->src)
1339 	    post_order[post_order_num++] = src->index;
1340 
1341 	  if (!ei_one_before_end_p (ei))
1342 	    ei_next (&stack[sp - 1]);
1343 	  else
1344 	    sp--;
1345 	}
1346     }
1347 
1348   free (stack);
1349 
1350   return post_order_num;
1351 }
1352 
/* Compute the reverse top sort order of the inverted sub-CFG specified
   by LOOP.  The result is stored in POST_ORDER and always contains
   loop->num_nodes blocks.  */
1355 
1356 static void
loop_inverted_post_order_compute (vec<int> *post_order, struct loop *loop)
1358 {
1359   basic_block bb;
1360   edge_iterator *stack;
1361   int sp;
1362 
1363   post_order->reserve_exact (loop->num_nodes);
1364 
1365   /* Allocate stack for back-tracking up CFG.  */
1366   stack = XNEWVEC (edge_iterator, loop->num_nodes + 1);
1367   sp = 0;
1368 
1369   /* Allocate bitmap to track nodes that have been visited.  */
1370   auto_bitmap visited;
1371 
1372   /* Put all latches into the initial work list.  In theory we'd want
1373      to start from loop exits but then we'd have the special case of
1374      endless loops.  It doesn't really matter for DF iteration order and
1375      handling latches last is probably even better.  */
1376   stack[sp++] = ei_start (loop->header->preds);
1377   bitmap_set_bit (visited, loop->header->index);
1378 
1379   /* The inverted traversal loop. */
1380   while (sp)
1381     {
1382       edge_iterator ei;
1383       basic_block pred;
1384 
1385       /* Look at the edge on the top of the stack.  */
1386       ei = stack[sp - 1];
1387       bb = ei_edge (ei)->dest;
1388       pred = ei_edge (ei)->src;
1389 
1390       /* Check if the predecessor has been visited yet and mark it
1391 	 if not so.  */
1392       if (flow_bb_inside_loop_p (loop, pred)
1393 	  && bitmap_set_bit (visited, pred->index))
1394 	{
1395 	  if (EDGE_COUNT (pred->preds) > 0)
1396 	    /* Since the predecessor node has been visited for the first
1397 	       time, check its predecessors.  */
1398 	    stack[sp++] = ei_start (pred->preds);
1399 	  else
1400 	    post_order->quick_push (pred->index);
1401 	}
1402       else
1403 	{
1404 	  if (flow_bb_inside_loop_p (loop, bb)
1405 	      && ei_one_before_end_p (ei))
1406 	    post_order->quick_push (bb->index);
1407 
1408 	  if (!ei_one_before_end_p (ei))
1409 	    ei_next (&stack[sp - 1]);
1410 	  else
1411 	    sp--;
1412 	}
1413     }
1414 
1415   free (stack);
1416 }
1417 
1418 
1419 /* Analyze dataflow info for the basic blocks contained in LOOP.  */
1420 
1421 void
df_analyze_loop (struct loop *loop)
1423 {
1424   free (df->postorder);
1425 
1426   df->postorder = XNEWVEC (int, loop->num_nodes);
1427   df->postorder_inverted.truncate (0);
1428   df->n_blocks = loop_post_order_compute (df->postorder, loop);
  loop_inverted_post_order_compute (&df->postorder_inverted, loop);
1430   gcc_assert ((unsigned) df->n_blocks == loop->num_nodes);
1431   gcc_assert (df->postorder_inverted.length () == loop->num_nodes);
1432 
1433   bitmap blocks = BITMAP_ALLOC (&df_bitmap_obstack);
1434   for (int i = 0; i < df->n_blocks; ++i)
1435     bitmap_set_bit (blocks, df->postorder[i]);
1436   df_set_blocks (blocks);
1437   BITMAP_FREE (blocks);
1438 
1439   df_analyze_1 ();
1440 }
1441 
1442 
1443 /* Return the number of basic blocks from the last call to df_analyze.  */
1444 
1445 int
df_get_n_blocks (enum df_flow_dir dir)
1447 {
1448   gcc_assert (dir != DF_NONE);
1449 
1450   if (dir == DF_FORWARD)
1451     {
1452       gcc_assert (df->postorder_inverted.length ());
1453       return df->postorder_inverted.length ();
1454     }
1455 
1456   gcc_assert (df->postorder);
1457   return df->n_blocks;
1458 }
1459 
1460 
1461 /* Return a pointer to the array of basic blocks in the reverse postorder.
1462    Depending on the direction of the dataflow problem,
1463    it returns either the usual reverse postorder array
1464    or the reverse postorder of inverted traversal. */
1465 int *
df_get_postorder (enum df_flow_dir dir)
1467 {
1468   gcc_assert (dir != DF_NONE);
1469 
1470   if (dir == DF_FORWARD)
1471     {
1472       gcc_assert (df->postorder_inverted.length ());
1473       return df->postorder_inverted.address ();
1474     }
1475   gcc_assert (df->postorder);
1476   return df->postorder;
1477 }
1478 
1479 static struct df_problem user_problem;
1480 static struct dataflow user_dflow;
1481 
/* Interface for calling iterative dataflow with user defined
   confluence and transfer functions.  All that is necessary is to
   supply DIR, a direction, INIT_FUN, an initialization function for the
   solution (or NULL), CONF_FUN_0, a confluence function for blocks with
   no logical preds (or NULL), CONF_FUN_N, the normal confluence
   function, TRANS_FUN, the basic block transfer function, BLOCKS, the
   set of blocks to examine, POSTORDER, the blocks in postorder, and
   N_BLOCKS, the number of blocks in POSTORDER.  */
1489 
1490 void
df_simple_dataflow (enum df_flow_dir dir,
		    df_init_function init_fun,
		    df_confluence_function_0 con_fun_0,
		    df_confluence_function_n con_fun_n,
		    df_transfer_function trans_fun,
		    bitmap blocks, int * postorder, int n_blocks)
1497 {
1498   memset (&user_problem, 0, sizeof (struct df_problem));
1499   user_problem.dir = dir;
1500   user_problem.init_fun = init_fun;
1501   user_problem.con_fun_0 = con_fun_0;
1502   user_problem.con_fun_n = con_fun_n;
1503   user_problem.trans_fun = trans_fun;
1504   user_dflow.problem = &user_problem;
1505   df_worklist_dataflow (&user_dflow, blocks, postorder, n_blocks);
1506 }
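
/* For instance (an illustrative sketch only; my_init_fun, my_con_fun_0,
   my_con_fun_n, my_trans_fun and my_blocks are hypothetical
   user-supplied callbacks and data whose prototypes are given by the
   typedefs in df.h), a pass could run a private backward problem over
   the blocks it has prepared with

       df_simple_dataflow (DF_BACKWARD, my_init_fun, my_con_fun_0,
                           my_con_fun_n, my_trans_fun, my_blocks,
                           df_get_postorder (DF_BACKWARD),
                           df_get_n_blocks (DF_BACKWARD));  */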
1507 
1508 
1509 
1510 /*----------------------------------------------------------------------------
1511    Functions to support limited incremental change.
1512 ----------------------------------------------------------------------------*/
1513 
1514 
1515 /* Get basic block info.  */
1516 
1517 static void *
df_get_bb_info (struct dataflow *dflow, unsigned int index)
1519 {
1520   if (dflow->block_info == NULL)
1521     return NULL;
1522   if (index >= dflow->block_info_size)
1523     return NULL;
1524   return (void *)((char *)dflow->block_info
1525 		  + index * dflow->problem->block_info_elt_size);
1526 }
1527 
1528 
1529 /* Set basic block info.  */
1530 
1531 static void
df_set_bb_info (struct dataflow *dflow, unsigned int index,
1533 		void *bb_info)
1534 {
1535   gcc_assert (dflow->block_info);
1536   memcpy ((char *)dflow->block_info
1537 	  + index * dflow->problem->block_info_elt_size,
1538 	  bb_info, dflow->problem->block_info_elt_size);
1539 }
1540 
1541 
1542 /* Clear basic block info.  */
1543 
1544 static void
df_clear_bb_info (struct dataflow *dflow, unsigned int index)
1546 {
1547   gcc_assert (dflow->block_info);
1548   gcc_assert (dflow->block_info_size > index);
1549   memset ((char *)dflow->block_info
1550 	  + index * dflow->problem->block_info_elt_size,
1551 	  0, dflow->problem->block_info_elt_size);
1552 }
1553 
1554 
1555 /* Mark the solutions as being out of date.  */
1556 
1557 void
df_mark_solutions_dirty (void)
1559 {
1560   if (df)
1561     {
1562       int p;
1563       for (p = 1; p < df->num_problems_defined; p++)
1564 	df->problems_in_order[p]->solutions_dirty = true;
1565     }
1566 }
1567 
1568 
/* Return true if BB needs its transfer functions recomputed.  */
1570 
1571 bool
df_get_bb_dirty (basic_block bb)
1573 {
1574   return bitmap_bit_p ((df_live
1575 			? df_live : df_lr)->out_of_date_transfer_functions,
1576 		       bb->index);
1577 }
1578 
1579 
/* Mark BB's transfer functions as being out of date.  */
1582 
1583 void
df_set_bb_dirty (basic_block bb)
1585 {
1586   bb->flags |= BB_MODIFIED;
1587   if (df)
1588     {
1589       int p;
1590       for (p = 1; p < df->num_problems_defined; p++)
1591 	{
1592 	  struct dataflow *dflow = df->problems_in_order[p];
1593 	  if (dflow->out_of_date_transfer_functions)
1594 	    bitmap_set_bit (dflow->out_of_date_transfer_functions, bb->index);
1595 	}
1596       df_mark_solutions_dirty ();
1597     }
1598 }
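
/* A typical incremental-update sequence (an illustrative sketch only)
   for a pass that has just rewritten the instructions of block BB is

       df_set_bb_dirty (bb);

   followed, at whatever point the pass next needs valid information,
   by a call to df_analyze (), which rebuilds BB's transfer functions
   and recomputes the dirty solutions.  */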
1599 
1600 
1601 /* Grow the bb_info array.  */
1602 
1603 void
df_grow_bb_info (struct dataflow *dflow)
1605 {
1606   unsigned int new_size = last_basic_block_for_fn (cfun) + 1;
1607   if (dflow->block_info_size < new_size)
1608     {
1609       new_size += new_size / 4;
1610       dflow->block_info
1611          = (void *)XRESIZEVEC (char, (char *)dflow->block_info,
1612 			       new_size
1613 			       * dflow->problem->block_info_elt_size);
1614       memset ((char *)dflow->block_info
1615 	      + dflow->block_info_size
1616 	      * dflow->problem->block_info_elt_size,
1617 	      0,
1618 	      (new_size - dflow->block_info_size)
1619 	      * dflow->problem->block_info_elt_size);
1620       dflow->block_info_size = new_size;
1621     }
1622 }
1623 
1624 
1625 /* Clear the dirty bits.  This is called from places that delete
1626    blocks.  */
1627 static void
df_clear_bb_dirty (basic_block bb)
1629 {
1630   int p;
1631   for (p = 1; p < df->num_problems_defined; p++)
1632     {
1633       struct dataflow *dflow = df->problems_in_order[p];
1634       if (dflow->out_of_date_transfer_functions)
1635 	bitmap_clear_bit (dflow->out_of_date_transfer_functions, bb->index);
1636     }
1637 }
1638 
/* Called from rtl_compact_blocks to reorganize each problem's basic
   block info.  */
1641 
1642 void
df_compact_blocks (void)
1644 {
1645   int i, p;
1646   basic_block bb;
1647   void *problem_temps;
1648 
1649   auto_bitmap tmp (&df_bitmap_obstack);
1650   for (p = 0; p < df->num_problems_defined; p++)
1651     {
1652       struct dataflow *dflow = df->problems_in_order[p];
1653 
1654       /* Need to reorganize the out_of_date_transfer_functions for the
1655 	 dflow problem.  */
1656       if (dflow->out_of_date_transfer_functions)
1657 	{
1658 	  bitmap_copy (tmp, dflow->out_of_date_transfer_functions);
1659 	  bitmap_clear (dflow->out_of_date_transfer_functions);
1660 	  if (bitmap_bit_p (tmp, ENTRY_BLOCK))
1661 	    bitmap_set_bit (dflow->out_of_date_transfer_functions, ENTRY_BLOCK);
1662 	  if (bitmap_bit_p (tmp, EXIT_BLOCK))
1663 	    bitmap_set_bit (dflow->out_of_date_transfer_functions, EXIT_BLOCK);
1664 
1665 	  i = NUM_FIXED_BLOCKS;
1666 	  FOR_EACH_BB_FN (bb, cfun)
1667 	    {
1668 	      if (bitmap_bit_p (tmp, bb->index))
1669 		bitmap_set_bit (dflow->out_of_date_transfer_functions, i);
1670 	      i++;
1671 	    }
1672 	}
1673 
1674       /* Now shuffle the block info for the problem.  */
1675       if (dflow->problem->free_bb_fun)
1676 	{
1677 	  int size = (last_basic_block_for_fn (cfun)
1678 		      * dflow->problem->block_info_elt_size);
1679 	  problem_temps = XNEWVAR (char, size);
1680 	  df_grow_bb_info (dflow);
1681 	  memcpy (problem_temps, dflow->block_info, size);
1682 
1683 	  /* Copy the bb info from the problem tmps to the proper
1684 	     place in the block_info vector.  Null out the copied
1685 	     item.  The entry and exit blocks never move.  */
1686 	  i = NUM_FIXED_BLOCKS;
1687 	  FOR_EACH_BB_FN (bb, cfun)
1688 	    {
1689 	      df_set_bb_info (dflow, i,
1690 			      (char *)problem_temps
1691 			      + bb->index * dflow->problem->block_info_elt_size);
1692 	      i++;
1693 	    }
1694 	  memset ((char *)dflow->block_info
1695 		  + i * dflow->problem->block_info_elt_size, 0,
1696 		  (last_basic_block_for_fn (cfun) - i)
1697 		  * dflow->problem->block_info_elt_size);
1698 	  free (problem_temps);
1699 	}
1700     }
1701 
1702   /* Shuffle the bits in the basic_block indexed arrays.  */
1703 
1704   if (df->blocks_to_analyze)
1705     {
1706       if (bitmap_bit_p (tmp, ENTRY_BLOCK))
1707 	bitmap_set_bit (df->blocks_to_analyze, ENTRY_BLOCK);
1708       if (bitmap_bit_p (tmp, EXIT_BLOCK))
1709 	bitmap_set_bit (df->blocks_to_analyze, EXIT_BLOCK);
1710       bitmap_copy (tmp, df->blocks_to_analyze);
1711       bitmap_clear (df->blocks_to_analyze);
1712       i = NUM_FIXED_BLOCKS;
1713       FOR_EACH_BB_FN (bb, cfun)
1714 	{
1715 	  if (bitmap_bit_p (tmp, bb->index))
1716 	    bitmap_set_bit (df->blocks_to_analyze, i);
1717 	  i++;
1718 	}
1719     }
1720 
1721   i = NUM_FIXED_BLOCKS;
1722   FOR_EACH_BB_FN (bb, cfun)
1723     {
1724       SET_BASIC_BLOCK_FOR_FN (cfun, i, bb);
1725       bb->index = i;
1726       i++;
1727     }
1728 
1729   gcc_assert (i == n_basic_blocks_for_fn (cfun));
1730 
1731   for (; i < last_basic_block_for_fn (cfun); i++)
1732     SET_BASIC_BLOCK_FOR_FN (cfun, i, NULL);
1733 
1734 #ifdef DF_DEBUG_CFG
1735   if (!df_lr->solutions_dirty)
1736     df_set_clean_cfg ();
1737 #endif
1738 }
1739 
1740 
1741 /* Shove NEW_BLOCK in at OLD_INDEX.  Called from ifcvt to hack a
1742    block.  There is no excuse for people to do this kind of thing.  */
1743 
1744 void
df_bb_replace (int old_index, basic_block new_block)
1746 {
1747   int new_block_index = new_block->index;
1748   int p;
1749 
1750   if (dump_file)
1751     fprintf (dump_file, "shoving block %d into %d\n", new_block_index, old_index);
1752 
1753   gcc_assert (df);
1754   gcc_assert (BASIC_BLOCK_FOR_FN (cfun, old_index) == NULL);
1755 
1756   for (p = 0; p < df->num_problems_defined; p++)
1757     {
1758       struct dataflow *dflow = df->problems_in_order[p];
1759       if (dflow->block_info)
1760 	{
1761 	  df_grow_bb_info (dflow);
1762 	  df_set_bb_info (dflow, old_index,
1763 			  df_get_bb_info (dflow, new_block_index));
1764 	}
1765     }
1766 
1767   df_clear_bb_dirty (new_block);
1768   SET_BASIC_BLOCK_FOR_FN (cfun, old_index, new_block);
1769   new_block->index = old_index;
1770   df_set_bb_dirty (BASIC_BLOCK_FOR_FN (cfun, old_index));
1771   SET_BASIC_BLOCK_FOR_FN (cfun, new_block_index, NULL);
1772 }
1773 
1774 
1775 /* Free all of the per basic block dataflow from all of the problems.
1776    This is typically called before a basic block is deleted and the
1777    problem will be reanalyzed.  */
1778 
1779 void
df_bb_delete (int bb_index)
1781 {
1782   basic_block bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
1783   int i;
1784 
1785   if (!df)
1786     return;
1787 
1788   for (i = 0; i < df->num_problems_defined; i++)
1789     {
1790       struct dataflow *dflow = df->problems_in_order[i];
1791       if (dflow->problem->free_bb_fun)
1792 	{
1793 	  void *bb_info = df_get_bb_info (dflow, bb_index);
1794 	  if (bb_info)
1795 	    {
1796 	      dflow->problem->free_bb_fun (bb, bb_info);
1797 	      df_clear_bb_info (dflow, bb_index);
1798 	    }
1799 	}
1800     }
1801   df_clear_bb_dirty (bb);
1802   df_mark_solutions_dirty ();
1803 }
1804 
1805 
1806 /* Verify that there is a place for everything and everything is in
1807    its place.  This is too expensive to run after every pass in the
1808    mainline.  However this is an excellent debugging tool if the
1809    dataflow information is not being updated properly.  You can just
1810    sprinkle calls in until you find the place that is changing an
1811    underlying structure without calling the proper updating
1812    routine.  */
1813 
1814 void
df_verify (void)
1816 {
1817   df_scan_verify ();
1818 #ifdef ENABLE_DF_CHECKING
1819   df_lr_verify_transfer_functions ();
1820   if (df_live)
1821     df_live_verify_transfer_functions ();
1822 #endif
1823   df->changeable_flags &= ~DF_VERIFY_SCHEDULED;
1824 }
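
/* For example (an illustrative sketch only), when hunting for such a
   bug one can temporarily add

       df_verify ();

   at the end of the transformation suspected of leaving the dataflow
   information stale.  */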
1825 
1826 #ifdef DF_DEBUG_CFG
1827 
/* Compute an array of ints that describes the cfg.  This can be used
   to discover places where the cfg is modified but the appropriate
   calls to keep df informed have not been made.  The internals of
   this are unexciting; the key is that two instances of this can be
   compared to see if any changes have been made to the cfg.  */
1833 
1834 static int *
df_compute_cfg_image (void)
1836 {
1837   basic_block bb;
1838   int size = 2 + (2 * n_basic_blocks_for_fn (cfun));
1839   int i;
1840   int * map;
1841 
1842   FOR_ALL_BB_FN (bb, cfun)
1843     {
1844       size += EDGE_COUNT (bb->succs);
1845     }
1846 
1847   map = XNEWVEC (int, size);
1848   map[0] = size;
1849   i = 1;
1850   FOR_ALL_BB_FN (bb, cfun)
1851     {
1852       edge_iterator ei;
1853       edge e;
1854 
1855       map[i++] = bb->index;
1856       FOR_EACH_EDGE (e, ei, bb->succs)
1857 	map[i++] = e->dest->index;
1858       map[i++] = -1;
1859     }
1860   map[i] = -1;
1861   return map;
1862 }
1863 
1864 static int *saved_cfg = NULL;
1865 
1866 
/* This function compares the saved version of the cfg with the
   current cfg and aborts if the two differ.  The function silently
   returns if the cfg has been marked as dirty, if no version has been
   saved, or if the two are identical.  */
1871 
1872 void
df_check_cfg_clean (void)
1874 {
1875   int *new_map;
1876 
1877   if (!df)
1878     return;
1879 
1880   if (df_lr->solutions_dirty)
1881     return;
1882 
1883   if (saved_cfg == NULL)
1884     return;
1885 
1886   new_map = df_compute_cfg_image ();
1887   gcc_assert (memcmp (saved_cfg, new_map, saved_cfg[0] * sizeof (int)) == 0);
1888   free (new_map);
1889 }
1890 
1891 
1892 /* This function builds a cfg fingerprint and squirrels it away in
1893    saved_cfg.  */
1894 
1895 static void
df_set_clean_cfg (void)
1897 {
1898   free (saved_cfg);
1899   saved_cfg = df_compute_cfg_image ();
1900 }
1901 
1902 #endif /* DF_DEBUG_CFG  */
1903 /*----------------------------------------------------------------------------
1904    PUBLIC INTERFACES TO QUERY INFORMATION.
1905 ----------------------------------------------------------------------------*/
1906 
1907 
1908 /* Return first def of REGNO within BB.  */
1909 
1910 df_ref
df_bb_regno_first_def_find (basic_block bb, unsigned int regno)
1912 {
1913   rtx_insn *insn;
1914   df_ref def;
1915 
1916   FOR_BB_INSNS (bb, insn)
1917     {
1918       if (!INSN_P (insn))
1919 	continue;
1920 
1921       FOR_EACH_INSN_DEF (def, insn)
1922 	if (DF_REF_REGNO (def) == regno)
1923 	  return def;
1924     }
1925   return NULL;
1926 }
1927 
1928 
1929 /* Return last def of REGNO within BB.  */
1930 
1931 df_ref
df_bb_regno_last_def_find (basic_block bb, unsigned int regno)
1933 {
1934   rtx_insn *insn;
1935   df_ref def;
1936 
1937   FOR_BB_INSNS_REVERSE (bb, insn)
1938     {
1939       if (!INSN_P (insn))
1940 	continue;
1941 
1942       FOR_EACH_INSN_DEF (def, insn)
1943 	if (DF_REF_REGNO (def) == regno)
1944 	  return def;
1945     }
1946 
1947   return NULL;
1948 }
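
/* For instance (an illustrative sketch only), a pass that wants the
   definition of REG reaching the end of BB can use

       df_ref last_def = df_bb_regno_last_def_find (bb, REGNO (reg));

   and, when the result is non-NULL, inspect DF_REF_INSN (last_def).  */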
1949 
/* Find the reference corresponding to the definition of REG in INSN,
   or NULL if there is none.  */
1952 
1953 df_ref
df_find_def (rtx_insn *insn, rtx reg)
1955 {
1956   df_ref def;
1957 
1958   if (GET_CODE (reg) == SUBREG)
1959     reg = SUBREG_REG (reg);
1960   gcc_assert (REG_P (reg));
1961 
1962   FOR_EACH_INSN_DEF (def, insn)
1963     if (DF_REF_REGNO (def) == REGNO (reg))
1964       return def;
1965 
1966   return NULL;
1967 }
1968 
1969 
/* Return true if REG is defined in INSN, false otherwise.  */
1971 
1972 bool
df_reg_defined (rtx_insn *insn, rtx reg)
1974 {
1975   return df_find_def (insn, reg) != NULL;
1976 }
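
/* For example (an illustrative sketch only), a pass can test whether
   INSN sets REG with

       if (df_reg_defined (insn, reg))
         ...

   or call df_find_def (insn, reg) directly when it also needs to look
   at the returned reference, e.g. its DF_REF_FLAGS.  */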
1977 
1978 
/* Find the reference corresponding to the use of REG in INSN, or NULL
   if there is none.  */
1981 
1982 df_ref
df_find_use (rtx_insn *insn, rtx reg)
1984 {
1985   df_ref use;
1986 
1987   if (GET_CODE (reg) == SUBREG)
1988     reg = SUBREG_REG (reg);
1989   gcc_assert (REG_P (reg));
1990 
1991   df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
1992   FOR_EACH_INSN_INFO_USE (use, insn_info)
1993     if (DF_REF_REGNO (use) == REGNO (reg))
1994       return use;
1995   if (df->changeable_flags & DF_EQ_NOTES)
1996     FOR_EACH_INSN_INFO_EQ_USE (use, insn_info)
1997       if (DF_REF_REGNO (use) == REGNO (reg))
1998 	return use;
1999   return NULL;
2000 }
2001 
2002 
/* Return true if REG is referenced in INSN, false otherwise.  */
2004 
2005 bool
df_reg_used (rtx_insn *insn, rtx reg)
2007 {
2008   return df_find_use (insn, reg) != NULL;
2009 }
2010 
2011 
2012 /*----------------------------------------------------------------------------
2013    Debugging and printing functions.
2014 ----------------------------------------------------------------------------*/
2015 
/* Write information about the registers in set R into OUTF.  This is
   part of making a debugging dump.  */
2018 
2019 void
dump_regset (regset r, FILE *outf)
2021 {
2022   unsigned i;
2023   reg_set_iterator rsi;
2024 
2025   if (r == NULL)
2026     {
2027       fputs (" (nil)", outf);
2028       return;
2029     }
2030 
2031   EXECUTE_IF_SET_IN_REG_SET (r, 0, i, rsi)
2032     {
2033       fprintf (outf, " %d", i);
2034       if (i < FIRST_PSEUDO_REGISTER)
2035 	fprintf (outf, " [%s]",
2036 		 reg_names[i]);
2037     }
2038 }
2039 
2040 /* Print a human-readable representation of R on the standard error
2041    stream.  This function is designed to be used from within the
2042    debugger.  */
2043 extern void debug_regset (regset);
2044 DEBUG_FUNCTION void
debug_regset (regset r)
2046 {
2047   dump_regset (r, stderr);
2048   putc ('\n', stderr);
2049 }
2050 
/* Write information about the registers in bitmap R into FILE.  This
   is part of making a debugging dump.  */
2053 
2054 void
df_print_regset (FILE *file, bitmap r)
2056 {
2057   unsigned int i;
2058   bitmap_iterator bi;
2059 
2060   if (r == NULL)
2061     fputs (" (nil)", file);
2062   else
2063     {
2064       EXECUTE_IF_SET_IN_BITMAP (r, 0, i, bi)
2065 	{
2066 	  fprintf (file, " %d", i);
2067 	  if (i < FIRST_PSEUDO_REGISTER)
2068 	    fprintf (file, " [%s]", reg_names[i]);
2069 	}
2070     }
2071   fprintf (file, "\n");
2072 }
2073 
2074 
/* Write information about the registers in bitmap R into FILE.  The
   bitmap is in the form used by df_word_lr, with two bits per pseudo
   register.  This is part of making a debugging dump.  */
2078 
2079 void
df_print_word_regset (FILE *file, bitmap r)
2081 {
2082   unsigned int max_reg = max_reg_num ();
2083 
2084   if (r == NULL)
2085     fputs (" (nil)", file);
2086   else
2087     {
2088       unsigned int i;
2089       for (i = FIRST_PSEUDO_REGISTER; i < max_reg; i++)
2090 	{
2091 	  bool found = (bitmap_bit_p (r, 2 * i)
2092 			|| bitmap_bit_p (r, 2 * i + 1));
2093 	  if (found)
2094 	    {
2095 	      int word;
2096 	      const char * sep = "";
2097 	      fprintf (file, " %d", i);
2098 	      fprintf (file, "(");
2099 	      for (word = 0; word < 2; word++)
2100 		if (bitmap_bit_p (r, 2 * i + word))
2101 		  {
2102 		    fprintf (file, "%s%d", sep, word);
2103 		    sep = ", ";
2104 		  }
2105 	      fprintf (file, ")");
2106 	    }
2107 	}
2108     }
2109   fprintf (file, "\n");
2110 }
2111 
2112 
2113 /* Dump dataflow info.  */
2114 
2115 void
df_dump (FILE *file)
2117 {
2118   basic_block bb;
2119   df_dump_start (file);
2120 
2121   FOR_ALL_BB_FN (bb, cfun)
2122     {
2123       df_print_bb_index (bb, file);
2124       df_dump_top (bb, file);
2125       df_dump_bottom (bb, file);
2126     }
2127 
2128   fprintf (file, "\n");
2129 }
2130 
2131 
2132 /* Dump dataflow info for df->blocks_to_analyze.  */
2133 
2134 void
df_dump_region (FILE *file)
2136 {
2137   if (df->blocks_to_analyze)
2138     {
2139       bitmap_iterator bi;
2140       unsigned int bb_index;
2141 
2142       fprintf (file, "\n\nstarting region dump\n");
2143       df_dump_start (file);
2144 
2145       EXECUTE_IF_SET_IN_BITMAP (df->blocks_to_analyze, 0, bb_index, bi)
2146 	{
2147 	  basic_block bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
2148 	  dump_bb (file, bb, 0, TDF_DETAILS);
2149 	}
2150       fprintf (file, "\n");
2151     }
2152   else
2153     df_dump (file);
2154 }
2155 
2156 
2157 /* Dump the introductory information for each problem defined.  */
2158 
2159 void
df_dump_start (FILE *file)
2161 {
2162   int i;
2163 
2164   if (!df || !file)
2165     return;
2166 
2167   fprintf (file, "\n\n%s\n", current_function_name ());
2168   fprintf (file, "\nDataflow summary:\n");
2169   if (df->blocks_to_analyze)
2170     fprintf (file, "def_info->table_size = %d, use_info->table_size = %d\n",
2171 	     DF_DEFS_TABLE_SIZE (), DF_USES_TABLE_SIZE ());
2172 
2173   for (i = 0; i < df->num_problems_defined; i++)
2174     {
2175       struct dataflow *dflow = df->problems_in_order[i];
2176       if (dflow->computed)
2177 	{
2178 	  df_dump_problem_function fun = dflow->problem->dump_start_fun;
2179 	  if (fun)
2180 	    fun (file);
2181 	}
2182     }
2183 }
2184 
2185 
2186 /* Dump the top or bottom of the block information for BB.  */
2187 static void
df_dump_bb_problem_data (basic_block bb, FILE *file, bool top)
2189 {
2190   int i;
2191 
2192   if (!df || !file)
2193     return;
2194 
2195   for (i = 0; i < df->num_problems_defined; i++)
2196     {
2197       struct dataflow *dflow = df->problems_in_order[i];
2198       if (dflow->computed)
2199 	{
2200 	  df_dump_bb_problem_function bbfun;
2201 
2202 	  if (top)
2203 	    bbfun = dflow->problem->dump_top_fun;
2204 	  else
2205 	    bbfun = dflow->problem->dump_bottom_fun;
2206 
2207 	  if (bbfun)
2208 	    bbfun (bb, file);
2209 	}
2210     }
2211 }
2212 
2213 /* Dump the top of the block information for BB.  */
2214 
2215 void
df_dump_top (basic_block bb, FILE *file)
2217 {
2218   df_dump_bb_problem_data (bb, file, /*top=*/true);
2219 }
2220 
2221 /* Dump the bottom of the block information for BB.  */
2222 
2223 void
df_dump_bottom (basic_block bb, FILE *file)
2225 {
2226   df_dump_bb_problem_data (bb, file, /*top=*/false);
2227 }
2228 
2229 
2230 /* Dump information about INSN just before or after dumping INSN itself.  */
2231 static void
df_dump_insn_problem_data (const rtx_insn *insn, FILE *file, bool top)
2233 {
2234   int i;
2235 
2236   if (!df || !file)
2237     return;
2238 
2239   for (i = 0; i < df->num_problems_defined; i++)
2240     {
2241       struct dataflow *dflow = df->problems_in_order[i];
2242       if (dflow->computed)
2243 	{
2244 	  df_dump_insn_problem_function insnfun;
2245 
2246 	  if (top)
2247 	    insnfun = dflow->problem->dump_insn_top_fun;
2248 	  else
2249 	    insnfun = dflow->problem->dump_insn_bottom_fun;
2250 
2251 	  if (insnfun)
2252 	    insnfun (insn, file);
2253 	}
2254     }
2255 }
2256 
2257 /* Dump information about INSN before dumping INSN itself.  */
2258 
2259 void
df_dump_insn_top (const rtx_insn *insn, FILE *file)
2261 {
2262   df_dump_insn_problem_data (insn,  file, /*top=*/true);
2263 }
2264 
2265 /* Dump information about INSN after dumping INSN itself.  */
2266 
2267 void
df_dump_insn_bottom (const rtx_insn *insn, FILE *file)
2269 {
2270   df_dump_insn_problem_data (insn,  file, /*top=*/false);
2271 }
2272 
2273 
2274 static void
df_ref_dump (df_ref ref, FILE *file)
2276 {
2277   fprintf (file, "%c%d(%d)",
2278 	   DF_REF_REG_DEF_P (ref)
2279 	   ? 'd'
2280 	   : (DF_REF_FLAGS (ref) & DF_REF_IN_NOTE) ? 'e' : 'u',
2281 	   DF_REF_ID (ref),
2282 	   DF_REF_REGNO (ref));
2283 }
2284 
2285 void
df_refs_chain_dump (df_ref ref, bool follow_chain, FILE *file)
2287 {
2288   fprintf (file, "{ ");
2289   for (; ref; ref = DF_REF_NEXT_LOC (ref))
2290     {
2291       df_ref_dump (ref, file);
2292       if (follow_chain)
2293 	df_chain_dump (DF_REF_CHAIN (ref), file);
2294     }
2295   fprintf (file, "}");
2296 }
2297 
2298 
/* Dump either a reg-def or reg-use chain.  */
2300 
2301 void
df_regs_chain_dump (df_ref ref, FILE *file)
2303 {
2304   fprintf (file, "{ ");
2305   while (ref)
2306     {
2307       df_ref_dump (ref, file);
2308       ref = DF_REF_NEXT_REG (ref);
2309     }
2310   fprintf (file, "}");
2311 }
2312 
2313 
2314 static void
df_mws_dump (struct df_mw_hardreg *mws, FILE *file)
2316 {
2317   for (; mws; mws = DF_MWS_NEXT (mws))
2318     fprintf (file, "mw %c r[%d..%d]\n",
2319 	     DF_MWS_REG_DEF_P (mws) ? 'd' : 'u',
2320 	     mws->start_regno, mws->end_regno);
2321 }
2322 
2323 
2324 static void
df_insn_uid_debug (unsigned int uid,
2326 		   bool follow_chain, FILE *file)
2327 {
2328   fprintf (file, "insn %d luid %d",
2329 	   uid, DF_INSN_UID_LUID (uid));
2330 
2331   if (DF_INSN_UID_DEFS (uid))
2332     {
2333       fprintf (file, " defs ");
2334       df_refs_chain_dump (DF_INSN_UID_DEFS (uid), follow_chain, file);
2335     }
2336 
2337   if (DF_INSN_UID_USES (uid))
2338     {
2339       fprintf (file, " uses ");
2340       df_refs_chain_dump (DF_INSN_UID_USES (uid), follow_chain, file);
2341     }
2342 
2343   if (DF_INSN_UID_EQ_USES (uid))
2344     {
2345       fprintf (file, " eq uses ");
2346       df_refs_chain_dump (DF_INSN_UID_EQ_USES (uid), follow_chain, file);
2347     }
2348 
2349   if (DF_INSN_UID_MWS (uid))
2350     {
2351       fprintf (file, " mws ");
2352       df_mws_dump (DF_INSN_UID_MWS (uid), file);
2353     }
2354   fprintf (file, "\n");
2355 }
2356 
2357 
2358 DEBUG_FUNCTION void
df_insn_debug (rtx_insn *insn, bool follow_chain, FILE *file)
2360 {
2361   df_insn_uid_debug (INSN_UID (insn), follow_chain, file);
2362 }
2363 
2364 DEBUG_FUNCTION void
df_insn_debug_regno (rtx_insn *insn, FILE *file)
2366 {
2367   struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
2368 
2369   fprintf (file, "insn %d bb %d luid %d defs ",
2370 	   INSN_UID (insn), BLOCK_FOR_INSN (insn)->index,
2371 	   DF_INSN_INFO_LUID (insn_info));
2372   df_refs_chain_dump (DF_INSN_INFO_DEFS (insn_info), false, file);
2373 
2374   fprintf (file, " uses ");
2375   df_refs_chain_dump (DF_INSN_INFO_USES (insn_info), false, file);
2376 
2377   fprintf (file, " eq_uses ");
2378   df_refs_chain_dump (DF_INSN_INFO_EQ_USES (insn_info), false, file);
2379   fprintf (file, "\n");
2380 }
2381 
2382 DEBUG_FUNCTION void
df_regno_debug (unsigned int regno, FILE *file)
2384 {
2385   fprintf (file, "reg %d defs ", regno);
2386   df_regs_chain_dump (DF_REG_DEF_CHAIN (regno), file);
2387   fprintf (file, " uses ");
2388   df_regs_chain_dump (DF_REG_USE_CHAIN (regno), file);
2389   fprintf (file, " eq_uses ");
2390   df_regs_chain_dump (DF_REG_EQ_USE_CHAIN (regno), file);
2391   fprintf (file, "\n");
2392 }
2393 
2394 
2395 DEBUG_FUNCTION void
df_ref_debug (df_ref ref, FILE *file)
2397 {
2398   fprintf (file, "%c%d ",
2399 	   DF_REF_REG_DEF_P (ref) ? 'd' : 'u',
2400 	   DF_REF_ID (ref));
2401   fprintf (file, "reg %d bb %d insn %d flag %#x type %#x ",
2402 	   DF_REF_REGNO (ref),
2403 	   DF_REF_BBNO (ref),
2404 	   DF_REF_IS_ARTIFICIAL (ref) ? -1 : DF_REF_INSN_UID (ref),
2405 	   DF_REF_FLAGS (ref),
2406 	   DF_REF_TYPE (ref));
2407   if (DF_REF_LOC (ref))
2408     {
2409       if (flag_dump_noaddr)
2410 	fprintf (file, "loc #(#) chain ");
2411       else
2412 	fprintf (file, "loc %p(%p) chain ", (void *)DF_REF_LOC (ref),
2413 		 (void *)*DF_REF_LOC (ref));
2414     }
2415   else
2416     fprintf (file, "chain ");
2417   df_chain_dump (DF_REF_CHAIN (ref), file);
2418   fprintf (file, "\n");
2419 }
2420 
2421 /* Functions for debugging from GDB.  */
2422 
2423 DEBUG_FUNCTION void
debug_df_insn (rtx_insn *insn)
2425 {
2426   df_insn_debug (insn, true, stderr);
2427   debug_rtx (insn);
2428 }
2429 
2430 
2431 DEBUG_FUNCTION void
debug_df_reg (rtx reg)
2433 {
2434   df_regno_debug (REGNO (reg), stderr);
2435 }
2436 
2437 
2438 DEBUG_FUNCTION void
debug_df_regno (unsigned int regno)
2440 {
2441   df_regno_debug (regno, stderr);
2442 }
2443 
2444 
2445 DEBUG_FUNCTION void
debug_df_ref (df_ref ref)
2447 {
2448   df_ref_debug (ref, stderr);
2449 }
2450 
2451 
2452 DEBUG_FUNCTION void
debug_df_defno (unsigned int defno)
2454 {
2455   df_ref_debug (DF_DEFS_GET (defno), stderr);
2456 }
2457 
2458 
2459 DEBUG_FUNCTION void
debug_df_useno (unsigned int defno)
2461 {
2462   df_ref_debug (DF_USES_GET (defno), stderr);
2463 }
2464 
2465 
2466 DEBUG_FUNCTION void
debug_df_chain (struct df_link *link)
2468 {
2469   df_chain_dump (link, stderr);
2470   fputc ('\n', stderr);
2471 }
2472