/* FIXME: We need to go back and add the warning messages about code
   moved across setjmp.  */


/* Scanning of rtl for dataflow analysis.
   Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006
   Free Software Foundation, Inc.
   Originally contributed by Michael P. Hayes
             (m.hayes@elec.canterbury.ac.nz, mhayes@redhat.com)
   Major rewrite contributed by Danny Berlin (dberlin@dberlin.org)
             and Kenneth Zadeck (zadeck@naturalbridge.com).

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.
*/

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tm_p.h"
#include "insn-config.h"
#include "recog.h"
#include "function.h"
#include "regs.h"
#include "output.h"
#include "alloc-pool.h"
#include "flags.h"
#include "hard-reg-set.h"
#include "basic-block.h"
#include "sbitmap.h"
#include "bitmap.h"
#include "timevar.h"
#include "tree.h"
#include "target.h"
#include "target-def.h"
#include "df.h"

#ifndef HAVE_epilogue
#define HAVE_epilogue 0
#endif
#ifndef HAVE_prologue
#define HAVE_prologue 0
#endif
#ifndef HAVE_sibcall_epilogue
#define HAVE_sibcall_epilogue 0
#endif

#ifndef EPILOGUE_USES
#define EPILOGUE_USES(REGNO)  0
#endif

/* The bitmap_obstack is used to hold some static variables that
   should not be reset after each function is compiled.  */

static bitmap_obstack persistent_obstack;

/* The set of hard registers in eliminables[i].from. */

static HARD_REG_SET elim_reg_set;

/* This is a bitmap copy of regs_invalidated_by_call so that we can
   easily add it into bitmaps, etc. */

bitmap df_invalidated_by_call = NULL;

/* Initialize ur_in and ur_out as if all hard registers were partially
   available.  */

static void df_ref_record (struct dataflow *, rtx, rtx *,
			   basic_block, rtx, enum df_ref_type,
			   enum df_ref_flags, bool record_live);
static void df_def_record_1 (struct dataflow *, rtx, basic_block, rtx,
			     enum df_ref_flags, bool record_live);
static void df_defs_record (struct dataflow *, rtx, basic_block, rtx);
static void df_uses_record (struct dataflow *, rtx *, enum df_ref_type,
			    basic_block, rtx, enum df_ref_flags);

static void df_insn_refs_record (struct dataflow *, basic_block, rtx);
static void df_bb_refs_record (struct dataflow *, basic_block);
static void df_refs_record (struct dataflow *, bitmap);
static struct df_ref *df_ref_create_structure (struct dataflow *, rtx, rtx *,
					       basic_block, rtx, enum df_ref_type,
					       enum df_ref_flags);
static void df_record_entry_block_defs (struct dataflow *);
static void df_record_exit_block_uses (struct dataflow *);
static void df_grow_reg_info (struct dataflow *, struct df_ref_info *);
static void df_grow_ref_info (struct df_ref_info *, unsigned int);
static void df_grow_insn_info (struct df *);


/*----------------------------------------------------------------------------
   SCANNING DATAFLOW PROBLEM

   There are several ways in which scanning looks just like the other
   dataflow problems.  It shares all the mechanisms for local info
   as well as basic block info.  Where it differs is when and how often
   it gets run.  It also has no need for the iterative solver.
----------------------------------------------------------------------------*/

/* Problem data for the scanning dataflow function.  */
struct df_scan_problem_data
{
  alloc_pool ref_pool;
  alloc_pool insn_pool;
  alloc_pool reg_pool;
  alloc_pool mw_reg_pool;
  alloc_pool mw_link_pool;
};

typedef struct df_scan_bb_info *df_scan_bb_info_t;

static void
df_scan_free_internal (struct dataflow *dflow)
{
  struct df *df = dflow->df;
  struct df_scan_problem_data *problem_data
    = (struct df_scan_problem_data *) dflow->problem_data;

  free (df->def_info.regs);
  free (df->def_info.refs);
  memset (&df->def_info, 0, (sizeof (struct df_ref_info)));

  free (df->use_info.regs);
  free (df->use_info.refs);
  memset (&df->use_info, 0, (sizeof (struct df_ref_info)));

  free (df->insns);
  df->insns = NULL;
  df->insns_size = 0;

  free (dflow->block_info);
  dflow->block_info = NULL;
  dflow->block_info_size = 0;

  BITMAP_FREE (df->hardware_regs_used);
  BITMAP_FREE (df->entry_block_defs);
  BITMAP_FREE (df->exit_block_uses);

  free_alloc_pool (dflow->block_pool);
  free_alloc_pool (problem_data->ref_pool);
  free_alloc_pool (problem_data->insn_pool);
  free_alloc_pool (problem_data->reg_pool);
  free_alloc_pool (problem_data->mw_reg_pool);
  free_alloc_pool (problem_data->mw_link_pool);
}


/* Get basic block info.  */

struct df_scan_bb_info *
df_scan_get_bb_info (struct dataflow *dflow, unsigned int index)
{
  gcc_assert (index < dflow->block_info_size);
  return (struct df_scan_bb_info *) dflow->block_info[index];
}


/* Set basic block info.  */

static void
df_scan_set_bb_info (struct dataflow *dflow, unsigned int index,
		     struct df_scan_bb_info *bb_info)
{
  gcc_assert (index < dflow->block_info_size);
  dflow->block_info[index] = (void *) bb_info;
}


/* Free basic block info.  */

static void
df_scan_free_bb_info (struct dataflow *dflow, basic_block bb, void *vbb_info)
{
  struct df_scan_bb_info *bb_info = (struct df_scan_bb_info *) vbb_info;
  if (bb_info)
    {
      df_bb_refs_delete (dflow, bb->index);
      pool_free (dflow->block_pool, bb_info);
    }
}


/* Allocate the problem data for the scanning problem.  This should be
   called when the problem is created or when the entire function is to
   be rescanned.  */

static void
df_scan_alloc (struct dataflow *dflow, bitmap blocks_to_rescan,
	       bitmap all_blocks ATTRIBUTE_UNUSED)
{
  struct df *df = dflow->df;
  struct df_scan_problem_data *problem_data;
  unsigned int insn_num = get_max_uid () + 1;
  unsigned int block_size = 50;
  unsigned int bb_index;
  bitmap_iterator bi;

  /* Given the number of pools, this is really faster than tearing
     everything apart.  */
  if (dflow->problem_data)
    df_scan_free_internal (dflow);

  dflow->block_pool
    = create_alloc_pool ("df_scan_block pool",
			 sizeof (struct df_scan_bb_info),
			 block_size);

  problem_data = XNEW (struct df_scan_problem_data);
  dflow->problem_data = problem_data;

  problem_data->ref_pool
    = create_alloc_pool ("df_scan_ref pool",
			 sizeof (struct df_ref), block_size);
  problem_data->insn_pool
    = create_alloc_pool ("df_scan_insn pool",
			 sizeof (struct df_insn_info), block_size);
  problem_data->reg_pool
    = create_alloc_pool ("df_scan_reg pool",
			 sizeof (struct df_reg_info), block_size);
  problem_data->mw_reg_pool
    = create_alloc_pool ("df_scan_mw_reg pool",
			 sizeof (struct df_mw_hardreg), block_size);
  problem_data->mw_link_pool
    = create_alloc_pool ("df_scan_mw_link pool",
			 sizeof (struct df_link), block_size);

  insn_num += insn_num / 4;
  df_grow_reg_info (dflow, &df->def_info);
  df_grow_ref_info (&df->def_info, insn_num);

  df_grow_reg_info (dflow, &df->use_info);
  df_grow_ref_info (&df->use_info, insn_num * 2);

  df_grow_insn_info (df);
  df_grow_bb_info (dflow);

  EXECUTE_IF_SET_IN_BITMAP (blocks_to_rescan, 0, bb_index, bi)
    {
      struct df_scan_bb_info *bb_info = df_scan_get_bb_info (dflow, bb_index);
      if (!bb_info)
	{
	  bb_info = (struct df_scan_bb_info *) pool_alloc (dflow->block_pool);
	  df_scan_set_bb_info (dflow, bb_index, bb_info);
	}
      bb_info->artificial_defs = NULL;
      bb_info->artificial_uses = NULL;
    }

  df->hardware_regs_used = BITMAP_ALLOC (NULL);
  df->entry_block_defs = BITMAP_ALLOC (NULL);
  df->exit_block_uses = BITMAP_ALLOC (NULL);
}


/* Free all of the data associated with the scan problem.  */

static void
df_scan_free (struct dataflow *dflow)
{
  struct df *df = dflow->df;

  if (dflow->problem_data)
    {
      df_scan_free_internal (dflow);
      free (dflow->problem_data);
    }

  if (df->blocks_to_scan)
    BITMAP_FREE (df->blocks_to_scan);

  if (df->blocks_to_analyze)
    BITMAP_FREE (df->blocks_to_analyze);

  free (dflow);
}

static void
df_scan_dump (struct dataflow *dflow ATTRIBUTE_UNUSED, FILE *file ATTRIBUTE_UNUSED)
{
  struct df *df = dflow->df;
  int i;

  fprintf (file, "  invalidated by call \t");
  dump_bitmap (file, df_invalidated_by_call);
  fprintf (file, "  hardware regs used \t");
  dump_bitmap (file, df->hardware_regs_used);
  fprintf (file, "  entry block defs \t");
  dump_bitmap (file, df->entry_block_defs);
  fprintf (file, "  exit block uses \t");
  dump_bitmap (file, df->exit_block_uses);
  fprintf (file, "  regs ever live \t");
  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    if (regs_ever_live[i])
      fprintf (file, "%d ", i);
  fprintf (file, "\n");
}

static struct df_problem problem_SCAN =
{
  DF_SCAN,                    /* Problem id.  */
  DF_NONE,                    /* Direction.  */
  df_scan_alloc,              /* Allocate the problem specific data.  */
  NULL,                       /* Reset global information.  */
  df_scan_free_bb_info,       /* Free basic block info.  */
  NULL,                       /* Local compute function.  */
  NULL,                       /* Init the solution specific data.  */
  NULL,                       /* Iterative solver.  */
  NULL,                       /* Confluence operator 0.  */
  NULL,                       /* Confluence operator n.  */
  NULL,                       /* Transfer function.  */
  NULL,                       /* Finalize function.  */
  df_scan_free,               /* Free all of the problem information.  */
  df_scan_dump,               /* Debugging.  */
  NULL,                       /* Dependent problem.  */
  0                           /* Changeable flags.  */
};


/* Create a new DATAFLOW instance and add it to an existing instance
   of DF.  The returned structure is what is used to get at the
   solution.  */

struct dataflow *
df_scan_add_problem (struct df *df, int flags)
{
  return df_add_problem (df, &problem_SCAN, flags);
}
344 
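/* Example: an illustrative sketch (not a caller in this file) of how
   a pass might attach the scanning problem and scan a function.
   df_init and df_rescan_blocks are assumed to be the entry points
   declared in df.h; the flag values here are arbitrary.  */
#if 0
static void
example_df_scan_usage (void)
{
  struct df *df = df_init (DF_HARD_REGS | DF_EQUIV_NOTES);
  df_scan_add_problem (df, 0);
  /* A NULL blocks argument rescans all of the blocks.  */
  df_rescan_blocks (df, NULL);
}
#endif
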
/*----------------------------------------------------------------------------
   Storage Allocation Utilities
----------------------------------------------------------------------------*/


/* First, grow the reg_info information.  If the current size is less than
   the number of pseudos, grow to 25% more than the number of
   pseudos.

   Second, assure that all of the slots up to max_reg_num have been
   filled with reg_info structures.  */

static void
df_grow_reg_info (struct dataflow *dflow, struct df_ref_info *ref_info)
{
  unsigned int max_reg = max_reg_num ();
  unsigned int new_size = max_reg;
  struct df_scan_problem_data *problem_data
    = (struct df_scan_problem_data *) dflow->problem_data;
  unsigned int i;

  if (ref_info->regs_size < new_size)
    {
      new_size += new_size / 4;
      ref_info->regs = xrealloc (ref_info->regs,
				 new_size * sizeof (struct df_reg_info *));
      ref_info->regs_size = new_size;
    }

  for (i = ref_info->regs_inited; i < max_reg; i++)
    {
      struct df_reg_info *reg_info = pool_alloc (problem_data->reg_pool);
      memset (reg_info, 0, sizeof (struct df_reg_info));
      ref_info->regs[i] = reg_info;
    }

  ref_info->regs_inited = max_reg;
}
383 
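/* A worked example of the growth policy above (the numbers are
   illustrative only): with max_reg_num () == 400 and regs_size ==
   100, new_size becomes 400 + 400 / 4 == 500 slots, and slots
   regs_inited through 399 are then filled with zeroed df_reg_info
   structures.  */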

/* Grow the ref information.  */

static void
df_grow_ref_info (struct df_ref_info *ref_info, unsigned int new_size)
{
  if (ref_info->refs_size < new_size)
    {
      ref_info->refs = xrealloc (ref_info->refs,
				 new_size * sizeof (struct df_ref *));
      memset (ref_info->refs + ref_info->refs_size, 0,
	      (new_size - ref_info->refs_size) * sizeof (struct df_ref *));
      ref_info->refs_size = new_size;
    }
}


/* Grow the insn information.  If the current size is less than the
   number of instructions, grow to 25% more than the number of
   instructions.  */

static void
df_grow_insn_info (struct df *df)
{
  unsigned int new_size = get_max_uid () + 1;
  if (df->insns_size < new_size)
    {
      new_size += new_size / 4;
      df->insns = xrealloc (df->insns,
			    new_size * sizeof (struct df_insn_info *));
      memset (df->insns + df->insns_size, 0,
	      (new_size - df->insns_size) * sizeof (struct df_insn_info *));
      df->insns_size = new_size;
    }
}




/*----------------------------------------------------------------------------
   PUBLIC INTERFACES FOR SMALL GRAIN CHANGES TO SCANNING.
----------------------------------------------------------------------------*/

/* Rescan some BLOCKS, or all the blocks defined by the last call to
   df_set_blocks if BLOCKS is NULL.  */

void
df_rescan_blocks (struct df *df, bitmap blocks)
{
  bitmap local_blocks_to_scan = BITMAP_ALLOC (NULL);

  struct dataflow *dflow = df->problems_by_index[DF_SCAN];
  basic_block bb;

  df->def_info.refs_organized = false;
  df->use_info.refs_organized = false;

  if (blocks)
    {
      int i;
      unsigned int bb_index;
      bitmap_iterator bi;
      bool cleared_bits = false;

      /* Need to ensure that there is space in all of the tables.  */
      unsigned int insn_num = get_max_uid () + 1;
      insn_num += insn_num / 4;

      df_grow_reg_info (dflow, &df->def_info);
      df_grow_ref_info (&df->def_info, insn_num);

      df_grow_reg_info (dflow, &df->use_info);
      df_grow_ref_info (&df->use_info, insn_num * 2);

      df_grow_insn_info (df);
      df_grow_bb_info (dflow);

      bitmap_copy (local_blocks_to_scan, blocks);

      EXECUTE_IF_SET_IN_BITMAP (blocks, 0, bb_index, bi)
	{
	  basic_block bb = BASIC_BLOCK (bb_index);
	  if (!bb)
	    {
	      bitmap_clear_bit (local_blocks_to_scan, bb_index);
	      cleared_bits = true;
	    }
	}

      if (cleared_bits)
	bitmap_copy (blocks, local_blocks_to_scan);

      df->def_info.add_refs_inline = true;
      df->use_info.add_refs_inline = true;

      for (i = df->num_problems_defined; i; i--)
	{
	  bitmap blocks_to_reset = NULL;
	  if (dflow->problem->reset_fun)
	    {
	      if (!blocks_to_reset)
		{
		  blocks_to_reset = BITMAP_ALLOC (NULL);
		  bitmap_copy (blocks_to_reset, local_blocks_to_scan);
		  if (df->blocks_to_scan)
		    bitmap_ior_into (blocks_to_reset, df->blocks_to_scan);
		}
	      dflow->problem->reset_fun (dflow, blocks_to_reset);
	    }
	  if (blocks_to_reset)
	    BITMAP_FREE (blocks_to_reset);
	}

      df_refs_delete (dflow, local_blocks_to_scan);

      /* This may be a mistake, but if an explicit set of blocks is
         passed in and the set of blocks to analyze has been
         explicitly set, add the extra blocks to blocks_to_analyze.
         The alternative is to put an assert here.  We do not want
         this to just go by silently or else we may get storage
         leaks.  */
      if (df->blocks_to_analyze)
	bitmap_ior_into (df->blocks_to_analyze, blocks);
    }
  else
    {
      /* If we are going to do everything, just reallocate everything.
	 Most stuff is allocated in pools so this is faster than
	 walking it.  */
      if (df->blocks_to_analyze)
	bitmap_copy (local_blocks_to_scan, df->blocks_to_analyze);
      else
	FOR_ALL_BB (bb)
	  {
	    bitmap_set_bit (local_blocks_to_scan, bb->index);
	  }
      df_scan_alloc (dflow, local_blocks_to_scan, NULL);

      df->def_info.add_refs_inline = false;
      df->use_info.add_refs_inline = false;
    }

  df_refs_record (dflow, local_blocks_to_scan);
#if 0
  bitmap_print (stderr, local_blocks_to_scan, "scanning: ", "\n");
#endif

  if (!df->blocks_to_scan)
    df->blocks_to_scan = BITMAP_ALLOC (NULL);

  bitmap_ior_into (df->blocks_to_scan, local_blocks_to_scan);
  BITMAP_FREE (local_blocks_to_scan);
}


/* Create a new ref of type DF_REF_TYPE for register REG at address
   LOC within INSN of BB.  */

struct df_ref *
df_ref_create (struct df *df, rtx reg, rtx *loc, rtx insn,
	       basic_block bb,
	       enum df_ref_type ref_type,
	       enum df_ref_flags ref_flags)
{
  struct dataflow *dflow = df->problems_by_index[DF_SCAN];
  struct df_scan_bb_info *bb_info;

  df_grow_reg_info (dflow, &df->use_info);
  df_grow_reg_info (dflow, &df->def_info);
  df_grow_bb_info (dflow);

  /* Make sure there is bb_info for this block.  */
  bb_info = df_scan_get_bb_info (dflow, bb->index);
  if (!bb_info)
    {
      bb_info = (struct df_scan_bb_info *) pool_alloc (dflow->block_pool);
      df_scan_set_bb_info (dflow, bb->index, bb_info);
      bb_info->artificial_defs = NULL;
      bb_info->artificial_uses = NULL;
    }

  if (ref_type == DF_REF_REG_DEF)
    df->def_info.add_refs_inline = true;
  else
    df->use_info.add_refs_inline = true;

  return df_ref_create_structure (dflow, reg, loc, bb, insn, ref_type, ref_flags);
}
571 
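/* Example: an illustrative sketch (not a caller in this file) of
   registering a use of the stack pointer that a pass has just added
   to INSN.  Taking the address of an element of regno_reg_rtx as the
   ref location mirrors what this file itself does.  */
#if 0
static void
example_add_sp_use (struct df *df, basic_block bb, rtx insn)
{
  df_ref_create (df, regno_reg_rtx[STACK_POINTER_REGNUM],
		 &regno_reg_rtx[STACK_POINTER_REGNUM],
		 insn, bb, DF_REF_REG_USE, 0);
}
#endif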


/*----------------------------------------------------------------------------
   UTILITIES TO CREATE AND DESTROY REFS AND CHAINS.
----------------------------------------------------------------------------*/


/* Get the artificial defs for a basic block.  */

struct df_ref *
df_get_artificial_defs (struct df *df, unsigned int bb_index)
{
  struct dataflow *dflow = df->problems_by_index[DF_SCAN];
  return df_scan_get_bb_info (dflow, bb_index)->artificial_defs;
}


/* Get the artificial uses for a basic block.  */

struct df_ref *
df_get_artificial_uses (struct df *df, unsigned int bb_index)
{
  struct dataflow *dflow = df->problems_by_index[DF_SCAN];
  return df_scan_get_bb_info (dflow, bb_index)->artificial_uses;
}


/* Link REF at the front of the reg_use or reg_def chain for REGNO.  */

void
df_reg_chain_create (struct df_reg_info *reg_info,
		     struct df_ref *ref)
{
  struct df_ref *head = reg_info->reg_chain;
  reg_info->reg_chain = ref;

  DF_REF_NEXT_REG (ref) = head;

  /* We cannot actually link to the head of the chain.  */
  DF_REF_PREV_REG (ref) = NULL;

  if (head)
    DF_REF_PREV_REG (head) = ref;
}
616 
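/* A worked example of the push-front above (illustrative ref names
   only): linking R2 and then R1 into an empty chain yields the
   doubly-linked chain R1 <-> R2, with reg_chain pointing at R1,
   DF_REF_PREV_REG (R1) == NULL and DF_REF_NEXT_REG (R2) == NULL.  */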

/* Remove REF from the CHAIN.  Return the head of the chain.  This
   will be CHAIN unless the REF was at the beginning of the chain.  */

static struct df_ref *
df_ref_unlink (struct df_ref *chain, struct df_ref *ref)
{
  struct df_ref *orig_chain = chain;
  struct df_ref *prev = NULL;
  while (chain)
    {
      if (chain == ref)
	{
	  if (prev)
	    {
	      prev->next_ref = ref->next_ref;
	      ref->next_ref = NULL;
	      return orig_chain;
	    }
	  else
	    {
	      chain = ref->next_ref;
	      ref->next_ref = NULL;
	      return chain;
	    }
	}

      prev = chain;
      chain = chain->next_ref;
    }

  /* Someone passed in a ref that was not in the chain.  */
  gcc_unreachable ();
  return NULL;
}


/* Unlink and delete REF from the reg_use or reg_def chain.  Also delete
   the def-use or use-def chain if it exists.  Returns the next ref in
   the uses or defs chain.  */

struct df_ref *
df_reg_chain_unlink (struct dataflow *dflow, struct df_ref *ref)
{
  struct df *df = dflow->df;
  struct df_ref *next = DF_REF_NEXT_REG (ref);
  struct df_ref *prev = DF_REF_PREV_REG (ref);
  struct df_scan_problem_data *problem_data
    = (struct df_scan_problem_data *) dflow->problem_data;
  struct df_reg_info *reg_info;
  struct df_ref *next_ref = ref->next_ref;
  unsigned int id = DF_REF_ID (ref);

  if (DF_REF_TYPE (ref) == DF_REF_REG_DEF)
    {
      reg_info = DF_REG_DEF_GET (df, DF_REF_REGNO (ref));
      df->def_info.bitmap_size--;
      if (df->def_info.refs && (id < df->def_info.refs_size))
	DF_DEFS_SET (df, id, NULL);
    }
  else
    {
      reg_info = DF_REG_USE_GET (df, DF_REF_REGNO (ref));
      df->use_info.bitmap_size--;
      if (df->use_info.refs && (id < df->use_info.refs_size))
	DF_USES_SET (df, id, NULL);
    }

  /* Delete any def-use or use-def chains that start here.  */
  if (DF_REF_CHAIN (ref))
    df_chain_unlink (df->problems_by_index[DF_CHAIN], ref, NULL);

  reg_info->n_refs--;

  /* Unlink from the reg chain.  If there is no prev, this is the
     first of the list.  If not, just join the next and prev.  */
  if (prev)
    {
      DF_REF_NEXT_REG (prev) = next;
      if (next)
	DF_REF_PREV_REG (next) = prev;
    }
  else
    {
      reg_info->reg_chain = next;
      if (next)
	DF_REF_PREV_REG (next) = NULL;
    }

  pool_free (problem_data->ref_pool, ref);
  return next_ref;
}


/* Unlink REF from all def-use/use-def chains, etc.  */

void
df_ref_remove (struct df *df, struct df_ref *ref)
{
  struct dataflow *dflow = df->problems_by_index[DF_SCAN];
  if (DF_REF_REG_DEF_P (ref))
    {
      if (DF_REF_FLAGS (ref) & DF_REF_ARTIFICIAL)
	{
	  struct df_scan_bb_info *bb_info
	    = df_scan_get_bb_info (dflow, DF_REF_BB (ref)->index);
	  bb_info->artificial_defs
	    = df_ref_unlink (bb_info->artificial_defs, ref);
	}
      else
	DF_INSN_UID_DEFS (df, DF_REF_INSN_UID (ref))
	  = df_ref_unlink (DF_INSN_UID_DEFS (df, DF_REF_INSN_UID (ref)), ref);

      if (df->def_info.add_refs_inline)
	DF_DEFS_SET (df, DF_REF_ID (ref), NULL);
    }
  else
    {
      if (DF_REF_FLAGS (ref) & DF_REF_ARTIFICIAL)
	{
	  struct df_scan_bb_info *bb_info
	    = df_scan_get_bb_info (dflow, DF_REF_BB (ref)->index);
	  bb_info->artificial_uses
	    = df_ref_unlink (bb_info->artificial_uses, ref);
	}
      else
	DF_INSN_UID_USES (df, DF_REF_INSN_UID (ref))
	  = df_ref_unlink (DF_INSN_UID_USES (df, DF_REF_INSN_UID (ref)), ref);

      if (df->use_info.add_refs_inline)
	DF_USES_SET (df, DF_REF_ID (ref), NULL);
    }

  df_reg_chain_unlink (dflow, ref);
}


/* Create the insn record for INSN.  If there was one there, zero it out.  */

static struct df_insn_info *
df_insn_create_insn_record (struct dataflow *dflow, rtx insn)
{
  struct df *df = dflow->df;
  struct df_scan_problem_data *problem_data
    = (struct df_scan_problem_data *) dflow->problem_data;

  struct df_insn_info *insn_rec = DF_INSN_GET (df, insn);
  if (!insn_rec)
    {
      insn_rec = pool_alloc (problem_data->insn_pool);
      DF_INSN_SET (df, insn, insn_rec);
    }
  memset (insn_rec, 0, sizeof (struct df_insn_info));

  return insn_rec;
}


/* Delete all of the refs information from INSN.  */

void
df_insn_refs_delete (struct dataflow *dflow, rtx insn)
{
  struct df *df = dflow->df;
  unsigned int uid = INSN_UID (insn);
  struct df_insn_info *insn_info = NULL;
  struct df_ref *ref;
  struct df_scan_problem_data *problem_data
    = (struct df_scan_problem_data *) dflow->problem_data;

  if (uid < df->insns_size)
    insn_info = DF_INSN_UID_GET (df, uid);

  if (insn_info)
    {
      struct df_mw_hardreg *hardregs = insn_info->mw_hardregs;

      while (hardregs)
	{
	  struct df_mw_hardreg *next_hr = hardregs->next;
	  struct df_link *link = hardregs->regs;
	  while (link)
	    {
	      struct df_link *next_l = link->next;
	      pool_free (problem_data->mw_link_pool, link);
	      link = next_l;
	    }

	  pool_free (problem_data->mw_reg_pool, hardregs);
	  hardregs = next_hr;
	}

      ref = insn_info->defs;
      while (ref)
	ref = df_reg_chain_unlink (dflow, ref);

      ref = insn_info->uses;
      while (ref)
	ref = df_reg_chain_unlink (dflow, ref);

      pool_free (problem_data->insn_pool, insn_info);
      DF_INSN_SET (df, insn, NULL);
    }
}


/* Delete all of the refs information from the basic block with BB_INDEX.  */

void
df_bb_refs_delete (struct dataflow *dflow, int bb_index)
{
  struct df_ref *def;
  struct df_ref *use;

  struct df_scan_bb_info *bb_info
    = df_scan_get_bb_info (dflow, bb_index);
  rtx insn;
  basic_block bb = BASIC_BLOCK (bb_index);
  FOR_BB_INSNS (bb, insn)
    {
      if (INSN_P (insn))
	{
	  /* Delete the refs within INSN.  */
	  df_insn_refs_delete (dflow, insn);
	}
    }

  /* Get rid of any artificial uses or defs.  */
  if (bb_info)
    {
      def = bb_info->artificial_defs;
      while (def)
	def = df_reg_chain_unlink (dflow, def);
      bb_info->artificial_defs = NULL;
      use = bb_info->artificial_uses;
      while (use)
	use = df_reg_chain_unlink (dflow, use);
      bb_info->artificial_uses = NULL;
    }
}


/* Delete all of the refs information from BLOCKS.  */

void
df_refs_delete (struct dataflow *dflow, bitmap blocks)
{
  bitmap_iterator bi;
  unsigned int bb_index;

  EXECUTE_IF_SET_IN_BITMAP (blocks, 0, bb_index, bi)
    {
      df_bb_refs_delete (dflow, bb_index);
    }
}


/* Build the ref table for either the uses or defs from the reg-use
   or reg-def chains.  */

void
df_reorganize_refs (struct df_ref_info *ref_info)
{
  unsigned int m = ref_info->regs_inited;
  unsigned int regno;
  unsigned int offset = 0;
  unsigned int size = 0;

  if (ref_info->refs_organized)
    return;

  if (ref_info->refs_size < ref_info->bitmap_size)
    {
      int new_size = ref_info->bitmap_size + ref_info->bitmap_size / 4;
      df_grow_ref_info (ref_info, new_size);
    }

  for (regno = 0; regno < m; regno++)
    {
      struct df_reg_info *reg_info = ref_info->regs[regno];
      int count = 0;
      if (reg_info)
	{
	  struct df_ref *ref = reg_info->reg_chain;
	  reg_info->begin = offset;
	  while (ref)
	    {
	      ref_info->refs[offset] = ref;
	      DF_REF_ID (ref) = offset++;
	      ref = DF_REF_NEXT_REG (ref);
	      count++;
	      size++;
	    }
	  reg_info->n_refs = count;
	}
    }

  /* The bitmap size is not decremented when refs are deleted.  So
     reset it now that we have squished out all of the empty
     slots.  */
  ref_info->bitmap_size = size;
  ref_info->refs_organized = true;
  ref_info->add_refs_inline = true;
}
921 
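/* Example: an illustrative sketch of walking the compacted def table
   after reorganization.  DF_DEFS_SIZE is used elsewhere in this file;
   DF_DEFS_GET is assumed to be the matching accessor from df.h.  */
#if 0
static void
example_walk_defs (struct df *df)
{
  unsigned int i;
  df_reorganize_refs (&df->def_info);
  for (i = 0; i < DF_DEFS_SIZE (df); i++)
    {
      struct df_ref *def = DF_DEFS_GET (df, i);
      if (def)
	fprintf (stderr, "def %u sets reg %u\n", i, DF_REF_REGNO (def));
    }
}
#endif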

/*----------------------------------------------------------------------------
   Hard core instruction scanning code.  No external interfaces here,
   just a lot of routines that look inside insns.
----------------------------------------------------------------------------*/

/* Create a ref and add it to the reg-def or reg-use chains.  */

static struct df_ref *
df_ref_create_structure (struct dataflow *dflow, rtx reg, rtx *loc,
			 basic_block bb, rtx insn,
			 enum df_ref_type ref_type,
			 enum df_ref_flags ref_flags)
{
  struct df_ref *this_ref;
  struct df *df = dflow->df;
  int regno = REGNO (GET_CODE (reg) == SUBREG ? SUBREG_REG (reg) : reg);
  struct df_scan_problem_data *problem_data
    = (struct df_scan_problem_data *) dflow->problem_data;

  this_ref = pool_alloc (problem_data->ref_pool);
  DF_REF_REG (this_ref) = reg;
  DF_REF_REGNO (this_ref) = regno;
  DF_REF_LOC (this_ref) = loc;
  DF_REF_INSN (this_ref) = insn;
  DF_REF_CHAIN (this_ref) = NULL;
  DF_REF_TYPE (this_ref) = ref_type;
  DF_REF_FLAGS (this_ref) = ref_flags;
  DF_REF_DATA (this_ref) = NULL;
  DF_REF_BB (this_ref) = bb;

  /* Link the ref into the reg_def and reg_use chains and keep a count
     of the instances.  */
  switch (ref_type)
    {
    case DF_REF_REG_DEF:
      {
	struct df_reg_info *reg_info = DF_REG_DEF_GET (df, regno);
	reg_info->n_refs++;

	/* Add the ref to the reg_def chain.  */
	df_reg_chain_create (reg_info, this_ref);
	DF_REF_ID (this_ref) = df->def_info.bitmap_size;
	if (df->def_info.add_refs_inline)
	  {
	    if (DF_DEFS_SIZE (df) >= df->def_info.refs_size)
	      {
		int new_size = df->def_info.bitmap_size
		  + df->def_info.bitmap_size / 4;
		df_grow_ref_info (&df->def_info, new_size);
	      }
	    /* Add the ref to the big array of defs.  */
	    DF_DEFS_SET (df, df->def_info.bitmap_size, this_ref);
	    df->def_info.refs_organized = false;
	  }

	df->def_info.bitmap_size++;

	if (DF_REF_FLAGS (this_ref) & DF_REF_ARTIFICIAL)
	  {
	    struct df_scan_bb_info *bb_info
	      = df_scan_get_bb_info (dflow, bb->index);
	    this_ref->next_ref = bb_info->artificial_defs;
	    bb_info->artificial_defs = this_ref;
	  }
	else
	  {
	    this_ref->next_ref = DF_INSN_GET (df, insn)->defs;
	    DF_INSN_GET (df, insn)->defs = this_ref;
	  }
      }
      break;

    case DF_REF_REG_MEM_LOAD:
    case DF_REF_REG_MEM_STORE:
    case DF_REF_REG_USE:
      {
	struct df_reg_info *reg_info = DF_REG_USE_GET (df, regno);
	reg_info->n_refs++;

	/* Add the ref to the reg_use chain.  */
	df_reg_chain_create (reg_info, this_ref);
	DF_REF_ID (this_ref) = df->use_info.bitmap_size;
	if (df->use_info.add_refs_inline)
	  {
	    if (DF_USES_SIZE (df) >= df->use_info.refs_size)
	      {
		int new_size = df->use_info.bitmap_size
		  + df->use_info.bitmap_size / 4;
		df_grow_ref_info (&df->use_info, new_size);
	      }
	    /* Add the ref to the big array of uses.  */
	    DF_USES_SET (df, df->use_info.bitmap_size, this_ref);
	    df->use_info.refs_organized = false;
	  }

	df->use_info.bitmap_size++;
	if (DF_REF_FLAGS (this_ref) & DF_REF_ARTIFICIAL)
	  {
	    struct df_scan_bb_info *bb_info
	      = df_scan_get_bb_info (dflow, bb->index);
	    this_ref->next_ref = bb_info->artificial_uses;
	    bb_info->artificial_uses = this_ref;
	  }
	else
	  {
	    this_ref->next_ref = DF_INSN_GET (df, insn)->uses;
	    DF_INSN_GET (df, insn)->uses = this_ref;
	  }
      }
      break;

    default:
      gcc_unreachable ();
    }
  return this_ref;
}


/* Create new references of type DF_REF_TYPE for each part of register REG
   at address LOC within INSN of BB.  */

static void
df_ref_record (struct dataflow *dflow, rtx reg, rtx *loc,
	       basic_block bb, rtx insn,
	       enum df_ref_type ref_type,
	       enum df_ref_flags ref_flags,
	       bool record_live)
{
  struct df *df = dflow->df;
  rtx oldreg = reg;
  unsigned int regno;

  gcc_assert (REG_P (reg) || GET_CODE (reg) == SUBREG);

  /* For the reg allocator we are interested in some SUBREG rtx's, but not
     all.  Notably only those representing a word extraction from a multi-word
     reg.  As written in the documentation those should have the form
     (subreg:SI (reg:M A) N), with size(SImode) > size(Mmode).
     XXX Is that true?  We could also use the global word_mode variable.  */
  if ((dflow->flags & DF_SUBREGS) == 0
      && GET_CODE (reg) == SUBREG
      && (GET_MODE_SIZE (GET_MODE (reg)) < GET_MODE_SIZE (word_mode)
	  || GET_MODE_SIZE (GET_MODE (reg))
	       >= GET_MODE_SIZE (GET_MODE (SUBREG_REG (reg)))))
    {
      loc = &SUBREG_REG (reg);
      reg = *loc;
      ref_flags |= DF_REF_STRIPPED;
    }

  regno = REGNO (GET_CODE (reg) == SUBREG ? SUBREG_REG (reg) : reg);
  if (regno < FIRST_PSEUDO_REGISTER)
    {
      unsigned int i;
      unsigned int endregno;
      struct df_mw_hardreg *hardreg = NULL;
      struct df_scan_problem_data *problem_data
	= (struct df_scan_problem_data *) dflow->problem_data;

      if (!(dflow->flags & DF_HARD_REGS))
	return;

      /* GET_MODE (reg) is correct here.  We do not want to go into a SUBREG
         for the mode, because we only want to add references to regs, which
	 are really referenced.  E.g., a (subreg:SI (reg:DI 0) 0) does _not_
	 reference the whole reg 0 in DI mode (which would also include
	 reg 1, at least, if 0 and 1 are SImode registers).  */
      endregno = hard_regno_nregs[regno][GET_MODE (reg)];
      if (GET_CODE (reg) == SUBREG)
        regno += subreg_regno_offset (regno, GET_MODE (SUBREG_REG (reg)),
				      SUBREG_BYTE (reg), GET_MODE (reg));
      endregno += regno;

      /* If this is a multiword hardreg, we create some extra data structures
	 that will enable us to easily build REG_DEAD and REG_UNUSED notes.  */
      if ((endregno != regno + 1) && insn)
	{
	  struct df_insn_info *insn_info = DF_INSN_GET (df, insn);
	  /* Sets to a subreg of a multiword register are partial.
	     Sets to a non-subreg of a multiword register are not.  */
	  if (GET_CODE (oldreg) == SUBREG)
	    ref_flags |= DF_REF_PARTIAL;
	  ref_flags |= DF_REF_MW_HARDREG;
	  hardreg = pool_alloc (problem_data->mw_reg_pool);
	  hardreg->next = insn_info->mw_hardregs;
	  insn_info->mw_hardregs = hardreg;
	  hardreg->type = ref_type;
	  hardreg->flags = ref_flags;
	  hardreg->mw_reg = reg;
	  hardreg->regs = NULL;
	}

      for (i = regno; i < endregno; i++)
	{
	  struct df_ref *ref;

	  /* Calls are handled at the call site because regs_ever_live
	     doesn't include clobbered regs, only used ones.  */
	  if (ref_type == DF_REF_REG_DEF && record_live)
	    regs_ever_live[i] = 1;
	  else if ((ref_type == DF_REF_REG_USE
		   || ref_type == DF_REF_REG_MEM_STORE
		   || ref_type == DF_REF_REG_MEM_LOAD)
		   && ((ref_flags & DF_REF_ARTIFICIAL) == 0))
	    {
	      /* Set regs_ever_live on uses of non-eliminable frame
		 pointers and arg pointers.  */
	      if (!(TEST_HARD_REG_BIT (elim_reg_set, regno)
		     && (regno == FRAME_POINTER_REGNUM
			 || regno == ARG_POINTER_REGNUM)))
		regs_ever_live[i] = 1;
	    }

	  ref = df_ref_create_structure (dflow, regno_reg_rtx[i], loc,
					 bb, insn, ref_type, ref_flags);
	  if (hardreg)
	    {
	      struct df_link *link = pool_alloc (problem_data->mw_link_pool);

	      link->next = hardreg->regs;
	      link->ref = ref;
	      hardreg->regs = link;
	    }
	}
    }
  else
    {
      df_ref_create_structure (dflow, reg, loc,
			       bb, insn, ref_type, ref_flags);
    }
}
/* A set to a non-paradoxical SUBREG for which the number of word_mode units
   covered by the outer mode is smaller than that covered by the inner mode,
   is a read-modify-write operation.
   This function returns true iff the SUBREG X is such a SUBREG.  */

bool
df_read_modify_subreg_p (rtx x)
{
  unsigned int isize, osize;
  if (GET_CODE (x) != SUBREG)
    return false;
  isize = GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)));
  osize = GET_MODE_SIZE (GET_MODE (x));
  return (isize > osize && isize > UNITS_PER_WORD);
}
1173 
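/* Worked examples for the predicate above, assuming UNITS_PER_WORD == 4:
   (subreg:SI (reg:DI 0) 0) has isize == 8 and osize == 4, so
   isize > osize and isize > UNITS_PER_WORD, and the result is true.
   A paradoxical (subreg:DI (reg:SI 1) 0) has isize == 4 <= osize == 8,
   so the result is false.  */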

/* Process all the registers defined in the rtx, X.
   Autoincrement/decrement definitions will be picked up by
   df_uses_record.  */

static void
df_def_record_1 (struct dataflow *dflow, rtx x,
		 basic_block bb, rtx insn,
		 enum df_ref_flags flags, bool record_live)
{
  rtx *loc;
  rtx dst;
  bool dst_in_strict_lowpart = false;

  /* We may recursively call ourselves on EXPR_LIST when dealing with a
     PARALLEL construct.  */
  if (GET_CODE (x) == EXPR_LIST || GET_CODE (x) == CLOBBER)
    loc = &XEXP (x, 0);
  else
    loc = &SET_DEST (x);
  dst = *loc;

  /* It is legal to have a set destination be a parallel.  */
  if (GET_CODE (dst) == PARALLEL)
    {
      int i;

      for (i = XVECLEN (dst, 0) - 1; i >= 0; i--)
	{
	  rtx temp = XVECEXP (dst, 0, i);
	  if (GET_CODE (temp) == EXPR_LIST || GET_CODE (temp) == CLOBBER
	      || GET_CODE (temp) == SET)
	    df_def_record_1 (dflow, temp, bb, insn,
			     GET_CODE (temp) == CLOBBER
			     ? flags | DF_REF_MUST_CLOBBER : flags,
			     record_live);
	}
      return;
    }

  /* Maybe we should flag the use of STRICT_LOW_PART somehow.  It might
     be handy for the reg allocator.  */
  while (GET_CODE (dst) == STRICT_LOW_PART
	 || GET_CODE (dst) == ZERO_EXTRACT
	 || df_read_modify_subreg_p (dst))
    {
#if 0
      /* Strict low part always contains SUBREG, but we do not want to make
	 it appear outside, as whole register is always considered.  */
      if (GET_CODE (dst) == STRICT_LOW_PART)
	{
	  loc = &XEXP (dst, 0);
	  dst = *loc;
	}
#endif
      loc = &XEXP (dst, 0);
      if (GET_CODE (dst) == STRICT_LOW_PART)
	dst_in_strict_lowpart = true;
      dst = *loc;
      flags |= DF_REF_READ_WRITE;
    }

  /* Sets to a subreg of a single word register are partial sets if
     they are wrapped in a strict lowpart, and not partial otherwise.  */
  if (GET_CODE (dst) == SUBREG && REG_P (SUBREG_REG (dst))
      && dst_in_strict_lowpart)
    flags |= DF_REF_PARTIAL;

  if (REG_P (dst)
      || (GET_CODE (dst) == SUBREG && REG_P (SUBREG_REG (dst))))
    df_ref_record (dflow, dst, loc, bb, insn,
		   DF_REF_REG_DEF, flags, record_live);
}


/* Process all the registers defined in the pattern rtx, X.  */

static void
df_defs_record (struct dataflow *dflow, rtx x, basic_block bb, rtx insn)
{
  RTX_CODE code = GET_CODE (x);

  if (code == SET || code == CLOBBER)
    {
      /* Mark the single def within the pattern.  */
      df_def_record_1 (dflow, x, bb, insn,
		       code == CLOBBER ? DF_REF_MUST_CLOBBER : 0, true);
    }
  else if (code == COND_EXEC)
    {
      df_defs_record (dflow, COND_EXEC_CODE (x), bb, insn);
    }
  else if (code == PARALLEL)
    {
      int i;

      /* Mark the multiple defs within the pattern.  */
      for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
	df_defs_record (dflow, XVECEXP (x, 0, i), bb, insn);
    }
}


/* Process all the registers used in the rtx at address LOC.  */

static void
df_uses_record (struct dataflow *dflow, rtx *loc, enum df_ref_type ref_type,
		basic_block bb, rtx insn, enum df_ref_flags flags)
{
  RTX_CODE code;
  rtx x;
 retry:
  x = *loc;
  if (!x)
    return;
  code = GET_CODE (x);
  switch (code)
    {
    case LABEL_REF:
    case SYMBOL_REF:
    case CONST_INT:
    case CONST:
    case CONST_DOUBLE:
    case CONST_VECTOR:
    case PC:
    case CC0:
    case ADDR_VEC:
    case ADDR_DIFF_VEC:
      return;

    case CLOBBER:
      /* If we are clobbering a MEM, mark any registers inside the address
	 as being used.  */
      if (MEM_P (XEXP (x, 0)))
	df_uses_record (dflow, &XEXP (XEXP (x, 0), 0),
			DF_REF_REG_MEM_STORE, bb, insn, flags);

      /* If we're clobbering a REG then we have a def so ignore.  */
      return;

    case MEM:
      df_uses_record (dflow, &XEXP (x, 0), DF_REF_REG_MEM_LOAD, bb, insn,
		      flags & DF_REF_IN_NOTE);
      return;

    case SUBREG:
      /* While we're here, optimize this case.  */
      flags |= DF_REF_PARTIAL;
      /* In case the SUBREG is not of a REG, do not optimize.  */
      if (!REG_P (SUBREG_REG (x)))
	{
	  loc = &SUBREG_REG (x);
	  df_uses_record (dflow, loc, ref_type, bb, insn, flags);
	  return;
	}
      /* ... Fall through ...  */

    case REG:
      df_ref_record (dflow, x, loc, bb, insn, ref_type, flags, true);
      return;

    case SET:
      {
	rtx dst = SET_DEST (x);
	gcc_assert (!(flags & DF_REF_IN_NOTE));
	df_uses_record (dflow, &SET_SRC (x), DF_REF_REG_USE, bb, insn, flags);

	switch (GET_CODE (dst))
	  {
	    case SUBREG:
	      if (df_read_modify_subreg_p (dst))
		{
		  df_uses_record (dflow, &SUBREG_REG (dst),
				  DF_REF_REG_USE, bb,
				  insn, flags | DF_REF_READ_WRITE);
		  break;
		}
	      /* Fall through.  */
	    case REG:
	    case PARALLEL:
	    case SCRATCH:
	    case PC:
	    case CC0:
		break;
	    case MEM:
	      df_uses_record (dflow, &XEXP (dst, 0),
			      DF_REF_REG_MEM_STORE,
			      bb, insn, flags);
	      break;
	    case STRICT_LOW_PART:
	      {
		rtx *temp = &XEXP (dst, 0);
		/* A strict_low_part uses the whole REG and not just the
		   SUBREG.  */
		dst = XEXP (dst, 0);
		df_uses_record (dflow,
				(GET_CODE (dst) == SUBREG)
				? &SUBREG_REG (dst) : temp,
				DF_REF_REG_USE, bb,
				insn, DF_REF_READ_WRITE);
	      }
	      break;
	    case ZERO_EXTRACT:
	    case SIGN_EXTRACT:
	      df_uses_record (dflow, &XEXP (dst, 0),
			      DF_REF_REG_USE, bb, insn,
			      DF_REF_READ_WRITE);
	      df_uses_record (dflow, &XEXP (dst, 1),
			      DF_REF_REG_USE, bb, insn, flags);
	      df_uses_record (dflow, &XEXP (dst, 2),
			      DF_REF_REG_USE, bb, insn, flags);
	      dst = XEXP (dst, 0);
	      break;
	    default:
	      gcc_unreachable ();
	  }
	return;
      }

    case RETURN:
      break;

    case ASM_OPERANDS:
    case UNSPEC_VOLATILE:
    case TRAP_IF:
    case ASM_INPUT:
      {
	/* Traditional and volatile asm instructions must be
	   considered to use and clobber all hard registers, all
	   pseudo-registers and all of memory.  So must TRAP_IF and
	   UNSPEC_VOLATILE operations.

	   Consider for instance a volatile asm that changes the fpu
	   rounding mode.  An insn should not be moved across this
	   even if it only uses pseudo-regs because it might give an
	   incorrectly rounded result.

	   However, flow.c's liveness computation did *not* do this,
	   giving the reasoning as " ?!? Unfortunately, marking all
	   hard registers as live causes massive problems for the
	   register allocator and marking all pseudos as live creates
	   mountains of uninitialized variable warnings."

	   In order to maintain the status quo with regard to liveness
	   and uses, we do what flow.c did and just mark any regs we
	   can find in ASM_OPERANDS as used.  Later on, when liveness
	   is computed, asm insns are scanned and regs_asm_clobbered
	   is filled out.

	   For all ASM_OPERANDS, we must traverse the vector of input
	   operands.  We cannot just fall through here since then we
	   would be confused by the ASM_INPUT rtx inside ASM_OPERANDS,
	   which, unlike its normal usage, does not indicate a
	   traditional asm.  */
	if (code == ASM_OPERANDS)
	  {
	    int j;

	    for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++)
	      df_uses_record (dflow, &ASM_OPERANDS_INPUT (x, j),
			      DF_REF_REG_USE, bb, insn, flags);
	    return;
	  }
	break;
      }

    case PRE_DEC:
    case POST_DEC:
    case PRE_INC:
    case POST_INC:
    case PRE_MODIFY:
    case POST_MODIFY:
      /* Catch the def of the register being modified.  */
      flags |= DF_REF_READ_WRITE;
      df_ref_record (dflow, XEXP (x, 0), &XEXP (x, 0), bb, insn,
		     DF_REF_REG_DEF, flags, true);

      /* ... Fall through to handle uses ...  */

    default:
      break;
    }

  /* Recursively scan the operands of this expression.  */
  {
    const char *fmt = GET_RTX_FORMAT (code);
    int i;

    for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      {
	if (fmt[i] == 'e')
	  {
	    /* Tail recursive case: save a function call level.  */
	    if (i == 0)
	      {
		loc = &XEXP (x, 0);
		goto retry;
	      }
	    df_uses_record (dflow, &XEXP (x, i), ref_type, bb, insn, flags);
	  }
	else if (fmt[i] == 'E')
	  {
	    int j;
	    for (j = 0; j < XVECLEN (x, i); j++)
	      df_uses_record (dflow, &XVECEXP (x, i, j), ref_type,
			      bb, insn, flags);
	  }
      }
  }
}
1486 
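/* A worked example (illustrative register numbers only): for the
   insn pattern
     (set (reg:SI 60) (plus:SI (reg:SI 61) (mem:SI (reg:SI 62))))
   df_defs_record creates a DF_REF_REG_DEF for reg 60, while
   df_uses_record creates a DF_REF_REG_USE for reg 61 and a
   DF_REF_REG_MEM_LOAD use for reg 62, the address register.  */
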
/* Return true if *LOC contains an asm.  */

static int
df_insn_contains_asm_1 (rtx *loc, void *data ATTRIBUTE_UNUSED)
{
  if (!*loc)
    return 0;
  if (GET_CODE (*loc) == ASM_OPERANDS)
    return 1;
  return 0;
}


/* Return true if INSN contains an ASM.  */

static int
df_insn_contains_asm (rtx insn)
{
  return for_each_rtx (&insn, df_insn_contains_asm_1, NULL);
}


/* Record all the refs for DF within INSN of basic block BB.  */

static void
df_insn_refs_record (struct dataflow *dflow, basic_block bb, rtx insn)
{
  struct df *df = dflow->df;
  int i;

  if (INSN_P (insn))
    {
      rtx note;

      if (df_insn_contains_asm (insn))
	DF_INSN_CONTAINS_ASM (df, insn) = true;

      /* Record register defs.  */
      df_defs_record (dflow, PATTERN (insn), bb, insn);

      if (dflow->flags & DF_EQUIV_NOTES)
	for (note = REG_NOTES (insn); note;
	     note = XEXP (note, 1))
	  {
	    switch (REG_NOTE_KIND (note))
	      {
	      case REG_EQUIV:
	      case REG_EQUAL:
		df_uses_record (dflow, &XEXP (note, 0), DF_REF_REG_USE,
				bb, insn, DF_REF_IN_NOTE);
	      default:
		break;
	      }
	  }

      if (CALL_P (insn))
	{
	  rtx note;

	  /* Record the registers used to pass arguments, and the
	     registers explicitly noted as clobbered.  */
	  for (note = CALL_INSN_FUNCTION_USAGE (insn); note;
	       note = XEXP (note, 1))
	    {
	      if (GET_CODE (XEXP (note, 0)) == USE)
		df_uses_record (dflow, &XEXP (XEXP (note, 0), 0),
				DF_REF_REG_USE,
				bb, insn, 0);
	      else if (GET_CODE (XEXP (note, 0)) == CLOBBER)
		{
		  df_defs_record (dflow, XEXP (note, 0), bb, insn);
		  if (REG_P (XEXP (XEXP (note, 0), 0)))
		    {
		      rtx reg = XEXP (XEXP (note, 0), 0);
		      int regno_last;
		      int regno_first;
		      int i;

		      regno_last = regno_first = REGNO (reg);
		      if (regno_first < FIRST_PSEUDO_REGISTER)
			regno_last
			  += hard_regno_nregs[regno_first][GET_MODE (reg)] - 1;
		      for (i = regno_first; i <= regno_last; i++)
			regs_ever_live[i] = 1;
		    }
		}
	    }

	  /* The stack ptr is used (honorarily) by a CALL insn.  */
	  df_uses_record (dflow, &regno_reg_rtx[STACK_POINTER_REGNUM],
			  DF_REF_REG_USE, bb, insn,
			  0);

	  if (dflow->flags & DF_HARD_REGS)
	    {
	      bitmap_iterator bi;
	      unsigned int ui;
	      /* Calls may also reference any of the global registers,
		 so they are recorded as used.  */
	      for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
		if (global_regs[i])
		  df_uses_record (dflow, &regno_reg_rtx[i],
				  DF_REF_REG_USE, bb, insn,
				  0);
	      EXECUTE_IF_SET_IN_BITMAP (df_invalidated_by_call, 0, ui, bi)
		df_ref_record (dflow, regno_reg_rtx[ui], &regno_reg_rtx[ui], bb,
			       insn, DF_REF_REG_DEF, DF_REF_MAY_CLOBBER, false);
	    }
	}

      /* Record the register uses.  */
      df_uses_record (dflow, &PATTERN (insn),
		      DF_REF_REG_USE, bb, insn, 0);
    }
}

static bool
df_has_eh_preds (basic_block bb)
{
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, bb->preds)
    {
      if (e->flags & EDGE_EH)
	return true;
    }
  return false;
}
1618 
1619 /* Record all the refs within the basic block BB.  */
1620 
1621 static void
df_bb_refs_record(struct dataflow * dflow,basic_block bb)1622 df_bb_refs_record (struct dataflow *dflow, basic_block bb)
1623 {
1624   struct df *df = dflow->df;
1625   rtx insn;
1626   int luid = 0;
1627   struct df_scan_bb_info *bb_info = df_scan_get_bb_info (dflow, bb->index);
1628   bitmap artificial_uses_at_bottom = NULL;
1629 
1630   if (dflow->flags & DF_HARD_REGS)
1631     artificial_uses_at_bottom = BITMAP_ALLOC (NULL);
1632 
1633   /* Need to make sure that there is a record in the basic block info. */
1634   if (!bb_info)
1635     {
1636       bb_info = (struct df_scan_bb_info *) pool_alloc (dflow->block_pool);
1637       df_scan_set_bb_info (dflow, bb->index, bb_info);
1638       bb_info->artificial_defs = NULL;
1639       bb_info->artificial_uses = NULL;
1640     }
1641 
1642   /* Scan the block an insn at a time from beginning to end.  */
1643   FOR_BB_INSNS (bb, insn)
1644     {
1645       df_insn_create_insn_record (dflow, insn);
1646       if (INSN_P (insn))
1647 	{
1648 	  /* Record defs within INSN.  */
1649 	  DF_INSN_LUID (df, insn) = luid++;
1650 	  df_insn_refs_record (dflow, bb, insn);
1651 	}
1652       DF_INSN_LUID (df, insn) = luid;
1653     }
1654 
1655 #ifdef EH_RETURN_DATA_REGNO
1656   if ((dflow->flags & DF_HARD_REGS)
1657       && df_has_eh_preds (bb))
1658     {
1659       unsigned int i;
1660       /* Mark the registers that will contain data for the handler.  */
1661       for (i = 0; ; ++i)
1662 	{
1663 	  unsigned regno = EH_RETURN_DATA_REGNO (i);
1664 	  if (regno == INVALID_REGNUM)
1665 	    break;
1666 	  df_ref_record (dflow, regno_reg_rtx[regno], &regno_reg_rtx[regno],
1667 			 bb, NULL,
1668 			 DF_REF_REG_DEF, DF_REF_ARTIFICIAL | DF_REF_AT_TOP,
1669 			 false);
1670 	}
1671     }
1672 #endif
1673 
1674 
1675   if ((dflow->flags & DF_HARD_REGS)
1676       && df_has_eh_preds (bb))
1677     {
1678 #ifdef EH_USES
1679       unsigned int i;
1680 	      /* This code is putting in an artificial ref for the use at the
1681 		 TOP of the block that receives the exception.  It is too
1682 		 cumbersome to actually put the ref on the edge.  We could
1683 		 either model this at the top of the receiver block or the
1684 		 bottom of the sender block.
1685 
1686          The bottom of the sender block is problematic because not all
1687          out-edges of a block are eh-edges.  However, it is true
1688          that all edges into a block are either eh-edges or none of
1689          them are eh-edges.  Thus, we can model this at the top of the
1690          eh-receiver for all of the edges at once. */
1691       for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
1692 	if (EH_USES (i))
1693 	  df_uses_record (dflow, &regno_reg_rtx[i],
1694 			  DF_REF_REG_USE, bb, NULL,
1695 			  DF_REF_ARTIFICIAL | DF_REF_AT_TOP);
1696 #endif
1697 
1698       /* The following code (down through the arg_pointer setting)
1699 	 appears to be necessary because there is nothing that
1700 	 describes what the exception handling code may actually need
1701 	 to keep alive.  */
1702       if (reload_completed)
1703 	{
1704 	  if (frame_pointer_needed)
1705 	    {
1706 	      bitmap_set_bit (artificial_uses_at_bottom, FRAME_POINTER_REGNUM);
1707 #if FRAME_POINTER_REGNUM != HARD_FRAME_POINTER_REGNUM
1708 	      bitmap_set_bit (artificial_uses_at_bottom, HARD_FRAME_POINTER_REGNUM);
1709 #endif
1710 	    }
1711 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
1712 	  if (fixed_regs[ARG_POINTER_REGNUM])
1713 	    bitmap_set_bit (artificial_uses_at_bottom, ARG_POINTER_REGNUM);
1714 #endif
1715 	}
1716     }
1717 
1718   if ((dflow->flags & DF_HARD_REGS)
1719       && bb->index >= NUM_FIXED_BLOCKS)
1720     {
1721       /* Before reload, there are a few registers that must be forced
1722 	 live everywhere -- which might not already be the case for
1723 	 blocks within infinite loops.  */
1724       if (!reload_completed)
1725 	{
1726 
1727 	  /* Any reference to any pseudo before reload is a potential
1728 	     reference of the frame pointer.  */
1729 	  bitmap_set_bit (artificial_uses_at_bottom, FRAME_POINTER_REGNUM);
1730 
1731 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
1732 	  /* Pseudos with argument area equivalences may require
1733 	     reloading via the argument pointer.  */
1734 	  if (fixed_regs[ARG_POINTER_REGNUM])
1735 	    bitmap_set_bit (artificial_uses_at_bottom, ARG_POINTER_REGNUM);
1736 #endif
1737 
1738 	  /* Any constant, or pseudo with constant equivalences, may
1739 	     require reloading from memory using the pic register.  */
1740 	  if ((unsigned) PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM
1741 	      && fixed_regs[PIC_OFFSET_TABLE_REGNUM])
1742 	    bitmap_set_bit (artificial_uses_at_bottom, PIC_OFFSET_TABLE_REGNUM);
1743 	}
1744       /* The all-important stack pointer must always be live.  */
1745       bitmap_set_bit (artificial_uses_at_bottom, STACK_POINTER_REGNUM);
1746     }
1747 
1748   if (dflow->flags & DF_HARD_REGS)
1749     {
1750       bitmap_iterator bi;
1751       unsigned int regno;
1752 
1753       EXECUTE_IF_SET_IN_BITMAP (artificial_uses_at_bottom, 0, regno, bi)
1754 	{
1755 	  df_uses_record (dflow, &regno_reg_rtx[regno],
1756 			  DF_REF_REG_USE, bb, NULL, DF_REF_ARTIFICIAL);
1757 	}
1758 
1759       BITMAP_FREE (artificial_uses_at_bottom);
1760     }
1761 }
1762 
1763 
1764 /* Record all the refs in the basic blocks specified by BLOCKS.  */
1765 
1766 static void
1767 df_refs_record (struct dataflow *dflow, bitmap blocks)
1768 {
1769   unsigned int bb_index;
1770   bitmap_iterator bi;
1771 
1772   EXECUTE_IF_SET_IN_BITMAP (blocks, 0, bb_index, bi)
1773     {
1774       basic_block bb = BASIC_BLOCK (bb_index);
1775       df_bb_refs_record (dflow, bb);
1776     }
1777 
1778   if (bitmap_bit_p (blocks, EXIT_BLOCK))
1779     df_record_exit_block_uses (dflow);
1780 
1781   if (bitmap_bit_p (blocks, ENTRY_BLOCK))
1782     df_record_entry_block_defs (dflow);
1783 }
1784 
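/* Editor's sketch, not part of the original df-scan.c: a caller that
   wants every block rescanned could build BLOCKS with FOR_ALL_BB,
   which walks the entry and exit blocks too, so the special-case
   recording above is also triggered.  The function name is
   hypothetical.  */

static void
example_record_all_refs (struct dataflow *dflow)
{
  bitmap blocks = BITMAP_ALLOC (NULL);
  basic_block bb;

  FOR_ALL_BB (bb)
    bitmap_set_bit (blocks, bb->index);

  df_refs_record (dflow, blocks);
  BITMAP_FREE (blocks);
}
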
1785 
1786 /*----------------------------------------------------------------------------
1787    Specialized hard register scanning functions.
1788 ----------------------------------------------------------------------------*/
1789 
1790 /* Mark a register in SET.  Hard registers in large modes get all
1791    of their component registers set as well.  */
1792 
1793 static void
1794 df_mark_reg (rtx reg, void *vset)
1795 {
1796   bitmap set = (bitmap) vset;
1797   int regno = REGNO (reg);
1798 
1799   gcc_assert (GET_MODE (reg) != BLKmode);
1800 
1801   bitmap_set_bit (set, regno);
1802   if (regno < FIRST_PSEUDO_REGISTER)
1803     {
1804       int n = hard_regno_nregs[regno][GET_MODE (reg)];
1805       while (--n > 0)
1806 	bitmap_set_bit (set, regno + n);
1807     }
1808 }
1809 
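/* Editor's sketch, not part of the original df-scan.c: marking a
   multi-word hard register sets every component bit.  Assuming hard
   register 0 occupies two word-sized registers in DImode on the
   target, bits 0 and 1 both end up in SET.  The function name is
   hypothetical; gen_rtx_REG is the standard rtl constructor.  */

static void
example_mark_wide_reg (bitmap set)
{
  df_mark_reg (gen_rtx_REG (DImode, 0), set);
}
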
1810 
1811 /* Record the (conservative) set of hard registers that are defined on
1812    entry to the function.  */
1813 
1814 static void
1815 df_record_entry_block_defs (struct dataflow *dflow)
1816 {
1817   unsigned int i;
1818   bitmap_iterator bi;
1819   rtx r;
1820   struct df *df = dflow->df;
1821 
1822   bitmap_clear (df->entry_block_defs);
1823 
1824   if (!(dflow->flags & DF_HARD_REGS))
1825     return;
1826 
1827   for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
1828     {
1829       if (FUNCTION_ARG_REGNO_P (i))
1830 #ifdef INCOMING_REGNO
1831 	bitmap_set_bit (df->entry_block_defs, INCOMING_REGNO (i));
1832 #else
1833 	bitmap_set_bit (df->entry_block_defs, i);
1834 #endif
1835     }
1836 
1837   /* Once the prologue has been generated, all of these registers
1838      should just show up in the first regular block.  */
1839   if (HAVE_prologue && epilogue_completed)
1840     {
1841       /* Defs for the callee saved registers are inserted so that the
1842 	 pushes have some defining location.  */
1843       for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
1844 	if ((call_used_regs[i] == 0) && (regs_ever_live[i]))
1845 	  bitmap_set_bit (df->entry_block_defs, i);
1846     }
1847   else
1848     {
1849       /* The always-important stack pointer.  */
1850       bitmap_set_bit (df->entry_block_defs, STACK_POINTER_REGNUM);
1851 
1852 #ifdef INCOMING_RETURN_ADDR_RTX
1853       if (REG_P (INCOMING_RETURN_ADDR_RTX))
1854 	bitmap_set_bit (df->entry_block_defs, REGNO (INCOMING_RETURN_ADDR_RTX));
1855 #endif
1856 
1857       /* If STATIC_CHAIN_INCOMING_REGNUM == STATIC_CHAIN_REGNUM
1858 	 only STATIC_CHAIN_REGNUM is defined.  If they are different,
1859 	 we only care about STATIC_CHAIN_INCOMING_REGNUM.  */
1860 #ifdef STATIC_CHAIN_INCOMING_REGNUM
1861       bitmap_set_bit (df->entry_block_defs, STATIC_CHAIN_INCOMING_REGNUM);
1862 #else
1863 #ifdef STATIC_CHAIN_REGNUM
1864       bitmap_set_bit (df->entry_block_defs, STATIC_CHAIN_REGNUM);
1865 #endif
1866 #endif
1867 
1868       r = TARGET_STRUCT_VALUE_RTX (current_function_decl, true);
1869       if (r && REG_P (r))
1870 	bitmap_set_bit (df->entry_block_defs, REGNO (r));
1871     }
1872 
1873   if ((!reload_completed) || frame_pointer_needed)
1874     {
1875       /* Any reference to any pseudo before reload is a potential
1876 	 reference of the frame pointer.  */
1877       bitmap_set_bit (df->entry_block_defs, FRAME_POINTER_REGNUM);
1878 #if FRAME_POINTER_REGNUM != HARD_FRAME_POINTER_REGNUM
1879       /* If they are different, also mark the hard frame pointer as live.  */
1880       if (!LOCAL_REGNO (HARD_FRAME_POINTER_REGNUM))
1881 	bitmap_set_bit (df->entry_block_defs, HARD_FRAME_POINTER_REGNUM);
1882 #endif
1883     }
1884 
1885   /* These registers are live everywhere.  */
1886   if (!reload_completed)
1887     {
1888 #ifdef EH_USES
1889       /* The ia-64, the only machine that uses this, does not define these
1890 	 until after reload.  */
1891       for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
1892 	if (EH_USES (i))
1893 	  {
1894 	    bitmap_set_bit (df->entry_block_defs, i);
1895 	  }
1896 #endif
1897 
1898 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
1899       /* Pseudos with argument area equivalences may require
1900 	 reloading via the argument pointer.  */
1901       if (fixed_regs[ARG_POINTER_REGNUM])
1902 	bitmap_set_bit (df->entry_block_defs, ARG_POINTER_REGNUM);
1903 #endif
1904 
1905 #ifdef PIC_OFFSET_TABLE_REGNUM
1906       /* Any constant, or pseudo with constant equivalences, may
1907 	 require reloading from memory using the pic register.  */
1908       if ((unsigned) PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM
1909 	  && fixed_regs[PIC_OFFSET_TABLE_REGNUM])
1910 	bitmap_set_bit (df->entry_block_defs, PIC_OFFSET_TABLE_REGNUM);
1911 #endif
1912     }
1913 
1914   targetm.live_on_entry (df->entry_block_defs);
1915 
1916   EXECUTE_IF_SET_IN_BITMAP (df->entry_block_defs, 0, i, bi)
1917     {
1918       df_ref_record (dflow, regno_reg_rtx[i], &regno_reg_rtx[i],
1919 		     ENTRY_BLOCK_PTR, NULL,
1920 		     DF_REF_REG_DEF, DF_REF_ARTIFICIAL, false);
1921     }
1922 }
1923 
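/* Editor's sketch, not part of the original df-scan.c: once recorded,
   the set can be inspected with the same bitmap iterator used above.
   The function name is hypothetical.  */

static void
example_dump_entry_defs (struct df *df)
{
  unsigned int i;
  bitmap_iterator bi;

  EXECUTE_IF_SET_IN_BITMAP (df->entry_block_defs, 0, i, bi)
    fprintf (stderr, "entry block defines hard reg %u\n", i);
}
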
1924 
1925 /* Record the set of hard registers that are used in the exit block.  */
1926 
1927 static void
1928 df_record_exit_block_uses (struct dataflow *dflow)
1929 {
1930   unsigned int i;
1931   bitmap_iterator bi;
1932   struct df *df = dflow->df;
1933 
1934   bitmap_clear (df->exit_block_uses);
1935 
1936   if (!(dflow->flags & DF_HARD_REGS))
1937     return;
1938 
1939   /* If exiting needs the right stack value, consider the stack
1940      pointer live at the end of the function.  */
1941   if ((HAVE_epilogue && epilogue_completed)
1942       || !EXIT_IGNORE_STACK
1943       || (!FRAME_POINTER_REQUIRED
1944 	  && !current_function_calls_alloca
1945 	  && flag_omit_frame_pointer)
1946       || current_function_sp_is_unchanging)
1947     {
1948       bitmap_set_bit (df->exit_block_uses, STACK_POINTER_REGNUM);
1949     }
1950 
1951   /* Mark the frame pointer if needed at the end of the function.
1952      If we end up eliminating it, it will be removed from the live
1953      list of each basic block by reload.  */
1954 
1955   if ((!reload_completed) || frame_pointer_needed)
1956     {
1957       bitmap_set_bit (df->exit_block_uses, FRAME_POINTER_REGNUM);
1958 #if FRAME_POINTER_REGNUM != HARD_FRAME_POINTER_REGNUM
1959       /* If they are different, also mark the hard frame pointer as live.  */
1960       if (!LOCAL_REGNO (HARD_FRAME_POINTER_REGNUM))
1961 	bitmap_set_bit (df->exit_block_uses, HARD_FRAME_POINTER_REGNUM);
1962 #endif
1963     }
1964 
1965 #ifndef PIC_OFFSET_TABLE_REG_CALL_CLOBBERED
1966   /* Many architectures have a GP register even without flag_pic.
1967      Assume the pic register is not in use, or will be handled by
1968      other means, if it is not fixed.  */
1969   if ((unsigned) PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM
1970       && fixed_regs[PIC_OFFSET_TABLE_REGNUM])
1971     bitmap_set_bit (df->exit_block_uses, PIC_OFFSET_TABLE_REGNUM);
1972 #endif
1973 
1974   /* Mark all global registers and all registers used by the
1975      epilogue as live at the end of the function, since they
1976      may be referenced by our caller.  */
1977   for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
1978     if (global_regs[i] || EPILOGUE_USES (i))
1979       bitmap_set_bit (df->exit_block_uses, i);
1980 
1981   if (HAVE_epilogue && epilogue_completed)
1982     {
1983       /* Mark all call-saved registers that we actually used.  */
1984       for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
1985 	if (regs_ever_live[i] && !LOCAL_REGNO (i)
1986 	    && !TEST_HARD_REG_BIT (regs_invalidated_by_call, i))
1987 	  bitmap_set_bit (df->exit_block_uses, i);
1988     }
1989 
1990 #ifdef EH_RETURN_DATA_REGNO
1991   /* Mark the registers that will contain data for the handler.  */
1992   if (reload_completed && current_function_calls_eh_return)
1993     for (i = 0; ; ++i)
1994       {
1995 	unsigned regno = EH_RETURN_DATA_REGNO (i);
1996 	if (regno == INVALID_REGNUM)
1997 	  break;
1998 	bitmap_set_bit (df->exit_block_uses, regno);
1999       }
2000 #endif
2001 
2002 #ifdef EH_RETURN_STACKADJ_RTX
2003   if ((!HAVE_epilogue || ! epilogue_completed)
2004       && current_function_calls_eh_return)
2005     {
2006       rtx tmp = EH_RETURN_STACKADJ_RTX;
2007       if (tmp && REG_P (tmp))
2008 	df_mark_reg (tmp, df->exit_block_uses);
2009     }
2010 #endif
2011 
2012 #ifdef EH_RETURN_HANDLER_RTX
2013   if ((!HAVE_epilogue || ! epilogue_completed)
2014       && current_function_calls_eh_return)
2015     {
2016       rtx tmp = EH_RETURN_HANDLER_RTX;
2017       if (tmp && REG_P (tmp))
2018 	df_mark_reg (tmp, df->exit_block_uses);
2019     }
2020 #endif
2021 
2022   /* Mark function return value.  */
2023   diddle_return_value (df_mark_reg, (void*) df->exit_block_uses);
2024 
2025   if (dflow->flags & DF_HARD_REGS)
2026     EXECUTE_IF_SET_IN_BITMAP (df->exit_block_uses, 0, i, bi)
2027       df_uses_record (dflow, &regno_reg_rtx[i],
2028   		      DF_REF_REG_USE, EXIT_BLOCK_PTR, NULL,
2029 		      DF_REF_ARTIFICIAL);
2030 }
2031 
2032 static bool initialized = false;
2033 
2034 /* Initialize some platform specific structures.  */
2035 
2036 void
2037 df_hard_reg_init (void)
2038 {
2039   int i;
2040 #ifdef ELIMINABLE_REGS
2041   static const struct {const int from, to; } eliminables[] = ELIMINABLE_REGS;
2042 #endif
2043   /* After reload, some ports add certain bits to regs_ever_live so
2044      this cannot be reset.  */
2045 
2046   if (!reload_completed)
2047     memset (regs_ever_live, 0, sizeof (regs_ever_live));
2048 
2049   if (initialized)
2050     return;
2051 
2052   bitmap_obstack_initialize (&persistent_obstack);
2053 
2054   /* Record which registers will be eliminated.  We use this in
2055      mark_used_regs.  */
2056   CLEAR_HARD_REG_SET (elim_reg_set);
2057 
2058 #ifdef ELIMINABLE_REGS
2059   for (i = 0; i < (int) ARRAY_SIZE (eliminables); i++)
2060     SET_HARD_REG_BIT (elim_reg_set, eliminables[i].from);
2061 #else
2062   SET_HARD_REG_BIT (elim_reg_set, FRAME_POINTER_REGNUM);
2063 #endif
2064 
2065   df_invalidated_by_call = BITMAP_ALLOC (&persistent_obstack);
2066 
2067   /* Inconveniently, this is only readily available in hard reg set
2068      form.  */
2069   for (i = 0; i < FIRST_PSEUDO_REGISTER; ++i)
2070     if (TEST_HARD_REG_BIT (regs_invalidated_by_call, i))
2071       bitmap_set_bit (df_invalidated_by_call, i);
2072 
2073   initialized = true;
2074 }
2075
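
/* Editor's sketch, not part of the original df-scan.c: the static
   `initialized' flag makes repeated calls safe; only the
   regs_ever_live reset (before reload) happens again.  The caller
   name is hypothetical.  */

static void
example_scan_setup (void)
{
  df_hard_reg_init ();	/* first call does the one-time setup */
  df_hard_reg_init ();	/* later calls return after the early checks */
}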