1 /* Partial redundancy elimination / Hoisting for RTL.
2    Copyright (C) 1997-2018 Free Software Foundation, Inc.
3 
4 This file is part of GCC.
5 
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10 
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
14 for more details.
15 
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3.  If not see
18 <http://www.gnu.org/licenses/>.  */
19 
20 /* TODO
21    - reordering of memory allocation and freeing to be more space efficient
22    - calc rough register pressure information and use the info to drive all
23      kinds of code motion (including code hoisting) in a unified way.
24 */
25 
26 /* References searched while implementing this.
27 
28    Compilers Principles, Techniques and Tools
29    Aho, Sethi, Ullman
30    Addison-Wesley, 1988
31 
32    Global Optimization by Suppression of Partial Redundancies
33    E. Morel, C. Renvoise
34    Communications of the ACM, Vol. 22, Num. 2, Feb. 1979
35 
36    A Portable Machine-Independent Global Optimizer - Design and Measurements
37    Frederick Chow
38    Stanford Ph.D. thesis, Dec. 1983
39 
40    A Fast Algorithm for Code Movement Optimization
41    D.M. Dhamdhere
42    SIGPLAN Notices, Vol. 23, Num. 10, Oct. 1988
43 
44    A Solution to a Problem with Morel and Renvoise's
45    Global Optimization by Suppression of Partial Redundancies
46    K-H Drechsler, M.P. Stadel
47    ACM TOPLAS, Vol. 10, Num. 4, Oct. 1988
48 
49    Practical Adaptation of the Global Optimization
50    Algorithm of Morel and Renvoise
51    D.M. Dhamdhere
52    ACM TOPLAS, Vol. 13, Num. 2. Apr. 1991
53 
54    Efficiently Computing Static Single Assignment Form and the Control
55    Dependence Graph
56    R. Cytron, J. Ferrante, B.K. Rosen, M.N. Wegman, and F.K. Zadeck
57    ACM TOPLAS, Vol. 13, Num. 4, Oct. 1991
58 
59    Lazy Code Motion
60    J. Knoop, O. Ruthing, B. Steffen
61    ACM SIGPLAN Notices Vol. 27, Num. 7, Jul. 1992, '92 Conference on PLDI
62 
63    What's In a Region?  Or Computing Control Dependence Regions in Near-Linear
64    Time for Reducible Flow Control
65    Thomas Ball
66    ACM Letters on Programming Languages and Systems,
67    Vol. 2, Num. 1-4, Mar-Dec 1993
68 
69    An Efficient Representation for Sparse Sets
70    Preston Briggs, Linda Torczon
71    ACM Letters on Programming Languages and Systems,
72    Vol. 2, Num. 1-4, Mar-Dec 1993
73 
74    A Variation of Knoop, Ruthing, and Steffen's Lazy Code Motion
75    K-H Drechsler, M.P. Stadel
76    ACM SIGPLAN Notices, Vol. 28, Num. 5, May 1993
77 
78    Partial Dead Code Elimination
79    J. Knoop, O. Ruthing, B. Steffen
80    ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994
81 
82    Effective Partial Redundancy Elimination
83    P. Briggs, K.D. Cooper
84    ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994
85 
86    The Program Structure Tree: Computing Control Regions in Linear Time
87    R. Johnson, D. Pearson, K. Pingali
88    ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994
89 
90    Optimal Code Motion: Theory and Practice
91    J. Knoop, O. Ruthing, B. Steffen
92    ACM TOPLAS, Vol. 16, Num. 4, Jul. 1994
93 
94    The power of assignment motion
95    J. Knoop, O. Ruthing, B. Steffen
96    ACM SIGPLAN Notices Vol. 30, Num. 6, Jun. 1995, '95 Conference on PLDI
97 
98    Global code motion / global value numbering
99    C. Click
100    ACM SIGPLAN Notices Vol. 30, Num. 6, Jun. 1995, '95 Conference on PLDI
101 
102    Value Driven Redundancy Elimination
103    L.T. Simpson
104    Rice University Ph.D. thesis, Apr. 1996
105 
106    Value Numbering
107    L.T. Simpson
108    Massively Scalar Compiler Project, Rice University, Sep. 1996
109 
110    High Performance Compilers for Parallel Computing
111    Michael Wolfe
112    Addison-Wesley, 1996
113 
114    Advanced Compiler Design and Implementation
115    Steven Muchnick
116    Morgan Kaufmann, 1997
117 
118    Building an Optimizing Compiler
119    Robert Morgan
120    Digital Press, 1998
121 
122    People wishing to speed up the code here should read:
123      Elimination Algorithms for Data Flow Analysis
124      B.G. Ryder, M.C. Paull
125      ACM Computing Surveys, Vol. 18, Num. 3, Sep. 1986
126 
127      How to Analyze Large Programs Efficiently and Informatively
128      D.M. Dhamdhere, B.K. Rosen, F.K. Zadeck
129      ACM SIGPLAN Notices Vol. 27, Num. 7, Jul. 1992, '92 Conference on PLDI
130 
131    People wishing to do something different can find various possibilities
132    in the above papers and elsewhere.
133 */
134 
135 #include "config.h"
136 #include "system.h"
137 #include "coretypes.h"
138 #include "backend.h"
139 #include "target.h"
140 #include "rtl.h"
141 #include "tree.h"
142 #include "predict.h"
143 #include "df.h"
144 #include "memmodel.h"
145 #include "tm_p.h"
146 #include "insn-config.h"
147 #include "print-rtl.h"
148 #include "regs.h"
149 #include "ira.h"
150 #include "recog.h"
151 #include "diagnostic-core.h"
152 #include "cfgrtl.h"
153 #include "cfganal.h"
154 #include "lcm.h"
155 #include "cfgcleanup.h"
156 #include "expr.h"
157 #include "params.h"
158 #include "intl.h"
159 #include "tree-pass.h"
160 #include "dbgcnt.h"
161 #include "gcse.h"
162 #include "gcse-common.h"
163 
164 /* We support GCSE via Partial Redundancy Elimination.  PRE optimizations
165    are a superset of those done by classic GCSE.
166 
167    Two passes of copy/constant propagation are done around PRE or hoisting
168    because the first one enables more GCSE and the second one helps to clean
169    up the copies that PRE and HOIST create.  This is needed more for PRE than
170    for HOIST because code hoisting will try to use an existing register
171    containing the common subexpression rather than create a new one.  This is
172    harder to do for PRE because of the code motion (which HOIST doesn't do).
173 
174    Expressions we are interested in GCSE-ing are of the form
175    (set (pseudo-reg) (expression)).
176    Function want_to_gcse_p says what these are.
177 
178    In addition, expressions in REG_EQUAL notes are candidates for GCSE-ing.
179    This allows PRE to hoist expressions that are expressed in multiple insns,
180    such as complex address calculations (e.g. for PIC code, or loads with a
181    high part and a low part).
182 
183    PRE handles moving invariant expressions out of loops (by treating them as
184    partially redundant).
185 
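   As a purely illustrative example (not taken from any real test case),
   consider a source fragment such as

     if (cond)
       x = a + b;
     ...
     y = a + b;

   The second evaluation of a + b is partially redundant: it is redundant
   along the path through the THEN arm but not along the path that skips
   it.  PRE inserts an evaluation of a + b on the path where it is missing,
   copies the result into a new pseudo, and replaces the second evaluation
   with a use of that pseudo.  Loop-invariant expressions fall out of the
   same scheme, because an expression computed on a previous iteration
   makes the computation in the current iteration partially redundant.
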
186    **********************
187 
188    We used to support multiple passes but there are diminishing returns in
189    doing so.  The first pass usually makes 90% of the changes that are doable.
190    A second pass can make a few more changes made possible by the first pass.
191    Experiments show any further passes don't make enough changes to justify
192    the expense.
193 
194    A study of spec92 using an unlimited number of passes:
195    [1 pass] = 1208 substitutions, [2] = 577, [3] = 202, [4] = 192, [5] = 83,
196    [6] = 34, [7] = 17, [8] = 9, [9] = 4, [10] = 4, [11] = 2,
197    [12] = 2, [13] = 1, [15] = 1, [16] = 2, [41] = 1
198 
199    It was found that doing copy propagation between passes enables further
200    substitutions.
201 
202    This study was done before expressions in REG_EQUAL notes were added as
203    candidate expressions for optimization, and before the GIMPLE optimizers
204    were added.  Multiple passes are probably even less efficient now than
205    at the time when the study was conducted.
206 
207    PRE is quite expensive in complicated functions because the DFA can take
208    a while to converge.  Hence we only perform one pass.
209 
210    **********************
211 
212    The steps for PRE are:
213 
214    1) Build the hash table of expressions we wish to GCSE (expr_hash_table).
215 
216    2) Perform the data flow analysis for PRE.
217 
218    3) Delete the redundant instructions
219 
220    4) Insert the required copies [if any] that make the partially
221       redundant instructions fully redundant.
222 
223    5) For other reaching expressions, insert an instruction to copy the value
224       to a newly created pseudo that will reach the redundant instruction.
225 
226    The deletion is done first so that when we do insertions we
227    know which pseudo reg to use.
228 
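   As a rough sketch of how these steps play out on the illustrative
   fragment above: the redundant evaluation of a + b is replaced by a copy
   from a new pseudo R (the "reaching register" of the expression), the
   surviving evaluation "x = a + b" gets a copy of its result into R
   inserted after it, and an evaluation "R = a + b" is inserted on the edge
   along which the expression did not previously reach.  The extra copies
   are then largely cleaned up by the second CPROP pass.
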
229    Various papers have argued that PRE DFA is expensive (O(n^2)) and others
230    argue it is not.  The number of iterations for the algorithm to converge
231    is typically 2-4 so I don't view it as that expensive (relatively speaking).
232 
233    PRE GCSE depends heavily on the second CPROP pass to clean up the copies
234    we create.  To make an expression reach the place where it's redundant,
235    the result of the expression is copied to a new register, and the redundant
236    expression is deleted by replacing it with this new register.  Classic GCSE
237    doesn't have this problem as much, because it computes the reaching defs of
238    each register in each block and thus can try to use an existing
239    register.  */
240 
241 /* GCSE global vars.  */
242 
243 struct target_gcse default_target_gcse;
244 #if SWITCHABLE_TARGET
245 struct target_gcse *this_target_gcse = &default_target_gcse;
246 #endif
247 
248 /* Set to non-zero if CSE should run after all GCSE optimizations are done.  */
249 int flag_rerun_cse_after_global_opts;
250 
251 /* An obstack for our working variables.  */
252 static struct obstack gcse_obstack;
253 
254 /* Hash table of expressions.  */
255 
256 struct gcse_expr
257 {
258   /* The expression.  */
259   rtx expr;
260   /* Index in the available expression bitmaps.  */
261   int bitmap_index;
262   /* Next entry with the same hash.  */
263   struct gcse_expr *next_same_hash;
264   /* List of anticipatable occurrences in basic blocks in the function.
265      An "anticipatable occurrence" is one that is the first occurrence in the
266      basic block, the operands are not modified in the basic block prior
267      to the occurrence and the output is not used between the start of
268      the block and the occurrence.  */
269   struct gcse_occr *antic_occr;
270   /* List of available occurrences in basic blocks in the function.
271      An "available occurrence" is one that is the last occurrence in the
272      basic block and the operands are not modified by following statements in
273      the basic block [including this insn].  */
274   struct gcse_occr *avail_occr;
275   /* Non-null if the computation is PRE redundant.
276      The value is the newly created pseudo-reg to record a copy of the
277      expression in all the places that reach the redundant copy.  */
278   rtx reaching_reg;
279   /* Maximum distance in instructions this expression can travel.
280      We avoid moving simple expressions for more than a few instructions
281      to keep register pressure under control.
282      A value of "0" removes restrictions on how far the expression can
283      travel.  */
284   HOST_WIDE_INT max_distance;
285 };
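
/* A purely illustrative example of the two occurrence kinds above, with
   invented register numbers: in a basic block containing

     (set (reg:SI 100) (plus:SI (reg:SI 90) (reg:SI 91)))	; insn A
     (set (reg:SI 90) (const_int 0))				; insn B
     (set (reg:SI 101) (plus:SI (reg:SI 90) (reg:SI 91)))	; insn C

   insn A holds the anticipatable occurrence of (plus:SI (reg:SI 90)
   (reg:SI 91)): it is the first occurrence and its operands are unchanged
   from the start of the block.  Insn C holds the available occurrence: its
   operands are not modified between it and the end of the block, whereas
   insn B kills the availability of the value computed by insn A.  The
   expression would be transparent in any block that modifies neither
   reg 90 nor reg 91.  */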
286 
287 /* Occurrence of an expression.
288    There is one per basic block.  If a pattern appears more than once the
289    last appearance is used [or first for anticipatable expressions].  */
290 
291 struct gcse_occr
292 {
293   /* Next occurrence of this expression.  */
294   struct gcse_occr *next;
295   /* The insn that computes the expression.  */
296   rtx_insn *insn;
297   /* Nonzero if this [anticipatable] occurrence has been deleted.  */
298   char deleted_p;
299   /* Nonzero if this [available] occurrence has been copied to
300      reaching_reg.  */
301   /* ??? This is mutually exclusive with deleted_p, so they could share
302      the same byte.  */
303   char copied_p;
304 };
305 
306 typedef struct gcse_occr *occr_t;
307 
308 /* Expression hash tables.
309    Each hash table is an array of buckets.
310    ??? It is known that if it were an array of entries, structure elements
311    `next_same_hash' and `bitmap_index' wouldn't be necessary.  However, it is
312    not clear whether in the final analysis a sufficient amount of memory would
313    be saved as the size of the available expression bitmaps would be larger
314    [one could build a mapping table without holes afterwards though].
315    Someday I'll perform the computation and figure it out.  */
316 
317 struct gcse_hash_table_d
318 {
319   /* The table itself.
320      This is an array of `expr_hash_table_size' elements.  */
321   struct gcse_expr **table;
322 
323   /* Size of the hash table, in elements.  */
324   unsigned int size;
325 
326   /* Number of hash table elements.  */
327   unsigned int n_elems;
328 };
329 
330 /* Expression hash table.  */
331 static struct gcse_hash_table_d expr_hash_table;
332 
333 /* This is a list of expressions which are MEMs and will be used by load
334    or store motion.
335    Load motion tracks MEMs which aren't killed by anything except itself,
336    i.e. loads and stores to a single location.
337    We can then allow movement of these MEM refs with a little special
338    allowance (all stores copy the same value to the reaching reg used
339    for the loads).  This means all values used to store into memory must have
340    no side effects, so we can re-issue the setter value.  */
341 
342 struct ls_expr
343 {
344   struct gcse_expr * expr;	/* Gcse expression reference for LM.  */
345   rtx pattern;			/* Pattern of this mem.  */
346   rtx pattern_regs;		/* List of registers mentioned by the mem.  */
347   vec<rtx_insn *> stores;	/* INSN list of stores seen.  */
348   struct ls_expr * next;	/* Next in the list.  */
349   int invalid;			/* Invalid for some reason.  */
350   int index;			/* If it maps to a bitmap index.  */
351   unsigned int hash_index;	/* Index when in a hash table.  */
352   rtx reaching_reg;		/* Register to use when re-writing.  */
353 };
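
/* An illustrative (made-up) case: a loop that both loads and stores a
   single location, e.g.

     for (i = 0; i < n; i++)
       p->count += v[i];

   yields a MEM for p->count that is killed only by its own store.  Load
   motion can then arrange for the value to live in the reaching register,
   with every store copying that same register back to memory, so the
   repeated load inside the loop can be treated as redundant by PRE.  */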
354 
355 /* Head of the list of load/store memory refs.  */
356 static struct ls_expr * pre_ldst_mems = NULL;
357 
358 struct pre_ldst_expr_hasher : nofree_ptr_hash <ls_expr>
359 {
360   typedef value_type compare_type;
361   static inline hashval_t hash (const ls_expr *);
362   static inline bool equal (const ls_expr *, const ls_expr *);
363 };
364 
365 /* Hashtable helpers.  */
366 inline hashval_t
367 pre_ldst_expr_hasher::hash (const ls_expr *x)
368 {
369   int do_not_record_p = 0;
370   return
371     hash_rtx (x->pattern, GET_MODE (x->pattern), &do_not_record_p, NULL, false);
372 }
373 
374 static int expr_equiv_p (const_rtx, const_rtx);
375 
376 inline bool
377 pre_ldst_expr_hasher::equal (const ls_expr *ptr1,
378 			     const ls_expr *ptr2)
379 {
380   return expr_equiv_p (ptr1->pattern, ptr2->pattern);
381 }
382 
383 /* Hashtable for the load/store memory refs.  */
384 static hash_table<pre_ldst_expr_hasher> *pre_ldst_table;
385 
386 /* Bitmap containing one bit for each register in the program.
387    Used when performing GCSE to track which registers have been set since
388    the start of the basic block.  */
389 static regset reg_set_bitmap;
390 
391 /* Array, indexed by basic block number for a list of insns which modify
392    memory within that block.  */
393 static vec<rtx_insn *> *modify_mem_list;
394 static bitmap modify_mem_list_set;
395 
396 /* This array parallels modify_mem_list, except that it stores MEMs
397    being set and their canonicalized memory addresses.  */
398 static vec<modify_pair> *canon_modify_mem_list;
399 
400 /* Bitmap indexed by block numbers to record which blocks contain
401    function calls.  */
402 static bitmap blocks_with_calls;
403 
404 /* Various variables for statistics gathering.  */
405 
406 /* Memory used in a pass.
407    This isn't intended to be absolutely precise.  Its intent is only
408    to keep an eye on memory usage.  */
409 static int bytes_used;
410 
411 /* GCSE substitutions made.  */
412 static int gcse_subst_count;
413 /* Number of copy instructions created.  */
414 static int gcse_create_count;
415 
416 /* Doing code hoisting.  */
417 static bool doing_code_hoisting_p = false;
418 
419 /* Kill set for the available expressions computation.  */
420 static sbitmap *ae_kill;
421 
422 /* Data stored for each basic block.  */
423 struct bb_data
424 {
425   /* Maximal register pressure inside basic block for given register class
426      (defined only for the pressure classes).  */
427   int max_reg_pressure[N_REG_CLASSES];
428   /* Recorded register pressure of basic block before trying to hoist
429      an expression.  Will be used to restore the register pressure
430      if the expression should not be hoisted.  */
431   int old_pressure;
432   /* Recorded register live_in info of basic block during code hoisting
433      process.  BACKUP is used to record live_in info before trying to
434      hoist an expression, and will be used to restore LIVE_IN if the
435      expression should not be hoisted.  */
436   bitmap live_in, backup;
437 };
438 
439 #define BB_DATA(bb) ((struct bb_data *) (bb)->aux)
440 
441 static basic_block curr_bb;
442 
443 /* Current register pressure for each pressure class.  */
444 static int curr_reg_pressure[N_REG_CLASSES];
445 
446 
447 static void compute_can_copy (void);
448 static void *gmalloc (size_t) ATTRIBUTE_MALLOC;
449 static void *gcalloc (size_t, size_t) ATTRIBUTE_MALLOC;
450 static void *gcse_alloc (unsigned long);
451 static void alloc_gcse_mem (void);
452 static void free_gcse_mem (void);
453 static void hash_scan_insn (rtx_insn *, struct gcse_hash_table_d *);
454 static void hash_scan_set (rtx, rtx_insn *, struct gcse_hash_table_d *);
455 static void hash_scan_clobber (rtx, rtx_insn *, struct gcse_hash_table_d *);
456 static void hash_scan_call (rtx, rtx_insn *, struct gcse_hash_table_d *);
457 static int oprs_unchanged_p (const_rtx, const rtx_insn *, int);
458 static int oprs_anticipatable_p (const_rtx, const rtx_insn *);
459 static int oprs_available_p (const_rtx, const rtx_insn *);
460 static void insert_expr_in_table (rtx, machine_mode, rtx_insn *, int, int,
461 				  HOST_WIDE_INT, struct gcse_hash_table_d *);
462 static unsigned int hash_expr (const_rtx, machine_mode, int *, int);
463 static void record_last_reg_set_info (rtx_insn *, int);
464 static void record_last_mem_set_info (rtx_insn *);
465 static void record_last_set_info (rtx, const_rtx, void *);
466 static void compute_hash_table (struct gcse_hash_table_d *);
467 static void alloc_hash_table (struct gcse_hash_table_d *);
468 static void free_hash_table (struct gcse_hash_table_d *);
469 static void compute_hash_table_work (struct gcse_hash_table_d *);
470 static void dump_hash_table (FILE *, const char *, struct gcse_hash_table_d *);
471 static void compute_local_properties (sbitmap *, sbitmap *, sbitmap *,
472 				      struct gcse_hash_table_d *);
473 static void mems_conflict_for_gcse_p (rtx, const_rtx, void *);
474 static int load_killed_in_block_p (const_basic_block, int, const_rtx, int);
475 static void alloc_pre_mem (int, int);
476 static void free_pre_mem (void);
477 static struct edge_list *compute_pre_data (void);
478 static int pre_expr_reaches_here_p (basic_block, struct gcse_expr *,
479 				    basic_block);
480 static void insert_insn_end_basic_block (struct gcse_expr *, basic_block);
481 static void pre_insert_copy_insn (struct gcse_expr *, rtx_insn *);
482 static void pre_insert_copies (void);
483 static int pre_delete (void);
484 static int pre_gcse (struct edge_list *);
485 static int one_pre_gcse_pass (void);
486 static void add_label_notes (rtx, rtx_insn *);
487 static void alloc_code_hoist_mem (int, int);
488 static void free_code_hoist_mem (void);
489 static void compute_code_hoist_vbeinout (void);
490 static void compute_code_hoist_data (void);
491 static int should_hoist_expr_to_dom (basic_block, struct gcse_expr *,
492 				     basic_block,
493 				     sbitmap, HOST_WIDE_INT, int *,
494 				     enum reg_class,
495 				     int *, bitmap, rtx_insn *);
496 static int hoist_code (void);
497 static enum reg_class get_regno_pressure_class (int regno, int *nregs);
498 static enum reg_class get_pressure_class_and_nregs (rtx_insn *insn, int *nregs);
499 static int one_code_hoisting_pass (void);
500 static rtx_insn *process_insert_insn (struct gcse_expr *);
501 static int pre_edge_insert (struct edge_list *, struct gcse_expr **);
502 static int pre_expr_reaches_here_p_work (basic_block, struct gcse_expr *,
503 					 basic_block, char *);
504 static struct ls_expr * ldst_entry (rtx);
505 static void free_ldst_entry (struct ls_expr *);
506 static void free_ld_motion_mems (void);
507 static void print_ldst_list (FILE *);
508 static struct ls_expr * find_rtx_in_ldst (rtx);
509 static int simple_mem (const_rtx);
510 static void invalidate_any_buried_refs (rtx);
511 static void compute_ld_motion_mems (void);
512 static void trim_ld_motion_mems (void);
513 static void update_ld_motion_stores (struct gcse_expr *);
514 static void clear_modify_mem_tables (void);
515 static void free_modify_mem_tables (void);
516 
517 #define GNEW(T)			((T *) gmalloc (sizeof (T)))
518 #define GCNEW(T)		((T *) gcalloc (1, sizeof (T)))
519 
520 #define GNEWVEC(T, N)		((T *) gmalloc (sizeof (T) * (N)))
521 #define GCNEWVEC(T, N)		((T *) gcalloc ((N), sizeof (T)))
522 
523 #define GNEWVAR(T, S)		((T *) gmalloc ((S)))
524 #define GCNEWVAR(T, S)		((T *) gcalloc (1, (S)))
525 
526 #define GOBNEW(T)		((T *) gcse_alloc (sizeof (T)))
527 #define GOBNEWVAR(T, S)		((T *) gcse_alloc ((S)))
528 
529 /* Misc. utilities.  */
530 
531 #define can_copy \
532   (this_target_gcse->x_can_copy)
533 #define can_copy_init_p \
534   (this_target_gcse->x_can_copy_init_p)
535 
536 /* Compute which modes support reg/reg copy operations.  */
537 
538 static void
539 compute_can_copy (void)
540 {
541   int i;
542 #ifndef AVOID_CCMODE_COPIES
543   rtx reg;
544   rtx_insn *insn;
545 #endif
546   memset (can_copy, 0, NUM_MACHINE_MODES);
547 
548   start_sequence ();
549   for (i = 0; i < NUM_MACHINE_MODES; i++)
550     if (GET_MODE_CLASS (i) == MODE_CC)
551       {
552 #ifdef AVOID_CCMODE_COPIES
553 	can_copy[i] = 0;
554 #else
555 	reg = gen_rtx_REG ((machine_mode) i, LAST_VIRTUAL_REGISTER + 1);
556 	insn = emit_insn (gen_rtx_SET (reg, reg));
557 	if (recog (PATTERN (insn), insn, NULL) >= 0)
558 	  can_copy[i] = 1;
559 #endif
560       }
561     else
562       can_copy[i] = 1;
563 
564   end_sequence ();
565 }
566 
567 /* Returns whether the mode supports reg/reg copy operations.  */
568 
569 bool
570 can_copy_p (machine_mode mode)
571 {
572   if (! can_copy_init_p)
573     {
574       compute_can_copy ();
575       can_copy_init_p = true;
576     }
577 
578   return can_copy[mode] != 0;
579 }
580 
581 /* Cover function to xmalloc to record bytes allocated.  */
582 
583 static void *
584 gmalloc (size_t size)
585 {
586   bytes_used += size;
587   return xmalloc (size);
588 }
589 
590 /* Cover function to xcalloc to record bytes allocated.  */
591 
592 static void *
593 gcalloc (size_t nelem, size_t elsize)
594 {
595   bytes_used += nelem * elsize;
596   return xcalloc (nelem, elsize);
597 }
598 
599 /* Cover function to obstack_alloc.  */
600 
601 static void *
602 gcse_alloc (unsigned long size)
603 {
604   bytes_used += size;
605   return obstack_alloc (&gcse_obstack, size);
606 }
607 
608 /* Allocate memory for the reg/memory set tracking tables.
609    This is called at the start of each pass.  */
610 
611 static void
612 alloc_gcse_mem (void)
613 {
614   /* Allocate vars to track sets of regs.  */
615   reg_set_bitmap = ALLOC_REG_SET (NULL);
616 
617   /* Allocate array to keep a list of insns which modify memory in each
618      basic block.  The two typedefs are needed to work around the
619      pre-processor limitation with template types in macro arguments.  */
620   typedef vec<rtx_insn *> vec_rtx_heap;
621   typedef vec<modify_pair> vec_modify_pair_heap;
622   modify_mem_list = GCNEWVEC (vec_rtx_heap, last_basic_block_for_fn (cfun));
623   canon_modify_mem_list = GCNEWVEC (vec_modify_pair_heap,
624 				    last_basic_block_for_fn (cfun));
625   modify_mem_list_set = BITMAP_ALLOC (NULL);
626   blocks_with_calls = BITMAP_ALLOC (NULL);
627 }
628 
629 /* Free memory allocated by alloc_gcse_mem.  */
630 
631 static void
632 free_gcse_mem (void)
633 {
634   FREE_REG_SET (reg_set_bitmap);
635 
636   free_modify_mem_tables ();
637   BITMAP_FREE (modify_mem_list_set);
638   BITMAP_FREE (blocks_with_calls);
639 }
640 
641 /* Compute the local properties of each recorded expression.
642 
643    Local properties are those that are defined by the block, irrespective of
644    other blocks.
645 
646    An expression is transparent in a block if its operands are not modified
647    in the block.
648 
649    An expression is computed (locally available) in a block if it is computed
650    at least once and the expression would contain the same value if the
651    computation were moved to the end of the block.
652 
653    An expression is locally anticipatable in a block if it is computed at
654    least once and the expression would contain the same value if the
655    computation were moved to the beginning of the block.
656 
657    We call this routine for PRE and code hoisting.  Both compute
658    basically the same information and thus can easily share this code.
659 
660    TRANSP, COMP, and ANTLOC are destination sbitmaps for recording local
661    properties.  If NULL, then it is not necessary to compute or record that
662    particular property.
663 
664    TABLE controls which hash table to look at.  */
665 
666 static void
667 compute_local_properties (sbitmap *transp, sbitmap *comp, sbitmap *antloc,
668 			  struct gcse_hash_table_d *table)
669 {
670   unsigned int i;
671 
672   /* Initialize any bitmaps that were passed in.  */
673   if (transp)
674     {
675       bitmap_vector_ones (transp, last_basic_block_for_fn (cfun));
676     }
677 
678   if (comp)
679     bitmap_vector_clear (comp, last_basic_block_for_fn (cfun));
680   if (antloc)
681     bitmap_vector_clear (antloc, last_basic_block_for_fn (cfun));
682 
683   for (i = 0; i < table->size; i++)
684     {
685       struct gcse_expr *expr;
686 
687       for (expr = table->table[i]; expr != NULL; expr = expr->next_same_hash)
688 	{
689 	  int indx = expr->bitmap_index;
690 	  struct gcse_occr *occr;
691 
692 	  /* The expression is transparent in this block if it is not killed.
693 	     We start by assuming all are transparent [none are killed], and
694 	     then reset the bits for those that are.  */
695 	  if (transp)
696 	    compute_transp (expr->expr, indx, transp,
697 			    blocks_with_calls,
698 			    modify_mem_list_set,
699 			    canon_modify_mem_list);
700 
701 	  /* The occurrences recorded in antic_occr are exactly those that
702 	     we want to set to nonzero in ANTLOC.  */
703 	  if (antloc)
704 	    for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
705 	      {
706 		bitmap_set_bit (antloc[BLOCK_FOR_INSN (occr->insn)->index], indx);
707 
708 		/* While we're scanning the table, this is a good place to
709 		   initialize this.  */
710 		occr->deleted_p = 0;
711 	      }
712 
713 	  /* The occurrences recorded in avail_occr are exactly those that
714 	     we want to set to nonzero in COMP.  */
715 	  if (comp)
716 	    for (occr = expr->avail_occr; occr != NULL; occr = occr->next)
717 	      {
718 		bitmap_set_bit (comp[BLOCK_FOR_INSN (occr->insn)->index], indx);
719 
720 		/* While we're scanning the table, this is a good place to
721 		   initialize this.  */
722 		occr->copied_p = 0;
723 	      }
724 
725 	  /* While we're scanning the table, this is a good place to
726 	     initialize this.  */
727 	  expr->reaching_reg = 0;
728 	}
729     }
730 }
731 
732 /* Hash table support.  */
733 
734 struct reg_avail_info
735 {
736   basic_block last_bb;
737   int first_set;
738   int last_set;
739 };
740 
741 static struct reg_avail_info *reg_avail_info;
742 static basic_block current_bb;
743 
744 /* See whether X, the source of a set, is something we want to consider for
745    GCSE.  */
746 
747 static int
748 want_to_gcse_p (rtx x, machine_mode mode, HOST_WIDE_INT *max_distance_ptr)
749 {
750 #ifdef STACK_REGS
751   /* On register stack architectures, don't GCSE constants from the
752      constant pool, as the benefits are often swamped by the overhead
753      of shuffling the register stack between basic blocks.  */
754   if (IS_STACK_MODE (GET_MODE (x)))
755     x = avoid_constant_pool_reference (x);
756 #endif
757 
758   /* GCSE'ing constants:
759 
760      We do not specifically distinguish between constant and non-constant
761      expressions in PRE and Hoist.  We use set_src_cost below to limit
762      the maximum distance simple expressions can travel.
763 
764      Nevertheless, constants are much easier to GCSE, and, hence,
765      it is easy to overdo the optimizations.  Usually, excessive PRE and
766      hoisting of constants leads to increased register pressure.
767 
768      RA can deal with this by rematerializing some of the constants.
769      Therefore, it is important that the back-end generates sets of constants
770      in a way that allows reload to rematerialize them under high register
771      pressure, i.e., a pseudo register with a REG_EQUAL note for a constant
772      is set only once.  Failing to do so will result in IRA/reload
773      spilling such constants under high register pressure instead of
774      rematerializing them.  */
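
  /* As a rough numeric illustration of the limit computed below (the
     actual thresholds come from the GCSE_UNRESTRICTED_COST and
     GCSE_COST_DISTANCE_RATIO --param values, so the numbers here are only
     an assumption): with a cost-distance ratio of 10, an expression whose
     set_src_cost is 4 units would get max_distance = (10 * 4) / 10 = 4,
     while any expression at or above the "unrestricted" cost threshold
     gets max_distance = 0, i.e. no limit on how far it may travel.  */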
775 
776   switch (GET_CODE (x))
777     {
778     case REG:
779     case SUBREG:
780     case CALL:
781       return 0;
782 
783     CASE_CONST_ANY:
784       if (!doing_code_hoisting_p)
785 	/* Do not PRE constants.  */
786 	return 0;
787 
788       /* FALLTHRU */
789 
790     default:
791       if (doing_code_hoisting_p)
792 	/* PRE doesn't implement max_distance restriction.  */
793 	{
794 	  int cost;
795 	  HOST_WIDE_INT max_distance;
796 
797 	  gcc_assert (!optimize_function_for_speed_p (cfun)
798 		      && optimize_function_for_size_p (cfun));
799 	  cost = set_src_cost (x, mode, 0);
800 
801 	  if (cost < COSTS_N_INSNS (GCSE_UNRESTRICTED_COST))
802 	    {
803 	      max_distance
804 		= ((HOST_WIDE_INT)GCSE_COST_DISTANCE_RATIO * cost) / 10;
805 	      if (max_distance == 0)
806 		return 0;
807 
808 	      gcc_assert (max_distance > 0);
809 	    }
810 	  else
811 	    max_distance = 0;
812 
813 	  if (max_distance_ptr)
814 	    *max_distance_ptr = max_distance;
815 	}
816 
817       return can_assign_to_reg_without_clobbers_p (x, mode);
818     }
819 }
820 
821 /* Used internally by can_assign_to_reg_without_clobbers_p.  */
822 
823 static GTY(()) rtx_insn *test_insn;
824 
825 /* Return true if we can assign X to a pseudo register of mode MODE
826    such that the resulting insn does not result in clobbering a hard
827    register as a side-effect.
828 
829    Additionally, if the target requires it, check that the resulting insn
830    can be copied.  If it cannot, this means that X is special and probably
831    has hidden side-effects we don't want to mess with.
832 
833    This function is typically used by code motion passes, to verify
834    that it is safe to insert an insn without worrying about clobbering
835    maybe live hard regs.  */
836 
837 bool
838 can_assign_to_reg_without_clobbers_p (rtx x, machine_mode mode)
839 {
840   int num_clobbers = 0;
841   int icode;
842   bool can_assign = false;
843 
844   /* If this is a valid operand, we are OK.  If it's VOIDmode, we aren't.  */
845   if (general_operand (x, mode))
846     return true;
847   else if (GET_MODE (x) == VOIDmode)
848     return false;
849 
850   /* Otherwise, check if we can make a valid insn from it.  First initialize
851      our test insn if we haven't already.  */
852   if (test_insn == 0)
853     {
854       test_insn
855 	= make_insn_raw (gen_rtx_SET (gen_rtx_REG (word_mode,
856 						   FIRST_PSEUDO_REGISTER * 2),
857 				      const0_rtx));
858       SET_NEXT_INSN (test_insn) = SET_PREV_INSN (test_insn) = 0;
859       INSN_LOCATION (test_insn) = UNKNOWN_LOCATION;
860     }
861 
862   /* Now make an insn like the one we would make when GCSE'ing and see if
863      valid.  */
864   PUT_MODE (SET_DEST (PATTERN (test_insn)), mode);
865   SET_SRC (PATTERN (test_insn)) = x;
866 
867   icode = recog (PATTERN (test_insn), test_insn, &num_clobbers);
868 
869   /* If the test insn is valid and doesn't need clobbers, and the target also
870      has no objections, we're good.  */
871   if (icode >= 0
872       && (num_clobbers == 0 || !added_clobbers_hard_reg_p (icode))
873       && ! (targetm.cannot_copy_insn_p
874 	    && targetm.cannot_copy_insn_p (test_insn)))
875     can_assign = true;
876 
877   /* Make sure test_insn doesn't have any pointers into GC space.  */
878   SET_SRC (PATTERN (test_insn)) = NULL_RTX;
879 
880   return can_assign;
881 }
882 
883 /* Return nonzero if the operands of expression X are unchanged from the
884    start of INSN's basic block up to but not including INSN (if AVAIL_P == 0),
885    or from INSN to the end of INSN's basic block (if AVAIL_P != 0).  */
886 
887 static int
888 oprs_unchanged_p (const_rtx x, const rtx_insn *insn, int avail_p)
889 {
890   int i, j;
891   enum rtx_code code;
892   const char *fmt;
893 
894   if (x == 0)
895     return 1;
896 
897   code = GET_CODE (x);
898   switch (code)
899     {
900     case REG:
901       {
902 	struct reg_avail_info *info = &reg_avail_info[REGNO (x)];
903 
904 	if (info->last_bb != current_bb)
905 	  return 1;
906 	if (avail_p)
907 	  return info->last_set < DF_INSN_LUID (insn);
908 	else
909 	  return info->first_set >= DF_INSN_LUID (insn);
910       }
911 
912     case MEM:
913       if (! flag_gcse_lm
914 	  || load_killed_in_block_p (current_bb, DF_INSN_LUID (insn),
915 				     x, avail_p))
916 	return 0;
917       else
918 	return oprs_unchanged_p (XEXP (x, 0), insn, avail_p);
919 
920     case PRE_DEC:
921     case PRE_INC:
922     case POST_DEC:
923     case POST_INC:
924     case PRE_MODIFY:
925     case POST_MODIFY:
926       return 0;
927 
928     case PC:
929     case CC0: /*FIXME*/
930     case CONST:
931     CASE_CONST_ANY:
932     case SYMBOL_REF:
933     case LABEL_REF:
934     case ADDR_VEC:
935     case ADDR_DIFF_VEC:
936       return 1;
937 
938     default:
939       break;
940     }
941 
942   for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
943     {
944       if (fmt[i] == 'e')
945 	{
946 	  /* If we are about to do the last recursive call needed at this
947 	     level, change it into iteration.  This function is called enough
948 	     to be worth it.  */
949 	  if (i == 0)
950 	    return oprs_unchanged_p (XEXP (x, i), insn, avail_p);
951 
952 	  else if (! oprs_unchanged_p (XEXP (x, i), insn, avail_p))
953 	    return 0;
954 	}
955       else if (fmt[i] == 'E')
956 	for (j = 0; j < XVECLEN (x, i); j++)
957 	  if (! oprs_unchanged_p (XVECEXP (x, i, j), insn, avail_p))
958 	    return 0;
959     }
960 
961   return 1;
962 }
963 
964 /* Info passed from load_killed_in_block_p to mems_conflict_for_gcse_p.  */
965 
966 struct mem_conflict_info
967 {
968   /* A memory reference for a load instruction, mems_conflict_for_gcse_p will
969      see if a memory store conflicts with this memory load.  */
970   const_rtx mem;
971 
972   /* True if mems_conflict_for_gcse_p finds a conflict between two memory
973      references.  */
974   bool conflict;
975 };
976 
977 /* DEST is the output of an instruction.  If it is a memory reference and
978    possibly conflicts with the load found in DATA, then communicate this
979    information back through DATA.  */
980 
981 static void
982 mems_conflict_for_gcse_p (rtx dest, const_rtx setter ATTRIBUTE_UNUSED,
983 			  void *data)
984 {
985   struct mem_conflict_info *mci = (struct mem_conflict_info *) data;
986 
987   while (GET_CODE (dest) == SUBREG
988 	 || GET_CODE (dest) == ZERO_EXTRACT
989 	 || GET_CODE (dest) == STRICT_LOW_PART)
990     dest = XEXP (dest, 0);
991 
992   /* If DEST is not a MEM, then it will not conflict with the load.  Note
993      that function calls are assumed to clobber memory, but are handled
994      elsewhere.  */
995   if (! MEM_P (dest))
996     return;
997 
998   /* If we are setting a MEM in our list of specially recognized MEMs,
999      don't mark as killed this time.  */
1000   if (pre_ldst_mems != NULL && expr_equiv_p (dest, mci->mem))
1001     {
1002       if (!find_rtx_in_ldst (dest))
1003 	mci->conflict = true;
1004       return;
1005     }
1006 
1007   if (true_dependence (dest, GET_MODE (dest), mci->mem))
1008     mci->conflict = true;
1009 }
1010 
1011 /* Return nonzero if the expression in X (a memory reference) is killed
1012    in block BB before or after the insn with the LUID in UID_LIMIT.
1013    AVAIL_P is nonzero for kills after UID_LIMIT, and zero for kills
1014    before UID_LIMIT.
1015 
1016    To check the entire block, set UID_LIMIT to max_uid + 1 and
1017    AVAIL_P to 0.  */
1018 
1019 static int
1020 load_killed_in_block_p (const_basic_block bb, int uid_limit, const_rtx x,
1021 			int avail_p)
1022 {
1023   vec<rtx_insn *> list = modify_mem_list[bb->index];
1024   rtx_insn *setter;
1025   unsigned ix;
1026 
1027   /* If this is a read-only MEM, then we aren't going to be changing it.  */
1028   if (MEM_READONLY_P (x))
1029     return 0;
1030 
1031   FOR_EACH_VEC_ELT_REVERSE (list, ix, setter)
1032     {
1033       struct mem_conflict_info mci;
1034 
1035       /* Ignore entries in the list that do not apply.  */
1036       if ((avail_p
1037 	   && DF_INSN_LUID (setter) < uid_limit)
1038 	  || (! avail_p
1039 	      && DF_INSN_LUID (setter) > uid_limit))
1040 	continue;
1041 
1042       /* If SETTER is a call everything is clobbered.  Note that calls
1043 	 to pure functions are never put on the list, so we need not
1044 	 worry about them.  */
1045       if (CALL_P (setter))
1046 	return 1;
1047 
1048       /* SETTER must be an INSN of some kind that sets memory.  Call
1049 	 note_stores to examine each hunk of memory that is modified.  */
1050       mci.mem = x;
1051       mci.conflict = false;
1052       note_stores (PATTERN (setter), mems_conflict_for_gcse_p, &mci);
1053       if (mci.conflict)
1054 	return 1;
1055     }
1056   return 0;
1057 }
1058 
1059 /* Return nonzero if the operands of expression X are unchanged from
1060    the start of INSN's basic block up to but not including INSN.  */
1061 
1062 static int
1063 oprs_anticipatable_p (const_rtx x, const rtx_insn *insn)
1064 {
1065   return oprs_unchanged_p (x, insn, 0);
1066 }
1067 
1068 /* Return nonzero if the operands of expression X are unchanged from
1069    INSN to the end of INSN's basic block.  */
1070 
1071 static int
1072 oprs_available_p (const_rtx x, const rtx_insn *insn)
1073 {
1074   return oprs_unchanged_p (x, insn, 1);
1075 }
1076 
1077 /* Hash expression X.
1078 
1079    MODE is only used if X is a CONST_INT.  DO_NOT_RECORD_P is a boolean
1080    indicating if a volatile operand is found or if the expression contains
1081    something we don't want to insert in the table.  HASH_TABLE_SIZE is
1082    the current size of the hash table to be probed.  */
1083 
1084 static unsigned int
1085 hash_expr (const_rtx x, machine_mode mode, int *do_not_record_p,
1086 	   int hash_table_size)
1087 {
1088   unsigned int hash;
1089 
1090   *do_not_record_p = 0;
1091 
1092   hash = hash_rtx (x, mode, do_not_record_p, NULL, /*have_reg_qty=*/false);
1093   return hash % hash_table_size;
1094 }
1095 
1096 /* Return nonzero if exp1 is equivalent to exp2.  */
1097 
1098 static int
1099 expr_equiv_p (const_rtx x, const_rtx y)
1100 {
1101   return exp_equiv_p (x, y, 0, true);
1102 }
1103 
1104 /* Insert expression X in INSN in the hash TABLE.
1105    If it is already present, record it as the last occurrence in INSN's
1106    basic block.
1107 
1108    MODE is the mode of the value X is being stored into.
1109    It is only used if X is a CONST_INT.
1110 
1111    ANTIC_P is nonzero if X is an anticipatable expression.
1112    AVAIL_P is nonzero if X is an available expression.
1113 
1114    MAX_DISTANCE is the maximum distance in instructions this expression can
1115    be moved.  */
1116 
1117 static void
1118 insert_expr_in_table (rtx x, machine_mode mode, rtx_insn *insn,
1119 		      int antic_p,
1120 		      int avail_p, HOST_WIDE_INT max_distance,
1121 		      struct gcse_hash_table_d *table)
1122 {
1123   int found, do_not_record_p;
1124   unsigned int hash;
1125   struct gcse_expr *cur_expr, *last_expr = NULL;
1126   struct gcse_occr *antic_occr, *avail_occr;
1127 
1128   hash = hash_expr (x, mode, &do_not_record_p, table->size);
1129 
1130   /* Do not insert expression in table if it contains volatile operands,
1131      or if hash_expr determines the expression is something we don't want
1132      to or can't handle.  */
1133   if (do_not_record_p)
1134     return;
1135 
1136   cur_expr = table->table[hash];
1137   found = 0;
1138 
1139   while (cur_expr && (found = expr_equiv_p (cur_expr->expr, x)) == 0)
1140     {
1141       /* If the expression isn't found, save a pointer to the end of
1142 	 the list.  */
1143       last_expr = cur_expr;
1144       cur_expr = cur_expr->next_same_hash;
1145     }
1146 
1147   if (! found)
1148     {
1149       cur_expr = GOBNEW (struct gcse_expr);
1150       bytes_used += sizeof (struct gcse_expr);
1151       if (table->table[hash] == NULL)
1152 	/* This is the first pattern that hashed to this index.  */
1153 	table->table[hash] = cur_expr;
1154       else
1155 	/* Add EXPR to end of this hash chain.  */
1156 	last_expr->next_same_hash = cur_expr;
1157 
1158       /* Set the fields of the expr element.  */
1159       cur_expr->expr = x;
1160       cur_expr->bitmap_index = table->n_elems++;
1161       cur_expr->next_same_hash = NULL;
1162       cur_expr->antic_occr = NULL;
1163       cur_expr->avail_occr = NULL;
1164       gcc_assert (max_distance >= 0);
1165       cur_expr->max_distance = max_distance;
1166     }
1167   else
1168     gcc_assert (cur_expr->max_distance == max_distance);
1169 
1170   /* Now record the occurrence(s).  */
1171   if (antic_p)
1172     {
1173       antic_occr = cur_expr->antic_occr;
1174 
1175       if (antic_occr
1176 	  && BLOCK_FOR_INSN (antic_occr->insn) != BLOCK_FOR_INSN (insn))
1177 	antic_occr = NULL;
1178 
1179       if (antic_occr)
1180 	/* Found another instance of the expression in the same basic block.
1181 	   Prefer the currently recorded one.  We want the first one in the
1182 	   block and the block is scanned from start to end.  */
1183 	; /* nothing to do */
1184       else
1185 	{
1186 	  /* First occurrence of this expression in this basic block.  */
1187 	  antic_occr = GOBNEW (struct gcse_occr);
1188 	  bytes_used += sizeof (struct gcse_occr);
1189 	  antic_occr->insn = insn;
1190 	  antic_occr->next = cur_expr->antic_occr;
1191 	  antic_occr->deleted_p = 0;
1192 	  cur_expr->antic_occr = antic_occr;
1193 	}
1194     }
1195 
1196   if (avail_p)
1197     {
1198       avail_occr = cur_expr->avail_occr;
1199 
1200       if (avail_occr
1201 	  && BLOCK_FOR_INSN (avail_occr->insn) == BLOCK_FOR_INSN (insn))
1202 	{
1203 	  /* Found another instance of the expression in the same basic block.
1204 	     Prefer this occurrence to the currently recorded one.  We want
1205 	     the last one in the block and the block is scanned from start
1206 	     to end.  */
1207 	  avail_occr->insn = insn;
1208 	}
1209       else
1210 	{
1211 	  /* First occurrence of this expression in this basic block.  */
1212 	  avail_occr = GOBNEW (struct gcse_occr);
1213 	  bytes_used += sizeof (struct gcse_occr);
1214 	  avail_occr->insn = insn;
1215 	  avail_occr->next = cur_expr->avail_occr;
1216 	  avail_occr->deleted_p = 0;
1217 	  cur_expr->avail_occr = avail_occr;
1218 	}
1219     }
1220 }
1221 
1222 /* Scan SET present in INSN and add an entry to the hash TABLE.  */
1223 
1224 static void
1225 hash_scan_set (rtx set, rtx_insn *insn, struct gcse_hash_table_d *table)
1226 {
1227   rtx src = SET_SRC (set);
1228   rtx dest = SET_DEST (set);
1229   rtx note;
1230 
1231   if (GET_CODE (src) == CALL)
1232     hash_scan_call (src, insn, table);
1233 
1234   else if (REG_P (dest))
1235     {
1236       unsigned int regno = REGNO (dest);
1237       HOST_WIDE_INT max_distance = 0;
1238 
1239       /* See if a REG_EQUAL note shows this equivalent to a simpler expression.
1240 
1241 	 This allows us to do a single GCSE pass and still eliminate
1242 	 redundant constants, addresses or other expressions that are
1243 	 constructed with multiple instructions.
1244 
1245 	 However, keep the original SRC if INSN is a simple reg-reg move.
1246 	 In this case, there will almost always be a REG_EQUAL note on the
1247 	 insn that sets SRC.  By recording the REG_EQUAL value here as SRC
1248 	 for INSN, we miss copy propagation opportunities and we perform the
1249 	 same PRE GCSE operation repeatedly on the same REG_EQUAL value if we
1250 	 do more than one PRE GCSE pass.
1251 
1252 	 Note that this does not impede profitable constant propagations.  We
1253 	 "look through" reg-reg sets in lookup_avail_set.  */
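
      /* A hypothetical illustration (register numbers invented): on a
	 target that builds a symbol's address in two insns,

	   (set (reg:SI 100) (high:SI (symbol_ref:SI "x")))
	   (set (reg:SI 101) (lo_sum:SI (reg:SI 100) (symbol_ref:SI "x")))
	     ; with a REG_EQUAL note equal to (symbol_ref:SI "x")

	 recording the note value as SRC lets GCSE treat the whole two-insn
	 address computation as a single expression.  */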
1254       note = find_reg_equal_equiv_note (insn);
1255       if (note != 0
1256 	  && REG_NOTE_KIND (note) == REG_EQUAL
1257 	  && !REG_P (src)
1258 	  && want_to_gcse_p (XEXP (note, 0), GET_MODE (dest), NULL))
1259 	src = XEXP (note, 0), set = gen_rtx_SET (dest, src);
1260 
1261       /* Only record sets of pseudo-regs in the hash table.  */
1262       if (regno >= FIRST_PSEUDO_REGISTER
1263 	  /* Don't GCSE something if we can't do a reg/reg copy.  */
1264 	  && can_copy_p (GET_MODE (dest))
1265 	  /* GCSE commonly inserts instructions after the insn.  We can't
1266 	     do that easily for EH edges so disable GCSE on these for now.  */
1267 	  /* ??? We can now easily create new EH landing pads at the
1268 	     gimple level, for splitting edges; there's no reason we
1269 	     can't do the same thing at the rtl level.  */
1270 	  && !can_throw_internal (insn)
1271 	  /* Is SET_SRC something we want to gcse?  */
1272 	  && want_to_gcse_p (src, GET_MODE (dest), &max_distance)
1273 	  /* Don't CSE a nop.  */
1274 	  && ! set_noop_p (set)
1275 	  /* Don't GCSE if it has an attached REG_EQUIV note.
1276 	     At this point only function parameters should have
1277 	     REG_EQUIV notes, and if the argument slot is used somewhere
1278 	     explicitly, it means the address of the parameter has been taken,
1279 	     so we should not extend the lifetime of the pseudo.  */
1280 	  && (note == NULL_RTX || ! MEM_P (XEXP (note, 0))))
1281 	{
1282 	  /* An expression is not anticipatable if its operands are
1283 	     modified before this insn or if this is not the only SET in
1284 	     this insn.  The latter condition does not have to mean that
1285 	     SRC itself is not anticipatable, but we just will not be
1286 	     able to handle code motion of insns with multiple sets.  */
1287 	  int antic_p = oprs_anticipatable_p (src, insn)
1288 			&& !multiple_sets (insn);
1289 	  /* An expression is not available if its operands are
1290 	     subsequently modified, including this insn.  It's also not
1291 	     available if this is a branch, because we can't insert
1292 	     a set after the branch.  */
1293 	  int avail_p = (oprs_available_p (src, insn)
1294 			 && ! JUMP_P (insn));
1295 
1296 	  insert_expr_in_table (src, GET_MODE (dest), insn, antic_p, avail_p,
1297 				max_distance, table);
1298 	}
1299     }
1300   /* In the case of a store, we want to consider the memory value as available
1301      in the REG stored to that memory.  This makes it possible to remove
1302      redundant loads due to stores to the same location.  */
1303   else if (flag_gcse_las && REG_P (src) && MEM_P (dest))
1304     {
1305       unsigned int regno = REGNO (src);
1306       HOST_WIDE_INT max_distance = 0;
1307 
1308       /* Only record sets of pseudo-regs in the hash table.  */
1309       if (regno >= FIRST_PSEUDO_REGISTER
1310 	  /* Don't GCSE something if we can't do a reg/reg copy.  */
1311 	  && can_copy_p (GET_MODE (src))
1312 	  /* GCSE commonly inserts instructions after the insn.  We can't
1313 	     do that easily for EH edges so disable GCSE on these for now.  */
1314 	  && !can_throw_internal (insn)
1315 	  /* Is SET_DEST something we want to gcse?  */
1316 	  && want_to_gcse_p (dest, GET_MODE (dest), &max_distance)
1317 	  /* Don't CSE a nop.  */
1318 	  && ! set_noop_p (set)
1319 	  /* Don't GCSE if it has an attached REG_EQUIV note.
1320 	     At this point only function parameters should have
1321 	     REG_EQUIV notes, and if the argument slot is used somewhere
1322 	     explicitly, it means the address of the parameter has been taken,
1323 	     so we should not extend the lifetime of the pseudo.  */
1324 	  && ((note = find_reg_note (insn, REG_EQUIV, NULL_RTX)) == 0
1325 	      || ! MEM_P (XEXP (note, 0))))
1326 	{
1327 	  /* Stores are never anticipatable.  */
1328 	  int antic_p = 0;
1329 	  /* An expression is not available if its operands are
1330 	     subsequently modified, including this insn.  It's also not
1331 	     available if this is a branch, because we can't insert
1332 	     a set after the branch.  */
1333 	  int avail_p = oprs_available_p (dest, insn) && ! JUMP_P (insn);
1334 
1335 	  /* Record the memory expression (DEST) in the hash table.  */
1336 	  insert_expr_in_table (dest, GET_MODE (dest), insn,
1337 				antic_p, avail_p, max_distance, table);
1338 	}
1339     }
1340 }
1341 
1342 static void
1343 hash_scan_clobber (rtx x ATTRIBUTE_UNUSED, rtx_insn *insn ATTRIBUTE_UNUSED,
1344 		   struct gcse_hash_table_d *table ATTRIBUTE_UNUSED)
1345 {
1346   /* Currently nothing to do.  */
1347 }
1348 
1349 static void
1350 hash_scan_call (rtx x ATTRIBUTE_UNUSED, rtx_insn *insn ATTRIBUTE_UNUSED,
1351 		struct gcse_hash_table_d *table ATTRIBUTE_UNUSED)
1352 {
1353   /* Currently nothing to do.  */
1354 }
1355 
1356 /* Process INSN and add hash table entries as appropriate.  */
1357 
1358 static void
1359 hash_scan_insn (rtx_insn *insn, struct gcse_hash_table_d *table)
1360 {
1361   rtx pat = PATTERN (insn);
1362   int i;
1363 
1364   /* Pick out the sets of INSN and for other forms of instructions record
1365      what's been modified.  */
1366 
1367   if (GET_CODE (pat) == SET)
1368     hash_scan_set (pat, insn, table);
1369 
1370   else if (GET_CODE (pat) == CLOBBER)
1371     hash_scan_clobber (pat, insn, table);
1372 
1373   else if (GET_CODE (pat) == CALL)
1374     hash_scan_call (pat, insn, table);
1375 
1376   else if (GET_CODE (pat) == PARALLEL)
1377     for (i = 0; i < XVECLEN (pat, 0); i++)
1378       {
1379 	rtx x = XVECEXP (pat, 0, i);
1380 
1381 	if (GET_CODE (x) == SET)
1382 	  hash_scan_set (x, insn, table);
1383 	else if (GET_CODE (x) == CLOBBER)
1384 	  hash_scan_clobber (x, insn, table);
1385 	else if (GET_CODE (x) == CALL)
1386 	  hash_scan_call (x, insn, table);
1387       }
1388 }
1389 
1390 /* Dump the hash table TABLE to file FILE under the name NAME.  */
1391 
1392 static void
1393 dump_hash_table (FILE *file, const char *name, struct gcse_hash_table_d *table)
1394 {
1395   int i;
1396   /* Flattened out table, so it's printed in proper order.  */
1397   struct gcse_expr **flat_table;
1398   unsigned int *hash_val;
1399   struct gcse_expr *expr;
1400 
1401   flat_table = XCNEWVEC (struct gcse_expr *, table->n_elems);
1402   hash_val = XNEWVEC (unsigned int, table->n_elems);
1403 
1404   for (i = 0; i < (int) table->size; i++)
1405     for (expr = table->table[i]; expr != NULL; expr = expr->next_same_hash)
1406       {
1407 	flat_table[expr->bitmap_index] = expr;
1408 	hash_val[expr->bitmap_index] = i;
1409       }
1410 
1411   fprintf (file, "%s hash table (%d buckets, %d entries)\n",
1412 	   name, table->size, table->n_elems);
1413 
1414   for (i = 0; i < (int) table->n_elems; i++)
1415     if (flat_table[i] != 0)
1416       {
1417 	expr = flat_table[i];
1418 	fprintf (file, "Index %d (hash value %d; max distance "
1419 		 HOST_WIDE_INT_PRINT_DEC ")\n  ",
1420 		 expr->bitmap_index, hash_val[i], expr->max_distance);
1421 	print_rtl (file, expr->expr);
1422 	fprintf (file, "\n");
1423       }
1424 
1425   fprintf (file, "\n");
1426 
1427   free (flat_table);
1428   free (hash_val);
1429 }
1430 
1431 /* Record register first/last/block set information for REGNO in INSN.
1432 
1433    first_set records the first place in the block where the register
1434    is set and is used to compute "anticipatability".
1435 
1436    last_set records the last place in the block where the register
1437    is set and is used to compute "availability".
1438 
1439    last_bb records the block for which first_set and last_set are
1440    valid, as a quick test to invalidate them.  */
1441 
1442 static void
1443 record_last_reg_set_info (rtx_insn *insn, int regno)
1444 {
1445   struct reg_avail_info *info = &reg_avail_info[regno];
1446   int luid = DF_INSN_LUID (insn);
1447 
1448   info->last_set = luid;
1449   if (info->last_bb != current_bb)
1450     {
1451       info->last_bb = current_bb;
1452       info->first_set = luid;
1453     }
1454 }
1455 
1456 /* Record memory modification information for INSN.  We do not actually care
1457    about the memory location(s) that are set, or even how they are set (consider
1458    a CALL_INSN).  We merely need to record which insns modify memory.  */
1459 
1460 static void
1461 record_last_mem_set_info (rtx_insn *insn)
1462 {
1463   if (! flag_gcse_lm)
1464     return;
1465 
1466   record_last_mem_set_info_common (insn, modify_mem_list,
1467 				   canon_modify_mem_list,
1468 				   modify_mem_list_set,
1469 				   blocks_with_calls);
1470 }
1471 
1472 /* Called from compute_hash_table via note_stores to handle one
1473    SET or CLOBBER in an insn.  DATA is really the instruction in which
1474    the SET is taking place.  */
1475 
1476 static void
1477 record_last_set_info (rtx dest, const_rtx setter ATTRIBUTE_UNUSED, void *data)
1478 {
1479   rtx_insn *last_set_insn = (rtx_insn *) data;
1480 
1481   if (GET_CODE (dest) == SUBREG)
1482     dest = SUBREG_REG (dest);
1483 
1484   if (REG_P (dest))
1485     record_last_reg_set_info (last_set_insn, REGNO (dest));
1486   else if (MEM_P (dest)
1487 	   /* Ignore pushes, they clobber nothing.  */
1488 	   && ! push_operand (dest, GET_MODE (dest)))
1489     record_last_mem_set_info (last_set_insn);
1490 }
1491 
1492 /* Top level function to create an expression hash table.
1493 
1494    Expression entries are placed in the hash table if
1495    - they are of the form (set (pseudo-reg) src),
1496    - src is something we want to perform GCSE on,
1497    - none of the operands are subsequently modified in the block
1498 
1499    Currently src must be a pseudo-reg or a const_int.
1500 
1501    TABLE is the table computed.  */
1502 
1503 static void
1504 compute_hash_table_work (struct gcse_hash_table_d *table)
1505 {
1506   int i;
1507 
1508   /* Clear out any cached memory-modification lists from a previous run.  */
1509   clear_modify_mem_tables ();
1510   /* Some working arrays used to track first and last set in each block.  */
1511   reg_avail_info = GNEWVEC (struct reg_avail_info, max_reg_num ());
1512 
1513   for (i = 0; i < max_reg_num (); ++i)
1514     reg_avail_info[i].last_bb = NULL;
1515 
1516   FOR_EACH_BB_FN (current_bb, cfun)
1517     {
1518       rtx_insn *insn;
1519       unsigned int regno;
1520 
1521       /* First pass over the instructions records information used to
1522 	 determine when registers and memory are first and last set.  */
1523       FOR_BB_INSNS (current_bb, insn)
1524 	{
1525 	  if (!NONDEBUG_INSN_P (insn))
1526 	    continue;
1527 
1528 	  if (CALL_P (insn))
1529 	    {
1530 	      hard_reg_set_iterator hrsi;
1531 	      EXECUTE_IF_SET_IN_HARD_REG_SET (regs_invalidated_by_call,
1532 					      0, regno, hrsi)
1533 		record_last_reg_set_info (insn, regno);
1534 
1535 	      if (! RTL_CONST_OR_PURE_CALL_P (insn))
1536 		record_last_mem_set_info (insn);
1537 	    }
1538 
1539 	  note_stores (PATTERN (insn), record_last_set_info, insn);
1540 	}
1541 
1542       /* The next pass builds the hash table.  */
1543       FOR_BB_INSNS (current_bb, insn)
1544 	if (NONDEBUG_INSN_P (insn))
1545 	  hash_scan_insn (insn, table);
1546     }
1547 
1548   free (reg_avail_info);
1549   reg_avail_info = NULL;
1550 }
1551 
1552 /* Allocate space for the set/expr hash TABLE.
1553    The number of buckets is based on the maximum insn count in the function.  */
1554 
1555 static void
1556 alloc_hash_table (struct gcse_hash_table_d *table)
1557 {
1558   int n;
1559 
1560   n = get_max_insn_count ();
1561 
1562   table->size = n / 4;
1563   if (table->size < 11)
1564     table->size = 11;
1565 
1566   /* Attempt to maintain efficient use of hash table.
1567      Making it an odd number is simplest for now.
1568      ??? Later take some measurements.  */
1569   table->size |= 1;
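  /* For example, a function with 1000 insns yields 1000/4 = 250, forced odd
     to 251 buckets; smaller functions fall back to the 11-bucket minimum
     clamped above.  */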
1570   n = table->size * sizeof (struct gcse_expr *);
1571   table->table = GNEWVAR (struct gcse_expr *, n);
1572 }
1573 
1574 /* Free things allocated by alloc_hash_table.  */
1575 
1576 static void
1577 free_hash_table (struct gcse_hash_table_d *table)
1578 {
1579   free (table->table);
1580 }
1581 
1582 /* Compute the expression hash table TABLE.  */
1583 
1584 static void
1585 compute_hash_table (struct gcse_hash_table_d *table)
1586 {
1587   /* Initialize count of number of entries in hash table.  */
1588   table->n_elems = 0;
1589   memset (table->table, 0, table->size * sizeof (struct gcse_expr *));
1590 
1591   compute_hash_table_work (table);
1592 }
1593 
1594 /* Expression tracking support.  */
1595 
1596 /* Clear canon_modify_mem_list and modify_mem_list tables.  */
1597 static void
1598 clear_modify_mem_tables (void)
1599 {
1600   unsigned i;
1601   bitmap_iterator bi;
1602 
1603   EXECUTE_IF_SET_IN_BITMAP (modify_mem_list_set, 0, i, bi)
1604     {
1605       modify_mem_list[i].release ();
1606       canon_modify_mem_list[i].release ();
1607     }
1608   bitmap_clear (modify_mem_list_set);
1609   bitmap_clear (blocks_with_calls);
1610 }
1611 
1612 /* Release memory used by modify_mem_list_set.  */
1613 
1614 static void
1615 free_modify_mem_tables (void)
1616 {
1617   clear_modify_mem_tables ();
1618   free (modify_mem_list);
1619   free (canon_modify_mem_list);
1620   modify_mem_list = 0;
1621   canon_modify_mem_list = 0;
1622 }
1623 
1624 /* Compute PRE+LCM working variables.  */
1625 
1626 /* Local properties of expressions.  */
1627 
1628 /* Nonzero for expressions that are transparent in the block.  */
1629 static sbitmap *transp;
1630 
1631 /* Nonzero for expressions that are computed (available) in the block.  */
1632 static sbitmap *comp;
1633 
1634 /* Nonzero for expressions that are locally anticipatable in the block.  */
1635 static sbitmap *antloc;
1636 
1637 /* Nonzero for expressions where this block is an optimal computation
1638    point.  */
1639 static sbitmap *pre_optimal;
1640 
1641 /* Nonzero for expressions which are redundant in a particular block.  */
1642 static sbitmap *pre_redundant;
1643 
1644 /* Nonzero for expressions which should be inserted on a specific edge.  */
1645 static sbitmap *pre_insert_map;
1646 
1647 /* Nonzero for expressions which should be deleted in a specific block.  */
1648 static sbitmap *pre_delete_map;
1649 
1650 /* Allocate vars used for PRE analysis.  */
1651 
1652 static void
1653 alloc_pre_mem (int n_blocks, int n_exprs)
1654 {
1655   transp = sbitmap_vector_alloc (n_blocks, n_exprs);
1656   comp = sbitmap_vector_alloc (n_blocks, n_exprs);
1657   antloc = sbitmap_vector_alloc (n_blocks, n_exprs);
1658 
1659   pre_optimal = NULL;
1660   pre_redundant = NULL;
1661   pre_insert_map = NULL;
1662   pre_delete_map = NULL;
1663   ae_kill = sbitmap_vector_alloc (n_blocks, n_exprs);
1664 
1665   /* pre_insert and pre_delete are allocated later.  */
1666 }
1667 
1668 /* Free vars used for PRE analysis.  */
1669 
1670 static void
1671 free_pre_mem (void)
1672 {
1673   sbitmap_vector_free (transp);
1674   sbitmap_vector_free (comp);
1675 
1676   /* ANTLOC and AE_KILL are freed just after pre_lcm finishes.  */
1677 
1678   if (pre_optimal)
1679     sbitmap_vector_free (pre_optimal);
1680   if (pre_redundant)
1681     sbitmap_vector_free (pre_redundant);
1682   if (pre_insert_map)
1683     sbitmap_vector_free (pre_insert_map);
1684   if (pre_delete_map)
1685     sbitmap_vector_free (pre_delete_map);
1686 
1687   transp = comp = NULL;
1688   pre_optimal = pre_redundant = pre_insert_map = pre_delete_map = NULL;
1689 }
1690 
1691 /* Remove certain expressions from anticipatable and transparent
1692    sets of basic blocks that have an incoming abnormal edge.
1693    For PRE remove potentially trapping expressions to avoid placing
1694    them on abnormal edges.  For hoisting remove memory references that
1695    can be clobbered by calls.  */
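
/* For example (a purely illustrative case): a division whose divisor is not
   known to be nonzero may trap, so for PRE it is pruned from the
   anticipatable/transparent sets of blocks entered by an abnormal edge;
   similarly, for hoisting, a load from a non-constant address could be
   clobbered by the call ending an abnormal predecessor, so it is pruned as
   well.  */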
1696 
1697 static void
1698 prune_expressions (bool pre_p)
1699 {
1700   struct gcse_expr *expr;
1701   unsigned int ui;
1702   basic_block bb;
1703 
1704   auto_sbitmap prune_exprs (expr_hash_table.n_elems);
1705   bitmap_clear (prune_exprs);
1706   for (ui = 0; ui < expr_hash_table.size; ui++)
1707     {
1708       for (expr = expr_hash_table.table[ui]; expr; expr = expr->next_same_hash)
1709 	{
1710 	  /* Note potentially trapping expressions.  */
1711 	  if (may_trap_p (expr->expr))
1712 	    {
1713 	      bitmap_set_bit (prune_exprs, expr->bitmap_index);
1714 	      continue;
1715 	    }
1716 
1717 	  if (!pre_p && contains_mem_rtx_p (expr->expr))
1718 	    /* Note memory references that can be clobbered by a call.
1719 	       We do not split abnormal edges in hoisting, so if a
1720 	       memory reference were hoisted along an abnormal edge,
1721 	       it would be placed /before/ the call.  Therefore, only
1722 	       constant memory references can be hoisted along abnormal
1723 	       edges.  */
1724 	    {
1725 	      rtx x = expr->expr;
1726 
1727 	      /* Common cases where we might find the MEM which may allow us
1728 		 to avoid pruning the expression.  */
1729 	      while (GET_CODE (x) == ZERO_EXTEND || GET_CODE (x) == SIGN_EXTEND)
1730 		x = XEXP (x, 0);
1731 
1732 	      /* If we found the MEM, go ahead and look at it to see if it has
1733 		 properties that allow us to avoid pruning its expression out
1734 		 of the tables.  */
1735 	      if (MEM_P (x))
1736 		{
1737 		  if (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
1738 		      && CONSTANT_POOL_ADDRESS_P (XEXP (x, 0)))
1739 		    continue;
1740 
1741 		  if (MEM_READONLY_P (x)
1742 		      && !MEM_VOLATILE_P (x)
1743 		      && MEM_NOTRAP_P (x))
1744 		    /* Constant memory reference, e.g., a PIC address.  */
1745 		    continue;
1746 		}
1747 
1748 	      /* ??? Optimally, we would use interprocedural alias
1749 		 analysis to determine if this mem is actually killed
1750 		 by this call.  */
1751 
1752 	      bitmap_set_bit (prune_exprs, expr->bitmap_index);
1753 	    }
1754 	}
1755     }
1756 
1757   FOR_EACH_BB_FN (bb, cfun)
1758     {
1759       edge e;
1760       edge_iterator ei;
1761 
1762       /* If the current block is the destination of an abnormal edge, we
1763 	 kill all trapping (for PRE) and memory (for hoist) expressions
1764 	 because we won't be able to properly place the instruction on
1765 	 the edge.  So make them neither anticipatable nor transparent.
1766 	 This is fairly conservative.
1767 
1768 	 ??? For hoisting it may be necessary to check for set-and-jump
1769 	 instructions here, not just for abnormal edges.  The general problem
1770 	 is that when an expression cannot be placed right at the end of
1771 	 a basic block we should account for any side-effects of a subsequent
1772 	 jump instruction that could clobber the expression.  It would
1773 	 be best to implement this check along the lines of
1774 	 should_hoist_expr_to_dom where the target block is already known
1775 	 and, hence, there's no need to conservatively prune expressions on
1776 	 "intermediate" set-and-jump instructions.  */
1777       FOR_EACH_EDGE (e, ei, bb->preds)
1778 	if ((e->flags & EDGE_ABNORMAL)
1779 	    && (pre_p || CALL_P (BB_END (e->src))))
1780 	  {
1781 	    bitmap_and_compl (antloc[bb->index],
1782 				antloc[bb->index], prune_exprs);
1783 	    bitmap_and_compl (transp[bb->index],
1784 				transp[bb->index], prune_exprs);
1785 	    break;
1786 	  }
1787     }
1788 }
1789 
1790 /* It may be necessary to insert a large number of insns on edges to
1791    make the existing occurrences of expressions fully redundant.  This
1792    routine examines the set of insertions and deletions and if the ratio
1793    of insertions to deletions is too high for a particular expression, then
1794    the expression is removed from the insertion/deletion sets.
1795 
1796    N_ELEMS is the number of elements in the hash table.  */
1797 
1798 static void
1799 prune_insertions_deletions (int n_elems)
1800 {
1801   sbitmap_iterator sbi;
1802 
1803   /* We always use I to iterate over blocks/edges and J to iterate over
1804      expressions.  */
1805   unsigned int i, j;
1806 
1807   /* Counts for the number of times an expression needs to be inserted and
1808      number of times an expression can be removed as a result.  */
1809   int *insertions = GCNEWVEC (int, n_elems);
1810   int *deletions = GCNEWVEC (int, n_elems);
1811 
1812   /* Set of expressions which require too many insertions relative to
1813      the number of deletions achieved.  We will prune these out of the
1814      insertion/deletion sets.  */
1815   auto_sbitmap prune_exprs (n_elems);
1816   bitmap_clear (prune_exprs);
1817 
1818   /* Iterate over the edges counting the number of times each expression
1819      needs to be inserted.  */
1820   for (i = 0; i < (unsigned) n_edges_for_fn (cfun); i++)
1821     {
1822       EXECUTE_IF_SET_IN_BITMAP (pre_insert_map[i], 0, j, sbi)
1823 	insertions[j]++;
1824     }
1825 
1826   /* Similarly for deletions, but those occur in blocks rather than on
1827      edges.  */
1828   for (i = 0; i < (unsigned) last_basic_block_for_fn (cfun); i++)
1829     {
1830       EXECUTE_IF_SET_IN_BITMAP (pre_delete_map[i], 0, j, sbi)
1831 	deletions[j]++;
1832     }
1833 
1834   /* Now that we have accurate counts, iterate over the elements in the
1835      hash table and see if any need too many insertions relative to the
1836      number of evaluations that can be removed.  If so, mark them in
1837      PRUNE_EXPRS.  */
1838   for (j = 0; j < (unsigned) n_elems; j++)
1839     if (deletions[j]
1840 	&& ((unsigned) insertions[j] / deletions[j]) > MAX_GCSE_INSERTION_RATIO)
1841       bitmap_set_bit (prune_exprs, j);
1842 
1843   /* Now prune PRE_INSERT_MAP and PRE_DELETE_MAP based on PRUNE_EXPRS.  */
1844   EXECUTE_IF_SET_IN_BITMAP (prune_exprs, 0, j, sbi)
1845     {
1846       for (i = 0; i < (unsigned) n_edges_for_fn (cfun); i++)
1847 	bitmap_clear_bit (pre_insert_map[i], j);
1848 
1849       for (i = 0; i < (unsigned) last_basic_block_for_fn (cfun); i++)
1850 	bitmap_clear_bit (pre_delete_map[i], j);
1851     }
1852 
1853   free (insertions);
1854   free (deletions);
1855 }
1856 
1857 /* Top level routine to do the dataflow analysis needed by PRE.  */
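
/* In outline, matching the body below: compute the local properties TRANSP,
   COMP and ANTLOC, prune trapping expressions, derive AE_KILL as
   ~(TRANSP | COMP), run the edge-based LCM solver to obtain PRE_INSERT_MAP
   and PRE_DELETE_MAP, and finally prune expressions whose insertion/deletion
   ratio is too high.  */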
1858 
1859 static struct edge_list *
1860 compute_pre_data (void)
1861 {
1862   struct edge_list *edge_list;
1863   basic_block bb;
1864 
1865   compute_local_properties (transp, comp, antloc, &expr_hash_table);
1866   prune_expressions (true);
1867   bitmap_vector_clear (ae_kill, last_basic_block_for_fn (cfun));
1868 
1869   /* Compute ae_kill for each basic block using:
1870 
1871      ~(TRANSP | COMP)
1872   */
1873 
1874   FOR_EACH_BB_FN (bb, cfun)
1875     {
1876       bitmap_ior (ae_kill[bb->index], transp[bb->index], comp[bb->index]);
1877       bitmap_not (ae_kill[bb->index], ae_kill[bb->index]);
1878     }
1879 
1880   edge_list = pre_edge_lcm (expr_hash_table.n_elems, transp, comp, antloc,
1881 			    ae_kill, &pre_insert_map, &pre_delete_map);
1882   sbitmap_vector_free (antloc);
1883   antloc = NULL;
1884   sbitmap_vector_free (ae_kill);
1885   ae_kill = NULL;
1886 
1887   prune_insertions_deletions (expr_hash_table.n_elems);
1888 
1889   return edge_list;
1890 }
1891 
1892 /* PRE utilities */
1893 
1894 /* Return nonzero if an occurrence of expression EXPR in OCCR_BB would reach
1895    block BB.
1896 
1897    VISITED is a pointer to a working buffer for tracking which BB's have
1898    been visited.  It is NULL for the top-level call.
1899 
1900    We treat reaching expressions that go through blocks containing the same
1901    reaching expression as "not reaching".  E.g. if EXPR is generated in blocks
1902    2 and 3, INSN is in block 4, and 2->3->4, we treat the expression in block
1903    2 as not reaching.  The intent is to improve the probability of finding
1904    only one reaching expression and to reduce register lifetimes by picking
1905    the closest such expression.  */
1906 
1907 static int
1908 pre_expr_reaches_here_p_work (basic_block occr_bb, struct gcse_expr *expr,
1909 			      basic_block bb, char *visited)
1910 {
1911   edge pred;
1912   edge_iterator ei;
1913 
1914   FOR_EACH_EDGE (pred, ei, bb->preds)
1915     {
1916       basic_block pred_bb = pred->src;
1917 
1918       if (pred->src == ENTRY_BLOCK_PTR_FOR_FN (cfun)
1919 	  /* Has this predecessor already been visited?  */
1920 	  || visited[pred_bb->index])
1921 	;/* Nothing to do.  */
1922 
1923       /* Does this predecessor generate this expression?  */
1924       else if (bitmap_bit_p (comp[pred_bb->index], expr->bitmap_index))
1925 	{
1926 	  /* Is this the occurrence we're looking for?
1927 	     Note that there's only one generating occurrence per block
1928 	     so we just need to check the block number.  */
1929 	  if (occr_bb == pred_bb)
1930 	    return 1;
1931 
1932 	  visited[pred_bb->index] = 1;
1933 	}
1934       /* Ignore this predecessor if it kills the expression.  */
1935       else if (! bitmap_bit_p (transp[pred_bb->index], expr->bitmap_index))
1936 	visited[pred_bb->index] = 1;
1937 
1938       /* Neither gen nor kill.  */
1939       else
1940 	{
1941 	  visited[pred_bb->index] = 1;
1942 	  if (pre_expr_reaches_here_p_work (occr_bb, expr, pred_bb, visited))
1943 	    return 1;
1944 	}
1945     }
1946 
1947   /* All paths have been checked.  */
1948   return 0;
1949 }
1950 
1951 /* The wrapper for pre_expr_reaches_here_p_work that ensures that any
1952    memory allocated for that function is returned.  */
1953 
1954 static int
1955 pre_expr_reaches_here_p (basic_block occr_bb, struct gcse_expr *expr, basic_block bb)
1956 {
1957   int rval;
1958   char *visited = XCNEWVEC (char, last_basic_block_for_fn (cfun));
1959 
1960   rval = pre_expr_reaches_here_p_work (occr_bb, expr, bb, visited);
1961 
1962   free (visited);
1963   return rval;
1964 }
1965 
1966 /* Generate RTL to copy an EXP to REG and return it.  */
1967 
1968 rtx_insn *
1969 prepare_copy_insn (rtx reg, rtx exp)
1970 {
1971   rtx_insn *pat;
1972 
1973   start_sequence ();
1974 
1975   /* If the expression is something that's an operand, like a constant,
1976      just copy it to a register.  */
1977   if (general_operand (exp, GET_MODE (reg)))
1978     emit_move_insn (reg, exp);
1979 
1980   /* Otherwise, make a new insn to compute this expression and make sure the
1981      insn will be recognized (this also adds any needed CLOBBERs).  */
1982   else
1983     {
1984       rtx_insn *insn = emit_insn (gen_rtx_SET (reg, exp));
1985 
1986       if (insn_invalid_p (insn, false))
1987 	gcc_unreachable ();
1988     }
1989 
1990   pat = get_insns ();
1991   end_sequence ();
1992 
1993   return pat;
1994 }
1995 
1996 /* Generate RTL to copy an EXPR to its `reaching_reg' and return it.  */
1997 
1998 static rtx_insn *
1999 process_insert_insn (struct gcse_expr *expr)
2000 {
2001   rtx reg = expr->reaching_reg;
2002   /* Copy the expression to make sure we don't have any sharing issues.  */
2003   rtx exp = copy_rtx (expr->expr);
2004 
2005   return prepare_copy_insn (reg, exp);
2006 }
2007 
2008 /* Add EXPR to the end of basic block BB.
2009 
2010    This is used by both PRE and code hoisting.  */
2011 
2012 static void
2013 insert_insn_end_basic_block (struct gcse_expr *expr, basic_block bb)
2014 {
2015   rtx_insn *insn = BB_END (bb);
2016   rtx_insn *new_insn;
2017   rtx reg = expr->reaching_reg;
2018   int regno = REGNO (reg);
2019   rtx_insn *pat, *pat_end;
2020 
2021   pat = process_insert_insn (expr);
2022   gcc_assert (pat && INSN_P (pat));
2023 
2024   pat_end = pat;
2025   while (NEXT_INSN (pat_end) != NULL_RTX)
2026     pat_end = NEXT_INSN (pat_end);
2027 
2028   /* If the last insn is a jump, insert EXPR in front [taking care to
2029 	     handle cc0, etc. properly].  Similarly we need to take care of
2030 	     trapping instructions in the presence of non-call exceptions.  */
2031 
2032   if (JUMP_P (insn)
2033       || (NONJUMP_INSN_P (insn)
2034 	  && (!single_succ_p (bb)
2035 	      || single_succ_edge (bb)->flags & EDGE_ABNORMAL)))
2036     {
2037       /* FIXME: 'twould be nice to call prev_cc0_setter here but it aborts
2038 	 if cc0 isn't set.  */
2039       if (HAVE_cc0)
2040 	{
2041 	  rtx note = find_reg_note (insn, REG_CC_SETTER, NULL_RTX);
2042 	  if (note)
2043 	    insn = safe_as_a <rtx_insn *> (XEXP (note, 0));
2044 	  else
2045 	    {
2046 	      rtx_insn *maybe_cc0_setter = prev_nonnote_insn (insn);
2047 	      if (maybe_cc0_setter
2048 		  && INSN_P (maybe_cc0_setter)
2049 		  && sets_cc0_p (PATTERN (maybe_cc0_setter)))
2050 		insn = maybe_cc0_setter;
2051 	    }
2052 	}
2053 
2054       /* FIXME: What if something in cc0/jump uses value set in new insn?  */
2055       new_insn = emit_insn_before_noloc (pat, insn, bb);
2056     }
2057 
2058   /* Likewise if the last insn is a call, as will happen in the presence
2059      of exception handling.  */
2060   else if (CALL_P (insn)
2061 	   && (!single_succ_p (bb)
2062 	       || single_succ_edge (bb)->flags & EDGE_ABNORMAL))
2063     {
2064       /* Keeping in mind targets with small register classes and parameters
2065          in registers, we search backward and place the instructions before
2066 	 the first parameter is loaded.  Do this for all targets for consistency
2067 	 and a presumption that we'll get better code elsewhere as well.  */
2068 
2069       /* Since different machines initialize their parameter registers
2070 	 in different orders, assume nothing.  Collect the set of all
2071 	 parameter registers.  */
2072       insn = find_first_parameter_load (insn, BB_HEAD (bb));
2073 
2074       /* If we found all the parameter loads, then we want to insert
2075 	 before the first parameter load.
2076 
2077 	 If we did not find all the parameter loads, then we might have
2078 	 stopped on the head of the block, which could be a CODE_LABEL.
2079 	 If we inserted before the CODE_LABEL, then we would be putting
2080 	 the insn in the wrong basic block.  In that case, put the insn
2081 	 after the CODE_LABEL.  Also, respect NOTE_INSN_BASIC_BLOCK.  */
2082       while (LABEL_P (insn)
2083 	     || NOTE_INSN_BASIC_BLOCK_P (insn))
2084 	insn = NEXT_INSN (insn);
2085 
2086       new_insn = emit_insn_before_noloc (pat, insn, bb);
2087     }
2088   else
2089     new_insn = emit_insn_after_noloc (pat, insn, bb);
2090 
2091   while (1)
2092     {
2093       if (INSN_P (pat))
2094 	add_label_notes (PATTERN (pat), new_insn);
2095       if (pat == pat_end)
2096 	break;
2097       pat = NEXT_INSN (pat);
2098     }
2099 
2100   gcse_create_count++;
2101 
2102   if (dump_file)
2103     {
2104       fprintf (dump_file, "PRE/HOIST: end of bb %d, insn %d, ",
2105 	       bb->index, INSN_UID (new_insn));
2106       fprintf (dump_file, "copying expression %d to reg %d\n",
2107 	       expr->bitmap_index, regno);
2108     }
2109 }
2110 
2111 /* Insert partially redundant expressions on edges in the CFG to make
2112    the expressions fully redundant.  */
2113 
2114 static int
2115 pre_edge_insert (struct edge_list *edge_list, struct gcse_expr **index_map)
2116 {
2117   int e, i, j, num_edges, set_size, did_insert = 0;
2118   sbitmap *inserted;
2119 
2120   /* Where PRE_INSERT_MAP is nonzero, we add the expression on that edge
2121      if it reaches any of the deleted expressions.  */
2122 
2123   set_size = pre_insert_map[0]->size;
2124   num_edges = NUM_EDGES (edge_list);
2125   inserted = sbitmap_vector_alloc (num_edges, expr_hash_table.n_elems);
2126   bitmap_vector_clear (inserted, num_edges);
2127 
2128   for (e = 0; e < num_edges; e++)
2129     {
2130       int indx;
2131       basic_block bb = INDEX_EDGE_PRED_BB (edge_list, e);
2132 
2133       for (i = indx = 0; i < set_size; i++, indx += SBITMAP_ELT_BITS)
2134 	{
2135 	  SBITMAP_ELT_TYPE insert = pre_insert_map[e]->elms[i];
2136 
2137 	  for (j = indx;
2138 	       insert && j < (int) expr_hash_table.n_elems;
2139 	       j++, insert >>= 1)
2140 	    if ((insert & 1) != 0 && index_map[j]->reaching_reg != NULL_RTX)
2141 	      {
2142 		struct gcse_expr *expr = index_map[j];
2143 		struct gcse_occr *occr;
2144 
2145 		/* Now look at each deleted occurrence of this expression.  */
2146 		for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
2147 		  {
2148 		    if (! occr->deleted_p)
2149 		      continue;
2150 
2151 		    /* Insert this expression on this edge if it would
2152 		       reach the deleted occurrence in BB.  */
2153 		    if (!bitmap_bit_p (inserted[e], j))
2154 		      {
2155 			rtx_insn *insn;
2156 			edge eg = INDEX_EDGE (edge_list, e);
2157 
2158 			/* We can't insert anything on an abnormal and
2159 			   critical edge, so we insert the insn at the end of
2160 			   the previous block. There are several alternatives
2161 			   detailed in Morgan's book, p. 277 (sec 10.5), for
2162 			   handling this situation.  This one is easiest for
2163 			   now.  */
2164 
2165 			if (eg->flags & EDGE_ABNORMAL)
2166 			  insert_insn_end_basic_block (index_map[j], bb);
2167 			else
2168 			  {
2169 			    insn = process_insert_insn (index_map[j]);
2170 			    insert_insn_on_edge (insn, eg);
2171 			  }
2172 
2173 			if (dump_file)
2174 			  {
2175 			    fprintf (dump_file, "PRE: edge (%d,%d), ",
2176 				     bb->index,
2177 				     INDEX_EDGE_SUCC_BB (edge_list, e)->index);
2178 			    fprintf (dump_file, "copy expression %d\n",
2179 				     expr->bitmap_index);
2180 			  }
2181 
2182 			update_ld_motion_stores (expr);
2183 			bitmap_set_bit (inserted[e], j);
2184 			did_insert = 1;
2185 			gcse_create_count++;
2186 		      }
2187 		  }
2188 	      }
2189 	}
2190     }
2191 
2192   sbitmap_vector_free (inserted);
2193   return did_insert;
2194 }
2195 
2196 /* Copy the result of EXPR->EXPR generated by INSN to EXPR->REACHING_REG.
2197    Given "old_reg <- expr" (INSN), instead of adding after it
2198      reaching_reg <- old_reg
2199    it's better to do the following:
2200      reaching_reg <- expr
2201      old_reg      <- reaching_reg
2202    because this way copy propagation can discover additional PRE
2203    opportunities.  But if this fails, we try the old way.
2204    When "expr" is a store, i.e.
2205    given "MEM <- old_reg", instead of adding after it
2206      reaching_reg <- old_reg
2207    it's better to add it before as follows:
2208      reaching_reg <- old_reg
2209      MEM          <- reaching_reg.  */
2210 
2211 static void
2212 pre_insert_copy_insn (struct gcse_expr *expr, rtx_insn *insn)
2213 {
2214   rtx reg = expr->reaching_reg;
2215   int regno = REGNO (reg);
2216   int indx = expr->bitmap_index;
2217   rtx pat = PATTERN (insn);
2218   rtx set, first_set;
2219   rtx_insn *new_insn;
2220   rtx old_reg;
2221   int i;
2222 
2223   /* This block matches the logic in hash_scan_insn.  */
2224   switch (GET_CODE (pat))
2225     {
2226     case SET:
2227       set = pat;
2228       break;
2229 
2230     case PARALLEL:
2231       /* Search through the parallel looking for the set whose
2232 	 source was the expression that we're interested in.  */
2233       first_set = NULL_RTX;
2234       set = NULL_RTX;
2235       for (i = 0; i < XVECLEN (pat, 0); i++)
2236 	{
2237 	  rtx x = XVECEXP (pat, 0, i);
2238 	  if (GET_CODE (x) == SET)
2239 	    {
2240 	      /* If the source was a REG_EQUAL or REG_EQUIV note, we
2241 		 may not find an equivalent expression, but in this
2242 		 case the PARALLEL will have a single set.  */
2243 	      if (first_set == NULL_RTX)
2244 		first_set = x;
2245 	      if (expr_equiv_p (SET_SRC (x), expr->expr))
2246 	        {
2247 	          set = x;
2248 	          break;
2249 	        }
2250 	    }
2251 	}
2252 
2253       gcc_assert (first_set);
2254       if (set == NULL_RTX)
2255         set = first_set;
2256       break;
2257 
2258     default:
2259       gcc_unreachable ();
2260     }
2261 
2262   if (REG_P (SET_DEST (set)))
2263     {
2264       old_reg = SET_DEST (set);
2265       /* Check if we can modify the set destination in the original insn.  */
2266       if (validate_change (insn, &SET_DEST (set), reg, 0))
2267         {
2268           new_insn = gen_move_insn (old_reg, reg);
2269           new_insn = emit_insn_after (new_insn, insn);
2270         }
2271       else
2272         {
2273           new_insn = gen_move_insn (reg, old_reg);
2274           new_insn = emit_insn_after (new_insn, insn);
2275         }
2276     }
2277   else /* This is possible only in case of a store to memory.  */
2278     {
2279       old_reg = SET_SRC (set);
2280       new_insn = gen_move_insn (reg, old_reg);
2281 
2282       /* Check if we can modify the set source in the original insn.  */
2283       if (validate_change (insn, &SET_SRC (set), reg, 0))
2284         new_insn = emit_insn_before (new_insn, insn);
2285       else
2286         new_insn = emit_insn_after (new_insn, insn);
2287     }
2288 
2289   gcse_create_count++;
2290 
2291   if (dump_file)
2292     fprintf (dump_file,
2293 	     "PRE: bb %d, insn %d, copy expression %d in insn %d to reg %d\n",
2294 	      BLOCK_FOR_INSN (insn)->index, INSN_UID (new_insn), indx,
2295 	      INSN_UID (insn), regno);
2296 }
2297 
2298 /* Copy available expressions that reach the redundant expression
2299    to `reaching_reg'.  */
2300 
2301 static void
2302 pre_insert_copies (void)
2303 {
2304   unsigned int i, added_copy;
2305   struct gcse_expr *expr;
2306   struct gcse_occr *occr;
2307   struct gcse_occr *avail;
2308 
2309   /* For each available expression in the table, copy the result to
2310      `reaching_reg' if the expression reaches a deleted one.
2311 
2312      ??? The current algorithm is rather brute force.
2313      Need to do some profiling.  */
2314 
2315   for (i = 0; i < expr_hash_table.size; i++)
2316     for (expr = expr_hash_table.table[i]; expr; expr = expr->next_same_hash)
2317       {
2318 	/* If the basic block isn't reachable, PPOUT will be TRUE.  However,
2319 	   we don't want to insert a copy here because the expression may not
2320 	   really be redundant.  So only insert an insn if the expression was
2321 	   deleted.  This test also avoids further processing if the
2322 	   expression wasn't deleted anywhere.  */
2323 	if (expr->reaching_reg == NULL)
2324 	  continue;
2325 
2326 	/* Set when we add a copy for that expression.  */
2327 	added_copy = 0;
2328 
2329 	for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
2330 	  {
2331 	    if (! occr->deleted_p)
2332 	      continue;
2333 
2334 	    for (avail = expr->avail_occr; avail != NULL; avail = avail->next)
2335 	      {
2336 		rtx_insn *insn = avail->insn;
2337 
2338 		/* No need to handle this one if handled already.  */
2339 		if (avail->copied_p)
2340 		  continue;
2341 
2342 		/* Don't handle this one if it's a redundant one.  */
2343 		if (insn->deleted ())
2344 		  continue;
2345 
2346 		/* Or if the expression doesn't reach the deleted one.  */
2347 		if (! pre_expr_reaches_here_p (BLOCK_FOR_INSN (avail->insn),
2348 					       expr,
2349 					       BLOCK_FOR_INSN (occr->insn)))
2350 		  continue;
2351 
2352                 added_copy = 1;
2353 
2354 		/* Copy the result of avail to reaching_reg.  */
2355 		pre_insert_copy_insn (expr, insn);
2356 		avail->copied_p = 1;
2357 	      }
2358 	  }
2359 
2360 	  if (added_copy)
2361             update_ld_motion_stores (expr);
2362       }
2363 }
2364 
2365 struct set_data
2366 {
2367   rtx_insn *insn;
2368   const_rtx set;
2369   int nsets;
2370 };
2371 
2372 /* Increment number of sets and record set in DATA.  */
2373 
2374 static void
2375 record_set_data (rtx dest, const_rtx set, void *data)
2376 {
2377   struct set_data *s = (struct set_data *)data;
2378 
2379   if (GET_CODE (set) == SET)
2380     {
2381       /* We treat insns having multiple sets, where all but one are
2382 	 dead, as single-set insns.  In the common case only a single
2383 	 set is present, so we want to avoid checking for REG_UNUSED
2384 	 notes unless necessary.  */
2385       if (s->nsets == 1
2386 	  && find_reg_note (s->insn, REG_UNUSED, SET_DEST (s->set))
2387 	  && !side_effects_p (s->set))
2388 	s->nsets = 0;
2389 
2390       if (!s->nsets)
2391 	{
2392 	  /* Record this set.  */
2393 	  s->nsets += 1;
2394 	  s->set = set;
2395 	}
2396       else if (!find_reg_note (s->insn, REG_UNUSED, dest)
2397 	       || side_effects_p (set))
2398 	s->nsets += 1;
2399     }
2400 }
2401 
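/* Return the single interesting SET of INSN.  Additional sets whose
   destinations carry REG_UNUSED notes and have no side effects are ignored,
   so e.g. (a purely illustrative case) a PARALLEL setting both a result
   register and a dead flags register still counts as a single set.  The
   insn is required to have exactly one such set.  */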
2402 static const_rtx
2403 single_set_gcse (rtx_insn *insn)
2404 {
2405   struct set_data s;
2406   rtx pattern;
2407 
2408   gcc_assert (INSN_P (insn));
2409 
2410   /* Optimize common case.  */
2411   pattern = PATTERN (insn);
2412   if (GET_CODE (pattern) == SET)
2413     return pattern;
2414 
2415   s.insn = insn;
2416   s.nsets = 0;
2417   note_stores (pattern, record_set_data, &s);
2418 
2419   /* Considered invariant insns have exactly one set.  */
2420   gcc_assert (s.nsets == 1);
2421   return s.set;
2422 }
2423 
2424 /* Emit move from SRC to DEST noting the equivalence with expression computed
2425    in INSN.  */
2426 
2427 static rtx_insn *
2428 gcse_emit_move_after (rtx dest, rtx src, rtx_insn *insn)
2429 {
2430   rtx_insn *new_rtx;
2431   const_rtx set = single_set_gcse (insn);
2432   rtx set2;
2433   rtx note;
2434   rtx eqv = NULL_RTX;
2435 
2436   /* This should never fail since we're creating a reg->reg copy
2437      we've verified to be valid.  */
2438 
2439   new_rtx = emit_insn_after (gen_move_insn (dest, src), insn);
2440 
2441   /* Note the equivalence for local CSE pass.  Take the note from the old
2442      set if there was one.  Otherwise record the SET_SRC from the old set
2443      unless DEST is also an operand of the SET_SRC.  */
2444   set2 = single_set (new_rtx);
2445   if (!set2 || !rtx_equal_p (SET_DEST (set2), dest))
2446     return new_rtx;
2447   if ((note = find_reg_equal_equiv_note (insn)))
2448     eqv = XEXP (note, 0);
2449   else if (! REG_P (dest)
2450 	   || ! reg_mentioned_p (dest, SET_SRC (set)))
2451     eqv = SET_SRC (set);
2452 
2453   if (eqv != NULL_RTX)
2454     set_unique_reg_note (new_rtx, REG_EQUAL, copy_insn_1 (eqv));
2455 
2456   return new_rtx;
2457 }
2458 
2459 /* Delete redundant computations.
2460    Deletion is done by changing the insn to copy the `reaching_reg' of
2461    the expression into the result of the SET.  It is left to later passes
2462    to propagate the copy or eliminate it.
2463 
2464    Return nonzero if a change is made.  */
2465 
2466 static int
2467 pre_delete (void)
2468 {
2469   unsigned int i;
2470   int changed;
2471   struct gcse_expr *expr;
2472   struct gcse_occr *occr;
2473 
2474   changed = 0;
2475   for (i = 0; i < expr_hash_table.size; i++)
2476     for (expr = expr_hash_table.table[i]; expr; expr = expr->next_same_hash)
2477       {
2478 	int indx = expr->bitmap_index;
2479 
2480 	/* We only need to search antic_occr since we require ANTLOC != 0.  */
2481 	for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
2482 	  {
2483 	    rtx_insn *insn = occr->insn;
2484 	    rtx set;
2485 	    basic_block bb = BLOCK_FOR_INSN (insn);
2486 
2487 	    /* We only delete insns that have a single_set.  */
2488 	    if (bitmap_bit_p (pre_delete_map[bb->index], indx)
2489 		&& (set = single_set (insn)) != 0
2490                 && dbg_cnt (pre_insn))
2491 	      {
2492 		/* Create a pseudo-reg to store the result of reaching
2493 		   expressions into.  Get the mode for the new pseudo from
2494 		   the mode of the original destination pseudo.  */
2495 		if (expr->reaching_reg == NULL)
2496 		  expr->reaching_reg = gen_reg_rtx_and_attrs (SET_DEST (set));
2497 
2498 		gcse_emit_move_after (SET_DEST (set), expr->reaching_reg, insn);
2499 		delete_insn (insn);
2500 		occr->deleted_p = 1;
2501 		changed = 1;
2502 		gcse_subst_count++;
2503 
2504 		if (dump_file)
2505 		  {
2506 		    fprintf (dump_file,
2507 			     "PRE: redundant insn %d (expression %d) in ",
2508 			       INSN_UID (insn), indx);
2509 		    fprintf (dump_file, "bb %d, reaching reg is %d\n",
2510 			     bb->index, REGNO (expr->reaching_reg));
2511 		  }
2512 	      }
2513 	  }
2514       }
2515 
2516   return changed;
2517 }
2518 
2519 /* Perform GCSE optimizations using PRE.
2520    This is called by one_pre_gcse_pass after all the dataflow analysis
2521    has been done.
2522 
2523    This is based on the original Morel-Renvoise paper, Fred Chow's thesis, and
2524    lazy code motion from Knoop, Ruthing and Steffen as described in Advanced
2525    Compiler Design and Implementation.
2526 
2527    ??? A new pseudo reg is created to hold the reaching expression.  The nice
2528    thing about the classical approach is that it would try to use an existing
2529    reg.  If the register can't be adequately optimized [i.e. we introduce
2530    reload problems], one could add a pass here to propagate the new register
2531    through the block.
2532 
2533    ??? We don't handle single sets in PARALLELs because we're [currently] not
2534    able to copy the rest of the parallel when we insert copies to create full
2535    redundancies from partial redundancies.  However, there's no reason why we
2536    can't handle PARALLELs in the cases where there are no partial
2537    redundancies.  */
2538 
2539 static int
2540 pre_gcse (struct edge_list *edge_list)
2541 {
2542   unsigned int i;
2543   int did_insert, changed;
2544   struct gcse_expr **index_map;
2545   struct gcse_expr *expr;
2546 
2547   /* Compute a mapping from expression number (`bitmap_index') to
2548      hash table entry.  */
2549 
2550   index_map = XCNEWVEC (struct gcse_expr *, expr_hash_table.n_elems);
2551   for (i = 0; i < expr_hash_table.size; i++)
2552     for (expr = expr_hash_table.table[i]; expr; expr = expr->next_same_hash)
2553       index_map[expr->bitmap_index] = expr;
2554 
2555   /* Delete the redundant insns first so that
2556      - we know what register to use for the new insns and for the other
2557        ones with reaching expressions
2558      - we know which insns are redundant when we go to create copies  */
2559 
2560   changed = pre_delete ();
2561   did_insert = pre_edge_insert (edge_list, index_map);
2562 
2563   /* In other places with reaching expressions, copy the expression to the
2564      specially allocated pseudo-reg that reaches the redundant expr.  */
2565   pre_insert_copies ();
2566   if (did_insert)
2567     {
2568       commit_edge_insertions ();
2569       changed = 1;
2570     }
2571 
2572   free (index_map);
2573   return changed;
2574 }
2575 
2576 /* Top level routine to perform one PRE GCSE pass.
2577 
2578    Return nonzero if a change was made.  */
2579 
2580 static int
2581 one_pre_gcse_pass (void)
2582 {
2583   int changed = 0;
2584 
2585   gcse_subst_count = 0;
2586   gcse_create_count = 0;
2587 
2588   /* Return if there's nothing to do, or it is too expensive.  */
2589   if (n_basic_blocks_for_fn (cfun) <= NUM_FIXED_BLOCKS + 1
2590       || gcse_or_cprop_is_too_expensive (_("PRE disabled")))
2591     return 0;
2592 
2593   /* We need alias.  */
2594   init_alias_analysis ();
2595 
2596   bytes_used = 0;
2597   gcc_obstack_init (&gcse_obstack);
2598   alloc_gcse_mem ();
2599 
2600   alloc_hash_table (&expr_hash_table);
2601   add_noreturn_fake_exit_edges ();
2602   if (flag_gcse_lm)
2603     compute_ld_motion_mems ();
2604 
2605   compute_hash_table (&expr_hash_table);
2606   if (flag_gcse_lm)
2607     trim_ld_motion_mems ();
2608   if (dump_file)
2609     dump_hash_table (dump_file, "Expression", &expr_hash_table);
2610 
2611   if (expr_hash_table.n_elems > 0)
2612     {
2613       struct edge_list *edge_list;
2614       alloc_pre_mem (last_basic_block_for_fn (cfun), expr_hash_table.n_elems);
2615       edge_list = compute_pre_data ();
2616       changed |= pre_gcse (edge_list);
2617       free_edge_list (edge_list);
2618       free_pre_mem ();
2619     }
2620 
2621   if (flag_gcse_lm)
2622     free_ld_motion_mems ();
2623   remove_fake_exit_edges ();
2624   free_hash_table (&expr_hash_table);
2625 
2626   free_gcse_mem ();
2627   obstack_free (&gcse_obstack, NULL);
2628 
2629   /* We are finished with alias.  */
2630   end_alias_analysis ();
2631 
2632   if (dump_file)
2633     {
2634       fprintf (dump_file, "PRE GCSE of %s, %d basic blocks, %d bytes needed, ",
2635 	       current_function_name (), n_basic_blocks_for_fn (cfun),
2636 	       bytes_used);
2637       fprintf (dump_file, "%d substs, %d insns created\n",
2638 	       gcse_subst_count, gcse_create_count);
2639     }
2640 
2641   return changed;
2642 }
2643 
2644 /* If X contains any LABEL_REF's, add REG_LABEL_OPERAND notes for them
2645    to INSN.  If such notes are added to an insn which references a
2646    CODE_LABEL, the LABEL_NUSES count is incremented.  We have to add
2647    these notes, because the following loop optimization pass requires
2648    them.  */
2649 
2650 /* ??? If there was a jump optimization pass after gcse and before loop,
2651    then we would not need to do this here, because jump would add the
2652    necessary REG_LABEL_OPERAND and REG_LABEL_TARGET notes.  */
2653 
2654 static void
2655 add_label_notes (rtx x, rtx_insn *insn)
2656 {
2657   enum rtx_code code = GET_CODE (x);
2658   int i, j;
2659   const char *fmt;
2660 
2661   if (code == LABEL_REF && !LABEL_REF_NONLOCAL_P (x))
2662     {
2663       /* This code used to ignore labels that referred to dispatch tables to
2664 	 avoid flow generating (slightly) worse code.
2665 
2666 	 We no longer ignore such label references (see LABEL_REF handling in
2667 	 mark_jump_label for additional information).  */
2668 
2669       /* There's no reason for current users to emit jump-insns with
2670 	 such a LABEL_REF, so we don't have to handle REG_LABEL_TARGET
2671 	 notes.  */
2672       gcc_assert (!JUMP_P (insn));
2673       add_reg_note (insn, REG_LABEL_OPERAND, label_ref_label (x));
2674 
2675       if (LABEL_P (label_ref_label (x)))
2676 	LABEL_NUSES (label_ref_label (x))++;
2677 
2678       return;
2679     }
2680 
2681   for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
2682     {
2683       if (fmt[i] == 'e')
2684 	add_label_notes (XEXP (x, i), insn);
2685       else if (fmt[i] == 'E')
2686 	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
2687 	  add_label_notes (XVECEXP (x, i, j), insn);
2688     }
2689 }
2690 
2691 /* Code Hoisting variables and subroutines.  */
2692 
2693 /* Very busy expressions.  */
2694 static sbitmap *hoist_vbein;
2695 static sbitmap *hoist_vbeout;
2696 
2697 /* ??? We could compute post dominators and run this algorithm in
2698    reverse to perform tail merging; doing so would probably be
2699    more effective than the tail merging code in jump.c.
2700 
2701    It's unclear if tail merging could be run in parallel with
2702    code hoisting.  It would be nice.  */
2703 
2704 /* Allocate vars used for code hoisting analysis.  */
2705 
2706 static void
2707 alloc_code_hoist_mem (int n_blocks, int n_exprs)
2708 {
2709   antloc = sbitmap_vector_alloc (n_blocks, n_exprs);
2710   transp = sbitmap_vector_alloc (n_blocks, n_exprs);
2711   comp = sbitmap_vector_alloc (n_blocks, n_exprs);
2712 
2713   hoist_vbein = sbitmap_vector_alloc (n_blocks, n_exprs);
2714   hoist_vbeout = sbitmap_vector_alloc (n_blocks, n_exprs);
2715 }
2716 
2717 /* Free vars used for code hoisting analysis.  */
2718 
2719 static void
2720 free_code_hoist_mem (void)
2721 {
2722   sbitmap_vector_free (antloc);
2723   sbitmap_vector_free (transp);
2724   sbitmap_vector_free (comp);
2725 
2726   sbitmap_vector_free (hoist_vbein);
2727   sbitmap_vector_free (hoist_vbeout);
2728 
2729   free_dominance_info (CDI_DOMINATORS);
2730 }
2731 
2732 /* Compute the very busy expressions at entry/exit from each block.
2733 
2734    An expression is very busy if all paths from a given point
2735    compute the expression.  */
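
/* A sketch of the equations iterated to a fixed point below, written to
   match the loop body rather than any particular reference:

     VBEOUT(bb) = COMP(bb) | (intersection over successors S of VBEIN(S))
     VBEIN(bb)  = ANTLOC(bb) | (VBEOUT(bb) & TRANSP(bb))

   Blocks are visited in reverse order so that this backward problem
   converges in fewer passes.  */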
2736 
2737 static void
2738 compute_code_hoist_vbeinout (void)
2739 {
2740   int changed, passes;
2741   basic_block bb;
2742 
2743   bitmap_vector_clear (hoist_vbeout, last_basic_block_for_fn (cfun));
2744   bitmap_vector_clear (hoist_vbein, last_basic_block_for_fn (cfun));
2745 
2746   passes = 0;
2747   changed = 1;
2748 
2749   while (changed)
2750     {
2751       changed = 0;
2752 
2753       /* We scan the blocks in reverse order to speed up
2754 	 convergence.  */
2755       FOR_EACH_BB_REVERSE_FN (bb, cfun)
2756 	{
2757 	  if (bb->next_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
2758 	    {
2759 	      bitmap_intersection_of_succs (hoist_vbeout[bb->index],
2760 					    hoist_vbein, bb);
2761 
2762 	      /* Include expressions in VBEout that are calculated
2763 		 in BB and available at its end.  */
2764 	      bitmap_ior (hoist_vbeout[bb->index],
2765 			      hoist_vbeout[bb->index], comp[bb->index]);
2766 	    }
2767 
2768 	  changed |= bitmap_or_and (hoist_vbein[bb->index],
2769 					      antloc[bb->index],
2770 					      hoist_vbeout[bb->index],
2771 					      transp[bb->index]);
2772 	}
2773 
2774       passes++;
2775     }
2776 
2777   if (dump_file)
2778     {
2779       fprintf (dump_file, "hoisting vbeinout computation: %d passes\n", passes);
2780 
2781       FOR_EACH_BB_FN (bb, cfun)
2782         {
2783 	  fprintf (dump_file, "vbein (%d): ", bb->index);
2784 	  dump_bitmap_file (dump_file, hoist_vbein[bb->index]);
2785 	  fprintf (dump_file, "vbeout(%d): ", bb->index);
2786 	  dump_bitmap_file (dump_file, hoist_vbeout[bb->index]);
2787 	}
2788     }
2789 }
2790 
2791 /* Top level routine to do the dataflow analysis needed by code hoisting.  */
2792 
2793 static void
2794 compute_code_hoist_data (void)
2795 {
2796   compute_local_properties (transp, comp, antloc, &expr_hash_table);
2797   prune_expressions (false);
2798   compute_code_hoist_vbeinout ();
2799   calculate_dominance_info (CDI_DOMINATORS);
2800   if (dump_file)
2801     fprintf (dump_file, "\n");
2802 }
2803 
2804 /* Update register pressure for BB when hoisting an expression from
2805    instruction FROM, if live ranges of inputs are shrunk.  Also
2806    maintain live_in information if live range of register referred
2807    in FROM is shrunk.
2808 
2809    Return 0 if register pressure doesn't change, otherwise return
2810    the number by which register pressure is decreased.
2811 
2812    NOTE: Register pressure won't be increased in this function.  */
2813 
2814 static int
2815 update_bb_reg_pressure (basic_block bb, rtx_insn *from)
2816 {
2817   rtx dreg;
2818   rtx_insn *insn;
2819   basic_block succ_bb;
2820   df_ref use, op_ref;
2821   edge succ;
2822   edge_iterator ei;
2823   int decreased_pressure = 0;
2824   int nregs;
2825   enum reg_class pressure_class;
2826 
2827   FOR_EACH_INSN_USE (use, from)
2828     {
2829       dreg = DF_REF_REAL_REG (use);
2830       /* The live range of the register is shrunk only if it isn't:
2831 	 1. referred to on any path from the end of this block to EXIT, or
2832 	 2. referred to by insns other than FROM in this block.  */
2833       FOR_EACH_EDGE (succ, ei, bb->succs)
2834 	{
2835 	  succ_bb = succ->dest;
2836 	  if (succ_bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
2837 	    continue;
2838 
2839 	  if (bitmap_bit_p (BB_DATA (succ_bb)->live_in, REGNO (dreg)))
2840 	    break;
2841 	}
2842       if (succ != NULL)
2843 	continue;
2844 
2845       op_ref = DF_REG_USE_CHAIN (REGNO (dreg));
2846       for (; op_ref; op_ref = DF_REF_NEXT_REG (op_ref))
2847 	{
2848 	  if (!DF_REF_INSN_INFO (op_ref))
2849 	    continue;
2850 
2851 	  insn = DF_REF_INSN (op_ref);
2852 	  if (BLOCK_FOR_INSN (insn) == bb
2853 	      && NONDEBUG_INSN_P (insn) && insn != from)
2854 	    break;
2855 	}
2856 
2857       pressure_class = get_regno_pressure_class (REGNO (dreg), &nregs);
2858       /* Decrease register pressure and update live_in information for
2859 	 this block.  */
2860       if (!op_ref && pressure_class != NO_REGS)
2861 	{
2862 	  decreased_pressure += nregs;
2863 	  BB_DATA (bb)->max_reg_pressure[pressure_class] -= nregs;
2864 	  bitmap_clear_bit (BB_DATA (bb)->live_in, REGNO (dreg));
2865 	}
2866     }
2867   return decreased_pressure;
2868 }
2869 
2870 /* Determine if the expression EXPR should be hoisted to EXPR_BB up in
2871    flow graph, if it can reach BB unimpaired.  Stop the search if the
2872    expression would need to be moved more than DISTANCE instructions.
2873 
2874    DISTANCE is the number of instructions through which EXPR can be
2875    hoisted up in flow graph.
2876 
2877    BB_SIZE points to an array which contains the number of instructions
2878    for each basic block.
2879 
2880    PRESSURE_CLASS and NREGS are register class and number of hard registers
2881    for storing EXPR.
2882 
2883    HOISTED_BBS points to a bitmap indicating basic blocks through which
2884    EXPR is hoisted.
2885 
2886    FROM is the instruction from which EXPR is hoisted.
2887 
2888    It's unclear exactly what Muchnick meant by "unimpaired".  It seems
2889    to me that the expression must either be computed or transparent in
2890    *every* block in the path(s) from EXPR_BB to BB.  Any other definition
2891    would allow the expression to be hoisted out of loops, even if
2892    the expression wasn't a loop invariant.
2893 
2894    Contrast this to reachability for PRE where an expression is
2895    considered reachable if *any* path reaches instead of *all*
2896    paths.  */
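
/* To illustrate the distance bookkeeping (the numbers are made up): with a
   remaining DISTANCE of 30, walking up through an intermediate block of 20
   insns leaves a budget of 10, and a further block of 15 insns exhausts it,
   so the hoist is rejected, unless -fira-hoist-pressure lets the block be
   traversed for free or even credits the budget when register pressure
   decreases.  */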
2897 
2898 static int
2899 should_hoist_expr_to_dom (basic_block expr_bb, struct gcse_expr *expr,
2900 			  basic_block bb, sbitmap visited,
2901 			  HOST_WIDE_INT distance,
2902 			  int *bb_size, enum reg_class pressure_class,
2903 			  int *nregs, bitmap hoisted_bbs, rtx_insn *from)
2904 {
2905   unsigned int i;
2906   edge pred;
2907   edge_iterator ei;
2908   sbitmap_iterator sbi;
2909   int visited_allocated_locally = 0;
2910   int decreased_pressure = 0;
2911 
2912   if (flag_ira_hoist_pressure)
2913     {
2914       /* Record old information of basic block BB when it is visited
2915 	 for the first time.  */
2916       if (!bitmap_bit_p (hoisted_bbs, bb->index))
2917 	{
2918 	  struct bb_data *data = BB_DATA (bb);
2919 	  bitmap_copy (data->backup, data->live_in);
2920 	  data->old_pressure = data->max_reg_pressure[pressure_class];
2921 	}
2922       decreased_pressure = update_bb_reg_pressure (bb, from);
2923     }
2924   /* Terminate the search if the distance for which EXPR is allowed to move
2925      is exhausted.  */
2926   if (distance > 0)
2927     {
2928       if (flag_ira_hoist_pressure)
2929 	{
2930 	  /* Prefer to hoist EXPR if register pressure is decreased.  */
2931 	  if (decreased_pressure > *nregs)
2932 	    distance += bb_size[bb->index];
2933 	  /* Let EXPR be hoisted through the basic block at no cost if one
2934 	     of the following conditions is satisfied:
2935 
2936 	     1. The basic block has low register pressure.
2937 	     2. Register pressure won't be increased after hoisting EXPR.
2938 
2939 	     Constant expressions are handled conservatively, because
2940 	     hoisting constant expressions aggressively results in worse
2941 	     code.  This decision is based on observation of CSiBE on the
2942 	     ARM target, while it has no obvious effect on other
2943 	     targets like x86, x86_64, mips and powerpc.  */
2944 	  else if (CONST_INT_P (expr->expr)
2945 		   || (BB_DATA (bb)->max_reg_pressure[pressure_class]
2946 			 >= ira_class_hard_regs_num[pressure_class]
2947 		       && decreased_pressure < *nregs))
2948 	    distance -= bb_size[bb->index];
2949 	}
2950       else
2951 	distance -= bb_size[bb->index];
2952 
2953       if (distance <= 0)
2954 	return 0;
2955     }
2956   else
2957     gcc_assert (distance == 0);
2958 
2959   if (visited == NULL)
2960     {
2961       visited_allocated_locally = 1;
2962       visited = sbitmap_alloc (last_basic_block_for_fn (cfun));
2963       bitmap_clear (visited);
2964     }
2965 
2966   FOR_EACH_EDGE (pred, ei, bb->preds)
2967     {
2968       basic_block pred_bb = pred->src;
2969 
2970       if (pred->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
2971 	break;
2972       else if (pred_bb == expr_bb)
2973 	continue;
2974       else if (bitmap_bit_p (visited, pred_bb->index))
2975 	continue;
2976       else if (! bitmap_bit_p (transp[pred_bb->index], expr->bitmap_index))
2977 	break;
2978       /* Not killed.  */
2979       else
2980 	{
2981 	  bitmap_set_bit (visited, pred_bb->index);
2982 	  if (! should_hoist_expr_to_dom (expr_bb, expr, pred_bb,
2983 					  visited, distance, bb_size,
2984 					  pressure_class, nregs,
2985 					  hoisted_bbs, from))
2986 	    break;
2987 	}
2988     }
2989   if (visited_allocated_locally)
2990     {
2991       /* If EXPR can be hoisted to expr_bb, record basic blocks through
2992 	 which EXPR is hoisted in hoisted_bbs.  */
2993       if (flag_ira_hoist_pressure && !pred)
2994 	{
2995 	  /* Record the basic block from which EXPR is hoisted.  */
2996 	  bitmap_set_bit (visited, bb->index);
2997 	  EXECUTE_IF_SET_IN_BITMAP (visited, 0, i, sbi)
2998 	    bitmap_set_bit (hoisted_bbs, i);
2999 	}
3000       sbitmap_free (visited);
3001     }
3002 
3003   return (pred == NULL);
3004 }
3005 
3006 /* Find occurrence in BB.  */
3007 
3008 static struct gcse_occr *
3009 find_occr_in_bb (struct gcse_occr *occr, basic_block bb)
3010 {
3011   /* Find the right occurrence of this expression.  */
3012   while (occr && BLOCK_FOR_INSN (occr->insn) != bb)
3013     occr = occr->next;
3014 
3015   return occr;
3016 }
3017 
3018 /* Actually perform code hoisting.
3019 
3020    The code hoisting pass can hoist multiple computations of the same
3021    expression along dominated paths to a dominating basic block, like
3022    from b2/b3 to b1 as depicted below:
3023 
3024           b1      ------
3025           /\         |
3026          /  \        |
3027         bx   by   distance
3028        /      \      |
3029       /        \     |
3030      b2        b3 ------
3031 
3032    Unfortunately code hoisting generally extends the live range of an
3033    output pseudo register, which increases register pressure and hurts
3034    register allocation.  To address this issue, an attribute MAX_DISTANCE
3035    is computed and attached to each expression.  The attribute is computed
3036    from rtx cost of the corresponding expression and it's used to control
3037    how long the expression can be hoisted up in flow graph.  As the
3038    expression is hoisted up in flow graph, GCC decreases its DISTANCE
3039    and stops the hoist if DISTANCE reaches 0.  Code hoisting can decrease
3040    register pressure if live ranges of inputs are shrunk.
3041 
3042    Option "-fira-hoist-pressure" implements register pressure directed
3043    hoisting based on the above method.  The rationale is:
3044      1. Calculate register pressure for each basic block by reusing IRA
3045 	facility.
3046      2. When an expression is hoisted through one basic block, GCC checks
3047 	the change of live ranges for inputs/output.  The basic block's
3048 	register pressure will be increased because of extended live
3049 	range of output.  However, register pressure will be decreased
3050 	if the live ranges of inputs are shrunk.
3051      3. After knowing how hoisting affects register pressure, GCC prefers
3052 	to hoist the expression if it can decrease register pressure, by
3053 	increasing DISTANCE of the corresponding expression.
3054      4. If hoisting the expression increases register pressure, GCC checks
3055 	register pressure of the basic block and decreases DISTANCE only if
3056 	the register pressure is high.  In other words, the expression will be
3057 	hoisted through at no cost if the basic block has low register
3058 	pressure.
3059      5. Update register pressure information for basic blocks through
3060 	which expression is hoisted.  */
3061 
3062 static int
3063 hoist_code (void)
3064 {
3065   basic_block bb, dominated;
3066   vec<basic_block> dom_tree_walk;
3067   unsigned int dom_tree_walk_index;
3068   vec<basic_block> domby;
3069   unsigned int i, j, k;
3070   struct gcse_expr **index_map;
3071   struct gcse_expr *expr;
3072   int *to_bb_head;
3073   int *bb_size;
3074   int changed = 0;
3075   struct bb_data *data;
3076   /* Basic blocks that have occurrences reachable from BB.  */
3077   bitmap from_bbs;
3078   /* Basic blocks through which expr is hoisted.  */
3079   bitmap hoisted_bbs = NULL;
3080   bitmap_iterator bi;
3081 
3082   /* Compute a mapping from expression number (`bitmap_index') to
3083      hash table entry.  */
3084 
3085   index_map = XCNEWVEC (struct gcse_expr *, expr_hash_table.n_elems);
3086   for (i = 0; i < expr_hash_table.size; i++)
3087     for (expr = expr_hash_table.table[i]; expr; expr = expr->next_same_hash)
3088       index_map[expr->bitmap_index] = expr;
3089 
3090   /* Calculate sizes of basic blocks and note how far
3091      each instruction is from the start of its block.  We then use this
3092      data to restrict the distance an expression can travel.  */
3093 
3094   to_bb_head = XCNEWVEC (int, get_max_uid ());
3095   bb_size = XCNEWVEC (int, last_basic_block_for_fn (cfun));
3096 
3097   FOR_EACH_BB_FN (bb, cfun)
3098     {
3099       rtx_insn *insn;
3100       int to_head;
3101 
3102       to_head = 0;
3103       FOR_BB_INSNS (bb, insn)
3104 	{
3105 	  /* Don't count debug instructions to avoid them affecting
3106 	     decision choices.  */
3107 	  if (NONDEBUG_INSN_P (insn))
3108 	    to_bb_head[INSN_UID (insn)] = to_head++;
3109 	}
3110 
3111       bb_size[bb->index] = to_head;
3112     }
3113 
3114   gcc_assert (EDGE_COUNT (ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs) == 1
3115 	      && (EDGE_SUCC (ENTRY_BLOCK_PTR_FOR_FN (cfun), 0)->dest
3116 		  == ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb));
3117 
3118   from_bbs = BITMAP_ALLOC (NULL);
3119   if (flag_ira_hoist_pressure)
3120     hoisted_bbs = BITMAP_ALLOC (NULL);
3121 
3122   dom_tree_walk = get_all_dominated_blocks (CDI_DOMINATORS,
3123 					    ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb);
3124 
3125   /* Walk over each basic block looking for potentially hoistable
3126      expressions; nothing gets hoisted from the entry block.  */
3127   FOR_EACH_VEC_ELT (dom_tree_walk, dom_tree_walk_index, bb)
3128     {
3129       domby = get_dominated_to_depth (CDI_DOMINATORS, bb, MAX_HOIST_DEPTH);
3130 
3131       if (domby.length () == 0)
3132 	continue;
3133 
3134       /* Examine each expression that is very busy at the exit of this
3135 	 block.  These are the potentially hoistable expressions.  */
3136       for (i = 0; i < SBITMAP_SIZE (hoist_vbeout[bb->index]); i++)
3137 	{
3138 	  if (bitmap_bit_p (hoist_vbeout[bb->index], i))
3139 	    {
3140 	      int nregs = 0;
3141 	      enum reg_class pressure_class = NO_REGS;
3142 	      /* Current expression.  */
3143 	      struct gcse_expr *expr = index_map[i];
3144 	      /* Number of occurrences of EXPR that can be hoisted to BB.  */
3145 	      int hoistable = 0;
3146 	      /* Occurrences reachable from BB.  */
3147 	      vec<occr_t> occrs_to_hoist = vNULL;
3148 	      /* We want to insert the expression into BB only once, so
3149 		 note when we've inserted it.  */
3150 	      int insn_inserted_p;
3151 	      occr_t occr;
3152 
3153 	      /* If an expression is computed in BB and is available at end of
3154 		 BB, hoist all occurrences dominated by BB to BB.  */
3155 	      if (bitmap_bit_p (comp[bb->index], i))
3156 		{
3157 		  occr = find_occr_in_bb (expr->antic_occr, bb);
3158 
3159 		  if (occr)
3160 		    {
3161 		      /* An occurrence might've been already deleted
3162 			 while processing a dominator of BB.  */
3163 		      if (!occr->deleted_p)
3164 			{
3165 			  gcc_assert (NONDEBUG_INSN_P (occr->insn));
3166 			  hoistable++;
3167 			}
3168 		    }
3169 		  else
3170 		    hoistable++;
3171 		}
3172 
3173 	      /* We've found a potentially hoistable expression; now
3174 		 we look at every block BB dominates to see if it
3175 		 computes the expression.  */
3176 	      FOR_EACH_VEC_ELT (domby, j, dominated)
3177 		{
3178 		  HOST_WIDE_INT max_distance;
3179 
3180 		  /* Ignore self dominance.  */
3181 		  if (bb == dominated)
3182 		    continue;
3183 		  /* We've found a dominated block; now see if it computes
3184 		     the busy expression and whether or not moving that
3185 		     expression to the "beginning" of that block is safe.  */
3186 		  if (!bitmap_bit_p (antloc[dominated->index], i))
3187 		    continue;
3188 
3189 		  occr = find_occr_in_bb (expr->antic_occr, dominated);
3190 		  gcc_assert (occr);
3191 
3192 		  /* An occurrence might've been already deleted
3193 		     while processing a dominator of BB.  */
3194 		  if (occr->deleted_p)
3195 		    continue;
3196 		  gcc_assert (NONDEBUG_INSN_P (occr->insn));
3197 
3198 		  max_distance = expr->max_distance;
3199 		  if (max_distance > 0)
3200 		    /* Adjust MAX_DISTANCE to account for the fact that
3201 		       OCCR won't have to travel all of DOMINATED, but
3202 		       only part of it.  */
3203 		    max_distance += (bb_size[dominated->index]
3204 				     - to_bb_head[INSN_UID (occr->insn)]);
3205 
3206 		  pressure_class = get_pressure_class_and_nregs (occr->insn,
3207 								 &nregs);
3208 
3209 		  /* Note if the expression should be hoisted from the dominated
3210 		     block to BB if it can reach DOMINATED unimpaired.
3211 
3212 		     Keep track of how many times this expression is hoistable
3213 		     from a dominated block into BB.  */
3214 		  if (should_hoist_expr_to_dom (bb, expr, dominated, NULL,
3215 						max_distance, bb_size,
3216 						pressure_class,	&nregs,
3217 						hoisted_bbs, occr->insn))
3218 		    {
3219 		      hoistable++;
3220 		      occrs_to_hoist.safe_push (occr);
3221 		      bitmap_set_bit (from_bbs, dominated->index);
3222 		    }
3223 		}
3224 
3225 	      /* If we found more than one hoistable occurrence of this
3226 		 expression, then note it in the vector of expressions to
3227 		 hoist.  It makes no sense to hoist things which are computed
3228 		 in only one BB, and doing so tends to pessimize register
3229 		 allocation.  One could increase this value to try harder
3230 		 to avoid any possible code expansion due to register
3231 		 allocation issues; however experiments have shown that
3232 		 the vast majority of hoistable expressions are only movable
3233 		 from two successors, so raising this threshold is likely
3234 		 to nullify any benefit we get from code hoisting.  */
3235 	      if (hoistable > 1 && dbg_cnt (hoist_insn))
3236 		{
3237 		  /* If (hoistable != vec::length), then there is
3238 		     an occurrence of EXPR in BB itself.  Don't waste
3239 		     time looking for LCA in this case.  */
3240 		  if ((unsigned) hoistable == occrs_to_hoist.length ())
3241 		    {
3242 		      basic_block lca;
3243 
3244 		      lca = nearest_common_dominator_for_set (CDI_DOMINATORS,
3245 							      from_bbs);
3246 		      if (lca != bb)
3247 			/* Punt, it's better to hoist these occurrences to
3248 			   LCA.  */
3249 			occrs_to_hoist.release ();
3250 		    }
3251 		}
3252 	      else
3253 		/* Punt, no point hoisting a single occurrence.  */
3254 		occrs_to_hoist.release ();
3255 
3256 	      if (flag_ira_hoist_pressure
3257 		  && !occrs_to_hoist.is_empty ())
3258 		{
3259 		  /* Increase the register pressure of the basic blocks to
3260 		     which EXPR is hoisted, because the live range of its
3261 		     output is extended.  */
3262 		  data = BB_DATA (bb);
3263 		  data->max_reg_pressure[pressure_class] += nregs;
3264 		  EXECUTE_IF_SET_IN_BITMAP (hoisted_bbs, 0, k, bi)
3265 		    {
3266 		      data = BB_DATA (BASIC_BLOCK_FOR_FN (cfun, k));
3267 		      data->max_reg_pressure[pressure_class] += nregs;
3268 		    }
3269 		}
3270 	      else if (flag_ira_hoist_pressure)
3271 		{
3272 		  /* Restore register pressure and live_in info for basic
3273 		     blocks recorded in hoisted_bbs when expr will not be
3274 		     hoisted.  */
3275 		  EXECUTE_IF_SET_IN_BITMAP (hoisted_bbs, 0, k, bi)
3276 		    {
3277 		      data = BB_DATA (BASIC_BLOCK_FOR_FN (cfun, k));
3278 		      bitmap_copy (data->live_in, data->backup);
3279 		      data->max_reg_pressure[pressure_class]
3280 			  = data->old_pressure;
3281 		    }
3282 		}
3283 
3284 	      if (flag_ira_hoist_pressure)
3285 		bitmap_clear (hoisted_bbs);
3286 
3287 	      insn_inserted_p = 0;
3288 
3289 	      /* Walk through the occurrences of the I'th expression that
3290 		 we want to hoist to BB and make the transformations.  */
3291 	      FOR_EACH_VEC_ELT (occrs_to_hoist, j, occr)
3292 		{
3293 		  rtx_insn *insn;
3294 		  const_rtx set;
3295 
3296 		  gcc_assert (!occr->deleted_p);
3297 
3298 		  insn = occr->insn;
3299 		  set = single_set_gcse (insn);
3300 
3301 		  /* Create a pseudo-reg to store the result of reaching
3302 		     expressions into.  Get the mode for the new pseudo
3303 		     from the mode of the original destination pseudo.
3304 
3305 		     It is important to use new pseudos whenever we
3306 		     emit a set.  This will allow reload to use
3307 		     rematerialization for such registers.  */
3308 		  if (!insn_inserted_p)
3309 		    expr->reaching_reg
3310 		      = gen_reg_rtx_and_attrs (SET_DEST (set));
3311 
3312 		  gcse_emit_move_after (SET_DEST (set), expr->reaching_reg,
3313 					insn);
3314 		  delete_insn (insn);
3315 		  occr->deleted_p = 1;
3316 		  changed = 1;
3317 		  gcse_subst_count++;
3318 
3319 		  if (!insn_inserted_p)
3320 		    {
3321 		      insert_insn_end_basic_block (expr, bb);
3322 		      insn_inserted_p = 1;
3323 		    }
3324 		}
3325 
3326 	      occrs_to_hoist.release ();
3327 	      bitmap_clear (from_bbs);
3328 	    }
3329 	}
3330       domby.release ();
3331     }
3332 
3333   dom_tree_walk.release ();
3334   BITMAP_FREE (from_bbs);
3335   if (flag_ira_hoist_pressure)
3336     BITMAP_FREE (hoisted_bbs);
3337 
3338   free (bb_size);
3339   free (to_bb_head);
3340   free (index_map);
3341 
3342   return changed;
3343 }
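
/* Editor's note: a purely illustrative, source-level sketch of the
   transformation hoist_code performs; the variable names are made up and
   nothing below is part of the pass.  An expression that is very busy at
   the end of a block and anticipatable in more than one block dominated
   by it is computed once in the dominator and reused:

     before:
	if (c)
	  p = x + y;
	else
	  q = x + y;

     after (x + y hoisted into the dominating block):
	t = x + y;
	if (c)
	  p = t;
	else
	  q = t;

   The hoisted computation targets the pseudo created with
   gen_reg_rtx_and_attrs above, and each original computation is replaced
   by a move from that pseudo.  */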
3344 
3345 /* Return the pressure class of register REGNO and the number of hard
3346    registers it needs (through *NREGS).  */
3347 static enum reg_class
3348 get_regno_pressure_class (int regno, int *nregs)
3349 {
3350   if (regno >= FIRST_PSEUDO_REGISTER)
3351     {
3352       enum reg_class pressure_class;
3353 
3354       pressure_class = reg_allocno_class (regno);
3355       pressure_class = ira_pressure_class_translate[pressure_class];
3356       *nregs
3357 	= ira_reg_class_max_nregs[pressure_class][PSEUDO_REGNO_MODE (regno)];
3358       return pressure_class;
3359     }
3360   else if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno)
3361 	   && ! TEST_HARD_REG_BIT (eliminable_regset, regno))
3362     {
3363       *nregs = 1;
3364       return ira_pressure_class_translate[REGNO_REG_CLASS (regno)];
3365     }
3366   else
3367     {
3368       *nregs = 0;
3369       return NO_REGS;
3370     }
3371 }
3372 
3373 /* Return the pressure class and number of hard registers (through *NREGS)
3374    for the destination of INSN.  */
3375 static enum reg_class
3376 get_pressure_class_and_nregs (rtx_insn *insn, int *nregs)
3377 {
3378   rtx reg;
3379   enum reg_class pressure_class;
3380   const_rtx set = single_set_gcse (insn);
3381 
3382   reg = SET_DEST (set);
3383   if (GET_CODE (reg) == SUBREG)
3384     reg = SUBREG_REG (reg);
3385   if (MEM_P (reg))
3386     {
3387       *nregs = 0;
3388       pressure_class = NO_REGS;
3389     }
3390   else
3391     {
3392       gcc_assert (REG_P (reg));
3393       pressure_class = reg_allocno_class (REGNO (reg));
3394       pressure_class = ira_pressure_class_translate[pressure_class];
3395       *nregs
3396 	= ira_reg_class_max_nregs[pressure_class][GET_MODE (SET_SRC (set))];
3397     }
3398   return pressure_class;
3399 }
3400 
3401 /* Increase (if INCR_P) or decrease current register pressure for
3402    register REGNO.  */
3403 static void
3404 change_pressure (int regno, bool incr_p)
3405 {
3406   int nregs;
3407   enum reg_class pressure_class;
3408 
3409   pressure_class = get_regno_pressure_class (regno, &nregs);
3410   if (! incr_p)
3411     curr_reg_pressure[pressure_class] -= nregs;
3412   else
3413     {
3414       curr_reg_pressure[pressure_class] += nregs;
3415       if (BB_DATA (curr_bb)->max_reg_pressure[pressure_class]
3416 	  < curr_reg_pressure[pressure_class])
3417 	BB_DATA (curr_bb)->max_reg_pressure[pressure_class]
3418 	  = curr_reg_pressure[pressure_class];
3419     }
3420 }
3421 
3422 /* Calculate register pressure for each basic block by walking insns
3423    from last to first.  */
3424 static void
3425 calculate_bb_reg_pressure (void)
3426 {
3427   int i;
3428   unsigned int j;
3429   rtx_insn *insn;
3430   basic_block bb;
3431   bitmap curr_regs_live;
3432   bitmap_iterator bi;
3433 
3434 
3435   ira_setup_eliminable_regset ();
3436   curr_regs_live = BITMAP_ALLOC (&reg_obstack);
3437   FOR_EACH_BB_FN (bb, cfun)
3438     {
3439       curr_bb = bb;
3440       BB_DATA (bb)->live_in = BITMAP_ALLOC (NULL);
3441       BB_DATA (bb)->backup = BITMAP_ALLOC (NULL);
3442       bitmap_copy (BB_DATA (bb)->live_in, df_get_live_in (bb));
3443       bitmap_copy (curr_regs_live, df_get_live_out (bb));
3444       for (i = 0; i < ira_pressure_classes_num; i++)
3445 	curr_reg_pressure[ira_pressure_classes[i]] = 0;
3446       EXECUTE_IF_SET_IN_BITMAP (curr_regs_live, 0, j, bi)
3447 	change_pressure (j, true);
3448 
3449       FOR_BB_INSNS_REVERSE (bb, insn)
3450 	{
3451 	  rtx dreg;
3452 	  int regno;
3453 	  df_ref def, use;
3454 
3455 	  if (! NONDEBUG_INSN_P (insn))
3456 	    continue;
3457 
3458 	  FOR_EACH_INSN_DEF (def, insn)
3459 	    {
3460 	      dreg = DF_REF_REAL_REG (def);
3461 	      gcc_assert (REG_P (dreg));
3462 	      regno = REGNO (dreg);
3463 	      if (!(DF_REF_FLAGS (def)
3464 		    & (DF_REF_PARTIAL | DF_REF_CONDITIONAL)))
3465 		{
3466 		  if (bitmap_clear_bit (curr_regs_live, regno))
3467 		    change_pressure (regno, false);
3468 		}
3469 	    }
3470 
3471 	  FOR_EACH_INSN_USE (use, insn)
3472 	    {
3473 	      dreg = DF_REF_REAL_REG (use);
3474 	      gcc_assert (REG_P (dreg));
3475 	      regno = REGNO (dreg);
3476 	      if (bitmap_set_bit (curr_regs_live, regno))
3477 		change_pressure (regno, true);
3478 	    }
3479 	}
3480     }
3481   BITMAP_FREE (curr_regs_live);
3482 
3483   if (dump_file == NULL)
3484     return;
3485 
3486   fprintf (dump_file, "\nRegister Pressure: \n");
3487   FOR_EACH_BB_FN (bb, cfun)
3488     {
3489       fprintf (dump_file, "  Basic block %d: \n", bb->index);
3490       for (i = 0; (int) i < ira_pressure_classes_num; i++)
3491 	{
3492 	  enum reg_class pressure_class;
3493 
3494 	  pressure_class = ira_pressure_classes[i];
3495 	  if (BB_DATA (bb)->max_reg_pressure[pressure_class] == 0)
3496 	    continue;
3497 
3498 	  fprintf (dump_file, "    %s=%d\n", reg_class_names[pressure_class],
3499 		   BB_DATA (bb)->max_reg_pressure[pressure_class]);
3500 	}
3501     }
3502   fprintf (dump_file, "\n");
3503 }
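
/* Editor's note: an illustrative sketch of the dump emitted by
   calculate_bb_reg_pressure when dump_file is set; the block numbers,
   class names and pressure values are made up, only the layout follows
   the fprintf calls above:

     Register Pressure:
       Basic block 2:
         GENERAL_REGS=3
         FLOAT_REGS=1
       Basic block 3:
         GENERAL_REGS=2

   (classes with a zero maximum pressure are skipped).  */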
3504 
3505 /* Top level routine to perform one code hoisting (aka unification) pass.
3506 
3507    Return nonzero if a change was made.  */
3508 
3509 static int
3510 one_code_hoisting_pass (void)
3511 {
3512   int changed = 0;
3513 
3514   gcse_subst_count = 0;
3515   gcse_create_count = 0;
3516 
3517   /* Return if there's nothing to do, or it is too expensive.  */
3518   if (n_basic_blocks_for_fn (cfun) <= NUM_FIXED_BLOCKS + 1
3519       || gcse_or_cprop_is_too_expensive (_("GCSE disabled")))
3520     return 0;
3521 
3522   doing_code_hoisting_p = true;
3523 
3524   /* Calculate register pressure for each basic block.  */
3525   if (flag_ira_hoist_pressure)
3526     {
3527       regstat_init_n_sets_and_refs ();
3528       ira_set_pseudo_classes (false, dump_file);
3529       alloc_aux_for_blocks (sizeof (struct bb_data));
3530       calculate_bb_reg_pressure ();
3531       regstat_free_n_sets_and_refs ();
3532     }
3533 
3534   /* We need alias analysis.  */
3535   init_alias_analysis ();
3536 
3537   bytes_used = 0;
3538   gcc_obstack_init (&gcse_obstack);
3539   alloc_gcse_mem ();
3540 
3541   alloc_hash_table (&expr_hash_table);
3542   compute_hash_table (&expr_hash_table);
3543   if (dump_file)
3544     dump_hash_table (dump_file, "Code Hoisting Expressions", &expr_hash_table);
3545 
3546   if (expr_hash_table.n_elems > 0)
3547     {
3548       alloc_code_hoist_mem (last_basic_block_for_fn (cfun),
3549 			    expr_hash_table.n_elems);
3550       compute_code_hoist_data ();
3551       changed = hoist_code ();
3552       free_code_hoist_mem ();
3553     }
3554 
3555   if (flag_ira_hoist_pressure)
3556     {
3557       free_aux_for_blocks ();
3558       free_reg_info ();
3559     }
3560   free_hash_table (&expr_hash_table);
3561   free_gcse_mem ();
3562   obstack_free (&gcse_obstack, NULL);
3563 
3564   /* We are finished with alias analysis.  */
3565   end_alias_analysis ();
3566 
3567   if (dump_file)
3568     {
3569       fprintf (dump_file, "HOIST of %s, %d basic blocks, %d bytes needed, ",
3570 	       current_function_name (), n_basic_blocks_for_fn (cfun),
3571 	       bytes_used);
3572       fprintf (dump_file, "%d substs, %d insns created\n",
3573 	       gcse_subst_count, gcse_create_count);
3574     }
3575 
3576   doing_code_hoisting_p = false;
3577 
3578   return changed;
3579 }
3580 
3581 /*  Here we provide the things required to do store motion towards the exit.
3582     In order for this to be effective, gcse also needed to be taught how to
3583     move a load when it is killed only by a store to itself.
3584 
3585 	    int i;
3586 	    float a[10];
3587 
3588 	    void foo(float scale)
3589 	    {
3590 	      for (i=0; i<10; i++)
3591 		a[i] *= scale;
3592 	    }
3593 
3594     'i' is both loaded and stored to in the loop. Normally, gcse cannot move
3595     the load out since it's live around the loop and is stored at the bottom
3596     of the loop.
3597 
3598       The 'Load Motion' referred to and implemented in this file is
3599     an enhancement to gcse which, when using edge-based LCM, recognizes
3600     this situation and allows gcse to move the load out of the loop.
3601 
3602       Once gcse has hoisted the load, store motion can then push the
3603     store towards the exit, and we end up with no loads or stores of 'i'
3604     in the loop.  */
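
/*  Editor's note: a purely illustrative sketch of the combined effect
    described above; 'i_tmp' is a made-up temporary, not something the
    passes introduce by that name.  Once the load of 'i' has been moved
    and the store pushed towards the exit, the loop behaves as if it had
    been written:

	    void foo(float scale)
	    {
	      int i_tmp;
	      for (i_tmp=0; i_tmp<10; i_tmp++)
		a[i_tmp] *= scale;
	      i = i_tmp;
	    }

    leaving no loads or stores of 'i' inside the loop.  */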
3605 
3606 /* This will search the ldst list for a matching expression. If it
3607    doesn't find one, we create one and initialize it.  */
3608 
3609 static struct ls_expr *
3610 ldst_entry (rtx x)
3611 {
3612   int do_not_record_p = 0;
3613   struct ls_expr * ptr;
3614   unsigned int hash;
3615   ls_expr **slot;
3616   struct ls_expr e;
3617 
3618   hash = hash_rtx (x, GET_MODE (x), &do_not_record_p,
3619 		   NULL,  /*have_reg_qty=*/false);
3620 
3621   e.pattern = x;
3622   slot = pre_ldst_table->find_slot_with_hash (&e, hash, INSERT);
3623   if (*slot)
3624     return *slot;
3625 
3626   ptr = XNEW (struct ls_expr);
3627 
3628   ptr->next         = pre_ldst_mems;
3629   ptr->expr         = NULL;
3630   ptr->pattern      = x;
3631   ptr->pattern_regs = NULL_RTX;
3632   ptr->stores.create (0);
3633   ptr->reaching_reg = NULL_RTX;
3634   ptr->invalid      = 0;
3635   ptr->index        = 0;
3636   ptr->hash_index   = hash;
3637   pre_ldst_mems     = ptr;
3638   *slot = ptr;
3639 
3640   return ptr;
3641 }
3642 
3643 /* Free up an individual ldst entry.  */
3644 
3645 static void
3646 free_ldst_entry (struct ls_expr * ptr)
3647 {
3648   ptr->stores.release ();
3649 
3650   free (ptr);
3651 }
3652 
3653 /* Free up all memory associated with the ldst list.  */
3654 
3655 static void
3656 free_ld_motion_mems (void)
3657 {
3658   delete pre_ldst_table;
3659   pre_ldst_table = NULL;
3660 
3661   while (pre_ldst_mems)
3662     {
3663       struct ls_expr * tmp = pre_ldst_mems;
3664 
3665       pre_ldst_mems = pre_ldst_mems->next;
3666 
3667       free_ldst_entry (tmp);
3668     }
3669 
3670   pre_ldst_mems = NULL;
3671 }
3672 
3673 /* Dump debugging info about the ldst list.  */
3674 
3675 static void
3676 print_ldst_list (FILE * file)
3677 {
3678   struct ls_expr * ptr;
3679 
3680   fprintf (file, "LDST list: \n");
3681 
3682   for (ptr = pre_ldst_mems; ptr != NULL; ptr = ptr->next)
3683     {
3684       fprintf (file, "  Pattern (%3d): ", ptr->index);
3685 
3686       print_rtl (file, ptr->pattern);
3687 
3688       fprintf (file, "\n	Stores : ");
3689       print_rtx_insn_vec (file, ptr->stores);
3690 
3691       fprintf (file, "\n\n");
3692     }
3693 
3694   fprintf (file, "\n");
3695 }
3696 
3697 /* Return X's entry in the list of ldst only expressions, or NULL.  */
3698 
3699 static struct ls_expr *
3700 find_rtx_in_ldst (rtx x)
3701 {
3702   struct ls_expr e;
3703   ls_expr **slot;
3704   if (!pre_ldst_table)
3705     return NULL;
3706   e.pattern = x;
3707   slot = pre_ldst_table->find_slot (&e, NO_INSERT);
3708   if (!slot || (*slot)->invalid)
3709     return NULL;
3710   return *slot;
3711 }
3712 
3713 /* Load Motion for loads which only kill themselves.  */
3714 
3715 /* Return true if X, a MEM, is a simple access with no side effects.
3716    These are the types of loads we consider for the ld_motion list;
3717    otherwise we let the usual aliasing take care of it.  */
3718 
3719 static int
3720 simple_mem (const_rtx x)
3721 {
3722   if (MEM_VOLATILE_P (x))
3723     return 0;
3724 
3725   if (GET_MODE (x) == BLKmode)
3726     return 0;
3727 
3728   /* If we are handling exceptions, we must be careful with memory references
3729      that may trap.  If we are not, the behavior is undefined, so we may just
3730      continue.  */
3731   if (cfun->can_throw_non_call_exceptions && may_trap_p (x))
3732     return 0;
3733 
3734   if (side_effects_p (x))
3735     return 0;
3736 
3737   /* Do not consider function arguments passed on the stack.  */
3738   if (reg_mentioned_p (stack_pointer_rtx, x))
3739     return 0;
3740 
3741   if (flag_float_store && FLOAT_MODE_P (GET_MODE (x)))
3742     return 0;
3743 
3744   return 1;
3745 }
3746 
3747 /* Make sure there isn't a buried reference in this pattern anywhere.
3748    If there is, invalidate the entry for it since we're not capable
3749    of fixing it up just yet.  We have to be sure we know about ALL
3750    loads since the aliasing code will allow all entries in the
3751    ld_motion list to not-alias themselves.  If we miss a load, we will
3752    get the wrong value since gcse might common it and we won't know to
3753    fix it up.  */
3754 
3755 static void
3756 invalidate_any_buried_refs (rtx x)
3757 {
3758   const char * fmt;
3759   int i, j;
3760   struct ls_expr * ptr;
3761 
3762   /* Invalidate it in the list.  */
3763   if (MEM_P (x) && simple_mem (x))
3764     {
3765       ptr = ldst_entry (x);
3766       ptr->invalid = 1;
3767     }
3768 
3769   /* Recursively process the insn.  */
3770   fmt = GET_RTX_FORMAT (GET_CODE (x));
3771 
3772   for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
3773     {
3774       if (fmt[i] == 'e')
3775 	invalidate_any_buried_refs (XEXP (x, i));
3776       else if (fmt[i] == 'E')
3777 	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3778 	  invalidate_any_buried_refs (XVECEXP (x, i, j));
3779     }
3780 }
3781 
3782 /* Find all the 'simple' MEMs which are used in LOADs and STORES.  'Simple'
3783    means MEM loads and stores to symbols, with no side effects and no
3784    registers in the expression.  For a MEM destination, we also
3785    check that the insn is still valid if we replace the destination with a
3786    REG, as is done in update_ld_motion_stores.  If there are any uses/defs
3787    which don't match this criteria, they are invalidated and trimmed out
3788    later.  */
3789 
3790 static void
3791 compute_ld_motion_mems (void)
3792 {
3793   struct ls_expr * ptr;
3794   basic_block bb;
3795   rtx_insn *insn;
3796 
3797   pre_ldst_mems = NULL;
3798   pre_ldst_table = new hash_table<pre_ldst_expr_hasher> (13);
3799 
3800   FOR_EACH_BB_FN (bb, cfun)
3801     {
3802       FOR_BB_INSNS (bb, insn)
3803 	{
3804 	  if (NONDEBUG_INSN_P (insn))
3805 	    {
3806 	      if (GET_CODE (PATTERN (insn)) == SET)
3807 		{
3808 		  rtx src = SET_SRC (PATTERN (insn));
3809 		  rtx dest = SET_DEST (PATTERN (insn));
3810 
3811 		  /* Check for a simple load.  */
3812 		  if (MEM_P (src) && simple_mem (src))
3813 		    {
3814 		      ptr = ldst_entry (src);
3815 		      if (!REG_P (dest))
3816 			ptr->invalid = 1;
3817 		    }
3818 		  else
3819 		    {
3820 		      /* Make sure there isn't a buried load somewhere.  */
3821 		      invalidate_any_buried_refs (src);
3822 		    }
3823 
3824 		  /* Check for a simple load through a REG_EQUAL note.  */
3825 		  rtx note = find_reg_equal_equiv_note (insn), src_eq;
3826 		  if (note
3827 		      && REG_NOTE_KIND (note) == REG_EQUAL
3828 		      && (src_eq = XEXP (note, 0))
3829 		      && !(MEM_P (src_eq) && simple_mem (src_eq)))
3830 		    invalidate_any_buried_refs (src_eq);
3831 
3832 		  /* Check for stores. Don't worry about aliased ones; they
3833 		     will block any movement we might do later. We only care
3834 		     about this exact pattern since it is the only
3835 		     circumstance in which we ignore the aliasing info.  */
3836 		  if (MEM_P (dest) && simple_mem (dest))
3837 		    {
3838 		      ptr = ldst_entry (dest);
3839 		      machine_mode src_mode = GET_MODE (src);
3840 		      if (! MEM_P (src)
3841 			  && GET_CODE (src) != ASM_OPERANDS
3842 			  /* Check for REG manually since want_to_gcse_p
3843 			     returns 0 for all REGs.  */
3844 			  && can_assign_to_reg_without_clobbers_p (src,
3845 								    src_mode))
3846 			ptr->stores.safe_push (insn);
3847 		      else
3848 			ptr->invalid = 1;
3849 		    }
3850 		}
3851 	      else
3852 		{
3853 		  /* Invalidate all MEMs in the pattern and...  */
3854 		  invalidate_any_buried_refs (PATTERN (insn));
3855 
3856 		  /* ...in REG_EQUAL notes for PARALLELs with single SET.  */
3857 		  rtx note = find_reg_equal_equiv_note (insn), src_eq;
3858 		  if (note
3859 		      && REG_NOTE_KIND (note) == REG_EQUAL
3860 		      && (src_eq = XEXP (note, 0)))
3861 		    invalidate_any_buried_refs (src_eq);
3862 		}
3863 	    }
3864 	}
3865     }
3866 }
3867 
3868 /* Remove any references that have been either invalidated or are not in the
3869    expression list for pre gcse.  */
3870 
3871 static void
3872 trim_ld_motion_mems (void)
3873 {
3874   struct ls_expr * * last = & pre_ldst_mems;
3875   struct ls_expr * ptr = pre_ldst_mems;
3876 
3877   while (ptr != NULL)
3878     {
3879       struct gcse_expr * expr;
3880 
3881       /* Delete if entry has been made invalid.  */
3882       if (! ptr->invalid)
3883 	{
3884 	  /* Delete if we cannot find this mem in the expression list.  */
3885 	  unsigned int hash = ptr->hash_index % expr_hash_table.size;
3886 
3887 	  for (expr = expr_hash_table.table[hash];
3888 	       expr != NULL;
3889 	       expr = expr->next_same_hash)
3890 	    if (expr_equiv_p (expr->expr, ptr->pattern))
3891 	      break;
3892 	}
3893       else
3894 	expr = (struct gcse_expr *) 0;
3895 
3896       if (expr)
3897 	{
3898 	  /* Set the expression field if we are keeping it.  */
3899 	  ptr->expr = expr;
3900 	  last = & ptr->next;
3901 	  ptr = ptr->next;
3902 	}
3903       else
3904 	{
3905 	  *last = ptr->next;
3906 	  pre_ldst_table->remove_elt_with_hash (ptr, ptr->hash_index);
3907 	  free_ldst_entry (ptr);
3908 	  ptr = * last;
3909 	}
3910     }
3911 
3912   /* Show the world what we've found.  */
3913   if (dump_file && pre_ldst_mems != NULL)
3914     print_ldst_list (dump_file);
3915 }
3916 
3917 /* This routine will take an expression which we are replacing with
3918    a reaching register, and update any stores that are needed if
3919    that expression is in the ld_motion list.  Stores are updated by
3920    copying their SRC to the reaching register, and then storing
3921    the reaching register into the store location.  This keeps the
3922    correct value in the reaching register for the loads.  */
3923 
3924 static void
3925 update_ld_motion_stores (struct gcse_expr * expr)
3926 {
3927   struct ls_expr * mem_ptr;
3928 
3929   if ((mem_ptr = find_rtx_in_ldst (expr->expr)))
3930     {
3931       /* We can try to find just the REACHED stores, but it shouldn't
3932 	 matter to set the reaching reg everywhere...  some might be
3933 	 dead and should be eliminated later.  */
3934 
3935       /* We replace (set mem expr) with (set reg expr) (set mem reg)
3936 	 where reg is the reaching reg used in the load.  We checked in
3937 	 compute_ld_motion_mems that we can replace (set mem expr) with
3938 	 (set reg expr) in that insn.  */
3939       rtx_insn *insn;
3940       unsigned int i;
3941       FOR_EACH_VEC_ELT_REVERSE (mem_ptr->stores, i, insn)
3942 	{
3943 	  rtx pat = PATTERN (insn);
3944 	  rtx src = SET_SRC (pat);
3945 	  rtx reg = expr->reaching_reg;
3946 
3947 	  /* If we've already copied it, continue.  */
3948 	  if (expr->reaching_reg == src)
3949 	    continue;
3950 
3951 	  if (dump_file)
3952 	    {
3953 	      fprintf (dump_file, "PRE:  store updated with reaching reg ");
3954 	      print_rtl (dump_file, reg);
3955 	      fprintf (dump_file, ":\n	");
3956 	      print_inline_rtx (dump_file, insn, 8);
3957 	      fprintf (dump_file, "\n");
3958 	    }
3959 
3960 	  rtx_insn *copy = gen_move_insn (reg, copy_rtx (SET_SRC (pat)));
3961 	  emit_insn_before (copy, insn);
3962 	  SET_SRC (pat) = reg;
3963 	  df_insn_rescan (insn);
3964 
3965 	  /* Un-recognize this pattern since it's probably different now.  */
3966 	  INSN_CODE (insn) = -1;
3967 	  gcse_create_count++;
3968 	}
3969     }
3970 }
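
/* Editor's note: an illustrative sketch of the store rewrite performed by
   update_ld_motion_stores; the operands and pseudo-register numbers are
   made up.  A store of the expression

     (set (mem:SI (symbol_ref ("i"))) (plus:SI (reg:SI 90) (const_int 1)))

   becomes a copy into the reaching register followed by a store of that
   register:

     (set (reg:SI 95) (plus:SI (reg:SI 90) (const_int 1)))
     (set (mem:SI (symbol_ref ("i"))) (reg:SI 95))

   where (reg:SI 95) stands for EXPR->reaching_reg, so loads that were
   rewritten to use the reaching register still see the stored value.  */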
3971 
3972 /* Return true if the graph is too expensive to optimize. PASS is the
3973    optimization about to be performed.  */
3974 
3975 bool
3976 gcse_or_cprop_is_too_expensive (const char *pass)
3977 {
3978   unsigned int memory_request = (n_basic_blocks_for_fn (cfun)
3979 				 * SBITMAP_SET_SIZE (max_reg_num ())
3980 				 * sizeof (SBITMAP_ELT_TYPE));
3981 
3982   /* Trying to perform global optimizations on flow graphs which have
3983      a high connectivity will take a long time and is unlikely to be
3984      particularly useful.
3985 
3986      In normal circumstances a cfg should have about twice as many
3987      edges as blocks.  But we do not want to punish small functions
3988      which have a couple of switch statements.  Rather than simply
3989      thresholding the number of blocks, use something with more
3990      graceful degradation.  */
3991   if (n_edges_for_fn (cfun) > 20000 + n_basic_blocks_for_fn (cfun) * 4)
3992     {
3993       warning (OPT_Wdisabled_optimization,
3994 	       "%s: %d basic blocks and %d edges/basic block",
3995 	       pass, n_basic_blocks_for_fn (cfun),
3996 	       n_edges_for_fn (cfun) / n_basic_blocks_for_fn (cfun));
3997 
3998       return true;
3999     }
4000 
4001   /* If allocating memory for the dataflow bitmaps would take up too much
4002      storage it's better just to disable the optimization.  */
4003   if (memory_request > MAX_GCSE_MEMORY)
4004     {
4005       warning (OPT_Wdisabled_optimization,
4006 	       "%s: %d basic blocks and %d registers; increase --param max-gcse-memory above %d",
4007 	       pass, n_basic_blocks_for_fn (cfun), max_reg_num (),
4008 	       memory_request);
4009 
4010       return true;
4011     }
4012 
4013   return false;
4014 }
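
/* Editor's note: a worked instance of the memory estimate above, assuming
   a 64-bit host where SBITMAP_ELT_TYPE is an 8-byte word, so that
   SBITMAP_SET_SIZE (n) is n / 64 rounded up.  For a hypothetical function
   with 2000 basic blocks and 50000 registers:

     memory_request ~ 2000 * ceil (50000 / 64) * 8
		    ~ 2000 * 782 * 8
		    ~ 12.5 MB

   and the pass is only skipped when this estimate exceeds MAX_GCSE_MEMORY
   (the value of --param max-gcse-memory).  */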
4015 
4016 static unsigned int
4017 execute_rtl_pre (void)
4018 {
4019   int changed;
4020   delete_unreachable_blocks ();
4021   df_analyze ();
4022   changed = one_pre_gcse_pass ();
4023   flag_rerun_cse_after_global_opts |= changed;
4024   if (changed)
4025     cleanup_cfg (0);
4026   return 0;
4027 }
4028 
4029 static unsigned int
4030 execute_rtl_hoist (void)
4031 {
4032   int changed;
4033   delete_unreachable_blocks ();
4034   df_analyze ();
4035   changed = one_code_hoisting_pass ();
4036   flag_rerun_cse_after_global_opts |= changed;
4037   if (changed)
4038     cleanup_cfg (0);
4039   return 0;
4040 }
4041 
4042 namespace {
4043 
4044 const pass_data pass_data_rtl_pre =
4045 {
4046   RTL_PASS, /* type */
4047   "rtl pre", /* name */
4048   OPTGROUP_NONE, /* optinfo_flags */
4049   TV_PRE, /* tv_id */
4050   PROP_cfglayout, /* properties_required */
4051   0, /* properties_provided */
4052   0, /* properties_destroyed */
4053   0, /* todo_flags_start */
4054   TODO_df_finish, /* todo_flags_finish */
4055 };
4056 
4057 class pass_rtl_pre : public rtl_opt_pass
4058 {
4059 public:
4060   pass_rtl_pre (gcc::context *ctxt)
4061     : rtl_opt_pass (pass_data_rtl_pre, ctxt)
4062   {}
4063 
4064   /* opt_pass methods: */
4065   virtual bool gate (function *);
4066   virtual unsigned int execute (function *) { return execute_rtl_pre (); }
4067 
4068 }; // class pass_rtl_pre
4069 
4070 /* We do not construct an accurate cfg in functions which call
4071    setjmp, so none of these passes runs if the function calls
4072    setjmp.
4073    FIXME: Should just handle setjmp via REG_SETJMP notes.  */
4074 
4075 bool
4076 pass_rtl_pre::gate (function *fun)
4077 {
4078   return optimize > 0 && flag_gcse
4079     && !fun->calls_setjmp
4080     && optimize_function_for_speed_p (fun)
4081     && dbg_cnt (pre);
4082 }
4083 
4084 } // anon namespace
4085 
4086 rtl_opt_pass *
4087 make_pass_rtl_pre (gcc::context *ctxt)
4088 {
4089   return new pass_rtl_pre (ctxt);
4090 }
4091 
4092 namespace {
4093 
4094 const pass_data pass_data_rtl_hoist =
4095 {
4096   RTL_PASS, /* type */
4097   "hoist", /* name */
4098   OPTGROUP_NONE, /* optinfo_flags */
4099   TV_HOIST, /* tv_id */
4100   PROP_cfglayout, /* properties_required */
4101   0, /* properties_provided */
4102   0, /* properties_destroyed */
4103   0, /* todo_flags_start */
4104   TODO_df_finish, /* todo_flags_finish */
4105 };
4106 
4107 class pass_rtl_hoist : public rtl_opt_pass
4108 {
4109 public:
4110   pass_rtl_hoist (gcc::context *ctxt)
4111     : rtl_opt_pass (pass_data_rtl_hoist, ctxt)
4112   {}
4113 
4114   /* opt_pass methods: */
4115   virtual bool gate (function *);
4116   virtual unsigned int execute (function *) { return execute_rtl_hoist (); }
4117 
4118 }; // class pass_rtl_hoist
4119 
4120 bool
4121 pass_rtl_hoist::gate (function *)
4122 {
4123   return optimize > 0 && flag_gcse
4124     && !cfun->calls_setjmp
4125     /* It does not make sense to run code hoisting unless we are optimizing
4126        for code size -- it rarely makes programs faster, and can make them
4127        bigger if we did PRE (when optimizing for space, we don't run PRE).  */
4128     && optimize_function_for_size_p (cfun)
4129     && dbg_cnt (hoist);
4130 }
4131 
4132 } // anon namespace
4133 
4134 rtl_opt_pass *
4135 make_pass_rtl_hoist (gcc::context *ctxt)
4136 {
4137   return new pass_rtl_hoist (ctxt);
4138 }
4139 
4140 /* Reset all state within gcse.c so that we can rerun the compiler
4141    within the same process.  For use by toplev::finalize.  */
4142 
4143 void
4144 gcse_c_finalize (void)
4145 {
4146   test_insn = NULL;
4147 }
4148 
4149 #include "gt-gcse.h"
4150