1 /* Partial redundancy elimination / Hoisting for RTL.
2    Copyright (C) 1997-2018 Free Software Foundation, Inc.
3 
4 This file is part of GCC.
5 
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10 
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
14 for more details.
15 
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3.  If not see
18 <http://www.gnu.org/licenses/>.  */
19 
20 /* TODO
21    - reordering of memory allocation and freeing to be more space efficient
22    - calc rough register pressure information and use the info to drive all
23      kinds of code motion (including code hoisting) in a unified way.
24 */
25 
26 /* References searched while implementing this.
27 
28    Compilers: Principles, Techniques and Tools
29    Aho, Sethi, Ullman
30    Addison-Wesley, 1988
31 
32    Global Optimization by Suppression of Partial Redundancies
33    E. Morel, C. Renvoise
34    Communications of the ACM, Vol. 22, Num. 2, Feb. 1979
35 
36    A Portable Machine-Independent Global Optimizer - Design and Measurements
37    Frederick Chow
38    Stanford Ph.D. thesis, Dec. 1983
39 
40    A Fast Algorithm for Code Movement Optimization
41    D.M. Dhamdhere
42    SIGPLAN Notices, Vol. 23, Num. 10, Oct. 1988
43 
44    A Solution to a Problem with Morel and Renvoise's
45    Global Optimization by Suppression of Partial Redundancies
46    K-H Drechsler, M.P. Stadel
47    ACM TOPLAS, Vol. 10, Num. 4, Oct. 1988
48 
49    Practical Adaptation of the Global Optimization
50    Algorithm of Morel and Renvoise
51    D.M. Dhamdhere
52    ACM TOPLAS, Vol. 13, Num. 2. Apr. 1991
53 
54    Efficiently Computing Static Single Assignment Form and the Control
55    Dependence Graph
56    R. Cytron, J. Ferrante, B.K. Rosen, M.N. Wegman, and F.K. Zadeck
57    ACM TOPLAS, Vol. 13, Num. 4, Oct. 1991
58 
59    Lazy Code Motion
60    J. Knoop, O. Ruthing, B. Steffen
61    ACM SIGPLAN Notices Vol. 27, Num. 7, Jul. 1992, '92 Conference on PLDI
62 
63    What's In a Region?  Or Computing Control Dependence Regions in Near-Linear
64    Time for Reducible Control Flow
65    Thomas Ball
66    ACM Letters on Programming Languages and Systems,
67    Vol. 2, Num. 1-4, Mar-Dec 1993
68 
69    An Efficient Representation for Sparse Sets
70    Preston Briggs, Linda Torczon
71    ACM Letters on Programming Languages and Systems,
72    Vol. 2, Num. 1-4, Mar-Dec 1993
73 
74    A Variation of Knoop, Ruthing, and Steffen's Lazy Code Motion
75    K-H Drechsler, M.P. Stadel
76    ACM SIGPLAN Notices, Vol. 28, Num. 5, May 1993
77 
78    Partial Dead Code Elimination
79    J. Knoop, O. Ruthing, B. Steffen
80    ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994
81 
82    Effective Partial Redundancy Elimination
83    P. Briggs, K.D. Cooper
84    ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994
85 
86    The Program Structure Tree: Computing Control Regions in Linear Time
87    R. Johnson, D. Pearson, K. Pingali
88    ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994
89 
90    Optimal Code Motion: Theory and Practice
91    J. Knoop, O. Ruthing, B. Steffen
92    ACM TOPLAS, Vol. 16, Num. 4, Jul. 1994
93 
94    The power of assignment motion
95    J. Knoop, O. Ruthing, B. Steffen
96    ACM SIGPLAN Notices Vol. 30, Num. 6, Jun. 1995, '95 Conference on PLDI
97 
98    Global code motion / global value numbering
99    C. Click
100    ACM SIGPLAN Notices Vol. 30, Num. 6, Jun. 1995, '95 Conference on PLDI
101 
102    Value Driven Redundancy Elimination
103    L.T. Simpson
104    Rice University Ph.D. thesis, Apr. 1996
105 
106    Value Numbering
107    L.T. Simpson
108    Massively Scalar Compiler Project, Rice University, Sep. 1996
109 
110    High Performance Compilers for Parallel Computing
111    Michael Wolfe
112    Addison-Wesley, 1996
113 
114    Advanced Compiler Design and Implementation
115    Steven Muchnick
116    Morgan Kaufmann, 1997
117 
118    Building an Optimizing Compiler
119    Robert Morgan
120    Digital Press, 1998
121 
122    People wishing to speed up the code here should read:
123      Elimination Algorithms for Data Flow Analysis
124      B.G. Ryder, M.C. Paull
125      ACM Computing Surveys, Vol. 18, Num. 3, Sep. 1986
126 
127      How to Analyze Large Programs Efficiently and Informatively
128      D.M. Dhamdhere, B.K. Rosen, F.K. Zadeck
129      ACM SIGPLAN Notices Vol. 27, Num. 7, Jul. 1992, '92 Conference on PLDI
130 
131    People wishing to do something different can find various possibilities
132    in the above papers and elsewhere.
133 */
134 
135 #include "config.h"
136 #include "system.h"
137 #include "coretypes.h"
138 #include "backend.h"
139 #include "target.h"
140 #include "rtl.h"
141 #include "tree.h"
142 #include "predict.h"
143 #include "df.h"
144 #include "memmodel.h"
145 #include "tm_p.h"
146 #include "insn-config.h"
147 #include "print-rtl.h"
148 #include "regs.h"
149 #include "ira.h"
150 #include "recog.h"
151 #include "diagnostic-core.h"
152 #include "cfgrtl.h"
153 #include "cfganal.h"
154 #include "lcm.h"
155 #include "cfgcleanup.h"
156 #include "expr.h"
157 #include "params.h"
158 #include "intl.h"
159 #include "tree-pass.h"
160 #include "dbgcnt.h"
161 #include "gcse.h"
162 #include "gcse-common.h"
163 
164 /* We support GCSE via Partial Redundancy Elimination.  PRE optimizations
165    are a superset of those done by classic GCSE.
166 
167    Two passes of copy/constant propagation are done around PRE or hoisting
168    because the first one enables more GCSE and the second one helps to clean
169    up the copies that PRE and HOIST create.  This is needed more for PRE than
170    for HOIST because code hoisting will try to use an existing register
171    containing the common subexpression rather than create a new one.  This is
172    harder to do for PRE because of the code motion (which HOIST doesn't do).
173 
174    Expressions we are interested in GCSE-ing are of the form
175    (set (pseudo-reg) (expression)).
176    Function want_to_gcse_p says what these are.
177 
178    In addition, expressions in REG_EQUAL notes are candidates for GCSE-ing.
179    This allows PRE to hoist expressions that are expressed in multiple insns,
180    such as complex address calculations (e.g. for PIC code, or loads with a
181    high part and a low part).
182 
183    PRE handles moving invariant expressions out of loops (by treating them as
184    partially redundant).
185 
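   As a schematic illustration (not taken from any particular target), a
   typical candidate expression looks like

     (set (reg:SI 100) (plus:SI (reg:SI 101) (const_int 4)))

   and an address computed by a two-insn high/lo_sum sequence, e.g.

     (set (reg:SI 200) (high:SI (symbol_ref "sym")))
     (set (reg:SI 201) (lo_sum:SI (reg:SI 200) (symbol_ref "sym")))

   can still be a candidate on many targets, because the second insn usually
   carries a REG_EQUAL note equal to (symbol_ref "sym"), and it is that note
   which is treated as the candidate expression.
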
186    **********************
187 
188    We used to support multiple passes but there are diminishing returns in
189    doing so.  The first pass usually makes 90% of the changes that are doable.
190    A second pass can make a few more changes made possible by the first pass.
191    Experiments show any further passes don't make enough changes to justify
192    the expense.
193 
194    A study of spec92 using an unlimited number of passes:
195    [1 pass] = 1208 substitutions, [2] = 577, [3] = 202, [4] = 192, [5] = 83,
196    [6] = 34, [7] = 17, [8] = 9, [9] = 4, [10] = 4, [11] = 2,
197    [12] = 2, [13] = 1, [15] = 1, [16] = 2, [41] = 1
198 
199    It was found that doing copy propagation between each pass enables further
200    substitutions.
201 
202    This study was done before expressions in REG_EQUAL notes were added as
203    candidate expressions for optimization, and before the GIMPLE optimizers
204    were added.  Probably, multiple passes are even less efficient now than
205    at the time when the study was conducted.
206 
207    PRE is quite expensive in complicated functions because the DFA can take
208    a while to converge.  Hence we only perform one pass.
209 
210    **********************
211 
212    The steps for PRE are:
213 
214    1) Build the hash table of expressions we wish to GCSE (expr_hash_table).
215 
216    2) Perform the data flow analysis for PRE.
217 
218    3) Delete the redundant instructions.
219 
220    4) Insert the required copies [if any] that make the partially
221       redundant instructions fully redundant.
222 
223    5) For other reaching expressions, insert an instruction to copy the value
224       to a newly created pseudo that will reach the redundant instruction.
225 
226    The deletion is done first so that when we do insertions we
227    know which pseudo reg to use.
228 
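   A schematic example (hypothetical source, only to illustrate steps 3-5):

     if (p)                     if (p)
       x = a + b;                 { t = a + b; x = t; }
     else              ==>      else
       other ();                  { other (); t = a + b; }
     y = a + b;                 y = t;

   The computation of a + b feeding Y is partially redundant: it is redundant
   when control arrives from the THEN arm but not from the ELSE arm.  Deletion
   removes the computation feeding Y, the inserted copies make the value of
   a + b available in the new pseudo T on every incoming path, and Y is
   rewritten to use T.
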
229    Various papers have argued that PRE DFA is expensive (O(n^2)) and others
230    argue it is not.  The number of iterations for the algorithm to converge
231    is typically 2-4 so I don't view it as that expensive (relatively speaking).
232 
233    PRE GCSE depends heavily on the second CPROP pass to clean up the copies
234    we create.  To make an expression reach the place where it's redundant,
235    the result of the expression is copied to a new register, and the redundant
236    expression is deleted by replacing it with this new register.  Classic GCSE
237    doesn't have this problem as much, because it computes the reaching defs of
238    each register in each block and thus can try to use an existing
239    register.  */
240 
241 /* GCSE global vars.  */
242 
243 struct target_gcse default_target_gcse;
244 #if SWITCHABLE_TARGET
245 struct target_gcse *this_target_gcse = &default_target_gcse;
246 #endif
247 
248 /* Set to non-zero if CSE should run after all GCSE optimizations are done.  */
249 int flag_rerun_cse_after_global_opts;
250 
251 /* An obstack for our working variables.  */
252 static struct obstack gcse_obstack;
253 
254 /* Hash table of expressions.  */
255 
256 struct gcse_expr
257 {
258   /* The expression.  */
259   rtx expr;
260   /* Index in the available expression bitmaps.  */
261   int bitmap_index;
262   /* Next entry with the same hash.  */
263   struct gcse_expr *next_same_hash;
264   /* List of anticipatable occurrences in basic blocks in the function.
265      An "anticipatable occurrence" is one that is the first occurrence in the
266      basic block, the operands are not modified in the basic block prior
267      to the occurrence and the output is not used between the start of
268      the block and the occurrence.  */
269   struct gcse_occr *antic_occr;
270   /* List of available occurrences in basic blocks in the function.
271      An "available occurrence" is one that is the last occurrence in the
272      basic block and the operands are not modified by following statements in
273      the basic block [including this insn].  */
274   struct gcse_occr *avail_occr;
275   /* Non-null if the computation is PRE redundant.
276      The value is the newly created pseudo-reg to record a copy of the
277      expression in all the places that reach the redundant copy.  */
278   rtx reaching_reg;
279   /* Maximum distance in instructions this expression can travel.
280      We avoid moving simple expressions for more than a few instructions
281      to keep register pressure under control.
282      A value of "0" removes restrictions on how far the expression can
283      travel.  */
284   HOST_WIDE_INT max_distance;
285 };
286 
287 /* Occurrence of an expression.
288    There is at most one per basic block.  If a pattern appears more than once, the
289    last appearance is used [or first for anticipatable expressions].  */
290 
291 struct gcse_occr
292 {
293   /* Next occurrence of this expression.  */
294   struct gcse_occr *next;
295   /* The insn that computes the expression.  */
296   rtx_insn *insn;
297   /* Nonzero if this [anticipatable] occurrence has been deleted.  */
298   char deleted_p;
299   /* Nonzero if this [available] occurrence has been copied to
300      reaching_reg.  */
301   /* ??? This is mutually exclusive with deleted_p, so they could share
302      the same byte.  */
303   char copied_p;
304 };
305 
306 typedef struct gcse_occr *occr_t;
307 
308 /* Expression hash tables.
309    Each hash table is an array of buckets.
310    ??? It is known that if it were an array of entries, structure elements
311    `next_same_hash' and `bitmap_index' wouldn't be necessary.  However, it is
312    not clear whether in the final analysis a sufficient amount of memory would
313    be saved as the size of the available expression bitmaps would be larger
314    [one could build a mapping table without holes afterwards though].
315    Someday I'll perform the computation and figure it out.  */
316 
317 struct gcse_hash_table_d
318 {
319   /* The table itself.
320      This is an array of `expr_hash_table_size' elements.  */
321   struct gcse_expr **table;
322 
323   /* Size of the hash table, in elements.  */
324   unsigned int size;
325 
326   /* Number of hash table elements.  */
327   unsigned int n_elems;
328 };
329 
330 /* Expression hash table.  */
331 static struct gcse_hash_table_d expr_hash_table;
332 
333 /* This is a list of expressions which are MEMs and will be used by load
334    or store motion.
335    Load motion tracks MEMs which aren't killed by anything except themselves,
336    i.e. loads and stores to a single location.
337    We can then allow movement of these MEM refs with a little special
338    allowance (all stores copy the same value to the reaching reg used
339    for the loads).  This means all values used to store into memory must have
340    no side effects so we can re-issue the setter value.  */
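
/* A purely illustrative sketch of the idea: if every store to a tracked
   location X has the form  X = r  and every use of X is a plain load
   ... = X,  then a new pseudo T can be introduced, the stores rewritten as
   T = r; X = T,  and the loads made to read T instead; the loads then become
   ordinary (partially) redundant expressions that PRE can move or delete.  */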
341 
342 struct ls_expr
343 {
344   struct gcse_expr * expr;	/* Gcse expression reference for LM.  */
345   rtx pattern;			/* Pattern of this mem.  */
346   rtx pattern_regs;		/* List of registers mentioned by the mem.  */
347   vec<rtx_insn *> stores;	/* INSN list of stores seen.  */
348   struct ls_expr * next;	/* Next in the list.  */
349   int invalid;			/* Invalid for some reason.  */
350   int index;			/* If it maps to a bitmap index.  */
351   unsigned int hash_index;	/* Index when in a hash table.  */
352   rtx reaching_reg;		/* Register to use when re-writing.  */
353 };
354 
355 /* Head of the list of load/store memory refs.  */
356 static struct ls_expr * pre_ldst_mems = NULL;
357 
358 struct pre_ldst_expr_hasher : nofree_ptr_hash <ls_expr>
359 {
360   typedef value_type compare_type;
361   static inline hashval_t hash (const ls_expr *);
362   static inline bool equal (const ls_expr *, const ls_expr *);
363 };
364 
365 /* Hashtable helpers.  */
366 inline hashval_t
367 pre_ldst_expr_hasher::hash (const ls_expr *x)
368 {
369   int do_not_record_p = 0;
370   return
371     hash_rtx (x->pattern, GET_MODE (x->pattern), &do_not_record_p, NULL, false);
372 }
373 
374 static int expr_equiv_p (const_rtx, const_rtx);
375 
376 inline bool
377 pre_ldst_expr_hasher::equal (const ls_expr *ptr1,
378 			     const ls_expr *ptr2)
379 {
380   return expr_equiv_p (ptr1->pattern, ptr2->pattern);
381 }
382 
383 /* Hashtable for the load/store memory refs.  */
384 static hash_table<pre_ldst_expr_hasher> *pre_ldst_table;
385 
386 /* Bitmap containing one bit for each register in the program.
387    Used when performing GCSE to track which registers have been set since
388    the start of the basic block.  */
389 static regset reg_set_bitmap;
390 
391 /* Array, indexed by basic block number for a list of insns which modify
392    memory within that block.  */
393 static vec<rtx_insn *> *modify_mem_list;
394 static bitmap modify_mem_list_set;
395 
396 /* This array parallels modify_mem_list, except that it stores MEMs
397    being set and their canonicalized memory addresses.  */
398 static vec<modify_pair> *canon_modify_mem_list;
399 
400 /* Bitmap indexed by block numbers to record which blocks contain
401    function calls.  */
402 static bitmap blocks_with_calls;
403 
404 /* Various variables for statistics gathering.  */
405 
406 /* Memory used in a pass.
407    This isn't intended to be absolutely precise.  Its intent is only
408    to keep an eye on memory usage.  */
409 static int bytes_used;
410 
411 /* GCSE substitutions made.  */
412 static int gcse_subst_count;
413 /* Number of copy instructions created.  */
414 static int gcse_create_count;
415 
416 /* True if we are performing code hoisting rather than PRE.  */
417 static bool doing_code_hoisting_p = false;
418 
419 /* For available exprs.  */
420 static sbitmap *ae_kill;
421 
422 /* Data stored for each basic block.  */
423 struct bb_data
424 {
425   /* Maximal register pressure inside basic block for given register class
426      (defined only for the pressure classes).  */
427   int max_reg_pressure[N_REG_CLASSES];
428   /* Recorded register pressure of basic block before trying to hoist
429      an expression.  Will be used to restore the register pressure
430      if the expression should not be hoisted.  */
431   int old_pressure;
432   /* Recorded register live_in info of basic block during code hoisting
433      process.  BACKUP is used to record live_in info before trying to
434      hoist an expression, and will be used to restore LIVE_IN if the
435      expression should not be hoisted.  */
436   bitmap live_in, backup;
437 };
438 
439 #define BB_DATA(bb) ((struct bb_data *) (bb)->aux)
440 
441 static basic_block curr_bb;
442 
443 /* Current register pressure for each pressure class.  */
444 static int curr_reg_pressure[N_REG_CLASSES];
445 
446 
447 static void compute_can_copy (void);
448 static void *gmalloc (size_t) ATTRIBUTE_MALLOC;
449 static void *gcalloc (size_t, size_t) ATTRIBUTE_MALLOC;
450 static void *gcse_alloc (unsigned long);
451 static void alloc_gcse_mem (void);
452 static void free_gcse_mem (void);
453 static void hash_scan_insn (rtx_insn *, struct gcse_hash_table_d *);
454 static void hash_scan_set (rtx, rtx_insn *, struct gcse_hash_table_d *);
455 static void hash_scan_clobber (rtx, rtx_insn *, struct gcse_hash_table_d *);
456 static void hash_scan_call (rtx, rtx_insn *, struct gcse_hash_table_d *);
457 static int oprs_unchanged_p (const_rtx, const rtx_insn *, int);
458 static int oprs_anticipatable_p (const_rtx, const rtx_insn *);
459 static int oprs_available_p (const_rtx, const rtx_insn *);
460 static void insert_expr_in_table (rtx, machine_mode, rtx_insn *, int, int,
461 				  HOST_WIDE_INT, struct gcse_hash_table_d *);
462 static unsigned int hash_expr (const_rtx, machine_mode, int *, int);
463 static void record_last_reg_set_info (rtx_insn *, int);
464 static void record_last_mem_set_info (rtx_insn *);
465 static void record_last_set_info (rtx, const_rtx, void *);
466 static void compute_hash_table (struct gcse_hash_table_d *);
467 static void alloc_hash_table (struct gcse_hash_table_d *);
468 static void free_hash_table (struct gcse_hash_table_d *);
469 static void compute_hash_table_work (struct gcse_hash_table_d *);
470 static void dump_hash_table (FILE *, const char *, struct gcse_hash_table_d *);
471 static void compute_local_properties (sbitmap *, sbitmap *, sbitmap *,
472 				      struct gcse_hash_table_d *);
473 static void mems_conflict_for_gcse_p (rtx, const_rtx, void *);
474 static int load_killed_in_block_p (const_basic_block, int, const_rtx, int);
475 static void alloc_pre_mem (int, int);
476 static void free_pre_mem (void);
477 static struct edge_list *compute_pre_data (void);
478 static int pre_expr_reaches_here_p (basic_block, struct gcse_expr *,
479 				    basic_block);
480 static void insert_insn_end_basic_block (struct gcse_expr *, basic_block);
481 static void pre_insert_copy_insn (struct gcse_expr *, rtx_insn *);
482 static void pre_insert_copies (void);
483 static int pre_delete (void);
484 static int pre_gcse (struct edge_list *);
485 static int one_pre_gcse_pass (void);
486 static void add_label_notes (rtx, rtx_insn *);
487 static void alloc_code_hoist_mem (int, int);
488 static void free_code_hoist_mem (void);
489 static void compute_code_hoist_vbeinout (void);
490 static void compute_code_hoist_data (void);
491 static int should_hoist_expr_to_dom (basic_block, struct gcse_expr *,
492 				     basic_block,
493 				     sbitmap, HOST_WIDE_INT, int *,
494 				     enum reg_class,
495 				     int *, bitmap, rtx_insn *);
496 static int hoist_code (void);
497 static enum reg_class get_regno_pressure_class (int regno, int *nregs);
498 static enum reg_class get_pressure_class_and_nregs (rtx_insn *insn, int *nregs);
499 static int one_code_hoisting_pass (void);
500 static rtx_insn *process_insert_insn (struct gcse_expr *);
501 static int pre_edge_insert (struct edge_list *, struct gcse_expr **);
502 static int pre_expr_reaches_here_p_work (basic_block, struct gcse_expr *,
503 					 basic_block, char *);
504 static struct ls_expr * ldst_entry (rtx);
505 static void free_ldst_entry (struct ls_expr *);
506 static void free_ld_motion_mems (void);
507 static void print_ldst_list (FILE *);
508 static struct ls_expr * find_rtx_in_ldst (rtx);
509 static int simple_mem (const_rtx);
510 static void invalidate_any_buried_refs (rtx);
511 static void compute_ld_motion_mems (void);
512 static void trim_ld_motion_mems (void);
513 static void update_ld_motion_stores (struct gcse_expr *);
514 static void clear_modify_mem_tables (void);
515 static void free_modify_mem_tables (void);
516 
517 #define GNEW(T)			((T *) gmalloc (sizeof (T)))
518 #define GCNEW(T)		((T *) gcalloc (1, sizeof (T)))
519 
520 #define GNEWVEC(T, N)		((T *) gmalloc (sizeof (T) * (N)))
521 #define GCNEWVEC(T, N)		((T *) gcalloc ((N), sizeof (T)))
522 
523 #define GNEWVAR(T, S)		((T *) gmalloc ((S)))
524 #define GCNEWVAR(T, S)		((T *) gcalloc (1, (S)))
525 
526 #define GOBNEW(T)		((T *) gcse_alloc (sizeof (T)))
527 #define GOBNEWVAR(T, S)		((T *) gcse_alloc ((S)))
528 
529 /* Misc. utilities.  */
530 
531 #define can_copy \
532   (this_target_gcse->x_can_copy)
533 #define can_copy_init_p \
534   (this_target_gcse->x_can_copy_init_p)
535 
536 /* Compute which modes support reg/reg copy operations.  */
537 
538 static void
539 compute_can_copy (void)
540 {
541   int i;
542 #ifndef AVOID_CCMODE_COPIES
543   rtx reg;
544   rtx_insn *insn;
545 #endif
546   memset (can_copy, 0, NUM_MACHINE_MODES);
547 
548   start_sequence ();
549   for (i = 0; i < NUM_MACHINE_MODES; i++)
550     if (GET_MODE_CLASS (i) == MODE_CC)
551       {
552 #ifdef AVOID_CCMODE_COPIES
553 	can_copy[i] = 0;
554 #else
555 	reg = gen_rtx_REG ((machine_mode) i, LAST_VIRTUAL_REGISTER + 1);
556 	insn = emit_insn (gen_rtx_SET (reg, reg));
557 	if (recog (PATTERN (insn), insn, NULL) >= 0)
558 	  can_copy[i] = 1;
559 #endif
560       }
561     else
562       can_copy[i] = 1;
563 
564   end_sequence ();
565 }
566 
567 /* Returns whether the mode supports reg/reg copy operations.  */
568 
569 bool
570 can_copy_p (machine_mode mode)
571 {
572   if (! can_copy_init_p)
573     {
574       compute_can_copy ();
575       can_copy_init_p = true;
576     }
577 
578   return can_copy[mode] != 0;
579 }
580 
581 /* Cover function to xmalloc to record bytes allocated.  */
582 
583 static void *
584 gmalloc (size_t size)
585 {
586   bytes_used += size;
587   return xmalloc (size);
588 }
589 
590 /* Cover function to xcalloc to record bytes allocated.  */
591 
592 static void *
593 gcalloc (size_t nelem, size_t elsize)
594 {
595   bytes_used += nelem * elsize;
596   return xcalloc (nelem, elsize);
597 }
598 
599 /* Cover function to obstack_alloc.  */
600 
601 static void *
602 gcse_alloc (unsigned long size)
603 {
604   bytes_used += size;
605   return obstack_alloc (&gcse_obstack, size);
606 }
607 
608 /* Allocate memory for the reg/memory set tracking tables.
609    This is called at the start of each pass.  */
610 
611 static void
612 alloc_gcse_mem (void)
613 {
614   /* Allocate vars to track sets of regs.  */
615   reg_set_bitmap = ALLOC_REG_SET (NULL);
616 
617   /* Allocate array to keep a list of insns which modify memory in each
618      basic block.  The two typedefs are needed to work around the
619      pre-processor limitation with template types in macro arguments.  */
620   typedef vec<rtx_insn *> vec_rtx_heap;
621   typedef vec<modify_pair> vec_modify_pair_heap;
622   modify_mem_list = GCNEWVEC (vec_rtx_heap, last_basic_block_for_fn (cfun));
623   canon_modify_mem_list = GCNEWVEC (vec_modify_pair_heap,
624 				    last_basic_block_for_fn (cfun));
625   modify_mem_list_set = BITMAP_ALLOC (NULL);
626   blocks_with_calls = BITMAP_ALLOC (NULL);
627 }
628 
629 /* Free memory allocated by alloc_gcse_mem.  */
630 
631 static void
632 free_gcse_mem (void)
633 {
634   FREE_REG_SET (reg_set_bitmap);
635 
636   free_modify_mem_tables ();
637   BITMAP_FREE (modify_mem_list_set);
638   BITMAP_FREE (blocks_with_calls);
639 }
640 
641 /* Compute the local properties of each recorded expression.
642 
643    Local properties are those that are defined by the block, irrespective of
644    other blocks.
645 
646    An expression is transparent in a block if its operands are not modified
647    in the block.
648 
649    An expression is computed (locally available) in a block if it is computed
650    at least once and the expression would contain the same value if the
651    computation was moved to the end of the block.
652 
653    An expression is locally anticipatable in a block if it is computed at
654    least once and the expression would contain the same value if the computation
655    was moved to the beginning of the block.
656 
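   As a small example, in a block containing

     a = b + c;
     b = 0;

   the expression b + c is locally anticipatable (it is computed before any of
   its operands change), it is not computed/available at the end of the block
   (B is modified after the computation), and it is not transparent (one of
   its operands is set within the block).
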
657    We call this routine for PRE and code hoisting.  Both compute
658    basically the same information and thus can easily share this code.
659 
660    TRANSP, COMP, and ANTLOC are destination sbitmaps for recording local
661    properties.  If NULL, then it is not necessary to compute or record that
662    particular property.
663 
664    TABLE controls which hash table to look at.  */
665 
666 static void
667 compute_local_properties (sbitmap *transp, sbitmap *comp, sbitmap *antloc,
668 			  struct gcse_hash_table_d *table)
669 {
670   unsigned int i;
671 
672   /* Initialize any bitmaps that were passed in.  */
673   if (transp)
674     {
675       bitmap_vector_ones (transp, last_basic_block_for_fn (cfun));
676     }
677 
678   if (comp)
679     bitmap_vector_clear (comp, last_basic_block_for_fn (cfun));
680   if (antloc)
681     bitmap_vector_clear (antloc, last_basic_block_for_fn (cfun));
682 
683   for (i = 0; i < table->size; i++)
684     {
685       struct gcse_expr *expr;
686 
687       for (expr = table->table[i]; expr != NULL; expr = expr->next_same_hash)
688 	{
689 	  int indx = expr->bitmap_index;
690 	  struct gcse_occr *occr;
691 
692 	  /* The expression is transparent in this block if it is not killed.
693 	     We start by assuming all are transparent [none are killed], and
694 	     then reset the bits for those that are.  */
695 	  if (transp)
696 	    compute_transp (expr->expr, indx, transp,
697 			    blocks_with_calls,
698 			    modify_mem_list_set,
699 			    canon_modify_mem_list);
700 
701 	  /* The occurrences recorded in antic_occr are exactly those that
702 	     we want to set to nonzero in ANTLOC.  */
703 	  if (antloc)
704 	    for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
705 	      {
706 		bitmap_set_bit (antloc[BLOCK_FOR_INSN (occr->insn)->index], indx);
707 
708 		/* While we're scanning the table, this is a good place to
709 		   initialize this.  */
710 		occr->deleted_p = 0;
711 	      }
712 
713 	  /* The occurrences recorded in avail_occr are exactly those that
714 	     we want to set to nonzero in COMP.  */
715 	  if (comp)
716 	    for (occr = expr->avail_occr; occr != NULL; occr = occr->next)
717 	      {
718 		bitmap_set_bit (comp[BLOCK_FOR_INSN (occr->insn)->index], indx);
719 
720 		/* While we're scanning the table, this is a good place to
721 		   initialize this.  */
722 		occr->copied_p = 0;
723 	      }
724 
725 	  /* While we're scanning the table, this is a good place to
726 	     initialize this.  */
727 	  expr->reaching_reg = 0;
728 	}
729     }
730 }
731 
732 /* Hash table support.  */
733 
734 struct reg_avail_info
735 {
736   basic_block last_bb;
737   int first_set;
738   int last_set;
739 };
740 
741 static struct reg_avail_info *reg_avail_info;
742 static basic_block current_bb;
743 
744 /* See whether X, the source of a set, is something we want to consider for
745    GCSE.  */
746 
747 static int
748 want_to_gcse_p (rtx x, machine_mode mode, HOST_WIDE_INT *max_distance_ptr)
749 {
750 #ifdef STACK_REGS
751   /* On register stack architectures, don't GCSE constants from the
752      constant pool, as the benefits are often swamped by the overhead
753      of shuffling the register stack between basic blocks.  */
754   if (IS_STACK_MODE (GET_MODE (x)))
755     x = avoid_constant_pool_reference (x);
756 #endif
757 
758   /* GCSE'ing constants:
759 
760      We do not specifically distinguish between constant and non-constant
761      expressions in PRE and Hoist.  We use set_src_cost below to limit
762      the maximum distance simple expressions can travel.
763 
764      Nevertheless, constants are much easier to GCSE, and, hence,
765      it is easy to overdo the optimizations.  Usually, excessive PRE and
766      Hoisting of constants leads to increased register pressure.
767 
768      RA can deal with this by rematerializing some of the constants.
769      Therefore, it is important that the back-end generates sets of constants
770      in a way that allows reload to rematerialize them under high register
771      pressure, i.e., a pseudo register with REG_EQUAL to constant
772      is set only once.  Failing to do so will result in IRA/reload
773      spilling such constants under high register pressure instead of
774      rematerializing them.  */
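
  /* A rough worked example, assuming the default parameter values of
     GCSE_COST_DISTANCE_RATIO == 10 and GCSE_UNRESTRICTED_COST == 3: an
     expression whose set_src_cost is COSTS_N_INSNS (1), i.e. 4, is cheaper
     than COSTS_N_INSNS (3), so the code below limits it to
     max_distance = (10 * 4) / 10 = 4 instructions, whereas a more expensive
     expression gets max_distance = 0, i.e. no distance restriction.  */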
775 
776   switch (GET_CODE (x))
777     {
778     case REG:
779     case SUBREG:
780     case CALL:
781       return 0;
782 
783     CASE_CONST_ANY:
784       if (!doing_code_hoisting_p)
785 	/* Do not PRE constants.  */
786 	return 0;
787 
788       /* FALLTHRU */
789 
790     default:
791       if (doing_code_hoisting_p)
792 	/* PRE doesn't implement max_distance restriction.  */
793 	{
794 	  int cost;
795 	  HOST_WIDE_INT max_distance;
796 
797 	  gcc_assert (!optimize_function_for_speed_p (cfun)
798 		      && optimize_function_for_size_p (cfun));
799 	  cost = set_src_cost (x, mode, 0);
800 
801 	  if (cost < COSTS_N_INSNS (GCSE_UNRESTRICTED_COST))
802 	    {
803 	      max_distance
804 		= ((HOST_WIDE_INT)GCSE_COST_DISTANCE_RATIO * cost) / 10;
805 	      if (max_distance == 0)
806 		return 0;
807 
808 	      gcc_assert (max_distance > 0);
809 	    }
810 	  else
811 	    max_distance = 0;
812 
813 	  if (max_distance_ptr)
814 	    *max_distance_ptr = max_distance;
815 	}
816 
817       return can_assign_to_reg_without_clobbers_p (x, mode);
818     }
819 }
820 
821 /* Used internally by can_assign_to_reg_without_clobbers_p.  */
822 
823 static GTY(()) rtx_insn *test_insn;
824 
825 /* Return true if we can assign X to a pseudo register of mode MODE
826    such that the resulting insn does not result in clobbering a hard
827    register as a side-effect.
828 
829    Additionally, if the target requires it, check that the resulting insn
830    can be copied.  If it cannot, this means that X is special and probably
831    has hidden side-effects we don't want to mess with.
832 
833    This function is typically used by code motion passes, to verify
834    that it is safe to insert an insn without worrying about clobbering
835    maybe live hard regs.  */
836 
837 bool
838 can_assign_to_reg_without_clobbers_p (rtx x, machine_mode mode)
839 {
840   int num_clobbers = 0;
841   int icode;
842   bool can_assign = false;
843 
844   /* If this is a valid operand, we are OK.  If it's VOIDmode, we aren't.  */
845   if (general_operand (x, mode))
846     return 1;
847   else if (GET_MODE (x) == VOIDmode)
848     return 0;
849 
850   /* Otherwise, check if we can make a valid insn from it.  First initialize
851      our test insn if we haven't already.  */
852   if (test_insn == 0)
853     {
854       test_insn
855 	= make_insn_raw (gen_rtx_SET (gen_rtx_REG (word_mode,
856 						   FIRST_PSEUDO_REGISTER * 2),
857 				      const0_rtx));
858       SET_NEXT_INSN (test_insn) = SET_PREV_INSN (test_insn) = 0;
859       INSN_LOCATION (test_insn) = UNKNOWN_LOCATION;
860     }
861 
862   /* Now make an insn like the one we would make when GCSE'ing and see if
863      it is valid.  */
864   PUT_MODE (SET_DEST (PATTERN (test_insn)), mode);
865   SET_SRC (PATTERN (test_insn)) = x;
866 
867   icode = recog (PATTERN (test_insn), test_insn, &num_clobbers);
868 
869   /* If the test insn is valid and doesn't need clobbers, and the target also
870      has no objections, we're good.  */
871   if (icode >= 0
872       && (num_clobbers == 0 || !added_clobbers_hard_reg_p (icode))
873       && ! (targetm.cannot_copy_insn_p
874 	    && targetm.cannot_copy_insn_p (test_insn)))
875     can_assign = true;
876 
877   /* Make sure test_insn doesn't have any pointers into GC space.  */
878   SET_SRC (PATTERN (test_insn)) = NULL_RTX;
879 
880   return can_assign;
881 }
882 
883 /* Return nonzero if the operands of expression X are unchanged from the
884    start of INSN's basic block up to but not including INSN (if AVAIL_P == 0),
885    or from INSN to the end of INSN's basic block (if AVAIL_P != 0).  */
886 
887 static int
888 oprs_unchanged_p (const_rtx x, const rtx_insn *insn, int avail_p)
889 {
890   int i, j;
891   enum rtx_code code;
892   const char *fmt;
893 
894   if (x == 0)
895     return 1;
896 
897   code = GET_CODE (x);
898   switch (code)
899     {
900     case REG:
901       {
902 	struct reg_avail_info *info = &reg_avail_info[REGNO (x)];
903 
904 	if (info->last_bb != current_bb)
905 	  return 1;
906 	if (avail_p)
907 	  return info->last_set < DF_INSN_LUID (insn);
908 	else
909 	  return info->first_set >= DF_INSN_LUID (insn);
910       }
911 
912     case MEM:
913       if (! flag_gcse_lm
914 	  || load_killed_in_block_p (current_bb, DF_INSN_LUID (insn),
915 				     x, avail_p))
916 	return 0;
917       else
918 	return oprs_unchanged_p (XEXP (x, 0), insn, avail_p);
919 
920     case PRE_DEC:
921     case PRE_INC:
922     case POST_DEC:
923     case POST_INC:
924     case PRE_MODIFY:
925     case POST_MODIFY:
926       return 0;
927 
928     case PC:
929     case CC0: /*FIXME*/
930     case CONST:
931     CASE_CONST_ANY:
932     case SYMBOL_REF:
933     case LABEL_REF:
934     case ADDR_VEC:
935     case ADDR_DIFF_VEC:
936       return 1;
937 
938     default:
939       break;
940     }
941 
942   for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
943     {
944       if (fmt[i] == 'e')
945 	{
946 	  /* If we are about to do the last recursive call needed at this
947 	     level, change it into iteration.  This function is called enough
948 	     to be worth it.  */
949 	  if (i == 0)
950 	    return oprs_unchanged_p (XEXP (x, i), insn, avail_p);
951 
952 	  else if (! oprs_unchanged_p (XEXP (x, i), insn, avail_p))
953 	    return 0;
954 	}
955       else if (fmt[i] == 'E')
956 	for (j = 0; j < XVECLEN (x, i); j++)
957 	  if (! oprs_unchanged_p (XVECEXP (x, i, j), insn, avail_p))
958 	    return 0;
959     }
960 
961   return 1;
962 }
963 
964 /* Info passed from load_killed_in_block_p to mems_conflict_for_gcse_p.  */
965 
966 struct mem_conflict_info
967 {
968   /* A memory reference for a load instruction; mems_conflict_for_gcse_p will
969      see if a memory store conflicts with this memory load.  */
970   const_rtx mem;
971 
972   /* True if mems_conflict_for_gcse_p finds a conflict between two memory
973      references.  */
974   bool conflict;
975 };
976 
977 /* DEST is the output of an instruction.  If it is a memory reference and
978    possibly conflicts with the load found in DATA, then communicate this
979    information back through DATA.  */
980 
981 static void
982 mems_conflict_for_gcse_p (rtx dest, const_rtx setter ATTRIBUTE_UNUSED,
983 			  void *data)
984 {
985   struct mem_conflict_info *mci = (struct mem_conflict_info *) data;
986 
987   while (GET_CODE (dest) == SUBREG
988 	 || GET_CODE (dest) == ZERO_EXTRACT
989 	 || GET_CODE (dest) == STRICT_LOW_PART)
990     dest = XEXP (dest, 0);
991 
992   /* If DEST is not a MEM, then it will not conflict with the load.  Note
993      that function calls are assumed to clobber memory, but are handled
994      elsewhere.  */
995   if (! MEM_P (dest))
996     return;
997 
998   /* If we are setting a MEM in our list of specially recognized MEMs,
999      don't mark as killed this time.  */
1000   if (pre_ldst_mems != NULL && expr_equiv_p (dest, mci->mem))
1001     {
1002       if (!find_rtx_in_ldst (dest))
1003 	mci->conflict = true;
1004       return;
1005     }
1006 
1007   if (true_dependence (dest, GET_MODE (dest), mci->mem))
1008     mci->conflict = true;
1009 }
1010 
1011 /* Return nonzero if the expression in X (a memory reference) is killed
1012    in block BB before or after the insn with the LUID in UID_LIMIT.
1013    AVAIL_P is nonzero for kills after UID_LIMIT, and zero for kills
1014    before UID_LIMIT.
1015 
1016    To check the entire block, set UID_LIMIT to max_uid + 1 and
1017    AVAIL_P to 0.  */
1018 
1019 static int
1020 load_killed_in_block_p (const_basic_block bb, int uid_limit, const_rtx x,
1021 			int avail_p)
1022 {
1023   vec<rtx_insn *> list = modify_mem_list[bb->index];
1024   rtx_insn *setter;
1025   unsigned ix;
1026 
1027   /* If this is a readonly MEM, then we aren't going to be changing it.  */
1028   if (MEM_READONLY_P (x))
1029     return 0;
1030 
1031   FOR_EACH_VEC_ELT_REVERSE (list, ix, setter)
1032     {
1033       struct mem_conflict_info mci;
1034 
1035       /* Ignore entries in the list that do not apply.  */
1036       if ((avail_p
1037 	   && DF_INSN_LUID (setter) < uid_limit)
1038 	  || (! avail_p
1039 	      && DF_INSN_LUID (setter) > uid_limit))
1040 	continue;
1041 
1042       /* If SETTER is a call everything is clobbered.  Note that calls
1043 	 to pure functions are never put on the list, so we need not
1044 	 worry about them.  */
1045       if (CALL_P (setter))
1046 	return 1;
1047 
1048       /* SETTER must be an INSN of some kind that sets memory.  Call
1049 	 note_stores to examine each hunk of memory that is modified.  */
1050       mci.mem = x;
1051       mci.conflict = false;
1052       note_stores (PATTERN (setter), mems_conflict_for_gcse_p, &mci);
1053       if (mci.conflict)
1054 	return 1;
1055     }
1056   return 0;
1057 }
1058 
1059 /* Return nonzero if the operands of expression X are unchanged from
1060    the start of INSN's basic block up to but not including INSN.  */
1061 
1062 static int
1063 oprs_anticipatable_p (const_rtx x, const rtx_insn *insn)
1064 {
1065   return oprs_unchanged_p (x, insn, 0);
1066 }
1067 
1068 /* Return nonzero if the operands of expression X are unchanged from
1069    INSN to the end of INSN's basic block.  */
1070 
1071 static int
1072 oprs_available_p (const_rtx x, const rtx_insn *insn)
1073 {
1074   return oprs_unchanged_p (x, insn, 1);
1075 }
1076 
1077 /* Hash expression X.
1078 
1079    MODE is only used if X is a CONST_INT.  DO_NOT_RECORD_P is a boolean
1080    indicating if a volatile operand is found or if the expression contains
1081    something we don't want to insert in the table.  HASH_TABLE_SIZE is
1082    the current size of the hash table to be probed.  */
1083 
1084 static unsigned int
1085 hash_expr (const_rtx x, machine_mode mode, int *do_not_record_p,
1086 	   int hash_table_size)
1087 {
1088   unsigned int hash;
1089 
1090   *do_not_record_p = 0;
1091 
1092   hash = hash_rtx (x, mode, do_not_record_p, NULL, /*have_reg_qty=*/false);
1093   return hash % hash_table_size;
1094 }
1095 
1096 /* Return nonzero if X is equivalent to Y.  */
1097 
1098 static int
1099 expr_equiv_p (const_rtx x, const_rtx y)
1100 {
1101   return exp_equiv_p (x, y, 0, true);
1102 }
1103 
1104 /* Insert expression X in INSN in the hash TABLE.
1105    If it is already present, record it as the last occurrence in INSN's
1106    basic block.
1107 
1108    MODE is the mode of the value X is being stored into.
1109    It is only used if X is a CONST_INT.
1110 
1111    ANTIC_P is nonzero if X is an anticipatable expression.
1112    AVAIL_P is nonzero if X is an available expression.
1113 
1114    MAX_DISTANCE is the maximum distance in instructions this expression can
1115    be moved.  */
1116 
1117 static void
1118 insert_expr_in_table (rtx x, machine_mode mode, rtx_insn *insn,
1119 		      int antic_p,
1120 		      int avail_p, HOST_WIDE_INT max_distance,
1121 		      struct gcse_hash_table_d *table)
1122 {
1123   int found, do_not_record_p;
1124   unsigned int hash;
1125   struct gcse_expr *cur_expr, *last_expr = NULL;
1126   struct gcse_occr *antic_occr, *avail_occr;
1127 
1128   hash = hash_expr (x, mode, &do_not_record_p, table->size);
1129 
1130   /* Do not insert expression in table if it contains volatile operands,
1131      or if hash_expr determines the expression is something we don't want
1132      to or can't handle.  */
1133   if (do_not_record_p)
1134     return;
1135 
1136   cur_expr = table->table[hash];
1137   found = 0;
1138 
1139   while (cur_expr && (found = expr_equiv_p (cur_expr->expr, x)) == 0)
1140     {
1141       /* If the expression isn't found, save a pointer to the end of
1142 	 the list.  */
1143       last_expr = cur_expr;
1144       cur_expr = cur_expr->next_same_hash;
1145     }
1146 
1147   if (! found)
1148     {
1149       cur_expr = GOBNEW (struct gcse_expr);
1150       bytes_used += sizeof (struct gcse_expr);
1151       if (table->table[hash] == NULL)
1152 	/* This is the first pattern that hashed to this index.  */
1153 	table->table[hash] = cur_expr;
1154       else
1155 	/* Add EXPR to end of this hash chain.  */
1156 	last_expr->next_same_hash = cur_expr;
1157 
1158       /* Set the fields of the expr element.  */
1159       cur_expr->expr = x;
1160       cur_expr->bitmap_index = table->n_elems++;
1161       cur_expr->next_same_hash = NULL;
1162       cur_expr->antic_occr = NULL;
1163       cur_expr->avail_occr = NULL;
1164       gcc_assert (max_distance >= 0);
1165       cur_expr->max_distance = max_distance;
1166     }
1167   else
1168     gcc_assert (cur_expr->max_distance == max_distance);
1169 
1170   /* Now record the occurrence(s).  */
1171   if (antic_p)
1172     {
1173       antic_occr = cur_expr->antic_occr;
1174 
1175       if (antic_occr
1176 	  && BLOCK_FOR_INSN (antic_occr->insn) != BLOCK_FOR_INSN (insn))
1177 	antic_occr = NULL;
1178 
1179       if (antic_occr)
1180 	/* Found another instance of the expression in the same basic block.
1181 	   Prefer the currently recorded one.  We want the first one in the
1182 	   block and the block is scanned from start to end.  */
1183 	; /* nothing to do */
1184       else
1185 	{
1186 	  /* First occurrence of this expression in this basic block.  */
1187 	  antic_occr = GOBNEW (struct gcse_occr);
1188 	  bytes_used += sizeof (struct gcse_occr);
1189 	  antic_occr->insn = insn;
1190 	  antic_occr->next = cur_expr->antic_occr;
1191 	  antic_occr->deleted_p = 0;
1192 	  cur_expr->antic_occr = antic_occr;
1193 	}
1194     }
1195 
1196   if (avail_p)
1197     {
1198       avail_occr = cur_expr->avail_occr;
1199 
1200       if (avail_occr
1201 	  && BLOCK_FOR_INSN (avail_occr->insn) == BLOCK_FOR_INSN (insn))
1202 	{
1203 	  /* Found another instance of the expression in the same basic block.
1204 	     Prefer this occurrence to the currently recorded one.  We want
1205 	     the last one in the block and the block is scanned from start
1206 	     to end.  */
1207 	  avail_occr->insn = insn;
1208 	}
1209       else
1210 	{
1211 	  /* First occurrence of this expression in this basic block.  */
1212 	  avail_occr = GOBNEW (struct gcse_occr);
1213 	  bytes_used += sizeof (struct gcse_occr);
1214 	  avail_occr->insn = insn;
1215 	  avail_occr->next = cur_expr->avail_occr;
1216 	  avail_occr->deleted_p = 0;
1217 	  cur_expr->avail_occr = avail_occr;
1218 	}
1219     }
1220 }
1221 
1222 /* Scan SET present in INSN and add an entry to the hash TABLE.  */
1223 
1224 static void
1225 hash_scan_set (rtx set, rtx_insn *insn, struct gcse_hash_table_d *table)
1226 {
1227   rtx src = SET_SRC (set);
1228   rtx dest = SET_DEST (set);
1229   rtx note;
1230 
1231   if (GET_CODE (src) == CALL)
1232     hash_scan_call (src, insn, table);
1233 
1234   else if (REG_P (dest))
1235     {
1236       unsigned int regno = REGNO (dest);
1237       HOST_WIDE_INT max_distance = 0;
1238 
1239       /* See if a REG_EQUAL note shows this equivalent to a simpler expression.
1240 
1241 	 This allows us to do a single GCSE pass and still eliminate
1242 	 redundant constants, addresses or other expressions that are
1243 	 constructed with multiple instructions.
1244 
1245 	 However, keep the original SRC if INSN is a simple reg-reg move.
1246 	 In this case, there will almost always be a REG_EQUAL note on the
1247 	 insn that sets SRC.  By recording the REG_EQUAL value here as SRC
1248 	 for INSN, we miss copy propagation opportunities and we perform the
1249 	 same PRE GCSE operation repeatedly on the same REG_EQUAL value if we
1250 	 do more than one PRE GCSE pass.
1251 
1252 	 Note that this does not impede profitable constant propagations.  We
1253 	 "look through" reg-reg sets in lookup_avail_set.  */
1254       note = find_reg_equal_equiv_note (insn);
1255       if (note != 0
1256 	  && REG_NOTE_KIND (note) == REG_EQUAL
1257 	  && !REG_P (src)
1258 	  && want_to_gcse_p (XEXP (note, 0), GET_MODE (dest), NULL))
1259 	src = XEXP (note, 0), set = gen_rtx_SET (dest, src);
1260 
1261       /* Only record sets of pseudo-regs in the hash table.  */
1262       if (regno >= FIRST_PSEUDO_REGISTER
1263 	  /* Don't GCSE something if we can't do a reg/reg copy.  */
1264 	  && can_copy_p (GET_MODE (dest))
1265 	  /* GCSE commonly inserts instructions after the insn.  We can't
1266 	     do that easily for EH edges so disable GCSE on these for now.  */
1267 	  /* ??? We can now easily create new EH landing pads at the
1268 	     gimple level, for splitting edges; there's no reason we
1269 	     can't do the same thing at the rtl level.  */
1270 	  && !can_throw_internal (insn)
1271 	  /* Is SET_SRC something we want to gcse?  */
1272 	  && want_to_gcse_p (src, GET_MODE (dest), &max_distance)
1273 	  /* Don't CSE a nop.  */
1274 	  && ! set_noop_p (set)
1275 	  /* Don't GCSE if it has attached REG_EQUIV note.
1276 	     At this point only function parameters should have
1277 	     REG_EQUIV notes and if the argument slot is used somewhere
1278 	     explicitly, it means the address of the parameter has been taken,
1279 	     so we should not extend the lifetime of the pseudo.  */
1280 	  && (note == NULL_RTX || ! MEM_P (XEXP (note, 0))))
1281 	{
1282 	  /* An expression is not anticipatable if its operands are
1283 	     modified before this insn or if this is not the only SET in
1284 	     this insn.  The latter condition does not have to mean that
1285 	     SRC itself is not anticipatable, but we just will not be
1286 	     able to handle code motion of insns with multiple sets.  */
1287 	  int antic_p = oprs_anticipatable_p (src, insn)
1288 			&& !multiple_sets (insn);
1289 	  /* An expression is not available if its operands are
1290 	     subsequently modified, including this insn.  It's also not
1291 	     available if this is a branch, because we can't insert
1292 	     a set after the branch.  */
1293 	  int avail_p = (oprs_available_p (src, insn)
1294 			 && ! JUMP_P (insn));
1295 
1296 	  insert_expr_in_table (src, GET_MODE (dest), insn, antic_p, avail_p,
1297 				max_distance, table);
1298 	}
1299     }
1300   /* In the case of a store we want to consider the memory value as available
1301      in the REG stored in that memory.  This makes it possible to remove
1302      redundant loads due to stores to the same location.  */
1303   else if (flag_gcse_las && REG_P (src) && MEM_P (dest))
1304     {
1305       unsigned int regno = REGNO (src);
1306       HOST_WIDE_INT max_distance = 0;
1307 
1308       /* Only record sets of pseudo-regs in the hash table.  */
1309       if (regno >= FIRST_PSEUDO_REGISTER
1310 	  /* Don't GCSE something if we can't do a reg/reg copy.  */
1311 	  && can_copy_p (GET_MODE (src))
1312 	  /* GCSE commonly inserts instructions after the insn.  We can't
1313 	     do that easily for EH edges so disable GCSE on these for now.  */
1314 	  && !can_throw_internal (insn)
1315 	  /* Is SET_DEST something we want to gcse?  */
1316 	  && want_to_gcse_p (dest, GET_MODE (dest), &max_distance)
1317 	  /* Don't CSE a nop.  */
1318 	  && ! set_noop_p (set)
1319 	  /* Don't GCSE if it has attached REG_EQUIV note.
1320 	     At this point only function parameters should have
1321 	     REG_EQUIV notes and if the argument slot is used somewhere
1322 	     explicitly, it means the address of the parameter has been taken,
1323 	     so we should not extend the lifetime of the pseudo.  */
1324 	  && ((note = find_reg_note (insn, REG_EQUIV, NULL_RTX)) == 0
1325 	      || ! MEM_P (XEXP (note, 0))))
1326 	{
1327 	  /* Stores are never anticipatable.  */
1328 	  int antic_p = 0;
1329 	  /* An expression is not available if its operands are
1330 	     subsequently modified, including this insn.  It's also not
1331 	     available if this is a branch, because we can't insert
1332 	     a set after the branch.  */
1333 	  int avail_p = oprs_available_p (dest, insn) && ! JUMP_P (insn);
1334 
1335 	  /* Record the memory expression (DEST) in the hash table.  */
1336 	  insert_expr_in_table (dest, GET_MODE (dest), insn,
1337 				antic_p, avail_p, max_distance, table);
1338 	}
1339     }
1340 }
1341 
1342 static void
1343 hash_scan_clobber (rtx x ATTRIBUTE_UNUSED, rtx_insn *insn ATTRIBUTE_UNUSED,
1344 		   struct gcse_hash_table_d *table ATTRIBUTE_UNUSED)
1345 {
1346   /* Currently nothing to do.  */
1347 }
1348 
1349 static void
1350 hash_scan_call (rtx x ATTRIBUTE_UNUSED, rtx_insn *insn ATTRIBUTE_UNUSED,
1351 		struct gcse_hash_table_d *table ATTRIBUTE_UNUSED)
1352 {
1353   /* Currently nothing to do.  */
1354 }
1355 
1356 /* Process INSN and add hash table entries as appropriate.  */
1357 
1358 static void
1359 hash_scan_insn (rtx_insn *insn, struct gcse_hash_table_d *table)
1360 {
1361   rtx pat = PATTERN (insn);
1362   int i;
1363 
1364   /* Pick out the sets of INSN and for other forms of instructions record
1365      what's been modified.  */
1366 
1367   if (GET_CODE (pat) == SET)
1368     hash_scan_set (pat, insn, table);
1369 
1370   else if (GET_CODE (pat) == CLOBBER)
1371     hash_scan_clobber (pat, insn, table);
1372 
1373   else if (GET_CODE (pat) == CALL)
1374     hash_scan_call (pat, insn, table);
1375 
1376   else if (GET_CODE (pat) == PARALLEL)
1377     for (i = 0; i < XVECLEN (pat, 0); i++)
1378       {
1379 	rtx x = XVECEXP (pat, 0, i);
1380 
1381 	if (GET_CODE (x) == SET)
1382 	  hash_scan_set (x, insn, table);
1383 	else if (GET_CODE (x) == CLOBBER)
1384 	  hash_scan_clobber (x, insn, table);
1385 	else if (GET_CODE (x) == CALL)
1386 	  hash_scan_call (x, insn, table);
1387       }
1388 }
1389 
1390 /* Dump the hash table TABLE to file FILE under the name NAME.  */
1391 
1392 static void
1393 dump_hash_table (FILE *file, const char *name, struct gcse_hash_table_d *table)
1394 {
1395   int i;
1396   /* Flattened out table, so it's printed in proper order.  */
1397   struct gcse_expr **flat_table;
1398   unsigned int *hash_val;
1399   struct gcse_expr *expr;
1400 
1401   flat_table = XCNEWVEC (struct gcse_expr *, table->n_elems);
1402   hash_val = XNEWVEC (unsigned int, table->n_elems);
1403 
1404   for (i = 0; i < (int) table->size; i++)
1405     for (expr = table->table[i]; expr != NULL; expr = expr->next_same_hash)
1406       {
1407 	flat_table[expr->bitmap_index] = expr;
1408 	hash_val[expr->bitmap_index] = i;
1409       }
1410 
1411   fprintf (file, "%s hash table (%d buckets, %d entries)\n",
1412 	   name, table->size, table->n_elems);
1413 
1414   for (i = 0; i < (int) table->n_elems; i++)
1415     if (flat_table[i] != 0)
1416       {
1417 	expr = flat_table[i];
1418 	fprintf (file, "Index %d (hash value %d; max distance "
1419 		 HOST_WIDE_INT_PRINT_DEC ")\n  ",
1420 		 expr->bitmap_index, hash_val[i], expr->max_distance);
1421 	print_rtl (file, expr->expr);
1422 	fprintf (file, "\n");
1423       }
1424 
1425   fprintf (file, "\n");
1426 
1427   free (flat_table);
1428   free (hash_val);
1429 }
1430 
1431 /* Record register first/last/block set information for REGNO in INSN.
1432 
1433    first_set records the first place in the block where the register
1434    is set and is used to compute "anticipatability".
1435 
1436    last_set records the last place in the block where the register
1437    is set and is used to compute "availability".
1438 
1439    last_bb records the block for which first_set and last_set are
1440    valid, as a quick test to invalidate them.  */
1441 
1442 static void
1443 record_last_reg_set_info (rtx_insn *insn, int regno)
1444 {
1445   struct reg_avail_info *info = &reg_avail_info[regno];
1446   int luid = DF_INSN_LUID (insn);
1447 
1448   info->last_set = luid;
1449   if (info->last_bb != current_bb)
1450     {
1451       info->last_bb = current_bb;
1452       info->first_set = luid;
1453     }
1454 }
1455 
1456 /* Record memory modification information for INSN.  We do not actually care
1457    about the memory location(s) that are set, or even how they are set (consider
1458    a CALL_INSN).  We merely need to record which insns modify memory.  */
1459 
1460 static void
1461 record_last_mem_set_info (rtx_insn *insn)
1462 {
1463   if (! flag_gcse_lm)
1464     return;
1465 
1466   record_last_mem_set_info_common (insn, modify_mem_list,
1467 				   canon_modify_mem_list,
1468 				   modify_mem_list_set,
1469 				   blocks_with_calls);
1470 }
1471 
1472 /* Called from compute_hash_table via note_stores to handle one
1473    SET or CLOBBER in an insn.  DATA is really the instruction in which
1474    the SET is taking place.  */
1475 
1476 static void
1477 record_last_set_info (rtx dest, const_rtx setter ATTRIBUTE_UNUSED, void *data)
1478 {
1479   rtx_insn *last_set_insn = (rtx_insn *) data;
1480 
1481   if (GET_CODE (dest) == SUBREG)
1482     dest = SUBREG_REG (dest);
1483 
1484   if (REG_P (dest))
1485     record_last_reg_set_info (last_set_insn, REGNO (dest));
1486   else if (MEM_P (dest)
1487 	   /* Ignore pushes, they clobber nothing.  */
1488 	   && ! push_operand (dest, GET_MODE (dest)))
1489     record_last_mem_set_info (last_set_insn);
1490 }
1491 
1492 /* Top level function to create an expression hash table.
1493 
1494    Expression entries are placed in the hash table if
1495    - they are of the form (set (pseudo-reg) src),
1496    - src is something we want to perform GCSE on,
1497    - none of the operands are subsequently modified in the block
1498 
1499    Function want_to_gcse_p says which SRC expressions qualify.
1500 
1501    TABLE is the table computed.  */
1502 
1503 static void
1504 compute_hash_table_work (struct gcse_hash_table_d *table)
1505 {
1506   int i;
1507 
1508   /* Make sure the memory modification tables are empty before we scan.  */
1509   clear_modify_mem_tables ();
1510   /* Some working arrays used to track first and last set in each block.  */
1511   reg_avail_info = GNEWVEC (struct reg_avail_info, max_reg_num ());
1512 
1513   for (i = 0; i < max_reg_num (); ++i)
1514     reg_avail_info[i].last_bb = NULL;
1515 
1516   FOR_EACH_BB_FN (current_bb, cfun)
1517     {
1518       rtx_insn *insn;
1519       unsigned int regno;
1520 
1521       /* First pass over the instructions records information used to
1522 	 determine when registers and memory are first and last set.  */
1523       FOR_BB_INSNS (current_bb, insn)
1524 	{
1525 	  if (!NONDEBUG_INSN_P (insn))
1526 	    continue;
1527 
1528 	  if (CALL_P (insn))
1529 	    {
1530 	      hard_reg_set_iterator hrsi;
1531 	      EXECUTE_IF_SET_IN_HARD_REG_SET (regs_invalidated_by_call,
1532 					      0, regno, hrsi)
1533 		record_last_reg_set_info (insn, regno);
1534 
1535 	      if (! RTL_CONST_OR_PURE_CALL_P (insn))
1536 		record_last_mem_set_info (insn);
1537 	    }
1538 
1539 	  note_stores (PATTERN (insn), record_last_set_info, insn);
1540 	}
1541 
1542       /* The next pass builds the hash table.  */
1543       FOR_BB_INSNS (current_bb, insn)
1544 	if (NONDEBUG_INSN_P (insn))
1545 	  hash_scan_insn (insn, table);
1546     }
1547 
1548   free (reg_avail_info);
1549   reg_avail_info = NULL;
1550 }
1551 
1552 /* Allocate space for the set/expr hash TABLE.
1553    The number of insns in the function is used to determine the number of buckets.  */
1554 
1555 static void
1556 alloc_hash_table (struct gcse_hash_table_d *table)
1557 {
1558   int n;
1559 
1560   n = get_max_insn_count ();
1561 
1562   table->size = n / 4;
1563   if (table->size < 11)
1564     table->size = 11;
1565 
1566   /* Attempt to maintain efficient use of hash table.
1567      Making it an odd number is simplest for now.
1568      ??? Later take some measurements.  */
1569   table->size |= 1;
1570   n = table->size * sizeof (struct gcse_expr *);
1571   table->table = GNEWVAR (struct gcse_expr *, n);
1572 }
1573 
1574 /* Free things allocated by alloc_hash_table.  */
1575 
1576 static void
1577 free_hash_table (struct gcse_hash_table_d *table)
1578 {
1579   free (table->table);
1580 }
1581 
1582 /* Compute the expression hash table TABLE.  */
1583 
1584 static void
1585 compute_hash_table (struct gcse_hash_table_d *table)
1586 {
1587   /* Initialize count of number of entries in hash table.  */
1588   table->n_elems = 0;
1589   memset (table->table, 0, table->size * sizeof (struct gcse_expr *));
1590 
1591   compute_hash_table_work (table);
1592 }
1593 
1594 /* Expression tracking support.  */
1595 
1596 /* Clear canon_modify_mem_list and modify_mem_list tables.  */
1597 static void
1598 clear_modify_mem_tables (void)
1599 {
1600   unsigned i;
1601   bitmap_iterator bi;
1602 
1603   EXECUTE_IF_SET_IN_BITMAP (modify_mem_list_set, 0, i, bi)
1604     {
1605       modify_mem_list[i].release ();
1606       canon_modify_mem_list[i].release ();
1607     }
1608   bitmap_clear (modify_mem_list_set);
1609   bitmap_clear (blocks_with_calls);
1610 }
1611 
1612 /* Release memory used by modify_mem_list_set.  */
1613 
1614 static void
1615 free_modify_mem_tables (void)
1616 {
1617   clear_modify_mem_tables ();
1618   free (modify_mem_list);
1619   free (canon_modify_mem_list);
1620   modify_mem_list = 0;
1621   canon_modify_mem_list = 0;
1622 }
1623 
1624 /* Compute PRE+LCM working variables.  */
1625 
1626 /* Local properties of expressions.  */
1627 
1628 /* Nonzero for expressions that are transparent in the block.  */
1629 static sbitmap *transp;
1630 
1631 /* Nonzero for expressions that are computed (available) in the block.  */
1632 static sbitmap *comp;
1633 
1634 /* Nonzero for expressions that are locally anticipatable in the block.  */
1635 static sbitmap *antloc;
1636 
1637 /* Nonzero for expressions where this block is an optimal computation
1638    point.  */
1639 static sbitmap *pre_optimal;
1640 
1641 /* Nonzero for expressions which are redundant in a particular block.  */
1642 static sbitmap *pre_redundant;
1643 
1644 /* Nonzero for expressions which should be inserted on a specific edge.  */
1645 static sbitmap *pre_insert_map;
1646 
1647 /* Nonzero for expressions which should be deleted in a specific block.  */
1648 static sbitmap *pre_delete_map;
1649 
1650 /* Allocate vars used for PRE analysis.  */
1651 
1652 static void
1653 alloc_pre_mem (int n_blocks, int n_exprs)
1654 {
1655   transp = sbitmap_vector_alloc (n_blocks, n_exprs);
1656   comp = sbitmap_vector_alloc (n_blocks, n_exprs);
1657   antloc = sbitmap_vector_alloc (n_blocks, n_exprs);
1658 
1659   pre_optimal = NULL;
1660   pre_redundant = NULL;
1661   pre_insert_map = NULL;
1662   pre_delete_map = NULL;
1663   ae_kill = sbitmap_vector_alloc (n_blocks, n_exprs);
1664 
1665   /* pre_insert and pre_delete are allocated later.  */
1666 }
1667 
1668 /* Free vars used for PRE analysis.  */
1669 
1670 static void
1671 free_pre_mem (void)
1672 {
1673   sbitmap_vector_free (transp);
1674   sbitmap_vector_free (comp);
1675 
1676   /* ANTLOC and AE_KILL are freed just after pre_lcm finishes.  */
1677 
1678   if (pre_optimal)
1679     sbitmap_vector_free (pre_optimal);
1680   if (pre_redundant)
1681     sbitmap_vector_free (pre_redundant);
1682   if (pre_insert_map)
1683     sbitmap_vector_free (pre_insert_map);
1684   if (pre_delete_map)
1685     sbitmap_vector_free (pre_delete_map);
1686 
1687   transp = comp = NULL;
1688   pre_optimal = pre_redundant = pre_insert_map = pre_delete_map = NULL;
1689 }
1690 
1691 /* Remove certain expressions from anticipatable and transparent
1692    sets of basic blocks that have an incoming abnormal edge.
1693    For PRE remove potentially trapping expressions to avoid placing
1694    them on abnormal edges.  For hoisting remove memory references that
1695    can be clobbered by calls.  */
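
/* For instance, a division such as (div:SI (reg:SI 100) (reg:SI 101))
   may trap on a zero divisor, so for PRE its bit is cleared from the
   anticipatable and transparent sets of any block entered by an
   abnormal edge.  For hoisting, a non-constant MEM is cleared in the
   same way when the abnormal edge comes from a block ending in a
   call.  */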
1696 
1697 static void
1698 prune_expressions (bool pre_p)
1699 {
1700   struct gcse_expr *expr;
1701   unsigned int ui;
1702   basic_block bb;
1703 
1704   auto_sbitmap prune_exprs (expr_hash_table.n_elems);
1705   bitmap_clear (prune_exprs);
1706   for (ui = 0; ui < expr_hash_table.size; ui++)
1707     {
1708       for (expr = expr_hash_table.table[ui]; expr; expr = expr->next_same_hash)
1709 	{
1710 	  /* Note potentially trapping expressions.  */
1711 	  if (may_trap_p (expr->expr))
1712 	    {
1713 	      bitmap_set_bit (prune_exprs, expr->bitmap_index);
1714 	      continue;
1715 	    }
1716 
1717 	  if (!pre_p && contains_mem_rtx_p (expr->expr))
1718 	    /* Note memory references that can be clobbered by a call.
1719 	       We do not split abnormal edges in hoisting, so should
1720 	       a memory reference get hoisted along an abnormal edge,
1721 	       it would be placed /before/ the call.  Therefore, only
1722 	       constant memory references can be hoisted along abnormal
1723 	       edges.  */
1724 	    {
1725 	      rtx x = expr->expr;
1726 
1727 	      /* Common cases where we might find the MEM which may allow us
1728 		 to avoid pruning the expression.  */
1729 	      while (GET_CODE (x) == ZERO_EXTEND || GET_CODE (x) == SIGN_EXTEND)
1730 		x = XEXP (x, 0);
1731 
1732 	      /* If we found the MEM, go ahead and look at it to see if it has
1733 		 properties that allow us to avoid pruning its expression out
1734 		 of the tables.  */
1735 	      if (MEM_P (x))
1736 		{
1737 		  if (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
1738 		      && CONSTANT_POOL_ADDRESS_P (XEXP (x, 0)))
1739 		    continue;
1740 
1741 		  if (MEM_READONLY_P (x)
1742 		      && !MEM_VOLATILE_P (x)
1743 		      && MEM_NOTRAP_P (x))
1744 		    /* Constant memory reference, e.g., a PIC address.  */
1745 		    continue;
1746 		}
1747 
1748 	      /* ??? Optimally, we would use interprocedural alias
1749 		 analysis to determine if this mem is actually killed
1750 		 by this call.  */
1751 
1752 	      bitmap_set_bit (prune_exprs, expr->bitmap_index);
1753 	    }
1754 	}
1755     }
1756 
1757   FOR_EACH_BB_FN (bb, cfun)
1758     {
1759       edge e;
1760       edge_iterator ei;
1761 
1762       /* If the current block is the destination of an abnormal edge, we
1763 	 kill all trapping (for PRE) and memory (for hoist) expressions
1764 	 because we won't be able to properly place the instruction on
1765 	 the edge.  So make them neither anticipatable nor transparent.
1766 	 This is fairly conservative.
1767 
1768 	 ??? For hoisting it may be necessary to check for set-and-jump
1769 	 instructions here, not just for abnormal edges.  The general problem
1770 	 is that when an expression cannot be placed right at the end of
1771 	 a basic block we should account for any side-effects of subsequent
1772 	 jump instructions that could clobber the expression.  It would
1773 	 be best to implement this check along the lines of
1774 	 should_hoist_expr_to_dom where the target block is already known
1775 	 and, hence, there's no need to conservatively prune expressions on
1776 	 "intermediate" set-and-jump instructions.  */
1777       FOR_EACH_EDGE (e, ei, bb->preds)
1778 	if ((e->flags & EDGE_ABNORMAL)
1779 	    && (pre_p || CALL_P (BB_END (e->src))))
1780 	  {
1781 	    bitmap_and_compl (antloc[bb->index],
1782 				antloc[bb->index], prune_exprs);
1783 	    bitmap_and_compl (transp[bb->index],
1784 				transp[bb->index], prune_exprs);
1785 	    break;
1786 	  }
1787     }
1788 }
1789 
1790 /* It may be necessary to insert a large number of insns on edges to
1791    make the existing occurrences of expressions fully redundant.  This
1792    routine examines the set of insertions and deletions and if the ratio
1793    of insertions to deletions is too high for a particular expression, then
1794    the expression is removed from the insertion/deletion sets.
1795 
1796    N_ELEMS is the number of elements in the hash table.  */
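
/* For example, an expression that would have to be inserted on 30
   different edges in order to delete a single occurrence has an
   insertion/deletion ratio of 30; if that exceeds
   MAX_GCSE_INSERTION_RATIO, the expression is dropped from both
   PRE_INSERT_MAP and PRE_DELETE_MAP below.  */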
1797 
1798 static void
1799 prune_insertions_deletions (int n_elems)
1800 {
1801   sbitmap_iterator sbi;
1802 
1803   /* We always use I to iterate over blocks/edges and J to iterate over
1804      expressions.  */
1805   unsigned int i, j;
1806 
1807   /* Counts for the number of times an expression needs to be inserted and
1808      number of times an expression can be removed as a result.  */
1809   int *insertions = GCNEWVEC (int, n_elems);
1810   int *deletions = GCNEWVEC (int, n_elems);
1811 
1812   /* Set of expressions which require too many insertions relative to
1813      the number of deletions achieved.  We will prune these out of the
1814      insertion/deletion sets.  */
1815   auto_sbitmap prune_exprs (n_elems);
1816   bitmap_clear (prune_exprs);
1817 
1818   /* Iterate over the edges counting the number of times each expression
1819      needs to be inserted.  */
1820   for (i = 0; i < (unsigned) n_edges_for_fn (cfun); i++)
1821     {
1822       EXECUTE_IF_SET_IN_BITMAP (pre_insert_map[i], 0, j, sbi)
1823 	insertions[j]++;
1824     }
1825 
1826   /* Similarly for deletions, but those occur in blocks rather than on
1827      edges.  */
1828   for (i = 0; i < (unsigned) last_basic_block_for_fn (cfun); i++)
1829     {
1830       EXECUTE_IF_SET_IN_BITMAP (pre_delete_map[i], 0, j, sbi)
1831 	deletions[j]++;
1832     }
1833 
1834   /* Now that we have accurate counts, iterate over the elements in the
1835      hash table and see if any need too many insertions relative to the
1836      number of evaluations that can be removed.  If so, mark them in
1837      PRUNE_EXPRS.  */
1838   for (j = 0; j < (unsigned) n_elems; j++)
1839     if (deletions[j]
1840 	&& ((unsigned) insertions[j] / deletions[j]) > MAX_GCSE_INSERTION_RATIO)
1841       bitmap_set_bit (prune_exprs, j);
1842 
1843   /* Now prune PRE_INSERT_MAP and PRE_DELETE_MAP based on PRUNE_EXPRS.  */
1844   EXECUTE_IF_SET_IN_BITMAP (prune_exprs, 0, j, sbi)
1845     {
1846       for (i = 0; i < (unsigned) n_edges_for_fn (cfun); i++)
1847 	bitmap_clear_bit (pre_insert_map[i], j);
1848 
1849       for (i = 0; i < (unsigned) last_basic_block_for_fn (cfun); i++)
1850 	bitmap_clear_bit (pre_delete_map[i], j);
1851     }
1852 
1853   free (insertions);
1854   free (deletions);
1855 }
1856 
1857 /* Top level routine to do the dataflow analysis needed by PRE.  */
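
/* Roughly, the pipeline below is: compute the local TRANSP/COMP/ANTLOC
   sets, prune expressions that cannot safely be placed on abnormal
   edges, derive AE_KILL as ~(TRANSP | COMP), let pre_edge_lcm solve
   the lazy code motion equations into PRE_INSERT_MAP (per edge) and
   PRE_DELETE_MAP (per block), and finally discard expressions whose
   insertion/deletion ratio is too high.  */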
1858 
1859 static struct edge_list *
1860 compute_pre_data (void)
1861 {
1862   struct edge_list *edge_list;
1863   basic_block bb;
1864 
1865   compute_local_properties (transp, comp, antloc, &expr_hash_table);
1866   prune_expressions (true);
1867   bitmap_vector_clear (ae_kill, last_basic_block_for_fn (cfun));
1868 
1869   /* Compute ae_kill for each basic block using:
1870 
1871      ~(TRANSP | COMP)
1872   */
1873 
1874   FOR_EACH_BB_FN (bb, cfun)
1875     {
1876       bitmap_ior (ae_kill[bb->index], transp[bb->index], comp[bb->index]);
1877       bitmap_not (ae_kill[bb->index], ae_kill[bb->index]);
1878     }
1879 
1880   edge_list = pre_edge_lcm (expr_hash_table.n_elems, transp, comp, antloc,
1881 			    ae_kill, &pre_insert_map, &pre_delete_map);
1882   sbitmap_vector_free (antloc);
1883   antloc = NULL;
1884   sbitmap_vector_free (ae_kill);
1885   ae_kill = NULL;
1886 
1887   prune_insertions_deletions (expr_hash_table.n_elems);
1888 
1889   return edge_list;
1890 }
1891 
1892 /* PRE utilities */
1893 
1894 /* Return nonzero if an occurrence of expression EXPR in OCCR_BB would reach
1895    block BB.
1896 
1897    VISITED is a pointer to a working buffer for tracking which BB's have
1898    been visited.  It is NULL for the top-level call.
1899 
1900    We treat reaching expressions that go through blocks containing the same
1901    reaching expression as "not reaching".  E.g. if EXPR is generated in blocks
1902    2 and 3, INSN is in block 4, and 2->3->4, we treat the expression in block
1903    2 as not reaching.  The intent is to improve the probability of finding
1904    only one reaching expression and to reduce register lifetimes by picking
1905    the closest such expression.  */
1906 
1907 static int
1908 pre_expr_reaches_here_p_work (basic_block occr_bb, struct gcse_expr *expr,
1909 			      basic_block bb, char *visited)
1910 {
1911   edge pred;
1912   edge_iterator ei;
1913 
1914   FOR_EACH_EDGE (pred, ei, bb->preds)
1915     {
1916       basic_block pred_bb = pred->src;
1917 
1918       if (pred->src == ENTRY_BLOCK_PTR_FOR_FN (cfun)
1919 	  /* Has this predecessor already been visited?  */
1920 	  || visited[pred_bb->index])
1921 	;/* Nothing to do.  */
1922 
1923       /* Does this predecessor generate this expression?  */
1924       else if (bitmap_bit_p (comp[pred_bb->index], expr->bitmap_index))
1925 	{
1926 	  /* Is this the occurrence we're looking for?
1927 	     Note that there's only one generating occurrence per block
1928 	     so we just need to check the block number.  */
1929 	  if (occr_bb == pred_bb)
1930 	    return 1;
1931 
1932 	  visited[pred_bb->index] = 1;
1933 	}
1934       /* Ignore this predecessor if it kills the expression.  */
1935       else if (! bitmap_bit_p (transp[pred_bb->index], expr->bitmap_index))
1936 	visited[pred_bb->index] = 1;
1937 
1938       /* Neither gen nor kill.  */
1939       else
1940 	{
1941 	  visited[pred_bb->index] = 1;
1942 	  if (pre_expr_reaches_here_p_work (occr_bb, expr, pred_bb, visited))
1943 	    return 1;
1944 	}
1945     }
1946 
1947   /* All paths have been checked.  */
1948   return 0;
1949 }
1950 
1951 /* The wrapper for pre_expr_reaches_here_p_work that ensures that any
1952    memory allocated for that function is returned.  */
1953 
1954 static int
1955 pre_expr_reaches_here_p (basic_block occr_bb, struct gcse_expr *expr, basic_block bb)
1956 {
1957   int rval;
1958   char *visited = XCNEWVEC (char, last_basic_block_for_fn (cfun));
1959 
1960   rval = pre_expr_reaches_here_p_work (occr_bb, expr, bb, visited);
1961 
1962   free (visited);
1963   return rval;
1964 }
1965 
1966 /* Generate RTL to copy an EXPR to its `reaching_reg' and return it.  */
1967 
1968 static rtx_insn *
1969 process_insert_insn (struct gcse_expr *expr)
1970 {
1971   rtx reg = expr->reaching_reg;
1972   /* Copy the expression to make sure we don't have any sharing issues.  */
1973   rtx exp = copy_rtx (expr->expr);
1974   rtx_insn *pat;
1975 
1976   start_sequence ();
1977 
1978   /* If the expression is something that's an operand, like a constant,
1979      just copy it to a register.  */
1980   if (general_operand (exp, GET_MODE (reg)))
1981     emit_move_insn (reg, exp);
1982 
1983   /* Otherwise, make a new insn to compute this expression and make sure the
1984      insn will be recognized (this also adds any needed CLOBBERs).  */
1985   else
1986     {
1987       rtx_insn *insn = emit_insn (gen_rtx_SET (reg, exp));
1988 
1989       if (insn_invalid_p (insn, false))
1990 	gcc_unreachable ();
1991     }
1992 
1993   pat = get_insns ();
1994   end_sequence ();
1995 
1996   return pat;
1997 }
1998 
1999 /* Add EXPR to the end of basic block BB.
2000 
2001    This is used by both PRE and code hoisting.  */
2002 
2003 static void
2004 insert_insn_end_basic_block (struct gcse_expr *expr, basic_block bb)
2005 {
2006   rtx_insn *insn = BB_END (bb);
2007   rtx_insn *new_insn;
2008   rtx reg = expr->reaching_reg;
2009   int regno = REGNO (reg);
2010   rtx_insn *pat, *pat_end;
2011 
2012   pat = process_insert_insn (expr);
2013   gcc_assert (pat && INSN_P (pat));
2014 
2015   pat_end = pat;
2016   while (NEXT_INSN (pat_end) != NULL_RTX)
2017     pat_end = NEXT_INSN (pat_end);
2018 
2019   /* If the last insn is a jump, insert EXPR in front [taking care to
2020      handle cc0, etc. properly].  Similarly we need to take care of trapping
2021      instructions in the presence of non-call exceptions.  */
2022 
2023   if (JUMP_P (insn)
2024       || (NONJUMP_INSN_P (insn)
2025 	  && (!single_succ_p (bb)
2026 	      || single_succ_edge (bb)->flags & EDGE_ABNORMAL)))
2027     {
2028       /* FIXME: 'twould be nice to call prev_cc0_setter here but it aborts
2029 	 if cc0 isn't set.  */
2030       if (HAVE_cc0)
2031 	{
2032 	  rtx note = find_reg_note (insn, REG_CC_SETTER, NULL_RTX);
2033 	  if (note)
2034 	    insn = safe_as_a <rtx_insn *> (XEXP (note, 0));
2035 	  else
2036 	    {
2037 	      rtx_insn *maybe_cc0_setter = prev_nonnote_insn (insn);
2038 	      if (maybe_cc0_setter
2039 		  && INSN_P (maybe_cc0_setter)
2040 		  && sets_cc0_p (PATTERN (maybe_cc0_setter)))
2041 		insn = maybe_cc0_setter;
2042 	    }
2043 	}
2044 
2045       /* FIXME: What if something in cc0/jump uses value set in new insn?  */
2046       new_insn = emit_insn_before_noloc (pat, insn, bb);
2047     }
2048 
2049   /* Likewise if the last insn is a call, as will happen in the presence
2050      of exception handling.  */
2051   else if (CALL_P (insn)
2052 	   && (!single_succ_p (bb)
2053 	       || single_succ_edge (bb)->flags & EDGE_ABNORMAL))
2054     {
2055       /* Keeping in mind targets with small register classes and parameters
2056          in registers, we search backward and place the instructions before
2057 	 the first parameter is loaded.  Do this for everyone for consistency
2058 	 and a presumption that we'll get better code elsewhere as well.  */
2059 
2060       /* Since different machines initialize their parameter registers
2061 	 in different orders, assume nothing.  Collect the set of all
2062 	 parameter registers.  */
2063       insn = find_first_parameter_load (insn, BB_HEAD (bb));
2064 
2065       /* If we found all the parameter loads, then we want to insert
2066 	 before the first parameter load.
2067 
2068 	 If we did not find all the parameter loads, then we might have
2069 	 stopped on the head of the block, which could be a CODE_LABEL.
2070 	 If we inserted before the CODE_LABEL, then we would be putting
2071 	 the insn in the wrong basic block.  In that case, put the insn
2072 	 after the CODE_LABEL.  Also, respect NOTE_INSN_BASIC_BLOCK.  */
2073       while (LABEL_P (insn)
2074 	     || NOTE_INSN_BASIC_BLOCK_P (insn))
2075 	insn = NEXT_INSN (insn);
2076 
2077       new_insn = emit_insn_before_noloc (pat, insn, bb);
2078     }
2079   else
2080     new_insn = emit_insn_after_noloc (pat, insn, bb);
2081 
2082   while (1)
2083     {
2084       if (INSN_P (pat))
2085 	add_label_notes (PATTERN (pat), new_insn);
2086       if (pat == pat_end)
2087 	break;
2088       pat = NEXT_INSN (pat);
2089     }
2090 
2091   gcse_create_count++;
2092 
2093   if (dump_file)
2094     {
2095       fprintf (dump_file, "PRE/HOIST: end of bb %d, insn %d, ",
2096 	       bb->index, INSN_UID (new_insn));
2097       fprintf (dump_file, "copying expression %d to reg %d\n",
2098 	       expr->bitmap_index, regno);
2099     }
2100 }
2101 
2102 /* Insert partially redundant expressions on edges in the CFG to make
2103    the expressions fully redundant.  */
2104 
2105 static int
2106 pre_edge_insert (struct edge_list *edge_list, struct gcse_expr **index_map)
2107 {
2108   int e, i, j, num_edges, set_size, did_insert = 0;
2109   sbitmap *inserted;
2110 
2111   /* Where PRE_INSERT_MAP is nonzero, we add the expression on that edge
2112      if it reaches any of the deleted expressions.  */
2113 
2114   set_size = pre_insert_map[0]->size;
2115   num_edges = NUM_EDGES (edge_list);
2116   inserted = sbitmap_vector_alloc (num_edges, expr_hash_table.n_elems);
2117   bitmap_vector_clear (inserted, num_edges);
2118 
2119   for (e = 0; e < num_edges; e++)
2120     {
2121       int indx;
2122       basic_block bb = INDEX_EDGE_PRED_BB (edge_list, e);
2123 
2124       for (i = indx = 0; i < set_size; i++, indx += SBITMAP_ELT_BITS)
2125 	{
2126 	  SBITMAP_ELT_TYPE insert = pre_insert_map[e]->elms[i];
2127 
2128 	  for (j = indx;
2129 	       insert && j < (int) expr_hash_table.n_elems;
2130 	       j++, insert >>= 1)
2131 	    if ((insert & 1) != 0 && index_map[j]->reaching_reg != NULL_RTX)
2132 	      {
2133 		struct gcse_expr *expr = index_map[j];
2134 		struct gcse_occr *occr;
2135 
2136 		/* Now look at each deleted occurrence of this expression.  */
2137 		for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
2138 		  {
2139 		    if (! occr->deleted_p)
2140 		      continue;
2141 
2142 		    /* Insert this expression on this edge if it would
2143 		       reach the deleted occurrence in BB.  */
2144 		    if (!bitmap_bit_p (inserted[e], j))
2145 		      {
2146 			rtx_insn *insn;
2147 			edge eg = INDEX_EDGE (edge_list, e);
2148 
2149 			/* We can't insert anything on an abnormal and
2150 			   critical edge, so we insert the insn at the end of
2151 			   the previous block. There are several alternatives
2152 			   detailed in Morgan's book, p. 277 (sec 10.5), for
2153 			   handling this situation.  This one is easiest for
2154 			   now.  */
2155 
2156 			if (eg->flags & EDGE_ABNORMAL)
2157 			  insert_insn_end_basic_block (index_map[j], bb);
2158 			else
2159 			  {
2160 			    insn = process_insert_insn (index_map[j]);
2161 			    insert_insn_on_edge (insn, eg);
2162 			  }
2163 
2164 			if (dump_file)
2165 			  {
2166 			    fprintf (dump_file, "PRE: edge (%d,%d), ",
2167 				     bb->index,
2168 				     INDEX_EDGE_SUCC_BB (edge_list, e)->index);
2169 			    fprintf (dump_file, "copy expression %d\n",
2170 				     expr->bitmap_index);
2171 			  }
2172 
2173 			update_ld_motion_stores (expr);
2174 			bitmap_set_bit (inserted[e], j);
2175 			did_insert = 1;
2176 			gcse_create_count++;
2177 		      }
2178 		  }
2179 	      }
2180 	}
2181     }
2182 
2183   sbitmap_vector_free (inserted);
2184   return did_insert;
2185 }
2186 
2187 /* Copy the result of EXPR->EXPR generated by INSN to EXPR->REACHING_REG.
2188    Given "old_reg <- expr" (INSN), instead of adding after it
2189      reaching_reg <- old_reg
2190    it's better to do the following:
2191      reaching_reg <- expr
2192      old_reg      <- reaching_reg
2193    because this way copy propagation can discover additional PRE
2194    opportunities.  But if this fails, we try the old way.
2195    When "expr" is a store, i.e.
2196    given "MEM <- old_reg", instead of adding after it
2197      reaching_reg <- old_reg
2198    it's better to add it before as follows:
2199      reaching_reg <- old_reg
2200      MEM          <- reaching_reg.  */
2201 
2202 static void
2203 pre_insert_copy_insn (struct gcse_expr *expr, rtx_insn *insn)
2204 {
2205   rtx reg = expr->reaching_reg;
2206   int regno = REGNO (reg);
2207   int indx = expr->bitmap_index;
2208   rtx pat = PATTERN (insn);
2209   rtx set, first_set;
2210   rtx_insn *new_insn;
2211   rtx old_reg;
2212   int i;
2213 
2214   /* This block matches the logic in hash_scan_insn.  */
2215   switch (GET_CODE (pat))
2216     {
2217     case SET:
2218       set = pat;
2219       break;
2220 
2221     case PARALLEL:
2222       /* Search through the parallel looking for the set whose
2223 	 source was the expression that we're interested in.  */
2224       first_set = NULL_RTX;
2225       set = NULL_RTX;
2226       for (i = 0; i < XVECLEN (pat, 0); i++)
2227 	{
2228 	  rtx x = XVECEXP (pat, 0, i);
2229 	  if (GET_CODE (x) == SET)
2230 	    {
2231 	      /* If the source was a REG_EQUAL or REG_EQUIV note, we
2232 		 may not find an equivalent expression, but in this
2233 		 case the PARALLEL will have a single set.  */
2234 	      if (first_set == NULL_RTX)
2235 		first_set = x;
2236 	      if (expr_equiv_p (SET_SRC (x), expr->expr))
2237 	        {
2238 	          set = x;
2239 	          break;
2240 	        }
2241 	    }
2242 	}
2243 
2244       gcc_assert (first_set);
2245       if (set == NULL_RTX)
2246         set = first_set;
2247       break;
2248 
2249     default:
2250       gcc_unreachable ();
2251     }
2252 
2253   if (REG_P (SET_DEST (set)))
2254     {
2255       old_reg = SET_DEST (set);
2256       /* Check if we can modify the set destination in the original insn.  */
2257       if (validate_change (insn, &SET_DEST (set), reg, 0))
2258         {
2259           new_insn = gen_move_insn (old_reg, reg);
2260           new_insn = emit_insn_after (new_insn, insn);
2261         }
2262       else
2263         {
2264           new_insn = gen_move_insn (reg, old_reg);
2265           new_insn = emit_insn_after (new_insn, insn);
2266         }
2267     }
2268   else /* This is possible only in case of a store to memory.  */
2269     {
2270       old_reg = SET_SRC (set);
2271       new_insn = gen_move_insn (reg, old_reg);
2272 
2273       /* Check if we can modify the set source in the original insn.  */
2274       if (validate_change (insn, &SET_SRC (set), reg, 0))
2275         new_insn = emit_insn_before (new_insn, insn);
2276       else
2277         new_insn = emit_insn_after (new_insn, insn);
2278     }
2279 
2280   gcse_create_count++;
2281 
2282   if (dump_file)
2283     fprintf (dump_file,
2284 	     "PRE: bb %d, insn %d, copy expression %d in insn %d to reg %d\n",
2285 	      BLOCK_FOR_INSN (insn)->index, INSN_UID (new_insn), indx,
2286 	      INSN_UID (insn), regno);
2287 }
2288 
2289 /* Copy available expressions that reach the redundant expression
2290    to `reaching_reg'.  */
2291 
2292 static void
2293 pre_insert_copies (void)
2294 {
2295   unsigned int i, added_copy;
2296   struct gcse_expr *expr;
2297   struct gcse_occr *occr;
2298   struct gcse_occr *avail;
2299 
2300   /* For each available expression in the table, copy the result to
2301      `reaching_reg' if the expression reaches a deleted one.
2302 
2303      ??? The current algorithm is rather brute force.
2304      Need to do some profiling.  */
2305 
2306   for (i = 0; i < expr_hash_table.size; i++)
2307     for (expr = expr_hash_table.table[i]; expr; expr = expr->next_same_hash)
2308       {
2309 	/* If the basic block isn't reachable, PPOUT will be TRUE.  However,
2310 	   we don't want to insert a copy here because the expression may not
2311 	   really be redundant.  So only insert an insn if the expression was
2312 	   deleted.  This test also avoids further processing if the
2313 	   expression wasn't deleted anywhere.  */
2314 	if (expr->reaching_reg == NULL)
2315 	  continue;
2316 
2317 	/* Set when we add a copy for that expression.  */
2318 	added_copy = 0;
2319 
2320 	for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
2321 	  {
2322 	    if (! occr->deleted_p)
2323 	      continue;
2324 
2325 	    for (avail = expr->avail_occr; avail != NULL; avail = avail->next)
2326 	      {
2327 		rtx_insn *insn = avail->insn;
2328 
2329 		/* No need to handle this one if handled already.  */
2330 		if (avail->copied_p)
2331 		  continue;
2332 
2333 		/* Don't handle this one if it's a redundant one.  */
2334 		if (insn->deleted ())
2335 		  continue;
2336 
2337 		/* Or if the expression doesn't reach the deleted one.  */
2338 		if (! pre_expr_reaches_here_p (BLOCK_FOR_INSN (avail->insn),
2339 					       expr,
2340 					       BLOCK_FOR_INSN (occr->insn)))
2341 		  continue;
2342 
2343                 added_copy = 1;
2344 
2345 		/* Copy the result of avail to reaching_reg.  */
2346 		pre_insert_copy_insn (expr, insn);
2347 		avail->copied_p = 1;
2348 	      }
2349 	  }
2350 
2351 	  if (added_copy)
2352             update_ld_motion_stores (expr);
2353       }
2354 }
2355 
2356 struct set_data
2357 {
2358   rtx_insn *insn;
2359   const_rtx set;
2360   int nsets;
2361 };
2362 
2363 /* Increment number of sets and record set in DATA.  */
2364 
2365 static void
2366 record_set_data (rtx dest, const_rtx set, void *data)
2367 {
2368   struct set_data *s = (struct set_data *)data;
2369 
2370   if (GET_CODE (set) == SET)
2371     {
2372       /* We allow insns having multiple sets, where all but one are
2373 	 dead, as single-set insns.  In the common case only a single
2374 	 set is present, so we want to avoid checking for REG_UNUSED
2375 	 notes unless necessary.  */
2376       if (s->nsets == 1
2377 	  && find_reg_note (s->insn, REG_UNUSED, SET_DEST (s->set))
2378 	  && !side_effects_p (s->set))
2379 	s->nsets = 0;
2380 
2381       if (!s->nsets)
2382 	{
2383 	  /* Record this set.  */
2384 	  s->nsets += 1;
2385 	  s->set = set;
2386 	}
2387       else if (!find_reg_note (s->insn, REG_UNUSED, dest)
2388 	       || side_effects_p (set))
2389 	s->nsets += 1;
2390     }
2391 }
2392 
2393 static const_rtx
2394 single_set_gcse (rtx_insn *insn)
2395 {
2396   struct set_data s;
2397   rtx pattern;
2398 
2399   gcc_assert (INSN_P (insn));
2400 
2401   /* Optimize common case.  */
2402   pattern = PATTERN (insn);
2403   if (GET_CODE (pattern) == SET)
2404     return pattern;
2405 
2406   s.insn = insn;
2407   s.nsets = 0;
2408   note_stores (pattern, record_set_data, &s);
2409 
2410   /* Considered invariant insns have exactly one set.  */
2411   gcc_assert (s.nsets == 1);
2412   return s.set;
2413 }
2414 
2415 /* Emit move from SRC to DEST noting the equivalence with expression computed
2416    in INSN.  */
2417 
2418 static rtx_insn *
2419 gcse_emit_move_after (rtx dest, rtx src, rtx_insn *insn)
2420 {
2421   rtx_insn *new_rtx;
2422   const_rtx set = single_set_gcse (insn);
2423   rtx set2;
2424   rtx note;
2425   rtx eqv = NULL_RTX;
2426 
2427   /* This should never fail since we're creating a reg->reg copy
2428      we've verified to be valid.  */
2429 
2430   new_rtx = emit_insn_after (gen_move_insn (dest, src), insn);
2431 
2432   /* Note the equivalence for local CSE pass.  Take the note from the old
2433      set if there was one.  Otherwise record the SET_SRC from the old set
2434      unless DEST is also an operand of the SET_SRC.  */
2435   set2 = single_set (new_rtx);
2436   if (!set2 || !rtx_equal_p (SET_DEST (set2), dest))
2437     return new_rtx;
2438   if ((note = find_reg_equal_equiv_note (insn)))
2439     eqv = XEXP (note, 0);
2440   else if (! REG_P (dest)
2441 	   || ! reg_mentioned_p (dest, SET_SRC (set)))
2442     eqv = SET_SRC (set);
2443 
2444   if (eqv != NULL_RTX)
2445     set_unique_reg_note (new_rtx, REG_EQUAL, copy_insn_1 (eqv));
2446 
2447   return new_rtx;
2448 }
2449 
2450 /* Delete redundant computations.
2451    Deletion is done by changing the insn to copy the `reaching_reg' of
2452    the expression into the result of the SET.  It is left to later passes
2453    to propagate the copy or eliminate it.
2454 
2455    Return nonzero if a change is made.  */
2456 
2457 static int
2458 pre_delete (void)
2459 {
2460   unsigned int i;
2461   int changed;
2462   struct gcse_expr *expr;
2463   struct gcse_occr *occr;
2464 
2465   changed = 0;
2466   for (i = 0; i < expr_hash_table.size; i++)
2467     for (expr = expr_hash_table.table[i]; expr; expr = expr->next_same_hash)
2468       {
2469 	int indx = expr->bitmap_index;
2470 
2471 	/* We only need to search antic_occr since we require ANTLOC != 0.  */
2472 	for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
2473 	  {
2474 	    rtx_insn *insn = occr->insn;
2475 	    rtx set;
2476 	    basic_block bb = BLOCK_FOR_INSN (insn);
2477 
2478 	    /* We only delete insns that have a single_set.  */
2479 	    if (bitmap_bit_p (pre_delete_map[bb->index], indx)
2480 		&& (set = single_set (insn)) != 0
2481                 && dbg_cnt (pre_insn))
2482 	      {
2483 		/* Create a pseudo-reg to store the result of reaching
2484 		   expressions into.  Get the mode for the new pseudo from
2485 		   the mode of the original destination pseudo.  */
2486 		if (expr->reaching_reg == NULL)
2487 		  expr->reaching_reg = gen_reg_rtx_and_attrs (SET_DEST (set));
2488 
2489 		gcse_emit_move_after (SET_DEST (set), expr->reaching_reg, insn);
2490 		delete_insn (insn);
2491 		occr->deleted_p = 1;
2492 		changed = 1;
2493 		gcse_subst_count++;
2494 
2495 		if (dump_file)
2496 		  {
2497 		    fprintf (dump_file,
2498 			     "PRE: redundant insn %d (expression %d) in ",
2499 			       INSN_UID (insn), indx);
2500 		    fprintf (dump_file, "bb %d, reaching reg is %d\n",
2501 			     bb->index, REGNO (expr->reaching_reg));
2502 		  }
2503 	      }
2504 	  }
2505       }
2506 
2507   return changed;
2508 }
2509 
2510 /* Perform GCSE optimizations using PRE.
2511    This is called by one_pre_gcse_pass after all the dataflow analysis
2512    has been done.
2513 
2514    This is based on the original Morel-Renvoise paper, Fred Chow's thesis, and
2515    lazy code motion from Knoop, Ruthing and Steffen as described in Advanced
2516    Compiler Design and Implementation.
2517 
2518    ??? A new pseudo reg is created to hold the reaching expression.  The nice
2519    thing about the classical approach is that it would try to use an existing
2520    reg.  If the register can't be adequately optimized [i.e. we introduce
2521    reload problems], one could add a pass here to propagate the new register
2522    through the block.
2523 
2524    ??? We don't handle single sets in PARALLELs because we're [currently] not
2525    able to copy the rest of the parallel when we insert copies to create full
2526    redundancies from partial redundancies.  However, there's no reason why we
2527    can't handle PARALLELs in the cases where there are no partial
2528    redundancies.  */
2529 
2530 static int
2531 pre_gcse (struct edge_list *edge_list)
2532 {
2533   unsigned int i;
2534   int did_insert, changed;
2535   struct gcse_expr **index_map;
2536   struct gcse_expr *expr;
2537 
2538   /* Compute a mapping from expression number (`bitmap_index') to
2539      hash table entry.  */
2540 
2541   index_map = XCNEWVEC (struct gcse_expr *, expr_hash_table.n_elems);
2542   for (i = 0; i < expr_hash_table.size; i++)
2543     for (expr = expr_hash_table.table[i]; expr; expr = expr->next_same_hash)
2544       index_map[expr->bitmap_index] = expr;
2545 
2546   /* Delete the redundant insns first so that
2547      - we know what register to use for the new insns and for the other
2548        ones with reaching expressions
2549      - we know which insns are redundant when we go to create copies  */
2550 
2551   changed = pre_delete ();
2552   did_insert = pre_edge_insert (edge_list, index_map);
2553 
2554   /* In other places with reaching expressions, copy the expression to the
2555      specially allocated pseudo-reg that reaches the redundant expr.  */
2556   pre_insert_copies ();
2557   if (did_insert)
2558     {
2559       commit_edge_insertions ();
2560       changed = 1;
2561     }
2562 
2563   free (index_map);
2564   return changed;
2565 }
2566 
2567 /* Top level routine to perform one PRE GCSE pass.
2568 
2569    Return nonzero if a change was made.  */
2570 
2571 static int
2572 one_pre_gcse_pass (void)
2573 {
2574   int changed = 0;
2575 
2576   gcse_subst_count = 0;
2577   gcse_create_count = 0;
2578 
2579   /* Return if there's nothing to do, or it is too expensive.  */
2580   if (n_basic_blocks_for_fn (cfun) <= NUM_FIXED_BLOCKS + 1
2581       || gcse_or_cprop_is_too_expensive (_("PRE disabled")))
2582     return 0;
2583 
2584   /* We need alias.  */
2585   init_alias_analysis ();
2586 
2587   bytes_used = 0;
2588   gcc_obstack_init (&gcse_obstack);
2589   alloc_gcse_mem ();
2590 
2591   alloc_hash_table (&expr_hash_table);
2592   add_noreturn_fake_exit_edges ();
2593   if (flag_gcse_lm)
2594     compute_ld_motion_mems ();
2595 
2596   compute_hash_table (&expr_hash_table);
2597   if (flag_gcse_lm)
2598     trim_ld_motion_mems ();
2599   if (dump_file)
2600     dump_hash_table (dump_file, "Expression", &expr_hash_table);
2601 
2602   if (expr_hash_table.n_elems > 0)
2603     {
2604       struct edge_list *edge_list;
2605       alloc_pre_mem (last_basic_block_for_fn (cfun), expr_hash_table.n_elems);
2606       edge_list = compute_pre_data ();
2607       changed |= pre_gcse (edge_list);
2608       free_edge_list (edge_list);
2609       free_pre_mem ();
2610     }
2611 
2612   if (flag_gcse_lm)
2613     free_ld_motion_mems ();
2614   remove_fake_exit_edges ();
2615   free_hash_table (&expr_hash_table);
2616 
2617   free_gcse_mem ();
2618   obstack_free (&gcse_obstack, NULL);
2619 
2620   /* We are finished with alias.  */
2621   end_alias_analysis ();
2622 
2623   if (dump_file)
2624     {
2625       fprintf (dump_file, "PRE GCSE of %s, %d basic blocks, %d bytes needed, ",
2626 	       current_function_name (), n_basic_blocks_for_fn (cfun),
2627 	       bytes_used);
2628       fprintf (dump_file, "%d substs, %d insns created\n",
2629 	       gcse_subst_count, gcse_create_count);
2630     }
2631 
2632   return changed;
2633 }
2634 
2635 /* If X contains any LABEL_REF's, add REG_LABEL_OPERAND notes for them
2636    to INSN.  If such notes are added to an insn which references a
2637    CODE_LABEL, the LABEL_NUSES count is incremented.  We have to add
2638    that note, because the following loop optimization pass requires
2639    them.  */
2640 
2641 /* ??? If there was a jump optimization pass after gcse and before loop,
2642    then we would not need to do this here, because jump would add the
2643    necessary REG_LABEL_OPERAND and REG_LABEL_TARGET notes.  */
2644 
2645 static void
2646 add_label_notes (rtx x, rtx_insn *insn)
2647 {
2648   enum rtx_code code = GET_CODE (x);
2649   int i, j;
2650   const char *fmt;
2651 
2652   if (code == LABEL_REF && !LABEL_REF_NONLOCAL_P (x))
2653     {
2654       /* This code used to ignore labels that referred to dispatch tables to
2655 	 avoid flow generating (slightly) worse code.
2656 
2657 	 We no longer ignore such label references (see LABEL_REF handling in
2658 	 mark_jump_label for additional information).  */
2659 
2660       /* There's no reason for current users to emit jump-insns with
2661 	 such a LABEL_REF, so we don't have to handle REG_LABEL_TARGET
2662 	 notes.  */
2663       gcc_assert (!JUMP_P (insn));
2664       add_reg_note (insn, REG_LABEL_OPERAND, label_ref_label (x));
2665 
2666       if (LABEL_P (label_ref_label (x)))
2667 	LABEL_NUSES (label_ref_label (x))++;
2668 
2669       return;
2670     }
2671 
2672   for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
2673     {
2674       if (fmt[i] == 'e')
2675 	add_label_notes (XEXP (x, i), insn);
2676       else if (fmt[i] == 'E')
2677 	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
2678 	  add_label_notes (XVECEXP (x, i, j), insn);
2679     }
2680 }
2681 
2682 /* Code Hoisting variables and subroutines.  */
2683 
2684 /* Very busy expressions.  */
2685 static sbitmap *hoist_vbein;
2686 static sbitmap *hoist_vbeout;
2687 
2688 /* ??? We could compute post dominators and run this algorithm in
2689    reverse to perform tail merging; doing so would probably be
2690    more effective than the tail merging code in jump.c.
2691 
2692    It's unclear if tail merging could be run in parallel with
2693    code hoisting.  It would be nice.  */
2694 
2695 /* Allocate vars used for code hoisting analysis.  */
2696 
2697 static void
2698 alloc_code_hoist_mem (int n_blocks, int n_exprs)
2699 {
2700   antloc = sbitmap_vector_alloc (n_blocks, n_exprs);
2701   transp = sbitmap_vector_alloc (n_blocks, n_exprs);
2702   comp = sbitmap_vector_alloc (n_blocks, n_exprs);
2703 
2704   hoist_vbein = sbitmap_vector_alloc (n_blocks, n_exprs);
2705   hoist_vbeout = sbitmap_vector_alloc (n_blocks, n_exprs);
2706 }
2707 
2708 /* Free vars used for code hoisting analysis.  */
2709 
2710 static void
2711 free_code_hoist_mem (void)
2712 {
2713   sbitmap_vector_free (antloc);
2714   sbitmap_vector_free (transp);
2715   sbitmap_vector_free (comp);
2716 
2717   sbitmap_vector_free (hoist_vbein);
2718   sbitmap_vector_free (hoist_vbeout);
2719 
2720   free_dominance_info (CDI_DOMINATORS);
2721 }
2722 
2723 /* Compute the very busy expressions at entry/exit from each block.
2724 
2725    An expression is very busy if all paths from a given point
2726    compute the expression.  */
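
/* As a sketch, the fixed point computed below is

     VBEout(bb) = COMP(bb) | (intersection over successors s of VBEin(s))
     VBEin(bb)  = ANTLOC(bb) | (TRANSP(bb) & VBEout(bb))

   iterated over the blocks in reverse order until nothing changes.  */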
2727 
2728 static void
2729 compute_code_hoist_vbeinout (void)
2730 {
2731   int changed, passes;
2732   basic_block bb;
2733 
2734   bitmap_vector_clear (hoist_vbeout, last_basic_block_for_fn (cfun));
2735   bitmap_vector_clear (hoist_vbein, last_basic_block_for_fn (cfun));
2736 
2737   passes = 0;
2738   changed = 1;
2739 
2740   while (changed)
2741     {
2742       changed = 0;
2743 
2744       /* We scan the blocks in the reverse order to speed up
2745 	 the convergence.  */
2746       FOR_EACH_BB_REVERSE_FN (bb, cfun)
2747 	{
2748 	  if (bb->next_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
2749 	    {
2750 	      bitmap_intersection_of_succs (hoist_vbeout[bb->index],
2751 					    hoist_vbein, bb);
2752 
2753 	      /* Include expressions in VBEout that are calculated
2754 		 in BB and available at its end.  */
2755 	      bitmap_ior (hoist_vbeout[bb->index],
2756 			      hoist_vbeout[bb->index], comp[bb->index]);
2757 	    }
2758 
2759 	  changed |= bitmap_or_and (hoist_vbein[bb->index],
2760 					      antloc[bb->index],
2761 					      hoist_vbeout[bb->index],
2762 					      transp[bb->index]);
2763 	}
2764 
2765       passes++;
2766     }
2767 
2768   if (dump_file)
2769     {
2770       fprintf (dump_file, "hoisting vbeinout computation: %d passes\n", passes);
2771 
2772       FOR_EACH_BB_FN (bb, cfun)
2773         {
2774 	  fprintf (dump_file, "vbein (%d): ", bb->index);
2775 	  dump_bitmap_file (dump_file, hoist_vbein[bb->index]);
2776 	  fprintf (dump_file, "vbeout(%d): ", bb->index);
2777 	  dump_bitmap_file (dump_file, hoist_vbeout[bb->index]);
2778 	}
2779     }
2780 }
2781 
2782 /* Top level routine to do the dataflow analysis needed by code hoisting.  */
2783 
2784 static void
2785 compute_code_hoist_data (void)
2786 {
2787   compute_local_properties (transp, comp, antloc, &expr_hash_table);
2788   prune_expressions (false);
2789   compute_code_hoist_vbeinout ();
2790   calculate_dominance_info (CDI_DOMINATORS);
2791   if (dump_file)
2792     fprintf (dump_file, "\n");
2793 }
2794 
2795 /* Update register pressure for BB when hoisting an expression from
2796    instruction FROM, if live ranges of inputs are shrunk.  Also
2797    maintain live_in information if the live range of a register
2798    referenced in FROM is shrunk.
2799 
2800    Return 0 if register pressure doesn't change, otherwise return
2801    the number by which register pressure is decreased.
2802 
2803    NOTE: Register pressure won't be increased in this function.  */
2804 
2805 static int
2806 update_bb_reg_pressure (basic_block bb, rtx_insn *from)
2807 {
2808   rtx dreg;
2809   rtx_insn *insn;
2810   basic_block succ_bb;
2811   df_ref use, op_ref;
2812   edge succ;
2813   edge_iterator ei;
2814   int decreased_pressure = 0;
2815   int nregs;
2816   enum reg_class pressure_class;
2817 
2818   FOR_EACH_INSN_USE (use, from)
2819     {
2820       dreg = DF_REF_REAL_REG (use);
2821       /* The live range of the register is shrunk only if it isn't:
2822 	 1. referenced on any path from the end of this block to EXIT, or
2823 	 2. referenced by insns other than FROM in this block.  */
2824       FOR_EACH_EDGE (succ, ei, bb->succs)
2825 	{
2826 	  succ_bb = succ->dest;
2827 	  if (succ_bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
2828 	    continue;
2829 
2830 	  if (bitmap_bit_p (BB_DATA (succ_bb)->live_in, REGNO (dreg)))
2831 	    break;
2832 	}
2833       if (succ != NULL)
2834 	continue;
2835 
2836       op_ref = DF_REG_USE_CHAIN (REGNO (dreg));
2837       for (; op_ref; op_ref = DF_REF_NEXT_REG (op_ref))
2838 	{
2839 	  if (!DF_REF_INSN_INFO (op_ref))
2840 	    continue;
2841 
2842 	  insn = DF_REF_INSN (op_ref);
2843 	  if (BLOCK_FOR_INSN (insn) == bb
2844 	      && NONDEBUG_INSN_P (insn) && insn != from)
2845 	    break;
2846 	}
2847 
2848       pressure_class = get_regno_pressure_class (REGNO (dreg), &nregs);
2849       /* Decrease register pressure and update live_in information for
2850 	 this block.  */
2851       if (!op_ref && pressure_class != NO_REGS)
2852 	{
2853 	  decreased_pressure += nregs;
2854 	  BB_DATA (bb)->max_reg_pressure[pressure_class] -= nregs;
2855 	  bitmap_clear_bit (BB_DATA (bb)->live_in, REGNO (dreg));
2856 	}
2857     }
2858   return decreased_pressure;
2859 }
2860 
2861 /* Determine if the expression EXPR should be hoisted to EXPR_BB up in
2862    the flow graph, if it can reach BB unimpaired.  Stop the search if the
2863    expression would need to be moved more than DISTANCE instructions.
2864 
2865    DISTANCE is the number of instructions through which EXPR can be
2866    hoisted up in flow graph.
2867 
2868    BB_SIZE points to an array which contains the number of instructions
2869    for each basic block.
2870 
2871    PRESSURE_CLASS and NREGS are register class and number of hard registers
2872    for storing EXPR.
2873 
2874    HOISTED_BBS points to a bitmap indicating basic blocks through which
2875    EXPR is hoisted.
2876 
2877    FROM is the instruction from which EXPR is hoisted.
2878 
2879    It's unclear exactly what Muchnick meant by "unimpaired".  It seems
2880    to me that the expression must either be computed or transparent in
2881    *every* block in the path(s) from EXPR_BB to BB.  Any other definition
2882    would allow the expression to be hoisted out of loops, even if
2883    the expression wasn't a loop invariant.
2884 
2885    Contrast this to reachability for PRE where an expression is
2886    considered reachable if *any* path reaches instead of *all*
2887    paths.  */
2888 
2889 static int
2890 should_hoist_expr_to_dom (basic_block expr_bb, struct gcse_expr *expr,
2891 			  basic_block bb, sbitmap visited,
2892 			  HOST_WIDE_INT distance,
2893 			  int *bb_size, enum reg_class pressure_class,
2894 			  int *nregs, bitmap hoisted_bbs, rtx_insn *from)
2895 {
2896   unsigned int i;
2897   edge pred;
2898   edge_iterator ei;
2899   sbitmap_iterator sbi;
2900   int visited_allocated_locally = 0;
2901   int decreased_pressure = 0;
2902 
2903   if (flag_ira_hoist_pressure)
2904     {
2905       /* Record old information of basic block BB when it is visited
2906 	 for the first time.  */
2907       if (!bitmap_bit_p (hoisted_bbs, bb->index))
2908 	{
2909 	  struct bb_data *data = BB_DATA (bb);
2910 	  bitmap_copy (data->backup, data->live_in);
2911 	  data->old_pressure = data->max_reg_pressure[pressure_class];
2912 	}
2913       decreased_pressure = update_bb_reg_pressure (bb, from);
2914     }
2915   /* Terminate the search if the distance through which EXPR is allowed
2916      to move is exhausted.  */
2917   if (distance > 0)
2918     {
2919       if (flag_ira_hoist_pressure)
2920 	{
2921 	  /* Prefer to hoist EXPR if register pressure is decreased.  */
2922 	  if (decreased_pressure > *nregs)
2923 	    distance += bb_size[bb->index];
2924 	  /* Let EXPR be hoisted through the basic block at no cost if one
2925 	     of the following conditions is satisfied:
2926 
2927 	     1. The basic block has low register pressure.
2928 	     2. Register pressure won't be increased after hoisting EXPR.
2929 
2930 	     Constant expressions are handled conservatively, because
2931 	     hoisting constant expressions aggressively results in worse
2932 	     code.  This decision is based on observations with CSiBE
2933 	     on the ARM target, while it has no obvious effect on other
2934 	     targets like x86, x86_64, mips and powerpc.  */
2935 	  else if (CONST_INT_P (expr->expr)
2936 		   || (BB_DATA (bb)->max_reg_pressure[pressure_class]
2937 			 >= ira_class_hard_regs_num[pressure_class]
2938 		       && decreased_pressure < *nregs))
2939 	    distance -= bb_size[bb->index];
2940 	}
2941       else
2942 	distance -= bb_size[bb->index];
2943 
2944       if (distance <= 0)
2945 	return 0;
2946     }
2947   else
2948     gcc_assert (distance == 0);
2949 
2950   if (visited == NULL)
2951     {
2952       visited_allocated_locally = 1;
2953       visited = sbitmap_alloc (last_basic_block_for_fn (cfun));
2954       bitmap_clear (visited);
2955     }
2956 
2957   FOR_EACH_EDGE (pred, ei, bb->preds)
2958     {
2959       basic_block pred_bb = pred->src;
2960 
2961       if (pred->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
2962 	break;
2963       else if (pred_bb == expr_bb)
2964 	continue;
2965       else if (bitmap_bit_p (visited, pred_bb->index))
2966 	continue;
2967       else if (! bitmap_bit_p (transp[pred_bb->index], expr->bitmap_index))
2968 	break;
2969       /* Not killed.  */
2970       else
2971 	{
2972 	  bitmap_set_bit (visited, pred_bb->index);
2973 	  if (! should_hoist_expr_to_dom (expr_bb, expr, pred_bb,
2974 					  visited, distance, bb_size,
2975 					  pressure_class, nregs,
2976 					  hoisted_bbs, from))
2977 	    break;
2978 	}
2979     }
2980   if (visited_allocated_locally)
2981     {
2982       /* If EXPR can be hoisted to expr_bb, record basic blocks through
2983 	 which EXPR is hoisted in hoisted_bbs.  */
2984       if (flag_ira_hoist_pressure && !pred)
2985 	{
2986 	  /* Record the basic block from which EXPR is hoisted.  */
2987 	  bitmap_set_bit (visited, bb->index);
2988 	  EXECUTE_IF_SET_IN_BITMAP (visited, 0, i, sbi)
2989 	    bitmap_set_bit (hoisted_bbs, i);
2990 	}
2991       sbitmap_free (visited);
2992     }
2993 
2994   return (pred == NULL);
2995 }
2996 
2997 /* Find occurrence in BB.  */
2998 
2999 static struct gcse_occr *
3000 find_occr_in_bb (struct gcse_occr *occr, basic_block bb)
3001 {
3002   /* Find the right occurrence of this expression.  */
3003   while (occr && BLOCK_FOR_INSN (occr->insn) != bb)
3004     occr = occr->next;
3005 
3006   return occr;
3007 }
3008 
3009 /* Actually perform code hoisting.
3010 
3011    The code hoisting pass can hoist multiple computations of the same
3012    expression along dominated paths to a dominating basic block, like
3013    from b2/b3 to b1 as depicted below:
3014 
3015           b1      ------
3016           /\         |
3017          /  \        |
3018         bx   by   distance
3019        /      \      |
3020       /        \     |
3021      b2        b3 ------
3022 
3023    Unfortunately code hoisting generally extends the live range of an
3024    output pseudo register, which increases register pressure and hurts
3025    register allocation.  To address this issue, an attribute MAX_DISTANCE
3026    is computed and attached to each expression.  The attribute is computed
3027    from the rtx cost of the corresponding expression and is used to control
3028    how far the expression can be hoisted up the flow graph.  As the
3029    expression is hoisted up the flow graph, GCC decreases its DISTANCE
3030    and stops hoisting when DISTANCE reaches 0.  Code hoisting can decrease
3031    register pressure if live ranges of inputs are shrunk.
3032 
3033    Option "-fira-hoist-pressure" implements register-pressure-directed
3034    hoisting based on the above method.  The rationale is:
3035      1. Calculate register pressure for each basic block by reusing IRA
3036 	facility.
3037      2. When an expression is hoisted through one basic block, GCC checks
3038 	the change of live ranges for inputs/output.  The basic block's
3039 	register pressure will be increased because of extended live
3040 	range of output.  However, register pressure will be decreased
3041 	if the live ranges of inputs are shrunk.
3042      3. After knowing how hoisting affects register pressure, GCC prefers
3043 	to hoist the expression if it can decrease register pressure, by
3044 	increasing DISTANCE of the corresponding expression.
3045      4. If hoisting the expression increases register pressure, GCC checks
3046 	the register pressure of the basic block and decreases DISTANCE only if
3047 	the register pressure is high.  In other words, the expression will be
3048 	hoisted through at no cost if the basic block has low register
3049 	pressure.
3050      5. Update register pressure information for basic blocks through
3051 	which expression is hoisted.  */
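
/* As a rough illustration of the distance bookkeeping used below: each
   insn gets an offset TO_BB_HEAD within its block and each block a
   BB_SIZE, both counting only non-debug insns.  Hoisting an occurrence
   from b2 to b1 in the figure above charges the full BB_SIZE of every
   intermediate block (bx) against the expression's MAX_DISTANCE, but
   only the portion of b2 above the occurrence.  With
   -fira-hoist-pressure a block with low register pressure may be
   traversed for free, and a block where the hoist shrinks the live
   ranges of inputs (by more than the output needs) can even give
   distance back.  */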
3052 
3053 static int
3054 hoist_code (void)
3055 {
3056   basic_block bb, dominated;
3057   vec<basic_block> dom_tree_walk;
3058   unsigned int dom_tree_walk_index;
3059   vec<basic_block> domby;
3060   unsigned int i, j, k;
3061   struct gcse_expr **index_map;
3062   struct gcse_expr *expr;
3063   int *to_bb_head;
3064   int *bb_size;
3065   int changed = 0;
3066   struct bb_data *data;
3067   /* Basic blocks that have occurrences reachable from BB.  */
3068   bitmap from_bbs;
3069   /* Basic blocks through which expr is hoisted.  */
3070   bitmap hoisted_bbs = NULL;
3071   bitmap_iterator bi;
3072 
3073   /* Compute a mapping from expression number (`bitmap_index') to
3074      hash table entry.  */
3075 
3076   index_map = XCNEWVEC (struct gcse_expr *, expr_hash_table.n_elems);
3077   for (i = 0; i < expr_hash_table.size; i++)
3078     for (expr = expr_hash_table.table[i]; expr; expr = expr->next_same_hash)
3079       index_map[expr->bitmap_index] = expr;
3080 
3081   /* Calculate sizes of basic blocks and note how far
3082      each instruction is from the start of its block.  We then use this
3083      data to restrict distance an expression can travel.  */
3084 
3085   to_bb_head = XCNEWVEC (int, get_max_uid ());
3086   bb_size = XCNEWVEC (int, last_basic_block_for_fn (cfun));
3087 
3088   FOR_EACH_BB_FN (bb, cfun)
3089     {
3090       rtx_insn *insn;
3091       int to_head;
3092 
3093       to_head = 0;
3094       FOR_BB_INSNS (bb, insn)
3095 	{
3096 	  /* Don't count debug instructions to avoid them affecting
3097 	     decision choices.  */
3098 	  if (NONDEBUG_INSN_P (insn))
3099 	    to_bb_head[INSN_UID (insn)] = to_head++;
3100 	}
3101 
3102       bb_size[bb->index] = to_head;
3103     }
3104 
3105   gcc_assert (EDGE_COUNT (ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs) == 1
3106 	      && (EDGE_SUCC (ENTRY_BLOCK_PTR_FOR_FN (cfun), 0)->dest
3107 		  == ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb));
3108 
3109   from_bbs = BITMAP_ALLOC (NULL);
3110   if (flag_ira_hoist_pressure)
3111     hoisted_bbs = BITMAP_ALLOC (NULL);
3112 
3113   dom_tree_walk = get_all_dominated_blocks (CDI_DOMINATORS,
3114 					    ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb);
3115 
3116   /* Walk over each basic block looking for potentially hoistable
3117      expressions; nothing gets hoisted from the entry block.  */
3118   FOR_EACH_VEC_ELT (dom_tree_walk, dom_tree_walk_index, bb)
3119     {
3120       domby = get_dominated_to_depth (CDI_DOMINATORS, bb, MAX_HOIST_DEPTH);
3121 
3122       if (domby.length () == 0)
3123 	continue;
3124 
3125       /* Examine each expression that is very busy at the exit of this
3126 	 block.  These are the potentially hoistable expressions.  */
3127       for (i = 0; i < SBITMAP_SIZE (hoist_vbeout[bb->index]); i++)
3128 	{
3129 	  if (bitmap_bit_p (hoist_vbeout[bb->index], i))
3130 	    {
3131 	      int nregs = 0;
3132 	      enum reg_class pressure_class = NO_REGS;
3133 	      /* Current expression.  */
3134 	      struct gcse_expr *expr = index_map[i];
3135 	      /* Number of occurrences of EXPR that can be hoisted to BB.  */
3136 	      int hoistable = 0;
3137 	      /* Occurrences reachable from BB.  */
3138 	      vec<occr_t> occrs_to_hoist = vNULL;
3139 	      /* We want to insert the expression into BB only once, so
3140 		 note when we've inserted it.  */
3141 	      int insn_inserted_p;
3142 	      occr_t occr;
3143 
3144 	      /* If an expression is computed in BB and is available at end of
3145 		 BB, hoist all occurrences dominated by BB to BB.  */
3146 	      if (bitmap_bit_p (comp[bb->index], i))
3147 		{
3148 		  occr = find_occr_in_bb (expr->antic_occr, bb);
3149 
3150 		  if (occr)
3151 		    {
3152 		      /* An occurrence might've been already deleted
3153 			 while processing a dominator of BB.  */
3154 		      if (!occr->deleted_p)
3155 			{
3156 			  gcc_assert (NONDEBUG_INSN_P (occr->insn));
3157 			  hoistable++;
3158 			}
3159 		    }
3160 		  else
3161 		    hoistable++;
3162 		}
3163 
3164 	      /* We've found a potentially hoistable expression; now
3165 		 we look at every block BB dominates to see if it
3166 		 computes the expression.  */
3167 	      FOR_EACH_VEC_ELT (domby, j, dominated)
3168 		{
3169 		  HOST_WIDE_INT max_distance;
3170 
3171 		  /* Ignore self dominance.  */
3172 		  if (bb == dominated)
3173 		    continue;
3174 		  /* We've found a dominated block, now see if it computes
3175 		     the busy expression and whether or not moving that
3176 		     expression to the "beginning" of that block is safe.  */
3177 		  if (!bitmap_bit_p (antloc[dominated->index], i))
3178 		    continue;
3179 
3180 		  occr = find_occr_in_bb (expr->antic_occr, dominated);
3181 		  gcc_assert (occr);
3182 
3183 		  /* An occurrence might've been already deleted
3184 		     while processing a dominator of BB.  */
3185 		  if (occr->deleted_p)
3186 		    continue;
3187 		  gcc_assert (NONDEBUG_INSN_P (occr->insn));
3188 
3189 		  max_distance = expr->max_distance;
3190 		  if (max_distance > 0)
3191 		    /* Adjust MAX_DISTANCE to account for the fact that
3192 		       OCCR won't have to travel all of DOMINATED, but
3193 		       only part of it.  */
3194 		    max_distance += (bb_size[dominated->index]
3195 				     - to_bb_head[INSN_UID (occr->insn)]);
3196 
3197 		  pressure_class = get_pressure_class_and_nregs (occr->insn,
3198 								 &nregs);
3199 
3200 		  /* Note if the expression should be hoisted from the dominated
3201 		     block to BB if it can reach DOMINATED unimpaired.
3202 
3203 		     Keep track of how many times this expression is hoistable
3204 		     from a dominated block into BB.  */
3205 		  if (should_hoist_expr_to_dom (bb, expr, dominated, NULL,
3206 						max_distance, bb_size,
3207 						pressure_class,	&nregs,
3208 						hoisted_bbs, occr->insn))
3209 		    {
3210 		      hoistable++;
3211 		      occrs_to_hoist.safe_push (occr);
3212 		      bitmap_set_bit (from_bbs, dominated->index);
3213 		    }
3214 		}
3215 
3216 	      /* If we found more than one hoistable occurrence of this
3217 		 expression, then note it in the vector of expressions to
3218 		 hoist.  It makes no sense to hoist things which are computed
3219 		 in only one BB, and doing so tends to pessimize register
3220 		 allocation.  One could increase this value to try harder
3221 		 to avoid any possible code expansion due to register
3222 		 allocation issues; however, experiments have shown that
3223 		 the vast majority of hoistable expressions are only movable
3224 		 from two successors, so raising this threshold is likely
3225 		 to nullify any benefit we get from code hoisting.  */
3226 	      if (hoistable > 1 && dbg_cnt (hoist_insn))
3227 		{
3228 		  /* If (hoistable != vec::length), then there is
3229 		     an occurrence of EXPR in BB itself.  Don't waste
3230 		     time looking for LCA in this case.  */
3231 		  if ((unsigned) hoistable == occrs_to_hoist.length ())
3232 		    {
3233 		      basic_block lca;
3234 
3235 		      lca = nearest_common_dominator_for_set (CDI_DOMINATORS,
3236 							      from_bbs);
3237 		      if (lca != bb)
3238 			/* Punt, it's better to hoist these occurrences to
3239 			   LCA.  */
3240 			occrs_to_hoist.release ();
3241 		    }
3242 		}
3243 	      else
3244 		/* Punt, no point hoisting a single occurrence.  */
3245 		occrs_to_hoist.release ();
3246 
3247 	      if (flag_ira_hoist_pressure
3248 		  && !occrs_to_hoist.is_empty ())
3249 		{
3250 		  /* Increase register pressure of basic blocks to which
3251 		     expr is hoisted because of extended live range of
3252 		     output.  */
3253 		  data = BB_DATA (bb);
3254 		  data->max_reg_pressure[pressure_class] += nregs;
3255 		  EXECUTE_IF_SET_IN_BITMAP (hoisted_bbs, 0, k, bi)
3256 		    {
3257 		      data = BB_DATA (BASIC_BLOCK_FOR_FN (cfun, k));
3258 		      data->max_reg_pressure[pressure_class] += nregs;
3259 		    }
3260 		}
3261 	      else if (flag_ira_hoist_pressure)
3262 		{
3263 		  /* Restore register pressure and live_in info for basic
3264 		     blocks recorded in hoisted_bbs when expr will not be
3265 		     hoisted.  */
3266 		  EXECUTE_IF_SET_IN_BITMAP (hoisted_bbs, 0, k, bi)
3267 		    {
3268 		      data = BB_DATA (BASIC_BLOCK_FOR_FN (cfun, k));
3269 		      bitmap_copy (data->live_in, data->backup);
3270 		      data->max_reg_pressure[pressure_class]
3271 			  = data->old_pressure;
3272 		    }
3273 		}
3274 
3275 	      if (flag_ira_hoist_pressure)
3276 		bitmap_clear (hoisted_bbs);
3277 
3278 	      insn_inserted_p = 0;
3279 
3280 	      /* Walk through the occurrences of the I'th expression that we
3281 		 want to hoist to BB and make the transformations.  */
3282 	      FOR_EACH_VEC_ELT (occrs_to_hoist, j, occr)
3283 		{
3284 		  rtx_insn *insn;
3285 		  const_rtx set;
3286 
3287 		  gcc_assert (!occr->deleted_p);
3288 
3289 		  insn = occr->insn;
3290 		  set = single_set_gcse (insn);
3291 
3292 		  /* Create a pseudo-reg to store the result of reaching
3293 		     expressions into.  Get the mode for the new pseudo
3294 		     from the mode of the original destination pseudo.
3295 
3296 		     It is important to use new pseudos whenever we
3297 		     emit a set.  This will allow reload to use
3298 		     rematerialization for such registers.  */
3299 		  if (!insn_inserted_p)
3300 		    expr->reaching_reg
3301 		      = gen_reg_rtx_and_attrs (SET_DEST (set));
3302 
3303 		  gcse_emit_move_after (SET_DEST (set), expr->reaching_reg,
3304 					insn);
3305 		  delete_insn (insn);
3306 		  occr->deleted_p = 1;
3307 		  changed = 1;
3308 		  gcse_subst_count++;
3309 
3310 		  if (!insn_inserted_p)
3311 		    {
3312 		      insert_insn_end_basic_block (expr, bb);
3313 		      insn_inserted_p = 1;
3314 		    }
3315 		}
3316 
3317 	      occrs_to_hoist.release ();
3318 	      bitmap_clear (from_bbs);
3319 	    }
3320 	}
3321       domby.release ();
3322     }
3323 
3324   dom_tree_walk.release ();
3325   BITMAP_FREE (from_bbs);
3326   if (flag_ira_hoist_pressure)
3327     BITMAP_FREE (hoisted_bbs);
3328 
3329   free (bb_size);
3330   free (to_bb_head);
3331   free (index_map);
3332 
3333   return changed;
3334 }
3335 
3336 /* Return pressure class and number of needed hard registers (through
3337    *NREGS) of register REGNO.  */
3338 static enum reg_class
3339 get_regno_pressure_class (int regno, int *nregs)
3340 {
3341   if (regno >= FIRST_PSEUDO_REGISTER)
3342     {
3343       enum reg_class pressure_class;
3344 
3345       pressure_class = reg_allocno_class (regno);
3346       pressure_class = ira_pressure_class_translate[pressure_class];
3347       *nregs
3348 	= ira_reg_class_max_nregs[pressure_class][PSEUDO_REGNO_MODE (regno)];
3349       return pressure_class;
3350     }
3351   else if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno)
3352 	   && ! TEST_HARD_REG_BIT (eliminable_regset, regno))
3353     {
3354       *nregs = 1;
3355       return ira_pressure_class_translate[REGNO_REG_CLASS (regno)];
3356     }
3357   else
3358     {
3359       *nregs = 0;
3360       return NO_REGS;
3361     }
3362 }
3363 
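/* A hedged illustration of the function above (the class name and
   register count are hypothetical and depend on the target): for a
   pseudo whose allocno class translates to the GENERAL_REGS pressure
   class and whose mode needs two word-sized hard registers, it would
   return GENERAL_REGS and set *NREGS to 2; for a hard register in
   ira_no_alloc_regs or the eliminable set it returns NO_REGS with
   *NREGS set to 0.  */
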
3364 /* Return pressure class and number of hard registers (through *NREGS)
3365    for destination of INSN. */
3366 static enum reg_class
3367 get_pressure_class_and_nregs (rtx_insn *insn, int *nregs)
3368 {
3369   rtx reg;
3370   enum reg_class pressure_class;
3371   const_rtx set = single_set_gcse (insn);
3372 
3373   reg = SET_DEST (set);
3374   if (GET_CODE (reg) == SUBREG)
3375     reg = SUBREG_REG (reg);
3376   if (MEM_P (reg))
3377     {
3378       *nregs = 0;
3379       pressure_class = NO_REGS;
3380     }
3381   else
3382     {
3383       gcc_assert (REG_P (reg));
3384       pressure_class = reg_allocno_class (REGNO (reg));
3385       pressure_class = ira_pressure_class_translate[pressure_class];
3386       *nregs
3387 	= ira_reg_class_max_nregs[pressure_class][GET_MODE (SET_SRC (set))];
3388     }
3389   return pressure_class;
3390 }
3391 
3392 /* Increase (if INCR_P) or decrease current register pressure for
3393    register REGNO.  */
3394 static void
3395 change_pressure (int regno, bool incr_p)
3396 {
3397   int nregs;
3398   enum reg_class pressure_class;
3399 
3400   pressure_class = get_regno_pressure_class (regno, &nregs);
3401   if (! incr_p)
3402     curr_reg_pressure[pressure_class] -= nregs;
3403   else
3404     {
3405       curr_reg_pressure[pressure_class] += nregs;
3406       if (BB_DATA (curr_bb)->max_reg_pressure[pressure_class]
3407 	  < curr_reg_pressure[pressure_class])
3408 	BB_DATA (curr_bb)->max_reg_pressure[pressure_class]
3409 	  = curr_reg_pressure[pressure_class];
3410     }
3411 }
3412 
3413 /* Calculate register pressure for each basic block by walking insns
3414    from last to first.  */
3415 static void
3416 calculate_bb_reg_pressure (void)
3417 {
3418   int i;
3419   unsigned int j;
3420   rtx_insn *insn;
3421   basic_block bb;
3422   bitmap curr_regs_live;
3423   bitmap_iterator bi;
3424 
3425 
3426   ira_setup_eliminable_regset ();
3427   curr_regs_live = BITMAP_ALLOC (&reg_obstack);
3428   FOR_EACH_BB_FN (bb, cfun)
3429     {
3430       curr_bb = bb;
3431       BB_DATA (bb)->live_in = BITMAP_ALLOC (NULL);
3432       BB_DATA (bb)->backup = BITMAP_ALLOC (NULL);
3433       bitmap_copy (BB_DATA (bb)->live_in, df_get_live_in (bb));
3434       bitmap_copy (curr_regs_live, df_get_live_out (bb));
3435       for (i = 0; i < ira_pressure_classes_num; i++)
3436 	curr_reg_pressure[ira_pressure_classes[i]] = 0;
3437       EXECUTE_IF_SET_IN_BITMAP (curr_regs_live, 0, j, bi)
3438 	change_pressure (j, true);
3439 
3440       FOR_BB_INSNS_REVERSE (bb, insn)
3441 	{
3442 	  rtx dreg;
3443 	  int regno;
3444 	  df_ref def, use;
3445 
3446 	  if (! NONDEBUG_INSN_P (insn))
3447 	    continue;
3448 
3449 	  FOR_EACH_INSN_DEF (def, insn)
3450 	    {
3451 	      dreg = DF_REF_REAL_REG (def);
3452 	      gcc_assert (REG_P (dreg));
3453 	      regno = REGNO (dreg);
3454 	      if (!(DF_REF_FLAGS (def)
3455 		    & (DF_REF_PARTIAL | DF_REF_CONDITIONAL)))
3456 		{
3457 		  if (bitmap_clear_bit (curr_regs_live, regno))
3458 		    change_pressure (regno, false);
3459 		}
3460 	    }
3461 
3462 	  FOR_EACH_INSN_USE (use, insn)
3463 	    {
3464 	      dreg = DF_REF_REAL_REG (use);
3465 	      gcc_assert (REG_P (dreg));
3466 	      regno = REGNO (dreg);
3467 	      if (bitmap_set_bit (curr_regs_live, regno))
3468 		change_pressure (regno, true);
3469 	    }
3470 	}
3471     }
3472   BITMAP_FREE (curr_regs_live);
3473 
3474   if (dump_file == NULL)
3475     return;
3476 
3477   fprintf (dump_file, "\nRegister Pressure: \n");
3478   FOR_EACH_BB_FN (bb, cfun)
3479     {
3480       fprintf (dump_file, "  Basic block %d: \n", bb->index);
3481       for (i = 0; (int) i < ira_pressure_classes_num; i++)
3482 	{
3483 	  enum reg_class pressure_class;
3484 
3485 	  pressure_class = ira_pressure_classes[i];
3486 	  if (BB_DATA (bb)->max_reg_pressure[pressure_class] == 0)
3487 	    continue;
3488 
3489 	  fprintf (dump_file, "    %s=%d\n", reg_class_names[pressure_class],
3490 		   BB_DATA (bb)->max_reg_pressure[pressure_class]);
3491 	}
3492     }
3493   fprintf (dump_file, "\n");
3494 }
3495 
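/* A rough worked example of the backward walk above, assuming a single
   pressure class P in which each of r1, r2 and r3 needs one hard
   register (register names are made up for illustration):

	live out of the block: {r1}		pressure = 1, max = 1
	last insn:  r1 = r2 + r3
	  def of r1: r1 leaves the live set	pressure 1 -> 0
	  use of r2: r2 enters the live set	pressure 0 -> 1
	  use of r3: r3 enters the live set	pressure 1 -> 2, max = 2

   so BB_DATA (bb)->max_reg_pressure[P] for this block ends up as 2.  */
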
3496 /* Top level routine to perform one code hoisting (aka unification) pass.
3497 
3498    Return nonzero if a change was made.  */
3499 
3500 static int
3501 one_code_hoisting_pass (void)
3502 {
3503   int changed = 0;
3504 
3505   gcse_subst_count = 0;
3506   gcse_create_count = 0;
3507 
3508   /* Return if there's nothing to do, or it is too expensive.  */
3509   if (n_basic_blocks_for_fn (cfun) <= NUM_FIXED_BLOCKS + 1
3510       || gcse_or_cprop_is_too_expensive (_("GCSE disabled")))
3511     return 0;
3512 
3513   doing_code_hoisting_p = true;
3514 
3515   /* Calculate register pressure for each basic block.  */
3516   if (flag_ira_hoist_pressure)
3517     {
3518       regstat_init_n_sets_and_refs ();
3519       ira_set_pseudo_classes (false, dump_file);
3520       alloc_aux_for_blocks (sizeof (struct bb_data));
3521       calculate_bb_reg_pressure ();
3522       regstat_free_n_sets_and_refs ();
3523     }
3524 
3525   /* We need alias.  */
3526   init_alias_analysis ();
3527 
3528   bytes_used = 0;
3529   gcc_obstack_init (&gcse_obstack);
3530   alloc_gcse_mem ();
3531 
3532   alloc_hash_table (&expr_hash_table);
3533   compute_hash_table (&expr_hash_table);
3534   if (dump_file)
3535     dump_hash_table (dump_file, "Code Hoisting Expressions", &expr_hash_table);
3536 
3537   if (expr_hash_table.n_elems > 0)
3538     {
3539       alloc_code_hoist_mem (last_basic_block_for_fn (cfun),
3540 			    expr_hash_table.n_elems);
3541       compute_code_hoist_data ();
3542       changed = hoist_code ();
3543       free_code_hoist_mem ();
3544     }
3545 
3546   if (flag_ira_hoist_pressure)
3547     {
3548       free_aux_for_blocks ();
3549       free_reg_info ();
3550     }
3551   free_hash_table (&expr_hash_table);
3552   free_gcse_mem ();
3553   obstack_free (&gcse_obstack, NULL);
3554 
3555   /* We are finished with alias.  */
3556   end_alias_analysis ();
3557 
3558   if (dump_file)
3559     {
3560       fprintf (dump_file, "HOIST of %s, %d basic blocks, %d bytes needed, ",
3561 	       current_function_name (), n_basic_blocks_for_fn (cfun),
3562 	       bytes_used);
3563       fprintf (dump_file, "%d substs, %d insns created\n",
3564 	       gcse_subst_count, gcse_create_count);
3565     }
3566 
3567   doing_code_hoisting_p = false;
3568 
3569   return changed;
3570 }
3571 
3572 /*  Here we provide the things required to do store motion towards the exit.
3573     In order for this to be effective, gcse also needed to be taught how to
3574     move a load when it is killed only by a store to itself.
3575 
3576 	    int i;
3577 	    float a[10];
3578 
3579 	    void foo(float scale)
3580 	    {
3581 	      for (i=0; i<10; i++)
3582 		a[i] *= scale;
3583 	    }
3584 
3585     'i' is both loaded and stored to in the loop. Normally, gcse cannot move
3586     the load out since it's live around the loop, and stored at the bottom
3587     of the loop.
3588 
3589       The 'Load Motion' referred to and implemented in this file is
3590     an enhancement to gcse which, when using edge based LCM, recognizes
3591     this situation and allows gcse to move the load out of the loop.
3592 
3593       Once gcse has hoisted the load, store motion can then push this
3594     load towards the exit, and we end up with no loads or stores of 'i'
3595     in the loop.  */
3596 
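/* As an illustrative sketch (not literal compiler output), the combined
   effect of load motion and store motion on the loop above is roughly:

	    void foo(float scale)
	    {
	      int i_tmp;			<- pseudo standing in for 'i'
	      for (i_tmp = 0; i_tmp < 10; i_tmp++)
		a[i_tmp] *= scale;
	      i = i_tmp;			<- single store pushed to the exit
	    }

   leaving no loads or stores of 'i' inside the loop.  */
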
3597 /* This will search the ldst list for a matching expression. If it
3598    doesn't find one, we create one and initialize it.  */
3599 
3600 static struct ls_expr *
3601 ldst_entry (rtx x)
3602 {
3603   int do_not_record_p = 0;
3604   struct ls_expr * ptr;
3605   unsigned int hash;
3606   ls_expr **slot;
3607   struct ls_expr e;
3608 
3609   hash = hash_rtx (x, GET_MODE (x), &do_not_record_p,
3610 		   NULL,  /*have_reg_qty=*/false);
3611 
3612   e.pattern = x;
3613   slot = pre_ldst_table->find_slot_with_hash (&e, hash, INSERT);
3614   if (*slot)
3615     return *slot;
3616 
3617   ptr = XNEW (struct ls_expr);
3618 
3619   ptr->next         = pre_ldst_mems;
3620   ptr->expr         = NULL;
3621   ptr->pattern      = x;
3622   ptr->pattern_regs = NULL_RTX;
3623   ptr->stores.create (0);
3624   ptr->reaching_reg = NULL_RTX;
3625   ptr->invalid      = 0;
3626   ptr->index        = 0;
3627   ptr->hash_index   = hash;
3628   pre_ldst_mems     = ptr;
3629   *slot = ptr;
3630 
3631   return ptr;
3632 }
3633 
3634 /* Free up an individual ldst entry.  */
3635 
3636 static void
3637 free_ldst_entry (struct ls_expr * ptr)
3638 {
3639   ptr->stores.release ();
3640 
3641   free (ptr);
3642 }
3643 
3644 /* Free up all memory associated with the ldst list.  */
3645 
3646 static void
3647 free_ld_motion_mems (void)
3648 {
3649   delete pre_ldst_table;
3650   pre_ldst_table = NULL;
3651 
3652   while (pre_ldst_mems)
3653     {
3654       struct ls_expr * tmp = pre_ldst_mems;
3655 
3656       pre_ldst_mems = pre_ldst_mems->next;
3657 
3658       free_ldst_entry (tmp);
3659     }
3660 
3661   pre_ldst_mems = NULL;
3662 }
3663 
3664 /* Dump debugging info about the ldst list.  */
3665 
3666 static void
3667 print_ldst_list (FILE * file)
3668 {
3669   struct ls_expr * ptr;
3670 
3671   fprintf (file, "LDST list: \n");
3672 
3673   for (ptr = pre_ldst_mems; ptr != NULL; ptr = ptr->next)
3674     {
3675       fprintf (file, "  Pattern (%3d): ", ptr->index);
3676 
3677       print_rtl (file, ptr->pattern);
3678 
3679       fprintf (file, "\n	Stores : ");
3680       print_rtx_insn_vec (file, ptr->stores);
3681 
3682       fprintf (file, "\n\n");
3683     }
3684 
3685   fprintf (file, "\n");
3686 }
3687 
3688 /* Return the valid ldst list entry for X, if any, otherwise NULL.  */
3689 
3690 static struct ls_expr *
3691 find_rtx_in_ldst (rtx x)
3692 {
3693   struct ls_expr e;
3694   ls_expr **slot;
3695   if (!pre_ldst_table)
3696     return NULL;
3697   e.pattern = x;
3698   slot = pre_ldst_table->find_slot (&e, NO_INSERT);
3699   if (!slot || (*slot)->invalid)
3700     return NULL;
3701   return *slot;
3702 }
3703 
3704 /* Load Motion for loads which only kill themselves.  */
3705 
3706 /* Return true if X, a MEM, is a simple access with no side effects.
3707    These are the types of loads we consider for the ld_motion list;
3708    otherwise we let the usual aliasing machinery take care of them.  */
3709 
3710 static int
3711 simple_mem (const_rtx x)
3712 {
3713   if (MEM_VOLATILE_P (x))
3714     return 0;
3715 
3716   if (GET_MODE (x) == BLKmode)
3717     return 0;
3718 
3719   /* If we are handling exceptions, we must be careful with memory references
3720      that may trap.  If we are not, the behavior is undefined, so we may just
3721      continue.  */
3722   if (cfun->can_throw_non_call_exceptions && may_trap_p (x))
3723     return 0;
3724 
3725   if (side_effects_p (x))
3726     return 0;
3727 
3728   /* Do not consider function arguments passed on stack.  */
3729   if (reg_mentioned_p (stack_pointer_rtx, x))
3730     return 0;
3731 
3732   if (flag_float_store && FLOAT_MODE_P (GET_MODE (x)))
3733     return 0;
3734 
3735   return 1;
3736 }
3737 
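/* For instance (illustrative RTL only, not from a real dump), a MEM such
   as (mem:SI (symbol_ref "a")) passes the tests above, whereas a volatile
   MEM, a BLKmode MEM, a MEM that may trap when we handle non-call
   exceptions, or a MEM whose address mentions the stack pointer is
   rejected and left to the usual aliasing machinery.  */
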
3738 /* Make sure there isn't a buried reference in this pattern anywhere.
3739    If there is, invalidate the entry for it since we're not capable
3740    of fixing it up just yet.  We have to be sure we know about ALL
3741    loads since the aliasing code will allow all entries in the
3742    ld_motion list to not alias themselves.  If we miss a load, we will get
3743    the wrong value since gcse might common it and we won't know to
3744    fix it up.  */
3745 
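/* For example (illustrative RTL, not taken from a real dump), in

       (set (reg:SI 100)
	    (plus:SI (mem:SI (symbol_ref "x")) (const_int 4)))

   the MEM of "x" is buried inside the PLUS rather than being the whole
   SET_SRC, so the recursive walk below finds it and marks its ldst entry
   invalid.  */
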
3746 static void
3747 invalidate_any_buried_refs (rtx x)
3748 {
3749   const char * fmt;
3750   int i, j;
3751   struct ls_expr * ptr;
3752 
3753   /* Invalidate it in the list.  */
3754   if (MEM_P (x) && simple_mem (x))
3755     {
3756       ptr = ldst_entry (x);
3757       ptr->invalid = 1;
3758     }
3759 
3760   /* Recursively process the insn.  */
3761   fmt = GET_RTX_FORMAT (GET_CODE (x));
3762 
3763   for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
3764     {
3765       if (fmt[i] == 'e')
3766 	invalidate_any_buried_refs (XEXP (x, i));
3767       else if (fmt[i] == 'E')
3768 	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3769 	  invalidate_any_buried_refs (XVECEXP (x, i, j));
3770     }
3771 }
3772 
3773 /* Find all the 'simple' MEMs which are used in LOADs and STORES.  'Simple'
3774    means MEM loads and stores to symbols, with no side effects and no
3775    registers in the expression.  For a MEM destination, we also check
3776    that the insn is still valid if we replace the destination with a
3777    REG, as is done in update_ld_motion_stores.  If there are any uses/defs
3778    which don't match these criteria, they are invalidated and trimmed out
3779    later.  */
3780 
3781 static void
3782 compute_ld_motion_mems (void)
3783 {
3784   struct ls_expr * ptr;
3785   basic_block bb;
3786   rtx_insn *insn;
3787 
3788   pre_ldst_mems = NULL;
3789   pre_ldst_table = new hash_table<pre_ldst_expr_hasher> (13);
3790 
3791   FOR_EACH_BB_FN (bb, cfun)
3792     {
3793       FOR_BB_INSNS (bb, insn)
3794 	{
3795 	  if (NONDEBUG_INSN_P (insn))
3796 	    {
3797 	      if (GET_CODE (PATTERN (insn)) == SET)
3798 		{
3799 		  rtx src = SET_SRC (PATTERN (insn));
3800 		  rtx dest = SET_DEST (PATTERN (insn));
3801 
3802 		  /* Check for a simple load.  */
3803 		  if (MEM_P (src) && simple_mem (src))
3804 		    {
3805 		      ptr = ldst_entry (src);
3806 		      if (!REG_P (dest))
3807 			ptr->invalid = 1;
3808 		    }
3809 		  else
3810 		    {
3811 		      /* Make sure there isn't a buried load somewhere.  */
3812 		      invalidate_any_buried_refs (src);
3813 		    }
3814 
3815 		  /* Check for a simple load through a REG_EQUAL note.  */
3816 		  rtx note = find_reg_equal_equiv_note (insn), src_eq;
3817 		  if (note
3818 		      && REG_NOTE_KIND (note) == REG_EQUAL
3819 		      && (src_eq = XEXP (note, 0))
3820 		      && !(MEM_P (src_eq) && simple_mem (src_eq)))
3821 		    invalidate_any_buried_refs (src_eq);
3822 
3823 		  /* Check for stores. Don't worry about aliased ones, they
3824 		     will block any movement we might do later. We only care
3825 		     about this exact pattern since those are the only
3826 		     circumstances in which we will ignore the aliasing info.  */
3827 		  if (MEM_P (dest) && simple_mem (dest))
3828 		    {
3829 		      ptr = ldst_entry (dest);
3830 		      machine_mode src_mode = GET_MODE (src);
3831 		      if (! MEM_P (src)
3832 			  && GET_CODE (src) != ASM_OPERANDS
3833 			  /* Check for REG manually since want_to_gcse_p
3834 			     returns 0 for all REGs.  */
3835 			  && can_assign_to_reg_without_clobbers_p (src,
3836 								    src_mode))
3837 			ptr->stores.safe_push (insn);
3838 		      else
3839 			ptr->invalid = 1;
3840 		    }
3841 		}
3842 	      else
3843 		{
3844 		  /* Invalidate all MEMs in the pattern and...  */
3845 		  invalidate_any_buried_refs (PATTERN (insn));
3846 
3847 		  /* ...in REG_EQUAL notes for PARALLELs with single SET.  */
3848 		  rtx note = find_reg_equal_equiv_note (insn), src_eq;
3849 		  if (note
3850 		      && REG_NOTE_KIND (note) == REG_EQUAL
3851 		      && (src_eq = XEXP (note, 0)))
3852 		    invalidate_any_buried_refs (src_eq);
3853 		}
3854 	    }
3855 	}
3856     }
3857 }
3858 
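/* For example (illustrative RTL only, with made-up register numbers),
   the scan above treats an insn like

       (set (reg:SI 100) (mem:SI (symbol_ref "x")))

   as a simple load and creates (or reuses) the ldst entry for that MEM,
   while

       (set (mem:SI (symbol_ref "x")) (reg:SI 101))

   is additionally pushed onto the entry's STORES vector; a store whose
   source cannot be assigned to a register without clobbers instead marks
   the entry invalid.  */
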
3859 /* Remove any references that have been invalidated or are not in the
3860    expression list for PRE gcse.  */
3861 
3862 static void
3863 trim_ld_motion_mems (void)
3864 {
3865   struct ls_expr * * last = & pre_ldst_mems;
3866   struct ls_expr * ptr = pre_ldst_mems;
3867 
3868   while (ptr != NULL)
3869     {
3870       struct gcse_expr * expr;
3871 
3872       /* Delete if entry has been made invalid.  */
3873       if (! ptr->invalid)
3874 	{
3875 	  /* Delete if we cannot find this mem in the expression list.  */
3876 	  unsigned int hash = ptr->hash_index % expr_hash_table.size;
3877 
3878 	  for (expr = expr_hash_table.table[hash];
3879 	       expr != NULL;
3880 	       expr = expr->next_same_hash)
3881 	    if (expr_equiv_p (expr->expr, ptr->pattern))
3882 	      break;
3883 	}
3884       else
3885 	expr = (struct gcse_expr *) 0;
3886 
3887       if (expr)
3888 	{
3889 	  /* Set the expression field if we are keeping it.  */
3890 	  ptr->expr = expr;
3891 	  last = & ptr->next;
3892 	  ptr = ptr->next;
3893 	}
3894       else
3895 	{
3896 	  *last = ptr->next;
3897 	  pre_ldst_table->remove_elt_with_hash (ptr, ptr->hash_index);
3898 	  free_ldst_entry (ptr);
3899 	  ptr = * last;
3900 	}
3901     }
3902 
3903   /* Show the world what we've found.  */
3904   if (dump_file && pre_ldst_mems != NULL)
3905     print_ldst_list (dump_file);
3906 }
3907 
3908 /* This routine will take an expression which we are replacing with
3909    a reaching register, and update any stores that are needed if
3910    that expression is in the ld_motion list.  Stores are updated by
3911    copying their SRC to the reaching register, and then storing
3912    the reaching register into the store location.  This keeps the
3913    correct value in the reaching register for the loads.  */
3914 
3915 static void
3916 update_ld_motion_stores (struct gcse_expr * expr)
3917 {
3918   struct ls_expr * mem_ptr;
3919 
3920   if ((mem_ptr = find_rtx_in_ldst (expr->expr)))
3921     {
3922       /* We could try to find just the REACHED stores, but it shouldn't
3923 	 matter if we set the reaching reg everywhere...  some might be
3924 	 dead and should be eliminated later.  */
3925 
3926       /* We replace (set mem expr) with (set reg expr) (set mem reg)
3927 	 where reg is the reaching reg used in the load.  We checked in
3928 	 compute_ld_motion_mems that we can replace (set mem expr) with
3929 	 (set reg expr) in that insn.  */
3930       rtx_insn *insn;
3931       unsigned int i;
3932       FOR_EACH_VEC_ELT_REVERSE (mem_ptr->stores, i, insn)
3933 	{
3934 	  rtx pat = PATTERN (insn);
3935 	  rtx src = SET_SRC (pat);
3936 	  rtx reg = expr->reaching_reg;
3937 
3938 	  /* If we've already copied it, continue.  */
3939 	  if (expr->reaching_reg == src)
3940 	    continue;
3941 
3942 	  if (dump_file)
3943 	    {
3944 	      fprintf (dump_file, "PRE:  store updated with reaching reg ");
3945 	      print_rtl (dump_file, reg);
3946 	      fprintf (dump_file, ":\n	");
3947 	      print_inline_rtx (dump_file, insn, 8);
3948 	      fprintf (dump_file, "\n");
3949 	    }
3950 
3951 	  rtx_insn *copy = gen_move_insn (reg, copy_rtx (SET_SRC (pat)));
3952 	  emit_insn_before (copy, insn);
3953 	  SET_SRC (pat) = reg;
3954 	  df_insn_rescan (insn);
3955 
3956 	  /* Un-recognize this pattern since it's probably different now.  */
3957 	  INSN_CODE (insn) = -1;
3958 	  gcse_create_count++;
3959 	}
3960     }
3961 }
3962 
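/* A schematic example of the rewrite performed above (illustrative RTL;
   the register numbers are made up):

       before:	(set (mem:SI (symbol_ref "x")) (plus:SI (reg:SI 65) (reg:SI 66)))

       after:	(set (reg:SI 70) (plus:SI (reg:SI 65) (reg:SI 66)))
		(set (mem:SI (symbol_ref "x")) (reg:SI 70))

   where (reg:SI 70) plays the role of EXPR->reaching_reg.  */
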
3963 /* Return true if the graph is too expensive to optimize. PASS is the
3964    optimization about to be performed.  */
3965 
3966 bool
3967 gcse_or_cprop_is_too_expensive (const char *pass)
3968 {
3969   unsigned int memory_request = (n_basic_blocks_for_fn (cfun)
3970 				 * SBITMAP_SET_SIZE (max_reg_num ())
3971 				 * sizeof (SBITMAP_ELT_TYPE));
3972 
3973   /* Trying to perform global optimizations on flow graphs which have
3974      a high connectivity will take a long time and is unlikely to be
3975      particularly useful.
3976 
3977      In normal circumstances a cfg should have about twice as many
3978      edges as blocks.  But we do not want to punish small functions
3979      which have a couple of switch statements.  Rather than simply
3980      thresholding the number of blocks, use something with more
3981      graceful degradation.  */
3982   if (n_edges_for_fn (cfun) > 20000 + n_basic_blocks_for_fn (cfun) * 4)
3983     {
3984       warning (OPT_Wdisabled_optimization,
3985 	       "%s: %d basic blocks and %d edges/basic block",
3986 	       pass, n_basic_blocks_for_fn (cfun),
3987 	       n_edges_for_fn (cfun) / n_basic_blocks_for_fn (cfun));
3988 
3989       return true;
3990     }
3991 
3992   /* If allocating memory for the dataflow bitmaps would take up too much
3993      storage it's better just to disable the optimization.  */
3994   if (memory_request > MAX_GCSE_MEMORY)
3995     {
3996       warning (OPT_Wdisabled_optimization,
3997 	       "%s: %d basic blocks and %d registers; increase --param max-gcse-memory above %d",
3998 	       pass, n_basic_blocks_for_fn (cfun), max_reg_num (),
3999 	       memory_request);
4000 
4001       return true;
4002     }
4003 
4004   return false;
4005 }
4006 
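/* A rough worked example of the memory check above, assuming 64-bit
   SBITMAP_ELT_TYPE elements and purely illustrative numbers: for a
   function with 1000 basic blocks and max_reg_num () of 5000,
   SBITMAP_SET_SIZE (5000) is 79 elements, so memory_request is about
   1000 * 79 * 8 = 632000 bytes, which is then compared against
   MAX_GCSE_MEMORY.  */
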
4007 static unsigned int
4008 execute_rtl_pre (void)
4009 {
4010   int changed;
4011   delete_unreachable_blocks ();
4012   df_analyze ();
4013   changed = one_pre_gcse_pass ();
4014   flag_rerun_cse_after_global_opts |= changed;
4015   if (changed)
4016     cleanup_cfg (0);
4017   return 0;
4018 }
4019 
4020 static unsigned int
4021 execute_rtl_hoist (void)
4022 {
4023   int changed;
4024   delete_unreachable_blocks ();
4025   df_analyze ();
4026   changed = one_code_hoisting_pass ();
4027   flag_rerun_cse_after_global_opts |= changed;
4028   if (changed)
4029     cleanup_cfg (0);
4030   return 0;
4031 }
4032 
4033 namespace {
4034 
4035 const pass_data pass_data_rtl_pre =
4036 {
4037   RTL_PASS, /* type */
4038   "rtl pre", /* name */
4039   OPTGROUP_NONE, /* optinfo_flags */
4040   TV_PRE, /* tv_id */
4041   PROP_cfglayout, /* properties_required */
4042   0, /* properties_provided */
4043   0, /* properties_destroyed */
4044   0, /* todo_flags_start */
4045   TODO_df_finish, /* todo_flags_finish */
4046 };
4047 
4048 class pass_rtl_pre : public rtl_opt_pass
4049 {
4050 public:
4051   pass_rtl_pre (gcc::context *ctxt)
4052     : rtl_opt_pass (pass_data_rtl_pre, ctxt)
4053   {}
4054 
4055   /* opt_pass methods: */
4056   virtual bool gate (function *);
4057   virtual unsigned int execute (function *) { return execute_rtl_pre (); }
4058 
4059 }; // class pass_rtl_pre
4060 
4061 /* We do not construct an accurate cfg in functions which call
4062    setjmp, so none of these passes runs if the function calls
4063    setjmp.
4064    FIXME: Should just handle setjmp via REG_SETJMP notes.  */
4065 
4066 bool
4067 pass_rtl_pre::gate (function *fun)
4068 {
4069   return optimize > 0 && flag_gcse
4070     && !fun->calls_setjmp
4071     && optimize_function_for_speed_p (fun)
4072     && dbg_cnt (pre);
4073 }
4074 
4075 } // anon namespace
4076 
4077 rtl_opt_pass *
4078 make_pass_rtl_pre (gcc::context *ctxt)
4079 {
4080   return new pass_rtl_pre (ctxt);
4081 }
4082 
4083 namespace {
4084 
4085 const pass_data pass_data_rtl_hoist =
4086 {
4087   RTL_PASS, /* type */
4088   "hoist", /* name */
4089   OPTGROUP_NONE, /* optinfo_flags */
4090   TV_HOIST, /* tv_id */
4091   PROP_cfglayout, /* properties_required */
4092   0, /* properties_provided */
4093   0, /* properties_destroyed */
4094   0, /* todo_flags_start */
4095   TODO_df_finish, /* todo_flags_finish */
4096 };
4097 
4098 class pass_rtl_hoist : public rtl_opt_pass
4099 {
4100 public:
4101   pass_rtl_hoist (gcc::context *ctxt)
4102     : rtl_opt_pass (pass_data_rtl_hoist, ctxt)
4103   {}
4104 
4105   /* opt_pass methods: */
4106   virtual bool gate (function *);
4107   virtual unsigned int execute (function *) { return execute_rtl_hoist (); }
4108 
4109 }; // class pass_rtl_hoist
4110 
4111 bool
4112 pass_rtl_hoist::gate (function *)
4113 {
4114   return optimize > 0 && flag_gcse
4115     && !cfun->calls_setjmp
4116     /* It does not make sense to run code hoisting unless we are optimizing
4117        for code size -- it rarely makes programs faster, and can make them
4118        bigger if we did PRE (when optimizing for space, we don't run PRE).  */
4119     && optimize_function_for_size_p (cfun)
4120     && dbg_cnt (hoist);
4121 }
4122 
4123 } // anon namespace
4124 
4125 rtl_opt_pass *
4126 make_pass_rtl_hoist (gcc::context *ctxt)
4127 {
4128   return new pass_rtl_hoist (ctxt);
4129 }
4130 
4131 /* Reset all state within gcse.c so that we can rerun the compiler
4132    within the same process.  For use by toplev::finalize.  */
4133 
4134 void
4135 gcse_c_finalize (void)
4136 {
4137   test_insn = NULL;
4138 }
4139 
4140 #include "gt-gcse.h"
4141