xref: /openbsd/gnu/gcc/gcc/gcse.c (revision 404b540a)
1 /* Global common subexpression elimination/Partial redundancy elimination
2    and global constant/copy propagation for GNU compiler.
3    Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005
4    Free Software Foundation, Inc.
5 
6 This file is part of GCC.
7 
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 2, or (at your option) any later
11 version.
12 
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
16 for more details.
17 
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING.  If not, write to the Free
20 Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
21 02110-1301, USA.  */
22 
23 /* TODO
24    - reordering of memory allocation and freeing to be more space efficient
25    - do rough calc of how many regs are needed in each block, and a rough
26      calc of how many regs are available in each class and use that to
27      throttle back the code in cases where RTX_COST is minimal.
28    - a store to the same address as a load does not kill the load if the
29      source of the store is also the destination of the load.  Handling this
30      allows more load motion, particularly out of loops.
31    - ability to realloc sbitmap vectors would allow one initial computation
32      of reg_set_in_block with only subsequent additions, rather than
33      recomputing it for each pass
34 
35 */
36 
37 /* References searched while implementing this.
38 
39    Compilers Principles, Techniques and Tools
40    Aho, Sethi, Ullman
41    Addison-Wesley, 1988
42 
43    Global Optimization by Suppression of Partial Redundancies
44    E. Morel, C. Renvoise
45    Communications of the ACM, Vol. 22, Num. 2, Feb. 1979
46 
47    A Portable Machine-Independent Global Optimizer - Design and Measurements
48    Frederick Chow
49    Stanford Ph.D. thesis, Dec. 1983
50 
51    A Fast Algorithm for Code Movement Optimization
52    D.M. Dhamdhere
53    SIGPLAN Notices, Vol. 23, Num. 10, Oct. 1988
54 
55    A Solution to a Problem with Morel and Renvoise's
56    Global Optimization by Suppression of Partial Redundancies
57    K-H Drechsler, M.P. Stadel
58    ACM TOPLAS, Vol. 10, Num. 4, Oct. 1988
59 
60    Practical Adaptation of the Global Optimization
61    Algorithm of Morel and Renvoise
62    D.M. Dhamdhere
63    ACM TOPLAS, Vol. 13, Num. 2. Apr. 1991
64 
65    Efficiently Computing Static Single Assignment Form and the Control
66    Dependence Graph
67    R. Cytron, J. Ferrante, B.K. Rosen, M.N. Wegman, and F.K. Zadeck
68    ACM TOPLAS, Vol. 13, Num. 4, Oct. 1991
69 
70    Lazy Code Motion
71    J. Knoop, O. Ruthing, B. Steffen
72    ACM SIGPLAN Notices Vol. 27, Num. 7, Jul. 1992, '92 Conference on PLDI
73 
74    What's In a Region?  Or Computing Control Dependence Regions in Near-Linear
75    Time for Reducible Control Flow
76    Thomas Ball
77    ACM Letters on Programming Languages and Systems,
78    Vol. 2, Num. 1-4, Mar-Dec 1993
79 
80    An Efficient Representation for Sparse Sets
81    Preston Briggs, Linda Torczon
82    ACM Letters on Programming Languages and Systems,
83    Vol. 2, Num. 1-4, Mar-Dec 1993
84 
85    A Variation of Knoop, Ruthing, and Steffen's Lazy Code Motion
86    K-H Drechsler, M.P. Stadel
87    ACM SIGPLAN Notices, Vol. 28, Num. 5, May 1993
88 
89    Partial Dead Code Elimination
90    J. Knoop, O. Ruthing, B. Steffen
91    ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994
92 
93    Effective Partial Redundancy Elimination
94    P. Briggs, K.D. Cooper
95    ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994
96 
97    The Program Structure Tree: Computing Control Regions in Linear Time
98    R. Johnson, D. Pearson, K. Pingali
99    ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994
100 
101    Optimal Code Motion: Theory and Practice
102    J. Knoop, O. Ruthing, B. Steffen
103    ACM TOPLAS, Vol. 16, Num. 4, Jul. 1994
104 
105    The power of assignment motion
106    J. Knoop, O. Ruthing, B. Steffen
107    ACM SIGPLAN Notices Vol. 30, Num. 6, Jun. 1995, '95 Conference on PLDI
108 
109    Global code motion / global value numbering
110    C. Click
111    ACM SIGPLAN Notices Vol. 30, Num. 6, Jun. 1995, '95 Conference on PLDI
112 
113    Value Driven Redundancy Elimination
114    L.T. Simpson
115    Rice University Ph.D. thesis, Apr. 1996
116 
117    Value Numbering
118    L.T. Simpson
119    Massively Scalar Compiler Project, Rice University, Sep. 1996
120 
121    High Performance Compilers for Parallel Computing
122    Michael Wolfe
123    Addison-Wesley, 1996
124 
125    Advanced Compiler Design and Implementation
126    Steven Muchnick
127    Morgan Kaufmann, 1997
128 
129    Building an Optimizing Compiler
130    Robert Morgan
131    Digital Press, 1998
132 
133    People wishing to speed up the code here should read:
134      Elimination Algorithms for Data Flow Analysis
135      B.G. Ryder, M.C. Paull
136      ACM Computing Surveys, Vol. 18, Num. 3, Sep. 1986
137 
138      How to Analyze Large Programs Efficiently and Informatively
139      D.M. Dhamdhere, B.K. Rosen, F.K. Zadeck
140      ACM SIGPLAN Notices Vol. 27, Num. 7, Jul. 1992, '92 Conference on PLDI
141 
142    People wishing to do something different can find various possibilities
143    in the above papers and elsewhere.
144 */
145 
146 #include "config.h"
147 #include "system.h"
148 #include "coretypes.h"
149 #include "tm.h"
150 #include "toplev.h"
151 
152 #include "rtl.h"
153 #include "tree.h"
154 #include "tm_p.h"
155 #include "regs.h"
156 #include "hard-reg-set.h"
157 #include "flags.h"
158 #include "real.h"
159 #include "insn-config.h"
160 #include "recog.h"
161 #include "basic-block.h"
162 #include "output.h"
163 #include "function.h"
164 #include "expr.h"
165 #include "except.h"
166 #include "ggc.h"
167 #include "params.h"
168 #include "cselib.h"
169 #include "intl.h"
170 #include "obstack.h"
171 #include "timevar.h"
172 #include "tree-pass.h"
173 #include "hashtab.h"
174 
175 /* Propagate flow information through back edges and thus enable PRE's
176    moving loop invariant calculations out of loops.
177 
178    Originally this tended to create worse overall code, but several
179    improvements during the development of PRE seem to have made following
180    back edges generally a win.
181 
182    Note much of the loop invariant code motion done here would normally
183    be done by loop.c, which has more heuristics for when to move invariants
184    out of loops.  At some point we might need to move some of those
185    heuristics into gcse.c.  */
186 
187 /* We support GCSE via Partial Redundancy Elimination.  PRE optimizations
188    are a superset of those done by GCSE.
189 
190    We perform the following steps:
191 
192    1) Compute basic block information.
193 
194    2) Compute table of places where registers are set.
195 
196    3) Perform copy/constant propagation.
197 
198    4) Perform global cse using lazy code motion if not optimizing
199       for size, or code hoisting if we are.
200 
201    5) Perform another pass of copy/constant propagation.
202 
203    Two passes of copy/constant propagation are done because the first one
204    enables more GCSE and the second one helps to clean up the copies that
205    GCSE creates.  This is needed more for PRE than for Classic because Classic
206    GCSE will try to use an existing register containing the common
207    subexpression rather than create a new one.  This is harder to do for PRE
208    because of the code motion (which Classic GCSE doesn't do).
209 
210    Expressions we are interested in GCSE-ing are of the form
211    (set (pseudo-reg) (expression)).
212    Function want_to_gcse_p says what these are.
213 
214    PRE handles moving invariant expressions out of loops (by treating them as
215    partially redundant).
216 
217    Eventually it would be nice to replace cse.c/gcse.c with SSA (static single
218    assignment) based GVN (global value numbering).  L. T. Simpson's paper
219    (Rice University) on value numbering is a useful reference for this.
220 
221    **********************
222 
223    We used to support multiple passes but there are diminishing returns in
224    doing so.  The first pass usually makes 90% of the changes that are doable.
225    A second pass can make a few more changes made possible by the first pass.
226    Experiments show any further passes don't make enough changes to justify
227    the expense.
228 
229    A study of spec92 using an unlimited number of passes:
230    [1 pass] = 1208 substitutions, [2] = 577, [3] = 202, [4] = 192, [5] = 83,
231    [6] = 34, [7] = 17, [8] = 9, [9] = 4, [10] = 4, [11] = 2,
232    [12] = 2, [13] = 1, [15] = 1, [16] = 2, [41] = 1
233 
234    It was found doing copy propagation between each pass enables further
235    substitutions.
236 
237    PRE is quite expensive in complicated functions because the DFA can take
238    a while to converge.  Hence we only perform one pass.  The parameter
239    max-gcse-passes can be modified if one wants to experiment.
240 
241    **********************
242 
243    The steps for PRE are:
244 
245    1) Build the hash table of expressions we wish to GCSE (expr_hash_table).
246 
247    2) Perform the data flow analysis for PRE.
248 
249    3) Delete the redundant instructions
250 
251    4) Insert the required copies [if any] that make the partially
252       redundant instructions fully redundant.
253 
254    5) For other reaching expressions, insert an instruction to copy the value
255       to a newly created pseudo that will reach the redundant instruction.
256 
257    The deletion is done first so that when we do insertions we
258    know which pseudo reg to use.
259 
260    Various papers have argued that PRE DFA is expensive (O(n^2)) and others
261    argue it is not.  The number of iterations for the algorithm to converge
262    is typically 2-4 so I don't view it as that expensive (relatively speaking).
263 
264    PRE GCSE depends heavily on the second CSE pass to clean up the copies
265    we create.  To make an expression reach the place where it's redundant,
266    the result of the expression is copied to a new register, and the redundant
267    expression is deleted by replacing it with this new register.  Classic GCSE
268    doesn't have this problem as much as it computes the reaching defs of
269    each register in each block and thus can try to use an existing
270    register.  */
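
/* A tiny worked example of the above (illustrative only, hypothetical
   pseudo-RTL):

       before PRE:                      after PRE:
         B1: t1 = a + b                   B1: t1 = a + b; R = t1
         B2: ...no computation...         B2: R = a + b     <- copy inserted
         B3: t2 = a + b                   B3: t2 = R        <- redundancy gone

   B3 is reached from both B1 and B2, so "a + b" is only partially
   redundant in B3.  Inserting the computation on the B2 path (step 4)
   makes it fully redundant, and B3's computation is replaced by a copy
   from the new pseudo R (steps 3 and 5).  */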
271 
272 /* GCSE global vars.  */
273 
274 /* Note whether or not we should run jump optimization after gcse.  We
275    want to do this for two cases.
276 
277     * If we changed any jumps via cprop.
278 
279     * If we added any labels via edge splitting.  */
280 static int run_jump_opt_after_gcse;
281 
282 /* An obstack for our working variables.  */
283 static struct obstack gcse_obstack;
284 
285 struct reg_use { rtx reg_rtx; };
286 
287 /* Hash table of expressions.  */
288 
289 struct expr
290 {
291   /* The expression (SET_SRC for expressions, PATTERN for assignments).  */
292   rtx expr;
293   /* Index in the available expression bitmaps.  */
294   int bitmap_index;
295   /* Next entry with the same hash.  */
296   struct expr *next_same_hash;
297   /* List of anticipatable occurrences in basic blocks in the function.
298      An "anticipatable occurrence" is one that is the first occurrence in the
299      basic block, whose operands are not modified in the basic block prior
300      to the occurrence, and whose output is not used between the start of
301      the block and the occurrence.  */
302   struct occr *antic_occr;
303   /* List of available occurrences in basic blocks in the function.
304      An "available occurrence" is one that is the last occurrence in the
305      basic block and whose operands are not modified by following statements
306      in the basic block [including this insn].  */
307   struct occr *avail_occr;
308   /* Non-null if the computation is PRE redundant.
309      The value is the newly created pseudo-reg to record a copy of the
310      expression in all the places that reach the redundant copy.  */
311   rtx reaching_reg;
312 };
313 
314 /* Occurrence of an expression.
315    There is one per basic block.  If a pattern appears more than once the
316    last appearance is used [or first for anticipatable expressions].  */
317 
318 struct occr
319 {
320   /* Next occurrence of this expression.  */
321   struct occr *next;
322   /* The insn that computes the expression.  */
323   rtx insn;
324   /* Nonzero if this [anticipatable] occurrence has been deleted.  */
325   char deleted_p;
326   /* Nonzero if this [available] occurrence has been copied to
327      reaching_reg.  */
328   /* ??? This is mutually exclusive with deleted_p, so they could share
329      the same byte.  */
330   char copied_p;
331 };
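
/* A minimal sketch (illustrative only, not part of the pass) of how the
   occurrence lists above are walked; the hypothetical helper below just
   counts the available occurrences of one expression.  */
#if 0
static int
count_avail_occurrences (struct expr *e)
{
  struct occr *o;
  int n = 0;

  for (o = e->avail_occr; o != NULL; o = o->next)
    n++;
  return n;
}
#endif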
332 
333 /* Expression and copy propagation hash tables.
334    Each hash table is an array of buckets.
335    ??? It is known that if it were an array of entries, structure elements
336    `next_same_hash' and `bitmap_index' wouldn't be necessary.  However, it is
337    not clear whether in the final analysis a sufficient amount of memory would
338    be saved as the size of the available expression bitmaps would be larger
339    [one could build a mapping table without holes afterwards though].
340    Someday I'll perform the computation and figure it out.  */
341 
342 struct hash_table
343 {
344   /* The table itself.
345      This is an array of `expr_hash_table_size' elements.  */
346   struct expr **table;
347 
348   /* Size of the hash table, in elements.  */
349   unsigned int size;
350 
351   /* Number of hash table elements.  */
352   unsigned int n_elems;
353 
354   /* Whether the table is the expression or the copy propagation (set) one.  */
355   int set_p;
356 };
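
/* A minimal sketch (illustrative only, hypothetical helper) of how one
   chain of such a table is probed; the real helpers (lookup_set,
   next_set, etc.) follow the same shape.  */
#if 0
static struct expr *
probe_hash_chain (rtx x, unsigned int hash, struct hash_table *table)
{
  struct expr *e;

  for (e = table->table[hash]; e != NULL; e = e->next_same_hash)
    if (expr_equiv_p (e->expr, x))
      return e;
  return NULL;
}
#endif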
357 
358 /* Expression hash table.  */
359 static struct hash_table expr_hash_table;
360 
361 /* Copy propagation hash table.  */
362 static struct hash_table set_hash_table;
363 
364 /* Mapping of uids to cuids.
365    Only real insns get cuids.  */
366 static int *uid_cuid;
367 
368 /* Highest UID in UID_CUID.  */
369 static int max_uid;
370 
371 /* Get the cuid of an insn.  */
372 #ifdef ENABLE_CHECKING
373 #define INSN_CUID(INSN) \
374   (gcc_assert (INSN_UID (INSN) <= max_uid), uid_cuid[INSN_UID (INSN)])
375 #else
376 #define INSN_CUID(INSN) (uid_cuid[INSN_UID (INSN)])
377 #endif
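
/* For example (illustrative, hypothetical helper): because CUIDs are
   dense and monotonic over real insns, "A executes before B" within a
   basic block reduces to a CUID comparison.  */
#if 0
static int
insn_precedes_p (rtx a, rtx b)
{
  return INSN_CUID (a) < INSN_CUID (b);
}
#endif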
378 
379 /* Number of cuids.  */
380 static int max_cuid;
381 
382 /* Mapping of cuids to insns.  */
383 static rtx *cuid_insn;
384 
385 /* Get insn from cuid.  */
386 #define CUID_INSN(CUID) (cuid_insn[CUID])
387 
388 /* Maximum register number in function prior to doing gcse + 1.
389    Registers created during this pass have regno >= max_gcse_regno.
390    This is named with "gcse" to not collide with the global of the same name.  */
391 static unsigned int max_gcse_regno;
392 
393 /* Table of registers that are modified.
394 
395    For each register, each element is a list of places where the pseudo-reg
396    is set.
397 
398    For simplicity, GCSE is done on sets of pseudo-regs only.  PRE GCSE only
399    requires knowledge of which blocks kill which regs [and thus could use
400    a bitmap instead of the lists `reg_set_table' uses].
401 
402    `reg_set_table' could be turned into an array of bitmaps (num-bbs x
403    num-regs) [however it may be useful to keep the data as is].  One
404    advantage of recording things this way is that `reg_set_table' is fairly
405    sparse with respect to pseudo regs but for hard regs could be fairly dense
406    [relatively speaking].  And recording sets of pseudo-regs in lists speeds
407    up functions like compute_transp since in the case of pseudo-regs we only
408    need to iterate over the number of times a pseudo-reg is set, not over the
409    number of basic blocks [clearly there is a bit of a slow down in the cases
410    where a pseudo is set more than once in a block, however it is believed
411    that the net effect is to speed things up].  This isn't done for hard-regs
412    because recording call-clobbered hard-regs in `reg_set_table' at each
413    function call can consume a fair bit of memory, and iterating over
414    hard-regs stored this way in compute_transp will be more expensive.  */
415 
416 typedef struct reg_set
417 {
418   /* The next setting of this register.  */
419   struct reg_set *next;
420   /* The index of the block where it was set.  */
421   int bb_index;
422 } reg_set;
423 
424 static reg_set **reg_set_table;
425 
426 /* Size of `reg_set_table'.
427    The table starts out at max_gcse_regno + slop, and is enlarged as
428    necessary.  */
429 static int reg_set_table_size;
430 
431 /* Amount to grow `reg_set_table' by when it's full.  */
432 #define REG_SET_TABLE_SLOP 100
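
/* A minimal sketch (illustrative only, hypothetical helper) of walking
   `reg_set_table': test whether pseudo REGNO is set anywhere in the
   block with index BB_INDEX.  */
#if 0
static int
regno_set_in_block_p (int regno, int bb_index)
{
  reg_set *r;

  for (r = reg_set_table[regno]; r != NULL; r = r->next)
    if (r->bb_index == bb_index)
      return 1;
  return 0;
}
#endif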
433 
434 /* This is a list of expressions which are MEMs and will be used by load
435    or store motion.
436    Load motion tracks MEMs which aren't killed by
437    anything except themselves (i.e., loads and stores to a single location).
438    We can then allow movement of these MEM refs with a little special
439    allowance. (all stores copy the same value to the reaching reg used
440    for the loads).  This means all values used to store into memory must have
441    no side effects so we can re-issue the setter value.
442    Store Motion uses this structure as an expression table to track stores
443    which look interesting, and might be moveable towards the exit block.  */
444 
445 struct ls_expr
446 {
447   struct expr * expr;		/* Gcse expression reference for LM.  */
448   rtx pattern;			/* Pattern of this mem.  */
449   rtx pattern_regs;		/* List of registers mentioned by the mem.  */
450   rtx loads;			/* INSN list of loads seen.  */
451   rtx stores;			/* INSN list of stores seen.  */
452   struct ls_expr * next;	/* Next in the list.  */
453   int invalid;			/* Invalid for some reason.  */
454   int index;			/* If it maps to a bitmap index.  */
455   unsigned int hash_index;	/* Index when in a hash table.  */
456   rtx reaching_reg;		/* Register to use when re-writing.  */
457 };
458 
459 /* Array of implicit set patterns indexed by basic block index.  */
460 static rtx *implicit_sets;
461 
462 /* Head of the list of load/store memory refs.  */
463 static struct ls_expr * pre_ldst_mems = NULL;
464 
465 /* Hashtable for the load/store memory refs.  */
466 static htab_t pre_ldst_table = NULL;
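
/* A minimal sketch (illustrative only, hypothetical helper) of walking
   that list; the real iteration helpers first_ls_expr/next_ls_expr are
   declared above and defined later in this file.  */
#if 0
static void
walk_ldst_list (void)
{
  struct ls_expr *p;

  for (p = pre_ldst_mems; p != NULL; p = p->next)
    if (! p->invalid)
      ; /* P is a candidate for load/store motion.  */
}
#endif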
467 
468 /* Bitmap containing one bit for each register in the program.
469    Used when performing GCSE to track which registers have been set since
470    the start of the basic block.  */
471 static regset reg_set_bitmap;
472 
473 /* For each block, a bitmap of registers set in the block.
474    This is used by compute_transp.
475    It is computed during hash table computation and not by compute_sets
476    as it includes registers added since the last pass (or between cprop and
477    gcse) and it's currently not easy to realloc sbitmap vectors.  */
478 static sbitmap *reg_set_in_block;
479 
480 /* Array, indexed by basic block number for a list of insns which modify
481    memory within that block.  */
482 static rtx * modify_mem_list;
483 static bitmap modify_mem_list_set;
484 
485 /* This array parallels modify_mem_list, but is kept canonicalized.  */
486 static rtx * canon_modify_mem_list;
487 
488 /* Bitmap indexed by block numbers to record which blocks contain
489    function calls.  */
490 static bitmap blocks_with_calls;
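
/* A minimal sketch (illustrative only, hypothetical helper) of how the
   per-block memory-modification lists are consumed; compare the real
   walk in load_killed_in_block_p below.  */
#if 0
static void
walk_mem_modifiers (basic_block bb)
{
  rtx list_entry;

  for (list_entry = modify_mem_list[bb->index];
       list_entry != NULL_RTX;
       list_entry = XEXP (list_entry, 1))
    {
      rtx setter = XEXP (list_entry, 0);
      /* SETTER is an insn in BB that sets (or may clobber) memory.  */
      (void) setter;
    }
}
#endif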
491 
492 /* Various variables for statistics gathering.  */
493 
494 /* Memory used in a pass.
495    This isn't intended to be absolutely precise.  Its intent is only
496    to keep an eye on memory usage.  */
497 static int bytes_used;
498 
499 /* GCSE substitutions made.  */
500 static int gcse_subst_count;
501 /* Number of copy instructions created.  */
502 static int gcse_create_count;
503 /* Number of local constants propagated.  */
504 static int local_const_prop_count;
505 /* Number of local copies propagated.  */
506 static int local_copy_prop_count;
507 /* Number of global constants propagated.  */
508 static int global_const_prop_count;
509 /* Number of global copies propagated.  */
510 static int global_copy_prop_count;
511 
512 /* For available exprs */
513 static sbitmap *ae_kill, *ae_gen;
514 
515 static void compute_can_copy (void);
516 static void *gmalloc (size_t) ATTRIBUTE_MALLOC;
517 static void *gcalloc (size_t, size_t) ATTRIBUTE_MALLOC;
518 static void *grealloc (void *, size_t);
519 static void *gcse_alloc (unsigned long);
520 static void alloc_gcse_mem (void);
521 static void free_gcse_mem (void);
522 static void alloc_reg_set_mem (int);
523 static void free_reg_set_mem (void);
524 static void record_one_set (int, rtx);
525 static void record_set_info (rtx, rtx, void *);
526 static void compute_sets (void);
527 static void hash_scan_insn (rtx, struct hash_table *, int);
528 static void hash_scan_set (rtx, rtx, struct hash_table *);
529 static void hash_scan_clobber (rtx, rtx, struct hash_table *);
530 static void hash_scan_call (rtx, rtx, struct hash_table *);
531 static int want_to_gcse_p (rtx);
532 static bool can_assign_to_reg_p (rtx);
533 static bool gcse_constant_p (rtx);
534 static int oprs_unchanged_p (rtx, rtx, int);
535 static int oprs_anticipatable_p (rtx, rtx);
536 static int oprs_available_p (rtx, rtx);
537 static void insert_expr_in_table (rtx, enum machine_mode, rtx, int, int,
538 				  struct hash_table *);
539 static void insert_set_in_table (rtx, rtx, struct hash_table *);
540 static unsigned int hash_expr (rtx, enum machine_mode, int *, int);
541 static unsigned int hash_set (int, int);
542 static int expr_equiv_p (rtx, rtx);
543 static void record_last_reg_set_info (rtx, int);
544 static void record_last_mem_set_info (rtx);
545 static void record_last_set_info (rtx, rtx, void *);
546 static void compute_hash_table (struct hash_table *);
547 static void alloc_hash_table (int, struct hash_table *, int);
548 static void free_hash_table (struct hash_table *);
549 static void compute_hash_table_work (struct hash_table *);
550 static void dump_hash_table (FILE *, const char *, struct hash_table *);
551 static struct expr *lookup_set (unsigned int, struct hash_table *);
552 static struct expr *next_set (unsigned int, struct expr *);
553 static void reset_opr_set_tables (void);
554 static int oprs_not_set_p (rtx, rtx);
555 static void mark_call (rtx);
556 static void mark_set (rtx, rtx);
557 static void mark_clobber (rtx, rtx);
558 static void mark_oprs_set (rtx);
559 static void alloc_cprop_mem (int, int);
560 static void free_cprop_mem (void);
561 static void compute_transp (rtx, int, sbitmap *, int);
562 static void compute_transpout (void);
563 static void compute_local_properties (sbitmap *, sbitmap *, sbitmap *,
564 				      struct hash_table *);
565 static void compute_cprop_data (void);
566 static void find_used_regs (rtx *, void *);
567 static int try_replace_reg (rtx, rtx, rtx);
568 static struct expr *find_avail_set (int, rtx);
569 static int cprop_jump (basic_block, rtx, rtx, rtx, rtx);
570 static void mems_conflict_for_gcse_p (rtx, rtx, void *);
571 static int load_killed_in_block_p (basic_block, int, rtx, int);
572 static void canon_list_insert (rtx, rtx, void *);
573 static int cprop_insn (rtx, int);
574 static int cprop (int);
575 static void find_implicit_sets (void);
576 static int one_cprop_pass (int, bool, bool);
577 static bool constprop_register (rtx, rtx, rtx, bool);
578 static struct expr *find_bypass_set (int, int);
579 static bool reg_killed_on_edge (rtx, edge);
580 static int bypass_block (basic_block, rtx, rtx);
581 static int bypass_conditional_jumps (void);
582 static void alloc_pre_mem (int, int);
583 static void free_pre_mem (void);
584 static void compute_pre_data (void);
585 static int pre_expr_reaches_here_p (basic_block, struct expr *,
586 				    basic_block);
587 static void insert_insn_end_bb (struct expr *, basic_block, int);
588 static void pre_insert_copy_insn (struct expr *, rtx);
589 static void pre_insert_copies (void);
590 static int pre_delete (void);
591 static int pre_gcse (void);
592 static int one_pre_gcse_pass (int);
593 static void add_label_notes (rtx, rtx);
594 static void alloc_code_hoist_mem (int, int);
595 static void free_code_hoist_mem (void);
596 static void compute_code_hoist_vbeinout (void);
597 static void compute_code_hoist_data (void);
598 static int hoist_expr_reaches_here_p (basic_block, int, basic_block, char *);
599 static void hoist_code (void);
600 static int one_code_hoisting_pass (void);
601 static rtx process_insert_insn (struct expr *);
602 static int pre_edge_insert (struct edge_list *, struct expr **);
603 static int pre_expr_reaches_here_p_work (basic_block, struct expr *,
604 					 basic_block, char *);
605 static struct ls_expr * ldst_entry (rtx);
606 static void free_ldst_entry (struct ls_expr *);
607 static void free_ldst_mems (void);
608 static void print_ldst_list (FILE *);
609 static struct ls_expr * find_rtx_in_ldst (rtx);
610 static int enumerate_ldsts (void);
611 static inline struct ls_expr * first_ls_expr (void);
612 static inline struct ls_expr * next_ls_expr (struct ls_expr *);
613 static int simple_mem (rtx);
614 static void invalidate_any_buried_refs (rtx);
615 static void compute_ld_motion_mems (void);
616 static void trim_ld_motion_mems (void);
617 static void update_ld_motion_stores (struct expr *);
618 static void reg_set_info (rtx, rtx, void *);
619 static void reg_clear_last_set (rtx, rtx, void *);
620 static bool store_ops_ok (rtx, int *);
621 static rtx extract_mentioned_regs (rtx);
622 static rtx extract_mentioned_regs_helper (rtx, rtx);
623 static void find_moveable_store (rtx, int *, int *);
624 static int compute_store_table (void);
625 static bool load_kills_store (rtx, rtx, int);
626 static bool find_loads (rtx, rtx, int);
627 static bool store_killed_in_insn (rtx, rtx, rtx, int);
628 static bool store_killed_after (rtx, rtx, rtx, basic_block, int *, rtx *);
629 static bool store_killed_before (rtx, rtx, rtx, basic_block, int *);
630 static void build_store_vectors (void);
631 static void insert_insn_start_bb (rtx, basic_block);
632 static int insert_store (struct ls_expr *, edge);
633 static void remove_reachable_equiv_notes (basic_block, struct ls_expr *);
634 static void replace_store_insn (rtx, rtx, basic_block, struct ls_expr *);
635 static void delete_store (struct ls_expr *, basic_block);
636 static void free_store_memory (void);
637 static void store_motion (void);
638 static void free_insn_expr_list_list (rtx *);
639 static void clear_modify_mem_tables (void);
640 static void free_modify_mem_tables (void);
641 static rtx gcse_emit_move_after (rtx, rtx, rtx);
642 static void local_cprop_find_used_regs (rtx *, void *);
643 static bool do_local_cprop (rtx, rtx, bool, rtx*);
644 static bool adjust_libcall_notes (rtx, rtx, rtx, rtx*);
645 static void local_cprop_pass (bool);
646 static bool is_too_expensive (const char *);
647 
648 
649 /* Entry point for global common subexpression elimination.
650    F is the first instruction in the function.  Return nonzero if a
651    change is made.  */
652 
653 static int
654 gcse_main (rtx f ATTRIBUTE_UNUSED)
655 {
656   int changed, pass;
657   /* Bytes used at start of pass.  */
658   int initial_bytes_used;
659   /* Maximum number of bytes used by a pass.  */
660   int max_pass_bytes;
661   /* Point to release obstack data from for each pass.  */
662   char *gcse_obstack_bottom;
663 
664   /* We do not construct an accurate cfg in functions which call
665      setjmp, so just punt to be safe.  */
666   if (current_function_calls_setjmp)
667     return 0;
668 
669   /* Assume that we do not need to run jump optimizations after gcse.  */
670   run_jump_opt_after_gcse = 0;
671 
672   /* Identify the basic block information for this function, including
673      successors and predecessors.  */
674   max_gcse_regno = max_reg_num ();
675 
676   if (dump_file)
677     dump_flow_info (dump_file, dump_flags);
678 
679   /* Return if there's nothing to do, or it is too expensive.  */
680   if (n_basic_blocks <= NUM_FIXED_BLOCKS + 1
681       || is_too_expensive (_("GCSE disabled")))
682     return 0;
683 
684   gcc_obstack_init (&gcse_obstack);
685   bytes_used = 0;
686 
687   /* We need alias.  */
688   init_alias_analysis ();
689   /* Record where pseudo-registers are set.  This data is kept accurate
690      during each pass.  ??? We could also record hard-reg information here
691      [since it's unchanging], however it is currently done during hash table
692      computation.
693 
694      It may be tempting to compute MEM set information here too, but MEM sets
695      will be subject to code motion one day and thus we need to compute
696      information about memory sets when we build the hash tables.  */
697 
698   alloc_reg_set_mem (max_gcse_regno);
699   compute_sets ();
700 
701   pass = 0;
702   initial_bytes_used = bytes_used;
703   max_pass_bytes = 0;
704   gcse_obstack_bottom = gcse_alloc (1);
705   changed = 1;
706   while (changed && pass < MAX_GCSE_PASSES)
707     {
708       changed = 0;
709       if (dump_file)
710 	fprintf (dump_file, "GCSE pass %d\n\n", pass + 1);
711 
712       /* Initialize bytes_used to the space for the pred/succ lists,
713 	 and the reg_set_table data.  */
714       bytes_used = initial_bytes_used;
715 
716       /* Each pass may create new registers, so recalculate each time.  */
717       max_gcse_regno = max_reg_num ();
718 
719       alloc_gcse_mem ();
720 
721       /* Don't allow constant propagation to modify jumps
722 	 during this pass.  */
723       timevar_push (TV_CPROP1);
724       changed = one_cprop_pass (pass + 1, false, false);
725       timevar_pop (TV_CPROP1);
726 
727       if (optimize_size)
728 	/* Do nothing.  */ ;
729       else
730 	{
731 	  timevar_push (TV_PRE);
732 	  changed |= one_pre_gcse_pass (pass + 1);
733 	  /* We may have just created new basic blocks.  Release and
734 	     recompute various things which are sized on the number of
735 	     basic blocks.  */
736 	  if (changed)
737 	    {
738 	      free_modify_mem_tables ();
739 	      modify_mem_list = gcalloc (last_basic_block, sizeof (rtx));
740 	      canon_modify_mem_list = gcalloc (last_basic_block, sizeof (rtx));
741 	    }
742 	  free_reg_set_mem ();
743 	  alloc_reg_set_mem (max_reg_num ());
744 	  compute_sets ();
745 	  run_jump_opt_after_gcse = 1;
746 	  timevar_pop (TV_PRE);
747 	}
748 
749       if (max_pass_bytes < bytes_used)
750 	max_pass_bytes = bytes_used;
751 
752       /* Free up memory, then reallocate for code hoisting.  We can
753 	 not re-use the existing allocated memory because the tables
754 	 will not have info for the insns or registers created by
755 	 partial redundancy elimination.  */
756       free_gcse_mem ();
757 
758       /* It does not make sense to run code hoisting unless we are optimizing
759 	 for code size -- it rarely makes programs faster, and can make
760 	 them bigger if we did partial redundancy elimination (when optimizing
761 	 for space, we don't run the partial redundancy algorithms).  */
762       if (optimize_size)
763 	{
764 	  timevar_push (TV_HOIST);
765 	  max_gcse_regno = max_reg_num ();
766 	  alloc_gcse_mem ();
767 	  changed |= one_code_hoisting_pass ();
768 	  free_gcse_mem ();
769 
770 	  if (max_pass_bytes < bytes_used)
771 	    max_pass_bytes = bytes_used;
772 	  timevar_pop (TV_HOIST);
773 	}
774 
775       if (dump_file)
776 	{
777 	  fprintf (dump_file, "\n");
778 	  fflush (dump_file);
779 	}
780 
781       obstack_free (&gcse_obstack, gcse_obstack_bottom);
782       pass++;
783     }
784 
785   /* Do one last pass of copy propagation, including cprop into
786      conditional jumps.  */
787 
788   max_gcse_regno = max_reg_num ();
789   alloc_gcse_mem ();
790   /* This time, go ahead and allow cprop to alter jumps.  */
791   timevar_push (TV_CPROP2);
792   one_cprop_pass (pass + 1, true, false);
793   timevar_pop (TV_CPROP2);
794   free_gcse_mem ();
795 
796   if (dump_file)
797     {
798       fprintf (dump_file, "GCSE of %s: %d basic blocks, ",
799 	       current_function_name (), n_basic_blocks);
800       fprintf (dump_file, "%d pass%s, %d bytes\n\n",
801 	       pass, pass > 1 ? "es" : "", max_pass_bytes);
802     }
803 
804   obstack_free (&gcse_obstack, NULL);
805   free_reg_set_mem ();
806 
807   /* We are finished with alias.  */
808   end_alias_analysis ();
809   allocate_reg_info (max_reg_num (), FALSE, FALSE);
810 
811   if (!optimize_size && flag_gcse_sm)
812     {
813       timevar_push (TV_LSM);
814       store_motion ();
815       timevar_pop (TV_LSM);
816     }
817 
818   /* Tell our caller whether jump optimization should be rerun.  */
819   return run_jump_opt_after_gcse;
820 }
821 
822 /* Misc. utilities.  */
823 
824 /* Nonzero for each mode that supports (set (reg) (reg)).
825    This is trivially true for integer and floating point values.
826    It may or may not be true for condition codes.  */
827 static char can_copy[(int) NUM_MACHINE_MODES];
828 
829 /* Compute which modes support reg/reg copy operations.  */
830 
831 static void
832 compute_can_copy (void)
833 {
834   int i;
835 #ifndef AVOID_CCMODE_COPIES
836   rtx reg, insn;
837 #endif
838   memset (can_copy, 0, NUM_MACHINE_MODES);
839 
840   start_sequence ();
841   for (i = 0; i < NUM_MACHINE_MODES; i++)
842     if (GET_MODE_CLASS (i) == MODE_CC)
843       {
844 #ifdef AVOID_CCMODE_COPIES
845 	can_copy[i] = 0;
846 #else
847 	reg = gen_rtx_REG ((enum machine_mode) i, LAST_VIRTUAL_REGISTER + 1);
848 	insn = emit_insn (gen_rtx_SET (VOIDmode, reg, reg));
849 	if (recog (PATTERN (insn), insn, NULL) >= 0)
850 	  can_copy[i] = 1;
851 #endif
852       }
853     else
854       can_copy[i] = 1;
855 
856   end_sequence ();
857 }
858 
859 /* Returns whether the mode supports reg/reg copy operations.  */
860 
861 bool
862 can_copy_p (enum machine_mode mode)
863 {
864   static bool can_copy_init_p = false;
865 
866   if (! can_copy_init_p)
867     {
868       compute_can_copy ();
869       can_copy_init_p = true;
870     }
871 
872   return can_copy[mode] != 0;
873 }
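
/* For example (illustrative): on a target where condition codes live in
   a MODE_CC mode with no matching (set (reg) (reg)) pattern, can_copy_p
   for that mode is false and gcse will not carry such values through
   copies, while integer modes like SImode trivially support them.  */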
874 
875 /* Cover function to xmalloc to record bytes allocated.  */
876 
877 static void *
878 gmalloc (size_t size)
879 {
880   bytes_used += size;
881   return xmalloc (size);
882 }
883 
884 /* Cover function to xcalloc to record bytes allocated.  */
885 
886 static void *
887 gcalloc (size_t nelem, size_t elsize)
888 {
889   bytes_used += nelem * elsize;
890   return xcalloc (nelem, elsize);
891 }
892 
893 /* Cover function to xrealloc.
894    We don't record the additional size since we don't know it.
895    It won't affect memory usage stats much anyway.  */
896 
897 static void *
898 grealloc (void *ptr, size_t size)
899 {
900   return xrealloc (ptr, size);
901 }
902 
903 /* Cover function to obstack_alloc.  */
904 
905 static void *
906 gcse_alloc (unsigned long size)
907 {
908   bytes_used += size;
909   return obstack_alloc (&gcse_obstack, size);
910 }
911 
912 /* Allocate memory for the cuid mapping array,
913    and reg/memory set tracking tables.
914 
915    This is called at the start of each pass.  */
916 
917 static void
918 alloc_gcse_mem (void)
919 {
920   int i;
921   basic_block bb;
922   rtx insn;
923 
924   /* Find the largest UID and create a mapping from UIDs to CUIDs.
925      CUIDs are like UIDs except they increase monotonically, have no gaps,
926      and only apply to real insns.
927      (Actually, there are gaps, for insn that are not inside a basic block.
928      but we should never see those anyway, so this is OK.)  */
929 
930   max_uid = get_max_uid ();
931   uid_cuid = gcalloc (max_uid + 1, sizeof (int));
932   i = 0;
933   FOR_EACH_BB (bb)
934     FOR_BB_INSNS (bb, insn)
935       {
936 	if (INSN_P (insn))
937 	  uid_cuid[INSN_UID (insn)] = i++;
938 	else
939 	  uid_cuid[INSN_UID (insn)] = i;
940       }
941 
942   /* Create a table mapping cuids to insns.  */
943 
944   max_cuid = i;
945   cuid_insn = gcalloc (max_cuid + 1, sizeof (rtx));
946   i = 0;
947   FOR_EACH_BB (bb)
948     FOR_BB_INSNS (bb, insn)
949       if (INSN_P (insn))
950 	CUID_INSN (i++) = insn;
951 
952   /* Allocate vars to track sets of regs.  */
953   reg_set_bitmap = BITMAP_ALLOC (NULL);
954 
955   /* Allocate vars to track sets of regs, memory per block.  */
956   reg_set_in_block = sbitmap_vector_alloc (last_basic_block, max_gcse_regno);
957   /* Allocate array to keep a list of insns which modify memory in each
958      basic block.  */
959   modify_mem_list = gcalloc (last_basic_block, sizeof (rtx));
960   canon_modify_mem_list = gcalloc (last_basic_block, sizeof (rtx));
961   modify_mem_list_set = BITMAP_ALLOC (NULL);
962   blocks_with_calls = BITMAP_ALLOC (NULL);
963 }
964 
965 /* Free memory allocated by alloc_gcse_mem.  */
966 
967 static void
968 free_gcse_mem (void)
969 {
970   free (uid_cuid);
971   free (cuid_insn);
972 
973   BITMAP_FREE (reg_set_bitmap);
974 
975   sbitmap_vector_free (reg_set_in_block);
976   free_modify_mem_tables ();
977   BITMAP_FREE (modify_mem_list_set);
978   BITMAP_FREE (blocks_with_calls);
979 }
980 
981 /* Compute the local properties of each recorded expression.
982 
983    Local properties are those that are defined by the block, irrespective of
984    other blocks.
985 
986    An expression is transparent in a block if its operands are not modified
987    in the block.
988 
989    An expression is computed (locally available) in a block if it is computed
990    at least once and expression would contain the same value if the
991    computation was moved to the end of the block.
992 
993    An expression is locally anticipatable in a block if it is computed at
994    least once and expression would contain the same value if the computation
995    was moved to the beginning of the block.
996 
997    We call this routine for cprop, pre and code hoisting.  They all compute
998    basically the same information and thus can easily share this code.
999 
1000    TRANSP, COMP, and ANTLOC are destination sbitmaps for recording local
1001    properties.  If NULL, then it is not necessary to compute or record that
1002    particular property.
1003 
1004    TABLE controls which hash table to look at.  If it is the set hash table,
1005    TRANSP is additionally computed as ~TRANSP, since this is really cprop's
1006    ABSALTERED.  */
1007 
1008 static void
1009 compute_local_properties (sbitmap *transp, sbitmap *comp, sbitmap *antloc,
1010 			  struct hash_table *table)
1011 {
1012   unsigned int i;
1013 
1014   /* Initialize any bitmaps that were passed in.  */
1015   if (transp)
1016     {
1017       if (table->set_p)
1018 	sbitmap_vector_zero (transp, last_basic_block);
1019       else
1020 	sbitmap_vector_ones (transp, last_basic_block);
1021     }
1022 
1023   if (comp)
1024     sbitmap_vector_zero (comp, last_basic_block);
1025   if (antloc)
1026     sbitmap_vector_zero (antloc, last_basic_block);
1027 
1028   for (i = 0; i < table->size; i++)
1029     {
1030       struct expr *expr;
1031 
1032       for (expr = table->table[i]; expr != NULL; expr = expr->next_same_hash)
1033 	{
1034 	  int indx = expr->bitmap_index;
1035 	  struct occr *occr;
1036 
1037 	  /* The expression is transparent in this block if it is not killed.
1038 	     We start by assuming all are transparent [none are killed], and
1039 	     then reset the bits for those that are.  */
1040 	  if (transp)
1041 	    compute_transp (expr->expr, indx, transp, table->set_p);
1042 
1043 	  /* The occurrences recorded in antic_occr are exactly those that
1044 	     we want to set to nonzero in ANTLOC.  */
1045 	  if (antloc)
1046 	    for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
1047 	      {
1048 		SET_BIT (antloc[BLOCK_NUM (occr->insn)], indx);
1049 
1050 		/* While we're scanning the table, this is a good place to
1051 		   initialize this.  */
1052 		occr->deleted_p = 0;
1053 	      }
1054 
1055 	  /* The occurrences recorded in avail_occr are exactly those that
1056 	     we want to set to nonzero in COMP.  */
1057 	  if (comp)
1058 	    for (occr = expr->avail_occr; occr != NULL; occr = occr->next)
1059 	      {
1060 		SET_BIT (comp[BLOCK_NUM (occr->insn)], indx);
1061 
1062 		/* While we're scanning the table, this is a good place to
1063 		   initialize this.  */
1064 		occr->copied_p = 0;
1065 	      }
1066 
1067 	  /* While we're scanning the table, this is a good place to
1068 	     initialize this.  */
1069 	  expr->reaching_reg = 0;
1070 	}
1071     }
1072 }
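
/* A worked micro-example of the properties above (illustrative only):
   for a block containing

       t = a + b;  a = 0;  u = a + b;

   the expression (plus a b) is locally anticipatable (its first
   computation precedes any modification of A or B), locally available
   (its last computation follows the set of A and nothing changes A or B
   afterwards), but not transparent (A is modified in the block).  */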
1073 
1074 /* Register set information.
1075 
1076    `reg_set_table' records where each register is set or otherwise
1077    modified.  */
1078 
1079 static struct obstack reg_set_obstack;
1080 
1081 static void
1082 alloc_reg_set_mem (int n_regs)
1083 {
1084   reg_set_table_size = n_regs + REG_SET_TABLE_SLOP;
1085   reg_set_table = gcalloc (reg_set_table_size, sizeof (struct reg_set *));
1086 
1087   gcc_obstack_init (&reg_set_obstack);
1088 }
1089 
1090 static void
1091 free_reg_set_mem (void)
1092 {
1093   free (reg_set_table);
1094   obstack_free (&reg_set_obstack, NULL);
1095 }
1096 
1097 /* Record REGNO in the reg_set table.  */
1098 
1099 static void
1100 record_one_set (int regno, rtx insn)
1101 {
1102   /* Allocate a new reg_set element and link it onto the list.  */
1103   struct reg_set *new_reg_info;
1104 
1105   /* If the table isn't big enough, enlarge it.  */
1106   if (regno >= reg_set_table_size)
1107     {
1108       int new_size = regno + REG_SET_TABLE_SLOP;
1109 
1110       reg_set_table = grealloc (reg_set_table,
1111 				new_size * sizeof (struct reg_set *));
1112       memset (reg_set_table + reg_set_table_size, 0,
1113 	      (new_size - reg_set_table_size) * sizeof (struct reg_set *));
1114       reg_set_table_size = new_size;
1115     }
1116 
1117   new_reg_info = obstack_alloc (&reg_set_obstack, sizeof (struct reg_set));
1118   bytes_used += sizeof (struct reg_set);
1119   new_reg_info->bb_index = BLOCK_NUM (insn);
1120   new_reg_info->next = reg_set_table[regno];
1121   reg_set_table[regno] = new_reg_info;
1122 }
1123 
1124 /* Called from compute_sets via note_stores to handle one SET or CLOBBER in
1125    an insn.  The DATA is really the instruction in which the SET is
1126    occurring.  */
1127 
1128 static void
1129 record_set_info (rtx dest, rtx setter ATTRIBUTE_UNUSED, void *data)
1130 {
1131   rtx record_set_insn = (rtx) data;
1132 
1133   if (REG_P (dest) && REGNO (dest) >= FIRST_PSEUDO_REGISTER)
1134     record_one_set (REGNO (dest), record_set_insn);
1135 }
1136 
1137 /* Scan the function and record each set of each pseudo-register.
1138 
1139    This is called once, at the start of the gcse pass.  See the comments for
1140    `reg_set_table' for further documentation.  */
1141 
1142 static void
1143 compute_sets (void)
1144 {
1145   basic_block bb;
1146   rtx insn;
1147 
1148   FOR_EACH_BB (bb)
1149     FOR_BB_INSNS (bb, insn)
1150       if (INSN_P (insn))
1151 	note_stores (PATTERN (insn), record_set_info, insn);
1152 }
1153 
1154 /* Hash table support.  */
1155 
1156 struct reg_avail_info
1157 {
1158   basic_block last_bb;
1159   int first_set;
1160   int last_set;
1161 };
1162 
1163 static struct reg_avail_info *reg_avail_info;
1164 static basic_block current_bb;
1165 
1166 
1167 /* See whether X, the source of a set, is something we want to consider for
1168    GCSE.  */
1169 
1170 static int
1171 want_to_gcse_p (rtx x)
1172 {
1173 #ifdef STACK_REGS
1174   /* On register stack architectures, don't GCSE constants from the
1175      constant pool, as the benefits are often swamped by the overhead
1176      of shuffling the register stack between basic blocks.  */
1177   if (IS_STACK_MODE (GET_MODE (x)))
1178     x = avoid_constant_pool_reference (x);
1179 #endif
1180 
1181   switch (GET_CODE (x))
1182     {
1183     case REG:
1184     case SUBREG:
1185     case CONST_INT:
1186     case CONST_DOUBLE:
1187     case CONST_VECTOR:
1188     case CALL:
1189       return 0;
1190 
1191     default:
1192       return can_assign_to_reg_p (x);
1193     }
1194 }
1195 
1196 /* Used internally by can_assign_to_reg_p.  */
1197 
1198 static GTY(()) rtx test_insn;
1199 
1200 /* Return true if we can assign X to a pseudo register.  */
1201 
1202 static bool
1203 can_assign_to_reg_p (rtx x)
1204 {
1205   int num_clobbers = 0;
1206   int icode;
1207 
1208   /* If this is a valid operand, we are OK.  If it's VOIDmode, we aren't.  */
1209   if (general_operand (x, GET_MODE (x)))
1210     return 1;
1211   else if (GET_MODE (x) == VOIDmode)
1212     return 0;
1213 
1214   /* Otherwise, check if we can make a valid insn from it.  First initialize
1215      our test insn if we haven't already.  */
1216   if (test_insn == 0)
1217     {
1218       test_insn
1219 	= make_insn_raw (gen_rtx_SET (VOIDmode,
1220 				      gen_rtx_REG (word_mode,
1221 						   FIRST_PSEUDO_REGISTER * 2),
1222 				      const0_rtx));
1223       NEXT_INSN (test_insn) = PREV_INSN (test_insn) = 0;
1224     }
1225 
1226   /* Now make an insn like the one we would make when GCSE'ing and see if
1227      it is valid.  */
1228   PUT_MODE (SET_DEST (PATTERN (test_insn)), GET_MODE (x));
1229   SET_SRC (PATTERN (test_insn)) = x;
1230   return ((icode = recog (PATTERN (test_insn), test_insn, &num_clobbers)) >= 0
1231 	  && (num_clobbers == 0 || ! added_clobbers_hard_reg_p (icode)));
1232 }
1233 
1234 /* Return nonzero if the operands of expression X are unchanged from the
1235    start of INSN's basic block up to but not including INSN (if AVAIL_P == 0),
1236    or from INSN to the end of INSN's basic block (if AVAIL_P != 0).  */
1237 
1238 static int
1239 oprs_unchanged_p (rtx x, rtx insn, int avail_p)
1240 {
1241   int i, j;
1242   enum rtx_code code;
1243   const char *fmt;
1244 
1245   if (x == 0)
1246     return 1;
1247 
1248   code = GET_CODE (x);
1249   switch (code)
1250     {
1251     case REG:
1252       {
1253 	struct reg_avail_info *info = &reg_avail_info[REGNO (x)];
1254 
1255 	if (info->last_bb != current_bb)
1256 	  return 1;
1257 	if (avail_p)
1258 	  return info->last_set < INSN_CUID (insn);
1259 	else
1260 	  return info->first_set >= INSN_CUID (insn);
1261       }
1262 
1263     case MEM:
1264       if (load_killed_in_block_p (current_bb, INSN_CUID (insn),
1265 				  x, avail_p))
1266 	return 0;
1267       else
1268 	return oprs_unchanged_p (XEXP (x, 0), insn, avail_p);
1269 
1270     case PRE_DEC:
1271     case PRE_INC:
1272     case POST_DEC:
1273     case POST_INC:
1274     case PRE_MODIFY:
1275     case POST_MODIFY:
1276       return 0;
1277 
1278     case PC:
1279     case CC0: /*FIXME*/
1280     case CONST:
1281     case CONST_INT:
1282     case CONST_DOUBLE:
1283     case CONST_VECTOR:
1284     case SYMBOL_REF:
1285     case LABEL_REF:
1286     case ADDR_VEC:
1287     case ADDR_DIFF_VEC:
1288       return 1;
1289 
1290     default:
1291       break;
1292     }
1293 
1294   for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
1295     {
1296       if (fmt[i] == 'e')
1297 	{
1298 	  /* If we are about to do the last recursive call needed at this
1299 	     level, change it into iteration.  This function is called enough
1300 	     to be worth it.  */
1301 	  if (i == 0)
1302 	    return oprs_unchanged_p (XEXP (x, i), insn, avail_p);
1303 
1304 	  else if (! oprs_unchanged_p (XEXP (x, i), insn, avail_p))
1305 	    return 0;
1306 	}
1307       else if (fmt[i] == 'E')
1308 	for (j = 0; j < XVECLEN (x, i); j++)
1309 	  if (! oprs_unchanged_p (XVECEXP (x, i, j), insn, avail_p))
1310 	    return 0;
1311     }
1312 
1313   return 1;
1314 }
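
/* For example (illustrative): given X = (plus (reg 70) (mem (reg 71))),
   oprs_unchanged_p recurses through the PLUS, consults reg_avail_info
   for regs 70 and 71, and asks load_killed_in_block_p (below) about
   the MEM.  */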
1315 
1316 /* Used for communication between mems_conflict_for_gcse_p and
1317    load_killed_in_block_p.  Nonzero if mems_conflict_for_gcse_p finds a
1318    conflict between two memory references.  */
1319 static int gcse_mems_conflict_p;
1320 
1321 /* Used for communication between mems_conflict_for_gcse_p and
1322    load_killed_in_block_p.  A memory reference for a load instruction,
1323    mems_conflict_for_gcse_p will see if a memory store conflicts with
1324    this memory load.  */
1325 static rtx gcse_mem_operand;
1326 
1327 /* DEST is the output of an instruction.  If it is a memory reference, and
1328    possibly conflicts with the load found in gcse_mem_operand, then set
1329    gcse_mems_conflict_p to a nonzero value.  */
1330 
1331 static void
1332 mems_conflict_for_gcse_p (rtx dest, rtx setter ATTRIBUTE_UNUSED,
1333 			  void *data ATTRIBUTE_UNUSED)
1334 {
1335   while (GET_CODE (dest) == SUBREG
1336 	 || GET_CODE (dest) == ZERO_EXTRACT
1337 	 || GET_CODE (dest) == STRICT_LOW_PART)
1338     dest = XEXP (dest, 0);
1339 
1340   /* If DEST is not a MEM, then it will not conflict with the load.  Note
1341      that function calls are assumed to clobber memory, but are handled
1342      elsewhere.  */
1343   if (! MEM_P (dest))
1344     return;
1345 
1346   /* If we are setting a MEM in our list of specially recognized MEMs,
1347      don't mark as killed this time.  */
1348 
1349   if (expr_equiv_p (dest, gcse_mem_operand) && pre_ldst_mems != NULL)
1350     {
1351       if (!find_rtx_in_ldst (dest))
1352 	gcse_mems_conflict_p = 1;
1353       return;
1354     }
1355 
1356   if (true_dependence (dest, GET_MODE (dest), gcse_mem_operand,
1357 		       rtx_addr_varies_p))
1358     gcse_mems_conflict_p = 1;
1359 }
1360 
1361 /* Return nonzero if the expression in X (a memory reference) is killed
1362    in block BB before or after the insn with the CUID in UID_LIMIT.
1363    AVAIL_P is nonzero for kills after UID_LIMIT, and zero for kills
1364    before UID_LIMIT.
1365 
1366    To check the entire block, set UID_LIMIT to max_uid + 1 and
1367    AVAIL_P to 0.  */
1368 
1369 static int
1370 load_killed_in_block_p (basic_block bb, int uid_limit, rtx x, int avail_p)
1371 {
1372   rtx list_entry = modify_mem_list[bb->index];
1373 
1374   /* If this is a read-only MEM then nothing is going to change it.  */
1375   if (MEM_READONLY_P (x))
1376     return 0;
1377 
1378   while (list_entry)
1379     {
1380       rtx setter;
1381       /* Ignore entries in the list that do not apply.  */
1382       if ((avail_p
1383 	   && INSN_CUID (XEXP (list_entry, 0)) < uid_limit)
1384 	  || (! avail_p
1385 	      && INSN_CUID (XEXP (list_entry, 0)) > uid_limit))
1386 	{
1387 	  list_entry = XEXP (list_entry, 1);
1388 	  continue;
1389 	}
1390 
1391       setter = XEXP (list_entry, 0);
1392 
1393       /* If SETTER is a call everything is clobbered.  Note that calls
1394 	 to pure functions are never put on the list, so we need not
1395 	 worry about them.  */
1396       if (CALL_P (setter))
1397 	return 1;
1398 
1399       /* SETTER must be an INSN of some kind that sets memory.  Call
1400 	 note_stores to examine each hunk of memory that is modified.
1401 
1402 	 The note_stores interface is pretty limited, so we have to
1403 	 communicate via global variables.  Yuk.  */
1404       gcse_mem_operand = x;
1405       gcse_mems_conflict_p = 0;
1406       note_stores (PATTERN (setter), mems_conflict_for_gcse_p, NULL);
1407       if (gcse_mems_conflict_p)
1408 	return 1;
1409       list_entry = XEXP (list_entry, 1);
1410     }
1411   return 0;
1412 }
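
/* A minimal sketch (illustrative only, hypothetical helper): a MEM X
   stays valid from INSN to the end of INSN's block iff no later store
   in the block kills it.  */
#if 0
static int
mem_available_after_p (basic_block bb, rtx insn, rtx x)
{
  return ! load_killed_in_block_p (bb, INSN_CUID (insn), x, 1);
}
#endif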
1413 
1414 /* Return nonzero if the operands of expression X are unchanged from
1415    the start of INSN's basic block up to but not including INSN.  */
1416 
1417 static int
1418 oprs_anticipatable_p (rtx x, rtx insn)
1419 {
1420   return oprs_unchanged_p (x, insn, 0);
1421 }
1422 
1423 /* Return nonzero if the operands of expression X are unchanged from
1424    INSN to the end of INSN's basic block.  */
1425 
1426 static int
1427 oprs_available_p (rtx x, rtx insn)
1428 {
1429   return oprs_unchanged_p (x, insn, 1);
1430 }
1431 
1432 /* Hash expression X.
1433 
1434    MODE is only used if X is a CONST_INT.  DO_NOT_RECORD_P is a boolean
1435    indicating if a volatile operand is found or if the expression contains
1436    something we don't want to insert in the table.  HASH_TABLE_SIZE is
1437    the current size of the hash table to be probed.  */
1438 
1439 static unsigned int
1440 hash_expr (rtx x, enum machine_mode mode, int *do_not_record_p,
1441 	   int hash_table_size)
1442 {
1443   unsigned int hash;
1444 
1445   *do_not_record_p = 0;
1446 
1447   hash = hash_rtx (x, mode, do_not_record_p,
1448 		   NULL,  /*have_reg_qty=*/false);
1449   return hash % hash_table_size;
1450 }
1451 
1452 /* Hash a set of register REGNO.
1453 
1454    Sets are hashed on the register that is set.  This simplifies the PRE copy
1455    propagation code.
1456 
1457    ??? May need to make things more elaborate.  Later, as necessary.  */
1458 
1459 static unsigned int
1460 hash_set (int regno, int hash_table_size)
1461 {
1462   unsigned int hash;
1463 
1464   hash = regno;
1465   return hash % hash_table_size;
1466 }
1467 
1468 /* Return nonzero if exp1 is equivalent to exp2.  */
1469 
1470 static int
1471 expr_equiv_p (rtx x, rtx y)
1472 {
1473   return exp_equiv_p (x, y, 0, true);
1474 }
1475 
1476 /* Insert expression X in INSN in the hash TABLE.
1477    If it is already present, record it as the last occurrence in INSN's
1478    basic block.
1479 
1480    MODE is the mode of the value X is being stored into.
1481    It is only used if X is a CONST_INT.
1482 
1483    ANTIC_P is nonzero if X is an anticipatable expression.
1484    AVAIL_P is nonzero if X is an available expression.  */
1485 
1486 static void
1487 insert_expr_in_table (rtx x, enum machine_mode mode, rtx insn, int antic_p,
1488 		      int avail_p, struct hash_table *table)
1489 {
1490   int found, do_not_record_p;
1491   unsigned int hash;
1492   struct expr *cur_expr, *last_expr = NULL;
1493   struct occr *antic_occr, *avail_occr;
1494 
1495   hash = hash_expr (x, mode, &do_not_record_p, table->size);
1496 
1497   /* Do not insert expression in table if it contains volatile operands,
1498      or if hash_expr determines the expression is something we don't want
1499      to or can't handle.  */
1500   if (do_not_record_p)
1501     return;
1502 
1503   cur_expr = table->table[hash];
1504   found = 0;
1505 
1506   while (cur_expr && 0 == (found = expr_equiv_p (cur_expr->expr, x)))
1507     {
1508       /* If the expression isn't found, save a pointer to the end of
1509 	 the list.  */
1510       last_expr = cur_expr;
1511       cur_expr = cur_expr->next_same_hash;
1512     }
1513 
1514   if (! found)
1515     {
1516       cur_expr = gcse_alloc (sizeof (struct expr));
1517       bytes_used += sizeof (struct expr);
1518       if (table->table[hash] == NULL)
1519 	/* This is the first pattern that hashed to this index.  */
1520 	table->table[hash] = cur_expr;
1521       else
1522 	/* Add EXPR to end of this hash chain.  */
1523 	last_expr->next_same_hash = cur_expr;
1524 
1525       /* Set the fields of the expr element.  */
1526       cur_expr->expr = x;
1527       cur_expr->bitmap_index = table->n_elems++;
1528       cur_expr->next_same_hash = NULL;
1529       cur_expr->antic_occr = NULL;
1530       cur_expr->avail_occr = NULL;
1531     }
1532 
1533   /* Now record the occurrence(s).  */
1534   if (antic_p)
1535     {
1536       antic_occr = cur_expr->antic_occr;
1537 
1538       if (antic_occr && BLOCK_NUM (antic_occr->insn) != BLOCK_NUM (insn))
1539 	antic_occr = NULL;
1540 
1541       if (antic_occr)
1542 	/* Found another instance of the expression in the same basic block.
1543 	   Prefer the currently recorded one.  We want the first one in the
1544 	   block and the block is scanned from start to end.  */
1545 	; /* nothing to do */
1546       else
1547 	{
1548 	  /* First occurrence of this expression in this basic block.  */
1549 	  antic_occr = gcse_alloc (sizeof (struct occr));
1550 	  bytes_used += sizeof (struct occr);
1551 	  antic_occr->insn = insn;
1552 	  antic_occr->next = cur_expr->antic_occr;
1553 	  antic_occr->deleted_p = 0;
1554 	  cur_expr->antic_occr = antic_occr;
1555 	}
1556     }
1557 
1558   if (avail_p)
1559     {
1560       avail_occr = cur_expr->avail_occr;
1561 
1562       if (avail_occr && BLOCK_NUM (avail_occr->insn) == BLOCK_NUM (insn))
1563 	{
1564 	  /* Found another instance of the expression in the same basic block.
1565 	     Prefer this occurrence to the currently recorded one.  We want
1566 	     the last one in the block and the block is scanned from start
1567 	     to end.  */
1568 	  avail_occr->insn = insn;
1569 	}
1570       else
1571 	{
1572 	  /* First occurrence of this expression in this basic block.  */
1573 	  avail_occr = gcse_alloc (sizeof (struct occr));
1574 	  bytes_used += sizeof (struct occr);
1575 	  avail_occr->insn = insn;
1576 	  avail_occr->next = cur_expr->avail_occr;
1577 	  avail_occr->deleted_p = 0;
1578 	  cur_expr->avail_occr = avail_occr;
1579 	}
1580     }
1581 }
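
/* A minimal standalone sketch (not part of this pass) of the occurrence
   bookkeeping above: scanning a block front to back, the anticipatable
   list keeps the FIRST sighting per block while the available list keeps
   the LAST, exactly as the two branches above do.  Guarded out; plain
   ints stand in for basic blocks and insns.  */
#if 0
#include <stdio.h>

struct toy_occr { int block, insn; };

int
main (void)
{
  int sightings[3][2] = { {4, 10}, {4, 17}, {4, 23} };	/* (block, insn) */
  struct toy_occr antic = { -1, -1 }, avail = { -1, -1 };
  int i;

  for (i = 0; i < 3; i++)
    {
      if (antic.block != sightings[i][0])
	{
	  /* New block: record the first occurrence.  */
	  antic.block = sightings[i][0];
	  antic.insn = sightings[i][1];
	}
      /* else keep the first occurrence, as insert_expr_in_table does.  */

      avail.block = sightings[i][0];
      avail.insn = sightings[i][1];	/* Always overwrite: keep the last.  */
    }

  /* Prints "antic insn 10, avail insn 23".  */
  printf ("antic insn %d, avail insn %d\n", antic.insn, avail.insn);
  return 0;
}
#endif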

/* Insert pattern X in INSN in the hash table.
   X is a SET of a reg to either another reg or a constant.
   If it is already present, record it as the last occurrence in INSN's
   basic block.  */

static void
insert_set_in_table (rtx x, rtx insn, struct hash_table *table)
{
  int found;
  unsigned int hash;
  struct expr *cur_expr, *last_expr = NULL;
  struct occr *cur_occr;

  gcc_assert (GET_CODE (x) == SET && REG_P (SET_DEST (x)));

  hash = hash_set (REGNO (SET_DEST (x)), table->size);

  cur_expr = table->table[hash];
  found = 0;

  while (cur_expr && 0 == (found = expr_equiv_p (cur_expr->expr, x)))
    {
      /* If the expression isn't found, save a pointer to the end of
	 the list.  */
      last_expr = cur_expr;
      cur_expr = cur_expr->next_same_hash;
    }

  if (! found)
    {
      cur_expr = gcse_alloc (sizeof (struct expr));
      bytes_used += sizeof (struct expr);
      if (table->table[hash] == NULL)
	/* This is the first pattern that hashed to this index.  */
	table->table[hash] = cur_expr;
      else
	/* Add EXPR to end of this hash chain.  */
	last_expr->next_same_hash = cur_expr;

      /* Set the fields of the expr element.
	 We must copy X because it can be modified when copy propagation is
	 performed on its operands.  */
      cur_expr->expr = copy_rtx (x);
      cur_expr->bitmap_index = table->n_elems++;
      cur_expr->next_same_hash = NULL;
      cur_expr->antic_occr = NULL;
      cur_expr->avail_occr = NULL;
    }

  /* Now record the occurrence.  */
  cur_occr = cur_expr->avail_occr;

  if (cur_occr && BLOCK_NUM (cur_occr->insn) == BLOCK_NUM (insn))
    {
      /* Found another instance of the expression in the same basic block.
	 Prefer this occurrence to the currently recorded one.  We want
	 the last one in the block and the block is scanned from start
	 to end.  */
      cur_occr->insn = insn;
    }
  else
    {
      /* First occurrence of this expression in this basic block.  */
      cur_occr = gcse_alloc (sizeof (struct occr));
      bytes_used += sizeof (struct occr);
      cur_occr->insn = insn;
      cur_occr->next = cur_expr->avail_occr;
      cur_occr->deleted_p = 0;
      cur_expr->avail_occr = cur_occr;
    }
}

/* Determine whether the rtx X should be treated as a constant for
   the purposes of GCSE's constant propagation.  */

static bool
gcse_constant_p (rtx x)
{
  /* Consider a COMPARE of two integers constant.  */
  if (GET_CODE (x) == COMPARE
      && GET_CODE (XEXP (x, 0)) == CONST_INT
      && GET_CODE (XEXP (x, 1)) == CONST_INT)
    return true;

  /* Consider a COMPARE of the same registers to be a constant
     if they are not floating point registers.  */
  if (GET_CODE (x) == COMPARE
      && REG_P (XEXP (x, 0)) && REG_P (XEXP (x, 1))
      && REGNO (XEXP (x, 0)) == REGNO (XEXP (x, 1))
      && ! FLOAT_MODE_P (GET_MODE (XEXP (x, 0)))
      && ! FLOAT_MODE_P (GET_MODE (XEXP (x, 1))))
    return true;

  return CONSTANT_P (x);
}
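
/* Note on the floating point exclusion above: for IEEE modes,
   (compare (reg X) (reg X)) is not a compile-time constant, because the
   comparison is unordered when reg X holds a NaN.  For integer modes a
   self-comparison always folds the same way, so it is safe to treat it
   as constant.  */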

/* Scan pattern PAT of INSN and add an entry to the hash TABLE (set or
   expression one).  */

static void
hash_scan_set (rtx pat, rtx insn, struct hash_table *table)
{
  rtx src = SET_SRC (pat);
  rtx dest = SET_DEST (pat);
  rtx note;

  if (GET_CODE (src) == CALL)
    hash_scan_call (src, insn, table);

  else if (REG_P (dest))
    {
      unsigned int regno = REGNO (dest);
      rtx tmp;

      /* See if a REG_NOTE shows this equivalent to a simpler expression.
	 This allows us to do a single GCSE pass and still eliminate
	 redundant constants, addresses or other expressions that are
	 constructed with multiple instructions.  */
      note = find_reg_equal_equiv_note (insn);
      if (note != 0
	  && (table->set_p
	      ? gcse_constant_p (XEXP (note, 0))
	      : want_to_gcse_p (XEXP (note, 0))))
	src = XEXP (note, 0), pat = gen_rtx_SET (VOIDmode, dest, src);

      /* Only record sets of pseudo-regs in the hash table.  */
      if (! table->set_p
	  && regno >= FIRST_PSEUDO_REGISTER
	  /* Don't GCSE something if we can't do a reg/reg copy.  */
	  && can_copy_p (GET_MODE (dest))
	  /* GCSE commonly inserts instructions after the insn.  We can't
	     do that easily for EH_REGION notes so disable GCSE on these
	     for now.  */
	  && !find_reg_note (insn, REG_EH_REGION, NULL_RTX)
	  /* Is SET_SRC something we want to gcse?  */
	  && want_to_gcse_p (src)
	  /* Don't CSE a nop.  */
	  && ! set_noop_p (pat)
	  /* Don't GCSE if it has an attached REG_EQUIV note.
	     At this point only function parameters should have
	     REG_EQUIV notes, and if the argument slot is used somewhere
	     explicitly, it means the address of the parameter has been
	     taken, so we should not extend the lifetime of the pseudo.  */
	  && (note == NULL_RTX || ! MEM_P (XEXP (note, 0))))
	{
	  /* An expression is not anticipatable if its operands are
	     modified before this insn or if this is not the only SET in
	     this insn.  */
	  int antic_p = oprs_anticipatable_p (src, insn) && single_set (insn);
	  /* An expression is not available if its operands are
	     subsequently modified, including this insn.  It's also not
	     available if this is a branch, because we can't insert
	     a set after the branch.  */
	  int avail_p = (oprs_available_p (src, insn)
			 && ! JUMP_P (insn));

	  insert_expr_in_table (src, GET_MODE (dest), insn, antic_p, avail_p, table);
	}

      /* Record sets for constant/copy propagation.  */
      else if (table->set_p
	       && regno >= FIRST_PSEUDO_REGISTER
	       && ((REG_P (src)
		    && REGNO (src) >= FIRST_PSEUDO_REGISTER
		    && can_copy_p (GET_MODE (dest))
		    && REGNO (src) != regno)
		   || gcse_constant_p (src))
	       /* A copy is not available if its src or dest is subsequently
		  modified.  Here we want to search from INSN+1 on, but
		  oprs_available_p searches from INSN on.  */
	       && (insn == BB_END (BLOCK_FOR_INSN (insn))
		   || ((tmp = next_nonnote_insn (insn)) != NULL_RTX
		       && oprs_available_p (pat, tmp))))
	insert_set_in_table (pat, insn, table);
    }
  /* In the case of a store we want to consider the memory value as
     available in the REG stored in that memory.  This makes it possible
     to remove redundant loads due to stores to the same location.  */
  else if (flag_gcse_las && REG_P (src) && MEM_P (dest))
    {
      unsigned int regno = REGNO (src);

      /* Do not do this for constant/copy propagation.  */
      if (! table->set_p
	  /* Only record sets of pseudo-regs in the hash table.  */
	  && regno >= FIRST_PSEUDO_REGISTER
	  /* Don't GCSE something if we can't do a reg/reg copy.  */
	  && can_copy_p (GET_MODE (src))
	  /* GCSE commonly inserts instructions after the insn.  We can't
	     do that easily for EH_REGION notes so disable GCSE on these
	     for now.  */
	  && ! find_reg_note (insn, REG_EH_REGION, NULL_RTX)
	  /* Is SET_DEST something we want to gcse?  */
	  && want_to_gcse_p (dest)
	  /* Don't CSE a nop.  */
	  && ! set_noop_p (pat)
	  /* Don't GCSE if it has an attached REG_EQUIV note.
	     At this point only function parameters should have
	     REG_EQUIV notes, and if the argument slot is used somewhere
	     explicitly, it means the address of the parameter has been
	     taken, so we should not extend the lifetime of the pseudo.  */
	  && ((note = find_reg_note (insn, REG_EQUIV, NULL_RTX)) == 0
	      || ! MEM_P (XEXP (note, 0))))
	{
	  /* Stores are never anticipatable.  */
	  int antic_p = 0;
	  /* An expression is not available if its operands are
	     subsequently modified, including this insn.  It's also not
	     available if this is a branch, because we can't insert
	     a set after the branch.  */
	  int avail_p = oprs_available_p (dest, insn)
			&& ! JUMP_P (insn);

	  /* Record the memory expression (DEST) in the hash table.  */
	  insert_expr_in_table (dest, GET_MODE (dest), insn,
				antic_p, avail_p, table);
	}
    }
}

static void
hash_scan_clobber (rtx x ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED,
		   struct hash_table *table ATTRIBUTE_UNUSED)
{
  /* Currently nothing to do.  */
}

static void
hash_scan_call (rtx x ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED,
		struct hash_table *table ATTRIBUTE_UNUSED)
{
  /* Currently nothing to do.  */
}

/* Process INSN and add hash table entries as appropriate.

   Only available expressions that set a single pseudo-reg are recorded.

   Single sets in a PARALLEL could be handled, but it's an extra complication
   that isn't dealt with right now.  The trick is handling the CLOBBERs that
   are also in the PARALLEL.  Later.

   If SET_P is nonzero, this is for the assignment hash table,
   otherwise it is for the expression hash table.
   If IN_LIBCALL_BLOCK is nonzero, we are in a libcall block, and should
   not record any expressions.  */

static void
hash_scan_insn (rtx insn, struct hash_table *table, int in_libcall_block)
{
  rtx pat = PATTERN (insn);
  int i;

  if (in_libcall_block)
    return;

  /* Pick out the sets of INSN and for other forms of instructions record
     what's been modified.  */

  if (GET_CODE (pat) == SET)
    hash_scan_set (pat, insn, table);
  else if (GET_CODE (pat) == PARALLEL)
    for (i = 0; i < XVECLEN (pat, 0); i++)
      {
	rtx x = XVECEXP (pat, 0, i);

	if (GET_CODE (x) == SET)
	  hash_scan_set (x, insn, table);
	else if (GET_CODE (x) == CLOBBER)
	  hash_scan_clobber (x, insn, table);
	else if (GET_CODE (x) == CALL)
	  hash_scan_call (x, insn, table);
      }

  else if (GET_CODE (pat) == CLOBBER)
    hash_scan_clobber (pat, insn, table);
  else if (GET_CODE (pat) == CALL)
    hash_scan_call (pat, insn, table);
}

static void
dump_hash_table (FILE *file, const char *name, struct hash_table *table)
{
  int i;
  /* Flattened out table, so it's printed in proper order.  */
  struct expr **flat_table;
  unsigned int *hash_val;
  struct expr *expr;

  flat_table = xcalloc (table->n_elems, sizeof (struct expr *));
  hash_val = xmalloc (table->n_elems * sizeof (unsigned int));

  for (i = 0; i < (int) table->size; i++)
    for (expr = table->table[i]; expr != NULL; expr = expr->next_same_hash)
      {
	flat_table[expr->bitmap_index] = expr;
	hash_val[expr->bitmap_index] = i;
      }

  fprintf (file, "%s hash table (%d buckets, %d entries)\n",
	   name, table->size, table->n_elems);

  for (i = 0; i < (int) table->n_elems; i++)
    if (flat_table[i] != 0)
      {
	expr = flat_table[i];
	fprintf (file, "Index %d (hash value %d)\n  ",
		 expr->bitmap_index, hash_val[i]);
	print_rtl (file, expr->expr);
	fprintf (file, "\n");
      }

  fprintf (file, "\n");

  free (flat_table);
  free (hash_val);
}

/* Record register first/last/block set information for REGNO in INSN.

   first_set records the first place in the block where the register
   is set and is used to compute "anticipatability".

   last_set records the last place in the block where the register
   is set and is used to compute "availability".

   last_bb records the block for which first_set and last_set are
   valid, as a quick test to invalidate them.

   reg_set_in_block records whether the register is set in the block
   and is used to compute "transparency".  */

static void
record_last_reg_set_info (rtx insn, int regno)
{
  struct reg_avail_info *info = &reg_avail_info[regno];
  int cuid = INSN_CUID (insn);

  info->last_set = cuid;
  if (info->last_bb != current_bb)
    {
      info->last_bb = current_bb;
      info->first_set = cuid;
      SET_BIT (reg_set_in_block[current_bb->index], regno);
    }
}


/* Record all of the canonicalized MEMs of record_last_mem_set_info's insn.
   Note we store a pair of elements in the list, so they have to be
   taken off pairwise.  */

static void
canon_list_insert (rtx dest ATTRIBUTE_UNUSED, rtx unused1 ATTRIBUTE_UNUSED,
		   void * v_insn)
{
  rtx dest_addr, insn;
  int bb;

  while (GET_CODE (dest) == SUBREG
      || GET_CODE (dest) == ZERO_EXTRACT
      || GET_CODE (dest) == STRICT_LOW_PART)
    dest = XEXP (dest, 0);

  /* If DEST is not a MEM, then it will not conflict with a load.  Note
     that function calls are assumed to clobber memory, but are handled
     elsewhere.  */

  if (! MEM_P (dest))
    return;

  dest_addr = get_addr (XEXP (dest, 0));
  dest_addr = canon_rtx (dest_addr);
  insn = (rtx) v_insn;
  bb = BLOCK_NUM (insn);

  canon_modify_mem_list[bb] =
    alloc_EXPR_LIST (VOIDmode, dest_addr, canon_modify_mem_list[bb]);
  canon_modify_mem_list[bb] =
    alloc_EXPR_LIST (VOIDmode, dest, canon_modify_mem_list[bb]);
}
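
/* For illustration: after canon_list_insert has run for two stores in a
   block, the list reads, from its head,

     (mem2, addr2, mem1, addr1)

   i.e. each MEM is immediately followed by its canonicalized address,
   newest pair first.  compute_transp below consumes the entries in
   exactly these pairs.  */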

/* Record memory modification information for INSN.  We do not actually care
   about the memory location(s) that are set, or even how they are set (consider
   a CALL_INSN).  We merely need to record which insns modify memory.  */

static void
record_last_mem_set_info (rtx insn)
{
  int bb = BLOCK_NUM (insn);

  /* load_killed_in_block_p will handle the case of calls clobbering
     everything.  */
  modify_mem_list[bb] = alloc_INSN_LIST (insn, modify_mem_list[bb]);
  bitmap_set_bit (modify_mem_list_set, bb);

  if (CALL_P (insn))
    {
      /* Note that traversals of this loop (other than for free-ing)
	 will break after encountering a CALL_INSN.  So, there's no
	 need to insert a pair of items, as canon_list_insert does.  */
      canon_modify_mem_list[bb] =
	alloc_INSN_LIST (insn, canon_modify_mem_list[bb]);
      bitmap_set_bit (blocks_with_calls, bb);
    }
  else
    note_stores (PATTERN (insn), canon_list_insert, (void*) insn);
}

/* Called from compute_hash_table via note_stores to handle one
   SET or CLOBBER in an insn.  DATA is really the instruction in which
   the SET is taking place.  */

static void
record_last_set_info (rtx dest, rtx setter ATTRIBUTE_UNUSED, void *data)
{
  rtx last_set_insn = (rtx) data;

  if (GET_CODE (dest) == SUBREG)
    dest = SUBREG_REG (dest);

  if (REG_P (dest))
    record_last_reg_set_info (last_set_insn, REGNO (dest));
  else if (MEM_P (dest)
	   /* Ignore pushes, they clobber nothing.  */
	   && ! push_operand (dest, GET_MODE (dest)))
    record_last_mem_set_info (last_set_insn);
}

/* Top level function to create an expression or assignment hash table.

   Expression entries are placed in the hash table if
   - they are of the form (set (pseudo-reg) src),
   - src is something we want to perform GCSE on,
   - none of the operands are subsequently modified in the block

   Assignment entries are placed in the hash table if
   - they are of the form (set (pseudo-reg) src),
   - src is something we want to perform const/copy propagation on,
   - none of the operands or target are subsequently modified in the block

   Currently src must be a pseudo-reg or a const_int.

   TABLE is the table computed.  */

static void
compute_hash_table_work (struct hash_table *table)
{
  unsigned int i;

  /* While we compute the hash table we also compute a bit array of which
     registers are set in which blocks.
     ??? This isn't needed during const/copy propagation, but it's cheap to
     compute.  Later.  */
  sbitmap_vector_zero (reg_set_in_block, last_basic_block);

  /* Re-cache any INSN_LIST nodes we have allocated.  */
  clear_modify_mem_tables ();
  /* Some working arrays used to track first and last set in each block.  */
  reg_avail_info = gmalloc (max_gcse_regno * sizeof (struct reg_avail_info));

  for (i = 0; i < max_gcse_regno; ++i)
    reg_avail_info[i].last_bb = NULL;

  FOR_EACH_BB (current_bb)
    {
      rtx insn;
      unsigned int regno;
      int in_libcall_block;

      /* First pass over the instructions records information used to
	 determine when registers and memory are first and last set.
	 ??? hard-reg reg_set_in_block computation
	 could be moved to compute_sets since they currently don't change.  */

      FOR_BB_INSNS (current_bb, insn)
	{
	  if (! INSN_P (insn))
	    continue;

	  if (CALL_P (insn))
	    {
	      for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
		if (TEST_HARD_REG_BIT (regs_invalidated_by_call, regno))
		  record_last_reg_set_info (insn, regno);

	      mark_call (insn);
	    }

	  note_stores (PATTERN (insn), record_last_set_info, insn);
	}

      /* Insert implicit sets in the hash table.  */
      if (table->set_p
	  && implicit_sets[current_bb->index] != NULL_RTX)
	hash_scan_set (implicit_sets[current_bb->index],
		       BB_HEAD (current_bb), table);

      /* The next pass builds the hash table.  */
      in_libcall_block = 0;
      FOR_BB_INSNS (current_bb, insn)
	if (INSN_P (insn))
	  {
	    if (find_reg_note (insn, REG_LIBCALL, NULL_RTX))
	      in_libcall_block = 1;
	    else if (table->set_p && find_reg_note (insn, REG_RETVAL, NULL_RTX))
	      in_libcall_block = 0;
	    hash_scan_insn (insn, table, in_libcall_block);
	    if (!table->set_p && find_reg_note (insn, REG_RETVAL, NULL_RTX))
	      in_libcall_block = 0;
	  }
    }

  free (reg_avail_info);
  reg_avail_info = NULL;
}

/* Allocate space for the set/expr hash TABLE.
   N_INSNS is the number of instructions in the function.
   It is used to determine the number of buckets to use.
   SET_P determines whether set or expression table will
   be created.  */

static void
alloc_hash_table (int n_insns, struct hash_table *table, int set_p)
{
  int n;

  table->size = n_insns / 4;
  if (table->size < 11)
    table->size = 11;

  /* Attempt to maintain efficient use of hash table.
     Making it an odd number is simplest for now.
     ??? Later take some measurements.  */
  table->size |= 1;
  n = table->size * sizeof (struct expr *);
  table->table = gmalloc (n);
  table->set_p = set_p;
}
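
/* A minimal standalone sketch (not part of this pass) of the sizing
   rule above: one bucket per four insns, at least 11, forced odd via
   the low bit.  Guarded out; assumes only the C standard library.  */
#if 0
#include <stdio.h>

static int
toy_table_size (int n_insns)
{
  int size = n_insns / 4;
  if (size < 11)
    size = 11;
  return size | 1;		/* Make it odd, as alloc_hash_table does.  */
}

int
main (void)
{
  printf ("%d %d %d\n",
	  toy_table_size (10),		/* 11: clamped to the minimum.  */
	  toy_table_size (100),		/* 25: 100/4, already odd.  */
	  toy_table_size (1000));	/* 251: 1000/4 = 250, |1 gives 251.  */
  return 0;
}
#endif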

/* Free things allocated by alloc_hash_table.  */

static void
free_hash_table (struct hash_table *table)
{
  free (table->table);
}

/* Compute the hash TABLE, either the one used for copy/const propagation
   or the expression hash table.  */

static void
compute_hash_table (struct hash_table *table)
{
  /* Initialize count of number of entries in hash table.  */
  table->n_elems = 0;
  memset (table->table, 0, table->size * sizeof (struct expr *));

  compute_hash_table_work (table);
}

/* Expression tracking support.  */

/* Lookup REGNO in the set TABLE.  The result is a pointer to the
   table entry, or NULL if not found.  */

static struct expr *
lookup_set (unsigned int regno, struct hash_table *table)
{
  unsigned int hash = hash_set (regno, table->size);
  struct expr *expr;

  expr = table->table[hash];

  while (expr && REGNO (SET_DEST (expr->expr)) != regno)
    expr = expr->next_same_hash;

  return expr;
}

/* Return the next entry for REGNO in list EXPR.  */

static struct expr *
next_set (unsigned int regno, struct expr *expr)
{
  do
    expr = expr->next_same_hash;
  while (expr && REGNO (SET_DEST (expr->expr)) != regno);

  return expr;
}

/* Like free_INSN_LIST_list or free_EXPR_LIST_list, except that the node
   types may be mixed.  */

static void
free_insn_expr_list_list (rtx *listp)
{
  rtx list, next;

  for (list = *listp; list ; list = next)
    {
      next = XEXP (list, 1);
      if (GET_CODE (list) == EXPR_LIST)
	free_EXPR_LIST_node (list);
      else
	free_INSN_LIST_node (list);
    }

  *listp = NULL;
}

/* Clear canon_modify_mem_list and modify_mem_list tables.  */
static void
clear_modify_mem_tables (void)
{
  unsigned i;
  bitmap_iterator bi;

  EXECUTE_IF_SET_IN_BITMAP (modify_mem_list_set, 0, i, bi)
    {
      free_INSN_LIST_list (modify_mem_list + i);
      free_insn_expr_list_list (canon_modify_mem_list + i);
    }
  bitmap_clear (modify_mem_list_set);
  bitmap_clear (blocks_with_calls);
}

/* Release memory used by modify_mem_list_set.  */

static void
free_modify_mem_tables (void)
{
  clear_modify_mem_tables ();
  free (modify_mem_list);
  free (canon_modify_mem_list);
  modify_mem_list = 0;
  canon_modify_mem_list = 0;
}

/* Reset tables used to keep track of what's still available [since the
   start of the block].  */

static void
reset_opr_set_tables (void)
{
  /* Maintain a bitmap of which regs have been set since beginning of
     the block.  */
  CLEAR_REG_SET (reg_set_bitmap);

  /* Also keep a record of the last instruction to modify memory.
     For now this is very trivial, we only record whether any memory
     location has been modified.  */
  clear_modify_mem_tables ();
}

/* Return nonzero if the operands of X are not set before INSN in
   INSN's basic block.  */

static int
oprs_not_set_p (rtx x, rtx insn)
{
  int i, j;
  enum rtx_code code;
  const char *fmt;

  if (x == 0)
    return 1;

  code = GET_CODE (x);
  switch (code)
    {
    case PC:
    case CC0:
    case CONST:
    case CONST_INT:
    case CONST_DOUBLE:
    case CONST_VECTOR:
    case SYMBOL_REF:
    case LABEL_REF:
    case ADDR_VEC:
    case ADDR_DIFF_VEC:
      return 1;

    case MEM:
      if (load_killed_in_block_p (BLOCK_FOR_INSN (insn),
				  INSN_CUID (insn), x, 0))
	return 0;
      else
	return oprs_not_set_p (XEXP (x, 0), insn);

    case REG:
      return ! REGNO_REG_SET_P (reg_set_bitmap, REGNO (x));

    default:
      break;
    }

  for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
    {
      if (fmt[i] == 'e')
	{
	  /* If we are about to do the last recursive call
	     needed at this level, change it into iteration.
	     This function is called enough to be worth it.  */
	  if (i == 0)
	    return oprs_not_set_p (XEXP (x, i), insn);

	  if (! oprs_not_set_p (XEXP (x, i), insn))
	    return 0;
	}
      else if (fmt[i] == 'E')
	for (j = 0; j < XVECLEN (x, i); j++)
	  if (! oprs_not_set_p (XVECEXP (x, i, j), insn))
	    return 0;
    }

  return 1;
}

/* Mark things set by a CALL.  */

static void
mark_call (rtx insn)
{
  if (! CONST_OR_PURE_CALL_P (insn))
    record_last_mem_set_info (insn);
}

/* Mark things set by a SET.  */

static void
mark_set (rtx pat, rtx insn)
{
  rtx dest = SET_DEST (pat);

  while (GET_CODE (dest) == SUBREG
	 || GET_CODE (dest) == ZERO_EXTRACT
	 || GET_CODE (dest) == STRICT_LOW_PART)
    dest = XEXP (dest, 0);

  if (REG_P (dest))
    SET_REGNO_REG_SET (reg_set_bitmap, REGNO (dest));
  else if (MEM_P (dest))
    record_last_mem_set_info (insn);

  if (GET_CODE (SET_SRC (pat)) == CALL)
    mark_call (insn);
}

/* Record things set by a CLOBBER.  */

static void
mark_clobber (rtx pat, rtx insn)
{
  rtx clob = XEXP (pat, 0);

  while (GET_CODE (clob) == SUBREG || GET_CODE (clob) == STRICT_LOW_PART)
    clob = XEXP (clob, 0);

  if (REG_P (clob))
    SET_REGNO_REG_SET (reg_set_bitmap, REGNO (clob));
  else
    record_last_mem_set_info (insn);
}

/* Record things set by INSN.
   This data is used by oprs_not_set_p.  */

static void
mark_oprs_set (rtx insn)
{
  rtx pat = PATTERN (insn);
  int i;

  if (GET_CODE (pat) == SET)
    mark_set (pat, insn);
  else if (GET_CODE (pat) == PARALLEL)
    for (i = 0; i < XVECLEN (pat, 0); i++)
      {
	rtx x = XVECEXP (pat, 0, i);

	if (GET_CODE (x) == SET)
	  mark_set (x, insn);
	else if (GET_CODE (x) == CLOBBER)
	  mark_clobber (x, insn);
	else if (GET_CODE (x) == CALL)
	  mark_call (insn);
      }

  else if (GET_CODE (pat) == CLOBBER)
    mark_clobber (pat, insn);
  else if (GET_CODE (pat) == CALL)
    mark_call (insn);
}


/* Compute copy/constant propagation working variables.  */

/* Local properties of assignments.  */
static sbitmap *cprop_pavloc;
static sbitmap *cprop_absaltered;

/* Global properties of assignments (computed from the local properties).  */
static sbitmap *cprop_avin;
static sbitmap *cprop_avout;

/* Allocate vars used for copy/const propagation.  N_BLOCKS is the number of
   basic blocks.  N_SETS is the number of sets.  */

static void
alloc_cprop_mem (int n_blocks, int n_sets)
{
  cprop_pavloc = sbitmap_vector_alloc (n_blocks, n_sets);
  cprop_absaltered = sbitmap_vector_alloc (n_blocks, n_sets);

  cprop_avin = sbitmap_vector_alloc (n_blocks, n_sets);
  cprop_avout = sbitmap_vector_alloc (n_blocks, n_sets);
}

/* Free vars used by copy/const propagation.  */

static void
free_cprop_mem (void)
{
  sbitmap_vector_free (cprop_pavloc);
  sbitmap_vector_free (cprop_absaltered);
  sbitmap_vector_free (cprop_avin);
  sbitmap_vector_free (cprop_avout);
}

/* For each block, compute whether X is transparent.  X is either an
   expression or an assignment [though we don't care which, for this context
   an assignment is treated as an expression].  For each block where an
   element of X is modified, set (SET_P == 1) or reset (SET_P == 0) the INDX
   bit in BMAP.  */

static void
compute_transp (rtx x, int indx, sbitmap *bmap, int set_p)
{
  int i, j;
  basic_block bb;
  enum rtx_code code;
  reg_set *r;
  const char *fmt;

  /* repeat is used to turn tail-recursion into iteration since GCC
     can't do it when there's no return value.  */
 repeat:

  if (x == 0)
    return;

  code = GET_CODE (x);
  switch (code)
    {
    case REG:
      if (set_p)
	{
	  if (REGNO (x) < FIRST_PSEUDO_REGISTER)
	    {
	      FOR_EACH_BB (bb)
		if (TEST_BIT (reg_set_in_block[bb->index], REGNO (x)))
		  SET_BIT (bmap[bb->index], indx);
	    }
	  else
	    {
	      for (r = reg_set_table[REGNO (x)]; r != NULL; r = r->next)
		SET_BIT (bmap[r->bb_index], indx);
	    }
	}
      else
	{
	  if (REGNO (x) < FIRST_PSEUDO_REGISTER)
	    {
	      FOR_EACH_BB (bb)
		if (TEST_BIT (reg_set_in_block[bb->index], REGNO (x)))
		  RESET_BIT (bmap[bb->index], indx);
	    }
	  else
	    {
	      for (r = reg_set_table[REGNO (x)]; r != NULL; r = r->next)
		RESET_BIT (bmap[r->bb_index], indx);
	    }
	}

      return;

    case MEM:
      if (! MEM_READONLY_P (x))
	{
	  bitmap_iterator bi;
	  unsigned bb_index;

	  /* First handle all the blocks with calls.  We don't need to
	     do any list walking for them.  */
	  EXECUTE_IF_SET_IN_BITMAP (blocks_with_calls, 0, bb_index, bi)
	    {
	      if (set_p)
		SET_BIT (bmap[bb_index], indx);
	      else
		RESET_BIT (bmap[bb_index], indx);
	    }

	  /* Now iterate over the blocks which have memory modifications
	     but which do not have any calls.  */
	  EXECUTE_IF_AND_COMPL_IN_BITMAP (modify_mem_list_set,
					  blocks_with_calls,
					  0, bb_index, bi)
	    {
	      rtx list_entry = canon_modify_mem_list[bb_index];

	      while (list_entry)
		{
		  rtx dest, dest_addr;

		  /* LIST_ENTRY must be an INSN of some kind that sets memory.
		     Examine each hunk of memory that is modified.  */

		  dest = XEXP (list_entry, 0);
		  list_entry = XEXP (list_entry, 1);
		  dest_addr = XEXP (list_entry, 0);

		  if (canon_true_dependence (dest, GET_MODE (dest), dest_addr,
					     x, rtx_addr_varies_p))
		    {
		      if (set_p)
			SET_BIT (bmap[bb_index], indx);
		      else
			RESET_BIT (bmap[bb_index], indx);
		      break;
		    }
		  list_entry = XEXP (list_entry, 1);
		}
	    }
	}

      x = XEXP (x, 0);
      goto repeat;

    case PC:
    case CC0: /*FIXME*/
    case CONST:
    case CONST_INT:
    case CONST_DOUBLE:
    case CONST_VECTOR:
    case SYMBOL_REF:
    case LABEL_REF:
    case ADDR_VEC:
    case ADDR_DIFF_VEC:
      return;

    default:
      break;
    }

  for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
    {
      if (fmt[i] == 'e')
	{
	  /* If we are about to do the last recursive call
	     needed at this level, change it into iteration.
	     This function is called enough to be worth it.  */
	  if (i == 0)
	    {
	      x = XEXP (x, i);
	      goto repeat;
	    }

	  compute_transp (XEXP (x, i), indx, bmap, set_p);
	}
      else if (fmt[i] == 'E')
	for (j = 0; j < XVECLEN (x, i); j++)
	  compute_transp (XVECEXP (x, i, j), indx, bmap, set_p);
    }
}
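
/* For illustration: with SET_P == 0 the caller seeds BMAP all-ones and
   the result is the usual transparency (TRANSP) set.  If block 7 writes
   reg 20, or stores through a pointer that may alias a load
   (mem (reg 20)) per canon_true_dependence, the INDX bit is cleared in
   bmap[7] and the expression cannot be moved across that block.  */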

/* Top level routine to do the dataflow analysis needed by copy/const
   propagation.  */

static void
compute_cprop_data (void)
{
  compute_local_properties (cprop_absaltered, cprop_pavloc, NULL, &set_hash_table);
  compute_available (cprop_pavloc, cprop_absaltered,
		     cprop_avout, cprop_avin);
}

/* Copy/constant propagation.  */

/* Maximum number of register uses in an insn that we handle.  */
#define MAX_USES 8

/* Table of uses found in an insn.
   Allocated statically to avoid alloc/free complexity and overhead.  */
static struct reg_use reg_use_table[MAX_USES];

/* Index into `reg_use_table' while building it.  */
static int reg_use_count;

/* Set up a list of register numbers used in INSN.  The found uses are stored
   in `reg_use_table'.  `reg_use_count' is initialized to zero before entry,
   and contains the number of uses in the table upon exit.

   ??? If a register appears multiple times we will record it multiple times.
   This doesn't hurt anything but it will slow things down.  */

static void
find_used_regs (rtx *xptr, void *data ATTRIBUTE_UNUSED)
{
  int i, j;
  enum rtx_code code;
  const char *fmt;
  rtx x = *xptr;

  /* repeat is used to turn tail-recursion into iteration since GCC
     can't do it when there's no return value.  */
 repeat:
  if (x == 0)
    return;

  code = GET_CODE (x);
  if (REG_P (x))
    {
      if (reg_use_count == MAX_USES)
	return;

      reg_use_table[reg_use_count].reg_rtx = x;
      reg_use_count++;
    }

  /* Recursively scan the operands of this expression.  */

  for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
    {
      if (fmt[i] == 'e')
	{
	  /* If we are about to do the last recursive call
	     needed at this level, change it into iteration.
	     This function is called enough to be worth it.  */
	  if (i == 0)
	    {
	      x = XEXP (x, 0);
	      goto repeat;
	    }

	  find_used_regs (&XEXP (x, i), data);
	}
      else if (fmt[i] == 'E')
	for (j = 0; j < XVECLEN (x, i); j++)
	  find_used_regs (&XVECEXP (x, i, j), data);
    }
}

/* Try to replace all non-SET_DEST occurrences of FROM in INSN with TO.
   Returns nonzero if successful.  */

static int
try_replace_reg (rtx from, rtx to, rtx insn)
{
  rtx note = find_reg_equal_equiv_note (insn);
  rtx src = 0;
  int success = 0;
  rtx set = single_set (insn);

  validate_replace_src_group (from, to, insn);
  if (num_changes_pending () && apply_change_group ())
    success = 1;

  /* Try to simplify SET_SRC if we have substituted a constant.  */
  if (success && set && CONSTANT_P (to))
    {
      src = simplify_rtx (SET_SRC (set));

      if (src)
	validate_change (insn, &SET_SRC (set), src, 0);
    }

  /* If there is already a REG_EQUAL note, update the expression in it
     with our replacement.  */
  if (note != 0 && REG_NOTE_KIND (note) == REG_EQUAL)
    XEXP (note, 0) = simplify_replace_rtx (XEXP (note, 0), from, to);

  if (!success && set && reg_mentioned_p (from, SET_SRC (set)))
    {
      /* If the above failed and this is a single set, try to simplify the
	 source of the set given our substitution.  We could perhaps try
	 this for multiple SETs, but it probably won't buy us anything.  */
      src = simplify_replace_rtx (SET_SRC (set), from, to);

      if (!rtx_equal_p (src, SET_SRC (set))
	  && validate_change (insn, &SET_SRC (set), src, 0))
	success = 1;

      /* If we've failed to do replacement, have a single SET, don't already
	 have a note, and have no special SET, add a REG_EQUAL note to not
	 lose information.  */
      if (!success && note == 0 && set != 0
	  && GET_CODE (SET_DEST (set)) != ZERO_EXTRACT
	  && GET_CODE (SET_DEST (set)) != STRICT_LOW_PART)
	note = set_unique_reg_note (insn, REG_EQUAL, copy_rtx (src));
    }

  /* The REG_EQUAL note may get simplified into a plain register.  We
     don't allow that; remove the note in that case.  This ought not to
     happen, because previous code ought to have synthesized a reg-reg
     move, but be on the safe side.  */
  if (note && REG_NOTE_KIND (note) == REG_EQUAL && REG_P (XEXP (note, 0)))
    remove_note (insn, note);

  return success;
}

/* Find a set of register REGNO that is available on entry to INSN's block.
   Returns NULL if no such set is found.  */

static struct expr *
find_avail_set (int regno, rtx insn)
{
  /* SET1 contains the last set found that can be returned to the caller for
     use in a substitution.  */
  struct expr *set1 = 0;

  /* Loops are not possible here.  To get a loop we would need two sets
     available at the start of the block containing INSN.  i.e. we would
     need two sets like this available at the start of the block:

       (set (reg X) (reg Y))
       (set (reg Y) (reg X))

     This cannot happen since the set of (reg Y) would have killed the
     set of (reg X) making it unavailable at the start of this block.  */
  while (1)
    {
      rtx src;
      struct expr *set = lookup_set (regno, &set_hash_table);

      /* Find a set that is available at the start of the block
	 which contains INSN.  */
      while (set)
	{
	  if (TEST_BIT (cprop_avin[BLOCK_NUM (insn)], set->bitmap_index))
	    break;
	  set = next_set (regno, set);
	}

      /* If no available set was found we've reached the end of the
	 (possibly empty) copy chain.  */
      if (set == 0)
	break;

      gcc_assert (GET_CODE (set->expr) == SET);

      src = SET_SRC (set->expr);

      /* We know the set is available.
	 Now check that SRC is ANTLOC (i.e. none of the source operands
	 have changed since the start of the block).

         If the source operand changed, we may still use it for the next
         iteration of this loop, but we may not use it for substitutions.  */

      if (gcse_constant_p (src) || oprs_not_set_p (src, insn))
	set1 = set;

      /* If the source of the set is anything except a register, then
	 we have reached the end of the copy chain.  */
      if (! REG_P (src))
	break;

      /* Follow the copy chain, i.e. start another iteration of the loop
	 and see if we have an available copy into SRC.  */
      regno = REGNO (src);
    }

  /* SET1 holds the last set that was available and anticipatable at
     INSN.  */
  return set1;
}
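
/* For illustration, the copy chain walk above in action: if the block
   entry has both

     (set (reg 130) (reg 128))
     (set (reg 128) (const_int 42))

   available, a use of reg 130 first finds the copy from reg 128, and the
   next iteration follows REGNO (src) to the constant set, so the constant
   is what ultimately gets propagated (provided each source is still
   unchanged at INSN).  */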

/* Subroutine of cprop_insn that tries to propagate constants into
   JUMP_INSNS.  JUMP must be a conditional jump.  If SETCC is non-NULL
   it is the instruction that immediately precedes JUMP, and must be a
   single SET of a register.  FROM is what we will try to replace,
   SRC is the constant we will try to substitute for it.  Returns nonzero
   if a change was made.  */

static int
cprop_jump (basic_block bb, rtx setcc, rtx jump, rtx from, rtx src)
{
  rtx new, set_src, note_src;
  rtx set = pc_set (jump);
  rtx note = find_reg_equal_equiv_note (jump);

  if (note)
    {
      note_src = XEXP (note, 0);
      if (GET_CODE (note_src) == EXPR_LIST)
	note_src = NULL_RTX;
    }
  else note_src = NULL_RTX;

  /* Prefer REG_EQUAL notes except those containing EXPR_LISTs.  */
  set_src = note_src ? note_src : SET_SRC (set);

  /* First substitute the SETCC condition into the JUMP instruction,
     then substitute the given values into this expanded JUMP.  */
  if (setcc != NULL_RTX
      && !modified_between_p (from, setcc, jump)
      && !modified_between_p (src, setcc, jump))
    {
      rtx setcc_src;
      rtx setcc_set = single_set (setcc);
      rtx setcc_note = find_reg_equal_equiv_note (setcc);
      setcc_src = (setcc_note && GET_CODE (XEXP (setcc_note, 0)) != EXPR_LIST)
		? XEXP (setcc_note, 0) : SET_SRC (setcc_set);
      set_src = simplify_replace_rtx (set_src, SET_DEST (setcc_set),
				      setcc_src);
    }
  else
    setcc = NULL_RTX;

  new = simplify_replace_rtx (set_src, from, src);

  /* If no simplification can be made, then try the next register.  */
  if (rtx_equal_p (new, SET_SRC (set)))
    return 0;

  /* If this is now a no-op delete it, otherwise this must be a valid insn.  */
  if (new == pc_rtx)
    delete_insn (jump);
  else
    {
      /* Ensure the value computed inside the jump insn is equivalent
         to the one computed by setcc.  */
      if (setcc && modified_in_p (new, setcc))
	return 0;
      if (! validate_change (jump, &SET_SRC (set), new, 0))
	{
	  /* When (some) constants are not valid in a comparison, and there
	     are two registers to be replaced by constants before the entire
	     comparison can be folded into a constant, we need to keep
	     intermediate information in REG_EQUAL notes.  For targets with
	     separate compare insns, such notes are added by try_replace_reg.
	     When we have a combined compare-and-branch instruction, however,
	     we need to attach a note to the branch itself to make this
	     optimization work.  */

	  if (!rtx_equal_p (new, note_src))
	    set_unique_reg_note (jump, REG_EQUAL, copy_rtx (new));
	  return 0;
	}

      /* Remove the REG_EQUAL note after simplification.  */
      if (note_src)
	remove_note (jump, note);

      /* If this has turned into an unconditional jump,
	 then put a barrier after it so that the unreachable
	 code will be deleted.  */
      if (GET_CODE (SET_SRC (set)) == LABEL_REF)
	emit_barrier_after (jump);
     }

#ifdef HAVE_cc0
  /* Delete the cc0 setter.  */
  if (setcc != NULL && CC0_P (SET_DEST (single_set (setcc))))
    delete_insn (setcc);
#endif

  run_jump_opt_after_gcse = 1;

  global_const_prop_count++;
  if (dump_file != NULL)
    {
      fprintf (dump_file,
	       "GLOBAL CONST-PROP: Replacing reg %d in jump_insn %d with constant ",
	       REGNO (from), INSN_UID (jump));
      print_rtl (dump_file, src);
      fprintf (dump_file, "\n");
    }
  purge_dead_edges (bb);

  return 1;
}

static bool
constprop_register (rtx insn, rtx from, rtx to, bool alter_jumps)
{
  rtx sset;

  /* Check for reg or cc0 setting instructions followed by
     conditional branch instructions first.  */
  if (alter_jumps
      && (sset = single_set (insn)) != NULL
      && NEXT_INSN (insn)
      && any_condjump_p (NEXT_INSN (insn)) && onlyjump_p (NEXT_INSN (insn)))
    {
      rtx dest = SET_DEST (sset);
      if ((REG_P (dest) || CC0_P (dest))
	  && cprop_jump (BLOCK_FOR_INSN (insn), insn, NEXT_INSN (insn), from, to))
	return 1;
    }

  /* Handle normal insns next.  */
  if (NONJUMP_INSN_P (insn)
      && try_replace_reg (from, to, insn))
    return 1;

  /* Try to propagate a CONST_INT into a conditional jump.
     We're pretty specific about what we will handle in this
     code, we can extend this as necessary over time.

     Right now the insn in question must look like
     (set (pc) (if_then_else ...))  */
  else if (alter_jumps && any_condjump_p (insn) && onlyjump_p (insn))
    return cprop_jump (BLOCK_FOR_INSN (insn), NULL, insn, from, to);
  return 0;
}
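
/* For illustration, the two-insn shape handled by the first case above
   (register numbers are made up):

     (set (reg 100) (lt (reg 101) (const_int 0)))
     (set (pc) (if_then_else (ne (reg 100) (const_int 0)) ...))

   Propagating a constant for reg 101 lets cprop_jump substitute the
   setcc condition into the branch and possibly fold the branch away.  */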

/* Perform constant and copy propagation on INSN.
   The result is nonzero if a change was made.  */

static int
cprop_insn (rtx insn, int alter_jumps)
{
  struct reg_use *reg_used;
  int changed = 0;
  rtx note;

  if (!INSN_P (insn))
    return 0;

  reg_use_count = 0;
  note_uses (&PATTERN (insn), find_used_regs, NULL);

  note = find_reg_equal_equiv_note (insn);

  /* We may win even when propagating constants into notes.  */
  if (note)
    find_used_regs (&XEXP (note, 0), NULL);

  for (reg_used = &reg_use_table[0]; reg_use_count > 0;
       reg_used++, reg_use_count--)
    {
      unsigned int regno = REGNO (reg_used->reg_rtx);
      rtx pat, src;
      struct expr *set;

      /* Ignore registers created by GCSE.
	 We do this because ...  */
      if (regno >= max_gcse_regno)
	continue;

      /* If the register has already been set in this block, there's
	 nothing we can do.  */
      if (! oprs_not_set_p (reg_used->reg_rtx, insn))
	continue;

      /* Find an assignment that sets reg_used and is available
	 at the start of the block.  */
      set = find_avail_set (regno, insn);
      if (! set)
	continue;

      pat = set->expr;
      /* ??? We might be able to handle PARALLELs.  Later.  */
      gcc_assert (GET_CODE (pat) == SET);

      src = SET_SRC (pat);

      /* Constant propagation.  */
      if (gcse_constant_p (src))
	{
          if (constprop_register (insn, reg_used->reg_rtx, src, alter_jumps))
	    {
	      changed = 1;
	      global_const_prop_count++;
	      if (dump_file != NULL)
		{
		  fprintf (dump_file, "GLOBAL CONST-PROP: Replacing reg %d in ", regno);
		  fprintf (dump_file, "insn %d with constant ", INSN_UID (insn));
		  print_rtl (dump_file, src);
		  fprintf (dump_file, "\n");
		}
	      if (INSN_DELETED_P (insn))
		return 1;
	    }
	}
      else if (REG_P (src)
	       && REGNO (src) >= FIRST_PSEUDO_REGISTER
	       && REGNO (src) != regno)
	{
	  if (try_replace_reg (reg_used->reg_rtx, src, insn))
	    {
	      changed = 1;
	      global_copy_prop_count++;
	      if (dump_file != NULL)
		{
		  fprintf (dump_file, "GLOBAL COPY-PROP: Replacing reg %d in insn %d",
			   regno, INSN_UID (insn));
		  fprintf (dump_file, " with reg %d\n", REGNO (src));
		}

	      /* The original insn setting reg_used may or may not now be
		 deletable.  We leave the deletion to flow.  */
	      /* FIXME: If it turns out that the insn isn't deletable,
		 then we may have unnecessarily extended register lifetimes
		 and made things worse.  */
	    }
	}
    }

  return changed;
}

/* Like find_used_regs, but avoid recording uses that appear in
   input-output contexts such as zero_extract or pre_dec.  This
   restricts the cases we consider to those for which local cprop
   can legitimately make replacements.  */

static void
local_cprop_find_used_regs (rtx *xptr, void *data)
{
  rtx x = *xptr;

  if (x == 0)
    return;

  switch (GET_CODE (x))
    {
    case ZERO_EXTRACT:
    case SIGN_EXTRACT:
    case STRICT_LOW_PART:
      return;

    case PRE_DEC:
    case PRE_INC:
    case POST_DEC:
    case POST_INC:
    case PRE_MODIFY:
    case POST_MODIFY:
      /* Can only legitimately appear this early in the context of
	 stack pushes for function arguments, but handle all of the
	 codes nonetheless.  */
      return;

    case SUBREG:
      /* Setting a subreg of a register larger than word_mode leaves
	 the non-written words unchanged.  */
      if (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x))) > BITS_PER_WORD)
	return;
      break;

    default:
      break;
    }

  find_used_regs (xptr, data);
}

/* LIBCALL_SP is a zero-terminated array of insns at the end of a libcall;
   their REG_EQUAL notes need updating.  */

static bool
do_local_cprop (rtx x, rtx insn, bool alter_jumps, rtx *libcall_sp)
{
  rtx newreg = NULL, newcnst = NULL;

  /* Rule out USE instructions and ASM statements as we don't want to
     change the hard registers mentioned.  */
  if (REG_P (x)
      && (REGNO (x) >= FIRST_PSEUDO_REGISTER
          || (GET_CODE (PATTERN (insn)) != USE
	      && asm_noperands (PATTERN (insn)) < 0)))
    {
      cselib_val *val = cselib_lookup (x, GET_MODE (x), 0);
      struct elt_loc_list *l;

      if (!val)
	return false;
      for (l = val->locs; l; l = l->next)
	{
	  rtx this_rtx = l->loc;
	  rtx note;

	  /* Don't CSE non-constant values out of libcall blocks.  */
	  if (l->in_libcall && ! CONSTANT_P (this_rtx))
	    continue;

	  if (gcse_constant_p (this_rtx))
	    newcnst = this_rtx;
	  if (REG_P (this_rtx) && REGNO (this_rtx) >= FIRST_PSEUDO_REGISTER
	      /* Don't copy propagate if it has an attached REG_EQUIV note.
		 At this point only function parameters should have
		 REG_EQUIV notes, and if the argument slot is used somewhere
		 explicitly, it means the address of the parameter has been
		 taken, so we should not extend the lifetime of the pseudo.  */
	      && (!(note = find_reg_note (l->setting_insn, REG_EQUIV, NULL_RTX))
		  || ! MEM_P (XEXP (note, 0))))
	    newreg = this_rtx;
	}
      if (newcnst && constprop_register (insn, x, newcnst, alter_jumps))
	{
	  /* If we find a case where we can't make the retval REG_EQUAL notes
	     match the new register, we either have to abandon this replacement
	     or fix delete_trivially_dead_insns to preserve the setting insn,
	     or make it delete the REG_EQUAL note, and fix up all passes that
	     require the REG_EQUAL note there.  */
	  bool adjusted;

	  adjusted = adjust_libcall_notes (x, newcnst, insn, libcall_sp);
	  gcc_assert (adjusted);

	  if (dump_file != NULL)
	    {
	      fprintf (dump_file, "LOCAL CONST-PROP: Replacing reg %d in ",
		       REGNO (x));
	      fprintf (dump_file, "insn %d with constant ",
		       INSN_UID (insn));
	      print_rtl (dump_file, newcnst);
	      fprintf (dump_file, "\n");
	    }
	  local_const_prop_count++;
	  return true;
	}
      else if (newreg && newreg != x && try_replace_reg (x, newreg, insn))
	{
	  adjust_libcall_notes (x, newreg, insn, libcall_sp);
	  if (dump_file != NULL)
	    {
	      fprintf (dump_file,
		       "LOCAL COPY-PROP: Replacing reg %d in insn %d",
		       REGNO (x), INSN_UID (insn));
	      fprintf (dump_file, " with reg %d\n", REGNO (newreg));
	    }
	  local_copy_prop_count++;
	  return true;
	}
    }
  return false;
}

/* LIBCALL_SP is a zero-terminated array of insns at the end of a libcall;
   their REG_EQUAL notes need updating to reflect that OLDREG has been
   replaced with NEWVAL in INSN.  Return true if all substitutions could
   be made.  */
static bool
adjust_libcall_notes (rtx oldreg, rtx newval, rtx insn, rtx *libcall_sp)
{
  rtx end;

  while ((end = *libcall_sp++))
    {
      rtx note = find_reg_equal_equiv_note (end);

      if (! note)
	continue;

      if (REG_P (newval))
	{
	  if (reg_set_between_p (newval, PREV_INSN (insn), end))
	    {
	      do
		{
		  note = find_reg_equal_equiv_note (end);
		  if (! note)
		    continue;
		  if (reg_mentioned_p (newval, XEXP (note, 0)))
		    return false;
		}
	      while ((end = *libcall_sp++));
	      return true;
	    }
	}
      XEXP (note, 0) = simplify_replace_rtx (XEXP (note, 0), oldreg, newval);
      insn = end;
    }
  return true;
}
3165 
3166 #define MAX_NESTED_LIBCALLS 9
3167 
3168 /* Do local const/copy propagation (i.e. within each basic block).
3169    If ALTER_JUMPS is true, allow propagating into jump insns, which
3170    could modify the CFG.  */
3171 
3172 static void
local_cprop_pass(bool alter_jumps)3173 local_cprop_pass (bool alter_jumps)
3174 {
3175   basic_block bb;
3176   rtx insn;
3177   struct reg_use *reg_used;
3178   rtx libcall_stack[MAX_NESTED_LIBCALLS + 1], *libcall_sp;
3179   bool changed = false;
3180 
3181   cselib_init (false);
3182   libcall_sp = &libcall_stack[MAX_NESTED_LIBCALLS];
3183   *libcall_sp = 0;
3184   FOR_EACH_BB (bb)
3185     {
3186       FOR_BB_INSNS (bb, insn)
3187 	{
3188 	  if (INSN_P (insn))
3189 	    {
3190 	      rtx note = find_reg_note (insn, REG_LIBCALL, NULL_RTX);
3191 
3192 	      if (note)
3193 		{
3194 		  gcc_assert (libcall_sp != libcall_stack);
3195 		  *--libcall_sp = XEXP (note, 0);
3196 		}
3197 	      note = find_reg_note (insn, REG_RETVAL, NULL_RTX);
3198 	      if (note)
3199 		libcall_sp++;
3200 	      note = find_reg_equal_equiv_note (insn);
3201 	      do
3202 		{
3203 		  reg_use_count = 0;
3204 		  note_uses (&PATTERN (insn), local_cprop_find_used_regs,
3205 			     NULL);
3206 		  if (note)
3207 		    local_cprop_find_used_regs (&XEXP (note, 0), NULL);
3208 
3209 		  for (reg_used = &reg_use_table[0]; reg_use_count > 0;
3210 		       reg_used++, reg_use_count--)
3211 		    if (do_local_cprop (reg_used->reg_rtx, insn, alter_jumps,
3212 			libcall_sp))
3213 		      {
3214 			changed = true;
3215 			break;
3216 		      }
3217 		  if (INSN_DELETED_P (insn))
3218 		    break;
3219 		}
3220 	      while (reg_use_count);
3221 	    }
3222 	  cselib_process_insn (insn);
3223 	}
3224 
3225       /* Forget everything at the end of a basic block.  Make sure we are
3226 	 not inside a libcall; libcalls should never cross basic blocks.  */
3227       cselib_clear_table ();
3228       gcc_assert (libcall_sp == &libcall_stack[MAX_NESTED_LIBCALLS]);
3229     }
3230 
3231   cselib_finish ();
3232 
3233   /* Global analysis may get into infinite loops for unreachable blocks.  */
3234   if (changed && alter_jumps)
3235     {
3236       delete_unreachable_blocks ();
3237       free_reg_set_mem ();
3238       alloc_reg_set_mem (max_reg_num ());
3239       compute_sets ();
3240     }
3241 }
3242 
3243 /* Forward propagate copies.  This includes copies and constants.  Return
3244    nonzero if a change was made.  */
3245 
3246 static int
3247 cprop (int alter_jumps)
3248 {
3249   int changed;
3250   basic_block bb;
3251   rtx insn;
3252 
3253   /* Note we start at block 1.  */
3254   if (ENTRY_BLOCK_PTR->next_bb == EXIT_BLOCK_PTR)
3255     {
3256       if (dump_file != NULL)
3257 	fprintf (dump_file, "\n");
3258       return 0;
3259     }
3260 
3261   changed = 0;
3262   FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR->next_bb->next_bb, EXIT_BLOCK_PTR, next_bb)
3263     {
3264       /* Reset tables used to keep track of what's still valid [since the
3265 	 start of the block].  */
3266       reset_opr_set_tables ();
3267 
3268       FOR_BB_INSNS (bb, insn)
3269 	if (INSN_P (insn))
3270 	  {
3271 	    changed |= cprop_insn (insn, alter_jumps);
3272 
3273 	    /* Keep track of everything modified by this insn.  */
3274 	    /* ??? Need to be careful w.r.t. mods done to INSN.  Don't
3275 	       call mark_oprs_set if we turned the insn into a NOTE.  */
3276 	    if (! NOTE_P (insn))
3277 	      mark_oprs_set (insn);
3278 	  }
3279     }
3280 
3281   if (dump_file != NULL)
3282     fprintf (dump_file, "\n");
3283 
3284   return changed;
3285 }
3286 
3287 /* Similar to get_condition, only the resulting condition must be
3288    valid at JUMP, instead of at EARLIEST.
3289 
3290    This differs from noce_get_condition in ifcvt.c in that we prefer not to
3291    settle for the condition variable in the jump instruction being integral.
3292    We prefer to be able to record the value of a user variable, rather than
3293    the value of a temporary used in a condition.  This could be solved by
3294    recording the value of *every* register scanned by canonicalize_condition,
3295    but this would require some code reorganization.  */
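/* Illustrative difference (hypothetical source): given

     tmp = (a < b);
     if (tmp) ...

   settling for the jump's own condition would record only "tmp != 0";
   by asking get_condition for a condition valid at the jump we can
   instead recover "a < b" and record facts about the user variables
   a and b.  */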
3296 
3297 rtx
3298 fis_get_condition (rtx jump)
3299 {
3300   return get_condition (jump, NULL, false, true);
3301 }
3302 
3303 /* Check the comparison COND to see if we can safely form an implicit set from
3304    it.  COND is either an EQ or NE comparison.  */
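/* Example of the signed-zero hazard this check guards against: under
   IEEE semantics, "if (x == 0.0)" does not imply that x is +0.0, since
   -0.0 compares equal to 0.0.  Recording the implicit set "x = 0.0" in
   the taken branch could therefore change the sign of a zero, so we
   refuse whenever HONOR_SIGNED_ZEROS holds and CST may be a zero.  */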
3305 
3306 static bool
3307 implicit_set_cond_p (rtx cond)
3308 {
3309   enum machine_mode mode = GET_MODE (XEXP (cond, 0));
3310   rtx cst = XEXP (cond, 1);
3311 
3312   /* We can't perform this optimization if either operand might be or might
3313      contain a signed zero.  */
3314   if (HONOR_SIGNED_ZEROS (mode))
3315     {
3316       /* It is sufficient to check if CST is or contains a zero.  We must
3317 	 handle float, complex, and vector.  If any subpart is a zero, then
3318 	 the optimization can't be performed.  */
3319       /* ??? The complex and vector checks are not implemented yet.  We just
3320 	 always return zero for them.  */
3321       if (GET_CODE (cst) == CONST_DOUBLE)
3322 	{
3323 	  REAL_VALUE_TYPE d;
3324 	  REAL_VALUE_FROM_CONST_DOUBLE (d, cst);
3325 	  if (REAL_VALUES_EQUAL (d, dconst0))
3326 	    return 0;
3327 	}
3328       else
3329 	return 0;
3330     }
3331 
3332   return gcse_constant_p (cst);
3333 }
3334 
3335 /* Find the implicit sets of a function.  An "implicit set" is a constraint
3336    on the value of a variable, implied by a conditional jump.  For example,
3337    following "if (x == 2)", the then branch may be optimized as though the
3338    conditional performed an "explicit set", in this example, "x = 2".  This
3339    function records the set patterns that are implicit at the start of each
3340    basic block.  */
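/* Concretely (illustrative RTL, hypothetical register number): if x
   lives in (reg 105), then for "if (x == 2)" the destination block of
   the taken edge gets

     implicit_sets[dest->index] = (set (reg 105) (const_int 2))

   provided that block has the conditional edge as its only
   predecessor.  */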
3341 
3342 static void
3343 find_implicit_sets (void)
3344 {
3345   basic_block bb, dest;
3346   unsigned int count;
3347   rtx cond, new;
3348 
3349   count = 0;
3350   FOR_EACH_BB (bb)
3351     /* Check for more than one successor.  */
3352     if (EDGE_COUNT (bb->succs) > 1)
3353       {
3354 	cond = fis_get_condition (BB_END (bb));
3355 
3356 	if (cond
3357 	    && (GET_CODE (cond) == EQ || GET_CODE (cond) == NE)
3358 	    && REG_P (XEXP (cond, 0))
3359 	    && REGNO (XEXP (cond, 0)) >= FIRST_PSEUDO_REGISTER
3360 	    && implicit_set_cond_p (cond))
3361 	  {
3362 	    dest = GET_CODE (cond) == EQ ? BRANCH_EDGE (bb)->dest
3363 					 : FALLTHRU_EDGE (bb)->dest;
3364 
3365 	    if (dest && single_pred_p (dest)
3366 		&& dest != EXIT_BLOCK_PTR)
3367 	      {
3368 		new = gen_rtx_SET (VOIDmode, XEXP (cond, 0),
3369 					     XEXP (cond, 1));
3370 		implicit_sets[dest->index] = new;
3371 		if (dump_file)
3372 		  {
3373 		    fprintf(dump_file, "Implicit set of reg %d in ",
3374 			    REGNO (XEXP (cond, 0)));
3375 		    fprintf(dump_file, "basic block %d\n", dest->index);
3376 		  }
3377 		count++;
3378 	      }
3379 	  }
3380       }
3381 
3382   if (dump_file)
3383     fprintf (dump_file, "Found %d implicit sets\n", count);
3384 }
3385 
3386 /* Perform one copy/constant propagation pass.
3387    PASS is the pass count.  If CPROP_JUMPS is true, perform constant
3388    propagation into conditional jumps.  If BYPASS_JUMPS is true,
3389    perform conditional jump bypassing optimizations.  */
3390 
3391 static int
3392 one_cprop_pass (int pass, bool cprop_jumps, bool bypass_jumps)
3393 {
3394   int changed = 0;
3395 
3396   global_const_prop_count = local_const_prop_count = 0;
3397   global_copy_prop_count = local_copy_prop_count = 0;
3398 
3399   local_cprop_pass (cprop_jumps);
3400 
3401   /* Determine implicit sets.  */
3402   implicit_sets = XCNEWVEC (rtx, last_basic_block);
3403   find_implicit_sets ();
3404 
3405   alloc_hash_table (max_cuid, &set_hash_table, 1);
3406   compute_hash_table (&set_hash_table);
3407 
3408   /* Free implicit_sets before peak usage.  */
3409   free (implicit_sets);
3410   implicit_sets = NULL;
3411 
3412   if (dump_file)
3413     dump_hash_table (dump_file, "SET", &set_hash_table);
3414   if (set_hash_table.n_elems > 0)
3415     {
3416       alloc_cprop_mem (last_basic_block, set_hash_table.n_elems);
3417       compute_cprop_data ();
3418       changed = cprop (cprop_jumps);
3419       if (bypass_jumps)
3420 	changed |= bypass_conditional_jumps ();
3421       free_cprop_mem ();
3422     }
3423 
3424   free_hash_table (&set_hash_table);
3425 
3426   if (dump_file)
3427     {
3428       fprintf (dump_file, "CPROP of %s, pass %d: %d bytes needed, ",
3429 	       current_function_name (), pass, bytes_used);
3430       fprintf (dump_file, "%d local const props, %d local copy props, ",
3431 	       local_const_prop_count, local_copy_prop_count);
3432       fprintf (dump_file, "%d global const props, %d global copy props\n\n",
3433 	       global_const_prop_count, global_copy_prop_count);
3434     }
3435   /* Global analysis may get into infinite loops for unreachable blocks.  */
3436   if (changed && cprop_jumps)
3437     delete_unreachable_blocks ();
3438 
3439   return changed;
3440 }
3441 
3442 /* Bypass conditional jumps.  */
3443 
3444 /* The value of last_basic_block at the beginning of the jump_bypass
3445    pass.  The use of redirect_edge_and_branch_force may introduce new
3446    basic blocks, but the data flow analysis is only valid for basic
3447    block indices less than bypass_last_basic_block.  */
3448 
3449 static int bypass_last_basic_block;
3450 
3451 /* Find a set of REGNO to a constant that is available at the end of basic
3452    block BB.  Returns NULL if no such set is found.  Based heavily upon
3453    find_avail_set.  */
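/* Illustrative chain (hypothetical register numbers): with both

     (set (reg 102) (reg 101))
     (set (reg 101) (const_int 2))

   available at the end of BB, find_bypass_set (102, bb) follows the
   copy from (reg 102) to (reg 101) and returns the set whose source is
   (const_int 2).  */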
3454 
3455 static struct expr *
3456 find_bypass_set (int regno, int bb)
3457 {
3458   struct expr *result = 0;
3459 
3460   for (;;)
3461     {
3462       rtx src;
3463       struct expr *set = lookup_set (regno, &set_hash_table);
3464 
3465       while (set)
3466 	{
3467 	  if (TEST_BIT (cprop_avout[bb], set->bitmap_index))
3468 	    break;
3469 	  set = next_set (regno, set);
3470 	}
3471 
3472       if (set == 0)
3473 	break;
3474 
3475       gcc_assert (GET_CODE (set->expr) == SET);
3476 
3477       src = SET_SRC (set->expr);
3478       if (gcse_constant_p (src))
3479 	result = set;
3480 
3481       if (! REG_P (src))
3482 	break;
3483 
3484       regno = REGNO (src);
3485     }
3486   return result;
3487 }
3488 
3489 
3490 /* Subroutine of bypass_block that checks whether a pseudo is killed by
3491    any of the instructions inserted on an edge.  Jump bypassing places
3492    condition code setters on CFG edges using insert_insn_on_edge.  This
3493    function is required to check that our data flow analysis is still
3494    valid prior to commit_edge_insertions.  */
3495 
3496 static bool
3497 reg_killed_on_edge (rtx reg, edge e)
3498 {
3499   rtx insn;
3500 
3501   for (insn = e->insns.r; insn; insn = NEXT_INSN (insn))
3502     if (INSN_P (insn) && reg_set_p (reg, insn))
3503       return true;
3504 
3505   return false;
3506 }
3507 
3508 /* Subroutine of bypass_conditional_jumps that attempts to bypass the given
3509    basic block BB which has more than one predecessor.  If not NULL, SETCC
3510    is the first instruction of BB, which is immediately followed by JUMP_INSN
3511    JUMP.  Otherwise, SETCC is NULL, and JUMP is the first insn of BB.
3512    Returns nonzero if a change was made.
3513 
3514    During the jump bypassing pass, we may place copies of SETCC instructions
3515    on CFG edges.  The following routine must be careful to pay attention to
3516    these inserted insns when performing its transformations.  */
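/* Source-level sketch of the transformation (illustrative only):

     bb1: x = 0; goto bb3;
     bb2: x = 1; goto bb3;
     bb3: if (x == 0) goto L; else goto M;

   The value of x is known on each incoming edge, so bb1->bb3 can be
   redirected straight to L and bb2->bb3 straight to M, bypassing the
   conditional jump entirely.  */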
3517 
3518 static int
3519 bypass_block (basic_block bb, rtx setcc, rtx jump)
3520 {
3521   rtx insn, note;
3522   edge e, edest;
3523   int i, change;
3524   int may_be_loop_header;
3525   unsigned removed_p;
3526   edge_iterator ei;
3527 
3528   insn = (setcc != NULL) ? setcc : jump;
3529 
3530   /* Determine set of register uses in INSN.  */
3531   reg_use_count = 0;
3532   note_uses (&PATTERN (insn), find_used_regs, NULL);
3533   note = find_reg_equal_equiv_note (insn);
3534   if (note)
3535     find_used_regs (&XEXP (note, 0), NULL);
3536 
3537   may_be_loop_header = false;
3538   FOR_EACH_EDGE (e, ei, bb->preds)
3539     if (e->flags & EDGE_DFS_BACK)
3540       {
3541 	may_be_loop_header = true;
3542 	break;
3543       }
3544 
3545   change = 0;
3546   for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
3547     {
3548       removed_p = 0;
3549 
3550       if (e->flags & EDGE_COMPLEX)
3551 	{
3552 	  ei_next (&ei);
3553 	  continue;
3554 	}
3555 
3556       /* We can't redirect edges from new basic blocks.  */
3557       if (e->src->index >= bypass_last_basic_block)
3558 	{
3559 	  ei_next (&ei);
3560 	  continue;
3561 	}
3562 
3563       /* The irreducible loops created by redirecting edges entering the
3564 	 loop from outside would decrease the effectiveness of some of the
3565 	 following optimizations, so prevent this.  */
3566       if (may_be_loop_header
3567 	  && !(e->flags & EDGE_DFS_BACK))
3568 	{
3569 	  ei_next (&ei);
3570 	  continue;
3571 	}
3572 
3573       for (i = 0; i < reg_use_count; i++)
3574 	{
3575 	  struct reg_use *reg_used = &reg_use_table[i];
3576 	  unsigned int regno = REGNO (reg_used->reg_rtx);
3577 	  basic_block dest, old_dest;
3578 	  struct expr *set;
3579 	  rtx src, new;
3580 
3581 	  if (regno >= max_gcse_regno)
3582 	    continue;
3583 
3584 	  set = find_bypass_set (regno, e->src->index);
3585 
3586 	  if (! set)
3587 	    continue;
3588 
3589 	  /* Check the data flow is valid after edge insertions.  */
3590 	  if (e->insns.r && reg_killed_on_edge (reg_used->reg_rtx, e))
3591 	    continue;
3592 
3593 	  src = SET_SRC (pc_set (jump));
3594 
3595 	  if (setcc != NULL)
3596 	      src = simplify_replace_rtx (src,
3597 					  SET_DEST (PATTERN (setcc)),
3598 					  SET_SRC (PATTERN (setcc)));
3599 
3600 	  new = simplify_replace_rtx (src, reg_used->reg_rtx,
3601 				      SET_SRC (set->expr));
3602 
3603 	  /* Jump bypassing may have already placed instructions on
3604 	     edges of the CFG.  We can't bypass an outgoing edge that
3605 	     has instructions associated with it, as these insns won't
3606 	     get executed if the incoming edge is redirected.  */
3607 
3608 	  if (new == pc_rtx)
3609 	    {
3610 	      edest = FALLTHRU_EDGE (bb);
3611 	      dest = edest->insns.r ? NULL : edest->dest;
3612 	    }
3613 	  else if (GET_CODE (new) == LABEL_REF)
3614 	    {
3615 	      dest = BLOCK_FOR_INSN (XEXP (new, 0));
3616 	      /* Don't bypass edges containing instructions.  */
3617 	      edest = find_edge (bb, dest);
3618 	      if (edest && edest->insns.r)
3619 		dest = NULL;
3620 	    }
3621 	  else
3622 	    dest = NULL;
3623 
3624 	  /* Avoid unification of the edge with other edges from original
3625 	     branch.  We would end up emitting the instruction on "both"
3626 	     edges.  */
3627 
3628 	  if (dest && setcc && !CC0_P (SET_DEST (PATTERN (setcc)))
3629 	      && find_edge (e->src, dest))
3630 	    dest = NULL;
3631 
3632 	  old_dest = e->dest;
3633 	  if (dest != NULL
3634 	      && dest != old_dest
3635 	      && dest != EXIT_BLOCK_PTR)
3636             {
3637 	      redirect_edge_and_branch_force (e, dest);
3638 
3639 	      /* Copy the register setter to the redirected edge.
3640 		 Don't copy CC0 setters, as CC0 is dead after jump.  */
3641 	      if (setcc)
3642 		{
3643 		  rtx pat = PATTERN (setcc);
3644 		  if (!CC0_P (SET_DEST (pat)))
3645 		    insert_insn_on_edge (copy_insn (pat), e);
3646 		}
3647 
3648 	      if (dump_file != NULL)
3649 		{
3650 		  fprintf (dump_file, "JUMP-BYPASS: Proved reg %d "
3651 				      "in jump_insn %d equals constant ",
3652 			   regno, INSN_UID (jump));
3653 		  print_rtl (dump_file, SET_SRC (set->expr));
3654 		  fprintf (dump_file, "\nBypass edge from %d->%d to %d\n",
3655 			   e->src->index, old_dest->index, dest->index);
3656 		}
3657 	      change = 1;
3658 	      removed_p = 1;
3659 	      break;
3660 	    }
3661 	}
3662       if (!removed_p)
3663 	ei_next (&ei);
3664     }
3665   return change;
3666 }
3667 
3668 /* Find basic blocks with more than one predecessor that only contain a
3669    single conditional jump.  If the result of the comparison is known at
3670    compile-time from any incoming edge, redirect that edge to the
3671    appropriate target.  Returns nonzero if a change was made.
3672 
3673    This function is now mis-named, because we also handle indirect jumps.  */
3674 
3675 static int
3676 bypass_conditional_jumps (void)
3677 {
3678   basic_block bb;
3679   int changed;
3680   rtx setcc;
3681   rtx insn;
3682   rtx dest;
3683 
3684   /* Note we start at block 1.  */
3685   if (ENTRY_BLOCK_PTR->next_bb == EXIT_BLOCK_PTR)
3686     return 0;
3687 
3688   bypass_last_basic_block = last_basic_block;
3689   mark_dfs_back_edges ();
3690 
3691   changed = 0;
3692   FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR->next_bb->next_bb,
3693 		  EXIT_BLOCK_PTR, next_bb)
3694     {
3695       /* Check for more than one predecessor.  */
3696       if (!single_pred_p (bb))
3697 	{
3698 	  setcc = NULL_RTX;
3699 	  FOR_BB_INSNS (bb, insn)
3700 	    if (NONJUMP_INSN_P (insn))
3701 	      {
3702 		if (setcc)
3703 		  break;
3704 		if (GET_CODE (PATTERN (insn)) != SET)
3705 		  break;
3706 
3707 		dest = SET_DEST (PATTERN (insn));
3708 		if (REG_P (dest) || CC0_P (dest))
3709 		  setcc = insn;
3710 		else
3711 		  break;
3712 	      }
3713 	    else if (JUMP_P (insn))
3714 	      {
3715 		if ((any_condjump_p (insn) || computed_jump_p (insn))
3716 		    && onlyjump_p (insn))
3717 		  changed |= bypass_block (bb, setcc, insn);
3718 		break;
3719 	      }
3720 	    else if (INSN_P (insn))
3721 	      break;
3722 	}
3723     }
3724 
3725   /* If we bypassed any register setting insns, we inserted a
3726      copy on the redirected edge.  These need to be committed.  */
3727   if (changed)
3728     commit_edge_insertions();
3729 
3730   return changed;
3731 }
3732 
3733 /* Compute PRE+LCM working variables.  */
3734 
3735 /* Local properties of expressions.  */
3736 /* Nonzero for expressions that are transparent in the block.  */
3737 static sbitmap *transp;
3738 
3739 /* Nonzero for expressions that are transparent at the end of the block.
3740    This is only zero for expressions killed by an abnormal critical
3741    edge created by a call.  */
3742 static sbitmap *transpout;
3743 
3744 /* Nonzero for expressions that are computed (available) in the block.  */
3745 static sbitmap *comp;
3746 
3747 /* Nonzero for expressions that are locally anticipatable in the block.  */
3748 static sbitmap *antloc;
3749 
3750 /* Nonzero for expressions where this block is an optimal computation
3751    point.  */
3752 static sbitmap *pre_optimal;
3753 
3754 /* Nonzero for expressions which are redundant in a particular block.  */
3755 static sbitmap *pre_redundant;
3756 
3757 /* Nonzero for expressions which should be inserted on a specific edge.  */
3758 static sbitmap *pre_insert_map;
3759 
3760 /* Nonzero for expressions which should be deleted in a specific block.  */
3761 static sbitmap *pre_delete_map;
3762 
3763 /* Contains the edge_list returned by pre_edge_lcm.  */
3764 static struct edge_list *edge_list;
3765 
3766 /* Redundant insns.  */
3767 static sbitmap pre_redundant_insns;
3768 
3769 /* Allocate vars used for PRE analysis.  */
3770 
3771 static void
3772 alloc_pre_mem (int n_blocks, int n_exprs)
3773 {
3774   transp = sbitmap_vector_alloc (n_blocks, n_exprs);
3775   comp = sbitmap_vector_alloc (n_blocks, n_exprs);
3776   antloc = sbitmap_vector_alloc (n_blocks, n_exprs);
3777 
3778   pre_optimal = NULL;
3779   pre_redundant = NULL;
3780   pre_insert_map = NULL;
3781   pre_delete_map = NULL;
3782   ae_kill = sbitmap_vector_alloc (n_blocks, n_exprs);
3783 
3784   /* pre_insert and pre_delete are allocated later.  */
3785 }
3786 
3787 /* Free vars used for PRE analysis.  */
3788 
3789 static void
3790 free_pre_mem (void)
3791 {
3792   sbitmap_vector_free (transp);
3793   sbitmap_vector_free (comp);
3794 
3795   /* ANTLOC and AE_KILL are freed just after pre_lcm finishes.  */
3796 
3797   if (pre_optimal)
3798     sbitmap_vector_free (pre_optimal);
3799   if (pre_redundant)
3800     sbitmap_vector_free (pre_redundant);
3801   if (pre_insert_map)
3802     sbitmap_vector_free (pre_insert_map);
3803   if (pre_delete_map)
3804     sbitmap_vector_free (pre_delete_map);
3805 
3806   transp = comp = NULL;
3807   pre_optimal = pre_redundant = pre_insert_map = pre_delete_map = NULL;
3808 }
3809 
3810 /* Top level routine to do the dataflow analysis needed by PRE.  */
3811 
3812 static void
3813 compute_pre_data (void)
3814 {
3815   sbitmap trapping_expr;
3816   basic_block bb;
3817   unsigned int ui;
3818 
3819   compute_local_properties (transp, comp, antloc, &expr_hash_table);
3820   sbitmap_vector_zero (ae_kill, last_basic_block);
3821 
3822   /* Collect expressions which might trap.  */
3823   trapping_expr = sbitmap_alloc (expr_hash_table.n_elems);
3824   sbitmap_zero (trapping_expr);
3825   for (ui = 0; ui < expr_hash_table.size; ui++)
3826     {
3827       struct expr *e;
3828       for (e = expr_hash_table.table[ui]; e != NULL; e = e->next_same_hash)
3829 	if (may_trap_p (e->expr))
3830 	  SET_BIT (trapping_expr, e->bitmap_index);
3831     }
3832 
3833   /* Compute ae_kill for each basic block using:
3834 
3835      ~(TRANSP | COMP)
3836   */
3837 
3838   FOR_EACH_BB (bb)
3839     {
3840       edge e;
3841       edge_iterator ei;
3842 
3843       /* If the current block is the destination of an abnormal edge, we
3844 	 kill all trapping expressions because we won't be able to properly
3845 	 place the instruction on the edge.  So make them neither
3846 	 anticipatable nor transparent.  This is fairly conservative.  */
3847       FOR_EACH_EDGE (e, ei, bb->preds)
3848 	if (e->flags & EDGE_ABNORMAL)
3849 	  {
3850 	    sbitmap_difference (antloc[bb->index], antloc[bb->index], trapping_expr);
3851 	    sbitmap_difference (transp[bb->index], transp[bb->index], trapping_expr);
3852 	    break;
3853 	  }
3854 
3855       sbitmap_a_or_b (ae_kill[bb->index], transp[bb->index], comp[bb->index]);
3856       sbitmap_not (ae_kill[bb->index], ae_kill[bb->index]);
3857     }
3858 
3859   edge_list = pre_edge_lcm (expr_hash_table.n_elems, transp, comp, antloc,
3860 			    ae_kill, &pre_insert_map, &pre_delete_map);
3861   sbitmap_vector_free (antloc);
3862   antloc = NULL;
3863   sbitmap_vector_free (ae_kill);
3864   ae_kill = NULL;
3865   sbitmap_free (trapping_expr);
3866 }
3867 
3868 /* PRE utilities */
3869 
3870 /* Return nonzero if an occurrence of expression EXPR in OCCR_BB would reach
3871    block BB.
3872 
3873    VISITED is a pointer to a working buffer for tracking which BB's have
3874    been visited.  It is NULL for the top-level call.
3875 
3876    We treat reaching expressions that go through blocks containing the same
3877    reaching expression as "not reaching".  E.g. if EXPR is generated in blocks
3878    2 and 3, INSN is in block 4, and 2->3->4, we treat the expression in block
3879    2 as not reaching.  The intent is to improve the probability of finding
3880    only one reaching expression and to reduce register lifetimes by picking
3881    the closest such expression.  */
3882 
3883 static int
3884 pre_expr_reaches_here_p_work (basic_block occr_bb, struct expr *expr, basic_block bb, char *visited)
3885 {
3886   edge pred;
3887   edge_iterator ei;
3888 
3889   FOR_EACH_EDGE (pred, ei, bb->preds)
3890     {
3891       basic_block pred_bb = pred->src;
3892 
3893       if (pred->src == ENTRY_BLOCK_PTR
3894 	  /* Has this predecessor already been visited?  */
3895 	  || visited[pred_bb->index])
3896 	;/* Nothing to do.  */
3897 
3898       /* Does this predecessor generate this expression?  */
3899       else if (TEST_BIT (comp[pred_bb->index], expr->bitmap_index))
3900 	{
3901 	  /* Is this the occurrence we're looking for?
3902 	     Note that there's only one generating occurrence per block
3903 	     so we just need to check the block number.  */
3904 	  if (occr_bb == pred_bb)
3905 	    return 1;
3906 
3907 	  visited[pred_bb->index] = 1;
3908 	}
3909       /* Ignore this predecessor if it kills the expression.  */
3910       else if (! TEST_BIT (transp[pred_bb->index], expr->bitmap_index))
3911 	visited[pred_bb->index] = 1;
3912 
3913       /* Neither gen nor kill.  */
3914       else
3915 	{
3916 	  visited[pred_bb->index] = 1;
3917 	  if (pre_expr_reaches_here_p_work (occr_bb, expr, pred_bb, visited))
3918 	    return 1;
3919 	}
3920     }
3921 
3922   /* All paths have been checked.  */
3923   return 0;
3924 }
3925 
3926 /* The wrapper for pre_expr_reaches_here_work that ensures that any
3927    memory allocated for that function is returned.  */
3928 
3929 static int
3930 pre_expr_reaches_here_p (basic_block occr_bb, struct expr *expr, basic_block bb)
3931 {
3932   int rval;
3933   char *visited = XCNEWVEC (char, last_basic_block);
3934 
3935   rval = pre_expr_reaches_here_p_work (occr_bb, expr, bb, visited);
3936 
3937   free (visited);
3938   return rval;
3939 }
3940 
3941 
3942 /* Given an expr, generate RTL which we can insert at the end of a BB,
3943    or on an edge.  Set the block number of any insns generated to
3944    the value of BB.  */
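/* Sketch (hypothetical register numbers): for an EXPR whose rtx is
   (plus (reg 101) (reg 102)) and whose reaching_reg is (reg 110), the
   generated sequence is essentially

     (set (reg 110) (plus (reg 101) (reg 102)))

   plus any CLOBBERs added when the insn is recognized; for a constant
   EXPR it degenerates to a simple move into (reg 110).  */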
3945 
3946 static rtx
3947 process_insert_insn (struct expr *expr)
3948 {
3949   rtx reg = expr->reaching_reg;
3950   rtx exp = copy_rtx (expr->expr);
3951   rtx pat;
3952 
3953   start_sequence ();
3954 
3955   /* If the expression is something that's an operand, like a constant,
3956      just copy it to a register.  */
3957   if (general_operand (exp, GET_MODE (reg)))
3958     emit_move_insn (reg, exp);
3959 
3960   /* Otherwise, make a new insn to compute this expression and make sure the
3961      insn will be recognized (this also adds any needed CLOBBERs).  Copy the
3962      expression to make sure we don't have any sharing issues.  */
3963   else
3964     {
3965       rtx insn = emit_insn (gen_rtx_SET (VOIDmode, reg, exp));
3966 
3967       if (insn_invalid_p (insn))
3968 	gcc_unreachable ();
3969     }
3970 
3971 
3972   pat = get_insns ();
3973   end_sequence ();
3974 
3975   return pat;
3976 }
3977 
3978 /* Add EXPR to the end of basic block BB.
3979 
3980    This is used by both the PRE and code hoisting.
3981 
3982    For PRE, we want to verify that the expr is either transparent
3983    or locally anticipatable in the target block.  This check makes
3984    no sense for code hoisting.  */
3985 
3986 static void
3987 insert_insn_end_bb (struct expr *expr, basic_block bb, int pre)
3988 {
3989   rtx insn = BB_END (bb);
3990   rtx new_insn;
3991   rtx reg = expr->reaching_reg;
3992   int regno = REGNO (reg);
3993   rtx pat, pat_end;
3994 
3995   pat = process_insert_insn (expr);
3996   gcc_assert (pat && INSN_P (pat));
3997 
3998   pat_end = pat;
3999   while (NEXT_INSN (pat_end) != NULL_RTX)
4000     pat_end = NEXT_INSN (pat_end);
4001 
4002   /* If the last insn is a jump, insert EXPR in front [taking care to
4003      handle cc0, etc. properly].  Similarly we need to take care of
4004      trapping instructions in the presence of non-call exceptions.  */
4005 
4006   if (JUMP_P (insn)
4007       || (NONJUMP_INSN_P (insn)
4008 	  && (!single_succ_p (bb)
4009 	      || single_succ_edge (bb)->flags & EDGE_ABNORMAL)))
4010     {
4011 #ifdef HAVE_cc0
4012       rtx note;
4013 #endif
4014       /* It should always be the case that we can put these instructions
4015 	 anywhere in the basic block when performing PRE optimizations.
4016 	 Check this.  */
4017       gcc_assert (!NONJUMP_INSN_P (insn) || !pre
4018 		  || TEST_BIT (antloc[bb->index], expr->bitmap_index)
4019 		  || TEST_BIT (transp[bb->index], expr->bitmap_index));
4020 
4021       /* If this is a jump table, then we can't insert stuff here.  Since
4022 	 we know the previous real insn must be the tablejump, we insert
4023 	 the new instruction just before the tablejump.  */
4024       if (GET_CODE (PATTERN (insn)) == ADDR_VEC
4025 	  || GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC)
4026 	insn = prev_real_insn (insn);
4027 
4028 #ifdef HAVE_cc0
4029       /* FIXME: 'twould be nice to call prev_cc0_setter here but it aborts
4030 	 if cc0 isn't set.  */
4031       note = find_reg_note (insn, REG_CC_SETTER, NULL_RTX);
4032       if (note)
4033 	insn = XEXP (note, 0);
4034       else
4035 	{
4036 	  rtx maybe_cc0_setter = prev_nonnote_insn (insn);
4037 	  if (maybe_cc0_setter
4038 	      && INSN_P (maybe_cc0_setter)
4039 	      && sets_cc0_p (PATTERN (maybe_cc0_setter)))
4040 	    insn = maybe_cc0_setter;
4041 	}
4042 #endif
4043       /* FIXME: What if something in cc0/jump uses value set in new insn?  */
4044       new_insn = emit_insn_before_noloc (pat, insn);
4045     }
4046 
4047   /* Likewise if the last insn is a call, as will happen in the presence
4048      of exception handling.  */
4049   else if (CALL_P (insn)
4050 	   && (!single_succ_p (bb)
4051 	       || single_succ_edge (bb)->flags & EDGE_ABNORMAL))
4052     {
4053       /* Keeping in mind SMALL_REGISTER_CLASSES and parameters in registers,
4054 	 we search backward and place the instructions before the first
4055 	 parameter is loaded.  Do this for all targets for consistency, on
4056 	 the presumption that we'll get better code elsewhere as well.
4057 
4058 	 It should always be the case that we can put these instructions
4059 	 anywhere in the basic block when performing PRE optimizations.
4060 	 Check this.  */
4061 
4062       gcc_assert (!pre
4063 		  || TEST_BIT (antloc[bb->index], expr->bitmap_index)
4064 		  || TEST_BIT (transp[bb->index], expr->bitmap_index));
4065 
4066       /* Since different machines initialize their parameter registers
4067 	 in different orders, assume nothing.  Collect the set of all
4068 	 parameter registers.  */
4069       insn = find_first_parameter_load (insn, BB_HEAD (bb));
4070 
4071       /* If we found all the parameter loads, then we want to insert
4072 	 before the first parameter load.
4073 
4074 	 If we did not find all the parameter loads, then we might have
4075 	 stopped on the head of the block, which could be a CODE_LABEL.
4076 	 If we inserted before the CODE_LABEL, then we would be putting
4077 	 the insn in the wrong basic block.  In that case, put the insn
4078 	 after the CODE_LABEL.  Also, respect NOTE_INSN_BASIC_BLOCK.  */
4079       while (LABEL_P (insn)
4080 	     || NOTE_INSN_BASIC_BLOCK_P (insn))
4081 	insn = NEXT_INSN (insn);
4082 
4083       new_insn = emit_insn_before_noloc (pat, insn);
4084     }
4085   else
4086     new_insn = emit_insn_after_noloc (pat, insn);
4087 
4088   while (1)
4089     {
4090       if (INSN_P (pat))
4091 	{
4092 	  add_label_notes (PATTERN (pat), new_insn);
4093 	  note_stores (PATTERN (pat), record_set_info, pat);
4094 	}
4095       if (pat == pat_end)
4096 	break;
4097       pat = NEXT_INSN (pat);
4098     }
4099 
4100   gcse_create_count++;
4101 
4102   if (dump_file)
4103     {
4104       fprintf (dump_file, "PRE/HOIST: end of bb %d, insn %d, ",
4105 	       bb->index, INSN_UID (new_insn));
4106       fprintf (dump_file, "copying expression %d to reg %d\n",
4107 	       expr->bitmap_index, regno);
4108     }
4109 }
4110 
4111 /* Insert partially redundant expressions on edges in the CFG to make
4112    the expressions fully redundant.  */
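/* Classic diamond example (illustrative): if "a + b" is computed on the
   path through bb2 but not on the path through bb1, and the join block
   bb3 recomputes it, LCM marks the edge bb1->bb3 in pre_insert_map.
   Inserting "reaching_reg = a + b" on that edge makes the computation
   in bb3 fully redundant, so pre_delete can replace it with a copy.  */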
4113 
4114 static int
4115 pre_edge_insert (struct edge_list *edge_list, struct expr **index_map)
4116 {
4117   int e, i, j, num_edges, set_size, did_insert = 0;
4118   sbitmap *inserted;
4119 
4120   /* Where PRE_INSERT_MAP is nonzero, we add the expression on that edge
4121      if it reaches any of the deleted expressions.  */
4122 
4123   set_size = pre_insert_map[0]->size;
4124   num_edges = NUM_EDGES (edge_list);
4125   inserted = sbitmap_vector_alloc (num_edges, expr_hash_table.n_elems);
4126   sbitmap_vector_zero (inserted, num_edges);
4127 
4128   for (e = 0; e < num_edges; e++)
4129     {
4130       int indx;
4131       basic_block bb = INDEX_EDGE_PRED_BB (edge_list, e);
4132 
4133       for (i = indx = 0; i < set_size; i++, indx += SBITMAP_ELT_BITS)
4134 	{
4135 	  SBITMAP_ELT_TYPE insert = pre_insert_map[e]->elms[i];
4136 
4137 	  for (j = indx; insert && j < (int) expr_hash_table.n_elems; j++, insert >>= 1)
4138 	    if ((insert & 1) != 0 && index_map[j]->reaching_reg != NULL_RTX)
4139 	      {
4140 		struct expr *expr = index_map[j];
4141 		struct occr *occr;
4142 
4143 		/* Now look at each deleted occurrence of this expression.  */
4144 		for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
4145 		  {
4146 		    if (! occr->deleted_p)
4147 		      continue;
4148 
4149 		    /* Insert this expression on this edge if it would
4150 		       reach the deleted occurrence in BB.  */
4151 		    if (!TEST_BIT (inserted[e], j))
4152 		      {
4153 			rtx insn;
4154 			edge eg = INDEX_EDGE (edge_list, e);
4155 
4156 			/* We can't insert anything on an abnormal and
4157 			   critical edge, so we insert the insn at the end of
4158 			   the previous block. There are several alternatives
4159 			   detailed in Morgan's book, p. 277 (sec. 10.5), for
4160 			   handling this situation.  This one is easiest for
4161 			   now.  */
4162 
4163 			if (eg->flags & EDGE_ABNORMAL)
4164 			  insert_insn_end_bb (index_map[j], bb, 0);
4165 			else
4166 			  {
4167 			    insn = process_insert_insn (index_map[j]);
4168 			    insert_insn_on_edge (insn, eg);
4169 			  }
4170 
4171 			if (dump_file)
4172 			  {
4173 			    fprintf (dump_file, "PRE/HOIST: edge (%d,%d), ",
4174 				     bb->index,
4175 				     INDEX_EDGE_SUCC_BB (edge_list, e)->index);
4176 			    fprintf (dump_file, "copy expression %d\n",
4177 				     expr->bitmap_index);
4178 			  }
4179 
4180 			update_ld_motion_stores (expr);
4181 			SET_BIT (inserted[e], j);
4182 			did_insert = 1;
4183 			gcse_create_count++;
4184 		      }
4185 		  }
4186 	      }
4187 	}
4188     }
4189 
4190   sbitmap_vector_free (inserted);
4191   return did_insert;
4192 }
4193 
4194 /* Copy the result of EXPR->EXPR generated by INSN to EXPR->REACHING_REG.
4195    Given "old_reg <- expr" (INSN), instead of adding after it
4196      reaching_reg <- old_reg
4197    it's better to do the following:
4198      reaching_reg <- expr
4199      old_reg      <- reaching_reg
4200    because this way copy propagation can discover additional PRE
4201    opportunities.  But if this fails, we try the old way.
4202    When "expr" is a store, i.e.
4203    given "MEM <- old_reg", instead of adding after it
4204      reaching_reg <- old_reg
4205    it's better to add it before as follows:
4206      reaching_reg <- old_reg
4207      MEM          <- reaching_reg.  */
4208 
4209 static void
4210 pre_insert_copy_insn (struct expr *expr, rtx insn)
4211 {
4212   rtx reg = expr->reaching_reg;
4213   int regno = REGNO (reg);
4214   int indx = expr->bitmap_index;
4215   rtx pat = PATTERN (insn);
4216   rtx set, first_set, new_insn;
4217   rtx old_reg;
4218   int i;
4219 
4220   /* This block matches the logic in hash_scan_insn.  */
4221   switch (GET_CODE (pat))
4222     {
4223     case SET:
4224       set = pat;
4225       break;
4226 
4227     case PARALLEL:
4228       /* Search through the parallel looking for the set whose
4229 	 source was the expression that we're interested in.  */
4230       first_set = NULL_RTX;
4231       set = NULL_RTX;
4232       for (i = 0; i < XVECLEN (pat, 0); i++)
4233 	{
4234 	  rtx x = XVECEXP (pat, 0, i);
4235 	  if (GET_CODE (x) == SET)
4236 	    {
4237 	      /* If the source was a REG_EQUAL or REG_EQUIV note, we
4238 		 may not find an equivalent expression, but in this
4239 		 case the PARALLEL will have a single set.  */
4240 	      if (first_set == NULL_RTX)
4241 		first_set = x;
4242 	      if (expr_equiv_p (SET_SRC (x), expr->expr))
4243 	        {
4244 	          set = x;
4245 	          break;
4246 	        }
4247 	    }
4248 	}
4249 
4250       gcc_assert (first_set);
4251       if (set == NULL_RTX)
4252         set = first_set;
4253       break;
4254 
4255     default:
4256       gcc_unreachable ();
4257     }
4258 
4259   if (REG_P (SET_DEST (set)))
4260     {
4261       old_reg = SET_DEST (set);
4262       /* Check if we can modify the set destination in the original insn.  */
4263       if (validate_change (insn, &SET_DEST (set), reg, 0))
4264         {
4265           new_insn = gen_move_insn (old_reg, reg);
4266           new_insn = emit_insn_after (new_insn, insn);
4267 
4268           /* Keep register set table up to date.  */
4269           record_one_set (regno, insn);
4270         }
4271       else
4272         {
4273           new_insn = gen_move_insn (reg, old_reg);
4274           new_insn = emit_insn_after (new_insn, insn);
4275 
4276           /* Keep register set table up to date.  */
4277           record_one_set (regno, new_insn);
4278         }
4279     }
4280   else /* This is possible only in case of a store to memory.  */
4281     {
4282       old_reg = SET_SRC (set);
4283       new_insn = gen_move_insn (reg, old_reg);
4284 
4285       /* Check if we can modify the set source in the original insn.  */
4286       if (validate_change (insn, &SET_SRC (set), reg, 0))
4287         new_insn = emit_insn_before (new_insn, insn);
4288       else
4289         new_insn = emit_insn_after (new_insn, insn);
4290 
4291       /* Keep register set table up to date.  */
4292       record_one_set (regno, new_insn);
4293     }
4294 
4295   gcse_create_count++;
4296 
4297   if (dump_file)
4298     fprintf (dump_file,
4299 	     "PRE: bb %d, insn %d, copy expression %d in insn %d to reg %d\n",
4300 	      BLOCK_NUM (insn), INSN_UID (new_insn), indx,
4301 	      INSN_UID (insn), regno);
4302 }
4303 
4304 /* Copy available expressions that reach the redundant expression
4305    to `reaching_reg'.  */
4306 
4307 static void
4308 pre_insert_copies (void)
4309 {
4310   unsigned int i, added_copy;
4311   struct expr *expr;
4312   struct occr *occr;
4313   struct occr *avail;
4314 
4315   /* For each available expression in the table, copy the result to
4316      `reaching_reg' if the expression reaches a deleted one.
4317 
4318      ??? The current algorithm is rather brute force.
4319      Need to do some profiling.  */
4320 
4321   for (i = 0; i < expr_hash_table.size; i++)
4322     for (expr = expr_hash_table.table[i]; expr != NULL; expr = expr->next_same_hash)
4323       {
4324 	/* If the basic block isn't reachable, PPOUT will be TRUE.  However,
4325 	   we don't want to insert a copy here because the expression may not
4326 	   really be redundant.  So only insert an insn if the expression was
4327 	   deleted.  This test also avoids further processing if the
4328 	   expression wasn't deleted anywhere.  */
4329 	if (expr->reaching_reg == NULL)
4330 	  continue;
4331 
4332 	/* Set when we add a copy for that expression.  */
4333 	added_copy = 0;
4334 
4335 	for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
4336 	  {
4337 	    if (! occr->deleted_p)
4338 	      continue;
4339 
4340 	    for (avail = expr->avail_occr; avail != NULL; avail = avail->next)
4341 	      {
4342 		rtx insn = avail->insn;
4343 
4344 		/* No need to handle this one if handled already.  */
4345 		if (avail->copied_p)
4346 		  continue;
4347 
4348 		/* Don't handle this one if it's a redundant one.  */
4349 		if (TEST_BIT (pre_redundant_insns, INSN_CUID (insn)))
4350 		  continue;
4351 
4352 		/* Or if the expression doesn't reach the deleted one.  */
4353 		if (! pre_expr_reaches_here_p (BLOCK_FOR_INSN (avail->insn),
4354 					       expr,
4355 					       BLOCK_FOR_INSN (occr->insn)))
4356 		  continue;
4357 
4358                 added_copy = 1;
4359 
4360 		/* Copy the result of avail to reaching_reg.  */
4361 		pre_insert_copy_insn (expr, insn);
4362 		avail->copied_p = 1;
4363 	      }
4364 	  }
4365 
4366 	  if (added_copy)
4367             update_ld_motion_stores (expr);
4368       }
4369 }
4370 
4371 /* Emit move from SRC to DEST noting the equivalence with expression computed
4372    in INSN.  */
4373 static rtx
4374 gcse_emit_move_after (rtx src, rtx dest, rtx insn)
4375 {
4376   rtx new;
4377   rtx set = single_set (insn), set2;
4378   rtx note;
4379   rtx eqv;
4380 
4381   /* This should never fail since we're creating a reg->reg copy
4382      we've verified to be valid.  */
4383 
4384   new = emit_insn_after (gen_move_insn (dest, src), insn);
4385 
4386   /* Note the equivalence for local CSE pass.  */
4387   set2 = single_set (new);
4388   if (!set2 || !rtx_equal_p (SET_DEST (set2), dest))
4389     return new;
4390   if ((note = find_reg_equal_equiv_note (insn)))
4391     eqv = XEXP (note, 0);
4392   else
4393     eqv = SET_SRC (set);
4394 
4395   set_unique_reg_note (new, REG_EQUAL, copy_insn_1 (eqv));
4396 
4397   return new;
4398 }
4399 
4400 /* Delete redundant computations.
4401    Deletion is done by changing the insn to copy the `reaching_reg' of
4402    the expression into the result of the SET.  It is left to later passes
4403    (cprop, cse2, flow, combine, regmove) to propagate the copy or eliminate it.
4404 
4405    Returns nonzero if a change is made.  */
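/* Sketch (hypothetical register numbers): a redundant insn

     (set (reg 103) (plus (reg 101) (reg 102)))

   is deleted and replaced by the copy (set (reg 103) (reg 110)), where
   (reg 110) is expr->reaching_reg; cprop, cse2 and friends then
   propagate or eliminate the copy.  */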
4406 
4407 static int
4408 pre_delete (void)
4409 {
4410   unsigned int i;
4411   int changed;
4412   struct expr *expr;
4413   struct occr *occr;
4414 
4415   changed = 0;
4416   for (i = 0; i < expr_hash_table.size; i++)
4417     for (expr = expr_hash_table.table[i];
4418 	 expr != NULL;
4419 	 expr = expr->next_same_hash)
4420       {
4421 	int indx = expr->bitmap_index;
4422 
4423 	/* We only need to search antic_occr since we require
4424 	   ANTLOC != 0.  */
4425 
4426 	for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
4427 	  {
4428 	    rtx insn = occr->insn;
4429 	    rtx set;
4430 	    basic_block bb = BLOCK_FOR_INSN (insn);
4431 
4432 	    /* We only delete insns that have a single_set.  */
4433 	    if (TEST_BIT (pre_delete_map[bb->index], indx)
4434 		&& (set = single_set (insn)) != 0)
4435 	      {
4436 		/* Create a pseudo-reg to store the result of reaching
4437 		   expressions into.  Get the mode for the new pseudo from
4438 		   the mode of the original destination pseudo.  */
4439 		if (expr->reaching_reg == NULL)
4440 		  expr->reaching_reg
4441 		    = gen_reg_rtx (GET_MODE (SET_DEST (set)));
4442 
4443 		gcse_emit_move_after (expr->reaching_reg, SET_DEST (set), insn);
4444 		delete_insn (insn);
4445 		occr->deleted_p = 1;
4446 		SET_BIT (pre_redundant_insns, INSN_CUID (insn));
4447 		changed = 1;
4448 		gcse_subst_count++;
4449 
4450 		if (dump_file)
4451 		  {
4452 		    fprintf (dump_file,
4453 			     "PRE: redundant insn %d (expression %d) in ",
4454 			       INSN_UID (insn), indx);
4455 		    fprintf (dump_file, "bb %d, reaching reg is %d\n",
4456 			     bb->index, REGNO (expr->reaching_reg));
4457 		  }
4458 	      }
4459 	  }
4460       }
4461 
4462   return changed;
4463 }
4464 
4465 /* Perform GCSE optimizations using PRE.
4466    This is called by one_pre_gcse_pass after all the dataflow analysis
4467    has been done.
4468 
4469    This is based on the original Morel-Renvoise paper, Fred Chow's thesis, and
4470    lazy code motion from Knoop, Ruthing and Steffen as described in Advanced
4471    Compiler Design and Implementation.
4472 
4473    ??? A new pseudo reg is created to hold the reaching expression.  The nice
4474    thing about the classical approach is that it would try to use an existing
4475    reg.  If the register can't be adequately optimized [i.e. we introduce
4476    reload problems], one could add a pass here to propagate the new register
4477    through the block.
4478 
4479    ??? We don't handle single sets in PARALLELs because we're [currently] not
4480    able to copy the rest of the parallel when we insert copies to create full
4481    redundancies from partial redundancies.  However, there's no reason why we
4482    can't handle PARALLELs in the cases where there are no partial
4483    redundancies.  */
4484 
4485 static int
4486 pre_gcse (void)
4487 {
4488   unsigned int i;
4489   int did_insert, changed;
4490   struct expr **index_map;
4491   struct expr *expr;
4492 
4493   /* Compute a mapping from expression number (`bitmap_index') to
4494      hash table entry.  */
4495 
4496   index_map = XCNEWVEC (struct expr *, expr_hash_table.n_elems);
4497   for (i = 0; i < expr_hash_table.size; i++)
4498     for (expr = expr_hash_table.table[i]; expr != NULL; expr = expr->next_same_hash)
4499       index_map[expr->bitmap_index] = expr;
4500 
4501   /* Reset bitmap used to track which insns are redundant.  */
4502   pre_redundant_insns = sbitmap_alloc (max_cuid);
4503   sbitmap_zero (pre_redundant_insns);
4504 
4505   /* Delete the redundant insns first so that
4506      - we know what register to use for the new insns and for the other
4507        ones with reaching expressions
4508      - we know which insns are redundant when we go to create copies  */
4509 
4510   changed = pre_delete ();
4511 
4512   did_insert = pre_edge_insert (edge_list, index_map);
4513 
4514   /* In other places with reaching expressions, copy the expression to the
4515      specially allocated pseudo-reg that reaches the redundant expr.  */
4516   pre_insert_copies ();
4517   if (did_insert)
4518     {
4519       commit_edge_insertions ();
4520       changed = 1;
4521     }
4522 
4523   free (index_map);
4524   sbitmap_free (pre_redundant_insns);
4525   return changed;
4526 }
4527 
4528 /* Top level routine to perform one PRE GCSE pass.
4529 
4530    Return nonzero if a change was made.  */
4531 
4532 static int
4533 one_pre_gcse_pass (int pass)
4534 {
4535   int changed = 0;
4536 
4537   gcse_subst_count = 0;
4538   gcse_create_count = 0;
4539 
4540   alloc_hash_table (max_cuid, &expr_hash_table, 0);
4541   add_noreturn_fake_exit_edges ();
4542   if (flag_gcse_lm)
4543     compute_ld_motion_mems ();
4544 
4545   compute_hash_table (&expr_hash_table);
4546   trim_ld_motion_mems ();
4547   if (dump_file)
4548     dump_hash_table (dump_file, "Expression", &expr_hash_table);
4549 
4550   if (expr_hash_table.n_elems > 0)
4551     {
4552       alloc_pre_mem (last_basic_block, expr_hash_table.n_elems);
4553       compute_pre_data ();
4554       changed |= pre_gcse ();
4555       free_edge_list (edge_list);
4556       free_pre_mem ();
4557     }
4558 
4559   free_ldst_mems ();
4560   remove_fake_exit_edges ();
4561   free_hash_table (&expr_hash_table);
4562 
4563   if (dump_file)
4564     {
4565       fprintf (dump_file, "\nPRE GCSE of %s, pass %d: %d bytes needed, ",
4566 	       current_function_name (), pass, bytes_used);
4567       fprintf (dump_file, "%d substs, %d insns created\n",
4568 	       gcse_subst_count, gcse_create_count);
4569     }
4570 
4571   return changed;
4572 }
4573 
4574 /* If X contains any LABEL_REF's, add REG_LABEL notes for them to INSN.
4575    If notes are added to an insn which references a CODE_LABEL, the
4576    LABEL_NUSES count is incremented.  We have to add REG_LABEL notes,
4577    because the following loop optimization pass requires them.  */
4578 
4579 /* ??? If there was a jump optimization pass after gcse and before loop,
4580    then we would not need to do this here, because jump would add the
4581    necessary REG_LABEL notes.  */
4582 
4583 static void
4584 add_label_notes (rtx x, rtx insn)
4585 {
4586   enum rtx_code code = GET_CODE (x);
4587   int i, j;
4588   const char *fmt;
4589 
4590   if (code == LABEL_REF && !LABEL_REF_NONLOCAL_P (x))
4591     {
4592       /* This code used to ignore labels that referred to dispatch tables to
4593 	 avoid flow generating (slightly) worse code.
4594 
4595 	 We no longer ignore such label references (see LABEL_REF handling in
4596 	 mark_jump_label for additional information).  */
4597 
4598       REG_NOTES (insn) = gen_rtx_INSN_LIST (REG_LABEL, XEXP (x, 0),
4599 					    REG_NOTES (insn));
4600       if (LABEL_P (XEXP (x, 0)))
4601 	LABEL_NUSES (XEXP (x, 0))++;
4602       return;
4603     }
4604 
4605   for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
4606     {
4607       if (fmt[i] == 'e')
4608 	add_label_notes (XEXP (x, i), insn);
4609       else if (fmt[i] == 'E')
4610 	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
4611 	  add_label_notes (XVECEXP (x, i, j), insn);
4612     }
4613 }
4614 
4615 /* Compute transparent outgoing information for each block.
4616 
4617    An expression is transparent to an edge unless it is killed by
4618    the edge itself.  This can only happen with abnormal control flow,
4619    when the edge is traversed through a call.  This happens with
4620    non-local labels and exceptions.
4621 
4622    This would not be necessary if we split the edge.  While this is
4623    normally impossible for abnormal critical edges, with some effort
4624    it should be possible with exception handling, since we still have
4625    control over which handler should be invoked.  But due to increased
4626    EH table sizes, this may not be worthwhile.  */
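/* Example: an expression such as (mem (reg 101)) (hypothetical register
   number) is not transparent out of a block ending in a call, because
   the call, or an abnormal path through it (nonlocal label, exception
   handler), may store to that memory.  References into the constant
   pool are the exception below, since no call can modify them.  */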
4627 
4628 static void
4629 compute_transpout (void)
4630 {
4631   basic_block bb;
4632   unsigned int i;
4633   struct expr *expr;
4634 
4635   sbitmap_vector_ones (transpout, last_basic_block);
4636 
4637   FOR_EACH_BB (bb)
4638     {
4639       /* Note that flow inserted a nop at the end of basic blocks that
4640 	 end in call instructions for reasons other than abnormal
4641 	 control flow.  */
4642       if (! CALL_P (BB_END (bb)))
4643 	continue;
4644 
4645       for (i = 0; i < expr_hash_table.size; i++)
4646 	for (expr = expr_hash_table.table[i]; expr ; expr = expr->next_same_hash)
4647 	  if (MEM_P (expr->expr))
4648 	    {
4649 	      if (GET_CODE (XEXP (expr->expr, 0)) == SYMBOL_REF
4650 		  && CONSTANT_POOL_ADDRESS_P (XEXP (expr->expr, 0)))
4651 		continue;
4652 
4653 	      /* ??? Optimally, we would use interprocedural alias
4654 		 analysis to determine if this mem is actually killed
4655 		 by this call.  */
4656 	      RESET_BIT (transpout[bb->index], expr->bitmap_index);
4657 	    }
4658     }
4659 }
4660 
4661 /* Code Hoisting variables and subroutines.  */
4662 
4663 /* Very busy expressions.  */
4664 static sbitmap *hoist_vbein;
4665 static sbitmap *hoist_vbeout;
4666 
4667 /* Hoistable expressions.  */
4668 static sbitmap *hoist_exprs;
4669 
4670 /* ??? We could compute post dominators and run this algorithm in
4671    reverse to perform tail merging, doing so would probably be
4672    more effective than the tail merging code in jump.c.
4673 
4674    It's unclear if tail merging could be run in parallel with
4675    code hoisting.  It would be nice.  */
4676 
4677 /* Allocate vars used for code hoisting analysis.  */
4678 
4679 static void
4680 alloc_code_hoist_mem (int n_blocks, int n_exprs)
4681 {
4682   antloc = sbitmap_vector_alloc (n_blocks, n_exprs);
4683   transp = sbitmap_vector_alloc (n_blocks, n_exprs);
4684   comp = sbitmap_vector_alloc (n_blocks, n_exprs);
4685 
4686   hoist_vbein = sbitmap_vector_alloc (n_blocks, n_exprs);
4687   hoist_vbeout = sbitmap_vector_alloc (n_blocks, n_exprs);
4688   hoist_exprs = sbitmap_vector_alloc (n_blocks, n_exprs);
4689   transpout = sbitmap_vector_alloc (n_blocks, n_exprs);
4690 }
4691 
4692 /* Free vars used for code hoisting analysis.  */
4693 
4694 static void
4695 free_code_hoist_mem (void)
4696 {
4697   sbitmap_vector_free (antloc);
4698   sbitmap_vector_free (transp);
4699   sbitmap_vector_free (comp);
4700 
4701   sbitmap_vector_free (hoist_vbein);
4702   sbitmap_vector_free (hoist_vbeout);
4703   sbitmap_vector_free (hoist_exprs);
4704   sbitmap_vector_free (transpout);
4705 
4706   free_dominance_info (CDI_DOMINATORS);
4707 }
4708 
4709 /* Compute the very busy expressions at entry/exit from each block.
4710 
4711    An expression is very busy if all paths from a given point
4712    compute the expression.  */
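/* Example (illustrative): in

     if (c) { x = a + b; ... } else { y = a + b; ... }

   "a + b" is computed on every path leaving the test, so it is very
   busy at the exit of the test's block and a candidate for hoisting
   there.  */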
4713 
4714 static void
4715 compute_code_hoist_vbeinout (void)
4716 {
4717   int changed, passes;
4718   basic_block bb;
4719 
4720   sbitmap_vector_zero (hoist_vbeout, last_basic_block);
4721   sbitmap_vector_zero (hoist_vbein, last_basic_block);
4722 
4723   passes = 0;
4724   changed = 1;
4725 
4726   while (changed)
4727     {
4728       changed = 0;
4729 
4730       /* We scan the blocks in the reverse order to speed up
4731 	 the convergence.  */
4732       FOR_EACH_BB_REVERSE (bb)
4733 	{
4734 	  changed |= sbitmap_a_or_b_and_c_cg (hoist_vbein[bb->index], antloc[bb->index],
4735 					      hoist_vbeout[bb->index], transp[bb->index]);
4736 	  if (bb->next_bb != EXIT_BLOCK_PTR)
4737 	    sbitmap_intersection_of_succs (hoist_vbeout[bb->index], hoist_vbein, bb->index);
4738 	}
4739 
4740       passes++;
4741     }
4742 
4743   if (dump_file)
4744     fprintf (dump_file, "hoisting vbeinout computation: %d passes\n", passes);
4745 }
4746 
4747 /* Top level routine to do the dataflow analysis needed by code hoisting.  */
4748 
4749 static void
4750 compute_code_hoist_data (void)
4751 {
4752   compute_local_properties (transp, comp, antloc, &expr_hash_table);
4753   compute_transpout ();
4754   compute_code_hoist_vbeinout ();
4755   calculate_dominance_info (CDI_DOMINATORS);
4756   if (dump_file)
4757     fprintf (dump_file, "\n");
4758 }
4759 
4760 /* Determine if the expression identified by EXPR_INDEX would
4761    reach BB unimpaired if it was placed at the end of EXPR_BB.
4762 
4763    It's unclear exactly what Muchnick meant by "unimpaired".  It seems
4764    to me that the expression must either be computed or transparent in
4765    *every* block in the path(s) from EXPR_BB to BB.  Any other definition
4766    would allow the expression to be hoisted out of loops, even if
4767    the expression wasn't a loop invariant.
4768 
4769    Contrast this to reachability for PRE where an expression is
4770    considered reachable if *any* path reaches instead of *all*
4771    paths.  */
4772 
4773 static int
4774 hoist_expr_reaches_here_p (basic_block expr_bb, int expr_index, basic_block bb, char *visited)
4775 {
4776   edge pred;
4777   edge_iterator ei;
4778   int visited_allocated_locally = 0;
4779 
4780 
4781   if (visited == NULL)
4782     {
4783       visited_allocated_locally = 1;
4784       visited = XCNEWVEC (char, last_basic_block);
4785     }
4786 
4787   FOR_EACH_EDGE (pred, ei, bb->preds)
4788     {
4789       basic_block pred_bb = pred->src;
4790 
4791       if (pred->src == ENTRY_BLOCK_PTR)
4792 	break;
4793       else if (pred_bb == expr_bb)
4794 	continue;
4795       else if (visited[pred_bb->index])
4796 	continue;
4797 
4798       /* Does this predecessor generate this expression?  */
4799       else if (TEST_BIT (comp[pred_bb->index], expr_index))
4800 	break;
4801       else if (! TEST_BIT (transp[pred_bb->index], expr_index))
4802 	break;
4803 
4804       /* Not killed.  */
4805       else
4806 	{
4807 	  visited[pred_bb->index] = 1;
4808 	  if (! hoist_expr_reaches_here_p (expr_bb, expr_index,
4809 					   pred_bb, visited))
4810 	    break;
4811 	}
4812     }
4813   if (visited_allocated_locally)
4814     free (visited);
4815 
4816   return (pred == NULL);
4817 }
4818 
4819 /* Actually perform code hoisting.  */
4820 
4821 static void
4822 hoist_code (void)
4823 {
4824   basic_block bb, dominated;
4825   basic_block *domby;
4826   unsigned int domby_len;
4827   unsigned int i,j;
4828   struct expr **index_map;
4829   struct expr *expr;
4830 
4831   sbitmap_vector_zero (hoist_exprs, last_basic_block);
4832 
4833   /* Compute a mapping from expression number (`bitmap_index') to
4834      hash table entry.  */
4835 
4836   index_map = XCNEWVEC (struct expr *, expr_hash_table.n_elems);
4837   for (i = 0; i < expr_hash_table.size; i++)
4838     for (expr = expr_hash_table.table[i]; expr != NULL; expr = expr->next_same_hash)
4839       index_map[expr->bitmap_index] = expr;
4840 
4841   /* Walk over each basic block looking for potentially hoistable
4842      expressions; nothing gets hoisted from the entry block.  */
4843   FOR_EACH_BB (bb)
4844     {
4845       int found = 0;
4846       int insn_inserted_p;
4847 
4848       domby_len = get_dominated_by (CDI_DOMINATORS, bb, &domby);
4849       /* Examine each expression that is very busy at the exit of this
4850 	 block.  These are the potentially hoistable expressions.  */
4851       for (i = 0; i < hoist_vbeout[bb->index]->n_bits; i++)
4852 	{
4853 	  int hoistable = 0;
4854 
4855 	  if (TEST_BIT (hoist_vbeout[bb->index], i)
4856 	      && TEST_BIT (transpout[bb->index], i))
4857 	    {
4858 	      /* We've found a potentially hoistable expression, now
4859 		 we look at every block BB dominates to see if it
4860 		 computes the expression.  */
4861 	      for (j = 0; j < domby_len; j++)
4862 		{
4863 		  dominated = domby[j];
4864 		  /* Ignore self dominance.  */
4865 		  if (bb == dominated)
4866 		    continue;
4867 		  /* We've found a dominated block, now see if it computes
4868 		     the busy expression and whether or not moving that
4869 		     expression to the "beginning" of that block is safe.  */
4870 		  if (!TEST_BIT (antloc[dominated->index], i))
4871 		    continue;
4872 
4873 		  /* Note if the expression would reach the dominated block
4874 		     unimpaired if it were placed at the end of BB.
4875 
4876 		     Keep track of how many times this expression is hoistable
4877 		     from a dominated block into BB.  */
4878 		  if (hoist_expr_reaches_here_p (bb, i, dominated, NULL))
4879 		    hoistable++;
4880 		}
4881 
4882 	      /* If we found more than one hoistable occurrence of this
4883 		 expression, then note it in the bitmap of expressions to
4884 		 hoist.  It makes no sense to hoist things which are computed
4885 		 in only one BB, and doing so tends to pessimize register
4886 		 allocation.  One could increase this value to try harder
4887 		 to avoid any possible code expansion due to register
4888 		 allocation issues; however experiments have shown that
4889 		 the vast majority of hoistable expressions are only movable
4890 		 from two successors, so raising this threshold is likely
4891 		 to nullify any benefit we get from code hoisting.  */
4892 	      if (hoistable > 1)
4893 		{
4894 		  SET_BIT (hoist_exprs[bb->index], i);
4895 		  found = 1;
4896 		}
4897 	    }
4898 	}
4899       /* If we found nothing to hoist, then quit now.  */
4900       if (! found)
4901 	{
4902 	  free (domby);
4903 	  continue;
4904 	}
4905 
4906       /* Loop over all the hoistable expressions.  */
4907       for (i = 0; i < hoist_exprs[bb->index]->n_bits; i++)
4908 	{
4909 	  /* We want to insert the expression into BB only once, so
4910 	     note when we've inserted it.  */
4911 	  insn_inserted_p = 0;
4912 
4913 	  /* These tests should be the same as the tests above.  */
4914 	  if (TEST_BIT (hoist_exprs[bb->index], i))
4915 	    {
4916 	      /* We've found a potentially hoistable expression, now
4917 		 we look at every block BB dominates to see if it
4918 		 computes the expression.  */
4919 	      for (j = 0; j < domby_len; j++)
4920 		{
4921 		  dominated = domby[j];
4922 		  /* Ignore self dominance.  */
4923 		  if (bb == dominated)
4924 		    continue;
4925 
4926 		  /* We've found a dominated block, now see if it computes
4927 		     the busy expression and whether or not moving that
4928 		     expression to the "beginning" of that block is safe.  */
4929 		  if (!TEST_BIT (antloc[dominated->index], i))
4930 		    continue;
4931 
4932 		  /* The expression is computed in the dominated block and
4933 		     it would be safe to compute it at the start of the
4934 		     dominated block.  Now we have to determine if the
4935 		     expression would reach the dominated block if it was
4936 		     placed at the end of BB.  */
4937 		  if (hoist_expr_reaches_here_p (bb, i, dominated, NULL))
4938 		    {
4939 		      struct expr *expr = index_map[i];
4940 		      struct occr *occr = expr->antic_occr;
4941 		      rtx insn;
4942 		      rtx set;
4943 
4944 		      /* Find the right occurrence of this expression.  */
4945 		      while (occr && BLOCK_FOR_INSN (occr->insn) != dominated)
4946 			occr = occr->next;
4947 
4948 		      gcc_assert (occr);
4949 		      insn = occr->insn;
4950 		      set = single_set (insn);
4951 		      gcc_assert (set);
4952 
4953 		      /* Create a pseudo-reg to store the result of reaching
4954 			 expressions into.  Get the mode for the new pseudo
4955 			 from the mode of the original destination pseudo.  */
4956 		      if (expr->reaching_reg == NULL)
4957 			expr->reaching_reg
4958 			  = gen_reg_rtx (GET_MODE (SET_DEST (set)));
4959 
4960 		      gcse_emit_move_after (expr->reaching_reg, SET_DEST (set), insn);
4961 		      delete_insn (insn);
4962 		      occr->deleted_p = 1;
4963 		      if (!insn_inserted_p)
4964 			{
4965 			  insert_insn_end_bb (index_map[i], bb, 0);
4966 			  insn_inserted_p = 1;
4967 			}
4968 		    }
4969 		}
4970 	    }
4971 	}
4972       free (domby);
4973     }
4974 
4975   free (index_map);
4976 }
4977 
4978 /* Top level routine to perform one code hoisting (aka unification) pass
4979 
4980    Return nonzero if a change was made.  */
4981 
4982 static int
4983 one_code_hoisting_pass (void)
4984 {
4985   int changed = 0;
4986 
4987   alloc_hash_table (max_cuid, &expr_hash_table, 0);
4988   compute_hash_table (&expr_hash_table);
4989   if (dump_file)
4990     dump_hash_table (dump_file, "Code Hoisting Expressions", &expr_hash_table);
4991 
4992   if (expr_hash_table.n_elems > 0)
4993     {
4994       alloc_code_hoist_mem (last_basic_block, expr_hash_table.n_elems);
4995       compute_code_hoist_data ();
4996       hoist_code ();
4997       free_code_hoist_mem ();
4998     }
4999 
5000   free_hash_table (&expr_hash_table);
5001 
5002   return changed;
5003 }
5004 
5005 /*  Here we provide the things required to do store motion towards
5006     the exit. In order for this to be effective, gcse also needed to
5007     be taught how to move a load when it is killed only by a store to itself.
5008 
5009 	    int i;
5010 	    float a[10];
5011 
5012 	    void foo(float scale)
5013 	    {
5014 	      for (i=0; i<10; i++)
5015 		a[i] *= scale;
5016 	    }
5017 
5018     'i' is both loaded and stored to in the loop. Normally, gcse cannot move
5019     the load out since it's live around the loop and stored at the bottom
5020     of the loop.
5021 
5022       The 'Load Motion' referred to and implemented in this file is
5023     an enhancement to gcse which, when using edge-based LCM, recognizes
5024     this situation and allows gcse to move the load out of the loop.
5025 
5026       Once gcse has hoisted the load, store motion can then push this
5027     store towards the exit, and we end up with no loads or stores of 'i'
5028     in the loop.  */
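/* Illustratively (an assumed end result, not literal output of this
   pass), the loop above is rewritten so that 'i' lives in a register
   for the whole loop:

	    reg = i;				<- hoisted load
	    for (reg = 0; reg < 10; reg++)
	      a[reg] *= scale;
	    i = reg;				<- sunk store

   leaving no loads or stores of 'i' inside the loop.  */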
5029 
5030 static hashval_t
5031 pre_ldst_expr_hash (const void *p)
5032 {
5033   int do_not_record_p = 0;
5034   const struct ls_expr *x = p;
5035   return hash_rtx (x->pattern, GET_MODE (x->pattern), &do_not_record_p, NULL, false);
5036 }
5037 
5038 static int
5039 pre_ldst_expr_eq (const void *p1, const void *p2)
5040 {
5041   const struct ls_expr *ptr1 = p1, *ptr2 = p2;
5042   return expr_equiv_p (ptr1->pattern, ptr2->pattern);
5043 }
5044 
5045 /* This will search the ldst list for a matching expression. If it
5046    doesn't find one, we create one and initialize it.  */
5047 
5048 static struct ls_expr *
5049 ldst_entry (rtx x)
5050 {
5051   int do_not_record_p = 0;
5052   struct ls_expr * ptr;
5053   unsigned int hash;
5054   void **slot;
5055   struct ls_expr e;
5056 
5057   hash = hash_rtx (x, GET_MODE (x), &do_not_record_p,
5058 		   NULL,  /*have_reg_qty=*/false);
5059 
5060   e.pattern = x;
5061   slot = htab_find_slot_with_hash (pre_ldst_table, &e, hash, INSERT);
5062   if (*slot)
5063     return (struct ls_expr *)*slot;
5064 
5065   ptr = XNEW (struct ls_expr);
5066 
5067   ptr->next         = pre_ldst_mems;
5068   ptr->expr         = NULL;
5069   ptr->pattern      = x;
5070   ptr->pattern_regs = NULL_RTX;
5071   ptr->loads        = NULL_RTX;
5072   ptr->stores       = NULL_RTX;
5073   ptr->reaching_reg = NULL_RTX;
5074   ptr->invalid      = 0;
5075   ptr->index        = 0;
5076   ptr->hash_index   = hash;
5077   pre_ldst_mems     = ptr;
5078   *slot = ptr;
5079 
5080   return ptr;
5081 }
5082 
5083 /* Free up an individual ldst entry.  */
5084 
5085 static void
5086 free_ldst_entry (struct ls_expr * ptr)
5087 {
5088   free_INSN_LIST_list (& ptr->loads);
5089   free_INSN_LIST_list (& ptr->stores);
5090 
5091   free (ptr);
5092 }
5093 
5094 /* Free up all memory associated with the ldst list.  */
5095 
5096 static void
5097 free_ldst_mems (void)
5098 {
5099   if (pre_ldst_table)
5100     htab_delete (pre_ldst_table);
5101   pre_ldst_table = NULL;
5102 
5103   while (pre_ldst_mems)
5104     {
5105       struct ls_expr * tmp = pre_ldst_mems;
5106 
5107       pre_ldst_mems = pre_ldst_mems->next;
5108 
5109       free_ldst_entry (tmp);
5110     }
5111 
5112   pre_ldst_mems = NULL;
5113 }
5114 
5115 /* Dump debugging info about the ldst list.  */
5116 
5117 static void
5118 print_ldst_list (FILE * file)
5119 {
5120   struct ls_expr * ptr;
5121 
5122   fprintf (file, "LDST list: \n");
5123 
5124   for (ptr = first_ls_expr(); ptr != NULL; ptr = next_ls_expr (ptr))
5125     {
5126       fprintf (file, "  Pattern (%3d): ", ptr->index);
5127 
5128       print_rtl (file, ptr->pattern);
5129 
5130       fprintf (file, "\n	 Loads : ");
5131 
5132       if (ptr->loads)
5133 	print_rtl (file, ptr->loads);
5134       else
5135 	fprintf (file, "(nil)");
5136 
5137       fprintf (file, "\n	Stores : ");
5138 
5139       if (ptr->stores)
5140 	print_rtl (file, ptr->stores);
5141       else
5142 	fprintf (file, "(nil)");
5143 
5144       fprintf (file, "\n\n");
5145     }
5146 
5147   fprintf (file, "\n");
5148 }
5149 
5150 /* Return the ldst list entry for X, or NULL if X is not in the list or its entry has been invalidated.  */
5151 
5152 static struct ls_expr *
5153 find_rtx_in_ldst (rtx x)
5154 {
5155   struct ls_expr e;
5156   void **slot;
5157   if (!pre_ldst_table)
5158     return NULL;
5159   e.pattern = x;
5160   slot = htab_find_slot (pre_ldst_table, &e, NO_INSERT);
5161   if (!slot || ((struct ls_expr *)*slot)->invalid)
5162     return NULL;
5163   return *slot;
5164 }
5165 
5166 /* Assign each element of the list of mems a monotonically increasing value.  */
5167 
5168 static int
5169 enumerate_ldsts (void)
5170 {
5171   struct ls_expr * ptr;
5172   int n = 0;
5173 
5174   for (ptr = pre_ldst_mems; ptr != NULL; ptr = ptr->next)
5175     ptr->index = n++;
5176 
5177   return n;
5178 }
5179 
5180 /* Return first item in the list.  */
5181 
5182 static inline struct ls_expr *
5183 first_ls_expr (void)
5184 {
5185   return pre_ldst_mems;
5186 }
5187 
5188 /* Return the next item in the list after the specified one.  */
5189 
5190 static inline struct ls_expr *
5191 next_ls_expr (struct ls_expr * ptr)
5192 {
5193   return ptr->next;
5194 }
5195 
5196 /* Load Motion for loads which only kill themselves.  */
5197 
5198 /* Return true if x is a simple MEM operation, with no registers or
5199    side effects. These are the types of loads we consider for the
5200    ld_motion list; otherwise we let the usual aliasing take care of it.  */
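/* For example (illustrative RTL, not generated here): a load such as
   (mem:SI (symbol_ref:SI ("i"))) passes the tests below, whereas a
   volatile mem, a BLKmode mem, a mem that may trap when
   -fnon-call-exceptions is in effect, or a mem whose address mentions
   the stack pointer does not.  */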
5201 
5202 static int
5203 simple_mem (rtx x)
5204 {
5205   if (! MEM_P (x))
5206     return 0;
5207 
5208   if (MEM_VOLATILE_P (x))
5209     return 0;
5210 
5211   if (GET_MODE (x) == BLKmode)
5212     return 0;
5213 
5214   /* If we are handling exceptions, we must be careful with memory references
5215      that may trap. If we are not, the behavior is undefined, so we may just
5216      continue.  */
5217   if (flag_non_call_exceptions && may_trap_p (x))
5218     return 0;
5219 
5220   if (side_effects_p (x))
5221     return 0;
5222 
5223   /* Do not consider function arguments passed on stack.  */
5224   if (reg_mentioned_p (stack_pointer_rtx, x))
5225     return 0;
5226 
5227   if (flag_float_store && FLOAT_MODE_P (GET_MODE (x)))
5228     return 0;
5229 
5230   return 1;
5231 }
5232 
5233 /* Make sure there isn't a buried reference in this pattern anywhere.
5234    If there is, invalidate the entry for it since we're not capable
5235    of fixing it up just yet.  We have to be sure we know about ALL
5236    loads since the aliasing code will allow all entries in the
5237    ld_motion list to not-alias each other.  If we miss a load, we will get
5238    the wrong value since gcse might common it and we won't know to
5239    fix it up.  */
5240 
5241 static void
5242 invalidate_any_buried_refs (rtx x)
5243 {
5244   const char * fmt;
5245   int i, j;
5246   struct ls_expr * ptr;
5247 
5248   /* Invalidate it in the list.  */
5249   if (MEM_P (x) && simple_mem (x))
5250     {
5251       ptr = ldst_entry (x);
5252       ptr->invalid = 1;
5253     }
5254 
5255   /* Recursively process the insn.  */
5256   fmt = GET_RTX_FORMAT (GET_CODE (x));
5257 
5258   for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5259     {
5260       if (fmt[i] == 'e')
5261 	invalidate_any_buried_refs (XEXP (x, i));
5262       else if (fmt[i] == 'E')
5263 	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
5264 	  invalidate_any_buried_refs (XVECEXP (x, i, j));
5265     }
5266 }
5267 
5268 /* Find all the 'simple' MEMs which are used in LOADs and STORES.  Simple
5269    being defined as MEM loads and stores to symbols, with no side effects
5270    and no registers in the expression.  For a MEM destination, we also
5271    check that the insn is still valid if we replace the destination with a
5272    REG, as is done in update_ld_motion_stores.  If there are any uses/defs
5273    which don't match these criteria, they are invalidated and trimmed out
5274    later.  */
5275 
5276 static void
5277 compute_ld_motion_mems (void)
5278 {
5279   struct ls_expr * ptr;
5280   basic_block bb;
5281   rtx insn;
5282 
5283   pre_ldst_mems = NULL;
5284   pre_ldst_table = htab_create (13, pre_ldst_expr_hash,
5285 				pre_ldst_expr_eq, NULL);
5286 
5287   FOR_EACH_BB (bb)
5288     {
5289       FOR_BB_INSNS (bb, insn)
5290 	{
5291 	  if (INSN_P (insn))
5292 	    {
5293 	      if (GET_CODE (PATTERN (insn)) == SET)
5294 		{
5295 		  rtx src = SET_SRC (PATTERN (insn));
5296 		  rtx dest = SET_DEST (PATTERN (insn));
5297 
5298 		  /* Check for a simple LOAD...  */
5299 		  if (MEM_P (src) && simple_mem (src))
5300 		    {
5301 		      ptr = ldst_entry (src);
5302 		      if (REG_P (dest))
5303 			ptr->loads = alloc_INSN_LIST (insn, ptr->loads);
5304 		      else
5305 			ptr->invalid = 1;
5306 		    }
5307 		  else
5308 		    {
5309 		      /* Make sure there isn't a buried load somewhere.  */
5310 		      invalidate_any_buried_refs (src);
5311 		    }
5312 
5313 		  /* Check for stores. Don't worry about aliased ones, they
5314 		     will block any movement we might do later. We only care
5315 		     about this exact pattern since that is the only
5316 		     circumstance in which we will ignore the aliasing info.  */
5317 		  if (MEM_P (dest) && simple_mem (dest))
5318 		    {
5319 		      ptr = ldst_entry (dest);
5320 
5321 		      if (! MEM_P (src)
5322 			  && GET_CODE (src) != ASM_OPERANDS
5323 			  /* Check for REG manually since want_to_gcse_p
5324 			     returns 0 for all REGs.  */
5325 			  && can_assign_to_reg_p (src))
5326 			ptr->stores = alloc_INSN_LIST (insn, ptr->stores);
5327 		      else
5328 			ptr->invalid = 1;
5329 		    }
5330 		}
5331 	      else
5332 		invalidate_any_buried_refs (PATTERN (insn));
5333 	    }
5334 	}
5335     }
5336 }
5337 
5338 /* Remove any references that have been either invalidated or are not in the
5339    expression list for pre gcse.  */
5340 
5341 static void
5342 trim_ld_motion_mems (void)
5343 {
5344   struct ls_expr * * last = & pre_ldst_mems;
5345   struct ls_expr * ptr = pre_ldst_mems;
5346 
5347   while (ptr != NULL)
5348     {
5349       struct expr * expr;
5350 
5351       /* Keep this entry only if it has not been invalidated.  */
5352       if (! ptr->invalid)
5353 	{
5354 	  /* Delete if we cannot find this mem in the expression list.  */
5355 	  unsigned int hash = ptr->hash_index % expr_hash_table.size;
5356 
5357 	  for (expr = expr_hash_table.table[hash];
5358 	       expr != NULL;
5359 	       expr = expr->next_same_hash)
5360 	    if (expr_equiv_p (expr->expr, ptr->pattern))
5361 	      break;
5362 	}
5363       else
5364 	expr = (struct expr *) 0;
5365 
5366       if (expr)
5367 	{
5368 	  /* Set the expression field if we are keeping it.  */
5369 	  ptr->expr = expr;
5370 	  last = & ptr->next;
5371 	  ptr = ptr->next;
5372 	}
5373       else
5374 	{
5375 	  *last = ptr->next;
5376 	  htab_remove_elt_with_hash (pre_ldst_table, ptr, ptr->hash_index);
5377 	  free_ldst_entry (ptr);
5378 	  ptr = * last;
5379 	}
5380     }
5381 
5382   /* Show the world what we've found.  */
5383   if (dump_file && pre_ldst_mems != NULL)
5384     print_ldst_list (dump_file);
5385 }
5386 
5387 /* This routine will take an expression which we are replacing with
5388    a reaching register, and update any stores that are needed if
5389    that expression is in the ld_motion list.  Stores are updated by
5390    copying their SRC to the reaching register, and then storing
5391    the reaching register into the store location.  This keeps the
5392    correct value in the reaching register for the loads.  */
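/* Schematically (assumed RTL, for illustration only):

	(set (mem X) expr)

   becomes

	(set reg expr)
	(set (mem X) reg)

   where reg is expr->reaching_reg, the pseudo that the hoisted load
   of (mem X) was redirected to.  */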
5393 
5394 static void
5395 update_ld_motion_stores (struct expr * expr)
5396 {
5397   struct ls_expr * mem_ptr;
5398 
5399   if ((mem_ptr = find_rtx_in_ldst (expr->expr)))
5400     {
5401       /* We could try to find just the REACHED stores, but it shouldn't
5402 	 matter to set the reaching reg everywhere...  some might be
5403 	 dead and should be eliminated later.  */
5404 
5405       /* We replace (set mem expr) with (set reg expr) (set mem reg)
5406 	 where reg is the reaching reg used in the load.  We checked in
5407 	 compute_ld_motion_mems that we can replace (set mem expr) with
5408 	 (set reg expr) in that insn.  */
5409       rtx list = mem_ptr->stores;
5410 
5411       for ( ; list != NULL_RTX; list = XEXP (list, 1))
5412 	{
5413 	  rtx insn = XEXP (list, 0);
5414 	  rtx pat = PATTERN (insn);
5415 	  rtx src = SET_SRC (pat);
5416 	  rtx reg = expr->reaching_reg;
5417 	  rtx copy, new;
5418 
5419 	  /* If we've already copied it, continue.  */
5420 	  if (expr->reaching_reg == src)
5421 	    continue;
5422 
5423 	  if (dump_file)
5424 	    {
5425 	      fprintf (dump_file, "PRE:  store updated with reaching reg ");
5426 	      print_rtl (dump_file, expr->reaching_reg);
5427 	      fprintf (dump_file, ":\n	");
5428 	      print_inline_rtx (dump_file, insn, 8);
5429 	      fprintf (dump_file, "\n");
5430 	    }
5431 
5432 	  copy = gen_move_insn ( reg, copy_rtx (SET_SRC (pat)));
5433 	  new = emit_insn_before (copy, insn);
5434 	  record_one_set (REGNO (reg), new);
5435 	  SET_SRC (pat) = reg;
5436 
5437 	  /* un-recognize this pattern since it's probably different now.  */
5438 	  INSN_CODE (insn) = -1;
5439 	  gcse_create_count++;
5440 	}
5441     }
5442 }
5443 
5444 /* Store motion code.  */
5445 
5446 #define ANTIC_STORE_LIST(x)		((x)->loads)
5447 #define AVAIL_STORE_LIST(x)		((x)->stores)
5448 #define LAST_AVAIL_CHECK_FAILURE(x)	((x)->reaching_reg)
5449 
5450 /* This is used to communicate the target array we want to use in the
5451    reg_set_info routine when called via the note_stores mechanism.  */
5452 static int * regvec;
5453 
5454 /* And current insn, for the same routine.  */
5455 static rtx compute_store_table_current_insn;
5456 
5457 /* Used in computing the reverse edge graph bit vectors.  */
5458 static sbitmap * st_antloc;
5459 
5460 /* Global holding the number of store expressions we are dealing with.  */
5461 static int num_stores;
5462 
5463 /* Check whether we need to mark a register set.  Called from
5464    note_stores.  */
5465 
5466 static void
5467 reg_set_info (rtx dest, rtx setter ATTRIBUTE_UNUSED,
5468 	      void *data)
5469 {
5470   sbitmap bb_reg = data;
5471 
5472   if (GET_CODE (dest) == SUBREG)
5473     dest = SUBREG_REG (dest);
5474 
5475   if (REG_P (dest))
5476     {
5477       regvec[REGNO (dest)] = INSN_UID (compute_store_table_current_insn);
5478       if (bb_reg)
5479 	SET_BIT (bb_reg, REGNO (dest));
5480     }
5481 }
5482 
5483 /* Clear any mark that says that this insn sets dest.  Called from
5484    note_stores.  */
5485 
5486 static void
5487 reg_clear_last_set (rtx dest, rtx setter ATTRIBUTE_UNUSED,
5488 	      void *data)
5489 {
5490   int *dead_vec = data;
5491 
5492   if (GET_CODE (dest) == SUBREG)
5493     dest = SUBREG_REG (dest);
5494 
5495   if (REG_P (dest) &&
5496       dead_vec[REGNO (dest)] == INSN_UID (compute_store_table_current_insn))
5497     dead_vec[REGNO (dest)] = 0;
5498 }
5499 
5500 /* Return false if some of the registers in list X are killed
5501    according to the set of registers recorded in REGS_SET.  */
5502 
5503 static bool
5504 store_ops_ok (rtx x, int *regs_set)
5505 {
5506   rtx reg;
5507 
5508   for (; x; x = XEXP (x, 1))
5509     {
5510       reg = XEXP (x, 0);
5511       if (regs_set[REGNO(reg)])
5512 	return false;
5513     }
5514 
5515   return true;
5516 }
5517 
5518 /* Returns a list of registers mentioned in X.  */
5519 static rtx
5520 extract_mentioned_regs (rtx x)
5521 {
5522   return extract_mentioned_regs_helper (x, NULL_RTX);
5523 }
5524 
5525 /* Helper for extract_mentioned_regs; ACCUM is used to accumulate used
5526    registers.  */
5527 static rtx
5528 extract_mentioned_regs_helper (rtx x, rtx accum)
5529 {
5530   int i;
5531   enum rtx_code code;
5532   const char * fmt;
5533 
5534   /* Repeat is used to turn tail-recursion into iteration.  */
5535  repeat:
5536 
5537   if (x == 0)
5538     return accum;
5539 
5540   code = GET_CODE (x);
5541   switch (code)
5542     {
5543     case REG:
5544       return alloc_EXPR_LIST (0, x, accum);
5545 
5546     case MEM:
5547       x = XEXP (x, 0);
5548       goto repeat;
5549 
5550     case PRE_DEC:
5551     case PRE_INC:
5552     case POST_DEC:
5553     case POST_INC:
5554       /* We do not run this function with arguments having side effects.  */
5555       gcc_unreachable ();
5556 
5557     case PC:
5558     case CC0: /*FIXME*/
5559     case CONST:
5560     case CONST_INT:
5561     case CONST_DOUBLE:
5562     case CONST_VECTOR:
5563     case SYMBOL_REF:
5564     case LABEL_REF:
5565     case ADDR_VEC:
5566     case ADDR_DIFF_VEC:
5567       return accum;
5568 
5569     default:
5570       break;
5571     }
5572 
5573   i = GET_RTX_LENGTH (code) - 1;
5574   fmt = GET_RTX_FORMAT (code);
5575 
5576   for (; i >= 0; i--)
5577     {
5578       if (fmt[i] == 'e')
5579 	{
5580 	  rtx tem = XEXP (x, i);
5581 
5582 	  /* If we are about to do the last recursive call
5583 	     needed at this level, change it into iteration.  */
5584 	  if (i == 0)
5585 	    {
5586 	      x = tem;
5587 	      goto repeat;
5588 	    }
5589 
5590 	  accum = extract_mentioned_regs_helper (tem, accum);
5591 	}
5592       else if (fmt[i] == 'E')
5593 	{
5594 	  int j;
5595 
5596 	  for (j = 0; j < XVECLEN (x, i); j++)
5597 	    accum = extract_mentioned_regs_helper (XVECEXP (x, i, j), accum);
5598 	}
5599     }
5600 
5601   return accum;
5602 }
5603 
5604 /* Determine whether INSN is a MEM store pattern that we will consider moving.
5605    REGS_SET_BEFORE records the registers set before (and including) the
5606    current insn; REGS_SET_AFTER records the registers set after (and
5607    including) the insn in this basic block.  We must be passing through BB from
5608    head to end, as we are using this fact to speed things up.
5609 
5610    The results are stored this way:
5611 
5612    -- the first anticipatable expression is added into ANTIC_STORE_LIST
5613    -- if the processed expression is not anticipatable, NULL_RTX is added
5614       there instead, so that we can use it as indicator that no further
5615       expression of this type may be anticipatable
5616    -- if the expression is available, it is added as head of AVAIL_STORE_LIST;
5617       consequently, all of them but this head are dead and may be deleted.
5618    -- if the expression is not available, the insn that causes it to fail
5619       to be available is stored in reaching_reg.
5620 
5621    Things are complicated a bit by the fact that there may already be stores
5622    to the same MEM from other blocks; also caller must take care of the
5623    necessary cleanup of the temporary markers after end of the basic block.
5624    */
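/* A hypothetical block, to illustrate the marking scheme:

	(set (mem X) a)		first store: anticipatable, provided
				X's address regs are not set above it
	(set b (mem X))		an aliased load
	(set (mem X) c)		last store: available, provided nothing
				below it reads or clobbers (mem X)

   The first store lands in ANTIC_STORE_LIST and the last one in
   AVAIL_STORE_LIST; the intervening load is what keeps the first
   store from also being available.  */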
5625 
5626 static void
5627 find_moveable_store (rtx insn, int *regs_set_before, int *regs_set_after)
5628 {
5629   struct ls_expr * ptr;
5630   rtx dest, set, tmp;
5631   int check_anticipatable, check_available;
5632   basic_block bb = BLOCK_FOR_INSN (insn);
5633 
5634   set = single_set (insn);
5635   if (!set)
5636     return;
5637 
5638   dest = SET_DEST (set);
5639 
5640   if (! MEM_P (dest) || MEM_VOLATILE_P (dest)
5641       || GET_MODE (dest) == BLKmode)
5642     return;
5643 
5644   if (side_effects_p (dest))
5645     return;
5646 
5647   /* If we are handling exceptions, we must be careful with memory references
5648      that may trap. If we are not, the behavior is undefined, so we may just
5649      continue.  */
5650   if (flag_non_call_exceptions && may_trap_p (dest))
5651     return;
5652 
5653   /* Even if the destination cannot trap, the source may.  In this case we'd
5654      need to handle updating the REG_EH_REGION note.  */
5655   if (find_reg_note (insn, REG_EH_REGION, NULL_RTX))
5656     return;
5657 
5658   /* Make sure that the SET_SRC of this store insns can be assigned to
5659      a register, or we will fail later on in replace_store_insn, which
5660      assumes that we can do this.  But sometimes the target machine has
5661      oddities like MEM read-modify-write instruction.  See for example
5662      PR24257.  */
5663   if (!can_assign_to_reg_p (SET_SRC (set)))
5664     return;
5665 
5666   ptr = ldst_entry (dest);
5667   if (!ptr->pattern_regs)
5668     ptr->pattern_regs = extract_mentioned_regs (dest);
5669 
5670   /* Do not check for anticipatability if we either found one anticipatable
5671      store already, or tested for one and found out that it was killed.  */
5672   check_anticipatable = 0;
5673   if (!ANTIC_STORE_LIST (ptr))
5674     check_anticipatable = 1;
5675   else
5676     {
5677       tmp = XEXP (ANTIC_STORE_LIST (ptr), 0);
5678       if (tmp != NULL_RTX
5679 	  && BLOCK_FOR_INSN (tmp) != bb)
5680 	check_anticipatable = 1;
5681     }
5682   if (check_anticipatable)
5683     {
5684       if (store_killed_before (dest, ptr->pattern_regs, insn, bb, regs_set_before))
5685 	tmp = NULL_RTX;
5686       else
5687 	tmp = insn;
5688       ANTIC_STORE_LIST (ptr) = alloc_INSN_LIST (tmp,
5689 						ANTIC_STORE_LIST (ptr));
5690     }
5691 
5692   /* It is not necessary to check whether store is available if we did
5693      it successfully before; if we failed before, do not bother to check
5694      until we reach the insn that caused us to fail.  */
5695   check_available = 0;
5696   if (!AVAIL_STORE_LIST (ptr))
5697     check_available = 1;
5698   else
5699     {
5700       tmp = XEXP (AVAIL_STORE_LIST (ptr), 0);
5701       if (BLOCK_FOR_INSN (tmp) != bb)
5702 	check_available = 1;
5703     }
5704   if (check_available)
5705     {
5706       /* Check whether we have already reached the insn at which the check
5707 	 failed last time.  */
5708       if (LAST_AVAIL_CHECK_FAILURE (ptr))
5709 	{
5710 	  for (tmp = BB_END (bb);
5711 	       tmp != insn && tmp != LAST_AVAIL_CHECK_FAILURE (ptr);
5712 	       tmp = PREV_INSN (tmp))
5713 	    continue;
5714 	  if (tmp == insn)
5715 	    check_available = 0;
5716 	}
5717       else
5718 	check_available = store_killed_after (dest, ptr->pattern_regs, insn,
5719 					      bb, regs_set_after,
5720 					      &LAST_AVAIL_CHECK_FAILURE (ptr));
5721     }
5722   if (!check_available)
5723     AVAIL_STORE_LIST (ptr) = alloc_INSN_LIST (insn, AVAIL_STORE_LIST (ptr));
5724 }
5725 
5726 /* Find available and anticipatable stores.  */
5727 
5728 static int
5729 compute_store_table (void)
5730 {
5731   int ret;
5732   basic_block bb;
5733   unsigned regno;
5734   rtx insn, pat, tmp;
5735   int *last_set_in, *already_set;
5736   struct ls_expr * ptr, **prev_next_ptr_ptr;
5737 
5738   max_gcse_regno = max_reg_num ();
5739 
5740   reg_set_in_block = sbitmap_vector_alloc (last_basic_block,
5741 						       max_gcse_regno);
5742   sbitmap_vector_zero (reg_set_in_block, last_basic_block);
5743   pre_ldst_mems = 0;
5744   pre_ldst_table = htab_create (13, pre_ldst_expr_hash,
5745 				pre_ldst_expr_eq, NULL);
5746   last_set_in = XCNEWVEC (int, max_gcse_regno);
5747   already_set = XNEWVEC (int, max_gcse_regno);
5748 
5749   /* Find all the stores we care about.  */
5750   FOR_EACH_BB (bb)
5751     {
5752       /* First compute the registers set in this block.  */
5753       regvec = last_set_in;
5754 
5755       FOR_BB_INSNS (bb, insn)
5756 	{
5757 	  if (! INSN_P (insn))
5758 	    continue;
5759 
5760 	  if (CALL_P (insn))
5761 	    {
5762 	      for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
5763 		if (TEST_HARD_REG_BIT (regs_invalidated_by_call, regno))
5764 		  {
5765 		    last_set_in[regno] = INSN_UID (insn);
5766 		    SET_BIT (reg_set_in_block[bb->index], regno);
5767 		  }
5768 	    }
5769 
5770 	  pat = PATTERN (insn);
5771 	  compute_store_table_current_insn = insn;
5772 	  note_stores (pat, reg_set_info, reg_set_in_block[bb->index]);
5773 	}
5774 
5775       /* Now find the stores.  */
5776       memset (already_set, 0, sizeof (int) * max_gcse_regno);
5777       regvec = already_set;
5778       FOR_BB_INSNS (bb, insn)
5779 	{
5780 	  if (! INSN_P (insn))
5781 	    continue;
5782 
5783 	  if (CALL_P (insn))
5784 	    {
5785 	      for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
5786 		if (TEST_HARD_REG_BIT (regs_invalidated_by_call, regno))
5787 		  already_set[regno] = 1;
5788 	    }
5789 
5790 	  pat = PATTERN (insn);
5791 	  note_stores (pat, reg_set_info, NULL);
5792 
5793 	  /* Now that we've marked regs, look for stores.  */
5794 	  find_moveable_store (insn, already_set, last_set_in);
5795 
5796 	  /* Unmark regs that are no longer set.  */
5797 	  compute_store_table_current_insn = insn;
5798 	  note_stores (pat, reg_clear_last_set, last_set_in);
5799 	  if (CALL_P (insn))
5800 	    {
5801 	      for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
5802 		if (TEST_HARD_REG_BIT (regs_invalidated_by_call, regno)
5803 		    && last_set_in[regno] == INSN_UID (insn))
5804 		  last_set_in[regno] = 0;
5805 	    }
5806 	}
5807 
5808 #ifdef ENABLE_CHECKING
5809       /* last_set_in should now be all-zero.  */
5810       for (regno = 0; regno < max_gcse_regno; regno++)
5811 	gcc_assert (!last_set_in[regno]);
5812 #endif
5813 
5814       /* Clear temporary marks.  */
5815       for (ptr = first_ls_expr (); ptr != NULL; ptr = next_ls_expr (ptr))
5816 	{
5817 	  LAST_AVAIL_CHECK_FAILURE(ptr) = NULL_RTX;
5818 	  if (ANTIC_STORE_LIST (ptr)
5819 	      && (tmp = XEXP (ANTIC_STORE_LIST (ptr), 0)) == NULL_RTX)
5820 	    ANTIC_STORE_LIST (ptr) = XEXP (ANTIC_STORE_LIST (ptr), 1);
5821 	}
5822     }
5823 
5824   /* Remove the stores that are not available anywhere, as there will
5825      be no opportunity to optimize them.  */
5826   for (ptr = pre_ldst_mems, prev_next_ptr_ptr = &pre_ldst_mems;
5827        ptr != NULL;
5828        ptr = *prev_next_ptr_ptr)
5829     {
5830       if (!AVAIL_STORE_LIST (ptr))
5831 	{
5832 	  *prev_next_ptr_ptr = ptr->next;
5833 	  htab_remove_elt_with_hash (pre_ldst_table, ptr, ptr->hash_index);
5834 	  free_ldst_entry (ptr);
5835 	}
5836       else
5837 	prev_next_ptr_ptr = &ptr->next;
5838     }
5839 
5840   ret = enumerate_ldsts ();
5841 
5842   if (dump_file)
5843     {
5844       fprintf (dump_file, "ST_avail and ST_antic (shown under loads...)\n");
5845       print_ldst_list (dump_file);
5846     }
5847 
5848   free (last_set_in);
5849   free (already_set);
5850   return ret;
5851 }
5852 
5853 /* Check to see if the load X is aliased with STORE_PATTERN.
5854    AFTER is true if we are checking the case when STORE_PATTERN occurs
5855    after X.  */
5856 
5857 static bool
5858 load_kills_store (rtx x, rtx store_pattern, int after)
5859 {
5860   if (after)
5861     return anti_dependence (x, store_pattern);
5862   else
5863     return true_dependence (store_pattern, GET_MODE (store_pattern), x,
5864 			    rtx_addr_varies_p);
5865 }
5866 
5867 /* Go through the entire insn X, looking for any loads which might alias
5868    STORE_PATTERN.  Return true if found.
5869    AFTER is true if we are checking the case when STORE_PATTERN occurs
5870    after the insn X.  */
5871 
5872 static bool
5873 find_loads (rtx x, rtx store_pattern, int after)
5874 {
5875   const char * fmt;
5876   int i, j;
5877   int ret = false;
5878 
5879   if (!x)
5880     return false;
5881 
5882   if (GET_CODE (x) == SET)
5883     x = SET_SRC (x);
5884 
5885   if (MEM_P (x))
5886     {
5887       if (load_kills_store (x, store_pattern, after))
5888 	return true;
5889     }
5890 
5891   /* Recursively process the insn.  */
5892   fmt = GET_RTX_FORMAT (GET_CODE (x));
5893 
5894   for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0 && !ret; i--)
5895     {
5896       if (fmt[i] == 'e')
5897 	ret |= find_loads (XEXP (x, i), store_pattern, after);
5898       else if (fmt[i] == 'E')
5899 	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
5900 	  ret |= find_loads (XVECEXP (x, i, j), store_pattern, after);
5901     }
5902   return ret;
5903 }
5904 
5905 /* Check if INSN kills the store pattern X (is aliased with it).
5906    AFTER is true if we are checking the case when store X occurs
5907    after the insn.  Return true if it does.  */
5908 
5909 static bool
5910 store_killed_in_insn (rtx x, rtx x_regs, rtx insn, int after)
5911 {
5912   rtx reg, base, note;
5913 
5914   if (!INSN_P (insn))
5915     return false;
5916 
5917   if (CALL_P (insn))
5918     {
5919       /* A normal or pure call might read from pattern,
5920 	 but a const call will not.  */
5921       if (! CONST_OR_PURE_CALL_P (insn) || pure_call_p (insn))
5922 	return true;
5923 
5924       /* But even a const call reads its parameters.  Check whether the
5925 	 base of some of the registers used in the mem is the stack pointer.  */
5926       for (reg = x_regs; reg; reg = XEXP (reg, 1))
5927 	{
5928 	  base = find_base_term (XEXP (reg, 0));
5929 	  if (!base
5930 	      || (GET_CODE (base) == ADDRESS
5931 		  && GET_MODE (base) == Pmode
5932 		  && XEXP (base, 0) == stack_pointer_rtx))
5933 	    return true;
5934 	}
5935 
5936       return false;
5937     }
5938 
5939   if (GET_CODE (PATTERN (insn)) == SET)
5940     {
5941       rtx pat = PATTERN (insn);
5942       rtx dest = SET_DEST (pat);
5943 
5944       if (GET_CODE (dest) == ZERO_EXTRACT)
5945 	dest = XEXP (dest, 0);
5946 
5947       /* Check for memory stores to aliased objects.  */
5948       if (MEM_P (dest)
5949 	  && !expr_equiv_p (dest, x))
5950 	{
5951 	  if (after)
5952 	    {
5953 	      if (output_dependence (dest, x))
5954 		return true;
5955 	    }
5956 	  else
5957 	    {
5958 	      if (output_dependence (x, dest))
5959 		return true;
5960 	    }
5961 	}
5962       if (find_loads (SET_SRC (pat), x, after))
5963 	return true;
5964     }
5965   else if (find_loads (PATTERN (insn), x, after))
5966     return true;
5967 
5968   /* If this insn has a REG_EQUAL or REG_EQUIV note referencing a memory
5969      location aliased with X, then this insn kills X.  */
5970   note = find_reg_equal_equiv_note (insn);
5971   if (! note)
5972     return false;
5973   note = XEXP (note, 0);
5974 
5975   /* However, if the note represents a must alias rather than a may
5976      alias relationship, then it does not kill X.  */
5977   if (expr_equiv_p (note, x))
5978     return false;
5979 
5980   /* See if there are any aliased loads in the note.  */
5981   return find_loads (note, x, after);
5982 }
5983 
5984 /* Returns true if the expression X is loaded or clobbered on or after INSN
5985    within basic block BB.  REGS_SET_AFTER records the registers set in
5986    or after the insn.  X_REGS is the list of registers mentioned in X.  If
5987    the store is killed, return in FAIL_INSN the last insn in which that happens.  */
5988 
5989 static bool
5990 store_killed_after (rtx x, rtx x_regs, rtx insn, basic_block bb,
5991 		    int *regs_set_after, rtx *fail_insn)
5992 {
5993   rtx last = BB_END (bb), act;
5994 
5995   if (!store_ops_ok (x_regs, regs_set_after))
5996     {
5997       /* We do not know where it will happen.  */
5998       if (fail_insn)
5999 	*fail_insn = NULL_RTX;
6000       return true;
6001     }
6002 
6003   /* Scan from the end, so that fail_insn is determined correctly.  */
6004   for (act = last; act != PREV_INSN (insn); act = PREV_INSN (act))
6005     if (store_killed_in_insn (x, x_regs, act, false))
6006       {
6007 	if (fail_insn)
6008 	  *fail_insn = act;
6009 	return true;
6010       }
6011 
6012   return false;
6013 }
6014 
6015 /* Returns true if the expression X is loaded or clobbered on or before INSN
6016    within basic block BB. X_REGS is list of registers mentioned in X.
6017    REGS_SET_BEFORE records the registers set before or in this insn.  */
6018 static bool
6019 store_killed_before (rtx x, rtx x_regs, rtx insn, basic_block bb,
6020 		     int *regs_set_before)
6021 {
6022   rtx first = BB_HEAD (bb);
6023 
6024   if (!store_ops_ok (x_regs, regs_set_before))
6025     return true;
6026 
6027   for ( ; insn != PREV_INSN (first); insn = PREV_INSN (insn))
6028     if (store_killed_in_insn (x, x_regs, insn, true))
6029       return true;
6030 
6031   return false;
6032 }
6033 
6034 /* Fill in the available, anticipatable, transparent and kill vectors,
6035    based on the lists of available and anticipatable stores.  */
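/* Roughly (our reading of the vectors, for orientation only):

     ae_gen[bb]    - stores still available at the end of bb
     st_antloc[bb] - stores anticipatable at the start of bb
     transp[bb]    - stores killed nowhere within bb
     ae_kill[bb]   - stores killed somewhere within bb

   pre_edge_rev_lcm later consumes these to decide, per edge, where
   the moved stores must be inserted.  */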
6036 static void
6037 build_store_vectors (void)
6038 {
6039   basic_block bb;
6040   int *regs_set_in_block;
6041   rtx insn, st;
6042   struct ls_expr * ptr;
6043   unsigned regno;
6044 
6045   /* Build the gen_vector. This is any store in the table which is not killed
6046      by aliasing later in its block.  */
6047   ae_gen = sbitmap_vector_alloc (last_basic_block, num_stores);
6048   sbitmap_vector_zero (ae_gen, last_basic_block);
6049 
6050   st_antloc = sbitmap_vector_alloc (last_basic_block, num_stores);
6051   sbitmap_vector_zero (st_antloc, last_basic_block);
6052 
6053   for (ptr = first_ls_expr (); ptr != NULL; ptr = next_ls_expr (ptr))
6054     {
6055       for (st = AVAIL_STORE_LIST (ptr); st != NULL; st = XEXP (st, 1))
6056 	{
6057 	  insn = XEXP (st, 0);
6058 	  bb = BLOCK_FOR_INSN (insn);
6059 
6060 	  /* If we've already seen an available expression in this block,
6061 	     we can delete this one (it occurs earlier in the block).  We'll
6062 	     copy the SRC expression to an unused register in case there
6063 	     are any side effects.  */
6064 	  if (TEST_BIT (ae_gen[bb->index], ptr->index))
6065 	    {
6066 	      rtx r = gen_reg_rtx (GET_MODE (ptr->pattern));
6067 	      if (dump_file)
6068 		fprintf (dump_file, "Removing redundant store:\n");
6069 	      replace_store_insn (r, XEXP (st, 0), bb, ptr);
6070 	      continue;
6071 	    }
6072 	  SET_BIT (ae_gen[bb->index], ptr->index);
6073 	}
6074 
6075       for (st = ANTIC_STORE_LIST (ptr); st != NULL; st = XEXP (st, 1))
6076 	{
6077 	  insn = XEXP (st, 0);
6078 	  bb = BLOCK_FOR_INSN (insn);
6079 	  SET_BIT (st_antloc[bb->index], ptr->index);
6080 	}
6081     }
6082 
6083   ae_kill = sbitmap_vector_alloc (last_basic_block, num_stores);
6084   sbitmap_vector_zero (ae_kill, last_basic_block);
6085 
6086   transp = sbitmap_vector_alloc (last_basic_block, num_stores);
6087   sbitmap_vector_zero (transp, last_basic_block);
6088   regs_set_in_block = XNEWVEC (int, max_gcse_regno);
6089 
6090   FOR_EACH_BB (bb)
6091     {
6092       for (regno = 0; regno < max_gcse_regno; regno++)
6093 	regs_set_in_block[regno] = TEST_BIT (reg_set_in_block[bb->index], regno);
6094 
6095       for (ptr = first_ls_expr (); ptr != NULL; ptr = next_ls_expr (ptr))
6096 	{
6097 	  if (store_killed_after (ptr->pattern, ptr->pattern_regs, BB_HEAD (bb),
6098 				  bb, regs_set_in_block, NULL))
6099 	    {
6100 	      /* It should not be necessary to consider the expression
6101 		 killed if it is both anticipatable and available.  */
6102 	      if (!TEST_BIT (st_antloc[bb->index], ptr->index)
6103 		  || !TEST_BIT (ae_gen[bb->index], ptr->index))
6104 		SET_BIT (ae_kill[bb->index], ptr->index);
6105 	    }
6106 	  else
6107 	    SET_BIT (transp[bb->index], ptr->index);
6108 	}
6109     }
6110 
6111   free (regs_set_in_block);
6112 
6113   if (dump_file)
6114     {
6115       dump_sbitmap_vector (dump_file, "st_antloc", "", st_antloc, last_basic_block);
6116       dump_sbitmap_vector (dump_file, "st_kill", "", ae_kill, last_basic_block);
6117       dump_sbitmap_vector (dump_file, "Transpt", "", transp, last_basic_block);
6118       dump_sbitmap_vector (dump_file, "st_avloc", "", ae_gen, last_basic_block);
6119     }
6120 }
6121 
6122 /* Insert an instruction at the beginning of a basic block, and update
6123    the BB_HEAD if needed.  */
6124 
6125 static void
6126 insert_insn_start_bb (rtx insn, basic_block bb)
6127 {
6128   /* Insert at start of successor block.  */
6129   rtx prev = PREV_INSN (BB_HEAD (bb));
6130   rtx before = BB_HEAD (bb);
6131   while (before != 0)
6132     {
6133       if (! LABEL_P (before)
6134 	  && (! NOTE_P (before)
6135 	      || NOTE_LINE_NUMBER (before) != NOTE_INSN_BASIC_BLOCK))
6136 	break;
6137       prev = before;
6138       if (prev == BB_END (bb))
6139 	break;
6140       before = NEXT_INSN (before);
6141     }
6142 
6143   insn = emit_insn_after_noloc (insn, prev);
6144 
6145   if (dump_file)
6146     {
6147       fprintf (dump_file, "STORE_MOTION  insert store at start of BB %d:\n",
6148 	       bb->index);
6149       print_inline_rtx (dump_file, insn, 6);
6150       fprintf (dump_file, "\n");
6151     }
6152 }
6153 
6154 /* This routine will insert a store on an edge. EXPR is the ldst entry for
6155    the memory reference, and E is the edge to insert it on.  Returns nonzero
6156    if an edge insertion was performed.  */
6157 
6158 static int
6159 insert_store (struct ls_expr * expr, edge e)
6160 {
6161   rtx reg, insn;
6162   basic_block bb;
6163   edge tmp;
6164   edge_iterator ei;
6165 
6166   /* We did all the deletes before this insert, so if we didn't delete a
6167      store, then we haven't set the reaching reg yet either.  */
6168   if (expr->reaching_reg == NULL_RTX)
6169     return 0;
6170 
6171   if (e->flags & EDGE_FAKE)
6172     return 0;
6173 
6174   reg = expr->reaching_reg;
6175   insn = gen_move_insn (copy_rtx (expr->pattern), reg);
6176 
6177   /* If we are inserting this expression on ALL predecessor edges of a BB,
6178      insert it at the start of the BB, and reset the insert bits on the other
6179      edges so we don't try to insert it on the other edges.  */
6180   bb = e->dest;
6181   FOR_EACH_EDGE (tmp, ei, e->dest->preds)
6182     if (!(tmp->flags & EDGE_FAKE))
6183       {
6184 	int index = EDGE_INDEX (edge_list, tmp->src, tmp->dest);
6185 
6186 	gcc_assert (index != EDGE_INDEX_NO_EDGE);
6187 	if (! TEST_BIT (pre_insert_map[index], expr->index))
6188 	  break;
6189       }
6190 
6191   /* If tmp is NULL, we found an insertion on every edge, blank the
6192      insertion vector for these edges, and insert at the start of the BB.  */
6193   if (!tmp && bb != EXIT_BLOCK_PTR)
6194     {
6195       FOR_EACH_EDGE (tmp, ei, e->dest->preds)
6196 	{
6197 	  int index = EDGE_INDEX (edge_list, tmp->src, tmp->dest);
6198 	  RESET_BIT (pre_insert_map[index], expr->index);
6199 	}
6200       insert_insn_start_bb (insn, bb);
6201       return 0;
6202     }
6203 
6204   /* We can't put stores in the front of blocks pointed to by abnormal
6205      edges since that may put a store where there wasn't one before.  */
6206   gcc_assert (!(e->flags & EDGE_ABNORMAL));
6207 
6208   insert_insn_on_edge (insn, e);
6209 
6210   if (dump_file)
6211     {
6212       fprintf (dump_file, "STORE_MOTION  insert insn on edge (%d, %d):\n",
6213 	       e->src->index, e->dest->index);
6214       print_inline_rtx (dump_file, insn, 6);
6215       fprintf (dump_file, "\n");
6216     }
6217 
6218   return 1;
6219 }
6220 
6221 /* Remove any REG_EQUAL or REG_EQUIV notes containing a reference to the
6222    memory location in SMEXPR set in basic block BB.
6223 
6224    This could be rather expensive.  */
6225 
6226 static void
6227 remove_reachable_equiv_notes (basic_block bb, struct ls_expr *smexpr)
6228 {
6229   edge_iterator *stack, ei;
6230   int sp;
6231   edge act;
6232   sbitmap visited = sbitmap_alloc (last_basic_block);
6233   rtx last, insn, note;
6234   rtx mem = smexpr->pattern;
6235 
6236   stack = XNEWVEC (edge_iterator, n_basic_blocks);
6237   sp = 0;
6238   ei = ei_start (bb->succs);
6239 
6240   sbitmap_zero (visited);
6241 
6242   act = (EDGE_COUNT (ei_container (ei)) > 0 ? EDGE_I (ei_container (ei), 0) : NULL);
6243   while (1)
6244     {
6245       if (!act)
6246 	{
6247 	  if (!sp)
6248 	    {
6249 	      free (stack);
6250 	      sbitmap_free (visited);
6251 	      return;
6252 	    }
6253 	  act = ei_edge (stack[--sp]);
6254 	}
6255       bb = act->dest;
6256 
6257       if (bb == EXIT_BLOCK_PTR
6258 	  || TEST_BIT (visited, bb->index))
6259 	{
6260 	  if (!ei_end_p (ei))
6261 	      ei_next (&ei);
6262 	  act = (! ei_end_p (ei)) ? ei_edge (ei) : NULL;
6263 	  continue;
6264 	}
6265       SET_BIT (visited, bb->index);
6266 
6267       if (TEST_BIT (st_antloc[bb->index], smexpr->index))
6268 	{
6269 	  for (last = ANTIC_STORE_LIST (smexpr);
6270 	       BLOCK_FOR_INSN (XEXP (last, 0)) != bb;
6271 	       last = XEXP (last, 1))
6272 	    continue;
6273 	  last = XEXP (last, 0);
6274 	}
6275       else
6276 	last = NEXT_INSN (BB_END (bb));
6277 
6278       for (insn = BB_HEAD (bb); insn != last; insn = NEXT_INSN (insn))
6279 	if (INSN_P (insn))
6280 	  {
6281 	    note = find_reg_equal_equiv_note (insn);
6282 	    if (!note || !expr_equiv_p (XEXP (note, 0), mem))
6283 	      continue;
6284 
6285 	    if (dump_file)
6286 	      fprintf (dump_file, "STORE_MOTION  drop REG_EQUAL note at insn %d:\n",
6287 		       INSN_UID (insn));
6288 	    remove_note (insn, note);
6289 	  }
6290 
6291       if (!ei_end_p (ei))
6292 	ei_next (&ei);
6293       act = (! ei_end_p (ei)) ? ei_edge (ei) : NULL;
6294 
6295       if (EDGE_COUNT (bb->succs) > 0)
6296 	{
6297 	  if (act)
6298 	    stack[sp++] = ei;
6299 	  ei = ei_start (bb->succs);
6300 	  act = (EDGE_COUNT (ei_container (ei)) > 0 ? EDGE_I (ei_container (ei), 0) : NULL);
6301 	}
6302     }
6303 }
6304 
6305 /* This routine will replace a store with a SET to a specified register.  */
6306 
6307 static void
6308 replace_store_insn (rtx reg, rtx del, basic_block bb, struct ls_expr *smexpr)
6309 {
6310   rtx insn, mem, note, set, ptr, pair;
6311 
6312   mem = smexpr->pattern;
6313   insn = gen_move_insn (reg, SET_SRC (single_set (del)));
6314   insn = emit_insn_after (insn, del);
6315 
6316   if (dump_file)
6317     {
6318       fprintf (dump_file,
6319 	       "STORE_MOTION  delete insn in BB %d:\n      ", bb->index);
6320       print_inline_rtx (dump_file, del, 6);
6321       fprintf (dump_file, "\nSTORE_MOTION  replaced with insn:\n      ");
6322       print_inline_rtx (dump_file, insn, 6);
6323       fprintf (dump_file, "\n");
6324     }
6325 
6326   for (ptr = ANTIC_STORE_LIST (smexpr); ptr; ptr = XEXP (ptr, 1))
6327     if (XEXP (ptr, 0) == del)
6328       {
6329 	XEXP (ptr, 0) = insn;
6330 	break;
6331       }
6332 
6333   /* Move the notes from the deleted insn to its replacement, and patch
6334      up the LIBCALL notes.  */
6335   REG_NOTES (insn) = REG_NOTES (del);
6336 
6337   note = find_reg_note (insn, REG_RETVAL, NULL_RTX);
6338   if (note)
6339     {
6340       pair = XEXP (note, 0);
6341       note = find_reg_note (pair, REG_LIBCALL, NULL_RTX);
6342       XEXP (note, 0) = insn;
6343     }
6344   note = find_reg_note (insn, REG_LIBCALL, NULL_RTX);
6345   if (note)
6346     {
6347       pair = XEXP (note, 0);
6348       note = find_reg_note (pair, REG_RETVAL, NULL_RTX);
6349       XEXP (note, 0) = insn;
6350     }
6351 
6352   delete_insn (del);
6353 
6354   /* Now we must handle REG_EQUAL notes whose contents are equal to the mem;
6355      they are no longer accurate wherever they are reached by this
6356      definition, so drop them.  */
6357   for (; insn != NEXT_INSN (BB_END (bb)); insn = NEXT_INSN (insn))
6358     if (INSN_P (insn))
6359       {
6360 	set = single_set (insn);
6361 	if (!set)
6362 	  continue;
6363 	if (expr_equiv_p (SET_DEST (set), mem))
6364 	  return;
6365 	note = find_reg_equal_equiv_note (insn);
6366 	if (!note || !expr_equiv_p (XEXP (note, 0), mem))
6367 	  continue;
6368 
6369 	if (dump_file)
6370 	  fprintf (dump_file, "STORE_MOTION  drop REG_EQUAL note at insn %d:\n",
6371 		   INSN_UID (insn));
6372 	remove_note (insn, note);
6373       }
6374   remove_reachable_equiv_notes (bb, smexpr);
6375 }
6376 
6377 
6378 /* Delete a store, but copy the value that would have been stored into
6379    the reaching_reg for later storing.  */
6380 
6381 static void
6382 delete_store (struct ls_expr * expr, basic_block bb)
6383 {
6384   rtx reg, i, del;
6385 
6386   if (expr->reaching_reg == NULL_RTX)
6387     expr->reaching_reg = gen_reg_rtx (GET_MODE (expr->pattern));
6388 
6389   reg = expr->reaching_reg;
6390 
6391   for (i = AVAIL_STORE_LIST (expr); i; i = XEXP (i, 1))
6392     {
6393       del = XEXP (i, 0);
6394       if (BLOCK_FOR_INSN (del) == bb)
6395 	{
6396 	  /* We know there is only one since we deleted redundant
6397 	     ones during the available computation.  */
6398 	  replace_store_insn (reg, del, bb, expr);
6399 	  break;
6400 	}
6401     }
6402 }
6403 
6404 /* Free memory used by store motion.  */
6405 
6406 static void
6407 free_store_memory (void)
6408 {
6409   free_ldst_mems ();
6410 
6411   if (ae_gen)
6412     sbitmap_vector_free (ae_gen);
6413   if (ae_kill)
6414     sbitmap_vector_free (ae_kill);
6415   if (transp)
6416     sbitmap_vector_free (transp);
6417   if (st_antloc)
6418     sbitmap_vector_free (st_antloc);
6419   if (pre_insert_map)
6420     sbitmap_vector_free (pre_insert_map);
6421   if (pre_delete_map)
6422     sbitmap_vector_free (pre_delete_map);
6423   if (reg_set_in_block)
6424     sbitmap_vector_free (reg_set_in_block);
6425 
6426   ae_gen = ae_kill = transp = st_antloc = NULL;
6427   pre_insert_map = pre_delete_map = reg_set_in_block = NULL;
6428 }
6429 
6430 /* Perform store motion. Much like gcse, except we move expressions the
6431    other way by looking at the flowgraph in reverse.  */
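/* In outline (a summary of the calls below, not additional behavior):

     1. compute_store_table ()  - find anticipatable/available stores
     2. build_store_vectors ()  - turn the lists into dataflow vectors
     3. pre_edge_rev_lcm ()     - LCM over the reverse flowgraph
     4. delete_store () / insert_store () for each expression
     5. commit_edge_insertions () if any edge insertion happened  */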
6432 
6433 static void
6434 store_motion (void)
6435 {
6436   basic_block bb;
6437   int x;
6438   struct ls_expr * ptr;
6439   int update_flow = 0;
6440 
6441   if (dump_file)
6442     {
6443       fprintf (dump_file, "before store motion\n");
6444       print_rtl (dump_file, get_insns ());
6445     }
6446 
6447   init_alias_analysis ();
6448 
6449   /* Find all the available and anticipatable stores.  */
6450   num_stores = compute_store_table ();
6451   if (num_stores == 0)
6452     {
6453       htab_delete (pre_ldst_table);
6454       pre_ldst_table = NULL;
6455       sbitmap_vector_free (reg_set_in_block);
6456       end_alias_analysis ();
6457       return;
6458     }
6459 
6460   /* Now compute kill & transp vectors.  */
6461   build_store_vectors ();
6462   add_noreturn_fake_exit_edges ();
6463   connect_infinite_loops_to_exit ();
6464 
6465   edge_list = pre_edge_rev_lcm (num_stores, transp, ae_gen,
6466 				st_antloc, ae_kill, &pre_insert_map,
6467 				&pre_delete_map);
6468 
6469   /* Now we want to insert the new stores which are going to be needed.  */
6470   for (ptr = first_ls_expr (); ptr != NULL; ptr = next_ls_expr (ptr))
6471     {
6472       /* If any of the edges we have above are abnormal, we can't move this
6473 	 store.  */
6474       for (x = NUM_EDGES (edge_list) - 1; x >= 0; x--)
6475 	if (TEST_BIT (pre_insert_map[x], ptr->index)
6476 	    && (INDEX_EDGE (edge_list, x)->flags & EDGE_ABNORMAL))
6477 	  break;
6478 
6479       if (x >= 0)
6480 	{
6481 	  if (dump_file != NULL)
6482 	    fprintf (dump_file,
6483 		     "Can't replace store %d: abnormal edge from %d to %d\n",
6484 		     ptr->index, INDEX_EDGE (edge_list, x)->src->index,
6485 		     INDEX_EDGE (edge_list, x)->dest->index);
6486 	  continue;
6487 	}
6488 
6489       /* Delete the stores this expression makes redundant, then insert
6490 	 the replacements on the edges chosen by LCM.  */
6490 
6491       FOR_EACH_BB (bb)
6492 	if (TEST_BIT (pre_delete_map[bb->index], ptr->index))
6493 	  delete_store (ptr, bb);
6494 
6495       for (x = 0; x < NUM_EDGES (edge_list); x++)
6496 	if (TEST_BIT (pre_insert_map[x], ptr->index))
6497 	  update_flow |= insert_store (ptr, INDEX_EDGE (edge_list, x));
6498     }
6499 
6500   if (update_flow)
6501     commit_edge_insertions ();
6502 
6503   free_store_memory ();
6504   free_edge_list (edge_list);
6505   remove_fake_exit_edges ();
6506   end_alias_analysis ();
6507 }
6508 
6509 
6510 /* Entry point for jump bypassing optimization pass.  */
6511 
6512 static int
6513 bypass_jumps (void)
6514 {
6515   int changed;
6516 
6517   /* We do not construct an accurate cfg in functions which call
6518      setjmp, so just punt to be safe.  */
6519   if (current_function_calls_setjmp)
6520     return 0;
6521 
6522   /* Identify the basic block information for this function, including
6523      successors and predecessors.  */
6524   max_gcse_regno = max_reg_num ();
6525 
6526   if (dump_file)
6527     dump_flow_info (dump_file, dump_flags);
6528 
6529   /* Return if there's nothing to do, or it is too expensive.  */
6530   if (n_basic_blocks <= NUM_FIXED_BLOCKS + 1
6531       || is_too_expensive (_ ("jump bypassing disabled")))
6532     return 0;
6533 
6534   gcc_obstack_init (&gcse_obstack);
6535   bytes_used = 0;
6536 
6537   /* We need alias.  */
6538   init_alias_analysis ();
6539 
6540   /* Record where pseudo-registers are set.  This data is kept accurate
6541      during each pass.  ??? We could also record hard-reg information here
6542      [since it's unchanging], however it is currently done during hash table
6543      computation.
6544 
6545      It may be tempting to compute MEM set information here too, but MEM sets
6546      will be subject to code motion one day and thus we need to compute
6547      information about memory sets when we build the hash tables.  */
6548 
6549   alloc_reg_set_mem (max_gcse_regno);
6550   compute_sets ();
6551 
6552   max_gcse_regno = max_reg_num ();
6553   alloc_gcse_mem ();
6554   changed = one_cprop_pass (MAX_GCSE_PASSES + 2, true, true);
6555   free_gcse_mem ();
6556 
6557   if (dump_file)
6558     {
6559       fprintf (dump_file, "BYPASS of %s: %d basic blocks, ",
6560 	       current_function_name (), n_basic_blocks);
6561       fprintf (dump_file, "%d bytes\n\n", bytes_used);
6562     }
6563 
6564   obstack_free (&gcse_obstack, NULL);
6565   free_reg_set_mem ();
6566 
6567   /* We are finished with alias.  */
6568   end_alias_analysis ();
6569   allocate_reg_info (max_reg_num (), FALSE, FALSE);
6570 
6571   return changed;
6572 }
6573 
6574 /* Return true if the graph is too expensive to optimize. PASS is the
6575    optimization about to be performed.  */
6576 
6577 static bool
6578 is_too_expensive (const char *pass)
6579 {
6580   /* Trying to perform global optimizations on flow graphs which have
6581      a high connectivity will take a long time and is unlikely to be
6582      particularly useful.
6583 
6584      In normal circumstances a cfg should have about twice as many
6585      edges as blocks.  But we do not want to punish small functions
6586      which have a couple of switch statements.  Rather than simply
6587      thresholding the number of blocks, use something with a more
6588      graceful degradation.  */
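  /* For example, with this threshold a function with 1000 basic blocks
     may have up to 20000 + 4 * 1000 = 24000 edges, i.e. 24 edges per
     block, before the pass is disabled.  */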
6589   if (n_edges > 20000 + n_basic_blocks * 4)
6590     {
6591       warning (OPT_Wdisabled_optimization,
6592 	       "%s: %d basic blocks and %d edges/basic block",
6593 	       pass, n_basic_blocks, n_edges / n_basic_blocks);
6594 
6595       return true;
6596     }
6597 
6598   /* If allocating memory for the cprop bitmap would take up too much
6599      storage it's better just to disable the optimization.  */
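  /* The estimate is one sbitmap row per basic block, with one bit in
     each row for every pseudo register.  */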
6600   if ((n_basic_blocks
6601        * SBITMAP_SET_SIZE (max_reg_num ())
6602        * sizeof (SBITMAP_ELT_TYPE)) > MAX_GCSE_MEMORY)
6603     {
6604       warning (OPT_Wdisabled_optimization,
6605 	       "%s: %d basic blocks and %d registers",
6606 	       pass, n_basic_blocks, max_reg_num ());
6607 
6608       return true;
6609     }
6610 
6611   return false;
6612 }
6613 
6614 static bool
6615 gate_handle_jump_bypass (void)
6616 {
6617   return optimize > 0 && flag_gcse;
6618 }
6619 
6620 /* Perform jump bypassing and control flow optimizations.  */
6621 static unsigned int
6622 rest_of_handle_jump_bypass (void)
6623 {
6624   cleanup_cfg (CLEANUP_EXPENSIVE);
6625   reg_scan (get_insns (), max_reg_num ());
6626 
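  /* If any jumps were bypassed, the jump labels and the CFG are stale;
     rebuild them and remove the insns that have become dead.  */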
6627   if (bypass_jumps ())
6628     {
6629       rebuild_jump_labels (get_insns ());
6630       cleanup_cfg (CLEANUP_EXPENSIVE);
6631       delete_trivially_dead_insns (get_insns (), max_reg_num ());
6632     }
6633   return 0;
6634 }
6635 
6636 struct tree_opt_pass pass_jump_bypass =
6637 {
6638   "bypass",                             /* name */
6639   gate_handle_jump_bypass,              /* gate */
6640   rest_of_handle_jump_bypass,           /* execute */
6641   NULL,                                 /* sub */
6642   NULL,                                 /* next */
6643   0,                                    /* static_pass_number */
6644   TV_BYPASS,                            /* tv_id */
6645   0,                                    /* properties_required */
6646   0,                                    /* properties_provided */
6647   0,                                    /* properties_destroyed */
6648   0,                                    /* todo_flags_start */
6649   TODO_dump_func |
6650   TODO_ggc_collect | TODO_verify_flow,  /* todo_flags_finish */
6651   'G'                                   /* letter */
6652 };
6653 
6654 
6655 static bool
6656 gate_handle_gcse (void)
6657 {
6658   return optimize > 0 && flag_gcse;
6659 }
6660 
6661 
6662 static unsigned int
6663 rest_of_handle_gcse (void)
6664 {
6665   int save_csb, save_cfj;
6666   int tem2 = 0, tem;
6667 
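  /* Run the main GCSE pass and clean up after it.  gcse_main returns
     nonzero if it changed anything; that result decides below whether
     the jump optimizations must be re-run.  */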
6668   tem = gcse_main (get_insns ());
6669   rebuild_jump_labels (get_insns ());
6670   delete_trivially_dead_insns (get_insns (), max_reg_num ());
6671 
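  /* Save the CSE path-following flags and clear them; the CSE re-run
     below is only intended to clean up locally after gcse.  */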
6672   save_csb = flag_cse_skip_blocks;
6673   save_cfj = flag_cse_follow_jumps;
6674   flag_cse_skip_blocks = flag_cse_follow_jumps = 0;
6675 
6676   /* If -fexpensive-optimizations, re-run CSE to clean up things done
6677      by gcse.  */
6678   if (flag_expensive_optimizations)
6679     {
6680       timevar_push (TV_CSE);
6681       reg_scan (get_insns (), max_reg_num ());
6682       tem2 = cse_main (get_insns (), max_reg_num ());
6683       purge_all_dead_edges ();
6684       delete_trivially_dead_insns (get_insns (), max_reg_num ());
6685       timevar_pop (TV_CSE);
6686       cse_not_expected = !flag_rerun_cse_after_loop;
6687     }
6688 
6689   /* If gcse or cse altered any jumps, rerun jump optimizations to clean
6690      things up.  */
6691   if (tem || tem2)
6692     {
6693       timevar_push (TV_JUMP);
6694       rebuild_jump_labels (get_insns ());
6695       delete_dead_jumptables ();
6696       cleanup_cfg (CLEANUP_EXPENSIVE);
6697       timevar_pop (TV_JUMP);
6698     }
6699 
6700   flag_cse_skip_blocks = save_csb;
6701   flag_cse_follow_jumps = save_cfj;
6702   return 0;
6703 }
6704 
6705 struct tree_opt_pass pass_gcse =
6706 {
6707   "gcse1",                              /* name */
6708   gate_handle_gcse,                     /* gate */
6709   rest_of_handle_gcse,                  /* execute */
6710   NULL,                                 /* sub */
6711   NULL,                                 /* next */
6712   0,                                    /* static_pass_number */
6713   TV_GCSE,                              /* tv_id */
6714   0,                                    /* properties_required */
6715   0,                                    /* properties_provided */
6716   0,                                    /* properties_destroyed */
6717   0,                                    /* todo_flags_start */
6718   TODO_dump_func |
6719   TODO_verify_flow | TODO_ggc_collect,  /* todo_flags_finish */
6720   'G'                                   /* letter */
6721 };
6722 
6723 
6724 #include "gt-gcse.h"
6725