/* Partial redundancy elimination / Hoisting for RTL.
   Copyright (C) 1997-2021 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* TODO
   - reordering of memory allocation and freeing to be more space efficient
   - calc rough register pressure information and use the info to drive all
     kinds of code motion (including code hoisting) in a unified way.
*/

/* References searched while implementing this.

   Compilers: Principles, Techniques, and Tools
   Aho, Sethi, Ullman
   Addison-Wesley, 1988

   Global Optimization by Suppression of Partial Redundancies
   E. Morel, C. Renvoise
   Communications of the ACM, Vol. 22, Num. 2, Feb. 1979

   A Portable Machine-Independent Global Optimizer - Design and Measurements
   Frederick Chow
   Stanford Ph.D. thesis, Dec. 1983

   A Fast Algorithm for Code Movement Optimization
   D.M. Dhamdhere
   SIGPLAN Notices, Vol. 23, Num. 10, Oct. 1988

   A Solution to a Problem with Morel and Renvoise's
   Global Optimization by Suppression of Partial Redundancies
   K-H Drechsler, M.P. Stadel
   ACM TOPLAS, Vol. 10, Num. 4, Oct. 1988

   Practical Adaptation of the Global Optimization
   Algorithm of Morel and Renvoise
   D.M. Dhamdhere
   ACM TOPLAS, Vol. 13, Num. 2, Apr. 1991

   Efficiently Computing Static Single Assignment Form and the Control
   Dependence Graph
   R. Cytron, J. Ferrante, B.K. Rosen, M.N. Wegman, and F.K. Zadeck
   ACM TOPLAS, Vol. 13, Num. 4, Oct. 1991

   Lazy Code Motion
   J. Knoop, O. Ruthing, B. Steffen
   ACM SIGPLAN Notices Vol. 27, Num. 7, Jul. 1992, '92 Conference on PLDI

   What's In a Region?  Or Computing Control Dependence Regions in Near-Linear
   Time for Reducible Flow Control
   Thomas Ball
   ACM Letters on Programming Languages and Systems,
   Vol. 2, Num. 1-4, Mar-Dec 1993

   An Efficient Representation for Sparse Sets
   Preston Briggs, Linda Torczon
   ACM Letters on Programming Languages and Systems,
   Vol. 2, Num. 1-4, Mar-Dec 1993

   A Variation of Knoop, Ruthing, and Steffen's Lazy Code Motion
   K-H Drechsler, M.P. Stadel
   ACM SIGPLAN Notices, Vol. 28, Num. 5, May 1993

   Partial Dead Code Elimination
   J. Knoop, O. Ruthing, B. Steffen
   ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994

   Effective Partial Redundancy Elimination
   P. Briggs, K.D. Cooper
   ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994

   The Program Structure Tree: Computing Control Regions in Linear Time
   R. Johnson, D. Pearson, K. Pingali
   ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994

   Optimal Code Motion: Theory and Practice
   J. Knoop, O. Ruthing, B. Steffen
   ACM TOPLAS, Vol. 16, Num. 4, Jul. 1994

   The Power of Assignment Motion
   J. Knoop, O. Ruthing, B. Steffen
   ACM SIGPLAN Notices Vol. 30, Num. 6, Jun. 1995, '95 Conference on PLDI

   Global Code Motion / Global Value Numbering
   C. Click
   ACM SIGPLAN Notices Vol. 30, Num. 6, Jun. 1995, '95 Conference on PLDI

   Value Driven Redundancy Elimination
   L.T. Simpson
   Rice University Ph.D. thesis, Apr. 1996

   Value Numbering
   L.T. Simpson
   Massively Scalar Compiler Project, Rice University, Sep. 1996

   High Performance Compilers for Parallel Computing
   Michael Wolfe
   Addison-Wesley, 1996

   Advanced Compiler Design and Implementation
   Steven Muchnick
   Morgan Kaufmann, 1997

   Building an Optimizing Compiler
   Robert Morgan
   Digital Press, 1998

   People wishing to speed up the code here should read:
     Elimination Algorithms for Data Flow Analysis
     B.G. Ryder, M.C. Paull
     ACM Computing Surveys, Vol. 18, Num. 3, Sep. 1986

     How to Analyze Large Programs Efficiently and Informatively
     D.M. Dhamdhere, B.K. Rosen, F.K. Zadeck
     ACM SIGPLAN Notices Vol. 27, Num. 7, Jul. 1992, '92 Conference on PLDI

   People wishing to do something different can find various possibilities
   in the above papers and elsewhere.
*/

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "predict.h"
#include "df.h"
#include "memmodel.h"
#include "tm_p.h"
#include "insn-config.h"
#include "print-rtl.h"
#include "regs.h"
#include "ira.h"
#include "recog.h"
#include "diagnostic-core.h"
#include "cfgrtl.h"
#include "cfganal.h"
#include "lcm.h"
#include "cfgcleanup.h"
#include "expr.h"
#include "intl.h"
#include "tree-pass.h"
#include "dbgcnt.h"
#include "gcse.h"
#include "gcse-common.h"
#include "function-abi.h"

/* We support GCSE via Partial Redundancy Elimination.  PRE optimizations
   are a superset of those done by classic GCSE.

   Two passes of copy/constant propagation are done around PRE or hoisting
   because the first one enables more GCSE and the second one helps to clean
   up the copies that PRE and HOIST create.  This is needed more for PRE than
   for HOIST because code hoisting will try to use an existing register
   containing the common subexpression rather than create a new one.  This is
   harder to do for PRE because of the code motion (which HOIST doesn't do).

   Expressions we are interested in GCSE-ing are of the form
   (set (pseudo-reg) (expression)).
   Function want_to_gcse_p says what these are.

   In addition, expressions in REG_EQUAL notes are candidates for GCSE-ing.
   This allows PRE to hoist expressions that are expressed in multiple insns,
   such as complex address calculations (e.g. for PIC code, or loads with a
   high part and a low part).

   PRE handles moving invariant expressions out of loops (by treating them as
   partially redundant).

   **********************

   We used to support multiple passes but there are diminishing returns in
   doing so.  The first pass usually makes 90% of the changes that are doable.
   A second pass can make a few more changes made possible by the first pass.
   Experiments show any further passes don't make enough changes to justify
   the expense.

   A study of spec92 using an unlimited number of passes:
   [1 pass] = 1208 substitutions, [2] = 577, [3] = 202, [4] = 192, [5] = 83,
   [6] = 34, [7] = 17, [8] = 9, [9] = 4, [10] = 4, [11] = 2,
   [12] = 2, [13] = 1, [15] = 1, [16] = 2, [41] = 1

   It was also found that doing copy propagation between passes enables
   further substitutions.

   This study was done before expressions in REG_EQUAL notes were added as
   candidate expressions for optimization, and before the GIMPLE optimizers
   were added.  Multiple passes are probably even less beneficial now than
   they were at the time the study was conducted.

   PRE is quite expensive in complicated functions because the DFA can take
   a while to converge.  Hence we only perform one pass.

   **********************

   The steps for PRE are:

   1) Build the hash table of expressions we wish to GCSE (expr_hash_table).

   2) Perform the data flow analysis for PRE.

   3) Delete the redundant instructions.

   4) Insert the required copies [if any] that make the partially
      redundant instructions fully redundant.

   5) For other reaching expressions, insert an instruction to copy the value
      to a newly created pseudo that will reach the redundant instruction.

   The deletion is done first so that when we do insertions we
   know which pseudo reg to use.

   Various papers have argued that PRE DFA is expensive (O(n^2)) and others
   argue it is not.  The number of iterations for the algorithm to converge
   is typically 2-4 so I don't view it as that expensive (relatively speaking).

   PRE GCSE depends heavily on the second CPROP pass to clean up the copies
   we create.  To make an expression reach the place where it's redundant,
   the result of the expression is copied to a new register, and the redundant
   expression is deleted by replacing it with this new register.  Classic GCSE
   doesn't have this problem as much, since it computes the reaching defs of
   each register in each block and thus can try to use an existing
   register.  */
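
/* As a schematic illustration of steps 3-5 above (not lifted from any
   particular testcase), consider a diamond where "a + b" is computed
   in the THEN arm and again after the join:

	if (cond) { x = a + b; }  ...  y = a + b;

   The second computation is partially redundant.  PRE deletes it,
   copies the expression's result to a new pseudo T where it is already
   computed, and inserts "t = a + b" on the ELSE edge, so the deleted
   insn can be replaced by "y = t", a copy from the reaching register.
   The second CPROP pass then cleans up the copies this creates.  */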

/* GCSE global vars.  */

struct target_gcse default_target_gcse;
#if SWITCHABLE_TARGET
struct target_gcse *this_target_gcse = &default_target_gcse;
#endif

/* Set to non-zero if CSE should run after all GCSE optimizations are done.  */
int flag_rerun_cse_after_global_opts;

/* An obstack for our working variables.  */
static struct obstack gcse_obstack;

/* Hash table of expressions.  */

struct gcse_expr
{
  /* The expression.  */
  rtx expr;
  /* Index in the available expression bitmaps.  */
  int bitmap_index;
  /* Next entry with the same hash.  */
  struct gcse_expr *next_same_hash;
  /* List of anticipatable occurrences in basic blocks in the function.
     An "anticipatable occurrence" is one that is the first occurrence in the
     basic block, the operands are not modified in the basic block prior
     to the occurrence, and the output is not used between the start of
     the block and the occurrence.  */
  struct gcse_occr *antic_occr;
  /* List of available occurrences in basic blocks in the function.
     An "available occurrence" is one that is the last occurrence in the
     basic block and the operands are not modified by following statements in
     the basic block [including this insn].  */
  struct gcse_occr *avail_occr;
  /* Non-null if the computation is PRE redundant.
     The value is the newly created pseudo-reg to record a copy of the
     expression in all the places that reach the redundant copy.  */
  rtx reaching_reg;
  /* Maximum distance in instructions this expression can travel.
     We avoid moving simple expressions for more than a few instructions
     to keep register pressure under control.
     A value of "0" removes restrictions on how far the expression can
     travel.  */
  HOST_WIDE_INT max_distance;
};
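
/* For example, in a block containing

	(set (reg 1) (plus (reg 2) (reg 3)))
	(set (reg 2) ...)
	(set (reg 4) (plus (reg 2) (reg 3)))

   the first set is an anticipatable occurrence of (plus (reg 2) (reg 3))
   and the last set is an available occurrence: the operands are
   unmodified before the first occurrence and after the last one.  */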

/* Occurrence of an expression.
   There is one per basic block.  If a pattern appears more than once, the
   last appearance is used [or the first, for anticipatable expressions].  */

struct gcse_occr
{
  /* Next occurrence of this expression.  */
  struct gcse_occr *next;
  /* The insn that computes the expression.  */
  rtx_insn *insn;
  /* Nonzero if this [anticipatable] occurrence has been deleted.  */
  char deleted_p;
  /* Nonzero if this [available] occurrence has been copied to
     reaching_reg.  */
  /* ??? This is mutually exclusive with deleted_p, so they could share
     the same byte.  */
  char copied_p;
};

typedef struct gcse_occr *occr_t;

/* Expression hash tables.
   Each hash table is an array of buckets.
   ??? It is known that if it were an array of entries, structure elements
   `next_same_hash' and `bitmap_index' wouldn't be necessary.  However, it is
   not clear whether in the final analysis a sufficient amount of memory would
   be saved as the size of the available expression bitmaps would be larger
   [one could build a mapping table without holes afterwards though].
   Someday I'll perform the computation and figure it out.  */

struct gcse_hash_table_d
{
  /* The table itself.
     This is an array of `expr_hash_table_size' elements.  */
  struct gcse_expr **table;

  /* Size of the hash table, in elements.  */
  unsigned int size;

  /* Number of hash table elements.  */
  unsigned int n_elems;
};

/* Expression hash table.  */
static struct gcse_hash_table_d expr_hash_table;

/* This is a list of expressions which are MEMs and will be used by load
   or store motion.
   Load motion tracks MEMs which aren't killed by anything except itself,
   i.e. loads and stores to a single location.
   We can then allow movement of these MEM refs with a little special
   allowance.  (All stores copy the same value to the reaching reg used
   for the loads.)  This means all values used to store into memory must have
   no side effects so we can re-issue the setter value.  */
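
/* Roughly, a MEM qualifies if nothing in the function kills it except
   loads and stores of that same location; e.g. repeated loads of *P
   with only same-location stores in between can all be rewritten to
   use one reaching register.  */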

struct ls_expr
{
  struct gcse_expr * expr;	/* Gcse expression reference for LM.  */
  rtx pattern;			/* Pattern of this mem.  */
  rtx pattern_regs;		/* List of registers mentioned by the mem.  */
  vec<rtx_insn *> stores;	/* INSN list of stores seen.  */
  struct ls_expr * next;	/* Next in the list.  */
  int invalid;			/* Invalid for some reason.  */
  int index;			/* If it maps to a bitmap index.  */
  unsigned int hash_index;	/* Index when in a hash table.  */
  rtx reaching_reg;		/* Register to use when re-writing.  */
};

/* Head of the list of load/store memory refs.  */
static struct ls_expr * pre_ldst_mems = NULL;

struct pre_ldst_expr_hasher : nofree_ptr_hash <ls_expr>
{
  typedef value_type compare_type;
  static inline hashval_t hash (const ls_expr *);
  static inline bool equal (const ls_expr *, const ls_expr *);
};

/* Hashtable helpers.  */
inline hashval_t
pre_ldst_expr_hasher::hash (const ls_expr *x)
{
  int do_not_record_p = 0;
  return
    hash_rtx (x->pattern, GET_MODE (x->pattern), &do_not_record_p, NULL, false);
}

static int expr_equiv_p (const_rtx, const_rtx);

inline bool
pre_ldst_expr_hasher::equal (const ls_expr *ptr1,
			     const ls_expr *ptr2)
{
  return expr_equiv_p (ptr1->pattern, ptr2->pattern);
}

/* Hashtable for the load/store memory refs.  */
static hash_table<pre_ldst_expr_hasher> *pre_ldst_table;

/* Bitmap containing one bit for each register in the program.
   Used when performing GCSE to track which registers have been set since
   the start of the basic block.  */
static regset reg_set_bitmap;

/* Array, indexed by basic block number, of lists of insns which modify
   memory within that block.  */
static vec<rtx_insn *> *modify_mem_list;
static bitmap modify_mem_list_set;

/* This array parallels modify_mem_list, except that it stores MEMs
   being set and their canonicalized memory addresses.  */
static vec<modify_pair> *canon_modify_mem_list;

/* Bitmap indexed by block numbers to record which blocks contain
   function calls.  */
static bitmap blocks_with_calls;

/* Various variables for statistics gathering.  */

/* Memory used in a pass.
   This isn't intended to be absolutely precise.  Its intent is only
   to keep an eye on memory usage.  */
static int bytes_used;

/* GCSE substitutions made.  */
static int gcse_subst_count;
/* Number of copy instructions created.  */
static int gcse_create_count;

/* Doing code hoisting.  */
static bool doing_code_hoisting_p = false;

/* For available exprs.  */
static sbitmap *ae_kill;

/* Data stored for each basic block.  */
struct bb_data
{
  /* Maximal register pressure inside basic block for given register class
     (defined only for the pressure classes).  */
  int max_reg_pressure[N_REG_CLASSES];
  /* Recorded register pressure of basic block before trying to hoist
     an expression.  Will be used to restore the register pressure
     if the expression should not be hoisted.  */
  int old_pressure;
  /* Recorded register live_in info of basic block during code hoisting
     process.  BACKUP is used to record live_in info before trying to
     hoist an expression, and will be used to restore LIVE_IN if the
     expression should not be hoisted.  */
  bitmap live_in, backup;
};

#define BB_DATA(bb) ((struct bb_data *) (bb)->aux)

static basic_block curr_bb;

/* Current register pressure for each pressure class.  */
static int curr_reg_pressure[N_REG_CLASSES];


static void compute_can_copy (void);
static void *gmalloc (size_t) ATTRIBUTE_MALLOC;
static void *gcalloc (size_t, size_t) ATTRIBUTE_MALLOC;
static void *gcse_alloc (unsigned long);
static void alloc_gcse_mem (void);
static void free_gcse_mem (void);
static void hash_scan_insn (rtx_insn *, struct gcse_hash_table_d *);
static void hash_scan_set (rtx, rtx_insn *, struct gcse_hash_table_d *);
static void hash_scan_clobber (rtx, rtx_insn *, struct gcse_hash_table_d *);
static void hash_scan_call (rtx, rtx_insn *, struct gcse_hash_table_d *);
static int oprs_unchanged_p (const_rtx, const rtx_insn *, int);
static int oprs_anticipatable_p (const_rtx, const rtx_insn *);
static int oprs_available_p (const_rtx, const rtx_insn *);
static void insert_expr_in_table (rtx, machine_mode, rtx_insn *, int, int,
				  HOST_WIDE_INT, struct gcse_hash_table_d *);
static unsigned int hash_expr (const_rtx, machine_mode, int *, int);
static void record_last_reg_set_info (rtx_insn *, int);
static void record_last_mem_set_info (rtx_insn *);
static void record_last_set_info (rtx, const_rtx, void *);
static void compute_hash_table (struct gcse_hash_table_d *);
static void alloc_hash_table (struct gcse_hash_table_d *);
static void free_hash_table (struct gcse_hash_table_d *);
static void compute_hash_table_work (struct gcse_hash_table_d *);
static void dump_hash_table (FILE *, const char *, struct gcse_hash_table_d *);
static void compute_local_properties (sbitmap *, sbitmap *, sbitmap *,
				      struct gcse_hash_table_d *);
static void mems_conflict_for_gcse_p (rtx, const_rtx, void *);
static int load_killed_in_block_p (const_basic_block, int, const_rtx, int);
static void alloc_pre_mem (int, int);
static void free_pre_mem (void);
static struct edge_list *compute_pre_data (void);
static int pre_expr_reaches_here_p (basic_block, struct gcse_expr *,
				    basic_block);
static void insert_insn_end_basic_block (struct gcse_expr *, basic_block);
static void pre_insert_copy_insn (struct gcse_expr *, rtx_insn *);
static void pre_insert_copies (void);
static int pre_delete (void);
static int pre_gcse (struct edge_list *);
static int one_pre_gcse_pass (void);
static void add_label_notes (rtx, rtx_insn *);
static void alloc_code_hoist_mem (int, int);
static void free_code_hoist_mem (void);
static void compute_code_hoist_vbeinout (void);
static void compute_code_hoist_data (void);
static int should_hoist_expr_to_dom (basic_block, struct gcse_expr *,
				     basic_block,
				     sbitmap, HOST_WIDE_INT, int *,
				     enum reg_class,
				     int *, bitmap, rtx_insn *);
static int hoist_code (void);
static enum reg_class get_regno_pressure_class (int regno, int *nregs);
static enum reg_class get_pressure_class_and_nregs (rtx_insn *insn, int *nregs);
static int one_code_hoisting_pass (void);
static rtx_insn *process_insert_insn (struct gcse_expr *);
static int pre_edge_insert (struct edge_list *, struct gcse_expr **);
static int pre_expr_reaches_here_p_work (basic_block, struct gcse_expr *,
					 basic_block, char *);
static struct ls_expr * ldst_entry (rtx);
static void free_ldst_entry (struct ls_expr *);
static void free_ld_motion_mems (void);
static void print_ldst_list (FILE *);
static struct ls_expr * find_rtx_in_ldst (rtx);
static int simple_mem (const_rtx);
static void invalidate_any_buried_refs (rtx);
static void compute_ld_motion_mems (void);
static void trim_ld_motion_mems (void);
static void update_ld_motion_stores (struct gcse_expr *);
static void clear_modify_mem_tables (void);
static void free_modify_mem_tables (void);

#define GNEW(T)			((T *) gmalloc (sizeof (T)))
#define GCNEW(T)		((T *) gcalloc (1, sizeof (T)))

#define GNEWVEC(T, N)		((T *) gmalloc (sizeof (T) * (N)))
#define GCNEWVEC(T, N)		((T *) gcalloc ((N), sizeof (T)))

#define GNEWVAR(T, S)		((T *) gmalloc ((S)))
#define GCNEWVAR(T, S)		((T *) gcalloc (1, (S)))

#define GOBNEW(T)		((T *) gcse_alloc (sizeof (T)))
#define GOBNEWVAR(T, S)		((T *) gcse_alloc ((S)))

/* Misc. utilities.  */

#define can_copy \
  (this_target_gcse->x_can_copy)
#define can_copy_init_p \
  (this_target_gcse->x_can_copy_init_p)

/* Compute which modes support reg/reg copy operations.  */

static void
compute_can_copy (void)
{
  int i;
#ifndef AVOID_CCMODE_COPIES
  rtx reg;
  rtx_insn *insn;
#endif
  memset (can_copy, 0, NUM_MACHINE_MODES);

  start_sequence ();
  for (i = 0; i < NUM_MACHINE_MODES; i++)
    if (GET_MODE_CLASS (i) == MODE_CC)
      {
#ifdef AVOID_CCMODE_COPIES
	can_copy[i] = 0;
#else
	reg = gen_rtx_REG ((machine_mode) i, LAST_VIRTUAL_REGISTER + 1);
	insn = emit_insn (gen_rtx_SET (reg, reg));
	if (recog (PATTERN (insn), insn, NULL) >= 0)
	  can_copy[i] = 1;
#endif
      }
    else
      can_copy[i] = 1;

  end_sequence ();
}

/* Returns whether the mode supports reg/reg copy operations.  */

bool
can_copy_p (machine_mode mode)
{
  if (! can_copy_init_p)
    {
      compute_can_copy ();
      can_copy_init_p = true;
    }

  return can_copy[mode] != 0;
}
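
/* For instance, hash_scan_set below guards every candidate with

	can_copy_p (GET_MODE (dest))

   so that no expression is recorded in a mode (typically a CC mode)
   whose values cannot be copied between registers.  */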

/* Cover function to xmalloc to record bytes allocated.  */

static void *
gmalloc (size_t size)
{
  bytes_used += size;
  return xmalloc (size);
}

/* Cover function to xcalloc to record bytes allocated.  */

static void *
gcalloc (size_t nelem, size_t elsize)
{
  bytes_used += nelem * elsize;
  return xcalloc (nelem, elsize);
}

/* Cover function to obstack_alloc.  */

static void *
gcse_alloc (unsigned long size)
{
  bytes_used += size;
  return obstack_alloc (&gcse_obstack, size);
}

/* Allocate memory for the reg/memory set tracking tables.
   This is called at the start of each pass.  */

static void
alloc_gcse_mem (void)
{
  /* Allocate vars to track sets of regs.  */
  reg_set_bitmap = ALLOC_REG_SET (NULL);

  /* Allocate array to keep a list of insns which modify memory in each
     basic block.  The two typedefs are needed to work around the
     pre-processor limitation with template types in macro arguments.  */
  typedef vec<rtx_insn *> vec_rtx_heap;
  typedef vec<modify_pair> vec_modify_pair_heap;
  modify_mem_list = GCNEWVEC (vec_rtx_heap, last_basic_block_for_fn (cfun));
  canon_modify_mem_list = GCNEWVEC (vec_modify_pair_heap,
				    last_basic_block_for_fn (cfun));
  modify_mem_list_set = BITMAP_ALLOC (NULL);
  blocks_with_calls = BITMAP_ALLOC (NULL);
}

/* Free memory allocated by alloc_gcse_mem.  */

static void
free_gcse_mem (void)
{
  FREE_REG_SET (reg_set_bitmap);

  free_modify_mem_tables ();
  BITMAP_FREE (modify_mem_list_set);
  BITMAP_FREE (blocks_with_calls);
}

/* Compute the local properties of each recorded expression.

   Local properties are those that are defined by the block, irrespective of
   other blocks.

   An expression is transparent in a block if its operands are not modified
   in the block.

   An expression is computed (locally available) in a block if it is computed
   at least once and the expression would contain the same value if the
   computation were moved to the end of the block.

   An expression is locally anticipatable in a block if it is computed at
   least once and the expression would contain the same value if the
   computation were moved to the beginning of the block.

   We call this routine for PRE and code hoisting.  Both compute
   basically the same information and thus can easily share this code.

   TRANSP, COMP, and ANTLOC are destination sbitmaps for recording local
   properties.  If NULL, then it is not necessary to compute or record that
   particular property.

   TABLE controls which hash table to look at.  */
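
/* For instance, in a block

	(set (reg 1) (plus (reg 2) (reg 3)))
	(set (reg 2) ...)

   (plus (reg 2) (reg 3)) is locally anticipatable (computed first, with
   operands intact at the block's start) but neither transparent nor
   computed: reg 2 is modified within the block, so the value would
   differ at the block's end.  */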

static void
compute_local_properties (sbitmap *transp, sbitmap *comp, sbitmap *antloc,
			  struct gcse_hash_table_d *table)
{
  unsigned int i;

  /* Initialize any bitmaps that were passed in.  */
  if (transp)
    {
      bitmap_vector_ones (transp, last_basic_block_for_fn (cfun));
    }

  if (comp)
    bitmap_vector_clear (comp, last_basic_block_for_fn (cfun));
  if (antloc)
    bitmap_vector_clear (antloc, last_basic_block_for_fn (cfun));

  for (i = 0; i < table->size; i++)
    {
      struct gcse_expr *expr;

      for (expr = table->table[i]; expr != NULL; expr = expr->next_same_hash)
	{
	  int indx = expr->bitmap_index;
	  struct gcse_occr *occr;

	  /* The expression is transparent in this block if it is not killed.
	     We start by assuming all are transparent [none are killed], and
	     then reset the bits for those that are.  */
	  if (transp)
	    compute_transp (expr->expr, indx, transp,
			    blocks_with_calls,
			    modify_mem_list_set,
			    canon_modify_mem_list);

	  /* The occurrences recorded in antic_occr are exactly those that
	     we want to set to nonzero in ANTLOC.  */
	  if (antloc)
	    for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
	      {
		bitmap_set_bit (antloc[BLOCK_FOR_INSN (occr->insn)->index], indx);

		/* While we're scanning the table, this is a good place to
		   initialize this.  */
		occr->deleted_p = 0;
	      }

	  /* The occurrences recorded in avail_occr are exactly those that
	     we want to set to nonzero in COMP.  */
	  if (comp)
	    for (occr = expr->avail_occr; occr != NULL; occr = occr->next)
	      {
		bitmap_set_bit (comp[BLOCK_FOR_INSN (occr->insn)->index], indx);

		/* While we're scanning the table, this is a good place to
		   initialize this.  */
		occr->copied_p = 0;
	      }

	  /* While we're scanning the table, this is a good place to
	     initialize this.  */
	  expr->reaching_reg = 0;
	}
    }
}

/* Hash table support.  */

struct reg_avail_info
{
  basic_block last_bb;
  int first_set;
  int last_set;
};

static struct reg_avail_info *reg_avail_info;
static basic_block current_bb;

/* See whether X, the source of a set, is something we want to consider for
   GCSE.  */

static int
want_to_gcse_p (rtx x, machine_mode mode, HOST_WIDE_INT *max_distance_ptr)
{
#ifdef STACK_REGS
  /* On register stack architectures, don't GCSE constants from the
     constant pool, as the benefits are often swamped by the overhead
     of shuffling the register stack between basic blocks.  */
  if (IS_STACK_MODE (GET_MODE (x)))
    x = avoid_constant_pool_reference (x);
#endif

  /* GCSE'ing constants:

     We do not specifically distinguish between constant and non-constant
     expressions in PRE and Hoist.  We use set_src_cost below to limit
     the maximum distance simple expressions can travel.

     Nevertheless, constants are much easier to GCSE, and, hence,
     it is easy to overdo the optimization.  Usually, excessive PRE and
     hoisting of constants leads to increased register pressure.

     RA can deal with this by rematerializing some of the constants.
     Therefore, it is important that the back-end generates sets of constants
     in a way that allows reload to rematerialize them under high register
     pressure, i.e., a pseudo register with REG_EQUAL to constant
     is set only once.  Failing to do so will result in IRA/reload
     spilling such constants under high register pressure instead of
     rematerializing them.  */

  switch (GET_CODE (x))
    {
    case REG:
    case SUBREG:
    case CALL:
      return 0;

    CASE_CONST_ANY:
      if (!doing_code_hoisting_p)
	/* Do not PRE constants.  */
	return 0;

      /* FALLTHRU */

    default:
      if (doing_code_hoisting_p)
	/* PRE doesn't implement max_distance restriction.  */
	{
	  int cost;
	  HOST_WIDE_INT max_distance;

	  gcc_assert (!optimize_function_for_speed_p (cfun)
		      && optimize_function_for_size_p (cfun));
	  cost = set_src_cost (x, mode, 0);

	  if (cost < COSTS_N_INSNS (param_gcse_unrestricted_cost))
	    {
	      max_distance
		= ((HOST_WIDE_INT)param_gcse_cost_distance_ratio * cost) / 10;
	      if (max_distance == 0)
		return 0;

	      gcc_assert (max_distance > 0);
	    }
	  else
	    max_distance = 0;

	  if (max_distance_ptr)
	    *max_distance_ptr = max_distance;
	}

      return can_assign_to_reg_without_clobbers_p (x, mode);
    }
}
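
/* A worked example of the distance computation above, assuming the
   documented parameter defaults param_gcse_unrestricted_cost == 3 and
   param_gcse_cost_distance_ratio == 10: an expression whose
   set_src_cost is COSTS_N_INSNS (1) is below the unrestricted-cost
   threshold, so max_distance = (10 * cost) / 10 = cost, while an
   expression costing COSTS_N_INSNS (3) or more gets max_distance = 0,
   i.e. unrestricted travel.  */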

/* Used internally by can_assign_to_reg_without_clobbers_p.  */

static GTY(()) rtx_insn *test_insn;

/* Return true if we can assign X to a pseudo register of mode MODE
   such that the resulting insn does not result in clobbering a hard
   register as a side-effect.

   Additionally, if the target requires it, check that the resulting insn
   can be copied.  If it cannot, this means that X is special and probably
   has hidden side-effects we don't want to mess with.

   This function is typically used by code motion passes, to verify
   that it is safe to insert an insn without worrying about clobbering
   maybe live hard regs.  */

bool
can_assign_to_reg_without_clobbers_p (rtx x, machine_mode mode)
{
  int num_clobbers = 0;
  int icode;
  bool can_assign = false;

  /* If this is a valid operand, we are OK.  If it's VOIDmode, we aren't.  */
  if (general_operand (x, mode))
    return true;
  else if (GET_MODE (x) == VOIDmode)
    return false;

  /* Otherwise, check if we can make a valid insn from it.  First initialize
     our test insn if we haven't already.  */
  if (test_insn == 0)
    {
      test_insn
	= make_insn_raw (gen_rtx_SET (gen_rtx_REG (word_mode,
						   FIRST_PSEUDO_REGISTER * 2),
				      const0_rtx));
      SET_NEXT_INSN (test_insn) = SET_PREV_INSN (test_insn) = 0;
      INSN_LOCATION (test_insn) = UNKNOWN_LOCATION;
    }

  /* Now make an insn like the one we would make when GCSE'ing and see if
     valid.  */
  PUT_MODE (SET_DEST (PATTERN (test_insn)), mode);
  SET_SRC (PATTERN (test_insn)) = x;

  icode = recog (PATTERN (test_insn), test_insn, &num_clobbers);

  /* If the test insn is valid and doesn't need clobbers, and the target also
     has no objections, we're good.  */
  if (icode >= 0
      && (num_clobbers == 0 || !added_clobbers_hard_reg_p (icode))
      && ! (targetm.cannot_copy_insn_p
	    && targetm.cannot_copy_insn_p (test_insn)))
    can_assign = true;

  /* Make sure test_insn doesn't have any pointers into GC space.  */
  SET_SRC (PATTERN (test_insn)) = NULL_RTX;

  return can_assign;
}
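
/* A typical use is the want_to_gcse_p check above:

	return can_assign_to_reg_without_clobbers_p (x, mode);

   Reusing the single cached test_insn means a query allocates no new
   pseudo; only SET_SRC and the destination's mode are rewritten on
   each call.  */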

/* Return nonzero if the operands of expression X are unchanged from the
   start of INSN's basic block up to but not including INSN (if AVAIL_P == 0),
   or from INSN to the end of INSN's basic block (if AVAIL_P != 0).  */

static int
oprs_unchanged_p (const_rtx x, const rtx_insn *insn, int avail_p)
{
  int i, j;
  enum rtx_code code;
  const char *fmt;

  if (x == 0)
    return 1;

  code = GET_CODE (x);
  switch (code)
    {
    case REG:
      {
	struct reg_avail_info *info = &reg_avail_info[REGNO (x)];

	if (info->last_bb != current_bb)
	  return 1;
	if (avail_p)
	  return info->last_set < DF_INSN_LUID (insn);
	else
	  return info->first_set >= DF_INSN_LUID (insn);
      }

    case MEM:
      if (! flag_gcse_lm
	  || load_killed_in_block_p (current_bb, DF_INSN_LUID (insn),
				     x, avail_p))
	return 0;
      else
	return oprs_unchanged_p (XEXP (x, 0), insn, avail_p);

    case PRE_DEC:
    case PRE_INC:
    case POST_DEC:
    case POST_INC:
    case PRE_MODIFY:
    case POST_MODIFY:
      return 0;

    case PC:
    case CC0: /*FIXME*/
    case CONST:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case LABEL_REF:
    case ADDR_VEC:
    case ADDR_DIFF_VEC:
      return 1;

    default:
      break;
    }

  for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
    {
      if (fmt[i] == 'e')
	{
	  /* If we are about to do the last recursive call needed at this
	     level, change it into iteration.  This function is called enough
	     to be worth it.  */
	  if (i == 0)
	    return oprs_unchanged_p (XEXP (x, i), insn, avail_p);

	  else if (! oprs_unchanged_p (XEXP (x, i), insn, avail_p))
	    return 0;
	}
      else if (fmt[i] == 'E')
	for (j = 0; j < XVECLEN (x, i); j++)
	  if (! oprs_unchanged_p (XVECEXP (x, i, j), insn, avail_p))
	    return 0;
    }

  return 1;
}

/* Info passed from load_killed_in_block_p to mems_conflict_for_gcse_p.  */

struct mem_conflict_info
{
  /* A memory reference for a load instruction, mems_conflict_for_gcse_p will
     see if a memory store conflicts with this memory load.  */
  const_rtx mem;

  /* True if mems_conflict_for_gcse_p finds a conflict between two memory
     references.  */
  bool conflict;
};

/* DEST is the output of an instruction.  If it is a memory reference and
   possibly conflicts with the load found in DATA, then communicate this
   information back through DATA.  */

static void
mems_conflict_for_gcse_p (rtx dest, const_rtx setter ATTRIBUTE_UNUSED,
			  void *data)
{
  struct mem_conflict_info *mci = (struct mem_conflict_info *) data;

  while (GET_CODE (dest) == SUBREG
	 || GET_CODE (dest) == ZERO_EXTRACT
	 || GET_CODE (dest) == STRICT_LOW_PART)
    dest = XEXP (dest, 0);

  /* If DEST is not a MEM, then it will not conflict with the load.  Note
     that function calls are assumed to clobber memory, but are handled
     elsewhere.  */
  if (! MEM_P (dest))
    return;

  /* If we are setting a MEM in our list of specially recognized MEMs,
     don't mark as killed this time.  */
  if (pre_ldst_mems != NULL && expr_equiv_p (dest, mci->mem))
    {
      if (!find_rtx_in_ldst (dest))
	mci->conflict = true;
      return;
    }

  if (true_dependence (dest, GET_MODE (dest), mci->mem))
    mci->conflict = true;
}

/* Return nonzero if the expression in X (a memory reference) is killed
   in block BB before or after the insn with the LUID in UID_LIMIT.
   AVAIL_P is nonzero for kills after UID_LIMIT, and zero for kills
   before UID_LIMIT.

   To check the entire block, set UID_LIMIT to max_uid + 1 and
   AVAIL_P to 0.  */

static int
load_killed_in_block_p (const_basic_block bb, int uid_limit, const_rtx x,
			int avail_p)
{
  vec<rtx_insn *> list = modify_mem_list[bb->index];
  rtx_insn *setter;
  unsigned ix;

  /* If this is a read-only MEM, then we aren't going to be changing it.  */
  if (MEM_READONLY_P (x))
    return 0;

  FOR_EACH_VEC_ELT_REVERSE (list, ix, setter)
    {
      struct mem_conflict_info mci;

      /* Ignore entries in the list that do not apply.  */
      if ((avail_p
	   && DF_INSN_LUID (setter) < uid_limit)
	  || (! avail_p
	      && DF_INSN_LUID (setter) > uid_limit))
	continue;

      /* If SETTER is a call everything is clobbered.  Note that calls
	 to pure functions are never put on the list, so we need not
	 worry about them.  */
      if (CALL_P (setter))
	return 1;

      /* SETTER must be an INSN of some kind that sets memory.  Call
	 note_stores to examine each hunk of memory that is modified.  */
      mci.mem = x;
      mci.conflict = false;
      note_stores (setter, mems_conflict_for_gcse_p, &mci);
      if (mci.conflict)
	return 1;
    }
  return 0;
}

/* Return nonzero if the operands of expression X are unchanged from
   the start of INSN's basic block up to but not including INSN.  */

static int
oprs_anticipatable_p (const_rtx x, const rtx_insn *insn)
{
  return oprs_unchanged_p (x, insn, 0);
}

/* Return nonzero if the operands of expression X are unchanged from
   INSN to the end of INSN's basic block.  */

static int
oprs_available_p (const_rtx x, const rtx_insn *insn)
{
  return oprs_unchanged_p (x, insn, 1);
}

/* Hash expression X.

   MODE is only used if X is a CONST_INT.  DO_NOT_RECORD_P is a boolean
   indicating if a volatile operand is found or if the expression contains
   something we don't want to insert in the table.  HASH_TABLE_SIZE is
   the current size of the hash table to be probed.  */

static unsigned int
hash_expr (const_rtx x, machine_mode mode, int *do_not_record_p,
	   int hash_table_size)
{
  unsigned int hash;

  *do_not_record_p = 0;

  hash = hash_rtx (x, mode, do_not_record_p, NULL, /*have_reg_qty=*/false);
  return hash % hash_table_size;
}

/* Return nonzero if exp1 is equivalent to exp2.  */

static int
expr_equiv_p (const_rtx x, const_rtx y)
{
  return exp_equiv_p (x, y, 0, true);
}

/* Insert expression X in INSN in the hash TABLE.
   If it is already present, record it as the last occurrence in INSN's
   basic block.

   MODE is the mode of the value X is being stored into.
   It is only used if X is a CONST_INT.

   ANTIC_P is nonzero if X is an anticipatable expression.
   AVAIL_P is nonzero if X is an available expression.

   MAX_DISTANCE is the maximum distance in instructions this expression can
   be moved.  */

static void
insert_expr_in_table (rtx x, machine_mode mode, rtx_insn *insn,
		      int antic_p,
		      int avail_p, HOST_WIDE_INT max_distance,
		      struct gcse_hash_table_d *table)
{
  int found, do_not_record_p;
  unsigned int hash;
  struct gcse_expr *cur_expr, *last_expr = NULL;
  struct gcse_occr *antic_occr, *avail_occr;

  hash = hash_expr (x, mode, &do_not_record_p, table->size);

  /* Do not insert expression in table if it contains volatile operands,
     or if hash_expr determines the expression is something we don't want
     to or can't handle.  */
  if (do_not_record_p)
    return;

  cur_expr = table->table[hash];
  found = 0;

  while (cur_expr && (found = expr_equiv_p (cur_expr->expr, x)) == 0)
    {
      /* If the expression isn't found, save a pointer to the end of
	 the list.  */
      last_expr = cur_expr;
      cur_expr = cur_expr->next_same_hash;
    }

  if (! found)
    {
      cur_expr = GOBNEW (struct gcse_expr);
      bytes_used += sizeof (struct gcse_expr);
      if (table->table[hash] == NULL)
	/* This is the first pattern that hashed to this index.  */
	table->table[hash] = cur_expr;
      else
	/* Add EXPR to end of this hash chain.  */
	last_expr->next_same_hash = cur_expr;

      /* Set the fields of the expr element.  */
      cur_expr->expr = x;
      cur_expr->bitmap_index = table->n_elems++;
      cur_expr->next_same_hash = NULL;
      cur_expr->antic_occr = NULL;
      cur_expr->avail_occr = NULL;
      gcc_assert (max_distance >= 0);
      cur_expr->max_distance = max_distance;
    }
  else
    gcc_assert (cur_expr->max_distance == max_distance);

  /* Now record the occurrence(s).  */
  if (antic_p)
    {
      antic_occr = cur_expr->antic_occr;

      if (antic_occr
	  && BLOCK_FOR_INSN (antic_occr->insn) != BLOCK_FOR_INSN (insn))
	antic_occr = NULL;

      if (antic_occr)
	/* Found another instance of the expression in the same basic block.
	   Prefer the currently recorded one.  We want the first one in the
	   block and the block is scanned from start to end.  */
	; /* nothing to do */
      else
	{
	  /* First occurrence of this expression in this basic block.  */
	  antic_occr = GOBNEW (struct gcse_occr);
	  bytes_used += sizeof (struct gcse_occr);
	  antic_occr->insn = insn;
	  antic_occr->next = cur_expr->antic_occr;
	  antic_occr->deleted_p = 0;
	  cur_expr->antic_occr = antic_occr;
	}
    }

  if (avail_p)
    {
      avail_occr = cur_expr->avail_occr;

      if (avail_occr
	  && BLOCK_FOR_INSN (avail_occr->insn) == BLOCK_FOR_INSN (insn))
	{
	  /* Found another instance of the expression in the same basic block.
	     Prefer this occurrence to the currently recorded one.  We want
	     the last one in the block and the block is scanned from start
	     to end.  */
	  avail_occr->insn = insn;
	}
      else
	{
	  /* First occurrence of this expression in this basic block.  */
	  avail_occr = GOBNEW (struct gcse_occr);
	  bytes_used += sizeof (struct gcse_occr);
	  avail_occr->insn = insn;
	  avail_occr->next = cur_expr->avail_occr;
	  avail_occr->deleted_p = 0;
	  cur_expr->avail_occr = avail_occr;
	}
    }
}

/* Scan SET present in INSN and add an entry to the hash TABLE.  */

static void
hash_scan_set (rtx set, rtx_insn *insn, struct gcse_hash_table_d *table)
{
  rtx src = SET_SRC (set);
  rtx dest = SET_DEST (set);
  rtx note;

  if (GET_CODE (src) == CALL)
    hash_scan_call (src, insn, table);

  else if (REG_P (dest))
    {
      unsigned int regno = REGNO (dest);
      HOST_WIDE_INT max_distance = 0;

      /* See if a REG_EQUAL note shows this equivalent to a simpler expression.

	 This allows us to do a single GCSE pass and still eliminate
	 redundant constants, addresses or other expressions that are
	 constructed with multiple instructions.

	 However, keep the original SRC if INSN is a simple reg-reg move.
	 In this case, there will almost always be a REG_EQUAL note on the
	 insn that sets SRC.  By recording the REG_EQUAL value here as SRC
	 for INSN, we miss copy propagation opportunities and we perform the
	 same PRE GCSE operation repeatedly on the same REG_EQUAL value if we
	 do more than one PRE GCSE pass.

	 Note that this does not impede profitable constant propagations.  We
	 "look through" reg-reg sets in lookup_avail_set.  */
      note = find_reg_equal_equiv_note (insn);
      if (note != 0
	  && REG_NOTE_KIND (note) == REG_EQUAL
	  && !REG_P (src)
	  && want_to_gcse_p (XEXP (note, 0), GET_MODE (dest), NULL))
	src = XEXP (note, 0), set = gen_rtx_SET (dest, src);

      /* Only record sets of pseudo-regs in the hash table.  */
      if (regno >= FIRST_PSEUDO_REGISTER
	  /* Don't GCSE something if we can't do a reg/reg copy.  */
	  && can_copy_p (GET_MODE (dest))
	  /* GCSE commonly inserts instructions after the insn.  We can't
	     do that easily for EH edges so disable GCSE on these for now.  */
	  /* ??? We can now easily create new EH landing pads at the
	     gimple level, for splitting edges; there's no reason we
	     can't do the same thing at the rtl level.  */
	  && !can_throw_internal (insn)
	  /* Is SET_SRC something we want to gcse?  */
	  && want_to_gcse_p (src, GET_MODE (dest), &max_distance)
	  /* Don't CSE a nop.  */
	  && ! set_noop_p (set)
	  /* Don't GCSE if it has an attached REG_EQUIV note.
	     At this point only function parameters should have
	     REG_EQUIV notes, and if the argument slot is used somewhere
	     explicitly, that means the address of the parameter has been
	     taken, so we should not extend the lifetime of the pseudo.  */
	  && (note == NULL_RTX || ! MEM_P (XEXP (note, 0))))
	{
	  /* An expression is not anticipatable if its operands are
	     modified before this insn or if this is not the only SET in
	     this insn.  The latter condition does not have to mean that
	     SRC itself is not anticipatable, but we just will not be
	     able to handle code motion of insns with multiple sets.  */
	  int antic_p = oprs_anticipatable_p (src, insn)
			&& !multiple_sets (insn);
	  /* An expression is not available if its operands are
	     subsequently modified, including this insn.  It's also not
	     available if this is a branch, because we can't insert
	     a set after the branch.  */
	  int avail_p = (oprs_available_p (src, insn)
			 && ! JUMP_P (insn));

	  insert_expr_in_table (src, GET_MODE (dest), insn, antic_p, avail_p,
				max_distance, table);
	}
    }
  /* In the case of a store, we want to consider the memory value as
     available in the REG stored in that memory.  This makes it possible
     to remove loads that are redundant due to stores to the same
     location.  */
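  /* For example, with -fgcse-las the store and load

	 (set (mem X) (reg 5))
	 ...
	 (set (reg 6) (mem X))

     let the second insn be treated as redundant when (mem X) is not
     killed in between, so the load can become a copy from the register
     that reaches it.  */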
  else if (flag_gcse_las && REG_P (src) && MEM_P (dest))
    {
      unsigned int regno = REGNO (src);
      HOST_WIDE_INT max_distance = 0;

      /* Only record sets of pseudo-regs in the hash table.  */
      if (regno >= FIRST_PSEUDO_REGISTER
	  /* Don't GCSE something if we can't do a reg/reg copy.  */
	  && can_copy_p (GET_MODE (src))
	  /* GCSE commonly inserts instructions after the insn.  We can't
	     do that easily for EH edges so disable GCSE on these for now.  */
	  && !can_throw_internal (insn)
	  /* Is SET_DEST something we want to gcse?  */
	  && want_to_gcse_p (dest, GET_MODE (dest), &max_distance)
	  /* Don't CSE a nop.  */
	  && ! set_noop_p (set)
	  /* Don't GCSE if it has an attached REG_EQUIV note.
	     At this point only function parameters should have
	     REG_EQUIV notes, and if the argument slot is used somewhere
	     explicitly, that means the address of the parameter has been
	     taken, so we should not extend the lifetime of the pseudo.  */
	  && ((note = find_reg_note (insn, REG_EQUIV, NULL_RTX)) == 0
	      || ! MEM_P (XEXP (note, 0))))
	{
	  /* Stores are never anticipatable.  */
	  int antic_p = 0;
	  /* An expression is not available if its operands are
	     subsequently modified, including this insn.  It's also not
	     available if this is a branch, because we can't insert
	     a set after the branch.  */
	  int avail_p = oprs_available_p (dest, insn) && ! JUMP_P (insn);

	  /* Record the memory expression (DEST) in the hash table.  */
	  insert_expr_in_table (dest, GET_MODE (dest), insn,
				antic_p, avail_p, max_distance, table);
	}
    }
}

static void
hash_scan_clobber (rtx x ATTRIBUTE_UNUSED, rtx_insn *insn ATTRIBUTE_UNUSED,
		   struct gcse_hash_table_d *table ATTRIBUTE_UNUSED)
{
  /* Currently nothing to do.  */
}

static void
hash_scan_call (rtx x ATTRIBUTE_UNUSED, rtx_insn *insn ATTRIBUTE_UNUSED,
		struct gcse_hash_table_d *table ATTRIBUTE_UNUSED)
{
  /* Currently nothing to do.  */
}

/* Process INSN and add hash table entries as appropriate.  */

static void
hash_scan_insn (rtx_insn *insn, struct gcse_hash_table_d *table)
{
  rtx pat = PATTERN (insn);
  int i;

  /* Pick out the sets of INSN and for other forms of instructions record
     what's been modified.  */

  if (GET_CODE (pat) == SET)
    hash_scan_set (pat, insn, table);

  else if (GET_CODE (pat) == CLOBBER)
    hash_scan_clobber (pat, insn, table);

  else if (GET_CODE (pat) == CALL)
    hash_scan_call (pat, insn, table);

  else if (GET_CODE (pat) == PARALLEL)
    for (i = 0; i < XVECLEN (pat, 0); i++)
      {
	rtx x = XVECEXP (pat, 0, i);

	if (GET_CODE (x) == SET)
	  hash_scan_set (x, insn, table);
	else if (GET_CODE (x) == CLOBBER)
	  hash_scan_clobber (x, insn, table);
	else if (GET_CODE (x) == CALL)
	  hash_scan_call (x, insn, table);
      }
}

/* Dump the hash table TABLE to file FILE under the name NAME.  */

static void
dump_hash_table (FILE *file, const char *name, struct gcse_hash_table_d *table)
{
  int i;
  /* Flattened out table, so it's printed in proper order.  */
  struct gcse_expr **flat_table;
  unsigned int *hash_val;
  struct gcse_expr *expr;

  flat_table = XCNEWVEC (struct gcse_expr *, table->n_elems);
  hash_val = XNEWVEC (unsigned int, table->n_elems);

  for (i = 0; i < (int) table->size; i++)
    for (expr = table->table[i]; expr != NULL; expr = expr->next_same_hash)
      {
	flat_table[expr->bitmap_index] = expr;
	hash_val[expr->bitmap_index] = i;
      }

  fprintf (file, "%s hash table (%d buckets, %d entries)\n",
	   name, table->size, table->n_elems);

  for (i = 0; i < (int) table->n_elems; i++)
    if (flat_table[i] != 0)
      {
	expr = flat_table[i];
	fprintf (file, "Index %d (hash value %d; max distance "
		 HOST_WIDE_INT_PRINT_DEC ")\n  ",
		 expr->bitmap_index, hash_val[i], expr->max_distance);
	print_rtl (file, expr->expr);
	fprintf (file, "\n");
      }

  fprintf (file, "\n");

  free (flat_table);
  free (hash_val);
}

/* Record register first/last/block set information for REGNO in INSN.

   first_set records the first place in the block where the register
   is set and is used to compute "anticipatability".

   last_set records the last place in the block where the register
   is set and is used to compute "availability".

   last_bb records the block for which first_set and last_set are
   valid, as a quick test to invalidate them.  */
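
/* For example, if a register is set at LUIDs 2 and 4 within a block,
   first_set == 2 and last_set == 4; per oprs_unchanged_p, the register
   is then unchanged before an insn with LUID L iff first_set >= L
   (i.e. L <= 2), and unchanged from that insn to the block's end iff
   last_set < L (i.e. L > 4).  */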

static void
record_last_reg_set_info (rtx_insn *insn, int regno)
{
  struct reg_avail_info *info = &reg_avail_info[regno];
  int luid = DF_INSN_LUID (insn);

  info->last_set = luid;
  if (info->last_bb != current_bb)
    {
      info->last_bb = current_bb;
      info->first_set = luid;
    }
}

/* Record memory modification information for INSN.  We do not actually care
   about the memory location(s) that are set, or even how they are set
   (consider a CALL_INSN).  We merely need to record which insns modify
   memory.  */

static void
record_last_mem_set_info (rtx_insn *insn)
{
  if (! flag_gcse_lm)
    return;

  record_last_mem_set_info_common (insn, modify_mem_list,
				   canon_modify_mem_list,
				   modify_mem_list_set,
				   blocks_with_calls);
}

/* Called from compute_hash_table via note_stores to handle one
   SET or CLOBBER in an insn.  DATA is really the instruction in which
   the SET is taking place.  */

static void
record_last_set_info (rtx dest, const_rtx setter ATTRIBUTE_UNUSED, void *data)
{
  rtx_insn *last_set_insn = (rtx_insn *) data;

  if (GET_CODE (dest) == SUBREG)
    dest = SUBREG_REG (dest);

  if (REG_P (dest))
    record_last_reg_set_info (last_set_insn, REGNO (dest));
  else if (MEM_P (dest)
	   /* Ignore pushes, they clobber nothing.  */
	   && ! push_operand (dest, GET_MODE (dest)))
    record_last_mem_set_info (last_set_insn);
}
1491 
1492 /* Top level function to create an expression hash table.
1493 
1494    Expression entries are placed in the hash table if
1495    - they are of the form (set (pseudo-reg) src),
1496    - src is something we want to perform GCSE on,
1497    - none of the operands are subsequently modified in the block
1498 
1499    Currently src must be a pseudo-reg or a const_int.
1500 
1501    TABLE is the table computed.  */
1502 
1503 static void
compute_hash_table_work(struct gcse_hash_table_d * table)1504 compute_hash_table_work (struct gcse_hash_table_d *table)
1505 {
1506   int i;
1507 
1508   /* Clear any memory-modification info cached from a previous pass.  */
1509   clear_modify_mem_tables ();
1510   /* Some working arrays used to track first and last set in each block.  */
1511   reg_avail_info = GNEWVEC (struct reg_avail_info, max_reg_num ());
1512 
1513   for (i = 0; i < max_reg_num (); ++i)
1514     reg_avail_info[i].last_bb = NULL;
1515 
1516   FOR_EACH_BB_FN (current_bb, cfun)
1517     {
1518       rtx_insn *insn;
1519       unsigned int regno;
1520 
1521       /* First pass over the instructions records information used to
1522 	 determine when registers and memory are first and last set.  */
1523       FOR_BB_INSNS (current_bb, insn)
1524 	{
1525 	  if (!NONDEBUG_INSN_P (insn))
1526 	    continue;
1527 
1528 	  if (CALL_P (insn))
1529 	    {
1530 	      hard_reg_set_iterator hrsi;
1531 
1532 	      /* We don't track modes of hard registers, so we need
1533 		 to be conservative and assume that partial kills
1534 		 are full kills.  */
1535 	      HARD_REG_SET callee_clobbers
1536 		= insn_callee_abi (insn).full_and_partial_reg_clobbers ();
1537 	      EXECUTE_IF_SET_IN_HARD_REG_SET (callee_clobbers, 0, regno, hrsi)
1538 		record_last_reg_set_info (insn, regno);
1539 
1540 	      if (! RTL_CONST_OR_PURE_CALL_P (insn)
1541 		  || RTL_LOOPING_CONST_OR_PURE_CALL_P (insn))
1542 		record_last_mem_set_info (insn);
1543 	    }
1544 
1545 	  note_stores (insn, record_last_set_info, insn);
1546 	}
1547 
1548       /* The next pass builds the hash table.  */
1549       FOR_BB_INSNS (current_bb, insn)
1550 	if (NONDEBUG_INSN_P (insn))
1551 	  hash_scan_insn (insn, table);
1552     }
1553 
1554   free (reg_avail_info);
1555   reg_avail_info = NULL;
1556 }
1557 
1558 /* Allocate space for the set/expr hash TABLE.
1559    The maximum number of insns in the function determines the number
   of buckets to use.  */
1560 
1561 static void
1562 alloc_hash_table (struct gcse_hash_table_d *table)
1563 {
1564   int n;
1565 
1566   n = get_max_insn_count ();
1567 
1568   table->size = n / 4;
1569   if (table->size < 11)
1570     table->size = 11;
1571 
1572   /* Attempt to maintain efficient use of hash table.
1573      Making it an odd number is simplest for now.
1574      ??? Later take some measurements.  */
1575   table->size |= 1;
1576   n = table->size * sizeof (struct gcse_expr *);
1577   table->table = GNEWVAR (struct gcse_expr *, n);
1578 }
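
/* Worked example of the sizing rule above: a function with 1000 insns
   gets 1000/4 = 250 buckets, forced odd to 251; a function with fewer
   than 44 insns gets the 11-bucket minimum, which is already odd.  */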
1579 
1580 /* Free things allocated by alloc_hash_table.  */
1581 
1582 static void
1583 free_hash_table (struct gcse_hash_table_d *table)
1584 {
1585   free (table->table);
1586 }
1587 
1588 /* Compute the expression hash table TABLE.  */
1589 
1590 static void
1591 compute_hash_table (struct gcse_hash_table_d *table)
1592 {
1593   /* Initialize count of number of entries in hash table.  */
1594   table->n_elems = 0;
1595   memset (table->table, 0, table->size * sizeof (struct gcse_expr *));
1596 
1597   compute_hash_table_work (table);
1598 }
1599 
1600 /* Expression tracking support.  */
1601 
1602 /* Clear canon_modify_mem_list and modify_mem_list tables.  */
1603 static void
1604 clear_modify_mem_tables (void)
1605 {
1606   unsigned i;
1607   bitmap_iterator bi;
1608 
1609   EXECUTE_IF_SET_IN_BITMAP (modify_mem_list_set, 0, i, bi)
1610     {
1611       modify_mem_list[i].release ();
1612       canon_modify_mem_list[i].release ();
1613     }
1614   bitmap_clear (modify_mem_list_set);
1615   bitmap_clear (blocks_with_calls);
1616 }
1617 
1618 /* Release memory used by modify_mem_list_set.  */
1619 
1620 static void
1621 free_modify_mem_tables (void)
1622 {
1623   clear_modify_mem_tables ();
1624   free (modify_mem_list);
1625   free (canon_modify_mem_list);
1626   modify_mem_list = 0;
1627   canon_modify_mem_list = 0;
1628 }
1629 
1630 /* Compute PRE+LCM working variables.  */
1631 
1632 /* Local properties of expressions.  */
1633 
1634 /* Nonzero for expressions that are transparent in the block.  */
1635 static sbitmap *transp;
1636 
1637 /* Nonzero for expressions that are computed (available) in the block.  */
1638 static sbitmap *comp;
1639 
1640 /* Nonzero for expressions that are locally anticipatable in the block.  */
1641 static sbitmap *antloc;
1642 
1643 /* Nonzero for expressions where this block is an optimal computation
1644    point.  */
1645 static sbitmap *pre_optimal;
1646 
1647 /* Nonzero for expressions which are redundant in a particular block.  */
1648 static sbitmap *pre_redundant;
1649 
1650 /* Nonzero for expressions which should be inserted on a specific edge.  */
1651 static sbitmap *pre_insert_map;
1652 
1653 /* Nonzero for expressions which should be deleted in a specific block.  */
1654 static sbitmap *pre_delete_map;
1655 
1656 /* Allocate vars used for PRE analysis.  */
1657 
1658 static void
1659 alloc_pre_mem (int n_blocks, int n_exprs)
1660 {
1661   transp = sbitmap_vector_alloc (n_blocks, n_exprs);
1662   comp = sbitmap_vector_alloc (n_blocks, n_exprs);
1663   antloc = sbitmap_vector_alloc (n_blocks, n_exprs);
1664 
1665   pre_optimal = NULL;
1666   pre_redundant = NULL;
1667   pre_insert_map = NULL;
1668   pre_delete_map = NULL;
1669   ae_kill = sbitmap_vector_alloc (n_blocks, n_exprs);
1670 
1671   /* pre_insert and pre_delete are allocated later.  */
1672 }
1673 
1674 /* Free vars used for PRE analysis.  */
1675 
1676 static void
1677 free_pre_mem (void)
1678 {
1679   sbitmap_vector_free (transp);
1680   sbitmap_vector_free (comp);
1681 
1682   /* ANTLOC and AE_KILL are freed just after pre_lcm finishes.  */
1683 
1684   if (pre_optimal)
1685     sbitmap_vector_free (pre_optimal);
1686   if (pre_redundant)
1687     sbitmap_vector_free (pre_redundant);
1688   if (pre_insert_map)
1689     sbitmap_vector_free (pre_insert_map);
1690   if (pre_delete_map)
1691     sbitmap_vector_free (pre_delete_map);
1692 
1693   transp = comp = NULL;
1694   pre_optimal = pre_redundant = pre_insert_map = pre_delete_map = NULL;
1695 }
1696 
1697 /* Remove certain expressions from anticipatable and transparent
1698    sets of basic blocks that have incoming abnormal edge.
1699    For PRE remove potentially trapping expressions to avoid placing
1700    them on abnormal edges.  For hoisting remove memory references that
1701    can be clobbered by calls.  */
1702 
1703 static void
1704 prune_expressions (bool pre_p)
1705 {
1706   struct gcse_expr *expr;
1707   unsigned int ui;
1708   basic_block bb;
1709 
1710   auto_sbitmap prune_exprs (expr_hash_table.n_elems);
1711   bitmap_clear (prune_exprs);
1712   for (ui = 0; ui < expr_hash_table.size; ui++)
1713     {
1714       for (expr = expr_hash_table.table[ui]; expr; expr = expr->next_same_hash)
1715 	{
1716 	  /* Note potentially trapping expressions.  */
1717 	  if (may_trap_p (expr->expr))
1718 	    {
1719 	      bitmap_set_bit (prune_exprs, expr->bitmap_index);
1720 	      continue;
1721 	    }
1722 
1723 	  if (!pre_p && contains_mem_rtx_p (expr->expr))
1724 	    /* Note memory references that can be clobbered by a call.
1725 	       We do not split abnormal edges in hoisting, so if a
1726 	       memory reference were hoisted along an abnormal edge,
1727 	       it would be placed /before/ the call.  Therefore, only
1728 	       constant memory references can be hoisted along abnormal
1729 	       edges.  */
1730 	    {
1731 	      rtx x = expr->expr;
1732 
1733 	      /* Common cases where we might find the MEM which may allow us
1734 		 to avoid pruning the expression.  */
1735 	      while (GET_CODE (x) == ZERO_EXTEND || GET_CODE (x) == SIGN_EXTEND)
1736 		x = XEXP (x, 0);
1737 
1738 	      /* If we found the MEM, go ahead and look at it to see if it has
1739 		 properties that allow us to avoid pruning its expression out
1740 		 of the tables.  */
1741 	      if (MEM_P (x))
1742 		{
1743 		  if (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
1744 		      && CONSTANT_POOL_ADDRESS_P (XEXP (x, 0)))
1745 		    continue;
1746 
1747 		  if (MEM_READONLY_P (x)
1748 		      && !MEM_VOLATILE_P (x)
1749 		      && MEM_NOTRAP_P (x))
1750 		    /* Constant memory reference, e.g., a PIC address.  */
1751 		    continue;
1752 		}
1753 
1754 	      /* ??? Optimally, we would use interprocedural alias
1755 		 analysis to determine if this mem is actually killed
1756 		 by this call.  */
1757 
1758 	      bitmap_set_bit (prune_exprs, expr->bitmap_index);
1759 	    }
1760 	}
1761     }
1762 
1763   FOR_EACH_BB_FN (bb, cfun)
1764     {
1765       edge e;
1766       edge_iterator ei;
1767 
1768       /* If the current block is the destination of an abnormal edge, we
1769 	 kill all trapping (for PRE) and memory (for hoist) expressions
1770 	 because we won't be able to properly place the instruction on
1771 	 the edge.  So make them neither anticipatable nor transparent.
1772 	 This is fairly conservative.
1773 
1774 	 ??? For hoisting it may be necessary to check for set-and-jump
1775 	 instructions here, not just for abnormal edges.  The general problem
1776 	 is that when an expression cannot be placed right at the end of
1777 	 a basic block, we should account for any side-effects of a subsequent
1778 	 jump instruction that could clobber the expression.  It would
1779 	 be best to implement this check along the lines of
1780 	 should_hoist_expr_to_dom where the target block is already known
1781 	 and, hence, there's no need to conservatively prune expressions on
1782 	 "intermediate" set-and-jump instructions.  */
1783       FOR_EACH_EDGE (e, ei, bb->preds)
1784 	if ((e->flags & EDGE_ABNORMAL)
1785 	    && (pre_p || CALL_P (BB_END (e->src))))
1786 	  {
1787 	    bitmap_and_compl (antloc[bb->index],
1788 				antloc[bb->index], prune_exprs);
1789 	    bitmap_and_compl (transp[bb->index],
1790 				transp[bb->index], prune_exprs);
1791 	    break;
1792 	  }
1793     }
1794 }
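
/* The hoisting-side MEM test above, condensed into a predicate (an
   illustrative sketch only; the pass keeps the logic inline):  */
#if 0
static bool
example_constant_mem_p (const_rtx x)
{
  /* A constant-pool reference, or a read-only, non-volatile,
     non-trapping memory reference, may be hoisted past calls.  */
  return ((GET_CODE (XEXP (x, 0)) == SYMBOL_REF
	   && CONSTANT_POOL_ADDRESS_P (XEXP (x, 0)))
	  || (MEM_READONLY_P (x)
	      && !MEM_VOLATILE_P (x)
	      && MEM_NOTRAP_P (x)));
}
#endif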
1795 
1796 /* It may be necessary to insert a large number of insns on edges to
1797    make the existing occurrences of expressions fully redundant.  This
1798    routine examines the set of insertions and deletions and if the ratio
1799    of insertions to deletions is too high for a particular expression, then
1800    the expression is removed from the insertion/deletion sets.
1801 
1802    N_ELEMS is the number of elements in the hash table.  */
1803 
1804 static void
1805 prune_insertions_deletions (int n_elems)
1806 {
1807   sbitmap_iterator sbi;
1808 
1809   /* We always use I to iterate over blocks/edges and J to iterate over
1810      expressions.  */
1811   unsigned int i, j;
1812 
1813   /* Counts for the number of times an expression needs to be inserted and
1814      number of times an expression can be removed as a result.  */
1815   int *insertions = GCNEWVEC (int, n_elems);
1816   int *deletions = GCNEWVEC (int, n_elems);
1817 
1818   /* Set of expressions which require too many insertions relative to
1819      the number of deletions achieved.  We will prune these out of the
1820      insertion/deletion sets.  */
1821   auto_sbitmap prune_exprs (n_elems);
1822   bitmap_clear (prune_exprs);
1823 
1824   /* Iterate over the edges counting the number of times each expression
1825      needs to be inserted.  */
1826   for (i = 0; i < (unsigned) n_edges_for_fn (cfun); i++)
1827     {
1828       EXECUTE_IF_SET_IN_BITMAP (pre_insert_map[i], 0, j, sbi)
1829 	insertions[j]++;
1830     }
1831 
1832   /* Similarly for deletions, but those occur in blocks rather than on
1833      edges.  */
1834   for (i = 0; i < (unsigned) last_basic_block_for_fn (cfun); i++)
1835     {
1836       EXECUTE_IF_SET_IN_BITMAP (pre_delete_map[i], 0, j, sbi)
1837 	deletions[j]++;
1838     }
1839 
1840   /* Now that we have accurate counts, iterate over the elements in the
1841      hash table and see if any need too many insertions relative to the
1842      number of evaluations that can be removed.  If so, mark them in
1843      PRUNE_EXPRS.  */
1844   for (j = 0; j < (unsigned) n_elems; j++)
1845     if (deletions[j]
1846 	&& (insertions[j] / deletions[j]) > param_max_gcse_insertion_ratio)
1847       bitmap_set_bit (prune_exprs, j);
1848 
1849   /* Now prune PRE_INSERT_MAP and PRE_DELETE_MAP based on PRUNE_EXPRS.  */
1850   EXECUTE_IF_SET_IN_BITMAP (prune_exprs, 0, j, sbi)
1851     {
1852       for (i = 0; i < (unsigned) n_edges_for_fn (cfun); i++)
1853 	bitmap_clear_bit (pre_insert_map[i], j);
1854 
1855       for (i = 0; i < (unsigned) last_basic_block_for_fn (cfun); i++)
1856 	bitmap_clear_bit (pre_delete_map[i], j);
1857     }
1858 
1859   free (insertions);
1860   free (deletions);
1861 }
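
/* Worked example (hypothetical counts): an expression that needs
   insertions on 50 edges to make only 2 occurrences fully redundant
   has a ratio of 50/2 = 25; if that exceeds
   param_max_gcse_insertion_ratio, the loops above clear its bits in
   both PRE_INSERT_MAP and PRE_DELETE_MAP, leaving the partial
   redundancy alone.  */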
1862 
1863 /* Top level routine to do the dataflow analysis needed by PRE.  */
1864 
1865 static struct edge_list *
1866 compute_pre_data (void)
1867 {
1868   struct edge_list *edge_list;
1869   basic_block bb;
1870 
1871   compute_local_properties (transp, comp, antloc, &expr_hash_table);
1872   prune_expressions (true);
1873   bitmap_vector_clear (ae_kill, last_basic_block_for_fn (cfun));
1874 
1875   /* Compute ae_kill for each basic block using:
1876 
1877      ~(TRANSP | COMP)
1878   */
1879 
1880   FOR_EACH_BB_FN (bb, cfun)
1881     {
1882       bitmap_ior (ae_kill[bb->index], transp[bb->index], comp[bb->index]);
1883       bitmap_not (ae_kill[bb->index], ae_kill[bb->index]);
1884     }
1885 
1886   edge_list = pre_edge_lcm (expr_hash_table.n_elems, transp, comp, antloc,
1887 			    ae_kill, &pre_insert_map, &pre_delete_map);
1888   sbitmap_vector_free (antloc);
1889   antloc = NULL;
1890   sbitmap_vector_free (ae_kill);
1891   ae_kill = NULL;
1892 
1893   prune_insertions_deletions (expr_hash_table.n_elems);
1894 
1895   return edge_list;
1896 }
1897 
1898 /* PRE utilities */
1899 
1900 /* Return nonzero if an occurrence of expression EXPR in OCCR_BB would reach
1901    block BB.
1902 
1903    VISITED is a pointer to a working buffer for tracking which BB's have
1904    been visited.  It is NULL for the top-level call.
1905 
1906    We treat reaching expressions that go through blocks containing the same
1907    reaching expression as "not reaching".  E.g. if EXPR is generated in blocks
1908    2 and 3, INSN is in block 4, and 2->3->4, we treat the expression in block
1909    2 as not reaching.  The intent is to improve the probability of finding
1910    only one reaching expression and to reduce register lifetimes by picking
1911    the closest such expression.  */
1912 
1913 static int
1914 pre_expr_reaches_here_p_work (basic_block occr_bb, struct gcse_expr *expr,
1915 			      basic_block bb, char *visited)
1916 {
1917   edge pred;
1918   edge_iterator ei;
1919 
1920   FOR_EACH_EDGE (pred, ei, bb->preds)
1921     {
1922       basic_block pred_bb = pred->src;
1923 
1924       if (pred->src == ENTRY_BLOCK_PTR_FOR_FN (cfun)
1925 	  /* Has this predecessor already been visited?  */
1926 	  || visited[pred_bb->index])
1927 	;/* Nothing to do.  */
1928 
1929       /* Does this predecessor generate this expression?  */
1930       else if (bitmap_bit_p (comp[pred_bb->index], expr->bitmap_index))
1931 	{
1932 	  /* Is this the occurrence we're looking for?
1933 	     Note that there's only one generating occurrence per block
1934 	     so we just need to check the block number.  */
1935 	  if (occr_bb == pred_bb)
1936 	    return 1;
1937 
1938 	  visited[pred_bb->index] = 1;
1939 	}
1940       /* Ignore this predecessor if it kills the expression.  */
1941       else if (! bitmap_bit_p (transp[pred_bb->index], expr->bitmap_index))
1942 	visited[pred_bb->index] = 1;
1943 
1944       /* Neither gen nor kill.  */
1945       else
1946 	{
1947 	  visited[pred_bb->index] = 1;
1948 	  if (pre_expr_reaches_here_p_work (occr_bb, expr, pred_bb, visited))
1949 	    return 1;
1950 	}
1951     }
1952 
1953   /* All paths have been checked.  */
1954   return 0;
1955 }
1956 
1957 /* The wrapper for pre_expr_reaches_here_p_work that ensures that any
1958    memory allocated for that function is returned.  */
1959 
1960 static int
1961 pre_expr_reaches_here_p (basic_block occr_bb, struct gcse_expr *expr, basic_block bb)
1962 {
1963   int rval;
1964   char *visited = XCNEWVEC (char, last_basic_block_for_fn (cfun));
1965 
1966   rval = pre_expr_reaches_here_p_work (occr_bb, expr, bb, visited);
1967 
1968   free (visited);
1969   return rval;
1970 }
1971 
1972 /* Generate RTL to copy an EXP to REG and return it.  */
1973 
1974 rtx_insn *
1975 prepare_copy_insn (rtx reg, rtx exp)
1976 {
1977   rtx_insn *pat;
1978 
1979   start_sequence ();
1980 
1981   /* If the expression is something that's an operand, like a constant,
1982      just copy it to a register.  */
1983   if (general_operand (exp, GET_MODE (reg)))
1984     emit_move_insn (reg, exp);
1985 
1986   /* Otherwise, make a new insn to compute this expression and make sure the
1987      insn will be recognized (this also adds any needed CLOBBERs).  */
1988   else
1989     {
1990       rtx_insn *insn = emit_insn (gen_rtx_SET (reg, exp));
1991 
1992       if (insn_invalid_p (insn, false))
1993 	gcc_unreachable ();
1994     }
1995 
1996   pat = get_insns ();
1997   end_sequence ();
1998 
1999   return pat;
2000 }
2001 
2002 /* Generate RTL to copy an EXPR to its `reaching_reg' and return it.  */
2003 
2004 static rtx_insn *
2005 process_insert_insn (struct gcse_expr *expr)
2006 {
2007   rtx reg = expr->reaching_reg;
2008   /* Copy the expression to make sure we don't have any sharing issues.  */
2009   rtx exp = copy_rtx (expr->expr);
2010 
2011   return prepare_copy_insn (reg, exp);
2012 }
2013 
2014 /* Add EXPR to the end of basic block BB.
2015 
2016    This is used by both PRE and code hoisting.  */
2017 
2018 static void
2019 insert_insn_end_basic_block (struct gcse_expr *expr, basic_block bb)
2020 {
2021   rtx_insn *insn = BB_END (bb);
2022   rtx_insn *new_insn;
2023   rtx reg = expr->reaching_reg;
2024   int regno = REGNO (reg);
2025   rtx_insn *pat, *pat_end;
2026 
2027   pat = process_insert_insn (expr);
2028   gcc_assert (pat && INSN_P (pat));
2029 
2030   pat_end = pat;
2031   while (NEXT_INSN (pat_end) != NULL_RTX)
2032     pat_end = NEXT_INSN (pat_end);
2033 
2034   /* If the last insn is a jump, insert EXPR in front [taking care to
2035      handle cc0, etc. properly].  Similarly we need to take care of
2036      trapping instructions in the presence of non-call exceptions.  */
2037 
2038   if (JUMP_P (insn)
2039       || (NONJUMP_INSN_P (insn)
2040 	  && (!single_succ_p (bb)
2041 	      || single_succ_edge (bb)->flags & EDGE_ABNORMAL)))
2042     {
2043       /* FIXME: 'twould be nice to call prev_cc0_setter here but it aborts
2044 	 if cc0 isn't set.  */
2045       if (HAVE_cc0)
2046 	{
2047 	  rtx note = find_reg_note (insn, REG_CC_SETTER, NULL_RTX);
2048 	  if (note)
2049 	    insn = safe_as_a <rtx_insn *> (XEXP (note, 0));
2050 	  else
2051 	    {
2052 	      rtx_insn *maybe_cc0_setter = prev_nonnote_insn (insn);
2053 	      if (maybe_cc0_setter
2054 		  && INSN_P (maybe_cc0_setter)
2055 		  && sets_cc0_p (PATTERN (maybe_cc0_setter)))
2056 		insn = maybe_cc0_setter;
2057 	    }
2058 	}
2059 
2060       /* FIXME: What if something in cc0/jump uses value set in new insn?  */
2061       new_insn = emit_insn_before_noloc (pat, insn, bb);
2062     }
2063 
2064   /* Likewise if the last insn is a call, as will happen in the presence
2065      of exception handling.  */
2066   else if (CALL_P (insn)
2067 	   && (!single_succ_p (bb)
2068 	       || single_succ_edge (bb)->flags & EDGE_ABNORMAL))
2069     {
2070       /* Keeping in mind targets with small register classes and parameters
2071          in registers, we search backward and place the instructions before
2072 	 the first parameter is loaded.  Do this for everyone for consistency
2073 	 and a presumption that we'll get better code elsewhere as well.  */
2074 
2075       /* Since different machines initialize their parameter registers
2076 	 in different orders, assume nothing.  Collect the set of all
2077 	 parameter registers.  */
2078       insn = find_first_parameter_load (insn, BB_HEAD (bb));
2079 
2080       /* If we found all the parameter loads, then we want to insert
2081 	 before the first parameter load.
2082 
2083 	 If we did not find all the parameter loads, then we might have
2084 	 stopped on the head of the block, which could be a CODE_LABEL.
2085 	 If we inserted before the CODE_LABEL, then we would be putting
2086 	 the insn in the wrong basic block.  In that case, put the insn
2087 	 after the CODE_LABEL.  Also, respect NOTE_INSN_BASIC_BLOCK.  */
2088       while (LABEL_P (insn)
2089 	     || NOTE_INSN_BASIC_BLOCK_P (insn))
2090 	insn = NEXT_INSN (insn);
2091 
2092       new_insn = emit_insn_before_noloc (pat, insn, bb);
2093     }
2094   else
2095     new_insn = emit_insn_after_noloc (pat, insn, bb);
2096 
2097   while (1)
2098     {
2099       if (INSN_P (pat))
2100 	add_label_notes (PATTERN (pat), new_insn);
2101       if (pat == pat_end)
2102 	break;
2103       pat = NEXT_INSN (pat);
2104     }
2105 
2106   gcse_create_count++;
2107 
2108   if (dump_file)
2109     {
2110       fprintf (dump_file, "PRE/HOIST: end of bb %d, insn %d, ",
2111 	       bb->index, INSN_UID (new_insn));
2112       fprintf (dump_file, "copying expression %d to reg %d\n",
2113 	       expr->bitmap_index, regno);
2114     }
2115 }
2116 
2117 /* Insert partially redundant expressions on edges in the CFG to make
2118    the expressions fully redundant.  */
2119 
2120 static int
2121 pre_edge_insert (struct edge_list *edge_list, struct gcse_expr **index_map)
2122 {
2123   int e, i, j, num_edges, set_size, did_insert = 0;
2124   sbitmap *inserted;
2125 
2126   /* Where PRE_INSERT_MAP is nonzero, we add the expression on that edge
2127      if it reaches any of the deleted expressions.  */
2128 
2129   set_size = pre_insert_map[0]->size;
2130   num_edges = NUM_EDGES (edge_list);
2131   inserted = sbitmap_vector_alloc (num_edges, expr_hash_table.n_elems);
2132   bitmap_vector_clear (inserted, num_edges);
2133 
2134   for (e = 0; e < num_edges; e++)
2135     {
2136       int indx;
2137       basic_block bb = INDEX_EDGE_PRED_BB (edge_list, e);
2138 
2139       for (i = indx = 0; i < set_size; i++, indx += SBITMAP_ELT_BITS)
2140 	{
2141 	  SBITMAP_ELT_TYPE insert = pre_insert_map[e]->elms[i];
2142 
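	  /* Walk the bits of this sbitmap word: J tracks the global
	     expression index while INSERT is shifted right one bit per
	     iteration, so the scan stops early once no set bits remain.  */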
2143 	  for (j = indx;
2144 	       insert && j < (int) expr_hash_table.n_elems;
2145 	       j++, insert >>= 1)
2146 	    if ((insert & 1) != 0 && index_map[j]->reaching_reg != NULL_RTX)
2147 	      {
2148 		struct gcse_expr *expr = index_map[j];
2149 		struct gcse_occr *occr;
2150 
2151 		/* Now look at each deleted occurrence of this expression.  */
2152 		for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
2153 		  {
2154 		    if (! occr->deleted_p)
2155 		      continue;
2156 
2157 		    /* Insert this expression on this edge if it would
2158 		       reach the deleted occurrence in BB.  */
2159 		    if (!bitmap_bit_p (inserted[e], j))
2160 		      {
2161 			rtx_insn *insn;
2162 			edge eg = INDEX_EDGE (edge_list, e);
2163 
2164 			/* We can't insert anything on an abnormal and
2165 			   critical edge, so we insert the insn at the end of
2166 			   the previous block. There are several alternatives
2167 			   detailed in Morgan's book, p. 277 (sec. 10.5), for
2168 			   handling this situation.  This one is easiest for
2169 			   now.  */
2170 
2171 			if (eg->flags & EDGE_ABNORMAL)
2172 			  insert_insn_end_basic_block (index_map[j], bb);
2173 			else
2174 			  {
2175 			    insn = process_insert_insn (index_map[j]);
2176 			    insert_insn_on_edge (insn, eg);
2177 			  }
2178 
2179 			if (dump_file)
2180 			  {
2181 			    fprintf (dump_file, "PRE: edge (%d,%d), ",
2182 				     bb->index,
2183 				     INDEX_EDGE_SUCC_BB (edge_list, e)->index);
2184 			    fprintf (dump_file, "copy expression %d\n",
2185 				     expr->bitmap_index);
2186 			  }
2187 
2188 			update_ld_motion_stores (expr);
2189 			bitmap_set_bit (inserted[e], j);
2190 			did_insert = 1;
2191 			gcse_create_count++;
2192 		      }
2193 		  }
2194 	      }
2195 	}
2196     }
2197 
2198   sbitmap_vector_free (inserted);
2199   return did_insert;
2200 }
2201 
2202 /* Copy the result of EXPR->EXPR generated by INSN to EXPR->REACHING_REG.
2203    Given "old_reg <- expr" (INSN), instead of adding after it
2204      reaching_reg <- old_reg
2205    it's better to do the following:
2206      reaching_reg <- expr
2207      old_reg      <- reaching_reg
2208    because this way copy propagation can discover additional PRE
2209    opportunities.  But if this fails, we try the old way.
2210    When "expr" is a store, i.e.
2211    given "MEM <- old_reg", instead of adding after it
2212      reaching_reg <- old_reg
2213    it's better to add it before as follows:
2214      reaching_reg <- old_reg
2215      MEM          <- reaching_reg.  */
2216 
2217 static void
2218 pre_insert_copy_insn (struct gcse_expr *expr, rtx_insn *insn)
2219 {
2220   rtx reg = expr->reaching_reg;
2221   int regno = REGNO (reg);
2222   int indx = expr->bitmap_index;
2223   rtx pat = PATTERN (insn);
2224   rtx set, first_set;
2225   rtx_insn *new_insn;
2226   rtx old_reg;
2227   int i;
2228 
2229   /* This block matches the logic in hash_scan_insn.  */
2230   switch (GET_CODE (pat))
2231     {
2232     case SET:
2233       set = pat;
2234       break;
2235 
2236     case PARALLEL:
2237       /* Search through the parallel looking for the set whose
2238 	 source was the expression that we're interested in.  */
2239       first_set = NULL_RTX;
2240       set = NULL_RTX;
2241       for (i = 0; i < XVECLEN (pat, 0); i++)
2242 	{
2243 	  rtx x = XVECEXP (pat, 0, i);
2244 	  if (GET_CODE (x) == SET)
2245 	    {
2246 	      /* If the source was a REG_EQUAL or REG_EQUIV note, we
2247 		 may not find an equivalent expression, but in this
2248 		 case the PARALLEL will have a single set.  */
2249 	      if (first_set == NULL_RTX)
2250 		first_set = x;
2251 	      if (expr_equiv_p (SET_SRC (x), expr->expr))
2252 	        {
2253 	          set = x;
2254 	          break;
2255 	        }
2256 	    }
2257 	}
2258 
2259       gcc_assert (first_set);
2260       if (set == NULL_RTX)
2261         set = first_set;
2262       break;
2263 
2264     default:
2265       gcc_unreachable ();
2266     }
2267 
2268   if (REG_P (SET_DEST (set)))
2269     {
2270       old_reg = SET_DEST (set);
2271       /* Check if we can modify the set destination in the original insn.  */
2272       if (validate_change (insn, &SET_DEST (set), reg, 0))
2273         {
2274           new_insn = gen_move_insn (old_reg, reg);
2275           new_insn = emit_insn_after (new_insn, insn);
2276         }
2277       else
2278         {
2279           new_insn = gen_move_insn (reg, old_reg);
2280           new_insn = emit_insn_after (new_insn, insn);
2281         }
2282     }
2283   else /* This is possible only in case of a store to memory.  */
2284     {
2285       old_reg = SET_SRC (set);
2286       new_insn = gen_move_insn (reg, old_reg);
2287 
2288       /* Check if we can modify the set source in the original insn.  */
2289       if (validate_change (insn, &SET_SRC (set), reg, 0))
2290         new_insn = emit_insn_before (new_insn, insn);
2291       else
2292         new_insn = emit_insn_after (new_insn, insn);
2293     }
2294 
2295   gcse_create_count++;
2296 
2297   if (dump_file)
2298     fprintf (dump_file,
2299 	     "PRE: bb %d, insn %d, copy expression %d in insn %d to reg %d\n",
2300 	      BLOCK_FOR_INSN (insn)->index, INSN_UID (new_insn), indx,
2301 	      INSN_UID (insn), regno);
2302 }
2303 
2304 /* Copy available expressions that reach the redundant expression
2305    to `reaching_reg'.  */
2306 
2307 static void
2308 pre_insert_copies (void)
2309 {
2310   unsigned int i, added_copy;
2311   struct gcse_expr *expr;
2312   struct gcse_occr *occr;
2313   struct gcse_occr *avail;
2314 
2315   /* For each available expression in the table, copy the result to
2316      `reaching_reg' if the expression reaches a deleted one.
2317 
2318      ??? The current algorithm is rather brute force.
2319      Need to do some profiling.  */
2320 
2321   for (i = 0; i < expr_hash_table.size; i++)
2322     for (expr = expr_hash_table.table[i]; expr; expr = expr->next_same_hash)
2323       {
2324 	/* If the basic block isn't reachable, PPOUT will be TRUE.  However,
2325 	   we don't want to insert a copy here because the expression may not
2326 	   really be redundant.  So only insert an insn if the expression was
2327 	   deleted.  This test also avoids further processing if the
2328 	   expression wasn't deleted anywhere.  */
2329 	if (expr->reaching_reg == NULL)
2330 	  continue;
2331 
2332 	/* Set when we add a copy for that expression.  */
2333 	added_copy = 0;
2334 
2335 	for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
2336 	  {
2337 	    if (! occr->deleted_p)
2338 	      continue;
2339 
2340 	    for (avail = expr->avail_occr; avail != NULL; avail = avail->next)
2341 	      {
2342 		rtx_insn *insn = avail->insn;
2343 
2344 		/* No need to handle this one if handled already.  */
2345 		if (avail->copied_p)
2346 		  continue;
2347 
2348 		/* Don't handle this one if it's a redundant one.  */
2349 		if (insn->deleted ())
2350 		  continue;
2351 
2352 		/* Or if the expression doesn't reach the deleted one.  */
2353 		if (! pre_expr_reaches_here_p (BLOCK_FOR_INSN (avail->insn),
2354 					       expr,
2355 					       BLOCK_FOR_INSN (occr->insn)))
2356 		  continue;
2357 
2358                 added_copy = 1;
2359 
2360 		/* Copy the result of avail to reaching_reg.  */
2361 		pre_insert_copy_insn (expr, insn);
2362 		avail->copied_p = 1;
2363 	      }
2364 	  }
2365 
2366 	  if (added_copy)
2367             update_ld_motion_stores (expr);
2368       }
2369 }
2370 
2371 struct set_data
2372 {
2373   rtx_insn *insn;
2374   const_rtx set;
2375   int nsets;
2376 };
2377 
2378 /* Increment number of sets and record set in DATA.  */
2379 
2380 static void
2381 record_set_data (rtx dest, const_rtx set, void *data)
2382 {
2383   struct set_data *s = (struct set_data *)data;
2384 
2385   if (GET_CODE (set) == SET)
2386     {
2387       /* We treat insns with multiple sets, where all but one are
2388 	 dead, as single-set insns.  In the common case only a single
2389 	 set is present, so we want to avoid checking for REG_UNUSED
2390 	 notes unless necessary.  */
2391       if (s->nsets == 1
2392 	  && find_reg_note (s->insn, REG_UNUSED, SET_DEST (s->set))
2393 	  && !side_effects_p (s->set))
2394 	s->nsets = 0;
2395 
2396       if (!s->nsets)
2397 	{
2398 	  /* Record this set.  */
2399 	  s->nsets += 1;
2400 	  s->set = set;
2401 	}
2402       else if (!find_reg_note (s->insn, REG_UNUSED, dest)
2403 	       || side_effects_p (set))
2404 	s->nsets += 1;
2405     }
2406 }
2407 
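/* Return the single interesting SET in INSN.  Unlike single_set, an
   insn with multiple sets is accepted as long as all but one of them
   are dead (see record_set_data above); exactly one live set must
   remain.  */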
2408 static const_rtx
2409 single_set_gcse (rtx_insn *insn)
2410 {
2411   struct set_data s;
2412   rtx pattern;
2413 
2414   gcc_assert (INSN_P (insn));
2415 
2416   /* Optimize common case.  */
2417   pattern = PATTERN (insn);
2418   if (GET_CODE (pattern) == SET)
2419     return pattern;
2420 
2421   s.insn = insn;
2422   s.nsets = 0;
2423   note_pattern_stores (pattern, record_set_data, &s);
2424 
2425   /* Considered invariant insns have exactly one set.  */
2426   gcc_assert (s.nsets == 1);
2427   return s.set;
2428 }
2429 
2430 /* Emit move from SRC to DEST noting the equivalence with expression computed
2431    in INSN.  */
2432 
2433 static rtx_insn *
2434 gcse_emit_move_after (rtx dest, rtx src, rtx_insn *insn)
2435 {
2436   rtx_insn *new_rtx;
2437   const_rtx set = single_set_gcse (insn);
2438   rtx set2;
2439   rtx note;
2440   rtx eqv = NULL_RTX;
2441 
2442   /* This should never fail since we're creating a reg->reg copy
2443      we've verified to be valid.  */
2444 
2445   new_rtx = emit_insn_after (gen_move_insn (dest, src), insn);
2446 
2447   /* Note the equivalence for local CSE pass.  Take the note from the old
2448      set if there was one.  Otherwise record the SET_SRC from the old set
2449      unless DEST is also an operand of the SET_SRC.  */
2450   set2 = single_set (new_rtx);
2451   if (!set2 || !rtx_equal_p (SET_DEST (set2), dest))
2452     return new_rtx;
2453   if ((note = find_reg_equal_equiv_note (insn)))
2454     eqv = XEXP (note, 0);
2455   else if (! REG_P (dest)
2456 	   || ! reg_mentioned_p (dest, SET_SRC (set)))
2457     eqv = SET_SRC (set);
2458 
2459   if (eqv != NULL_RTX)
2460     set_unique_reg_note (new_rtx, REG_EQUAL, copy_insn_1 (eqv));
2461 
2462   return new_rtx;
2463 }
2464 
2465 /* Delete redundant computations.
2466    Deletion is done by changing the insn to copy the `reaching_reg' of
2467    the expression into the result of the SET.  It is left to later passes
2468    to propagate the copy or eliminate it.
2469 
2470    Return nonzero if a change is made.  */
2471 
2472 static int
2473 pre_delete (void)
2474 {
2475   unsigned int i;
2476   int changed;
2477   struct gcse_expr *expr;
2478   struct gcse_occr *occr;
2479 
2480   changed = 0;
2481   for (i = 0; i < expr_hash_table.size; i++)
2482     for (expr = expr_hash_table.table[i]; expr; expr = expr->next_same_hash)
2483       {
2484 	int indx = expr->bitmap_index;
2485 
2486 	/* We only need to search antic_occr since we require ANTLOC != 0.  */
2487 	for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
2488 	  {
2489 	    rtx_insn *insn = occr->insn;
2490 	    rtx set;
2491 	    basic_block bb = BLOCK_FOR_INSN (insn);
2492 
2493 	    /* We only delete insns that have a single_set.  */
2494 	    if (bitmap_bit_p (pre_delete_map[bb->index], indx)
2495 		&& (set = single_set (insn)) != 0
2496                 && dbg_cnt (pre_insn))
2497 	      {
2498 		/* Create a pseudo-reg to store the result of reaching
2499 		   expressions into.  Get the mode for the new pseudo from
2500 		   the mode of the original destination pseudo.  */
2501 		if (expr->reaching_reg == NULL)
2502 		  expr->reaching_reg = gen_reg_rtx_and_attrs (SET_DEST (set));
2503 
2504 		gcse_emit_move_after (SET_DEST (set), expr->reaching_reg, insn);
2505 		delete_insn (insn);
2506 		occr->deleted_p = 1;
2507 		changed = 1;
2508 		gcse_subst_count++;
2509 
2510 		if (dump_file)
2511 		  {
2512 		    fprintf (dump_file,
2513 			     "PRE: redundant insn %d (expression %d) in ",
2514 			       INSN_UID (insn), indx);
2515 		    fprintf (dump_file, "bb %d, reaching reg is %d\n",
2516 			     bb->index, REGNO (expr->reaching_reg));
2517 		  }
2518 	      }
2519 	  }
2520       }
2521 
2522   return changed;
2523 }
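
/* Illustrative effect of pre_delete (hypothetical pseudo numbers):
   a redundant occurrence

       (set (reg 100) (plus (reg 1) (reg 2)))

   is replaced by a copy from the expression's reaching register,

       (set (reg 100) (reg 200))

   which later copy propagation or CSE may eliminate entirely.  */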
2524 
2525 /* Perform GCSE optimizations using PRE.
2526    This is called by one_pre_gcse_pass after all the dataflow analysis
2527    has been done.
2528 
2529    This is based on the original Morel-Renvoise paper, Fred Chow's thesis, and
2530    lazy code motion from Knoop, Ruthing and Steffen as described in Advanced
2531    Compiler Design and Implementation.
2532 
2533    ??? A new pseudo reg is created to hold the reaching expression.  The nice
2534    thing about the classical approach is that it would try to use an existing
2535    reg.  If the register can't be adequately optimized [i.e. we introduce
2536    reload problems], one could add a pass here to propagate the new register
2537    through the block.
2538 
2539    ??? We don't handle single sets in PARALLELs because we're [currently] not
2540    able to copy the rest of the parallel when we insert copies to create full
2541    redundancies from partial redundancies.  However, there's no reason why we
2542    can't handle PARALLELs in the cases where there are no partial
2543    redundancies.  */
2544 
2545 static int
2546 pre_gcse (struct edge_list *edge_list)
2547 {
2548   unsigned int i;
2549   int did_insert, changed;
2550   struct gcse_expr **index_map;
2551   struct gcse_expr *expr;
2552 
2553   /* Compute a mapping from expression number (`bitmap_index') to
2554      hash table entry.  */
2555 
2556   index_map = XCNEWVEC (struct gcse_expr *, expr_hash_table.n_elems);
2557   for (i = 0; i < expr_hash_table.size; i++)
2558     for (expr = expr_hash_table.table[i]; expr; expr = expr->next_same_hash)
2559       index_map[expr->bitmap_index] = expr;
2560 
2561   /* Delete the redundant insns first so that
2562      - we know what register to use for the new insns and for the other
2563        ones with reaching expressions
2564      - we know which insns are redundant when we go to create copies  */
2565 
2566   changed = pre_delete ();
2567   did_insert = pre_edge_insert (edge_list, index_map);
2568 
2569   /* In other places with reaching expressions, copy the expression to the
2570      specially allocated pseudo-reg that reaches the redundant expr.  */
2571   pre_insert_copies ();
2572   if (did_insert)
2573     {
2574       commit_edge_insertions ();
2575       changed = 1;
2576     }
2577 
2578   free (index_map);
2579   return changed;
2580 }
2581 
2582 /* Top level routine to perform one PRE GCSE pass.
2583 
2584    Return nonzero if a change was made.  */
2585 
2586 static int
2587 one_pre_gcse_pass (void)
2588 {
2589   int changed = 0;
2590 
2591   gcse_subst_count = 0;
2592   gcse_create_count = 0;
2593 
2594   /* Return if there's nothing to do, or it is too expensive.  */
2595   if (n_basic_blocks_for_fn (cfun) <= NUM_FIXED_BLOCKS + 1
2596       || gcse_or_cprop_is_too_expensive (_("PRE disabled")))
2597     return 0;
2598 
2599   /* We need alias.  */
2600   init_alias_analysis ();
2601 
2602   bytes_used = 0;
2603   gcc_obstack_init (&gcse_obstack);
2604   alloc_gcse_mem ();
2605 
2606   alloc_hash_table (&expr_hash_table);
2607   add_noreturn_fake_exit_edges ();
2608   if (flag_gcse_lm)
2609     compute_ld_motion_mems ();
2610 
2611   compute_hash_table (&expr_hash_table);
2612   if (flag_gcse_lm)
2613     trim_ld_motion_mems ();
2614   if (dump_file)
2615     dump_hash_table (dump_file, "Expression", &expr_hash_table);
2616 
2617   if (expr_hash_table.n_elems > 0)
2618     {
2619       struct edge_list *edge_list;
2620       alloc_pre_mem (last_basic_block_for_fn (cfun), expr_hash_table.n_elems);
2621       edge_list = compute_pre_data ();
2622       changed |= pre_gcse (edge_list);
2623       free_edge_list (edge_list);
2624       free_pre_mem ();
2625     }
2626 
2627   if (flag_gcse_lm)
2628     free_ld_motion_mems ();
2629   remove_fake_exit_edges ();
2630   free_hash_table (&expr_hash_table);
2631 
2632   free_gcse_mem ();
2633   obstack_free (&gcse_obstack, NULL);
2634 
2635   /* We are finished with alias.  */
2636   end_alias_analysis ();
2637 
2638   if (dump_file)
2639     {
2640       fprintf (dump_file, "PRE GCSE of %s, %d basic blocks, %d bytes needed, ",
2641 	       current_function_name (), n_basic_blocks_for_fn (cfun),
2642 	       bytes_used);
2643       fprintf (dump_file, "%d substs, %d insns created\n",
2644 	       gcse_subst_count, gcse_create_count);
2645     }
2646 
2647   return changed;
2648 }
2649 
2650 /* If X contains any LABEL_REF's, add REG_LABEL_OPERAND notes for them
2651    to INSN.  If such notes are added to an insn which references a
2652    CODE_LABEL, the LABEL_NUSES count is incremented.  We have to add
2653    these notes, because the following loop optimization pass requires
2654    them.  */
2655 
2656 /* ??? If there was a jump optimization pass after gcse and before loop,
2657    then we would not need to do this here, because jump would add the
2658    necessary REG_LABEL_OPERAND and REG_LABEL_TARGET notes.  */
2659 
2660 static void
2661 add_label_notes (rtx x, rtx_insn *insn)
2662 {
2663   enum rtx_code code = GET_CODE (x);
2664   int i, j;
2665   const char *fmt;
2666 
2667   if (code == LABEL_REF && !LABEL_REF_NONLOCAL_P (x))
2668     {
2669       /* This code used to ignore labels that referred to dispatch tables to
2670 	 avoid flow generating (slightly) worse code.
2671 
2672 	 We no longer ignore such label references (see LABEL_REF handling in
2673 	 mark_jump_label for additional information).  */
2674 
2675       /* There's no reason for current users to emit jump-insns with
2676 	 such a LABEL_REF, so we don't have to handle REG_LABEL_TARGET
2677 	 notes.  */
2678       gcc_assert (!JUMP_P (insn));
2679       add_reg_note (insn, REG_LABEL_OPERAND, label_ref_label (x));
2680 
2681       if (LABEL_P (label_ref_label (x)))
2682 	LABEL_NUSES (label_ref_label (x))++;
2683 
2684       return;
2685     }
2686 
2687   for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
2688     {
2689       if (fmt[i] == 'e')
2690 	add_label_notes (XEXP (x, i), insn);
2691       else if (fmt[i] == 'E')
2692 	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
2693 	  add_label_notes (XVECEXP (x, i, j), insn);
2694     }
2695 }
2696 
2697 /* Code Hoisting variables and subroutines.  */
2698 
2699 /* Very busy expressions.  */
2700 static sbitmap *hoist_vbein;
2701 static sbitmap *hoist_vbeout;
2702 
2703 /* ??? We could compute post dominators and run this algorithm in
2704    reverse to perform tail merging, doing so would probably be
2705    more effective than the tail merging code in jump.c.
2706 
2707    It's unclear if tail merging could be run in parallel with
2708    code hoisting.  It would be nice.  */
2709 
2710 /* Allocate vars used for code hoisting analysis.  */
2711 
2712 static void
2713 alloc_code_hoist_mem (int n_blocks, int n_exprs)
2714 {
2715   antloc = sbitmap_vector_alloc (n_blocks, n_exprs);
2716   transp = sbitmap_vector_alloc (n_blocks, n_exprs);
2717   comp = sbitmap_vector_alloc (n_blocks, n_exprs);
2718 
2719   hoist_vbein = sbitmap_vector_alloc (n_blocks, n_exprs);
2720   hoist_vbeout = sbitmap_vector_alloc (n_blocks, n_exprs);
2721 }
2722 
2723 /* Free vars used for code hoisting analysis.  */
2724 
2725 static void
2726 free_code_hoist_mem (void)
2727 {
2728   sbitmap_vector_free (antloc);
2729   sbitmap_vector_free (transp);
2730   sbitmap_vector_free (comp);
2731 
2732   sbitmap_vector_free (hoist_vbein);
2733   sbitmap_vector_free (hoist_vbeout);
2734 
2735   free_dominance_info (CDI_DOMINATORS);
2736 }
2737 
2738 /* Compute the very busy expressions at entry/exit from each block.
2739 
2740    An expression is very busy if all paths from a given point
2741    compute the expression.  */
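
/* Concretely, the fixpoint computed below solves the standard backward
   very-busy-expressions dataflow problem:

     VBEout(bb) = (intersection of VBEin(s) over successors s) | COMP(bb)
     VBEin(bb)  = ANTLOC(bb) | (VBEout(bb) & TRANSP(bb))  */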
2742 
2743 static void
2744 compute_code_hoist_vbeinout (void)
2745 {
2746   int changed, passes;
2747   basic_block bb;
2748 
2749   bitmap_vector_clear (hoist_vbeout, last_basic_block_for_fn (cfun));
2750   bitmap_vector_clear (hoist_vbein, last_basic_block_for_fn (cfun));
2751 
2752   passes = 0;
2753   changed = 1;
2754 
2755   while (changed)
2756     {
2757       changed = 0;
2758 
2759       /* We scan the blocks in reverse order to speed up
2760 	 convergence.  */
2761       FOR_EACH_BB_REVERSE_FN (bb, cfun)
2762 	{
2763 	  if (bb->next_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
2764 	    {
2765 	      bitmap_intersection_of_succs (hoist_vbeout[bb->index],
2766 					    hoist_vbein, bb);
2767 
2768 	      /* Include expressions in VBEout that are calculated
2769 		 in BB and available at its end.  */
2770 	      bitmap_ior (hoist_vbeout[bb->index],
2771 			      hoist_vbeout[bb->index], comp[bb->index]);
2772 	    }
2773 
2774 	  changed |= bitmap_or_and (hoist_vbein[bb->index],
2775 					      antloc[bb->index],
2776 					      hoist_vbeout[bb->index],
2777 					      transp[bb->index]);
2778 	}
2779 
2780       passes++;
2781     }
2782 
2783   if (dump_file)
2784     {
2785       fprintf (dump_file, "hoisting vbeinout computation: %d passes\n", passes);
2786 
2787       FOR_EACH_BB_FN (bb, cfun)
2788         {
2789 	  fprintf (dump_file, "vbein (%d): ", bb->index);
2790 	  dump_bitmap_file (dump_file, hoist_vbein[bb->index]);
2791 	  fprintf (dump_file, "vbeout(%d): ", bb->index);
2792 	  dump_bitmap_file (dump_file, hoist_vbeout[bb->index]);
2793 	}
2794     }
2795 }
2796 
2797 /* Top level routine to do the dataflow analysis needed by code hoisting.  */
2798 
2799 static void
2800 compute_code_hoist_data (void)
2801 {
2802   compute_local_properties (transp, comp, antloc, &expr_hash_table);
2803   prune_expressions (false);
2804   compute_code_hoist_vbeinout ();
2805   calculate_dominance_info (CDI_DOMINATORS);
2806   if (dump_file)
2807     fprintf (dump_file, "\n");
2808 }
2809 
2810 /* Update register pressure for BB when hoisting an expression from
2811    instruction FROM, if live ranges of inputs are shrunk.  Also
2812    maintain live_in information if the live range of a register
2813    referred to in FROM is shrunk.
2814 
2815    Return 0 if register pressure doesn't change, otherwise return
2816    the number by which register pressure is decreased.
2817 
2818    NOTE: Register pressure won't be increased in this function.  */
2819 
2820 static int
2821 update_bb_reg_pressure (basic_block bb, rtx_insn *from)
2822 {
2823   rtx dreg;
2824   rtx_insn *insn;
2825   basic_block succ_bb;
2826   df_ref use, op_ref;
2827   edge succ;
2828   edge_iterator ei;
2829   int decreased_pressure = 0;
2830   int nregs;
2831   enum reg_class pressure_class;
2832 
2833   FOR_EACH_INSN_USE (use, from)
2834     {
2835       dreg = DF_REF_REAL_REG (use);
2836       /* The live range of the register is shrunk only if it isn't:
2837 	 1. referred on any path from the end of this block to EXIT, or
2838 	 2. referred by insns other than FROM in this block.  */
2839       FOR_EACH_EDGE (succ, ei, bb->succs)
2840 	{
2841 	  succ_bb = succ->dest;
2842 	  if (succ_bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
2843 	    continue;
2844 
2845 	  if (bitmap_bit_p (BB_DATA (succ_bb)->live_in, REGNO (dreg)))
2846 	    break;
2847 	}
2848       if (succ != NULL)
2849 	continue;
2850 
2851       op_ref = DF_REG_USE_CHAIN (REGNO (dreg));
2852       for (; op_ref; op_ref = DF_REF_NEXT_REG (op_ref))
2853 	{
2854 	  if (!DF_REF_INSN_INFO (op_ref))
2855 	    continue;
2856 
2857 	  insn = DF_REF_INSN (op_ref);
2858 	  if (BLOCK_FOR_INSN (insn) == bb
2859 	      && NONDEBUG_INSN_P (insn) && insn != from)
2860 	    break;
2861 	}
2862 
2863       pressure_class = get_regno_pressure_class (REGNO (dreg), &nregs);
2864       /* Decrease register pressure and update live_in information for
2865 	 this block.  */
2866       if (!op_ref && pressure_class != NO_REGS)
2867 	{
2868 	  decreased_pressure += nregs;
2869 	  BB_DATA (bb)->max_reg_pressure[pressure_class] -= nregs;
2870 	  bitmap_clear_bit (BB_DATA (bb)->live_in, REGNO (dreg));
2871 	}
2872     }
2873   return decreased_pressure;
2874 }
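
/* Worked example (hypothetical): if FROM is the only insn in BB that
   refers to pseudo 200, no successor of BB has it in live_in, and the
   pseudo needs two hard registers of its pressure class, then hoisting
   FROM shrinks that live range: BB's max pressure for the class drops
   by 2 and the function returns 2.  */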
2875 
2876 /* Determine if the expression EXPR should be hoisted to EXPR_BB up in
2877    the flow graph, if it can reach BB unimpaired.  Stop the search if the
2878    expression would need to be moved more than DISTANCE instructions.
2879 
2880    DISTANCE is the number of instructions through which EXPR can be
2881    hoisted up the flow graph.
2882 
2883    BB_SIZE points to an array which contains the number of instructions
2884    for each basic block.
2885 
2886    PRESSURE_CLASS and NREGS are register class and number of hard registers
2887    for storing EXPR.
2888 
2889    HOISTED_BBS points to a bitmap indicating basic blocks through which
2890    EXPR is hoisted.
2891 
2892    FROM is the instruction from which EXPR is hoisted.
2893 
2894    It's unclear exactly what Muchnick meant by "unimpaired".  It seems
2895    to me that the expression must either be computed or transparent in
2896    *every* block in the path(s) from EXPR_BB to BB.  Any other definition
2897    would allow the expression to be hoisted out of loops, even if
2898    the expression wasn't a loop invariant.
2899 
2900    Contrast this to reachability for PRE where an expression is
2901    considered reachable if *any* path reaches instead of *all*
2902    paths.  */
2903 
2904 static int
2905 should_hoist_expr_to_dom (basic_block expr_bb, struct gcse_expr *expr,
2906 			  basic_block bb, sbitmap visited,
2907 			  HOST_WIDE_INT distance,
2908 			  int *bb_size, enum reg_class pressure_class,
2909 			  int *nregs, bitmap hoisted_bbs, rtx_insn *from)
2910 {
2911   unsigned int i;
2912   edge pred;
2913   edge_iterator ei;
2914   sbitmap_iterator sbi;
2915   int visited_allocated_locally = 0;
2916   int decreased_pressure = 0;
2917 
2918   if (flag_ira_hoist_pressure)
2919     {
2920       /* Record the old information for basic block BB when it is
2921 	 visited for the first time.  */
2922       if (!bitmap_bit_p (hoisted_bbs, bb->index))
2923 	{
2924 	  struct bb_data *data = BB_DATA (bb);
2925 	  bitmap_copy (data->backup, data->live_in);
2926 	  data->old_pressure = data->max_reg_pressure[pressure_class];
2927 	}
2928       decreased_pressure = update_bb_reg_pressure (bb, from);
2929     }
2930   /* Terminate the search if the distance over which EXPR is allowed
2931      to move is exhausted.  */
2932   if (distance > 0)
2933     {
2934       if (flag_ira_hoist_pressure)
2935 	{
2936 	  /* Prefer to hoist EXPR if register pressure is decreased.  */
2937 	  if (decreased_pressure > *nregs)
2938 	    distance += bb_size[bb->index];
2939 	  /* Let EXPR be hoisted through a basic block at no cost if one
2940 	     of the following conditions is satisfied:
2941 
2942 	     1. The basic block has low register pressure.
2943 	     2. Register pressure won't be increased after hoisting EXPR.
2944 
2945 	     Constant expressions are handled conservatively, because
2946 	     hoisting constant expressions aggressively results in worse
2947 	     code.  This decision was based on observations of CSiBE
2948 	     on the ARM target; it has no obvious effect on other
2949 	     targets like x86, x86_64, mips and powerpc.  */
2950 	  else if (CONST_INT_P (expr->expr)
2951 		   || (BB_DATA (bb)->max_reg_pressure[pressure_class]
2952 			 >= ira_class_hard_regs_num[pressure_class]
2953 		       && decreased_pressure < *nregs))
2954 	    distance -= bb_size[bb->index];
2955 	}
2956       else
2957 	distance -= bb_size[bb->index];
2958 
2959       if (distance <= 0)
2960 	return 0;
2961     }
2962   else
2963     gcc_assert (distance == 0);
2964 
2965   if (visited == NULL)
2966     {
2967       visited_allocated_locally = 1;
2968       visited = sbitmap_alloc (last_basic_block_for_fn (cfun));
2969       bitmap_clear (visited);
2970     }
2971 
2972   FOR_EACH_EDGE (pred, ei, bb->preds)
2973     {
2974       basic_block pred_bb = pred->src;
2975 
2976       if (pred->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
2977 	break;
2978       else if (pred_bb == expr_bb)
2979 	continue;
2980       else if (bitmap_bit_p (visited, pred_bb->index))
2981 	continue;
2982       else if (! bitmap_bit_p (transp[pred_bb->index], expr->bitmap_index))
2983 	break;
2984       /* Not killed.  */
2985       else
2986 	{
2987 	  bitmap_set_bit (visited, pred_bb->index);
2988 	  if (! should_hoist_expr_to_dom (expr_bb, expr, pred_bb,
2989 					  visited, distance, bb_size,
2990 					  pressure_class, nregs,
2991 					  hoisted_bbs, from))
2992 	    break;
2993 	}
2994     }
2995   if (visited_allocated_locally)
2996     {
2997       /* If EXPR can be hoisted to expr_bb, record basic blocks through
2998 	 which EXPR is hoisted in hoisted_bbs.  */
2999       if (flag_ira_hoist_pressure && !pred)
3000 	{
3001 	  /* Record the basic block from which EXPR is hoisted.  */
3002 	  bitmap_set_bit (visited, bb->index);
3003 	  EXECUTE_IF_SET_IN_BITMAP (visited, 0, i, sbi)
3004 	    bitmap_set_bit (hoisted_bbs, i);
3005 	}
3006       sbitmap_free (visited);
3007     }
3008 
3009   return (pred == NULL);
3010 }
3011 
3012 /* Find occurrence in BB.  */
3013 
3014 static struct gcse_occr *
3015 find_occr_in_bb (struct gcse_occr *occr, basic_block bb)
3016 {
3017   /* Find the right occurrence of this expression.  */
3018   while (occr && BLOCK_FOR_INSN (occr->insn) != bb)
3019     occr = occr->next;
3020 
3021   return occr;
3022 }
3023 
3024 /* Actually perform code hoisting.
3025 
3026    The code hoisting pass can hoist multiple computations of the same
3027    expression along dominated paths to a dominating basic block, like
3028    from b2/b3 to b1 as depicted below:
3029 
3030           b1      ------
3031           /\         |
3032          /  \        |
3033         bx   by   distance
3034        /      \      |
3035       /        \     |
3036      b2        b3 ------
3037 
3038    Unfortunately code hoisting generally extends the live range of an
3039    output pseudo register, which increases register pressure and hurts
3040    register allocation.  To address this issue, an attribute MAX_DISTANCE
3041    is computed and attached to each expression.  The attribute is computed
3042    from the rtx cost of the corresponding expression and is used to control
3043    how far the expression can be hoisted up the flow graph.  As the
3044    expression is hoisted up the graph, GCC decreases its DISTANCE
3045    and stops the hoist if DISTANCE reaches 0.  Code hoisting can decrease
3046    register pressure if live ranges of inputs are shrunk.
3047 
3048    Option "-fira-hoist-pressure" implements register-pressure-directed
3049    hoisting based on the above method.  The rationale is:
3050      1. Calculate register pressure for each basic block by reusing IRA
3051 	facility.
3052      2. When expression is hoisted through one basic block, GCC checks
3053 	the change of live ranges for inputs/output.  The basic block's
3054 	register pressure will be increased because of extended live
3055 	range of output.  However, register pressure will be decreased
3056 	if the live ranges of inputs are shrunk.
3057      3. After knowing how hoisting affects register pressure, GCC prefers
3058 	to hoist the expression if it can decrease register pressure, by
3059 	increasing DISTANCE of the corresponding expression.
3060      4. If hoisting the expression increases register pressure, GCC checks
3061 	register pressure of the basic block and decrease DISTANCE only if
3062 	the register pressure is high.  In other words, the expression will
3063 	be hoisted through at no cost if the basic block has low register
3064 	pressure.
3065      5. Update register pressure information for basic blocks through
3066 	which expression is hoisted.  */
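
/* Worked example of the DISTANCE bookkeeping (hypothetical sizes):
   with max_distance = 40, hoisting an occurrence from b2 up to b1
   through bx first charges bb_size[b2], say 20, leaving 20, then
   bb_size[bx], say 25, which exhausts the budget, so
   should_hoist_expr_to_dom gives up (unless -fira-hoist-pressure
   extends the distance because register pressure decreases).  */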
3067 
3068 static int
3069 hoist_code (void)
3070 {
3071   basic_block bb, dominated;
3072   vec<basic_block> dom_tree_walk;
3073   unsigned int dom_tree_walk_index;
3074   vec<basic_block> domby;
3075   unsigned int i, j, k;
3076   struct gcse_expr **index_map;
3077   struct gcse_expr *expr;
3078   int *to_bb_head;
3079   int *bb_size;
3080   int changed = 0;
3081   struct bb_data *data;
3082   /* Basic blocks that have occurrences reachable from BB.  */
3083   bitmap from_bbs;
3084   /* Basic blocks through which expr is hoisted.  */
3085   bitmap hoisted_bbs = NULL;
3086   bitmap_iterator bi;
3087 
3088   /* Compute a mapping from expression number (`bitmap_index') to
3089      hash table entry.  */
3090 
3091   index_map = XCNEWVEC (struct gcse_expr *, expr_hash_table.n_elems);
3092   for (i = 0; i < expr_hash_table.size; i++)
3093     for (expr = expr_hash_table.table[i]; expr; expr = expr->next_same_hash)
3094       index_map[expr->bitmap_index] = expr;
3095 
3096   /* Calculate sizes of basic blocks and note how far
3097      each instruction is from the start of its block.  We then use this
3098      data to restrict distance an expression can travel.  */

  to_bb_head = XCNEWVEC (int, get_max_uid ());
  bb_size = XCNEWVEC (int, last_basic_block_for_fn (cfun));

  FOR_EACH_BB_FN (bb, cfun)
    {
      rtx_insn *insn;
      int to_head;

      to_head = 0;
      FOR_BB_INSNS (bb, insn)
	{
	  /* Don't count debug instructions to avoid them affecting
	     decision choices.  */
	  if (NONDEBUG_INSN_P (insn))
	    to_bb_head[INSN_UID (insn)] = to_head++;
	}

      bb_size[bb->index] = to_head;
    }

  gcc_assert (EDGE_COUNT (ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs) == 1
	      && (EDGE_SUCC (ENTRY_BLOCK_PTR_FOR_FN (cfun), 0)->dest
		  == ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb));

  from_bbs = BITMAP_ALLOC (NULL);
  if (flag_ira_hoist_pressure)
    hoisted_bbs = BITMAP_ALLOC (NULL);

  dom_tree_walk = get_all_dominated_blocks (CDI_DOMINATORS,
					    ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb);

  /* Walk over each basic block looking for potentially hoistable
     expressions; nothing gets hoisted from the entry block.  */
  FOR_EACH_VEC_ELT (dom_tree_walk, dom_tree_walk_index, bb)
    {
      domby = get_dominated_to_depth (CDI_DOMINATORS, bb,
				      param_max_hoist_depth);

      if (domby.length () == 0)
	continue;

      /* Examine each expression that is very busy at the exit of this
	 block.  These are the potentially hoistable expressions.  */
      for (i = 0; i < SBITMAP_SIZE (hoist_vbeout[bb->index]); i++)
	{
	  if (bitmap_bit_p (hoist_vbeout[bb->index], i))
	    {
	      int nregs = 0;
	      enum reg_class pressure_class = NO_REGS;
	      /* Current expression.  */
	      struct gcse_expr *expr = index_map[i];
	      /* Number of occurrences of EXPR that can be hoisted to BB.  */
	      int hoistable = 0;
	      /* Occurrences reachable from BB.  */
	      vec<occr_t> occrs_to_hoist = vNULL;
	      /* We want to insert the expression into BB only once, so
		 note when we've inserted it.  */
	      int insn_inserted_p;
	      occr_t occr;

	      /* If an expression is computed in BB and is available at the
		 end of BB, hoist all occurrences dominated by BB to BB.  */
	      if (bitmap_bit_p (comp[bb->index], i))
		{
		  occr = find_occr_in_bb (expr->antic_occr, bb);

		  if (occr)
		    {
		      /* An occurrence might already have been deleted
			 while processing a dominator of BB.  */
		      if (!occr->deleted_p)
			{
			  gcc_assert (NONDEBUG_INSN_P (occr->insn));
			  hoistable++;
			}
		    }
		  else
		    hoistable++;
		}

	      /* We've found a potentially hoistable expression, now
		 we look at every block BB dominates to see if it
		 computes the expression.  */
	      FOR_EACH_VEC_ELT (domby, j, dominated)
		{
		  HOST_WIDE_INT max_distance;

		  /* Ignore self dominance.  */
		  if (bb == dominated)
		    continue;
		  /* We've found a dominated block, now see if it computes
		     the busy expression and whether or not moving that
		     expression to the "beginning" of that block is safe.  */
		  if (!bitmap_bit_p (antloc[dominated->index], i))
		    continue;

		  occr = find_occr_in_bb (expr->antic_occr, dominated);
		  gcc_assert (occr);

		  /* An occurrence might already have been deleted
		     while processing a dominator of BB.  */
		  if (occr->deleted_p)
		    continue;
		  gcc_assert (NONDEBUG_INSN_P (occr->insn));

		  max_distance = expr->max_distance;
		  if (max_distance > 0)
		    /* Adjust MAX_DISTANCE to account for the fact that
		       OCCR won't have to travel all of DOMINATED, but
		       only part of it.  */
		    max_distance += (bb_size[dominated->index]
				     - to_bb_head[INSN_UID (occr->insn)]);

		  pressure_class = get_pressure_class_and_nregs (occr->insn,
								 &nregs);

		  /* Note if the expression should be hoisted from the
		     dominated block to BB if it can reach DOMINATED
		     unimpaired.

		     Keep track of how many times this expression is hoistable
		     from a dominated block into BB.  */
		  if (should_hoist_expr_to_dom (bb, expr, dominated, NULL,
						max_distance, bb_size,
						pressure_class, &nregs,
						hoisted_bbs, occr->insn))
		    {
		      hoistable++;
		      occrs_to_hoist.safe_push (occr);
		      bitmap_set_bit (from_bbs, dominated->index);
		    }
		}

	      /* If we found more than one hoistable occurrence of this
		 expression, then note it in the vector of expressions to
		 hoist.  It makes no sense to hoist things which are computed
		 in only one BB, and doing so tends to pessimize register
		 allocation.  One could increase this value to try harder
		 to avoid any possible code expansion due to register
		 allocation issues; however experiments have shown that
		 the vast majority of hoistable expressions are only movable
		 from two successors, so raising this threshold is likely
		 to nullify any benefit we get from code hoisting.  */
	      if (hoistable > 1 && dbg_cnt (hoist_insn))
		{
		  /* If (hoistable != vec::length), then there is
		     an occurrence of EXPR in BB itself.  Don't waste
		     time looking for the LCA in this case.  */
		  if ((unsigned) hoistable == occrs_to_hoist.length ())
		    {
		      basic_block lca;

		      lca = nearest_common_dominator_for_set (CDI_DOMINATORS,
							      from_bbs);
		      if (lca != bb)
			/* Punt, it's better to hoist these occurrences to
			   LCA.  */
			occrs_to_hoist.release ();
		    }
		}
	      else
		/* Punt, no point hoisting a single occurrence.  */
		occrs_to_hoist.release ();

	      if (flag_ira_hoist_pressure
		  && !occrs_to_hoist.is_empty ())
		{
		  /* Increase register pressure of basic blocks to which
		     expr is hoisted because of the extended live range
		     of the output.  */
		  data = BB_DATA (bb);
		  data->max_reg_pressure[pressure_class] += nregs;
		  EXECUTE_IF_SET_IN_BITMAP (hoisted_bbs, 0, k, bi)
		    {
		      data = BB_DATA (BASIC_BLOCK_FOR_FN (cfun, k));
		      data->max_reg_pressure[pressure_class] += nregs;
		    }
		}
	      else if (flag_ira_hoist_pressure)
		{
		  /* Restore register pressure and live_in info for basic
		     blocks recorded in hoisted_bbs when expr will not be
		     hoisted.  */
		  EXECUTE_IF_SET_IN_BITMAP (hoisted_bbs, 0, k, bi)
		    {
		      data = BB_DATA (BASIC_BLOCK_FOR_FN (cfun, k));
		      bitmap_copy (data->live_in, data->backup);
		      data->max_reg_pressure[pressure_class]
			  = data->old_pressure;
		    }
		}

	      if (flag_ira_hoist_pressure)
		bitmap_clear (hoisted_bbs);

	      insn_inserted_p = 0;

	      /* Walk through the occurrences of the I'th expression we want
		 to hoist to BB and make the transformations.  */
	      FOR_EACH_VEC_ELT (occrs_to_hoist, j, occr)
		{
		  rtx_insn *insn;
		  const_rtx set;

		  gcc_assert (!occr->deleted_p);

		  insn = occr->insn;
		  set = single_set_gcse (insn);

		  /* Create a pseudo-reg to store the result of reaching
		     expressions into.  Get the mode for the new pseudo
		     from the mode of the original destination pseudo.

		     It is important to use new pseudos whenever we
		     emit a set.  This will allow reload to use
		     rematerialization for such registers.  */
		  if (!insn_inserted_p)
		    expr->reaching_reg
		      = gen_reg_rtx_and_attrs (SET_DEST (set));

		  gcse_emit_move_after (SET_DEST (set), expr->reaching_reg,
					insn);
		  delete_insn (insn);
		  occr->deleted_p = 1;
		  changed = 1;
		  gcse_subst_count++;

		  if (!insn_inserted_p)
		    {
		      insert_insn_end_basic_block (expr, bb);
		      insn_inserted_p = 1;
		    }
		}

	      occrs_to_hoist.release ();
	      bitmap_clear (from_bbs);
	    }
	}
      domby.release ();
    }

  dom_tree_walk.release ();
  BITMAP_FREE (from_bbs);
  if (flag_ira_hoist_pressure)
    BITMAP_FREE (hoisted_bbs);

  free (bb_size);
  free (to_bb_head);
  free (index_map);

  return changed;
}

/* Return the pressure class and number of needed hard registers (through
   *NREGS) of register REGNO.  */
static enum reg_class
get_regno_pressure_class (int regno, int *nregs)
{
  if (regno >= FIRST_PSEUDO_REGISTER)
    {
      enum reg_class pressure_class;

      pressure_class = reg_allocno_class (regno);
      pressure_class = ira_pressure_class_translate[pressure_class];
      *nregs
	= ira_reg_class_max_nregs[pressure_class][PSEUDO_REGNO_MODE (regno)];
      return pressure_class;
    }
  else if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno)
	   && ! TEST_HARD_REG_BIT (eliminable_regset, regno))
    {
      *nregs = 1;
      return ira_pressure_class_translate[REGNO_REG_CLASS (regno)];
    }
  else
    {
      *nregs = 0;
      return NO_REGS;
    }
}

/* Return the pressure class and number of hard registers (through *NREGS)
   for the destination of INSN.  */
static enum reg_class
get_pressure_class_and_nregs (rtx_insn *insn, int *nregs)
{
  rtx reg;
  enum reg_class pressure_class;
  const_rtx set = single_set_gcse (insn);

  reg = SET_DEST (set);
  if (GET_CODE (reg) == SUBREG)
    reg = SUBREG_REG (reg);
  if (MEM_P (reg))
    {
      *nregs = 0;
      pressure_class = NO_REGS;
    }
  else
    {
      gcc_assert (REG_P (reg));
      pressure_class = reg_allocno_class (REGNO (reg));
      pressure_class = ira_pressure_class_translate[pressure_class];
      *nregs
	= ira_reg_class_max_nregs[pressure_class][GET_MODE (SET_SRC (set))];
    }
  return pressure_class;
}

/* Increase (if INCR_P) or decrease the current register pressure for
   register REGNO.  */
static void
change_pressure (int regno, bool incr_p)
{
  int nregs;
  enum reg_class pressure_class;

  pressure_class = get_regno_pressure_class (regno, &nregs);
  if (! incr_p)
    curr_reg_pressure[pressure_class] -= nregs;
  else
    {
      curr_reg_pressure[pressure_class] += nregs;
      if (BB_DATA (curr_bb)->max_reg_pressure[pressure_class]
	  < curr_reg_pressure[pressure_class])
	BB_DATA (curr_bb)->max_reg_pressure[pressure_class]
	  = curr_reg_pressure[pressure_class];
    }
}

/* Calculate register pressure for each basic block by walking insns
   from last to first.  */
static void
calculate_bb_reg_pressure (void)
{
  int i;
  unsigned int j;
  rtx_insn *insn;
  basic_block bb;
  bitmap curr_regs_live;
  bitmap_iterator bi;

  ira_setup_eliminable_regset ();
  curr_regs_live = BITMAP_ALLOC (&reg_obstack);
  FOR_EACH_BB_FN (bb, cfun)
    {
      curr_bb = bb;
      BB_DATA (bb)->live_in = BITMAP_ALLOC (NULL);
      BB_DATA (bb)->backup = BITMAP_ALLOC (NULL);
      bitmap_copy (BB_DATA (bb)->live_in, df_get_live_in (bb));
      bitmap_copy (curr_regs_live, df_get_live_out (bb));
      for (i = 0; i < ira_pressure_classes_num; i++)
	curr_reg_pressure[ira_pressure_classes[i]] = 0;
      EXECUTE_IF_SET_IN_BITMAP (curr_regs_live, 0, j, bi)
	change_pressure (j, true);

      FOR_BB_INSNS_REVERSE (bb, insn)
	{
	  rtx dreg;
	  int regno;
	  df_ref def, use;

	  if (! NONDEBUG_INSN_P (insn))
	    continue;

	  FOR_EACH_INSN_DEF (def, insn)
	    {
	      dreg = DF_REF_REAL_REG (def);
	      gcc_assert (REG_P (dreg));
	      regno = REGNO (dreg);
	      if (!(DF_REF_FLAGS (def)
		    & (DF_REF_PARTIAL | DF_REF_CONDITIONAL)))
		{
		  if (bitmap_clear_bit (curr_regs_live, regno))
		    change_pressure (regno, false);
		}
	    }

	  FOR_EACH_INSN_USE (use, insn)
	    {
	      dreg = DF_REF_REAL_REG (use);
	      gcc_assert (REG_P (dreg));
	      regno = REGNO (dreg);
	      if (bitmap_set_bit (curr_regs_live, regno))
		change_pressure (regno, true);
	    }
	}
    }
  BITMAP_FREE (curr_regs_live);

  if (dump_file == NULL)
    return;

  fprintf (dump_file, "\nRegister Pressure: \n");
  FOR_EACH_BB_FN (bb, cfun)
    {
      fprintf (dump_file, "  Basic block %d: \n", bb->index);
      for (i = 0; (int) i < ira_pressure_classes_num; i++)
	{
	  enum reg_class pressure_class;

	  pressure_class = ira_pressure_classes[i];
	  if (BB_DATA (bb)->max_reg_pressure[pressure_class] == 0)
	    continue;

	  fprintf (dump_file, "    %s=%d\n", reg_class_names[pressure_class],
		   BB_DATA (bb)->max_reg_pressure[pressure_class]);
	}
    }
  fprintf (dump_file, "\n");
}
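
/* A worked example of the backward scan above, with hypothetical
   registers: if live-out of a block is {r1, r2, r3} and its last insn is
   "r3 = r1 + r2", then scanning backward the def of r3 removes r3 from
   the live set (pressure drops by r3's register count), while the uses
   r1 and r2 are already live and change nothing, so the live set before
   the insn is {r1, r2}.  Whenever an insn makes a register newly live,
   change_pressure bumps the running count and may raise the block's
   recorded max_reg_pressure for that pressure class.  */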

/* Top level routine to perform one code hoisting (aka unification) pass.

   Return nonzero if a change was made.  */

static int
one_code_hoisting_pass (void)
{
  int changed = 0;

  gcse_subst_count = 0;
  gcse_create_count = 0;

  /* Return if there's nothing to do, or it is too expensive.  */
  if (n_basic_blocks_for_fn (cfun) <= NUM_FIXED_BLOCKS + 1
      || gcse_or_cprop_is_too_expensive (_("GCSE disabled")))
    return 0;

  doing_code_hoisting_p = true;

  /* Calculate register pressure for each basic block.  */
  if (flag_ira_hoist_pressure)
    {
      regstat_init_n_sets_and_refs ();
      ira_set_pseudo_classes (false, dump_file);
      alloc_aux_for_blocks (sizeof (struct bb_data));
      calculate_bb_reg_pressure ();
      regstat_free_n_sets_and_refs ();
    }

  /* We need alias analysis.  */
  init_alias_analysis ();

  bytes_used = 0;
  gcc_obstack_init (&gcse_obstack);
  alloc_gcse_mem ();

  alloc_hash_table (&expr_hash_table);
  compute_hash_table (&expr_hash_table);
  if (dump_file)
    dump_hash_table (dump_file, "Code Hoisting Expressions", &expr_hash_table);

  if (expr_hash_table.n_elems > 0)
    {
      alloc_code_hoist_mem (last_basic_block_for_fn (cfun),
			    expr_hash_table.n_elems);
      compute_code_hoist_data ();
      changed = hoist_code ();
      free_code_hoist_mem ();
    }

  if (flag_ira_hoist_pressure)
    {
      free_aux_for_blocks ();
      free_reg_info ();
    }
  free_hash_table (&expr_hash_table);
  free_gcse_mem ();
  obstack_free (&gcse_obstack, NULL);

  /* We are finished with alias analysis.  */
  end_alias_analysis ();

  if (dump_file)
    {
      fprintf (dump_file, "HOIST of %s, %d basic blocks, %d bytes needed, ",
	       current_function_name (), n_basic_blocks_for_fn (cfun),
	       bytes_used);
      fprintf (dump_file, "%d substs, %d insns created\n",
	       gcse_subst_count, gcse_create_count);
    }

  doing_code_hoisting_p = false;

  return changed;
}

/* Here we provide the things required to do store motion towards the exit.
   In order for this to be effective, gcse also needed to be taught how to
   move a load when it is killed only by a store to itself.

	    int i;
	    float a[10];

	    void foo (float scale)
	    {
	      for (i = 0; i < 10; i++)
		a[i] *= scale;
	    }

    'i' is both loaded and stored to in the loop.  Normally, gcse cannot move
    the load out since it's live around the loop and is stored to at the
    bottom of the loop.

      The 'Load Motion' referred to and implemented in this file is
    an enhancement to gcse which, when using edge-based LCM, recognizes
    this situation and allows gcse to move the load out of the loop.

      Once gcse has hoisted the load, store motion can then push this
    load towards the exit, and we end up with no loads or stores of 'i'
    in the loop.  */

/* This will search the ldst list for a matching expression.  If it
   doesn't find one, we create one and initialize it.  */

static struct ls_expr *
ldst_entry (rtx x)
{
  int do_not_record_p = 0;
  struct ls_expr * ptr;
  unsigned int hash;
  ls_expr **slot;
  struct ls_expr e;

  hash = hash_rtx (x, GET_MODE (x), &do_not_record_p,
		   NULL,  /*have_reg_qty=*/false);

  e.pattern = x;
  slot = pre_ldst_table->find_slot_with_hash (&e, hash, INSERT);
  if (*slot)
    return *slot;

  ptr = XNEW (struct ls_expr);

  ptr->next         = pre_ldst_mems;
  ptr->expr         = NULL;
  ptr->pattern      = x;
  ptr->pattern_regs = NULL_RTX;
  ptr->stores.create (0);
  ptr->reaching_reg = NULL_RTX;
  ptr->invalid      = 0;
  ptr->index        = 0;
  ptr->hash_index   = hash;
  pre_ldst_mems     = ptr;
  *slot = ptr;

  return ptr;
}

/* Free up an individual ldst entry.  */

static void
free_ldst_entry (struct ls_expr * ptr)
{
  ptr->stores.release ();

  free (ptr);
}

/* Free up all memory associated with the ldst list.  */

static void
free_ld_motion_mems (void)
{
  delete pre_ldst_table;
  pre_ldst_table = NULL;

  while (pre_ldst_mems)
    {
      struct ls_expr * tmp = pre_ldst_mems;

      pre_ldst_mems = pre_ldst_mems->next;

      free_ldst_entry (tmp);
    }

  pre_ldst_mems = NULL;
}

/* Dump debugging info about the ldst list.  */

static void
print_ldst_list (FILE * file)
{
  struct ls_expr * ptr;

  fprintf (file, "LDST list: \n");

  for (ptr = pre_ldst_mems; ptr != NULL; ptr = ptr->next)
    {
      fprintf (file, "  Pattern (%3d): ", ptr->index);

      print_rtl (file, ptr->pattern);

      fprintf (file, "\n	Stores : ");
      print_rtx_insn_vec (file, ptr->stores);

      fprintf (file, "\n\n");
    }

  fprintf (file, "\n");
}

/* Return the ldst list entry whose pattern matches X, or NULL if there
   is none or the matching entry has been invalidated.  */

static struct ls_expr *
find_rtx_in_ldst (rtx x)
{
  struct ls_expr e;
  ls_expr **slot;
  if (!pre_ldst_table)
    return NULL;
  e.pattern = x;
  slot = pre_ldst_table->find_slot (&e, NO_INSERT);
  if (!slot || (*slot)->invalid)
    return NULL;
  return *slot;
}

/* Load Motion for loads which only kill themselves.  */

/* Return true if X, a MEM, is a simple access with no side effects.
   These are the types of loads we consider for the ld_motion list;
   otherwise we let the usual aliasing take care of it.  */

static int
simple_mem (const_rtx x)
{
  if (MEM_VOLATILE_P (x))
    return 0;

  if (GET_MODE (x) == BLKmode)
    return 0;

  /* If we are handling exceptions, we must be careful with memory references
     that may trap.  If we are not, the behavior is undefined, so we may just
     continue.  */
  if (cfun->can_throw_non_call_exceptions && may_trap_p (x))
    return 0;

  if (side_effects_p (x))
    return 0;

  /* Do not consider function arguments passed on the stack.  */
  if (reg_mentioned_p (stack_pointer_rtx, x))
    return 0;

  if (flag_float_store && FLOAT_MODE_P (GET_MODE (x)))
    return 0;

  return 1;
}
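
/* For example (hypothetical RTL), (mem:SI (symbol_ref ("x"))) would pass
   simple_mem, while a volatile MEM, a BLKmode MEM, or
   (mem:SI (plus (reg sp) ...)) -- which mentions the stack pointer --
   all fail the tests above and are left to the generic aliasing
   machinery.  */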

/* Make sure there isn't a buried reference in this pattern anywhere.
   If there is, invalidate the entry for it since we're not capable
   of fixing it up just yet.  We have to be sure we know about ALL
   loads since the aliasing code will allow all entries in the
   ld_motion list to not-alias one another.  If we miss a load, we will
   get the wrong value since gcse might common it and we won't know to
   fix it up.  */

static void
invalidate_any_buried_refs (rtx x)
{
  const char * fmt;
  int i, j;
  struct ls_expr * ptr;

  /* Invalidate it in the list.  */
  if (MEM_P (x) && simple_mem (x))
    {
      ptr = ldst_entry (x);
      ptr->invalid = 1;
    }

  /* Recursively process the insn.  */
  fmt = GET_RTX_FORMAT (GET_CODE (x));

  for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	invalidate_any_buried_refs (XEXP (x, i));
      else if (fmt[i] == 'E')
	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	  invalidate_any_buried_refs (XVECEXP (x, i, j));
    }
}
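
/* A buried reference is a MEM nested inside a larger expression.  In the
   hypothetical RTL

	(set (reg:SI 100)
	     (plus:SI (mem:SI (symbol_ref ("x"))) (const_int 1)))

   the load is not the whole SET_SRC, so we could not patch it with a
   reaching register; the recursive walk above finds such MEMs and marks
   their ldst entries invalid.  */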

/* Find all the 'simple' MEMs which are used in LOADs and STORES.  Simple
   being defined as MEM loads and stores to symbols, with no side effects
   and no registers in the expression.  For a MEM destination, we also
   check that the insn is still valid if we replace the destination with a
   REG, as is done in update_ld_motion_stores.  Any uses/defs which don't
   match these criteria are invalidated and trimmed out later.  */

static void
compute_ld_motion_mems (void)
{
  struct ls_expr * ptr;
  basic_block bb;
  rtx_insn *insn;

  pre_ldst_mems = NULL;
  pre_ldst_table = new hash_table<pre_ldst_expr_hasher> (13);

  FOR_EACH_BB_FN (bb, cfun)
    {
      FOR_BB_INSNS (bb, insn)
	{
	  if (NONDEBUG_INSN_P (insn))
	    {
	      if (GET_CODE (PATTERN (insn)) == SET)
		{
		  rtx src = SET_SRC (PATTERN (insn));
		  rtx dest = SET_DEST (PATTERN (insn));

		  /* Check for a simple load.  */
		  if (MEM_P (src) && simple_mem (src))
		    {
		      ptr = ldst_entry (src);
		      if (!REG_P (dest))
			ptr->invalid = 1;
		    }
		  else
		    {
		      /* Make sure there isn't a buried load somewhere.  */
		      invalidate_any_buried_refs (src);
		    }

		  /* Check for a simple load through a REG_EQUAL note.  */
		  rtx note = find_reg_equal_equiv_note (insn), src_eq;
		  if (note
		      && REG_NOTE_KIND (note) == REG_EQUAL
		      && (src_eq = XEXP (note, 0))
		      && !(MEM_P (src_eq) && simple_mem (src_eq)))
		    invalidate_any_buried_refs (src_eq);

		  /* Check for stores.  Don't worry about aliased ones; they
		     will block any movement we might do later.  We only care
		     about this exact pattern since that is the only
		     circumstance in which we will ignore the aliasing
		     info.  */
		  if (MEM_P (dest) && simple_mem (dest))
		    {
		      ptr = ldst_entry (dest);
		      machine_mode src_mode = GET_MODE (src);
		      if (! MEM_P (src)
			  && GET_CODE (src) != ASM_OPERANDS
			  /* Check for REG manually since want_to_gcse_p
			     returns 0 for all REGs.  */
			  && can_assign_to_reg_without_clobbers_p (src,
								   src_mode))
			ptr->stores.safe_push (insn);
		      else
			ptr->invalid = 1;
		    }
		}
	      else
		{
		  /* Invalidate all MEMs in the pattern and...  */
		  invalidate_any_buried_refs (PATTERN (insn));

		  /* ...in REG_EQUAL notes for PARALLELs with a single
		     SET.  */
		  rtx note = find_reg_equal_equiv_note (insn), src_eq;
		  if (note
		      && REG_NOTE_KIND (note) == REG_EQUAL
		      && (src_eq = XEXP (note, 0)))
		    invalidate_any_buried_refs (src_eq);
		}
	    }
	}
    }
}

/* Remove any references that have been either invalidated or are not in the
   expression list for pre gcse.  */

static void
trim_ld_motion_mems (void)
{
  struct ls_expr * * last = & pre_ldst_mems;
  struct ls_expr * ptr = pre_ldst_mems;

  while (ptr != NULL)
    {
      struct gcse_expr * expr;

      /* Delete if entry has been made invalid.  */
      if (! ptr->invalid)
	{
	  /* Delete if we cannot find this mem in the expression list.  */
	  unsigned int hash = ptr->hash_index % expr_hash_table.size;

	  for (expr = expr_hash_table.table[hash];
	       expr != NULL;
	       expr = expr->next_same_hash)
	    if (expr_equiv_p (expr->expr, ptr->pattern))
	      break;
	}
      else
	expr = (struct gcse_expr *) 0;

      if (expr)
	{
	  /* Set the expression field if we are keeping it.  */
	  ptr->expr = expr;
	  last = & ptr->next;
	  ptr = ptr->next;
	}
      else
	{
	  *last = ptr->next;
	  pre_ldst_table->remove_elt_with_hash (ptr, ptr->hash_index);
	  free_ldst_entry (ptr);
	  ptr = * last;
	}
    }

  /* Show the world what we've found.  */
  if (dump_file && pre_ldst_mems != NULL)
    print_ldst_list (dump_file);
}

/* This routine will take an expression which we are replacing with
   a reaching register, and update any stores that are needed if
   that expression is in the ld_motion list.  Stores are updated by
   copying their SRC to the reaching register, and then storing
   the reaching register into the store location.  This keeps the
   correct value in the reaching register for the loads.  */
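
/* For example (hypothetical RTL), a store

	(set (mem:SI (symbol_ref ("i"))) (reg:SI 65))

   whose MEM is in the ld_motion list with reaching register (reg:SI 100)
   becomes the pair

	(set (reg:SI 100) (reg:SI 65))
	(set (mem:SI (symbol_ref ("i"))) (reg:SI 100))

   so that (reg:SI 100) always holds the current value of the memory
   location for any loads that were moved.  */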

static void
update_ld_motion_stores (struct gcse_expr * expr)
{
  struct ls_expr * mem_ptr;

  if ((mem_ptr = find_rtx_in_ldst (expr->expr)))
    {
      /* We could try to find just the REACHED stores, but it shouldn't
	 matter to set the reaching reg everywhere...  some might be
	 dead and should be eliminated later.  */

      /* We replace (set mem expr) with (set reg expr) (set mem reg)
	 where reg is the reaching reg used in the load.  We checked in
	 compute_ld_motion_mems that we can replace (set mem expr) with
	 (set reg expr) in that insn.  */
      rtx_insn *insn;
      unsigned int i;
      FOR_EACH_VEC_ELT_REVERSE (mem_ptr->stores, i, insn)
	{
	  rtx pat = PATTERN (insn);
	  rtx src = SET_SRC (pat);
	  rtx reg = expr->reaching_reg;

	  /* If we've already copied it, continue.  */
	  if (expr->reaching_reg == src)
	    continue;

	  if (dump_file)
	    {
	      fprintf (dump_file, "PRE:  store updated with reaching reg ");
	      print_rtl (dump_file, reg);
	      fprintf (dump_file, ":\n	");
	      print_inline_rtx (dump_file, insn, 8);
	      fprintf (dump_file, "\n");
	    }

	  rtx_insn *copy = gen_move_insn (reg, copy_rtx (SET_SRC (pat)));
	  emit_insn_before (copy, insn);
	  SET_SRC (pat) = reg;
	  df_insn_rescan (insn);

	  /* Un-recognize this pattern since it's probably different now.  */
	  INSN_CODE (insn) = -1;
	  gcse_create_count++;
	}
    }
}

/* Return true if the graph is too expensive to optimize.  PASS is the
   optimization about to be performed.  */

bool
gcse_or_cprop_is_too_expensive (const char *pass)
{
  unsigned HOST_WIDE_INT memory_request
    = ((unsigned HOST_WIDE_INT) n_basic_blocks_for_fn (cfun)
       * SBITMAP_SET_SIZE (max_reg_num ()) * sizeof (SBITMAP_ELT_TYPE));
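
  /* Rough worked example (assuming 64-bit sbitmap elements): with 10000
     basic blocks and 100000 registers, SBITMAP_SET_SIZE yields 1563
     elements per bitmap, so memory_request is about 10000 * 1563 * 8
     bytes, i.e. roughly 125 MB, which is then compared in kilobytes
     against --param max-gcse-memory below.  */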

  /* Trying to perform global optimizations on flow graphs which have
     a high connectivity will take a long time and is unlikely to be
     particularly useful.

     In normal circumstances a cfg should have about twice as many
     edges as blocks.  But we do not want to punish small functions
     which have a couple of switch statements.  Rather than simply
     thresholding the number of blocks, use something with more
     graceful degradation.  */
  if (n_edges_for_fn (cfun) > 20000 + n_basic_blocks_for_fn (cfun) * 4)
    {
      warning (OPT_Wdisabled_optimization,
	       "%s: %d basic blocks and %d edges/basic block",
	       pass, n_basic_blocks_for_fn (cfun),
	       n_edges_for_fn (cfun) / n_basic_blocks_for_fn (cfun));

      return true;
    }

  /* If allocating memory for the dataflow bitmaps would take up too much
     storage it's better just to disable the optimization.  */
  if (memory_request / 1024 > (unsigned HOST_WIDE_INT) param_max_gcse_memory)
    {
      warning (OPT_Wdisabled_optimization,
	       "%s: %d basic blocks and %d registers; "
	       "increase %<--param max-gcse-memory%> above %wu",
	       pass, n_basic_blocks_for_fn (cfun), max_reg_num (),
	       memory_request / 1024);

      return true;
    }

  return false;
}

static unsigned int
execute_rtl_pre (void)
{
  int changed;
  delete_unreachable_blocks ();
  df_analyze ();
  changed = one_pre_gcse_pass ();
  flag_rerun_cse_after_global_opts |= changed;
  if (changed)
    cleanup_cfg (0);
  return 0;
}

static unsigned int
execute_rtl_hoist (void)
{
  int changed;
  delete_unreachable_blocks ();
  df_analyze ();
  changed = one_code_hoisting_pass ();
  flag_rerun_cse_after_global_opts |= changed;
  if (changed)
    cleanup_cfg (0);
  return 0;
}

namespace {

const pass_data pass_data_rtl_pre =
{
  RTL_PASS, /* type */
  "rtl pre", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_PRE, /* tv_id */
  PROP_cfglayout, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_df_finish, /* todo_flags_finish */
};

class pass_rtl_pre : public rtl_opt_pass
{
public:
  pass_rtl_pre (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_rtl_pre, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *);
  virtual unsigned int execute (function *) { return execute_rtl_pre (); }

}; // class pass_rtl_pre

/* We do not construct an accurate cfg in functions which call
   setjmp, so none of these passes runs if the function calls
   setjmp.
   FIXME: Should just handle setjmp via REG_SETJMP notes.  */

bool
pass_rtl_pre::gate (function *fun)
{
  return optimize > 0 && flag_gcse
    && !fun->calls_setjmp
    && optimize_function_for_speed_p (fun)
    && dbg_cnt (pre);
}

} // anon namespace

rtl_opt_pass *
make_pass_rtl_pre (gcc::context *ctxt)
{
  return new pass_rtl_pre (ctxt);
}

namespace {

const pass_data pass_data_rtl_hoist =
{
  RTL_PASS, /* type */
  "hoist", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_HOIST, /* tv_id */
  PROP_cfglayout, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_df_finish, /* todo_flags_finish */
};

class pass_rtl_hoist : public rtl_opt_pass
{
public:
  pass_rtl_hoist (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_rtl_hoist, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *);
  virtual unsigned int execute (function *) { return execute_rtl_hoist (); }

}; // class pass_rtl_hoist

bool
pass_rtl_hoist::gate (function *)
{
  return optimize > 0 && flag_gcse
    && !cfun->calls_setjmp
    /* It does not make sense to run code hoisting unless we are optimizing
       for code size -- it rarely makes programs faster, and can make them
       bigger if we did PRE (when optimizing for space, we don't run PRE).  */
    && optimize_function_for_size_p (cfun)
    && dbg_cnt (hoist);
}

} // anon namespace

rtl_opt_pass *
make_pass_rtl_hoist (gcc::context *ctxt)
{
  return new pass_rtl_hoist (ctxt);
}

/* Reset all state within gcse.c so that we can rerun the compiler
   within the same process.  For use by toplev::finalize.  */

void
gcse_c_finalize (void)
{
  test_insn = NULL;
}

#include "gt-gcse.h"