/* Partial redundancy elimination / Hoisting for RTL.
   Copyright (C) 1997-2016 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* TODO
   - reordering of memory allocation and freeing to be more space efficient
   - calc rough register pressure information and use the info to drive all
     kinds of code motion (including code hoisting) in a unified way.
*/
25
26 /* References searched while implementing this.
27
28 Compilers Principles, Techniques and Tools
29 Aho, Sethi, Ullman
30 Addison-Wesley, 1988
31
32 Global Optimization by Suppression of Partial Redundancies
33 E. Morel, C. Renvoise
34 communications of the acm, Vol. 22, Num. 2, Feb. 1979
35
36 A Portable Machine-Independent Global Optimizer - Design and Measurements
37 Frederick Chow
38 Stanford Ph.D. thesis, Dec. 1983
39
40 A Fast Algorithm for Code Movement Optimization
41 D.M. Dhamdhere
42 SIGPLAN Notices, Vol. 23, Num. 10, Oct. 1988
43
44 A Solution to a Problem with Morel and Renvoise's
45 Global Optimization by Suppression of Partial Redundancies
46 K-H Drechsler, M.P. Stadel
47 ACM TOPLAS, Vol. 10, Num. 4, Oct. 1988
48
49 Practical Adaptation of the Global Optimization
50 Algorithm of Morel and Renvoise
51 D.M. Dhamdhere
52 ACM TOPLAS, Vol. 13, Num. 2. Apr. 1991
53
54 Efficiently Computing Static Single Assignment Form and the Control
55 Dependence Graph
56 R. Cytron, J. Ferrante, B.K. Rosen, M.N. Wegman, and F.K. Zadeck
57 ACM TOPLAS, Vol. 13, Num. 4, Oct. 1991
58
59 Lazy Code Motion
60 J. Knoop, O. Ruthing, B. Steffen
61 ACM SIGPLAN Notices Vol. 27, Num. 7, Jul. 1992, '92 Conference on PLDI
62
63 What's In a Region? Or Computing Control Dependence Regions in Near-Linear
64 Time for Reducible Flow Control
65 Thomas Ball
66 ACM Letters on Programming Languages and Systems,
67 Vol. 2, Num. 1-4, Mar-Dec 1993
68
69 An Efficient Representation for Sparse Sets
70 Preston Briggs, Linda Torczon
71 ACM Letters on Programming Languages and Systems,
72 Vol. 2, Num. 1-4, Mar-Dec 1993
73
74 A Variation of Knoop, Ruthing, and Steffen's Lazy Code Motion
75 K-H Drechsler, M.P. Stadel
76 ACM SIGPLAN Notices, Vol. 28, Num. 5, May 1993
77
78 Partial Dead Code Elimination
79 J. Knoop, O. Ruthing, B. Steffen
80 ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994
81
82 Effective Partial Redundancy Elimination
83 P. Briggs, K.D. Cooper
84 ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994
85
86 The Program Structure Tree: Computing Control Regions in Linear Time
87 R. Johnson, D. Pearson, K. Pingali
88 ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994
89
90 Optimal Code Motion: Theory and Practice
91 J. Knoop, O. Ruthing, B. Steffen
92 ACM TOPLAS, Vol. 16, Num. 4, Jul. 1994
93
94 The power of assignment motion
95 J. Knoop, O. Ruthing, B. Steffen
96 ACM SIGPLAN Notices Vol. 30, Num. 6, Jun. 1995, '95 Conference on PLDI
97
98 Global code motion / global value numbering
99 C. Click
100 ACM SIGPLAN Notices Vol. 30, Num. 6, Jun. 1995, '95 Conference on PLDI
101
102 Value Driven Redundancy Elimination
103 L.T. Simpson
104 Rice University Ph.D. thesis, Apr. 1996
105
106 Value Numbering
107 L.T. Simpson
108 Massively Scalar Compiler Project, Rice University, Sep. 1996
109
110 High Performance Compilers for Parallel Computing
111 Michael Wolfe
112 Addison-Wesley, 1996
113
114 Advanced Compiler Design and Implementation
115 Steven Muchnick
116 Morgan Kaufmann, 1997
117
118 Building an Optimizing Compiler
119 Robert Morgan
120 Digital Press, 1998
121
122 People wishing to speed up the code here should read:
123 Elimination Algorithms for Data Flow Analysis
124 B.G. Ryder, M.C. Paull
125 ACM Computing Surveys, Vol. 18, Num. 3, Sep. 1986
126
127 How to Analyze Large Programs Efficiently and Informatively
128 D.M. Dhamdhere, B.K. Rosen, F.K. Zadeck
129 ACM SIGPLAN Notices Vol. 27, Num. 7, Jul. 1992, '92 Conference on PLDI
130
131 People wishing to do something different can find various possibilities
132 in the above papers and elsewhere.
133 */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "predict.h"
#include "df.h"
#include "tm_p.h"
#include "insn-config.h"
#include "regs.h"
#include "ira.h"
#include "recog.h"
#include "diagnostic-core.h"
#include "cfgrtl.h"
#include "cfganal.h"
#include "lcm.h"
#include "cfgcleanup.h"
#include "expr.h"
#include "params.h"
#include "intl.h"
#include "tree-pass.h"
#include "dbgcnt.h"
#include "gcse.h"
#include "gcse-common.h"

/* We support GCSE via Partial Redundancy Elimination.  PRE optimizations
   are a superset of those done by classic GCSE.

   Two passes of copy/constant propagation are done around PRE or hoisting
   because the first one enables more GCSE and the second one helps to clean
   up the copies that PRE and HOIST create.  This is needed more for PRE than
   for HOIST because code hoisting will try to use an existing register
   containing the common subexpression rather than create a new one.  This is
   harder to do for PRE because of the code motion (which HOIST doesn't do).

   Expressions we are interested in GCSE-ing are of the form
   (set (pseudo-reg) (expression)).
   Function want_to_gcse_p says what these are.

   In addition, expressions in REG_EQUAL notes are candidates for GCSE-ing.
   This allows PRE to hoist expressions that are expressed in multiple insns,
   such as complex address calculations (e.g. for PIC code, or loads with a
   high part and a low part).

   PRE handles moving invariant expressions out of loops (by treating them as
   partially redundant).

   **********************

   We used to support multiple passes but there are diminishing returns in
   doing so.  The first pass usually makes 90% of the changes that are doable.
   A second pass can make a few more changes made possible by the first pass.
   Experiments show any further passes don't make enough changes to justify
   the expense.

   A study of spec92 using an unlimited number of passes:
   [1 pass] = 1208 substitutions, [2] = 577, [3] = 202, [4] = 192, [5] = 83,
   [6] = 34, [7] = 17, [8] = 9, [9] = 4, [10] = 4, [11] = 2,
   [12] = 2, [13] = 1, [15] = 1, [16] = 2, [41] = 1

   It was found that doing copy propagation between each pass enables further
   substitutions.

   This study was done before expressions in REG_EQUAL notes were added as
   candidate expressions for optimization, and before the GIMPLE optimizers
   were added.  Multiple passes are probably even less efficient now than
   they were at the time the study was conducted.

   PRE is quite expensive in complicated functions because the DFA can take
   a while to converge.  Hence we only perform one pass.

   **********************

   The steps for PRE are:

   1) Build the hash table of expressions we wish to GCSE (expr_hash_table).

   2) Perform the data flow analysis for PRE.

   3) Delete the redundant instructions.

   4) Insert the required copies [if any] that make the partially
      redundant instructions fully redundant.

   5) For other reaching expressions, insert an instruction to copy the value
      to a newly created pseudo that will reach the redundant instruction.

   The deletion is done first so that when we do insertions we
   know which pseudo reg to use.

   Various papers have argued that PRE DFA is expensive (O(n^2)) and others
   argue it is not.  The number of iterations for the algorithm to converge
   is typically 2-4 so I don't view it as that expensive (relatively speaking).

   PRE GCSE depends heavily on the second CPROP pass to clean up the copies
   we create.  To make an expression reach the place where it's redundant,
   the result of the expression is copied to a new register, and the redundant
   expression is deleted by replacing it with this new register.  Classic GCSE
   doesn't have this problem as much because it computes the reaching defs of
   each register in each block and thus can try to use an existing
   register.  */
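
/* A hedged illustration of the transformation described above (a made-up
   fragment, not taken from any real testcase):

       if (cond)                      if (cond)
         x = a + b;                     { t = a + b; x = t; }
       else                 ==>       else
         ...                            { ...; t = a + b; }
       y = a + b;                     y = t;

   The computation of a+b is partially redundant: it reaches the final
   use on one path but not the other.  Inserting a computation of a+b
   into the new pseudo T on the path that lacked it makes the final
   computation fully redundant, so it can be replaced by a copy from T.  */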

/* GCSE global vars.  */

struct target_gcse default_target_gcse;
#if SWITCHABLE_TARGET
struct target_gcse *this_target_gcse = &default_target_gcse;
#endif

/* Set to non-zero if CSE should run after all GCSE optimizations are done.  */
int flag_rerun_cse_after_global_opts;

/* An obstack for our working variables.  */
static struct obstack gcse_obstack;

/* Hash table of expressions.  */

struct gcse_expr
{
  /* The expression.  */
  rtx expr;
  /* Index in the available expression bitmaps.  */
  int bitmap_index;
  /* Next entry with the same hash.  */
  struct gcse_expr *next_same_hash;
  /* List of anticipatable occurrences in basic blocks in the function.
     An "anticipatable occurrence" is one that is the first occurrence in the
     basic block, the operands are not modified in the basic block prior
     to the occurrence and the output is not used between the start of
     the block and the occurrence.  */
  struct gcse_occr *antic_occr;
  /* List of available occurrences in basic blocks in the function.
     An "available occurrence" is one that is the last occurrence in the
     basic block and the operands are not modified by following statements in
     the basic block [including this insn].  */
  struct gcse_occr *avail_occr;
  /* Non-null if the computation is PRE redundant.
     The value is the newly created pseudo-reg to record a copy of the
     expression in all the places that reach the redundant copy.  */
  rtx reaching_reg;
  /* Maximum distance in instructions this expression can travel.
     We avoid moving simple expressions for more than a few instructions
     to keep register pressure under control.
     A value of "0" removes restrictions on how far the expression can
     travel.  */
  HOST_WIDE_INT max_distance;
};

/* Occurrence of an expression.
   There is one per basic block.  If a pattern appears more than once the
   last appearance is used [or first for anticipatable expressions].  */

struct gcse_occr
{
  /* Next occurrence of this expression.  */
  struct gcse_occr *next;
  /* The insn that computes the expression.  */
  rtx_insn *insn;
  /* Nonzero if this [anticipatable] occurrence has been deleted.  */
  char deleted_p;
  /* Nonzero if this [available] occurrence has been copied to
     reaching_reg.  */
  /* ??? This is mutually exclusive with deleted_p, so they could share
     the same byte.  */
  char copied_p;
};

typedef struct gcse_occr *occr_t;

/* Expression hash tables.
   Each hash table is an array of buckets.
   ??? It is known that if it were an array of entries, structure elements
   `next_same_hash' and `bitmap_index' wouldn't be necessary.  However, it is
   not clear whether in the final analysis a sufficient amount of memory would
   be saved as the size of the available expression bitmaps would be larger
   [one could build a mapping table without holes afterwards though].
   Someday I'll perform the computation and figure it out.  */

struct gcse_hash_table_d
{
  /* The table itself.
     This is an array of `expr_hash_table_size' elements.  */
  struct gcse_expr **table;

  /* Size of the hash table, in elements.  */
  unsigned int size;

  /* Number of hash table elements.  */
  unsigned int n_elems;
};

/* Expression hash table.  */
static struct gcse_hash_table_d expr_hash_table;

/* This is a list of expressions which are MEMs and will be used by load
   or store motion.
   Load motion tracks MEMs which aren't killed by anything except itself,
   i.e. loads and stores to a single location.
   We can then allow movement of these MEM refs with a little special
   allowance.  (all stores copy the same value to the reaching reg used
   for the loads).  This means all values used to store into memory must have
   no side effects so we can re-issue the setter value.  */

struct ls_expr
{
  struct gcse_expr * expr;      /* Gcse expression reference for LM.  */
  rtx pattern;                  /* Pattern of this mem.  */
  rtx pattern_regs;             /* List of registers mentioned by the mem.  */
  rtx_insn_list *loads;         /* INSN list of loads seen.  */
  rtx_insn_list *stores;        /* INSN list of stores seen.  */
  struct ls_expr * next;        /* Next in the list.  */
  int invalid;                  /* Invalid for some reason.  */
  int index;                    /* If it maps to a bitmap index.  */
  unsigned int hash_index;      /* Index when in a hash table.  */
  rtx reaching_reg;             /* Register to use when re-writing.  */
};

/* Head of the list of load/store memory refs.  */
static struct ls_expr * pre_ldst_mems = NULL;

struct pre_ldst_expr_hasher : nofree_ptr_hash <ls_expr>
{
  typedef value_type compare_type;
  static inline hashval_t hash (const ls_expr *);
  static inline bool equal (const ls_expr *, const ls_expr *);
};

/* Hashtable helpers.  */
inline hashval_t
pre_ldst_expr_hasher::hash (const ls_expr *x)
{
  int do_not_record_p = 0;
  return
    hash_rtx (x->pattern, GET_MODE (x->pattern), &do_not_record_p, NULL, false);
}

static int expr_equiv_p (const_rtx, const_rtx);

inline bool
pre_ldst_expr_hasher::equal (const ls_expr *ptr1,
			     const ls_expr *ptr2)
{
  return expr_equiv_p (ptr1->pattern, ptr2->pattern);
}

/* Hashtable for the load/store memory refs.  */
static hash_table<pre_ldst_expr_hasher> *pre_ldst_table;

/* Bitmap containing one bit for each register in the program.
   Used when performing GCSE to track which registers have been set since
   the start of the basic block.  */
static regset reg_set_bitmap;

/* Array, indexed by basic block number for a list of insns which modify
   memory within that block.  */
static vec<rtx_insn *> *modify_mem_list;
static bitmap modify_mem_list_set;

/* This array parallels modify_mem_list, except that it stores MEMs
   being set and their canonicalized memory addresses.  */
static vec<modify_pair> *canon_modify_mem_list;

/* Bitmap indexed by block numbers to record which blocks contain
   function calls.  */
static bitmap blocks_with_calls;

/* Various variables for statistics gathering.  */

/* Memory used in a pass.
   This isn't intended to be absolutely precise.  Its intent is only
   to keep an eye on memory usage.  */
static int bytes_used;

/* GCSE substitutions made.  */
static int gcse_subst_count;
/* Number of copy instructions created.  */
static int gcse_create_count;

/* Doing code hoisting.  */
static bool doing_code_hoisting_p = false;

/* For available exprs */
static sbitmap *ae_kill;

/* Data stored for each basic block.  */
struct bb_data
{
  /* Maximal register pressure inside basic block for given register class
     (defined only for the pressure classes).  */
  int max_reg_pressure[N_REG_CLASSES];
  /* Recorded register pressure of basic block before trying to hoist
     an expression.  Will be used to restore the register pressure
     if the expression should not be hoisted.  */
  int old_pressure;
  /* Recorded register live_in info of basic block during code hoisting
     process.  BACKUP is used to record live_in info before trying to
     hoist an expression, and will be used to restore LIVE_IN if the
     expression should not be hoisted.  */
  bitmap live_in, backup;
};

#define BB_DATA(bb) ((struct bb_data *) (bb)->aux)

static basic_block curr_bb;

/* Current register pressure for each pressure class.  */
static int curr_reg_pressure[N_REG_CLASSES];

static void compute_can_copy (void);
static void *gmalloc (size_t) ATTRIBUTE_MALLOC;
static void *gcalloc (size_t, size_t) ATTRIBUTE_MALLOC;
static void *gcse_alloc (unsigned long);
static void alloc_gcse_mem (void);
static void free_gcse_mem (void);
static void hash_scan_insn (rtx_insn *, struct gcse_hash_table_d *);
static void hash_scan_set (rtx, rtx_insn *, struct gcse_hash_table_d *);
static void hash_scan_clobber (rtx, rtx_insn *, struct gcse_hash_table_d *);
static void hash_scan_call (rtx, rtx_insn *, struct gcse_hash_table_d *);
static int oprs_unchanged_p (const_rtx, const rtx_insn *, int);
static int oprs_anticipatable_p (const_rtx, const rtx_insn *);
static int oprs_available_p (const_rtx, const rtx_insn *);
static void insert_expr_in_table (rtx, machine_mode, rtx_insn *, int, int,
				  HOST_WIDE_INT, struct gcse_hash_table_d *);
static unsigned int hash_expr (const_rtx, machine_mode, int *, int);
static void record_last_reg_set_info (rtx_insn *, int);
static void record_last_mem_set_info (rtx_insn *);
static void record_last_set_info (rtx, const_rtx, void *);
static void compute_hash_table (struct gcse_hash_table_d *);
static void alloc_hash_table (struct gcse_hash_table_d *);
static void free_hash_table (struct gcse_hash_table_d *);
static void compute_hash_table_work (struct gcse_hash_table_d *);
static void dump_hash_table (FILE *, const char *, struct gcse_hash_table_d *);
static void compute_local_properties (sbitmap *, sbitmap *, sbitmap *,
				      struct gcse_hash_table_d *);
static void mems_conflict_for_gcse_p (rtx, const_rtx, void *);
static int load_killed_in_block_p (const_basic_block, int, const_rtx, int);
static void alloc_pre_mem (int, int);
static void free_pre_mem (void);
static struct edge_list *compute_pre_data (void);
static int pre_expr_reaches_here_p (basic_block, struct gcse_expr *,
				    basic_block);
static void insert_insn_end_basic_block (struct gcse_expr *, basic_block);
static void pre_insert_copy_insn (struct gcse_expr *, rtx_insn *);
static void pre_insert_copies (void);
static int pre_delete (void);
static int pre_gcse (struct edge_list *);
static int one_pre_gcse_pass (void);
static void add_label_notes (rtx, rtx_insn *);
static void alloc_code_hoist_mem (int, int);
static void free_code_hoist_mem (void);
static void compute_code_hoist_vbeinout (void);
static void compute_code_hoist_data (void);
static int should_hoist_expr_to_dom (basic_block, struct gcse_expr *,
				     basic_block,
				     sbitmap, HOST_WIDE_INT, int *,
				     enum reg_class,
				     int *, bitmap, rtx_insn *);
static int hoist_code (void);
static enum reg_class get_regno_pressure_class (int regno, int *nregs);
static enum reg_class get_pressure_class_and_nregs (rtx_insn *insn, int *nregs);
static int one_code_hoisting_pass (void);
static rtx_insn *process_insert_insn (struct gcse_expr *);
static int pre_edge_insert (struct edge_list *, struct gcse_expr **);
static int pre_expr_reaches_here_p_work (basic_block, struct gcse_expr *,
					 basic_block, char *);
static struct ls_expr * ldst_entry (rtx);
static void free_ldst_entry (struct ls_expr *);
static void free_ld_motion_mems (void);
static void print_ldst_list (FILE *);
static struct ls_expr * find_rtx_in_ldst (rtx);
static int simple_mem (const_rtx);
static void invalidate_any_buried_refs (rtx);
static void compute_ld_motion_mems (void);
static void trim_ld_motion_mems (void);
static void update_ld_motion_stores (struct gcse_expr *);
static void clear_modify_mem_tables (void);
static void free_modify_mem_tables (void);

#define GNEW(T)			((T *) gmalloc (sizeof (T)))
#define GCNEW(T)		((T *) gcalloc (1, sizeof (T)))

#define GNEWVEC(T, N)		((T *) gmalloc (sizeof (T) * (N)))
#define GCNEWVEC(T, N)		((T *) gcalloc ((N), sizeof (T)))

#define GNEWVAR(T, S)		((T *) gmalloc ((S)))
#define GCNEWVAR(T, S)		((T *) gcalloc (1, (S)))

#define GOBNEW(T)		((T *) gcse_alloc (sizeof (T)))
#define GOBNEWVAR(T, S)		((T *) gcse_alloc ((S)))

/* Misc. utilities.  */

#define can_copy \
  (this_target_gcse->x_can_copy)
#define can_copy_init_p \
  (this_target_gcse->x_can_copy_init_p)

/* Compute which modes support reg/reg copy operations.  */

static void
compute_can_copy (void)
{
  int i;
#ifndef AVOID_CCMODE_COPIES
  rtx reg;
  rtx_insn *insn;
#endif
  memset (can_copy, 0, NUM_MACHINE_MODES);

  start_sequence ();
  for (i = 0; i < NUM_MACHINE_MODES; i++)
    if (GET_MODE_CLASS (i) == MODE_CC)
      {
#ifdef AVOID_CCMODE_COPIES
	can_copy[i] = 0;
#else
	reg = gen_rtx_REG ((machine_mode) i, LAST_VIRTUAL_REGISTER + 1);
	insn = emit_insn (gen_rtx_SET (reg, reg));
	if (recog (PATTERN (insn), insn, NULL) >= 0)
	  can_copy[i] = 1;
#endif
      }
    else
      can_copy[i] = 1;

  end_sequence ();
}

/* Returns whether the mode supports reg/reg copy operations.  */

bool
can_copy_p (machine_mode mode)
{
  if (! can_copy_init_p)
    {
      compute_can_copy ();
      can_copy_init_p = true;
    }

  return can_copy[mode] != 0;
}

/* Cover function to xmalloc to record bytes allocated.  */

static void *
gmalloc (size_t size)
{
  bytes_used += size;
  return xmalloc (size);
}

/* Cover function to xcalloc to record bytes allocated.  */

static void *
gcalloc (size_t nelem, size_t elsize)
{
  bytes_used += nelem * elsize;
  return xcalloc (nelem, elsize);
}

/* Cover function to obstack_alloc.  */

static void *
gcse_alloc (unsigned long size)
{
  bytes_used += size;
  return obstack_alloc (&gcse_obstack, size);
}

/* Allocate memory for the reg/memory set tracking tables.
   This is called at the start of each pass.  */

static void
alloc_gcse_mem (void)
{
  /* Allocate vars to track sets of regs.  */
  reg_set_bitmap = ALLOC_REG_SET (NULL);

  /* Allocate array to keep a list of insns which modify memory in each
     basic block.  The two typedefs are needed to work around the
     pre-processor limitation with template types in macro arguments.  */
  typedef vec<rtx_insn *> vec_rtx_heap;
  typedef vec<modify_pair> vec_modify_pair_heap;
  modify_mem_list = GCNEWVEC (vec_rtx_heap, last_basic_block_for_fn (cfun));
  canon_modify_mem_list = GCNEWVEC (vec_modify_pair_heap,
				    last_basic_block_for_fn (cfun));
  modify_mem_list_set = BITMAP_ALLOC (NULL);
  blocks_with_calls = BITMAP_ALLOC (NULL);
}

/* Free memory allocated by alloc_gcse_mem.  */

static void
free_gcse_mem (void)
{
  FREE_REG_SET (reg_set_bitmap);

  free_modify_mem_tables ();
  BITMAP_FREE (modify_mem_list_set);
  BITMAP_FREE (blocks_with_calls);
}

/* Compute the local properties of each recorded expression.

   Local properties are those that are defined by the block, irrespective of
   other blocks.

   An expression is transparent in a block if its operands are not modified
   in the block.

   An expression is computed (locally available) in a block if it is computed
   at least once and the expression would contain the same value if the
   computation was moved to the end of the block.

   An expression is locally anticipatable in a block if it is computed at
   least once and the expression would contain the same value if the
   computation was moved to the beginning of the block.

   We call this routine for PRE and code hoisting.  Both compute
   basically the same information and thus can easily share this code.

   TRANSP, COMP, and ANTLOC are destination sbitmaps for recording local
   properties.  If NULL, then it is not necessary to compute or record that
   particular property.

   TABLE controls which hash table to look at.  */
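
/* As a worked example of the three properties (a hypothetical block):

       b = 1;
       x = a + b;
       a = 2;

   The expression a+b is locally anticipatable only if its operands are
   unchanged before the computation; the set of B above makes it
   non-anticipatable.  It is locally available (computed) only if its
   operands are unchanged afterwards; the set of A makes it unavailable.
   And the block is not transparent for a+b because an operand is
   modified somewhere within it.  */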

static void
compute_local_properties (sbitmap *transp, sbitmap *comp, sbitmap *antloc,
			  struct gcse_hash_table_d *table)
{
  unsigned int i;

  /* Initialize any bitmaps that were passed in.  */
  if (transp)
    {
      bitmap_vector_ones (transp, last_basic_block_for_fn (cfun));
    }

  if (comp)
    bitmap_vector_clear (comp, last_basic_block_for_fn (cfun));
  if (antloc)
    bitmap_vector_clear (antloc, last_basic_block_for_fn (cfun));

  for (i = 0; i < table->size; i++)
    {
      struct gcse_expr *expr;

      for (expr = table->table[i]; expr != NULL; expr = expr->next_same_hash)
	{
	  int indx = expr->bitmap_index;
	  struct gcse_occr *occr;

	  /* The expression is transparent in this block if it is not killed.
	     We start by assuming all are transparent [none are killed], and
	     then reset the bits for those that are.  */
	  if (transp)
	    compute_transp (expr->expr, indx, transp,
			    blocks_with_calls,
			    modify_mem_list_set,
			    canon_modify_mem_list);

	  /* The occurrences recorded in antic_occr are exactly those that
	     we want to set to nonzero in ANTLOC.  */
	  if (antloc)
	    for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
	      {
		bitmap_set_bit (antloc[BLOCK_FOR_INSN (occr->insn)->index], indx);

		/* While we're scanning the table, this is a good place to
		   initialize this.  */
		occr->deleted_p = 0;
	      }

	  /* The occurrences recorded in avail_occr are exactly those that
	     we want to set to nonzero in COMP.  */
	  if (comp)
	    for (occr = expr->avail_occr; occr != NULL; occr = occr->next)
	      {
		bitmap_set_bit (comp[BLOCK_FOR_INSN (occr->insn)->index], indx);

		/* While we're scanning the table, this is a good place to
		   initialize this.  */
		occr->copied_p = 0;
	      }

	  /* While we're scanning the table, this is a good place to
	     initialize this.  */
	  expr->reaching_reg = 0;
	}
    }
}

/* Hash table support.  */

struct reg_avail_info
{
  basic_block last_bb;
  int first_set;
  int last_set;
};

static struct reg_avail_info *reg_avail_info;
static basic_block current_bb;

/* See whether X, the source of a set, is something we want to consider for
   GCSE.  */

static int
want_to_gcse_p (rtx x, machine_mode mode, HOST_WIDE_INT *max_distance_ptr)
{
#ifdef STACK_REGS
  /* On register stack architectures, don't GCSE constants from the
     constant pool, as the benefits are often swamped by the overhead
     of shuffling the register stack between basic blocks.  */
  if (IS_STACK_MODE (GET_MODE (x)))
    x = avoid_constant_pool_reference (x);
#endif

  /* GCSE'ing constants:

     We do not specifically distinguish between constant and non-constant
     expressions in PRE and Hoist.  We use set_src_cost below to limit
     the maximum distance simple expressions can travel.

     Nevertheless, constants are much easier to GCSE, and, hence,
     it is easy to overdo the optimizations.  Usually, excessive PRE and
     Hoisting of constants leads to increased register pressure.

     RA can deal with this by rematerializing some of the constants.
     Therefore, it is important that the back-end generates sets of constants
     in a way that allows reload to rematerialize them under high register
     pressure, i.e., a pseudo register with REG_EQUAL to constant
     is set only once.  Failing to do so will result in IRA/reload
     spilling such constants under high register pressure instead of
     rematerializing them.  */

  switch (GET_CODE (x))
    {
    case REG:
    case SUBREG:
    case CALL:
      return 0;

    CASE_CONST_ANY:
      if (!doing_code_hoisting_p)
	/* Do not PRE constants.  */
	return 0;

      /* FALLTHRU */

    default:
      if (doing_code_hoisting_p)
	/* PRE doesn't implement max_distance restriction.  */
	{
	  int cost;
	  HOST_WIDE_INT max_distance;

	  gcc_assert (!optimize_function_for_speed_p (cfun)
		      && optimize_function_for_size_p (cfun));
	  cost = set_src_cost (x, mode, 0);

	  if (cost < COSTS_N_INSNS (GCSE_UNRESTRICTED_COST))
	    {
	      max_distance
		= ((HOST_WIDE_INT)GCSE_COST_DISTANCE_RATIO * cost) / 10;
	      if (max_distance == 0)
		return 0;

	      gcc_assert (max_distance > 0);
	    }
	  else
	    max_distance = 0;

	  if (max_distance_ptr)
	    *max_distance_ptr = max_distance;
	}

      return can_assign_to_reg_without_clobbers_p (x, mode);
    }
}

/* Used internally by can_assign_to_reg_without_clobbers_p.  */

static GTY(()) rtx_insn *test_insn;

/* Return true if we can assign X to a pseudo register of mode MODE
   such that the resulting insn does not result in clobbering a hard
   register as a side-effect.

   Additionally, if the target requires it, check that the resulting insn
   can be copied.  If it cannot, this means that X is special and probably
   has hidden side-effects we don't want to mess with.

   This function is typically used by code motion passes, to verify
   that it is safe to insert an insn without worrying about clobbering
   maybe live hard regs.  */

bool
can_assign_to_reg_without_clobbers_p (rtx x, machine_mode mode)
{
  int num_clobbers = 0;
  int icode;
  bool can_assign = false;

  /* If this is a valid operand, we are OK.  If it's VOIDmode, we aren't.  */
  if (general_operand (x, mode))
    return 1;
  else if (GET_MODE (x) == VOIDmode)
    return 0;

  /* Otherwise, check if we can make a valid insn from it.  First initialize
     our test insn if we haven't already.  */
  if (test_insn == 0)
    {
      test_insn
	= make_insn_raw (gen_rtx_SET (gen_rtx_REG (word_mode,
						   FIRST_PSEUDO_REGISTER * 2),
				      const0_rtx));
      SET_NEXT_INSN (test_insn) = SET_PREV_INSN (test_insn) = 0;
      INSN_LOCATION (test_insn) = UNKNOWN_LOCATION;
    }

  /* Now make an insn like the one we would make when GCSE'ing and see if
     valid.  */
  PUT_MODE (SET_DEST (PATTERN (test_insn)), mode);
  SET_SRC (PATTERN (test_insn)) = x;

  icode = recog (PATTERN (test_insn), test_insn, &num_clobbers);

  /* If the test insn is valid and doesn't need clobbers, and the target also
     has no objections, we're good.  */
  if (icode >= 0
      && (num_clobbers == 0 || !added_clobbers_hard_reg_p (icode))
      && ! (targetm.cannot_copy_insn_p
	    && targetm.cannot_copy_insn_p (test_insn)))
    can_assign = true;

  /* Make sure test_insn doesn't have any pointers into GC space.  */
  SET_SRC (PATTERN (test_insn)) = NULL_RTX;

  return can_assign;
}

/* Return nonzero if the operands of expression X are unchanged from the
   start of INSN's basic block up to but not including INSN (if AVAIL_P == 0),
   or from INSN to the end of INSN's basic block (if AVAIL_P != 0).  */
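
/* For instance (a hypothetical block):

       insn 1:  r5 = r6 + r7
       insn 2:  r6 = ...

   With AVAIL_P == 0 at insn 2 the answer for r6+r7 is nonzero, because
   neither R6 nor R7 is set before insn 2.  With AVAIL_P != 0 at insn 1
   the answer is zero, because insn 2 sets R6 before the end of the
   block.  */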

static int
oprs_unchanged_p (const_rtx x, const rtx_insn *insn, int avail_p)
{
  int i, j;
  enum rtx_code code;
  const char *fmt;

  if (x == 0)
    return 1;

  code = GET_CODE (x);
  switch (code)
    {
    case REG:
      {
	struct reg_avail_info *info = &reg_avail_info[REGNO (x)];

	if (info->last_bb != current_bb)
	  return 1;
	if (avail_p)
	  return info->last_set < DF_INSN_LUID (insn);
	else
	  return info->first_set >= DF_INSN_LUID (insn);
      }

    case MEM:
      if (! flag_gcse_lm
	  || load_killed_in_block_p (current_bb, DF_INSN_LUID (insn),
				     x, avail_p))
	return 0;
      else
	return oprs_unchanged_p (XEXP (x, 0), insn, avail_p);

    case PRE_DEC:
    case PRE_INC:
    case POST_DEC:
    case POST_INC:
    case PRE_MODIFY:
    case POST_MODIFY:
      return 0;

    case PC:
    case CC0: /*FIXME*/
    case CONST:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case LABEL_REF:
    case ADDR_VEC:
    case ADDR_DIFF_VEC:
      return 1;

    default:
      break;
    }

  for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
    {
      if (fmt[i] == 'e')
	{
	  /* If we are about to do the last recursive call needed at this
	     level, change it into iteration.  This function is called enough
	     to be worth it.  */
	  if (i == 0)
	    return oprs_unchanged_p (XEXP (x, i), insn, avail_p);

	  else if (! oprs_unchanged_p (XEXP (x, i), insn, avail_p))
	    return 0;
	}
      else if (fmt[i] == 'E')
	for (j = 0; j < XVECLEN (x, i); j++)
	  if (! oprs_unchanged_p (XVECEXP (x, i, j), insn, avail_p))
	    return 0;
    }

  return 1;
}

/* Info passed from load_killed_in_block_p to mems_conflict_for_gcse_p.  */

struct mem_conflict_info
{
  /* A memory reference for a load instruction, mems_conflict_for_gcse_p will
     see if a memory store conflicts with this memory load.  */
  const_rtx mem;

  /* True if mems_conflict_for_gcse_p finds a conflict between two memory
     references.  */
  bool conflict;
};

/* DEST is the output of an instruction.  If it is a memory reference and
   possibly conflicts with the load found in DATA, then communicate this
   information back through DATA.  */

static void
mems_conflict_for_gcse_p (rtx dest, const_rtx setter ATTRIBUTE_UNUSED,
			  void *data)
{
  struct mem_conflict_info *mci = (struct mem_conflict_info *) data;

  while (GET_CODE (dest) == SUBREG
	 || GET_CODE (dest) == ZERO_EXTRACT
	 || GET_CODE (dest) == STRICT_LOW_PART)
    dest = XEXP (dest, 0);

  /* If DEST is not a MEM, then it will not conflict with the load.  Note
     that function calls are assumed to clobber memory, but are handled
     elsewhere.  */
  if (! MEM_P (dest))
    return;

  /* If we are setting a MEM in our list of specially recognized MEMs,
     don't mark as killed this time.  */
  if (pre_ldst_mems != NULL && expr_equiv_p (dest, mci->mem))
    {
      if (!find_rtx_in_ldst (dest))
	mci->conflict = true;
      return;
    }

  if (true_dependence (dest, GET_MODE (dest), mci->mem))
    mci->conflict = true;
}

/* Return nonzero if the expression in X (a memory reference) is killed
   in block BB before or after the insn with the LUID in UID_LIMIT.
   AVAIL_P is nonzero for kills after UID_LIMIT, and zero for kills
   before UID_LIMIT.

   To check the entire block, set UID_LIMIT to max_uid + 1 and
   AVAIL_P to 0.  */
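
/* A sketch of the intended calling pattern (as used by oprs_unchanged_p
   above): for availability, pass the load's own LUID and AVAIL_P = 1,
   so only later stores and calls recorded in MODIFY_MEM_LIST can kill
   the load; for anticipatability, pass AVAIL_P = 0, so only earlier
   ones are considered.  */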

static int
load_killed_in_block_p (const_basic_block bb, int uid_limit, const_rtx x,
			int avail_p)
{
  vec<rtx_insn *> list = modify_mem_list[bb->index];
  rtx_insn *setter;
  unsigned ix;

  /* If this is a readonly then we aren't going to be changing it.  */
  if (MEM_READONLY_P (x))
    return 0;

  FOR_EACH_VEC_ELT_REVERSE (list, ix, setter)
    {
      struct mem_conflict_info mci;

      /* Ignore entries in the list that do not apply.  */
      if ((avail_p
	   && DF_INSN_LUID (setter) < uid_limit)
	  || (! avail_p
	      && DF_INSN_LUID (setter) > uid_limit))
	continue;

      /* If SETTER is a call everything is clobbered.  Note that calls
	 to pure functions are never put on the list, so we need not
	 worry about them.  */
      if (CALL_P (setter))
	return 1;

      /* SETTER must be an INSN of some kind that sets memory.  Call
	 note_stores to examine each hunk of memory that is modified.  */
      mci.mem = x;
      mci.conflict = false;
      note_stores (PATTERN (setter), mems_conflict_for_gcse_p, &mci);
      if (mci.conflict)
	return 1;
    }
  return 0;
}

/* Return nonzero if the operands of expression X are unchanged from
   the start of INSN's basic block up to but not including INSN.  */

static int
oprs_anticipatable_p (const_rtx x, const rtx_insn *insn)
{
  return oprs_unchanged_p (x, insn, 0);
}

/* Return nonzero if the operands of expression X are unchanged from
   INSN to the end of INSN's basic block.  */

static int
oprs_available_p (const_rtx x, const rtx_insn *insn)
{
  return oprs_unchanged_p (x, insn, 1);
}

/* Hash expression X.

   MODE is only used if X is a CONST_INT.  DO_NOT_RECORD_P is a boolean
   indicating if a volatile operand is found or if the expression contains
   something we don't want to insert in the table.  HASH_TABLE_SIZE is
   the current size of the hash table to be probed.  */

static unsigned int
hash_expr (const_rtx x, machine_mode mode, int *do_not_record_p,
	   int hash_table_size)
{
  unsigned int hash;

  *do_not_record_p = 0;

  hash = hash_rtx (x, mode, do_not_record_p, NULL, /*have_reg_qty=*/false);
  return hash % hash_table_size;
}

/* Return nonzero if exp1 is equivalent to exp2.  */

static int
expr_equiv_p (const_rtx x, const_rtx y)
{
  return exp_equiv_p (x, y, 0, true);
}

/* Insert expression X in INSN in the hash TABLE.
   If it is already present, record it as the last occurrence in INSN's
   basic block.

   MODE is the mode of the value X is being stored into.
   It is only used if X is a CONST_INT.

   ANTIC_P is nonzero if X is an anticipatable expression.
   AVAIL_P is nonzero if X is an available expression.

   MAX_DISTANCE is the maximum distance in instructions this expression can
   be moved.  */

static void
insert_expr_in_table (rtx x, machine_mode mode, rtx_insn *insn,
		      int antic_p,
		      int avail_p, HOST_WIDE_INT max_distance,
		      struct gcse_hash_table_d *table)
{
  int found, do_not_record_p;
  unsigned int hash;
  struct gcse_expr *cur_expr, *last_expr = NULL;
  struct gcse_occr *antic_occr, *avail_occr;

  hash = hash_expr (x, mode, &do_not_record_p, table->size);

  /* Do not insert expression in table if it contains volatile operands,
     or if hash_expr determines the expression is something we don't want
     to or can't handle.  */
  if (do_not_record_p)
    return;

  cur_expr = table->table[hash];
  found = 0;

  while (cur_expr && 0 == (found = expr_equiv_p (cur_expr->expr, x)))
    {
      /* If the expression isn't found, save a pointer to the end of
	 the list.  */
      last_expr = cur_expr;
      cur_expr = cur_expr->next_same_hash;
    }

  if (! found)
    {
      cur_expr = GOBNEW (struct gcse_expr);
      bytes_used += sizeof (struct gcse_expr);
      if (table->table[hash] == NULL)
	/* This is the first pattern that hashed to this index.  */
	table->table[hash] = cur_expr;
      else
	/* Add EXPR to end of this hash chain.  */
	last_expr->next_same_hash = cur_expr;

      /* Set the fields of the expr element.  */
      cur_expr->expr = x;
      cur_expr->bitmap_index = table->n_elems++;
      cur_expr->next_same_hash = NULL;
      cur_expr->antic_occr = NULL;
      cur_expr->avail_occr = NULL;
      gcc_assert (max_distance >= 0);
      cur_expr->max_distance = max_distance;
    }
  else
    gcc_assert (cur_expr->max_distance == max_distance);

  /* Now record the occurrence(s).  */
  if (antic_p)
    {
      antic_occr = cur_expr->antic_occr;

      if (antic_occr
	  && BLOCK_FOR_INSN (antic_occr->insn) != BLOCK_FOR_INSN (insn))
	antic_occr = NULL;

      if (antic_occr)
	/* Found another instance of the expression in the same basic block.
	   Prefer the currently recorded one.  We want the first one in the
	   block and the block is scanned from start to end.  */
	; /* nothing to do */
      else
	{
	  /* First occurrence of this expression in this basic block.  */
	  antic_occr = GOBNEW (struct gcse_occr);
	  bytes_used += sizeof (struct gcse_occr);
	  antic_occr->insn = insn;
	  antic_occr->next = cur_expr->antic_occr;
	  antic_occr->deleted_p = 0;
	  cur_expr->antic_occr = antic_occr;
	}
    }

  if (avail_p)
    {
      avail_occr = cur_expr->avail_occr;

      if (avail_occr
	  && BLOCK_FOR_INSN (avail_occr->insn) == BLOCK_FOR_INSN (insn))
	{
	  /* Found another instance of the expression in the same basic block.
	     Prefer this occurrence to the currently recorded one.  We want
	     the last one in the block and the block is scanned from start
	     to end.  */
	  avail_occr->insn = insn;
	}
      else
	{
	  /* First occurrence of this expression in this basic block.  */
	  avail_occr = GOBNEW (struct gcse_occr);
	  bytes_used += sizeof (struct gcse_occr);
	  avail_occr->insn = insn;
	  avail_occr->next = cur_expr->avail_occr;
	  avail_occr->deleted_p = 0;
	  cur_expr->avail_occr = avail_occr;
	}
    }
}

/* Scan SET present in INSN and add an entry to the hash TABLE.  */

static void
hash_scan_set (rtx set, rtx_insn *insn, struct gcse_hash_table_d *table)
{
  rtx src = SET_SRC (set);
  rtx dest = SET_DEST (set);
  rtx note;

  if (GET_CODE (src) == CALL)
    hash_scan_call (src, insn, table);

  else if (REG_P (dest))
    {
      unsigned int regno = REGNO (dest);
      HOST_WIDE_INT max_distance = 0;

      /* See if a REG_EQUAL note shows this equivalent to a simpler expression.

	 This allows us to do a single GCSE pass and still eliminate
	 redundant constants, addresses or other expressions that are
	 constructed with multiple instructions.

	 However, keep the original SRC if INSN is a simple reg-reg move.
	 In this case, there will almost always be a REG_EQUAL note on the
	 insn that sets SRC.  By recording the REG_EQUAL value here as SRC
	 for INSN, we miss copy propagation opportunities and we perform the
	 same PRE GCSE operation repeatedly on the same REG_EQUAL value if we
	 do more than one PRE GCSE pass.

	 Note that this does not impede profitable constant propagations.  We
	 "look through" reg-reg sets in lookup_avail_set.  */
      note = find_reg_equal_equiv_note (insn);
      if (note != 0
	  && REG_NOTE_KIND (note) == REG_EQUAL
	  && !REG_P (src)
	  && want_to_gcse_p (XEXP (note, 0), GET_MODE (dest), NULL))
	src = XEXP (note, 0), set = gen_rtx_SET (dest, src);

      /* Only record sets of pseudo-regs in the hash table.  */
      if (regno >= FIRST_PSEUDO_REGISTER
	  /* Don't GCSE something if we can't do a reg/reg copy.  */
	  && can_copy_p (GET_MODE (dest))
	  /* GCSE commonly inserts instructions after the insn.  We can't
	     do that easily for EH edges so disable GCSE on these for now.  */
	  /* ??? We can now easily create new EH landing pads at the
	     gimple level, for splitting edges; there's no reason we
	     can't do the same thing at the rtl level.  */
	  && !can_throw_internal (insn)
	  /* Is SET_SRC something we want to gcse?  */
	  && want_to_gcse_p (src, GET_MODE (dest), &max_distance)
	  /* Don't CSE a nop.  */
	  && ! set_noop_p (set)
	  /* Don't GCSE if it has attached REG_EQUIV note.
	     At this point only function parameters should have
	     REG_EQUIV notes, and if the argument slot is used somewhere
	     explicitly, it means the address of the parameter has been
	     taken, so we should not extend the lifetime of the pseudo.  */
	  && (note == NULL_RTX || ! MEM_P (XEXP (note, 0))))
	{
	  /* An expression is not anticipatable if its operands are
	     modified before this insn or if this is not the only SET in
	     this insn.  The latter condition does not have to mean that
	     SRC itself is not anticipatable, but we just will not be
	     able to handle code motion of insns with multiple sets.  */
	  int antic_p = oprs_anticipatable_p (src, insn)
			&& !multiple_sets (insn);
	  /* An expression is not available if its operands are
	     subsequently modified, including this insn.  It's also not
	     available if this is a branch, because we can't insert
	     a set after the branch.  */
	  int avail_p = (oprs_available_p (src, insn)
			 && ! JUMP_P (insn));

	  insert_expr_in_table (src, GET_MODE (dest), insn, antic_p, avail_p,
				max_distance, table);
	}
    }
  /* In case of store we want to consider the memory value as available in
     the REG stored in that memory.  This makes it possible to remove
     redundant loads due to stores to the same location.  */
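  /* For example (a hypothetical fragment): given "*p = r8" here and a
     later load "r9 = *p", recording the MEM as available in R8 lets the
     redundant load be replaced by a copy from R8.  */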
  else if (flag_gcse_las && REG_P (src) && MEM_P (dest))
    {
      unsigned int regno = REGNO (src);
      HOST_WIDE_INT max_distance = 0;

      /* Only record sets of pseudo-regs in the hash table.  */
      if (regno >= FIRST_PSEUDO_REGISTER
	  /* Don't GCSE something if we can't do a reg/reg copy.  */
	  && can_copy_p (GET_MODE (src))
	  /* GCSE commonly inserts instructions after the insn.  We can't
	     do that easily for EH edges so disable GCSE on these for now.  */
	  && !can_throw_internal (insn)
	  /* Is SET_DEST something we want to gcse?  */
	  && want_to_gcse_p (dest, GET_MODE (dest), &max_distance)
	  /* Don't CSE a nop.  */
	  && ! set_noop_p (set)
	  /* Don't GCSE if it has attached REG_EQUIV note.
	     At this point only function parameters should have
	     REG_EQUIV notes, and if the argument slot is used somewhere
	     explicitly, it means the address of the parameter has been
	     taken, so we should not extend the lifetime of the pseudo.  */
	  && ((note = find_reg_note (insn, REG_EQUIV, NULL_RTX)) == 0
	      || ! MEM_P (XEXP (note, 0))))
	{
	  /* Stores are never anticipatable.  */
	  int antic_p = 0;
	  /* An expression is not available if its operands are
	     subsequently modified, including this insn.  It's also not
	     available if this is a branch, because we can't insert
	     a set after the branch.  */
	  int avail_p = oprs_available_p (dest, insn) && ! JUMP_P (insn);

	  /* Record the memory expression (DEST) in the hash table.  */
	  insert_expr_in_table (dest, GET_MODE (dest), insn,
				antic_p, avail_p, max_distance, table);
	}
    }
}

static void
hash_scan_clobber (rtx x ATTRIBUTE_UNUSED, rtx_insn *insn ATTRIBUTE_UNUSED,
		   struct gcse_hash_table_d *table ATTRIBUTE_UNUSED)
{
  /* Currently nothing to do.  */
}

static void
hash_scan_call (rtx x ATTRIBUTE_UNUSED, rtx_insn *insn ATTRIBUTE_UNUSED,
		struct gcse_hash_table_d *table ATTRIBUTE_UNUSED)
{
  /* Currently nothing to do.  */
}

/* Process INSN and add hash table entries as appropriate.  */

static void
hash_scan_insn (rtx_insn *insn, struct gcse_hash_table_d *table)
{
  rtx pat = PATTERN (insn);
  int i;

  /* Pick out the sets of INSN and for other forms of instructions record
     what's been modified.  */

  if (GET_CODE (pat) == SET)
    hash_scan_set (pat, insn, table);

  else if (GET_CODE (pat) == CLOBBER)
    hash_scan_clobber (pat, insn, table);

  else if (GET_CODE (pat) == CALL)
    hash_scan_call (pat, insn, table);

  else if (GET_CODE (pat) == PARALLEL)
    for (i = 0; i < XVECLEN (pat, 0); i++)
      {
	rtx x = XVECEXP (pat, 0, i);

	if (GET_CODE (x) == SET)
	  hash_scan_set (x, insn, table);
	else if (GET_CODE (x) == CLOBBER)
	  hash_scan_clobber (x, insn, table);
	else if (GET_CODE (x) == CALL)
	  hash_scan_call (x, insn, table);
      }
}

/* Dump the hash table TABLE to file FILE under the name NAME.  */

static void
dump_hash_table (FILE *file, const char *name, struct gcse_hash_table_d *table)
{
  int i;
  /* Flattened out table, so it's printed in proper order.  */
  struct gcse_expr **flat_table;
  unsigned int *hash_val;
  struct gcse_expr *expr;

  flat_table = XCNEWVEC (struct gcse_expr *, table->n_elems);
  hash_val = XNEWVEC (unsigned int, table->n_elems);

  for (i = 0; i < (int) table->size; i++)
    for (expr = table->table[i]; expr != NULL; expr = expr->next_same_hash)
      {
	flat_table[expr->bitmap_index] = expr;
	hash_val[expr->bitmap_index] = i;
      }

  fprintf (file, "%s hash table (%d buckets, %d entries)\n",
	   name, table->size, table->n_elems);

  for (i = 0; i < (int) table->n_elems; i++)
    if (flat_table[i] != 0)
      {
	expr = flat_table[i];
	fprintf (file, "Index %d (hash value %d; max distance "
		 HOST_WIDE_INT_PRINT_DEC ")\n  ",
		 expr->bitmap_index, hash_val[i], expr->max_distance);
	print_rtl (file, expr->expr);
	fprintf (file, "\n");
      }

  fprintf (file, "\n");

  free (flat_table);
  free (hash_val);
}

/* Record register first/last/block set information for REGNO in INSN.

   first_set records the first place in the block where the register
   is set and is used to compute "anticipatability".

   last_set records the last place in the block where the register
   is set and is used to compute "availability".

   last_bb records the block for which first_set and last_set are
   valid, as a quick test to invalidate them.  */
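
/* For example (a hypothetical block, LUIDs in parentheses):

       (1)  r5 = ...
       (2)  ...
       (3)  r5 = ...

   After scanning, first_set for R5 is 1 and last_set is 3, so per
   oprs_unchanged_p an expression using R5 is anticipatable at an insn
   only if that insn's LUID is <= 1, and available after an insn only
   if its LUID is > 3.  */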

static void
record_last_reg_set_info (rtx_insn *insn, int regno)
{
  struct reg_avail_info *info = &reg_avail_info[regno];
  int luid = DF_INSN_LUID (insn);

  info->last_set = luid;
  if (info->last_bb != current_bb)
    {
      info->last_bb = current_bb;
      info->first_set = luid;
    }
}

/* Record memory modification information for INSN.  We do not actually care
   about the memory location(s) that are set, or even how they are set (consider
   a CALL_INSN).  We merely need to record which insns modify memory.  */

static void
record_last_mem_set_info (rtx_insn *insn)
{
  if (! flag_gcse_lm)
    return;

  record_last_mem_set_info_common (insn, modify_mem_list,
				   canon_modify_mem_list,
				   modify_mem_list_set,
				   blocks_with_calls);
}

/* Called from compute_hash_table via note_stores to handle one
   SET or CLOBBER in an insn.  DATA is really the instruction in which
   the SET is taking place.  */

static void
record_last_set_info (rtx dest, const_rtx setter ATTRIBUTE_UNUSED, void *data)
{
  rtx_insn *last_set_insn = (rtx_insn *) data;

  if (GET_CODE (dest) == SUBREG)
    dest = SUBREG_REG (dest);

  if (REG_P (dest))
    record_last_reg_set_info (last_set_insn, REGNO (dest));
  else if (MEM_P (dest)
	   /* Ignore pushes, they clobber nothing.  */
	   && ! push_operand (dest, GET_MODE (dest)))
    record_last_mem_set_info (last_set_insn);
}

/* Top level function to create an expression hash table.

   Expression entries are placed in the hash table if
   - they are of the form (set (pseudo-reg) src),
   - src is something we want to perform GCSE on,
   - none of the operands are subsequently modified in the block

   Currently src must be a pseudo-reg or a const_int.

   TABLE is the table computed.  */

static void
compute_hash_table_work (struct gcse_hash_table_d *table)
{
  int i;

  /* Re-cache any INSN_LIST nodes we have allocated.  */
  clear_modify_mem_tables ();
  /* Some working arrays used to track first and last set in each block.  */
  reg_avail_info = GNEWVEC (struct reg_avail_info, max_reg_num ());

  for (i = 0; i < max_reg_num (); ++i)
    reg_avail_info[i].last_bb = NULL;

  FOR_EACH_BB_FN (current_bb, cfun)
    {
      rtx_insn *insn;
      unsigned int regno;

      /* First pass over the instructions records information used to
	 determine when registers and memory are first and last set.  */
      FOR_BB_INSNS (current_bb, insn)
	{
	  if (!NONDEBUG_INSN_P (insn))
	    continue;

	  if (CALL_P (insn))
	    {
	      hard_reg_set_iterator hrsi;
	      EXECUTE_IF_SET_IN_HARD_REG_SET (regs_invalidated_by_call,
					      0, regno, hrsi)
		record_last_reg_set_info (insn, regno);

	      if (! RTL_CONST_OR_PURE_CALL_P (insn))
		record_last_mem_set_info (insn);
	    }

	  note_stores (PATTERN (insn), record_last_set_info, insn);
	}

      /* The next pass builds the hash table.  */
      FOR_BB_INSNS (current_bb, insn)
	if (NONDEBUG_INSN_P (insn))
	  hash_scan_insn (insn, table);
    }

  free (reg_avail_info);
  reg_avail_info = NULL;
}

/* Allocate space for the set/expr hash TABLE.
   The function's maximum insn count is used to determine the number of
   buckets to use.  */

static void
alloc_hash_table (struct gcse_hash_table_d *table)
{
  int n;

  n = get_max_insn_count ();

  table->size = n / 4;
  if (table->size < 11)
    table->size = 11;

  /* Attempt to maintain efficient use of hash table.
     Making it an odd number is simplest for now.
     ??? Later take some measurements.  */
  table->size |= 1;
  n = table->size * sizeof (struct gcse_expr *);
  table->table = GNEWVAR (struct gcse_expr *, n);
}

/* Free things allocated by alloc_hash_table.  */

static void
free_hash_table (struct gcse_hash_table_d *table)
{
  free (table->table);
}

/* Compute the expression hash table TABLE.  */

static void
compute_hash_table (struct gcse_hash_table_d *table)
{
  /* Initialize count of number of entries in hash table.  */
  table->n_elems = 0;
  memset (table->table, 0, table->size * sizeof (struct gcse_expr *));

  compute_hash_table_work (table);
}

/* Expression tracking support.  */

/* Clear canon_modify_mem_list and modify_mem_list tables.  */
static void
clear_modify_mem_tables (void)
{
  unsigned i;
  bitmap_iterator bi;

  EXECUTE_IF_SET_IN_BITMAP (modify_mem_list_set, 0, i, bi)
    {
      modify_mem_list[i].release ();
      canon_modify_mem_list[i].release ();
    }
  bitmap_clear (modify_mem_list_set);
  bitmap_clear (blocks_with_calls);
}

/* Release memory used by modify_mem_list_set.  */

static void
free_modify_mem_tables (void)
{
  clear_modify_mem_tables ();
  free (modify_mem_list);
  free (canon_modify_mem_list);
  modify_mem_list = 0;
  canon_modify_mem_list = 0;
}
1622
1623 /* Compute PRE+LCM working variables. */
1624
1625 /* Local properties of expressions. */
1626
1627 /* Nonzero for expressions that are transparent in the block. */
1628 static sbitmap *transp;
1629
1630 /* Nonzero for expressions that are computed (available) in the block. */
1631 static sbitmap *comp;
1632
1633 /* Nonzero for expressions that are locally anticipatable in the block. */
1634 static sbitmap *antloc;
1635
1636 /* Nonzero for expressions where this block is an optimal computation
1637 point. */
1638 static sbitmap *pre_optimal;
1639
1640 /* Nonzero for expressions which are redundant in a particular block. */
1641 static sbitmap *pre_redundant;
1642
1643 /* Nonzero for expressions which should be inserted on a specific edge. */
1644 static sbitmap *pre_insert_map;
1645
1646 /* Nonzero for expressions which should be deleted in a specific block. */
1647 static sbitmap *pre_delete_map;
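
/* As a minimal illustration of these local properties (hypothetical
   source, not from this file), consider a block containing:

       x = a + b;   // "a + b" is locally anticipatable (ANTLOC): it is
                    //  evaluated before either operand is modified.
       a = c;       // The block is not transparent (TRANSP) for
                    //  "a + b": an operand is overwritten.
       y = a + b;   // "a + b" is computed (COMP): this evaluation is
                    //  still available at the end of the block.  */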
1648
1649 /* Allocate vars used for PRE analysis. */
1650
1651 static void
1652 alloc_pre_mem (int n_blocks, int n_exprs)
1653 {
1654 transp = sbitmap_vector_alloc (n_blocks, n_exprs);
1655 comp = sbitmap_vector_alloc (n_blocks, n_exprs);
1656 antloc = sbitmap_vector_alloc (n_blocks, n_exprs);
1657
1658 pre_optimal = NULL;
1659 pre_redundant = NULL;
1660 pre_insert_map = NULL;
1661 pre_delete_map = NULL;
1662 ae_kill = sbitmap_vector_alloc (n_blocks, n_exprs);
1663
1664 /* pre_insert and pre_delete are allocated later. */
1665 }
1666
1667 /* Free vars used for PRE analysis. */
1668
1669 static void
1670 free_pre_mem (void)
1671 {
1672 sbitmap_vector_free (transp);
1673 sbitmap_vector_free (comp);
1674
1675 /* ANTLOC and AE_KILL are freed just after pre_lcm finishes. */
1676
1677 if (pre_optimal)
1678 sbitmap_vector_free (pre_optimal);
1679 if (pre_redundant)
1680 sbitmap_vector_free (pre_redundant);
1681 if (pre_insert_map)
1682 sbitmap_vector_free (pre_insert_map);
1683 if (pre_delete_map)
1684 sbitmap_vector_free (pre_delete_map);
1685
1686 transp = comp = NULL;
1687 pre_optimal = pre_redundant = pre_insert_map = pre_delete_map = NULL;
1688 }
1689
1690 /* Remove certain expressions from the anticipatable and transparent
1691 sets of basic blocks that have an incoming abnormal edge.
1692 For PRE remove potentially trapping expressions to avoid placing
1693 them on abnormal edges. For hoisting remove memory references that
1694 can be clobbered by calls. */
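
/* For example (a hypothetical source-level sketch): given

       r = x / y;

   in a block entered by an abnormal (e.g. exception) edge, the
   division may trap, so PRE prunes "x / y" from that block's ANTLOC
   and TRANSP sets rather than risk placing it on the abnormal edge.  */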
1695
1696 static void
1697 prune_expressions (bool pre_p)
1698 {
1699 sbitmap prune_exprs;
1700 struct gcse_expr *expr;
1701 unsigned int ui;
1702 basic_block bb;
1703
1704 prune_exprs = sbitmap_alloc (expr_hash_table.n_elems);
1705 bitmap_clear (prune_exprs);
1706 for (ui = 0; ui < expr_hash_table.size; ui++)
1707 {
1708 for (expr = expr_hash_table.table[ui]; expr; expr = expr->next_same_hash)
1709 {
1710 /* Note potentially trapping expressions. */
1711 if (may_trap_p (expr->expr))
1712 {
1713 bitmap_set_bit (prune_exprs, expr->bitmap_index);
1714 continue;
1715 }
1716
1717 if (!pre_p && MEM_P (expr->expr))
1718 /* Note memory references that can be clobbered by a call.
1719 We do not split abnormal edges in hoisting, so if a
1720 memory reference were hoisted along an abnormal edge,
1721 it would be placed /before/ the call. Therefore, only
1722 constant memory references can be hoisted along abnormal
1723 edges. */
1724 {
1725 if (GET_CODE (XEXP (expr->expr, 0)) == SYMBOL_REF
1726 && CONSTANT_POOL_ADDRESS_P (XEXP (expr->expr, 0)))
1727 continue;
1728
1729 if (MEM_READONLY_P (expr->expr)
1730 && !MEM_VOLATILE_P (expr->expr)
1731 && MEM_NOTRAP_P (expr->expr))
1732 /* Constant memory reference, e.g., a PIC address. */
1733 continue;
1734
1735 /* ??? Optimally, we would use interprocedural alias
1736 analysis to determine if this mem is actually killed
1737 by this call. */
1738
1739 bitmap_set_bit (prune_exprs, expr->bitmap_index);
1740 }
1741 }
1742 }
1743
1744 FOR_EACH_BB_FN (bb, cfun)
1745 {
1746 edge e;
1747 edge_iterator ei;
1748
1749 /* If the current block is the destination of an abnormal edge, we
1750 kill all trapping (for PRE) and memory (for hoist) expressions
1751 because we won't be able to properly place the instruction on
1752 the edge. So make them neither anticipatable nor transparent.
1753 This is fairly conservative.
1754
1755 ??? For hoisting it may be necessary to check for set-and-jump
1756 instructions here, not just for abnormal edges. The general problem
1757 is that when an expression cannot be placed right at the end of
1758 a basic block, we should account for any side effects of subsequent
1759 jump instructions that could clobber the expression. It would
1760 be best to implement this check along the lines of
1761 should_hoist_expr_to_dom where the target block is already known
1762 and, hence, there's no need to conservatively prune expressions on
1763 "intermediate" set-and-jump instructions. */
1764 FOR_EACH_EDGE (e, ei, bb->preds)
1765 if ((e->flags & EDGE_ABNORMAL)
1766 && (pre_p || CALL_P (BB_END (e->src))))
1767 {
1768 bitmap_and_compl (antloc[bb->index],
1769 antloc[bb->index], prune_exprs);
1770 bitmap_and_compl (transp[bb->index],
1771 transp[bb->index], prune_exprs);
1772 break;
1773 }
1774 }
1775
1776 sbitmap_free (prune_exprs);
1777 }
1778
1779 /* It may be necessary to insert a large number of insns on edges to
1780 make the existing occurrences of expressions fully redundant. This
1781 routine examines the set of insertions and deletions and if the ratio
1782 of insertions to deletions is too high for a particular expression, then
1783 the expression is removed from the insertion/deletion sets.
1784
1785 N_ELEMS is the number of elements in the hash table. */
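
/* Worked example with illustrative numbers: if an expression would
   have to be inserted on 30 edges to make a single occurrence fully
   redundant, and MAX_GCSE_INSERTION_RATIO were, say, 20, then
   30 / 1 > 20 and the expression is cleared from both PRE_INSERT_MAP
   and PRE_DELETE_MAP.  */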
1786
1787 static void
1788 prune_insertions_deletions (int n_elems)
1789 {
1790 sbitmap_iterator sbi;
1791 sbitmap prune_exprs;
1792
1793 /* We always use I to iterate over blocks/edges and J to iterate over
1794 expressions. */
1795 unsigned int i, j;
1796
1797 /* Counts for the number of times an expression needs to be inserted and
1798 number of times an expression can be removed as a result. */
1799 int *insertions = GCNEWVEC (int, n_elems);
1800 int *deletions = GCNEWVEC (int, n_elems);
1801
1802 /* Set of expressions which require too many insertions relative to
1803 the number of deletions achieved. We will prune these out of the
1804 insertion/deletion sets. */
1805 prune_exprs = sbitmap_alloc (n_elems);
1806 bitmap_clear (prune_exprs);
1807
1808 /* Iterate over the edges counting the number of times each expression
1809 needs to be inserted. */
1810 for (i = 0; i < (unsigned) n_edges_for_fn (cfun); i++)
1811 {
1812 EXECUTE_IF_SET_IN_BITMAP (pre_insert_map[i], 0, j, sbi)
1813 insertions[j]++;
1814 }
1815
1816 /* Similarly for deletions, but those occur in blocks rather than on
1817 edges. */
1818 for (i = 0; i < (unsigned) last_basic_block_for_fn (cfun); i++)
1819 {
1820 EXECUTE_IF_SET_IN_BITMAP (pre_delete_map[i], 0, j, sbi)
1821 deletions[j]++;
1822 }
1823
1824 /* Now that we have accurate counts, iterate over the elements in the
1825 hash table and see if any need too many insertions relative to the
1826 number of evaluations that can be removed. If so, mark them in
1827 PRUNE_EXPRS. */
1828 for (j = 0; j < (unsigned) n_elems; j++)
1829 if (deletions[j]
1830 && ((unsigned) insertions[j] / deletions[j]) > MAX_GCSE_INSERTION_RATIO)
1831 bitmap_set_bit (prune_exprs, j);
1832
1833 /* Now prune PRE_INSERT_MAP and PRE_DELETE_MAP based on PRUNE_EXPRS. */
1834 EXECUTE_IF_SET_IN_BITMAP (prune_exprs, 0, j, sbi)
1835 {
1836 for (i = 0; i < (unsigned) n_edges_for_fn (cfun); i++)
1837 bitmap_clear_bit (pre_insert_map[i], j);
1838
1839 for (i = 0; i < (unsigned) last_basic_block_for_fn (cfun); i++)
1840 bitmap_clear_bit (pre_delete_map[i], j);
1841 }
1842
1843 sbitmap_free (prune_exprs);
1844 free (insertions);
1845 free (deletions);
1846 }
1847
1848 /* Top level routine to do the dataflow analysis needed by PRE. */
1849
1850 static struct edge_list *
1851 compute_pre_data (void)
1852 {
1853 struct edge_list *edge_list;
1854 basic_block bb;
1855
1856 compute_local_properties (transp, comp, antloc, &expr_hash_table);
1857 prune_expressions (true);
1858 bitmap_vector_clear (ae_kill, last_basic_block_for_fn (cfun));
1859
1860 /* Compute ae_kill for each basic block using:
1861
1862 ~(TRANSP | COMP)
1863 */
1864
1865 FOR_EACH_BB_FN (bb, cfun)
1866 {
1867 bitmap_ior (ae_kill[bb->index], transp[bb->index], comp[bb->index]);
1868 bitmap_not (ae_kill[bb->index], ae_kill[bb->index]);
1869 }
1870
1871 edge_list = pre_edge_lcm (expr_hash_table.n_elems, transp, comp, antloc,
1872 ae_kill, &pre_insert_map, &pre_delete_map);
1873 sbitmap_vector_free (antloc);
1874 antloc = NULL;
1875 sbitmap_vector_free (ae_kill);
1876 ae_kill = NULL;
1877
1878 prune_insertions_deletions (expr_hash_table.n_elems);
1879
1880 return edge_list;
1881 }
1882
1883 /* PRE utilities */
1884
1885 /* Return nonzero if an occurrence of expression EXPR in OCCR_BB would reach
1886 block BB.
1887
1888 VISITED is a pointer to a working buffer for tracking which BB's have
1889 been visited. It is NULL for the top-level call.
1890
1891 We treat reaching expressions that go through blocks containing the same
1892 reaching expression as "not reaching". E.g. if EXPR is generated in blocks
1893 2 and 3, INSN is in block 4, and 2->3->4, we treat the expression in block
1894 2 as not reaching. The intent is to improve the probability of finding
1895 only one reaching expression and to reduce register lifetimes by picking
1896 the closest such expression. */
1897
1898 static int
1899 pre_expr_reaches_here_p_work (basic_block occr_bb, struct gcse_expr *expr,
1900 basic_block bb, char *visited)
1901 {
1902 edge pred;
1903 edge_iterator ei;
1904
1905 FOR_EACH_EDGE (pred, ei, bb->preds)
1906 {
1907 basic_block pred_bb = pred->src;
1908
1909 if (pred->src == ENTRY_BLOCK_PTR_FOR_FN (cfun)
1910 /* Has this predecessor already been visited? */
1911 || visited[pred_bb->index])
1912 ;/* Nothing to do. */
1913
1914 /* Does this predecessor generate this expression? */
1915 else if (bitmap_bit_p (comp[pred_bb->index], expr->bitmap_index))
1916 {
1917 /* Is this the occurrence we're looking for?
1918 Note that there's only one generating occurrence per block
1919 so we just need to check the block number. */
1920 if (occr_bb == pred_bb)
1921 return 1;
1922
1923 visited[pred_bb->index] = 1;
1924 }
1925 /* Ignore this predecessor if it kills the expression. */
1926 else if (! bitmap_bit_p (transp[pred_bb->index], expr->bitmap_index))
1927 visited[pred_bb->index] = 1;
1928
1929 /* Neither gen nor kill. */
1930 else
1931 {
1932 visited[pred_bb->index] = 1;
1933 if (pre_expr_reaches_here_p_work (occr_bb, expr, pred_bb, visited))
1934 return 1;
1935 }
1936 }
1937
1938 /* All paths have been checked. */
1939 return 0;
1940 }
1941
1942 /* The wrapper for pre_expr_reaches_here_p_work that ensures that any
1943 memory allocated for that function is freed. */
1944
1945 static int
1946 pre_expr_reaches_here_p (basic_block occr_bb, struct gcse_expr *expr, basic_block bb)
1947 {
1948 int rval;
1949 char *visited = XCNEWVEC (char, last_basic_block_for_fn (cfun));
1950
1951 rval = pre_expr_reaches_here_p_work (occr_bb, expr, bb, visited);
1952
1953 free (visited);
1954 return rval;
1955 }
1956
1957 /* Generate RTL to copy an EXPR to its `reaching_reg' and return it. */
1958
1959 static rtx_insn *
1960 process_insert_insn (struct gcse_expr *expr)
1961 {
1962 rtx reg = expr->reaching_reg;
1963 /* Copy the expression to make sure we don't have any sharing issues. */
1964 rtx exp = copy_rtx (expr->expr);
1965 rtx_insn *pat;
1966
1967 start_sequence ();
1968
1969 /* If the expression is something that's an operand, like a constant,
1970 just copy it to a register. */
1971 if (general_operand (exp, GET_MODE (reg)))
1972 emit_move_insn (reg, exp);
1973
1974 /* Otherwise, make a new insn to compute this expression and make sure the
1975 insn will be recognized (this also adds any needed CLOBBERs). */
1976 else
1977 {
1978 rtx_insn *insn = emit_insn (gen_rtx_SET (reg, exp));
1979
1980 if (insn_invalid_p (insn, false))
1981 gcc_unreachable ();
1982 }
1983
1984 pat = get_insns ();
1985 end_sequence ();
1986
1987 return pat;
1988 }
1989
1990 /* Add EXPR to the end of basic block BB.
1991
1992 This is used by both the PRE and code hoisting. */
1993
1994 static void
1995 insert_insn_end_basic_block (struct gcse_expr *expr, basic_block bb)
1996 {
1997 rtx_insn *insn = BB_END (bb);
1998 rtx_insn *new_insn;
1999 rtx reg = expr->reaching_reg;
2000 int regno = REGNO (reg);
2001 rtx_insn *pat, *pat_end;
2002
2003 pat = process_insert_insn (expr);
2004 gcc_assert (pat && INSN_P (pat));
2005
2006 pat_end = pat;
2007 while (NEXT_INSN (pat_end) != NULL_RTX)
2008 pat_end = NEXT_INSN (pat_end);
2009
2010 /* If the last insn is a jump, insert EXPR in front [taking care to
2011 handle cc0, etc. properly]. Similarly we need to take care of trapping
2012 instructions in the presence of non-call exceptions. */
2013
2014 if (JUMP_P (insn)
2015 || (NONJUMP_INSN_P (insn)
2016 && (!single_succ_p (bb)
2017 || single_succ_edge (bb)->flags & EDGE_ABNORMAL)))
2018 {
2019 /* FIXME: 'twould be nice to call prev_cc0_setter here but it aborts
2020 if cc0 isn't set. */
2021 if (HAVE_cc0)
2022 {
2023 rtx note = find_reg_note (insn, REG_CC_SETTER, NULL_RTX);
2024 if (note)
2025 insn = safe_as_a <rtx_insn *> (XEXP (note, 0));
2026 else
2027 {
2028 rtx_insn *maybe_cc0_setter = prev_nonnote_insn (insn);
2029 if (maybe_cc0_setter
2030 && INSN_P (maybe_cc0_setter)
2031 && sets_cc0_p (PATTERN (maybe_cc0_setter)))
2032 insn = maybe_cc0_setter;
2033 }
2034 }
2035
2036 /* FIXME: What if something in cc0/jump uses value set in new insn? */
2037 new_insn = emit_insn_before_noloc (pat, insn, bb);
2038 }
2039
2040 /* Likewise if the last insn is a call, as will happen in the presence
2041 of exception handling. */
2042 else if (CALL_P (insn)
2043 && (!single_succ_p (bb)
2044 || single_succ_edge (bb)->flags & EDGE_ABNORMAL))
2045 {
2046 /* Keeping in mind targets with small register classes and parameters
2047 in registers, we search backward and place the instructions before
2048 the first parameter is loaded. Do this for everyone for consistency
2049 and a presumption that we'll get better code elsewhere as well. */
2050
2051 /* Since different machines initialize their parameter registers
2052 in different orders, assume nothing. Collect the set of all
2053 parameter registers. */
2054 insn = find_first_parameter_load (insn, BB_HEAD (bb));
2055
2056 /* If we found all the parameter loads, then we want to insert
2057 before the first parameter load.
2058
2059 If we did not find all the parameter loads, then we might have
2060 stopped on the head of the block, which could be a CODE_LABEL.
2061 If we inserted before the CODE_LABEL, then we would be putting
2062 the insn in the wrong basic block. In that case, put the insn
2063 after the CODE_LABEL. Also, respect NOTE_INSN_BASIC_BLOCK. */
2064 while (LABEL_P (insn)
2065 || NOTE_INSN_BASIC_BLOCK_P (insn))
2066 insn = NEXT_INSN (insn);
2067
2068 new_insn = emit_insn_before_noloc (pat, insn, bb);
2069 }
2070 else
2071 new_insn = emit_insn_after_noloc (pat, insn, bb);
2072
2073 while (1)
2074 {
2075 if (INSN_P (pat))
2076 add_label_notes (PATTERN (pat), new_insn);
2077 if (pat == pat_end)
2078 break;
2079 pat = NEXT_INSN (pat);
2080 }
2081
2082 gcse_create_count++;
2083
2084 if (dump_file)
2085 {
2086 fprintf (dump_file, "PRE/HOIST: end of bb %d, insn %d, ",
2087 bb->index, INSN_UID (new_insn));
2088 fprintf (dump_file, "copying expression %d to reg %d\n",
2089 expr->bitmap_index, regno);
2090 }
2091 }
2092
2093 /* Insert partially redundant expressions on edges in the CFG to make
2094 the expressions fully redundant. */
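
/* A classic illustration (hypothetical source): "a + b" below is only
   partially redundant:

       if (cond)
         x = a + b;   // available on this path only
       y = a + b;     // partially redundant

   Inserting a computation of "a + b" on the edge entering the join
   from the empty arm makes the second evaluation fully redundant, so
   pre_delete can replace it with a copy from the reaching register.  */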
2095
2096 static int
2097 pre_edge_insert (struct edge_list *edge_list, struct gcse_expr **index_map)
2098 {
2099 int e, i, j, num_edges, set_size, did_insert = 0;
2100 sbitmap *inserted;
2101
2102 /* Where PRE_INSERT_MAP is nonzero, we add the expression on that edge
2103 if it reaches any of the deleted expressions. */
2104
2105 set_size = pre_insert_map[0]->size;
2106 num_edges = NUM_EDGES (edge_list);
2107 inserted = sbitmap_vector_alloc (num_edges, expr_hash_table.n_elems);
2108 bitmap_vector_clear (inserted, num_edges);
2109
2110 for (e = 0; e < num_edges; e++)
2111 {
2112 int indx;
2113 basic_block bb = INDEX_EDGE_PRED_BB (edge_list, e);
2114
2115 for (i = indx = 0; i < set_size; i++, indx += SBITMAP_ELT_BITS)
2116 {
2117 SBITMAP_ELT_TYPE insert = pre_insert_map[e]->elms[i];
2118
2119 for (j = indx;
2120 insert && j < (int) expr_hash_table.n_elems;
2121 j++, insert >>= 1)
2122 if ((insert & 1) != 0 && index_map[j]->reaching_reg != NULL_RTX)
2123 {
2124 struct gcse_expr *expr = index_map[j];
2125 struct gcse_occr *occr;
2126
2127 /* Now look at each deleted occurrence of this expression. */
2128 for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
2129 {
2130 if (! occr->deleted_p)
2131 continue;
2132
2133 /* Insert this expression on this edge if it would
2134 reach the deleted occurrence in BB. */
2135 if (!bitmap_bit_p (inserted[e], j))
2136 {
2137 rtx_insn *insn;
2138 edge eg = INDEX_EDGE (edge_list, e);
2139
2140 /* We can't insert anything on an abnormal and
2141 critical edge, so we insert the insn at the end of
2142 the previous block. There are several alternatives
2143 detailed in Morgan's book, p. 277 (sec. 10.5), for
2144 handling this situation. This one is easiest for
2145 now. */
2146
2147 if (eg->flags & EDGE_ABNORMAL)
2148 insert_insn_end_basic_block (index_map[j], bb);
2149 else
2150 {
2151 insn = process_insert_insn (index_map[j]);
2152 insert_insn_on_edge (insn, eg);
2153 }
2154
2155 if (dump_file)
2156 {
2157 fprintf (dump_file, "PRE: edge (%d,%d), ",
2158 bb->index,
2159 INDEX_EDGE_SUCC_BB (edge_list, e)->index);
2160 fprintf (dump_file, "copy expression %d\n",
2161 expr->bitmap_index);
2162 }
2163
2164 update_ld_motion_stores (expr);
2165 bitmap_set_bit (inserted[e], j);
2166 did_insert = 1;
2167 gcse_create_count++;
2168 }
2169 }
2170 }
2171 }
2172 }
2173
2174 sbitmap_vector_free (inserted);
2175 return did_insert;
2176 }
2177
2178 /* Copy the result of EXPR->EXPR generated by INSN to EXPR->REACHING_REG.
2179 Given "old_reg <- expr" (INSN), instead of adding after it
2180 reaching_reg <- old_reg
2181 it's better to do the following:
2182 reaching_reg <- expr
2183 old_reg <- reaching_reg
2184 because this way copy propagation can discover additional PRE
2185 opportunities. But if this fails, we try the old way.
2186 When "expr" is a store, i.e.
2187 given "MEM <- old_reg", instead of adding after it
2188 reaching_reg <- old_reg
2189 it's better to add it before as follows:
2190 reaching_reg <- old_reg
2191 MEM <- reaching_reg. */
2192
2193 static void
2194 pre_insert_copy_insn (struct gcse_expr *expr, rtx_insn *insn)
2195 {
2196 rtx reg = expr->reaching_reg;
2197 int regno = REGNO (reg);
2198 int indx = expr->bitmap_index;
2199 rtx pat = PATTERN (insn);
2200 rtx set, first_set;
2201 rtx_insn *new_insn;
2202 rtx old_reg;
2203 int i;
2204
2205 /* This block matches the logic in hash_scan_insn. */
2206 switch (GET_CODE (pat))
2207 {
2208 case SET:
2209 set = pat;
2210 break;
2211
2212 case PARALLEL:
2213 /* Search through the parallel looking for the set whose
2214 source was the expression that we're interested in. */
2215 first_set = NULL_RTX;
2216 set = NULL_RTX;
2217 for (i = 0; i < XVECLEN (pat, 0); i++)
2218 {
2219 rtx x = XVECEXP (pat, 0, i);
2220 if (GET_CODE (x) == SET)
2221 {
2222 /* If the source was a REG_EQUAL or REG_EQUIV note, we
2223 may not find an equivalent expression, but in this
2224 case the PARALLEL will have a single set. */
2225 if (first_set == NULL_RTX)
2226 first_set = x;
2227 if (expr_equiv_p (SET_SRC (x), expr->expr))
2228 {
2229 set = x;
2230 break;
2231 }
2232 }
2233 }
2234
2235 gcc_assert (first_set);
2236 if (set == NULL_RTX)
2237 set = first_set;
2238 break;
2239
2240 default:
2241 gcc_unreachable ();
2242 }
2243
2244 if (REG_P (SET_DEST (set)))
2245 {
2246 old_reg = SET_DEST (set);
2247 /* Check if we can modify the set destination in the original insn. */
2248 if (validate_change (insn, &SET_DEST (set), reg, 0))
2249 {
2250 new_insn = gen_move_insn (old_reg, reg);
2251 new_insn = emit_insn_after (new_insn, insn);
2252 }
2253 else
2254 {
2255 new_insn = gen_move_insn (reg, old_reg);
2256 new_insn = emit_insn_after (new_insn, insn);
2257 }
2258 }
2259 else /* This is possible only in case of a store to memory. */
2260 {
2261 old_reg = SET_SRC (set);
2262 new_insn = gen_move_insn (reg, old_reg);
2263
2264 /* Check if we can modify the set source in the original insn. */
2265 if (validate_change (insn, &SET_SRC (set), reg, 0))
2266 new_insn = emit_insn_before (new_insn, insn);
2267 else
2268 new_insn = emit_insn_after (new_insn, insn);
2269 }
2270
2271 gcse_create_count++;
2272
2273 if (dump_file)
2274 fprintf (dump_file,
2275 "PRE: bb %d, insn %d, copy expression %d in insn %d to reg %d\n",
2276 BLOCK_FOR_INSN (insn)->index, INSN_UID (new_insn), indx,
2277 INSN_UID (insn), regno);
2278 }
2279
2280 /* Copy available expressions that reach the redundant expression
2281 to `reaching_reg'. */
2282
2283 static void
2284 pre_insert_copies (void)
2285 {
2286 unsigned int i, added_copy;
2287 struct gcse_expr *expr;
2288 struct gcse_occr *occr;
2289 struct gcse_occr *avail;
2290
2291 /* For each available expression in the table, copy the result to
2292 `reaching_reg' if the expression reaches a deleted one.
2293
2294 ??? The current algorithm is rather brute force.
2295 Need to do some profiling. */
2296
2297 for (i = 0; i < expr_hash_table.size; i++)
2298 for (expr = expr_hash_table.table[i]; expr; expr = expr->next_same_hash)
2299 {
2300 /* If the basic block isn't reachable, PPOUT will be TRUE. However,
2301 we don't want to insert a copy here because the expression may not
2302 really be redundant. So only insert an insn if the expression was
2303 deleted. This test also avoids further processing if the
2304 expression wasn't deleted anywhere. */
2305 if (expr->reaching_reg == NULL)
2306 continue;
2307
2308 /* Set when we add a copy for that expression. */
2309 added_copy = 0;
2310
2311 for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
2312 {
2313 if (! occr->deleted_p)
2314 continue;
2315
2316 for (avail = expr->avail_occr; avail != NULL; avail = avail->next)
2317 {
2318 rtx_insn *insn = avail->insn;
2319
2320 /* No need to handle this one if handled already. */
2321 if (avail->copied_p)
2322 continue;
2323
2324 /* Don't handle this one if it's a redundant one. */
2325 if (insn->deleted ())
2326 continue;
2327
2328 /* Or if the expression doesn't reach the deleted one. */
2329 if (! pre_expr_reaches_here_p (BLOCK_FOR_INSN (avail->insn),
2330 expr,
2331 BLOCK_FOR_INSN (occr->insn)))
2332 continue;
2333
2334 added_copy = 1;
2335
2336 /* Copy the result of avail to reaching_reg. */
2337 pre_insert_copy_insn (expr, insn);
2338 avail->copied_p = 1;
2339 }
2340 }
2341
2342 if (added_copy)
2343 update_ld_motion_stores (expr);
2344 }
2345 }
2346
2347 struct set_data
2348 {
2349 rtx_insn *insn;
2350 const_rtx set;
2351 int nsets;
2352 };
2353
2354 /* Increment number of sets and record set in DATA. */
2355
2356 static void
2357 record_set_data (rtx dest, const_rtx set, void *data)
2358 {
2359 struct set_data *s = (struct set_data *)data;
2360
2361 if (GET_CODE (set) == SET)
2362 {
2363 /* We allow insns having multiple sets, where all but one are
2364 dead, as single set insns. In the common case only a single
2365 set is present, so we want to avoid checking for REG_UNUSED
2366 notes unless necessary. */
2367 if (s->nsets == 1
2368 && find_reg_note (s->insn, REG_UNUSED, SET_DEST (s->set))
2369 && !side_effects_p (s->set))
2370 s->nsets = 0;
2371
2372 if (!s->nsets)
2373 {
2374 /* Record this set. */
2375 s->nsets += 1;
2376 s->set = set;
2377 }
2378 else if (!find_reg_note (s->insn, REG_UNUSED, dest)
2379 || side_effects_p (set))
2380 s->nsets += 1;
2381 }
2382 }
2383
2384 static const_rtx
2385 single_set_gcse (rtx_insn *insn)
2386 {
2387 struct set_data s;
2388 rtx pattern;
2389
2390 gcc_assert (INSN_P (insn));
2391
2392 /* Optimize common case. */
2393 pattern = PATTERN (insn);
2394 if (GET_CODE (pattern) == SET)
2395 return pattern;
2396
2397 s.insn = insn;
2398 s.nsets = 0;
2399 note_stores (pattern, record_set_data, &s);
2400
2401 /* Considered invariant insns have exactly one set. */
2402 gcc_assert (s.nsets == 1);
2403 return s.set;
2404 }
2405
2406 /* Emit move from SRC to DEST noting the equivalence with expression computed
2407 in INSN. */
2408
2409 static rtx_insn *
2410 gcse_emit_move_after (rtx dest, rtx src, rtx_insn *insn)
2411 {
2412 rtx_insn *new_rtx;
2413 const_rtx set = single_set_gcse (insn);
2414 rtx set2;
2415 rtx note;
2416 rtx eqv = NULL_RTX;
2417
2418 /* This should never fail since we're creating a reg->reg copy
2419 we've verified to be valid. */
2420
2421 new_rtx = emit_insn_after (gen_move_insn (dest, src), insn);
2422
2423 /* Note the equivalence for local CSE pass. Take the note from the old
2424 set if there was one. Otherwise record the SET_SRC from the old set
2425 unless DEST is also an operand of the SET_SRC. */
2426 set2 = single_set (new_rtx);
2427 if (!set2 || !rtx_equal_p (SET_DEST (set2), dest))
2428 return new_rtx;
2429 if ((note = find_reg_equal_equiv_note (insn)))
2430 eqv = XEXP (note, 0);
2431 else if (! REG_P (dest)
2432 || ! reg_mentioned_p (dest, SET_SRC (set)))
2433 eqv = SET_SRC (set);
2434
2435 if (eqv != NULL_RTX)
2436 set_unique_reg_note (new_rtx, REG_EQUAL, copy_insn_1 (eqv));
2437
2438 return new_rtx;
2439 }
2440
2441 /* Delete redundant computations.
2442 Deletion is done by changing the insn to copy the `reaching_reg' of
2443 the expression into the result of the SET. It is left to later passes
2444 to propagate the copy or eliminate it.
2445
2446 Return nonzero if a change is made. */
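
/* Sketch of the transformation at the source level (hypothetical
   example):

       x = a + b;          // bit set in pre_delete_map for this block

   becomes

       x = reaching_reg;   // reaching_reg holds "a + b"

   leaving later copy propagation or CSE to remove the copy.  */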
2447
2448 static int
2449 pre_delete (void)
2450 {
2451 unsigned int i;
2452 int changed;
2453 struct gcse_expr *expr;
2454 struct gcse_occr *occr;
2455
2456 changed = 0;
2457 for (i = 0; i < expr_hash_table.size; i++)
2458 for (expr = expr_hash_table.table[i]; expr; expr = expr->next_same_hash)
2459 {
2460 int indx = expr->bitmap_index;
2461
2462 /* We only need to search antic_occr since we require ANTLOC != 0. */
2463 for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
2464 {
2465 rtx_insn *insn = occr->insn;
2466 rtx set;
2467 basic_block bb = BLOCK_FOR_INSN (insn);
2468
2469 /* We only delete insns that have a single_set. */
2470 if (bitmap_bit_p (pre_delete_map[bb->index], indx)
2471 && (set = single_set (insn)) != 0
2472 && dbg_cnt (pre_insn))
2473 {
2474 /* Create a pseudo-reg to store the result of reaching
2475 expressions into. Get the mode for the new pseudo from
2476 the mode of the original destination pseudo. */
2477 if (expr->reaching_reg == NULL)
2478 expr->reaching_reg = gen_reg_rtx_and_attrs (SET_DEST (set));
2479
2480 gcse_emit_move_after (SET_DEST (set), expr->reaching_reg, insn);
2481 delete_insn (insn);
2482 occr->deleted_p = 1;
2483 changed = 1;
2484 gcse_subst_count++;
2485
2486 if (dump_file)
2487 {
2488 fprintf (dump_file,
2489 "PRE: redundant insn %d (expression %d) in ",
2490 INSN_UID (insn), indx);
2491 fprintf (dump_file, "bb %d, reaching reg is %d\n",
2492 bb->index, REGNO (expr->reaching_reg));
2493 }
2494 }
2495 }
2496 }
2497
2498 return changed;
2499 }
2500
2501 /* Perform GCSE optimizations using PRE.
2502 This is called by one_pre_gcse_pass after all the dataflow analysis
2503 has been done.
2504
2505 This is based on the original Morel-Renvoise paper, Fred Chow's thesis, and
2506 lazy code motion from Knoop, Ruthing and Steffen as described in Advanced
2507 Compiler Design and Implementation.
2508
2509 ??? A new pseudo reg is created to hold the reaching expression. The nice
2510 thing about the classical approach is that it would try to use an existing
2511 reg. If the register can't be adequately optimized [i.e. we introduce
2512 reload problems], one could add a pass here to propagate the new register
2513 through the block.
2514
2515 ??? We don't handle single sets in PARALLELs because we're [currently] not
2516 able to copy the rest of the parallel when we insert copies to create full
2517 redundancies from partial redundancies. However, there's no reason why we
2518 can't handle PARALLELs in the cases where there are no partial
2519 redundancies. */
2520
2521 static int
2522 pre_gcse (struct edge_list *edge_list)
2523 {
2524 unsigned int i;
2525 int did_insert, changed;
2526 struct gcse_expr **index_map;
2527 struct gcse_expr *expr;
2528
2529 /* Compute a mapping from expression number (`bitmap_index') to
2530 hash table entry. */
2531
2532 index_map = XCNEWVEC (struct gcse_expr *, expr_hash_table.n_elems);
2533 for (i = 0; i < expr_hash_table.size; i++)
2534 for (expr = expr_hash_table.table[i]; expr; expr = expr->next_same_hash)
2535 index_map[expr->bitmap_index] = expr;
2536
2537 /* Delete the redundant insns first so that
2538 - we know what register to use for the new insns and for the other
2539 ones with reaching expressions
2540 - we know which insns are redundant when we go to create copies */
2541
2542 changed = pre_delete ();
2543 did_insert = pre_edge_insert (edge_list, index_map);
2544
2545 /* In other places with reaching expressions, copy the expression to the
2546 specially allocated pseudo-reg that reaches the redundant expr. */
2547 pre_insert_copies ();
2548 if (did_insert)
2549 {
2550 commit_edge_insertions ();
2551 changed = 1;
2552 }
2553
2554 free (index_map);
2555 return changed;
2556 }
2557
2558 /* Top level routine to perform one PRE GCSE pass.
2559
2560 Return nonzero if a change was made. */
2561
2562 static int
2563 one_pre_gcse_pass (void)
2564 {
2565 int changed = 0;
2566
2567 gcse_subst_count = 0;
2568 gcse_create_count = 0;
2569
2570 /* Return if there's nothing to do, or it is too expensive. */
2571 if (n_basic_blocks_for_fn (cfun) <= NUM_FIXED_BLOCKS + 1
2572 || gcse_or_cprop_is_too_expensive (_("PRE disabled")))
2573 return 0;
2574
2575 /* We need alias. */
2576 init_alias_analysis ();
2577
2578 bytes_used = 0;
2579 gcc_obstack_init (&gcse_obstack);
2580 alloc_gcse_mem ();
2581
2582 alloc_hash_table (&expr_hash_table);
2583 add_noreturn_fake_exit_edges ();
2584 if (flag_gcse_lm)
2585 compute_ld_motion_mems ();
2586
2587 compute_hash_table (&expr_hash_table);
2588 if (flag_gcse_lm)
2589 trim_ld_motion_mems ();
2590 if (dump_file)
2591 dump_hash_table (dump_file, "Expression", &expr_hash_table);
2592
2593 if (expr_hash_table.n_elems > 0)
2594 {
2595 struct edge_list *edge_list;
2596 alloc_pre_mem (last_basic_block_for_fn (cfun), expr_hash_table.n_elems);
2597 edge_list = compute_pre_data ();
2598 changed |= pre_gcse (edge_list);
2599 free_edge_list (edge_list);
2600 free_pre_mem ();
2601 }
2602
2603 if (flag_gcse_lm)
2604 free_ld_motion_mems ();
2605 remove_fake_exit_edges ();
2606 free_hash_table (&expr_hash_table);
2607
2608 free_gcse_mem ();
2609 obstack_free (&gcse_obstack, NULL);
2610
2611 /* We are finished with alias. */
2612 end_alias_analysis ();
2613
2614 if (dump_file)
2615 {
2616 fprintf (dump_file, "PRE GCSE of %s, %d basic blocks, %d bytes needed, ",
2617 current_function_name (), n_basic_blocks_for_fn (cfun),
2618 bytes_used);
2619 fprintf (dump_file, "%d substs, %d insns created\n",
2620 gcse_subst_count, gcse_create_count);
2621 }
2622
2623 return changed;
2624 }
2625
2626 /* If X contains any LABEL_REF's, add REG_LABEL_OPERAND notes for them
2627 to INSN. If such notes are added to an insn which references a
2628 CODE_LABEL, the LABEL_NUSES count is incremented. We have to add
2629 these notes, because the following loop optimization pass requires
2630 them. */
2631
2632 /* ??? If there was a jump optimization pass after gcse and before loop,
2633 then we would not need to do this here, because jump would add the
2634 necessary REG_LABEL_OPERAND and REG_LABEL_TARGET notes. */
2635
2636 static void
2637 add_label_notes (rtx x, rtx_insn *insn)
2638 {
2639 enum rtx_code code = GET_CODE (x);
2640 int i, j;
2641 const char *fmt;
2642
2643 if (code == LABEL_REF && !LABEL_REF_NONLOCAL_P (x))
2644 {
2645 /* This code used to ignore labels that referred to dispatch tables to
2646 avoid flow generating (slightly) worse code.
2647
2648 We no longer ignore such label references (see LABEL_REF handling in
2649 mark_jump_label for additional information). */
2650
2651 /* There's no reason for current users to emit jump-insns with
2652 such a LABEL_REF, so we don't have to handle REG_LABEL_TARGET
2653 notes. */
2654 gcc_assert (!JUMP_P (insn));
2655 add_reg_note (insn, REG_LABEL_OPERAND, LABEL_REF_LABEL (x));
2656
2657 if (LABEL_P (LABEL_REF_LABEL (x)))
2658 LABEL_NUSES (LABEL_REF_LABEL (x))++;
2659
2660 return;
2661 }
2662
2663 for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
2664 {
2665 if (fmt[i] == 'e')
2666 add_label_notes (XEXP (x, i), insn);
2667 else if (fmt[i] == 'E')
2668 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
2669 add_label_notes (XVECEXP (x, i, j), insn);
2670 }
2671 }
2672
2673 /* Code Hoisting variables and subroutines. */
2674
2675 /* Very busy expressions. */
2676 static sbitmap *hoist_vbein;
2677 static sbitmap *hoist_vbeout;
2678
2679 /* ??? We could compute post dominators and run this algorithm in
2680 reverse to perform tail merging; doing so would probably be
2681 more effective than the tail merging code in jump.c.
2682
2683 It's unclear if tail merging could be run in parallel with
2684 code hoisting. It would be nice. */
2685
2686 /* Allocate vars used for code hoisting analysis. */
2687
2688 static void
2689 alloc_code_hoist_mem (int n_blocks, int n_exprs)
2690 {
2691 antloc = sbitmap_vector_alloc (n_blocks, n_exprs);
2692 transp = sbitmap_vector_alloc (n_blocks, n_exprs);
2693 comp = sbitmap_vector_alloc (n_blocks, n_exprs);
2694
2695 hoist_vbein = sbitmap_vector_alloc (n_blocks, n_exprs);
2696 hoist_vbeout = sbitmap_vector_alloc (n_blocks, n_exprs);
2697 }
2698
2699 /* Free vars used for code hoisting analysis. */
2700
2701 static void
2702 free_code_hoist_mem (void)
2703 {
2704 sbitmap_vector_free (antloc);
2705 sbitmap_vector_free (transp);
2706 sbitmap_vector_free (comp);
2707
2708 sbitmap_vector_free (hoist_vbein);
2709 sbitmap_vector_free (hoist_vbeout);
2710
2711 free_dominance_info (CDI_DOMINATORS);
2712 }
2713
2714 /* Compute the very busy expressions at entry/exit from each block.
2715
2716 An expression is very busy if all paths from a given point
2717 compute the expression. */
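
/* For reference, the fixpoint computed below corresponds to the usual
   backward dataflow equations (sketched informally):

       VBEout(bb) = (intersection of VBEin(s) over all successors s)
                    | COMP(bb)
       VBEin(bb)  = ANTLOC(bb) | (VBEout(bb) & TRANSP(bb))

   with VBEout left empty for the block laid out immediately before
   EXIT.  */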
2718
2719 static void
2720 compute_code_hoist_vbeinout (void)
2721 {
2722 int changed, passes;
2723 basic_block bb;
2724
2725 bitmap_vector_clear (hoist_vbeout, last_basic_block_for_fn (cfun));
2726 bitmap_vector_clear (hoist_vbein, last_basic_block_for_fn (cfun));
2727
2728 passes = 0;
2729 changed = 1;
2730
2731 while (changed)
2732 {
2733 changed = 0;
2734
2735 /* We scan the blocks in the reverse order to speed up
2736 the convergence. */
2737 FOR_EACH_BB_REVERSE_FN (bb, cfun)
2738 {
2739 if (bb->next_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
2740 {
2741 bitmap_intersection_of_succs (hoist_vbeout[bb->index],
2742 hoist_vbein, bb);
2743
2744 /* Include expressions in VBEout that are calculated
2745 in BB and available at its end. */
2746 bitmap_ior (hoist_vbeout[bb->index],
2747 hoist_vbeout[bb->index], comp[bb->index]);
2748 }
2749
2750 changed |= bitmap_or_and (hoist_vbein[bb->index],
2751 antloc[bb->index],
2752 hoist_vbeout[bb->index],
2753 transp[bb->index]);
2754 }
2755
2756 passes++;
2757 }
2758
2759 if (dump_file)
2760 {
2761 fprintf (dump_file, "hoisting vbeinout computation: %d passes\n", passes);
2762
2763 FOR_EACH_BB_FN (bb, cfun)
2764 {
2765 fprintf (dump_file, "vbein (%d): ", bb->index);
2766 dump_bitmap_file (dump_file, hoist_vbein[bb->index]);
2767 fprintf (dump_file, "vbeout(%d): ", bb->index);
2768 dump_bitmap_file (dump_file, hoist_vbeout[bb->index]);
2769 }
2770 }
2771 }
2772
2773 /* Top level routine to do the dataflow analysis needed by code hoisting. */
2774
2775 static void
2776 compute_code_hoist_data (void)
2777 {
2778 compute_local_properties (transp, comp, antloc, &expr_hash_table);
2779 prune_expressions (false);
2780 compute_code_hoist_vbeinout ();
2781 calculate_dominance_info (CDI_DOMINATORS);
2782 if (dump_file)
2783 fprintf (dump_file, "\n");
2784 }
2785
2786 /* Update register pressure for BB when hoisting an expression from
2787 instruction FROM, if live ranges of inputs are shrunk. Also
2788 maintain live_in information if the live range of a register referred
2789 to in FROM is shrunk.
2790
2791 Return 0 if register pressure doesn't change, otherwise return
2792 the number by which register pressure is decreased.
2793
2794 NOTE: Register pressure won't be increased in this function. */
2795
2796 static int
2797 update_bb_reg_pressure (basic_block bb, rtx_insn *from)
2798 {
2799 rtx dreg;
2800 rtx_insn *insn;
2801 basic_block succ_bb;
2802 df_ref use, op_ref;
2803 edge succ;
2804 edge_iterator ei;
2805 int decreased_pressure = 0;
2806 int nregs;
2807 enum reg_class pressure_class;
2808
2809 FOR_EACH_INSN_USE (use, from)
2810 {
2811 dreg = DF_REF_REAL_REG (use);
2812 /* The live range of the register is shrunk only if it isn't:
2813 1. referred to on any path from the end of this block to EXIT, or
2814 2. referred to by insns other than FROM in this block. */
2815 FOR_EACH_EDGE (succ, ei, bb->succs)
2816 {
2817 succ_bb = succ->dest;
2818 if (succ_bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
2819 continue;
2820
2821 if (bitmap_bit_p (BB_DATA (succ_bb)->live_in, REGNO (dreg)))
2822 break;
2823 }
2824 if (succ != NULL)
2825 continue;
2826
2827 op_ref = DF_REG_USE_CHAIN (REGNO (dreg));
2828 for (; op_ref; op_ref = DF_REF_NEXT_REG (op_ref))
2829 {
2830 if (!DF_REF_INSN_INFO (op_ref))
2831 continue;
2832
2833 insn = DF_REF_INSN (op_ref);
2834 if (BLOCK_FOR_INSN (insn) == bb
2835 && NONDEBUG_INSN_P (insn) && insn != from)
2836 break;
2837 }
2838
2839 pressure_class = get_regno_pressure_class (REGNO (dreg), &nregs);
2840 /* Decrease register pressure and update live_in information for
2841 this block. */
2842 if (!op_ref && pressure_class != NO_REGS)
2843 {
2844 decreased_pressure += nregs;
2845 BB_DATA (bb)->max_reg_pressure[pressure_class] -= nregs;
2846 bitmap_clear_bit (BB_DATA (bb)->live_in, REGNO (dreg));
2847 }
2848 }
2849 return decreased_pressure;
2850 }
2851
2852 /* Determine if the expression EXPR should be hoisted to EXPR_BB up in
2853 flow graph, if it can reach BB unimpaired. Stop the search if the
2854 expression would need to be moved more than DISTANCE instructions.
2855
2856 DISTANCE is the number of instructions through which EXPR can be
2857 hoisted up in flow graph.
2858
2859 BB_SIZE points to an array which contains the number of instructions
2860 for each basic block.
2861
2862 PRESSURE_CLASS and NREGS are register class and number of hard registers
2863 for storing EXPR.
2864
2865 HOISTED_BBS points to a bitmap indicating basic blocks through which
2866 EXPR is hoisted.
2867
2868 FROM is the instruction from which EXPR is hoisted.
2869
2870 It's unclear exactly what Muchnick meant by "unimpaired". It seems
2871 to me that the expression must either be computed or transparent in
2872 *every* block in the path(s) from EXPR_BB to BB. Any other definition
2873 would allow the expression to be hoisted out of loops, even if
2874 the expression wasn't a loop invariant.
2875
2876 Contrast this to reachability for PRE where an expression is
2877 considered reachable if *any* path reaches instead of *all*
2878 paths. */
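
/* Illustrative example (hypothetical) of why *all* paths matter: in

       do {
         x = a + b;
         a = f ();     // "a + b" is not transparent here
       } while (cond);

   a "reachable on any path" rule could hoist "a + b" above the loop
   even though A is redefined on every iteration; requiring each block
   along the way to compute the expression or be transparent for it
   rules that out.  */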
2879
2880 static int
2881 should_hoist_expr_to_dom (basic_block expr_bb, struct gcse_expr *expr,
2882 basic_block bb, sbitmap visited,
2883 HOST_WIDE_INT distance,
2884 int *bb_size, enum reg_class pressure_class,
2885 int *nregs, bitmap hoisted_bbs, rtx_insn *from)
2886 {
2887 unsigned int i;
2888 edge pred;
2889 edge_iterator ei;
2890 sbitmap_iterator sbi;
2891 int visited_allocated_locally = 0;
2892 int decreased_pressure = 0;
2893
2894 if (flag_ira_hoist_pressure)
2895 {
2896 /* Record the old information of basic block BB when it is
2897 visited for the first time. */
2898 if (!bitmap_bit_p (hoisted_bbs, bb->index))
2899 {
2900 struct bb_data *data = BB_DATA (bb);
2901 bitmap_copy (data->backup, data->live_in);
2902 data->old_pressure = data->max_reg_pressure[pressure_class];
2903 }
2904 decreased_pressure = update_bb_reg_pressure (bb, from);
2905 }
2906 /* Terminate the search if the distance for which EXPR is allowed
2907 to move is exhausted. */
2908 if (distance > 0)
2909 {
2910 if (flag_ira_hoist_pressure)
2911 {
2912 /* Prefer to hoist EXPR if register pressure is decreased. */
2913 if (decreased_pressure > *nregs)
2914 distance += bb_size[bb->index];
2915 /* Let EXPR be hoisted through a basic block at no cost if one
2916 of the following conditions is satisfied:
2917
2918 1. The basic block has low register pressure.
2919 2. Register pressure won't be increased after hoisting EXPR.
2920
2921 Constant expressions are handled conservatively, because
2922 hoisting constant expressions aggressively results in worse
2923 code. This decision is based on observations of CSiBE
2924 on the ARM target, while it has no obvious effect on other
2925 targets like x86, x86_64, mips and powerpc. */
2926 else if (CONST_INT_P (expr->expr)
2927 || (BB_DATA (bb)->max_reg_pressure[pressure_class]
2928 >= ira_class_hard_regs_num[pressure_class]
2929 && decreased_pressure < *nregs))
2930 distance -= bb_size[bb->index];
2931 }
2932 else
2933 distance -= bb_size[bb->index];
2934
2935 if (distance <= 0)
2936 return 0;
2937 }
2938 else
2939 gcc_assert (distance == 0);
2940
2941 if (visited == NULL)
2942 {
2943 visited_allocated_locally = 1;
2944 visited = sbitmap_alloc (last_basic_block_for_fn (cfun));
2945 bitmap_clear (visited);
2946 }
2947
2948 FOR_EACH_EDGE (pred, ei, bb->preds)
2949 {
2950 basic_block pred_bb = pred->src;
2951
2952 if (pred->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
2953 break;
2954 else if (pred_bb == expr_bb)
2955 continue;
2956 else if (bitmap_bit_p (visited, pred_bb->index))
2957 continue;
2958 else if (! bitmap_bit_p (transp[pred_bb->index], expr->bitmap_index))
2959 break;
2960 /* Not killed. */
2961 else
2962 {
2963 bitmap_set_bit (visited, pred_bb->index);
2964 if (! should_hoist_expr_to_dom (expr_bb, expr, pred_bb,
2965 visited, distance, bb_size,
2966 pressure_class, nregs,
2967 hoisted_bbs, from))
2968 break;
2969 }
2970 }
2971 if (visited_allocated_locally)
2972 {
2973 /* If EXPR can be hoisted to expr_bb, record basic blocks through
2974 which EXPR is hoisted in hoisted_bbs. */
2975 if (flag_ira_hoist_pressure && !pred)
2976 {
2977 /* Record the basic block from which EXPR is hoisted. */
2978 bitmap_set_bit (visited, bb->index);
2979 EXECUTE_IF_SET_IN_BITMAP (visited, 0, i, sbi)
2980 bitmap_set_bit (hoisted_bbs, i);
2981 }
2982 sbitmap_free (visited);
2983 }
2984
2985 return (pred == NULL);
2986 }
2987
2988 /* Find occurrence in BB. */
2989
2990 static struct gcse_occr *
2991 find_occr_in_bb (struct gcse_occr *occr, basic_block bb)
2992 {
2993 /* Find the right occurrence of this expression. */
2994 while (occr && BLOCK_FOR_INSN (occr->insn) != bb)
2995 occr = occr->next;
2996
2997 return occr;
2998 }
2999
3000 /* Actually perform code hoisting.
3001
3002 The code hoisting pass can hoist multiple computations of the same
3003 expression along dominated paths to a dominating basic block, like
3004 from b2/b3 to b1 as depicted below:
3005
3006 b1 ------
3007 /\ |
3008 / \ |
3009 bx by distance
3010 / \ |
3011 / \ |
3012 b2 b3 ------
3013
3014 Unfortunately code hoisting generally extends the live range of an
3015 output pseudo register, which increases register pressure and hurts
3016 register allocation. To address this issue, an attribute MAX_DISTANCE
3017 is computed and attached to each expression. The attribute is computed
3018 from rtx cost of the corresponding expression and it's used to control
3019 how long the expression can be hoisted up in flow graph. As the
3020 expression is hoisted up in flow graph, GCC decreases its DISTANCE
3021 and stops the hoist if DISTANCE reaches 0. Code hoisting can decrease
3022 register pressure if live ranges of inputs are shrunk.
3023
3024 Option "-fira-hoist-pressure" implements register-pressure-directed
3025 hoisting based on the above method. The rationale is:
3026 1. Calculate register pressure for each basic block by reusing IRA
3027 facility.
3028 2. When expression is hoisted through one basic block, GCC checks
3029 the change of live ranges for inputs/output. The basic block's
3030 register pressure will be increased because of the extended live
3031 range of the output. However, register pressure will be decreased
3032 if the live ranges of inputs are shrunk.
3033 3. After knowing how hoisting affects register pressure, GCC prefers
3034 to hoist the expression if it can decrease register pressure, by
3035 increasing DISTANCE of the corresponding expression.
3036 4. If hoisting the expression increases register pressure, GCC checks
3037 the register pressure of the basic block and decreases DISTANCE only if
3038 the register pressure is high. In other words, the expression will be
3039 hoisted through at no cost if the basic block has low register
3040 pressure.
3041 5. Update register pressure information for basic blocks through
3042 which expression is hoisted. */
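
/* A small worked sketch of the DISTANCE bookkeeping (illustrative
   numbers): suppose EXPR has MAX_DISTANCE 40 and an occurrence sits
   10 insns into a 25-insn dominated block.  hoist_code first credits
   the 15 insns the occurrence need not travel within its own block
   (40 + (25 - 10) = 55), and should_hoist_expr_to_dom then subtracts
   each intervening block's size, giving up once the budget drops to
   zero or below.  */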
3043
3044 static int
3045 hoist_code (void)
3046 {
3047 basic_block bb, dominated;
3048 vec<basic_block> dom_tree_walk;
3049 unsigned int dom_tree_walk_index;
3050 vec<basic_block> domby;
3051 unsigned int i, j, k;
3052 struct gcse_expr **index_map;
3053 struct gcse_expr *expr;
3054 int *to_bb_head;
3055 int *bb_size;
3056 int changed = 0;
3057 struct bb_data *data;
3058 /* Basic blocks that have occurrences reachable from BB. */
3059 bitmap from_bbs;
3060 /* Basic blocks through which expr is hoisted. */
3061 bitmap hoisted_bbs = NULL;
3062 bitmap_iterator bi;
3063
3064 /* Compute a mapping from expression number (`bitmap_index') to
3065 hash table entry. */
3066
3067 index_map = XCNEWVEC (struct gcse_expr *, expr_hash_table.n_elems);
3068 for (i = 0; i < expr_hash_table.size; i++)
3069 for (expr = expr_hash_table.table[i]; expr; expr = expr->next_same_hash)
3070 index_map[expr->bitmap_index] = expr;
3071
3072 /* Calculate sizes of basic blocks and note how far
3073 each instruction is from the start of its block. We then use this
3074 data to restrict the distance an expression can travel. */
3075
3076 to_bb_head = XCNEWVEC (int, get_max_uid ());
3077 bb_size = XCNEWVEC (int, last_basic_block_for_fn (cfun));
3078
3079 FOR_EACH_BB_FN (bb, cfun)
3080 {
3081 rtx_insn *insn;
3082 int to_head;
3083
3084 to_head = 0;
3085 FOR_BB_INSNS (bb, insn)
3086 {
3087 /* Don't count debug instructions, to avoid them affecting
3088 hoisting decisions. */
3089 if (NONDEBUG_INSN_P (insn))
3090 to_bb_head[INSN_UID (insn)] = to_head++;
3091 }
3092
3093 bb_size[bb->index] = to_head;
3094 }
3095
3096 gcc_assert (EDGE_COUNT (ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs) == 1
3097 && (EDGE_SUCC (ENTRY_BLOCK_PTR_FOR_FN (cfun), 0)->dest
3098 == ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb));
3099
3100 from_bbs = BITMAP_ALLOC (NULL);
3101 if (flag_ira_hoist_pressure)
3102 hoisted_bbs = BITMAP_ALLOC (NULL);
3103
3104 dom_tree_walk = get_all_dominated_blocks (CDI_DOMINATORS,
3105 ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb);
3106
3107 /* Walk over each basic block looking for potentially hoistable
3108 expressions; nothing gets hoisted from the entry block. */
3109 FOR_EACH_VEC_ELT (dom_tree_walk, dom_tree_walk_index, bb)
3110 {
3111 domby = get_dominated_to_depth (CDI_DOMINATORS, bb, MAX_HOIST_DEPTH);
3112
3113 if (domby.length () == 0)
3114 continue;
3115
3116 /* Examine each expression that is very busy at the exit of this
3117 block. These are the potentially hoistable expressions. */
3118 for (i = 0; i < SBITMAP_SIZE (hoist_vbeout[bb->index]); i++)
3119 {
3120 if (bitmap_bit_p (hoist_vbeout[bb->index], i))
3121 {
3122 int nregs = 0;
3123 enum reg_class pressure_class = NO_REGS;
3124 /* Current expression. */
3125 struct gcse_expr *expr = index_map[i];
3126 /* Number of occurrences of EXPR that can be hoisted to BB. */
3127 int hoistable = 0;
3128 /* Occurrences reachable from BB. */
3129 vec<occr_t> occrs_to_hoist = vNULL;
3130 /* We want to insert the expression into BB only once, so
3131 note when we've inserted it. */
3132 int insn_inserted_p;
3133 occr_t occr;
3134
3135 /* If an expression is computed in BB and is available at end of
3136 BB, hoist all occurrences dominated by BB to BB. */
3137 if (bitmap_bit_p (comp[bb->index], i))
3138 {
3139 occr = find_occr_in_bb (expr->antic_occr, bb);
3140
3141 if (occr)
3142 {
3143 /* An occurrence might've been already deleted
3144 while processing a dominator of BB. */
3145 if (!occr->deleted_p)
3146 {
3147 gcc_assert (NONDEBUG_INSN_P (occr->insn));
3148 hoistable++;
3149 }
3150 }
3151 else
3152 hoistable++;
3153 }
3154
3155 /* We've found a potentially hoistable expression, now
3156 we look at every block BB dominates to see if it
3157 computes the expression. */
3158 FOR_EACH_VEC_ELT (domby, j, dominated)
3159 {
3160 HOST_WIDE_INT max_distance;
3161
3162 /* Ignore self dominance. */
3163 if (bb == dominated)
3164 continue;
3165 /* We've found a dominated block, now see if it computes
3166 the busy expression and whether or not moving that
3167 expression to the "beginning" of that block is safe. */
3168 if (!bitmap_bit_p (antloc[dominated->index], i))
3169 continue;
3170
3171 occr = find_occr_in_bb (expr->antic_occr, dominated);
3172 gcc_assert (occr);
3173
3174 /* An occurrence might've been already deleted
3175 while processing a dominator of BB. */
3176 if (occr->deleted_p)
3177 continue;
3178 gcc_assert (NONDEBUG_INSN_P (occr->insn));
3179
3180 max_distance = expr->max_distance;
3181 if (max_distance > 0)
3182 /* Adjust MAX_DISTANCE to account for the fact that
3183 OCCR won't have to travel all of DOMINATED, but
3184 only part of it. */
3185 max_distance += (bb_size[dominated->index]
3186 - to_bb_head[INSN_UID (occr->insn)]);
3187
3188 pressure_class = get_pressure_class_and_nregs (occr->insn,
3189 &nregs);
3190
3191 /* Note if the expression should be hoisted from the dominated
3192 block to BB if it can reach DOMINATED unimpaired.
3193
3194 Keep track of how many times this expression is hoistable
3195 from a dominated block into BB. */
3196 if (should_hoist_expr_to_dom (bb, expr, dominated, NULL,
3197 max_distance, bb_size,
3198 pressure_class, &nregs,
3199 hoisted_bbs, occr->insn))
3200 {
3201 hoistable++;
3202 occrs_to_hoist.safe_push (occr);
3203 bitmap_set_bit (from_bbs, dominated->index);
3204 }
3205 }
3206
3207 /* If we found more than one hoistable occurrence of this
3208 expression, then note it in the vector of expressions to
3209 hoist. It makes no sense to hoist things which are computed
3210 in only one BB, and doing so tends to pessimize register
3211 allocation. One could increase this value to try harder
3212 to avoid any possible code expansion due to register
3213 allocation issues; however experiments have shown that
3214 the vast majority of hoistable expressions are only movable
3215 from two successors, so raising this threshold is likely
3216 to nullify any benefit we get from code hoisting. */
3217 if (hoistable > 1 && dbg_cnt (hoist_insn))
3218 {
3219 /* If (hoistable != vec::length), then there is
3220 an occurrence of EXPR in BB itself. Don't waste
3221 time looking for LCA in this case. */
3222 if ((unsigned) hoistable == occrs_to_hoist.length ())
3223 {
3224 basic_block lca;
3225
3226 lca = nearest_common_dominator_for_set (CDI_DOMINATORS,
3227 from_bbs);
3228 if (lca != bb)
3229 /* Punt, it's better to hoist these occurrences to
3230 LCA. */
3231 occrs_to_hoist.release ();
3232 }
3233 }
3234 else
3235 /* Punt, no point hoisting a single occurrence. */
3236 occrs_to_hoist.release ();
3237
3238 if (flag_ira_hoist_pressure
3239 && !occrs_to_hoist.is_empty ())
3240 {
3241 /* Increase register pressure of basic blocks to which
3242 expr is hoisted because of extended live range of
3243 output. */
3244 data = BB_DATA (bb);
3245 data->max_reg_pressure[pressure_class] += nregs;
3246 EXECUTE_IF_SET_IN_BITMAP (hoisted_bbs, 0, k, bi)
3247 {
3248 data = BB_DATA (BASIC_BLOCK_FOR_FN (cfun, k));
3249 data->max_reg_pressure[pressure_class] += nregs;
3250 }
3251 }
3252 else if (flag_ira_hoist_pressure)
3253 {
3254 /* Restore register pressure and live_in info for basic
3255 blocks recorded in hoisted_bbs when expr will not be
3256 hoisted. */
3257 EXECUTE_IF_SET_IN_BITMAP (hoisted_bbs, 0, k, bi)
3258 {
3259 data = BB_DATA (BASIC_BLOCK_FOR_FN (cfun, k));
3260 bitmap_copy (data->live_in, data->backup);
3261 data->max_reg_pressure[pressure_class]
3262 = data->old_pressure;
3263 }
3264 }
3265
3266 if (flag_ira_hoist_pressure)
3267 bitmap_clear (hoisted_bbs);
3268
3269 insn_inserted_p = 0;
3270
3271 /* Walk through occurrences of the I'th expression we want
3272 to hoist to BB and make the transformations. */
3273 FOR_EACH_VEC_ELT (occrs_to_hoist, j, occr)
3274 {
3275 rtx_insn *insn;
3276 const_rtx set;
3277
3278 gcc_assert (!occr->deleted_p);
3279
3280 insn = occr->insn;
3281 set = single_set_gcse (insn);
3282
3283 /* Create a pseudo-reg to store the result of reaching
3284 expressions into. Get the mode for the new pseudo
3285 from the mode of the original destination pseudo.
3286
3287 It is important to use new pseudos whenever we
3288 emit a set. This will allow reload to use
3289 rematerialization for such registers. */
3290 if (!insn_inserted_p)
3291 expr->reaching_reg
3292 = gen_reg_rtx_and_attrs (SET_DEST (set));
3293
3294 gcse_emit_move_after (SET_DEST (set), expr->reaching_reg,
3295 insn);
3296 delete_insn (insn);
3297 occr->deleted_p = 1;
3298 changed = 1;
3299 gcse_subst_count++;
3300
3301 if (!insn_inserted_p)
3302 {
3303 insert_insn_end_basic_block (expr, bb);
3304 insn_inserted_p = 1;
3305 }
3306 }
3307
3308 occrs_to_hoist.release ();
3309 bitmap_clear (from_bbs);
3310 }
3311 }
3312 domby.release ();
3313 }
3314
3315 dom_tree_walk.release ();
3316 BITMAP_FREE (from_bbs);
3317 if (flag_ira_hoist_pressure)
3318 BITMAP_FREE (hoisted_bbs);
3319
3320 free (bb_size);
3321 free (to_bb_head);
3322 free (index_map);
3323
3324 return changed;
3325 }
3326
3327 /* Return pressure class and number of needed hard registers (through
3328 *NREGS) of register REGNO. */
3329 static enum reg_class
3330 get_regno_pressure_class (int regno, int *nregs)
3331 {
3332 if (regno >= FIRST_PSEUDO_REGISTER)
3333 {
3334 enum reg_class pressure_class;
3335
3336 pressure_class = reg_allocno_class (regno);
3337 pressure_class = ira_pressure_class_translate[pressure_class];
3338 *nregs
3339 = ira_reg_class_max_nregs[pressure_class][PSEUDO_REGNO_MODE (regno)];
3340 return pressure_class;
3341 }
3342 else if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno)
3343 && ! TEST_HARD_REG_BIT (eliminable_regset, regno))
3344 {
3345 *nregs = 1;
3346 return ira_pressure_class_translate[REGNO_REG_CLASS (regno)];
3347 }
3348 else
3349 {
3350 *nregs = 0;
3351 return NO_REGS;
3352 }
3353 }
3354
3355 /* Return pressure class and number of hard registers (through *NREGS)
3356 for destination of INSN. */
3357 static enum reg_class
3358 get_pressure_class_and_nregs (rtx_insn *insn, int *nregs)
3359 {
3360 rtx reg;
3361 enum reg_class pressure_class;
3362 const_rtx set = single_set_gcse (insn);
3363
3364 reg = SET_DEST (set);
3365 if (GET_CODE (reg) == SUBREG)
3366 reg = SUBREG_REG (reg);
3367 if (MEM_P (reg))
3368 {
3369 *nregs = 0;
3370 pressure_class = NO_REGS;
3371 }
3372 else
3373 {
3374 gcc_assert (REG_P (reg));
3375 pressure_class = reg_allocno_class (REGNO (reg));
3376 pressure_class = ira_pressure_class_translate[pressure_class];
3377 *nregs
3378 = ira_reg_class_max_nregs[pressure_class][GET_MODE (SET_SRC (set))];
3379 }
3380 return pressure_class;
3381 }
3382
3383 /* Increase (if INCR_P) or decrease current register pressure for
3384 register REGNO. */
3385 static void
change_pressure(int regno,bool incr_p)3386 change_pressure (int regno, bool incr_p)
3387 {
3388 int nregs;
3389 enum reg_class pressure_class;
3390
3391 pressure_class = get_regno_pressure_class (regno, &nregs);
3392 if (! incr_p)
3393 curr_reg_pressure[pressure_class] -= nregs;
3394 else
3395 {
3396 curr_reg_pressure[pressure_class] += nregs;
3397 if (BB_DATA (curr_bb)->max_reg_pressure[pressure_class]
3398 < curr_reg_pressure[pressure_class])
3399 BB_DATA (curr_bb)->max_reg_pressure[pressure_class]
3400 = curr_reg_pressure[pressure_class];
3401 }
3402 }
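
/* For example, during the backward scan in calculate_bb_reg_pressure
   below, when a pseudo needing two GENERAL_REGS hard registers becomes
   live, change_pressure (regno, true) adds 2 to
   curr_reg_pressure[GENERAL_REGS] and raises the maximum recorded for
   CURR_BB if the new value exceeds it; the matching decrement when the
   register is set leaves the recorded maximum untouched.  */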

/* Calculate register pressure for each basic block by walking insns
   from last to first.  */
static void
calculate_bb_reg_pressure (void)
{
  int i;
  unsigned int j;
  rtx_insn *insn;
  basic_block bb;
  bitmap curr_regs_live;
  bitmap_iterator bi;

  ira_setup_eliminable_regset ();
  curr_regs_live = BITMAP_ALLOC (&reg_obstack);
  FOR_EACH_BB_FN (bb, cfun)
    {
      curr_bb = bb;
      BB_DATA (bb)->live_in = BITMAP_ALLOC (NULL);
      BB_DATA (bb)->backup = BITMAP_ALLOC (NULL);
      bitmap_copy (BB_DATA (bb)->live_in, df_get_live_in (bb));
      bitmap_copy (curr_regs_live, df_get_live_out (bb));
      for (i = 0; i < ira_pressure_classes_num; i++)
        curr_reg_pressure[ira_pressure_classes[i]] = 0;
      EXECUTE_IF_SET_IN_BITMAP (curr_regs_live, 0, j, bi)
        change_pressure (j, true);

      FOR_BB_INSNS_REVERSE (bb, insn)
        {
          rtx dreg;
          int regno;
          df_ref def, use;

          if (! NONDEBUG_INSN_P (insn))
            continue;

          FOR_EACH_INSN_DEF (def, insn)
            {
              dreg = DF_REF_REAL_REG (def);
              gcc_assert (REG_P (dreg));
              regno = REGNO (dreg);
              if (!(DF_REF_FLAGS (def)
                    & (DF_REF_PARTIAL | DF_REF_CONDITIONAL)))
                {
                  if (bitmap_clear_bit (curr_regs_live, regno))
                    change_pressure (regno, false);
                }
            }

          FOR_EACH_INSN_USE (use, insn)
            {
              dreg = DF_REF_REAL_REG (use);
              gcc_assert (REG_P (dreg));
              regno = REGNO (dreg);
              if (bitmap_set_bit (curr_regs_live, regno))
                change_pressure (regno, true);
            }
        }
    }
  BITMAP_FREE (curr_regs_live);

  if (dump_file == NULL)
    return;

  fprintf (dump_file, "\nRegister Pressure: \n");
  FOR_EACH_BB_FN (bb, cfun)
    {
      fprintf (dump_file, " Basic block %d: \n", bb->index);
      for (i = 0; i < ira_pressure_classes_num; i++)
        {
          enum reg_class pressure_class;

          pressure_class = ira_pressure_classes[i];
          if (BB_DATA (bb)->max_reg_pressure[pressure_class] == 0)
            continue;

          fprintf (dump_file, " %s=%d\n", reg_class_names[pressure_class],
                   BB_DATA (bb)->max_reg_pressure[pressure_class]);
        }
    }
  fprintf (dump_file, "\n");
}

/* Top level routine to perform one code hoisting (aka unification) pass.

   Return nonzero if a change was made.  */

static int
one_code_hoisting_pass (void)
{
  int changed = 0;

  gcse_subst_count = 0;
  gcse_create_count = 0;

  /* Return if there's nothing to do, or it is too expensive.  */
  if (n_basic_blocks_for_fn (cfun) <= NUM_FIXED_BLOCKS + 1
      || gcse_or_cprop_is_too_expensive (_("GCSE disabled")))
    return 0;

  doing_code_hoisting_p = true;

  /* Calculate register pressure for each basic block.  */
  if (flag_ira_hoist_pressure)
    {
      regstat_init_n_sets_and_refs ();
      ira_set_pseudo_classes (false, dump_file);
      alloc_aux_for_blocks (sizeof (struct bb_data));
      calculate_bb_reg_pressure ();
      regstat_free_n_sets_and_refs ();
    }

  /* We need alias analysis.  */
  init_alias_analysis ();

  bytes_used = 0;
  gcc_obstack_init (&gcse_obstack);
  alloc_gcse_mem ();

  alloc_hash_table (&expr_hash_table);
  compute_hash_table (&expr_hash_table);
  if (dump_file)
    dump_hash_table (dump_file, "Code Hoisting Expressions", &expr_hash_table);

  if (expr_hash_table.n_elems > 0)
    {
      alloc_code_hoist_mem (last_basic_block_for_fn (cfun),
                            expr_hash_table.n_elems);
      compute_code_hoist_data ();
      changed = hoist_code ();
      free_code_hoist_mem ();
    }

  if (flag_ira_hoist_pressure)
    {
      free_aux_for_blocks ();
      free_reg_info ();
    }
  free_hash_table (&expr_hash_table);
  free_gcse_mem ();
  obstack_free (&gcse_obstack, NULL);

  /* We are finished with alias analysis.  */
  end_alias_analysis ();

  if (dump_file)
    {
      fprintf (dump_file, "HOIST of %s, %d basic blocks, %d bytes needed, ",
               current_function_name (), n_basic_blocks_for_fn (cfun),
               bytes_used);
      fprintf (dump_file, "%d substs, %d insns created\n",
               gcse_subst_count, gcse_create_count);
    }

  doing_code_hoisting_p = false;

  return changed;
}

/* Here we provide the things required to do store motion towards the exit.
   In order for this to be effective, gcse also needed to be taught how to
   move a load when it is killed only by a store to itself.

	    int i;
	    float a[10];

	    void foo (float scale)
	    {
	      for (i = 0; i < 10; i++)
		a[i] *= scale;
	    }

   'i' is both loaded from and stored to in the loop.  Normally, gcse cannot
   move the load out of the loop, since it is live around the loop and is
   stored to at the bottom of the loop.

   The 'Load Motion' referred to and implemented in this file is an
   enhancement to gcse which, when using edge-based LCM, recognizes this
   situation and allows gcse to move the load out of the loop.

   Once gcse has hoisted the load, store motion can then push this load
   towards the exit, and we end up with no loads or stores of 'i' in the
   loop.  */
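
/* For the example above, the net effect is roughly equivalent to the
   following source-level rewrite.  This is a sketch for illustration
   only; the actual transformation happens on RTL, and the variable 't'
   merely stands for the reaching register:

	    void foo (float scale)
	    {
	      int t = i;
	      for (t = 0; t < 10; t++)
		a[t] *= scale;
	      i = t;
	    }

   The load of 'i' hoisted before the loop is in fact dead here and
   would be deleted by later passes; the loop body works purely on the
   register, and store motion sinks the single remaining store of 'i'
   past the loop exit.  */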

/* This will search the ldst list for a matching expression.  If it
   doesn't find one, we create one and initialize it.  */

static struct ls_expr *
ldst_entry (rtx x)
{
  int do_not_record_p = 0;
  struct ls_expr * ptr;
  unsigned int hash;
  ls_expr **slot;
  struct ls_expr e;

  hash = hash_rtx (x, GET_MODE (x), &do_not_record_p,
		   NULL, /*have_reg_qty=*/false);

  e.pattern = x;
  slot = pre_ldst_table->find_slot_with_hash (&e, hash, INSERT);
  if (*slot)
    return *slot;

  ptr = XNEW (struct ls_expr);

  ptr->next = pre_ldst_mems;
  ptr->expr = NULL;
  ptr->pattern = x;
  ptr->pattern_regs = NULL_RTX;
  ptr->loads = NULL;
  ptr->stores = NULL;
  ptr->reaching_reg = NULL_RTX;
  ptr->invalid = 0;
  ptr->index = 0;
  ptr->hash_index = hash;
  pre_ldst_mems = ptr;
  *slot = ptr;

  return ptr;
}

/* Free up an individual ldst entry.  */

static void
free_ldst_entry (struct ls_expr * ptr)
{
  free_INSN_LIST_list (& ptr->loads);
  free_INSN_LIST_list (& ptr->stores);

  free (ptr);
}

/* Free up all memory associated with the ldst list.  */

static void
free_ld_motion_mems (void)
{
  delete pre_ldst_table;
  pre_ldst_table = NULL;

  while (pre_ldst_mems)
    {
      struct ls_expr * tmp = pre_ldst_mems;

      pre_ldst_mems = pre_ldst_mems->next;

      free_ldst_entry (tmp);
    }

  pre_ldst_mems = NULL;
}

/* Dump debugging info about the ldst list.  */

static void
print_ldst_list (FILE * file)
{
  struct ls_expr * ptr;

  fprintf (file, "LDST list: \n");

  for (ptr = pre_ldst_mems; ptr != NULL; ptr = ptr->next)
    {
      fprintf (file, " Pattern (%3d): ", ptr->index);

      print_rtl (file, ptr->pattern);

      fprintf (file, "\n Loads : ");

      if (ptr->loads)
	print_rtl (file, ptr->loads);
      else
	fprintf (file, "(nil)");

      fprintf (file, "\n Stores : ");

      if (ptr->stores)
	print_rtl (file, ptr->stores);
      else
	fprintf (file, "(nil)");

      fprintf (file, "\n\n");
    }

  fprintf (file, "\n");
}

/* Return the ldst list entry whose pattern matches X, or NULL if there
   is none or the matching entry has been invalidated.  */

static struct ls_expr *
find_rtx_in_ldst (rtx x)
{
  struct ls_expr e;
  ls_expr **slot;
  if (!pre_ldst_table)
    return NULL;
  e.pattern = x;
  slot = pre_ldst_table->find_slot (&e, NO_INSERT);
  if (!slot || (*slot)->invalid)
    return NULL;
  return *slot;
}

/* Load Motion for loads which only kill themselves.  */

/* Return true if X, a MEM, is a simple access with no side effects.
   These are the types of loads we consider for the ld_motion list,
   otherwise we let the usual aliasing take care of it.  */

static int
simple_mem (const_rtx x)
{
  if (MEM_VOLATILE_P (x))
    return 0;

  if (GET_MODE (x) == BLKmode)
    return 0;

  /* If we are handling exceptions, we must be careful with memory references
     that may trap.  If we are not, the behavior is undefined, so we may just
     continue.  */
  if (cfun->can_throw_non_call_exceptions && may_trap_p (x))
    return 0;

  if (side_effects_p (x))
    return 0;

  /* Do not consider function arguments passed on the stack.  */
  if (reg_mentioned_p (stack_pointer_rtx, x))
    return 0;

  if (flag_float_store && FLOAT_MODE_P (GET_MODE (x)))
    return 0;

  return 1;
}
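
/* For instance, a scalar load from a fixed address, schematically
   (mem:SI (symbol_ref ("i"))), passes all of the checks above, whereas
   a volatile MEM, a BLKmode MEM, or a stack-relative access such as
   (mem:SI (plus:SI (reg:SI sp) (const_int 4))) is rejected, the last
   because the stack pointer is mentioned in the address.  */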

/* Make sure there isn't a buried reference in this pattern anywhere.
   If there is, invalidate the entry for it since we're not capable
   of fixing it up just yet.  We have to be sure we know about ALL
   loads, since the aliasing code treats the entries in the ld_motion
   list as not aliasing themselves.  If we miss a load, we will get
   the wrong value, since gcse might common it and we won't know to
   fix it up.  */

static void
invalidate_any_buried_refs (rtx x)
{
  const char * fmt;
  int i, j;
  struct ls_expr * ptr;

  /* Invalidate it in the list.  */
  if (MEM_P (x) && simple_mem (x))
    {
      ptr = ldst_entry (x);
      ptr->invalid = 1;
    }

  /* Recursively process the insn.  */
  fmt = GET_RTX_FORMAT (GET_CODE (x));

  for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        invalidate_any_buried_refs (XEXP (x, i));
      else if (fmt[i] == 'E')
        for (j = XVECLEN (x, i) - 1; j >= 0; j--)
          invalidate_any_buried_refs (XVECEXP (x, i, j));
    }
}

/* Find all the 'simple' MEMs which are used in LOADs and STORES.  Simple
   means a MEM load or store to a symbol, with no side effects and no
   registers in the address expression.  For a MEM destination, we also
   check that the insn is still valid if we replace the destination with
   a REG, as is done in update_ld_motion_stores.  Any uses/defs that do
   not match these criteria are invalidated and trimmed out later.  */

static void
compute_ld_motion_mems (void)
{
  struct ls_expr * ptr;
  basic_block bb;
  rtx_insn *insn;

  pre_ldst_mems = NULL;
  pre_ldst_table = new hash_table<pre_ldst_expr_hasher> (13);

  FOR_EACH_BB_FN (bb, cfun)
    {
      FOR_BB_INSNS (bb, insn)
        {
          if (NONDEBUG_INSN_P (insn))
            {
              if (GET_CODE (PATTERN (insn)) == SET)
                {
                  rtx src = SET_SRC (PATTERN (insn));
                  rtx dest = SET_DEST (PATTERN (insn));

                  /* Check for a simple load.  */
                  if (MEM_P (src) && simple_mem (src))
                    {
                      ptr = ldst_entry (src);
                      if (REG_P (dest))
                        ptr->loads = alloc_INSN_LIST (insn, ptr->loads);
                      else
                        ptr->invalid = 1;
                    }
                  else
                    {
                      /* Make sure there isn't a buried load somewhere.  */
                      invalidate_any_buried_refs (src);
                    }

                  /* Check for a simple load through a REG_EQUAL note.  */
                  rtx note = find_reg_equal_equiv_note (insn), src_eq;
                  if (note
                      && REG_NOTE_KIND (note) == REG_EQUAL
                      && (src_eq = XEXP (note, 0))
                      && !(MEM_P (src_eq) && simple_mem (src_eq)))
                    invalidate_any_buried_refs (src_eq);

                  /* Check for stores.  Don't worry about aliased ones; they
                     will block any movement we might do later.  We only care
                     about this exact pattern since those are the only
                     circumstances in which we will ignore the aliasing
                     info.  */
                  if (MEM_P (dest) && simple_mem (dest))
                    {
                      ptr = ldst_entry (dest);
                      machine_mode src_mode = GET_MODE (src);
                      if (! MEM_P (src)
                          && GET_CODE (src) != ASM_OPERANDS
                          /* Check for REG manually since want_to_gcse_p
                             returns 0 for all REGs.  */
                          && can_assign_to_reg_without_clobbers_p (src,
                                                                   src_mode))
                        ptr->stores = alloc_INSN_LIST (insn, ptr->stores);
                      else
                        ptr->invalid = 1;
                    }
                }
              else
                {
                  /* Invalidate all MEMs in the pattern and...  */
                  invalidate_any_buried_refs (PATTERN (insn));

                  /* ... in REG_EQUAL notes for PARALLELs with a single
                     SET.  */
                  rtx note = find_reg_equal_equiv_note (insn), src_eq;
                  if (note
                      && REG_NOTE_KIND (note) == REG_EQUAL
                      && (src_eq = XEXP (note, 0)))
                    invalidate_any_buried_refs (src_eq);
                }
            }
        }
    }
}
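
/* Tying this back to the store motion example earlier: the loop's load,
   schematically (set (reg:SI r) (mem:SI (symbol_ref ("i")))), would end
   up on the loads list of the ls_expr keyed by the MEM pattern for 'i',
   and the store (set (mem:SI (symbol_ref ("i"))) (reg:SI r)) on the
   stores list of the same entry, provided both pass the simple_mem
   checks above.  */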

/* Remove any references that have been either invalidated or are not in
   the expression list for PRE GCSE.  */

static void
trim_ld_motion_mems (void)
{
  struct ls_expr * * last = & pre_ldst_mems;
  struct ls_expr * ptr = pre_ldst_mems;

  while (ptr != NULL)
    {
      struct gcse_expr * expr;

      /* Keep an entry only if it has not been invalidated and its MEM
         can be found in the expression hash table.  */
      if (! ptr->invalid)
        {
          /* Look for this mem in the expression list.  */
          unsigned int hash = ptr->hash_index % expr_hash_table.size;

          for (expr = expr_hash_table.table[hash];
               expr != NULL;
               expr = expr->next_same_hash)
            if (expr_equiv_p (expr->expr, ptr->pattern))
              break;
        }
      else
        expr = NULL;

      if (expr)
        {
          /* Set the expression field if we are keeping it.  */
          ptr->expr = expr;
          last = & ptr->next;
          ptr = ptr->next;
        }
      else
        {
          *last = ptr->next;
          pre_ldst_table->remove_elt_with_hash (ptr, ptr->hash_index);
          free_ldst_entry (ptr);
          ptr = * last;
        }
    }

  /* Show the world what we've found.  */
  if (dump_file && pre_ldst_mems != NULL)
    print_ldst_list (dump_file);
}

/* This routine takes an expression which we are replacing with a
   reaching register and updates any stores that are needed if that
   expression is in the ld_motion list.  Stores are updated by copying
   their SRC to the reaching register, and then storing the reaching
   register into the store location.  This keeps the correct value in
   the reaching register for the loads.  */

static void
update_ld_motion_stores (struct gcse_expr * expr)
{
  struct ls_expr * mem_ptr;

  if ((mem_ptr = find_rtx_in_ldst (expr->expr)))
    {
      /* We could try to find just the REACHED stores, but it shouldn't
         matter if we set the reaching reg everywhere...  some of the
         stores might be dead and will be eliminated later.  */

      /* We replace (set mem expr) with (set reg expr) (set mem reg)
         where reg is the reaching reg used in the load.  We checked in
         compute_ld_motion_mems that we can replace (set mem expr) with
         (set reg expr) in that insn.  */
      rtx list = mem_ptr->stores;

      for ( ; list != NULL_RTX; list = XEXP (list, 1))
        {
          rtx_insn *insn = as_a <rtx_insn *> (XEXP (list, 0));
          rtx pat = PATTERN (insn);
          rtx src = SET_SRC (pat);
          rtx reg = expr->reaching_reg;

          /* If we've already copied it, continue.  */
          if (expr->reaching_reg == src)
            continue;

          if (dump_file)
            {
              fprintf (dump_file, "PRE: store updated with reaching reg ");
              print_rtl (dump_file, reg);
              fprintf (dump_file, ":\n ");
              print_inline_rtx (dump_file, insn, 8);
              fprintf (dump_file, "\n");
            }

          rtx_insn *copy = gen_move_insn (reg, copy_rtx (SET_SRC (pat)));
          emit_insn_before (copy, insn);
          SET_SRC (pat) = reg;
          df_insn_rescan (insn);

          /* Un-recognize this pattern since it's probably different now.  */
          INSN_CODE (insn) = -1;
          gcse_create_count++;
        }
    }
}
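
/* Schematically, with reaching register (reg:SI 200), a recorded store
   (set (mem:SI (symbol_ref ("i"))) (reg:SI 100)) becomes the pair

	(set (reg:SI 200) (reg:SI 100))
	(set (mem:SI (symbol_ref ("i"))) (reg:SI 200))

   so the reaching register always holds the value last written to the
   memory location, which is what the moved loads rely on.  */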

/* Return true if the graph is too expensive to optimize.  PASS is the
   optimization about to be performed.  */

bool
gcse_or_cprop_is_too_expensive (const char *pass)
{
  unsigned int memory_request = (n_basic_blocks_for_fn (cfun)
				 * SBITMAP_SET_SIZE (max_reg_num ())
				 * sizeof (SBITMAP_ELT_TYPE));

  /* Trying to perform global optimizations on flow graphs which have
     a high connectivity will take a long time and is unlikely to be
     particularly useful.

     In normal circumstances a cfg should have about twice as many
     edges as blocks.  But we do not want to punish small functions
     which have a couple of switch statements.  Rather than simply
     thresholding the number of blocks, use something with more
     graceful degradation.  */
  if (n_edges_for_fn (cfun) > 20000 + n_basic_blocks_for_fn (cfun) * 4)
    {
      warning (OPT_Wdisabled_optimization,
	       "%s: %d basic blocks and %d edges/basic block",
	       pass, n_basic_blocks_for_fn (cfun),
	       n_edges_for_fn (cfun) / n_basic_blocks_for_fn (cfun));

      return true;
    }

  /* If allocating memory for the dataflow bitmaps would take up too much
     storage it's better just to disable the optimization.  */
  if (memory_request > MAX_GCSE_MEMORY)
    {
      warning (OPT_Wdisabled_optimization,
	       "%s: %d basic blocks and %d registers; increase --param max-gcse-memory above %d",
	       pass, n_basic_blocks_for_fn (cfun), max_reg_num (),
	       memory_request);

      return true;
    }

  return false;
}
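
/* As a rough worked example of the tests above: a function with 2000
   basic blocks and 30000 edges fails the connectivity check, since
   30000 > 20000 + 2000 * 4 = 28000.  The memory check instead scales
   with the block count times the size of a bitmap over max_reg_num ()
   registers, so a function with very many pseudos can exceed
   MAX_GCSE_MEMORY even when its edge density is modest.  */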

static unsigned int
execute_rtl_pre (void)
{
  int changed;
  delete_unreachable_blocks ();
  df_analyze ();
  changed = one_pre_gcse_pass ();
  flag_rerun_cse_after_global_opts |= changed;
  if (changed)
    cleanup_cfg (0);
  return 0;
}

static unsigned int
execute_rtl_hoist (void)
{
  int changed;
  delete_unreachable_blocks ();
  df_analyze ();
  changed = one_code_hoisting_pass ();
  flag_rerun_cse_after_global_opts |= changed;
  if (changed)
    cleanup_cfg (0);
  return 0;
}

namespace {

const pass_data pass_data_rtl_pre =
{
  RTL_PASS, /* type */
  "rtl pre", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_PRE, /* tv_id */
  PROP_cfglayout, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_df_finish, /* todo_flags_finish */
};

class pass_rtl_pre : public rtl_opt_pass
{
public:
  pass_rtl_pre (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_rtl_pre, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *);
  virtual unsigned int execute (function *) { return execute_rtl_pre (); }

}; // class pass_rtl_pre

/* We do not construct an accurate cfg in functions which call
   setjmp, so none of these passes runs if the function calls
   setjmp.
   FIXME: Should just handle setjmp via REG_SETJMP notes.  */

bool
pass_rtl_pre::gate (function *fun)
{
  return optimize > 0 && flag_gcse
    && !fun->calls_setjmp
    && optimize_function_for_speed_p (fun)
    && dbg_cnt (pre);
}

} // anon namespace

rtl_opt_pass *
make_pass_rtl_pre (gcc::context *ctxt)
{
  return new pass_rtl_pre (ctxt);
}

namespace {

const pass_data pass_data_rtl_hoist =
{
  RTL_PASS, /* type */
  "hoist", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_HOIST, /* tv_id */
  PROP_cfglayout, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_df_finish, /* todo_flags_finish */
};

class pass_rtl_hoist : public rtl_opt_pass
{
public:
  pass_rtl_hoist (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_rtl_hoist, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *);
  virtual unsigned int execute (function *) { return execute_rtl_hoist (); }

}; // class pass_rtl_hoist

bool
pass_rtl_hoist::gate (function *)
{
  return optimize > 0 && flag_gcse
    && !cfun->calls_setjmp
    /* It does not make sense to run code hoisting unless we are optimizing
       for code size -- it rarely makes programs faster, and can make them
       bigger if we did PRE (when optimizing for space, we don't run PRE).  */
    && optimize_function_for_size_p (cfun)
    && dbg_cnt (hoist);
}

} // anon namespace

rtl_opt_pass *
make_pass_rtl_hoist (gcc::context *ctxt)
{
  return new pass_rtl_hoist (ctxt);
}

/* Reset all state within gcse.c so that we can rerun the compiler
   within the same process.  For use by toplev::finalize.  */

void
gcse_c_finalize (void)
{
  test_insn = NULL;
}

#include "gt-gcse.h"