1 /* Global common subexpression elimination/Partial redundancy elimination
2 and global constant/copy propagation for GNU compiler.
3 Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004
4 Free Software Foundation, Inc.
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 2, or (at your option) any later
11 version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING. If not, write to the Free
20 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
21 02111-1307, USA. */
22
23 /* TODO
24 - reordering of memory allocation and freeing to be more space efficient
25 - do rough calc of how many regs are needed in each block, and a rough
26 calc of how many regs are available in each class and use that to
27 throttle back the code in cases where RTX_COST is minimal.
28 - a store to the same address as a load does not kill the load if the
29 source of the store is also the destination of the load. Handling this
30 allows more load motion, particularly out of loops.
31 - ability to realloc sbitmap vectors would allow one initial computation
32 of reg_set_in_block with only subsequent additions, rather than
33 recomputing it for each pass
34
35 */
36
37 /* References searched while implementing this.
38
39 Compilers Principles, Techniques and Tools
40 Aho, Sethi, Ullman
41 Addison-Wesley, 1988
42
43 Global Optimization by Suppression of Partial Redundancies
44 E. Morel, C. Renvoise
45 Communications of the ACM, Vol. 22, Num. 2, Feb. 1979
46
47 A Portable Machine-Independent Global Optimizer - Design and Measurements
48 Frederick Chow
49 Stanford Ph.D. thesis, Dec. 1983
50
51 A Fast Algorithm for Code Movement Optimization
52 D.M. Dhamdhere
53 SIGPLAN Notices, Vol. 23, Num. 10, Oct. 1988
54
55 A Solution to a Problem with Morel and Renvoise's
56 Global Optimization by Suppression of Partial Redundancies
57 K-H Drechsler, M.P. Stadel
58 ACM TOPLAS, Vol. 10, Num. 4, Oct. 1988
59
60 Practical Adaptation of the Global Optimization
61 Algorithm of Morel and Renvoise
62 D.M. Dhamdhere
63 ACM TOPLAS, Vol. 13, Num. 2. Apr. 1991
64
65 Efficiently Computing Static Single Assignment Form and the Control
66 Dependence Graph
67 R. Cytron, J. Ferrante, B.K. Rosen, M.N. Wegman, and F.K. Zadeck
68 ACM TOPLAS, Vol. 13, Num. 4, Oct. 1991
69
70 Lazy Code Motion
71 J. Knoop, O. Ruthing, B. Steffen
72 ACM SIGPLAN Notices Vol. 27, Num. 7, Jul. 1992, '92 Conference on PLDI
73
74 What's In a Region? Or Computing Control Dependence Regions in Near-Linear
75 Time for Reducible Control Flow
76 Thomas Ball
77 ACM Letters on Programming Languages and Systems,
78 Vol. 2, Num. 1-4, Mar-Dec 1993
79
80 An Efficient Representation for Sparse Sets
81 Preston Briggs, Linda Torczon
82 ACM Letters on Programming Languages and Systems,
83 Vol. 2, Num. 1-4, Mar-Dec 1993
84
85 A Variation of Knoop, Ruthing, and Steffen's Lazy Code Motion
86 K-H Drechsler, M.P. Stadel
87 ACM SIGPLAN Notices, Vol. 28, Num. 5, May 1993
88
89 Partial Dead Code Elimination
90 J. Knoop, O. Ruthing, B. Steffen
91 ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994
92
93 Effective Partial Redundancy Elimination
94 P. Briggs, K.D. Cooper
95 ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994
96
97 The Program Structure Tree: Computing Control Regions in Linear Time
98 R. Johnson, D. Pearson, K. Pingali
99 ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994
100
101 Optimal Code Motion: Theory and Practice
102 J. Knoop, O. Ruthing, B. Steffen
103 ACM TOPLAS, Vol. 16, Num. 4, Jul. 1994
104
105 The power of assignment motion
106 J. Knoop, O. Ruthing, B. Steffen
107 ACM SIGPLAN Notices Vol. 30, Num. 6, Jun. 1995, '95 Conference on PLDI
108
109 Global code motion / global value numbering
110 C. Click
111 ACM SIGPLAN Notices Vol. 30, Num. 6, Jun. 1995, '95 Conference on PLDI
112
113 Value Driven Redundancy Elimination
114 L.T. Simpson
115 Rice University Ph.D. thesis, Apr. 1996
116
117 Value Numbering
118 L.T. Simpson
119 Massively Scalar Compiler Project, Rice University, Sep. 1996
120
121 High Performance Compilers for Parallel Computing
122 Michael Wolfe
123 Addison-Wesley, 1996
124
125 Advanced Compiler Design and Implementation
126 Steven Muchnick
127 Morgan Kaufmann, 1997
128
129 Building an Optimizing Compiler
130 Robert Morgan
131 Digital Press, 1998
132
133 People wishing to speed up the code here should read:
134 Elimination Algorithms for Data Flow Analysis
135 B.G. Ryder, M.C. Paull
136 ACM Computing Surveys, Vol. 18, Num. 3, Sep. 1986
137
138 How to Analyze Large Programs Efficiently and Informatively
139 D.M. Dhamdhere, B.K. Rosen, F.K. Zadeck
140 ACM SIGPLAN Notices Vol. 27, Num. 7, Jul. 1992, '92 Conference on PLDI
141
142 People wishing to do something different can find various possibilities
143 in the above papers and elsewhere.
144 */
145
146 #include "config.h"
147 #include "system.h"
148 #include "coretypes.h"
149 #include "tm.h"
150 #include "toplev.h"
151
152 #include "rtl.h"
153 #include "tree.h"
154 #include "tm_p.h"
155 #include "regs.h"
156 #include "hard-reg-set.h"
157 #include "flags.h"
158 #include "real.h"
159 #include "insn-config.h"
160 #include "recog.h"
161 #include "basic-block.h"
162 #include "output.h"
163 #include "function.h"
164 #include "expr.h"
165 #include "except.h"
166 #include "ggc.h"
167 #include "params.h"
168 #include "cselib.h"
169 #include "intl.h"
170 #include "obstack.h"
171
172 /* Propagate flow information through back edges and thus enable PRE's
173 moving loop invariant calculations out of loops.
174
175 Originally this tended to create worse overall code, but several
176 improvements during the development of PRE seem to have made following
177 back edges generally a win.
178
179 Note that much of the loop-invariant code motion done here would normally
180 be done by loop.c, which has more heuristics for when to move invariants
181 out of loops. At some point we might need to move some of those
182 heuristics into gcse.c. */
183
184 /* We support GCSE via Partial Redundancy Elimination. PRE optimizations
185 are a superset of those done by GCSE.
186
187 We perform the following steps:
188
189 1) Compute basic block information.
190
191 2) Compute table of places where registers are set.
192
193 3) Perform copy/constant propagation.
194
195 4) Perform global cse.
196
197 5) Perform another pass of copy/constant propagation.
198
199 Two passes of copy/constant propagation are done because the first one
200 enables more GCSE and the second one helps to clean up the copies that
201 GCSE creates. This is needed more for PRE than for Classic because Classic
202 GCSE will try to use an existing register containing the common
203 subexpression rather than create a new one. This is harder to do for PRE
204 because of the code motion (which Classic GCSE doesn't do).
205
206 Expressions we are interested in GCSE-ing are of the form
207 (set (pseudo-reg) (expression)).
208 Function want_to_gcse_p says what these are.
209
210 PRE handles moving invariant expressions out of loops (by treating them as
211 partially redundant).
212
213 Eventually it would be nice to replace cse.c/gcse.c with SSA (static single
214 assignment) based GVN (global value numbering). L. T. Simpson's paper
215 (Rice University) on value numbering is a useful reference for this.
216
217 **********************
218
219 We used to support multiple passes but there are diminishing returns in
220 doing so. The first pass usually makes 90% of the changes that are doable.
221 A second pass can make a few more changes made possible by the first pass.
222 Experiments show any further passes don't make enough changes to justify
223 the expense.
224
225 A study of spec92 using an unlimited number of passes:
226 [1 pass] = 1208 substitutions, [2] = 577, [3] = 202, [4] = 192, [5] = 83,
227 [6] = 34, [7] = 17, [8] = 9, [9] = 4, [10] = 4, [11] = 2,
228 [12] = 2, [13] = 1, [15] = 1, [16] = 2, [41] = 1
229
230 It was found that doing copy propagation between each pass enables
231 further substitutions.
232
233 PRE is quite expensive in complicated functions because the DFA can take
234 a while to converge. Hence we only perform one pass. The parameter
235 max-gcse-passes can be modified if one wants to experiment.
236
237 **********************
238
239 The steps for PRE are:
240
241 1) Build the hash table of expressions we wish to GCSE (expr_hash_table).
242
243 2) Perform the data flow analysis for PRE.
244
245 3) Delete the redundant instructions
246
247 4) Insert the required copies [if any] that make the partially
248 redundant instructions fully redundant.
249
250 5) For other reaching expressions, insert an instruction to copy the value
251 to a newly created pseudo that will reach the redundant instruction.
252
253 The deletion is done first so that when we do insertions we
254 know which pseudo reg to use.
255
256 Various papers have argued that PRE DFA is expensive (O(n^2)) and others
257 argue it is not. The number of iterations for the algorithm to converge
258 is typically 2-4 so I don't view it as that expensive (relatively speaking).
259
260 PRE GCSE depends heavily on the second CSE pass to clean up the copies
261 we create. To make an expression reach the place where it's redundant,
262 the result of the expression is copied to a new register, and the redundant
263 expression is deleted by replacing it with this new register. Classic GCSE
264 doesn't have this problem as much, because it computes the reaching defs of
265 each register in each block and thus can try to use an existing register.
266
267 **********************
268
269 A fair amount of simplicity is gained by writing small functions for simple
270 tasks, even when the function is only called in one place. This may
271 measurably slow things down [or may not] by creating more function call
272 overhead than is necessary. The source is laid out so that it's trivial
273 to make the affected functions inline, so that one can measure what speedup,
274 if any, can be achieved; maybe later, when things settle, things can
275 be rearranged.
276
277 Help stamp out big monolithic functions! */
278
279 /* GCSE global vars. */
280
281 /* -dG dump file. */
282 static FILE *gcse_file;
283
284 /* Note whether or not we should run jump optimization after gcse. We
285 want to do this in two cases:
286
287 * If we changed any jumps via cprop.
288
289 * If we added any labels via edge splitting. */
290
291 static int run_jump_opt_after_gcse;
292
293 /* Bitmaps are normally not included in debugging dumps.
294 However it's useful to be able to print them from GDB.
295 We could create special functions for this, but it's simpler to
296 just allow passing stderr to the dump_foo fns. Since stderr can
297 be a macro, we store a copy here. */
298 static FILE *debug_stderr;
299
300 /* An obstack for our working variables. */
301 static struct obstack gcse_obstack;
302
303 struct reg_use { rtx reg_rtx; };
304
305 /* Hash table of expressions. */
306
307 struct expr
308 {
309 /* The expression (SET_SRC for expressions, PATTERN for assignments). */
310 rtx expr;
311 /* Index in the available expression bitmaps. */
312 int bitmap_index;
313 /* Next entry with the same hash. */
314 struct expr *next_same_hash;
315 /* List of anticipatable occurrences in basic blocks in the function.
316 An "anticipatable occurrence" is one that is the first occurrence in the
317 basic block, the operands are not modified in the basic block prior
318 to the occurrence and the output is not used between the start of
319 the block and the occurrence. */
320 struct occr *antic_occr;
321 /* List of available occurrences in basic blocks in the function.
322 An "available occurrence" is one that is the last occurrence in the
323 basic block and the operands are not modified by following statements in
324 the basic block [including this insn]. */
325 struct occr *avail_occr;
326 /* Non-null if the computation is PRE redundant.
327 The value is the newly created pseudo-reg to record a copy of the
328 expression in all the places that reach the redundant copy. */
329 rtx reaching_reg;
330 };
331
332 /* Occurrence of an expression.
333 There is one per basic block. If a pattern appears more than once, the
334 last appearance is used [or the first, for anticipatable expressions]. */
335
336 struct occr
337 {
338 /* Next occurrence of this expression. */
339 struct occr *next;
340 /* The insn that computes the expression. */
341 rtx insn;
342 /* Nonzero if this [anticipatable] occurrence has been deleted. */
343 char deleted_p;
344 /* Nonzero if this [available] occurrence has been copied to
345 reaching_reg. */
346 /* ??? This is mutually exclusive with deleted_p, so they could share
347 the same byte. */
348 char copied_p;
349 };
350
351 /* Expression and copy propagation hash tables.
352 Each hash table is an array of buckets.
353 ??? It is known that if it were an array of entries, structure elements
354 `next_same_hash' and `bitmap_index' wouldn't be necessary. However, it is
355 not clear whether in the final analysis a sufficient amount of memory would
356 be saved as the size of the available expression bitmaps would be larger
357 [one could build a mapping table without holes afterwards though].
358 Someday I'll perform the computation and figure it out. */
359
360 struct hash_table
361 {
362 /* The table itself.
363 This is an array of `expr_hash_table_size' elements. */
364 struct expr **table;
365
366 /* Size of the hash table, in elements. */
367 unsigned int size;
368
369 /* Number of hash table elements. */
370 unsigned int n_elems;
371
372 /* Whether the table is the expression one or the copy-propagation (set) one. */
373 int set_p;
374 };
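
/* A hash bucket chain is walked via `next_same_hash'. A minimal sketch of
   the idiom (compute_local_properties and lookup_expr do the real thing;
   HASH and X are illustrative names here):

     struct expr *expr;
     for (expr = table->table[hash]; expr != NULL; expr = expr->next_same_hash)
       if (expr_equiv_p (expr->expr, x))
         break;  */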
375
376 /* Expression hash table. */
377 static struct hash_table expr_hash_table;
378
379 /* Copy propagation hash table. */
380 static struct hash_table set_hash_table;
381
382 /* Mapping of uids to cuids.
383 Only real insns get cuids. */
384 static int *uid_cuid;
385
386 /* Highest UID in UID_CUID. */
387 static int max_uid;
388
389 /* Get the cuid of an insn. */
390 #ifdef ENABLE_CHECKING
391 #define INSN_CUID(INSN) (INSN_UID (INSN) > max_uid ? (abort (), 0) : uid_cuid[INSN_UID (INSN)])
392 #else
393 #define INSN_CUID(INSN) (uid_cuid[INSN_UID (INSN)])
394 #endif
395
396 /* Number of cuids. */
397 static int max_cuid;
398
399 /* Mapping of cuids to insns. */
400 static rtx *cuid_insn;
401
402 /* Get insn from cuid. */
403 #define CUID_INSN(CUID) (cuid_insn[CUID])
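
/* For real insns the two mappings are inverses, i.e.
   CUID_INSN (INSN_CUID (insn)) == insn whenever INSN_P (insn). Notes and
   other non-insns share the cuid of the following real insn (see
   alloc_gcse_mem) and have no entry in `cuid_insn'. */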
404
405 /* Maximum register number in function prior to doing gcse + 1.
406 Registers created during this pass have regno >= max_gcse_regno.
407 This is named with "gcse" to not collide with a global of the same name. */
408 static unsigned int max_gcse_regno;
409
410 /* Table of registers that are modified.
411
412 For each register, each element is a list of places where the pseudo-reg
413 is set.
414
415 For simplicity, GCSE is done on sets of pseudo-regs only. PRE GCSE only
416 requires knowledge of which blocks kill which regs [and thus could use
417 a bitmap instead of the lists `reg_set_table' uses].
418
419 `reg_set_table' could be turned into an array of bitmaps (num-bbs x
420 num-regs) [however perhaps it may be useful to keep the data as is]. One
421 advantage of recording things this way is that `reg_set_table' is fairly
422 sparse with respect to pseudo regs but for hard regs could be fairly dense
423 [relatively speaking]. And recording sets of pseudo-regs in lists speeds
424 up functions like compute_transp since in the case of pseudo-regs we only
425 need to iterate over the number of times a pseudo-reg is set, not over the
426 number of basic blocks [clearly there is a bit of a slowdown in the cases
427 where a pseudo is set more than once in a block, however it is believed
428 that the net effect is to speed things up]. This isn't done for hard-regs
429 because recording call-clobbered hard-regs in `reg_set_table' at each
430 function call can consume a fair bit of memory, and iterating over
431 hard-regs stored this way in compute_transp will be more expensive. */
432
433 typedef struct reg_set
434 {
435 /* The next setting of this register. */
436 struct reg_set *next;
437 /* The insn where it was set. */
438 rtx insn;
439 } reg_set;
440
441 static reg_set **reg_set_table;
442
443 /* Size of `reg_set_table'.
444 The table starts out at max_gcse_regno + slop, and is enlarged as
445 necessary. */
446 static int reg_set_table_size;
447
448 /* Amount to grow `reg_set_table' by when it's full. */
449 #define REG_SET_TABLE_SLOP 100
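
/* A sketch of the idiom consumers use to walk these chains (compute_transp
   does the real work; BMAP and INDX are illustrative names here): each
   chain element names one setting insn, from which the containing block
   can be recovered:

     reg_set *r;
     for (r = reg_set_table[regno]; r != NULL; r = r->next)
       RESET_BIT (bmap[BLOCK_NUM (r->insn)], indx);  */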
450
451 /* This is a list of expressions which are MEMs and will be used by load
452 or store motion.
453 Load motion tracks MEMs which aren't killed by
454 anything except themselves (i.e., loads and stores to a single location).
455 We can then allow movement of these MEM refs with a little special
456 allowance (all stores copy the same value to the reaching reg used
457 for the loads). This means all values used to store into memory must have
458 no side effects, so we can re-issue the setter value.
459 Store Motion uses this structure as an expression table to track stores
460 which look interesting, and might be moveable towards the exit block. */
461
462 struct ls_expr
463 {
464 struct expr * expr; /* Gcse expression reference for LM. */
465 rtx pattern; /* Pattern of this mem. */
466 rtx pattern_regs; /* List of registers mentioned by the mem. */
467 rtx loads; /* INSN list of loads seen. */
468 rtx stores; /* INSN list of stores seen. */
469 struct ls_expr * next; /* Next in the list. */
470 int invalid; /* Invalid for some reason. */
471 int index; /* The bitmap index it maps to, if any. */
472 unsigned int hash_index; /* Index when in a hash table. */
473 rtx reaching_reg; /* Register to use when re-writing. */
474 };
475
476 /* Array of implicit set patterns indexed by basic block index. */
477 static rtx *implicit_sets;
478
479 /* Head of the list of load/store memory refs. */
480 static struct ls_expr * pre_ldst_mems = NULL;
481
482 /* Bitmap containing one bit for each register in the program.
483 Used when performing GCSE to track which registers have been set since
484 the start of the basic block. */
485 static regset reg_set_bitmap;
486
487 /* For each block, a bitmap of registers set in the block.
488 This is used by expr_killed_p and compute_transp.
489 It is computed during hash table computation and not by compute_sets
490 as it includes registers added since the last pass (or between cprop and
491 gcse) and it's currently not easy to realloc sbitmap vectors. */
492 static sbitmap *reg_set_in_block;
493
494 /* Array, indexed by basic block number for a list of insns which modify
495 memory within that block. */
496 static rtx * modify_mem_list;
497 bitmap modify_mem_list_set;
498
499 /* This array parallels modify_mem_list, but is kept canonicalized. */
500 static rtx * canon_modify_mem_list;
501 bitmap canon_modify_mem_list_set;
502 /* Various variables for statistics gathering. */
503
504 /* Memory used in a pass.
505 This isn't intended to be absolutely precise. Its intent is only
506 to keep an eye on memory usage. */
507 static int bytes_used;
508
509 /* GCSE substitutions made. */
510 static int gcse_subst_count;
511 /* Number of copy instructions created. */
512 static int gcse_create_count;
513 /* Number of constants propagated. */
514 static int const_prop_count;
515 /* Number of copies propagated. */
516 static int copy_prop_count;
517
518 /* These variables are used by classic GCSE.
519 Normally they'd be defined a bit later, but `rd_gen' needs to
520 be declared sooner. */
521
522 /* Each block has a bitmap of each type.
523 The length of each block's bitmap is:
524
525 max_cuid - for reaching definitions
526 n_exprs - for available expressions
527
528 Thus we view the bitmaps as two-dimensional arrays, i.e.
529 rd_kill[block_num][cuid_num]
530 ae_kill[block_num][expr_num] */
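
/* So, for example, whether expression EXPR is killed in block BB is
   answered by (a sketch): TEST_BIT (ae_kill[bb->index], expr->bitmap_index). */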
531
532 /* For reaching defs */
533 static sbitmap *rd_kill, *rd_gen, *reaching_defs, *rd_out;
534
535 /* for available exprs */
536 static sbitmap *ae_kill, *ae_gen, *ae_in, *ae_out;
537
538 /* Objects of this type are passed around by the null-pointer check
539 removal routines. */
540 struct null_pointer_info
541 {
542 /* The basic block being processed. */
543 basic_block current_block;
544 /* The first register to be handled in this pass. */
545 unsigned int min_reg;
546 /* One greater than the last register to be handled in this pass. */
547 unsigned int max_reg;
548 sbitmap *nonnull_local;
549 sbitmap *nonnull_killed;
550 };
551
552 static void compute_can_copy (void);
553 static void *gmalloc (size_t) ATTRIBUTE_MALLOC;
554 static void *gcalloc (size_t, size_t) ATTRIBUTE_MALLOC;
555 static void *grealloc (void *, size_t);
556 static void *gcse_alloc (unsigned long);
557 static void alloc_gcse_mem (rtx);
558 static void free_gcse_mem (void);
559 static void alloc_reg_set_mem (int);
560 static void free_reg_set_mem (void);
561 static int get_bitmap_width (int, int, int);
562 static void record_one_set (int, rtx);
563 static void replace_one_set (int, rtx, rtx);
564 static void record_set_info (rtx, rtx, void *);
565 static void compute_sets (rtx);
566 static void hash_scan_insn (rtx, struct hash_table *, int);
567 static void hash_scan_set (rtx, rtx, struct hash_table *);
568 static void hash_scan_clobber (rtx, rtx, struct hash_table *);
569 static void hash_scan_call (rtx, rtx, struct hash_table *);
570 static int want_to_gcse_p (rtx);
571 static bool gcse_constant_p (rtx);
572 static int oprs_unchanged_p (rtx, rtx, int);
573 static int oprs_anticipatable_p (rtx, rtx);
574 static int oprs_available_p (rtx, rtx);
575 static void insert_expr_in_table (rtx, enum machine_mode, rtx, int, int,
576 struct hash_table *);
577 static void insert_set_in_table (rtx, rtx, struct hash_table *);
578 static unsigned int hash_expr (rtx, enum machine_mode, int *, int);
579 static unsigned int hash_expr_1 (rtx, enum machine_mode, int *);
580 static unsigned int hash_string_1 (const char *);
581 static unsigned int hash_set (int, int);
582 static int expr_equiv_p (rtx, rtx);
583 static void record_last_reg_set_info (rtx, int);
584 static void record_last_mem_set_info (rtx);
585 static void record_last_set_info (rtx, rtx, void *);
586 static void compute_hash_table (struct hash_table *);
587 static void alloc_hash_table (int, struct hash_table *, int);
588 static void free_hash_table (struct hash_table *);
589 static void compute_hash_table_work (struct hash_table *);
590 static void dump_hash_table (FILE *, const char *, struct hash_table *);
591 static struct expr *lookup_expr (rtx, struct hash_table *);
592 static struct expr *lookup_set (unsigned int, struct hash_table *);
593 static struct expr *next_set (unsigned int, struct expr *);
594 static void reset_opr_set_tables (void);
595 static int oprs_not_set_p (rtx, rtx);
596 static void mark_call (rtx);
597 static void mark_set (rtx, rtx);
598 static void mark_clobber (rtx, rtx);
599 static void mark_oprs_set (rtx);
600 static void alloc_cprop_mem (int, int);
601 static void free_cprop_mem (void);
602 static void compute_transp (rtx, int, sbitmap *, int);
603 static void compute_transpout (void);
604 static void compute_local_properties (sbitmap *, sbitmap *, sbitmap *,
605 struct hash_table *);
606 static void compute_cprop_data (void);
607 static void find_used_regs (rtx *, void *);
608 static int try_replace_reg (rtx, rtx, rtx);
609 static struct expr *find_avail_set (int, rtx);
610 static int cprop_jump (basic_block, rtx, rtx, rtx, rtx);
611 static void mems_conflict_for_gcse_p (rtx, rtx, void *);
612 static int load_killed_in_block_p (basic_block, int, rtx, int);
613 static void canon_list_insert (rtx, rtx, void *);
614 static int cprop_insn (rtx, int);
615 static int cprop (int);
616 static void find_implicit_sets (void);
617 static int one_cprop_pass (int, int, int);
618 static bool constprop_register (rtx, rtx, rtx, int);
619 static struct expr *find_bypass_set (int, int);
620 static bool reg_killed_on_edge (rtx, edge);
621 static int bypass_block (basic_block, rtx, rtx);
622 static int bypass_conditional_jumps (void);
623 static void alloc_pre_mem (int, int);
624 static void free_pre_mem (void);
625 static void compute_pre_data (void);
626 static int pre_expr_reaches_here_p (basic_block, struct expr *,
627 basic_block);
628 static void insert_insn_end_bb (struct expr *, basic_block, int);
629 static void pre_insert_copy_insn (struct expr *, rtx);
630 static void pre_insert_copies (void);
631 static int pre_delete (void);
632 static int pre_gcse (void);
633 static int one_pre_gcse_pass (int);
634 static void add_label_notes (rtx, rtx);
635 static void alloc_code_hoist_mem (int, int);
636 static void free_code_hoist_mem (void);
637 static void compute_code_hoist_vbeinout (void);
638 static void compute_code_hoist_data (void);
639 static int hoist_expr_reaches_here_p (basic_block, int, basic_block, char *);
640 static void hoist_code (void);
641 static int one_code_hoisting_pass (void);
642 static void alloc_rd_mem (int, int);
643 static void free_rd_mem (void);
644 static void handle_rd_kill_set (rtx, int, basic_block);
645 static void compute_kill_rd (void);
646 static void compute_rd (void);
647 static void alloc_avail_expr_mem (int, int);
648 static void free_avail_expr_mem (void);
649 static void compute_ae_gen (struct hash_table *);
650 static int expr_killed_p (rtx, basic_block);
651 static void compute_ae_kill (sbitmap *, sbitmap *, struct hash_table *);
652 static int expr_reaches_here_p (struct occr *, struct expr *, basic_block,
653 int);
654 static rtx computing_insn (struct expr *, rtx);
655 static int def_reaches_here_p (rtx, rtx);
656 static int can_disregard_other_sets (struct reg_set **, rtx, int);
657 static int handle_avail_expr (rtx, struct expr *);
658 static int classic_gcse (void);
659 static int one_classic_gcse_pass (int);
660 static void invalidate_nonnull_info (rtx, rtx, void *);
661 static int delete_null_pointer_checks_1 (unsigned int *, sbitmap *, sbitmap *,
662 struct null_pointer_info *);
663 static rtx process_insert_insn (struct expr *);
664 static int pre_edge_insert (struct edge_list *, struct expr **);
665 static int expr_reaches_here_p_work (struct occr *, struct expr *,
666 basic_block, int, char *);
667 static int pre_expr_reaches_here_p_work (basic_block, struct expr *,
668 basic_block, char *);
669 static struct ls_expr * ldst_entry (rtx);
670 static void free_ldst_entry (struct ls_expr *);
671 static void free_ldst_mems (void);
672 static void print_ldst_list (FILE *);
673 static struct ls_expr * find_rtx_in_ldst (rtx);
674 static int enumerate_ldsts (void);
675 static inline struct ls_expr * first_ls_expr (void);
676 static inline struct ls_expr * next_ls_expr (struct ls_expr *);
677 static int simple_mem (rtx);
678 static void invalidate_any_buried_refs (rtx);
679 static void compute_ld_motion_mems (void);
680 static void trim_ld_motion_mems (void);
681 static void update_ld_motion_stores (struct expr *);
682 static void reg_set_info (rtx, rtx, void *);
683 static void reg_clear_last_set (rtx, rtx, void *);
684 static bool store_ops_ok (rtx, int *);
685 static rtx extract_mentioned_regs (rtx);
686 static rtx extract_mentioned_regs_helper (rtx, rtx);
687 static void find_moveable_store (rtx, int *, int *);
688 static int compute_store_table (void);
689 static bool load_kills_store (rtx, rtx, int);
690 static bool find_loads (rtx, rtx, int);
691 static bool store_killed_in_insn (rtx, rtx, rtx, int);
692 static bool store_killed_after (rtx, rtx, rtx, basic_block, int *, rtx *);
693 static bool store_killed_before (rtx, rtx, rtx, basic_block, int *);
694 static void build_store_vectors (void);
695 static void insert_insn_start_bb (rtx, basic_block);
696 static int insert_store (struct ls_expr *, edge);
697 static void remove_reachable_equiv_notes (basic_block, struct ls_expr *);
698 static void replace_store_insn (rtx, rtx, basic_block, struct ls_expr *);
699 static void delete_store (struct ls_expr *, basic_block);
700 static void free_store_memory (void);
701 static void store_motion (void);
702 static void free_insn_expr_list_list (rtx *);
703 static void clear_modify_mem_tables (void);
704 static void free_modify_mem_tables (void);
705 static rtx gcse_emit_move_after (rtx, rtx, rtx);
706 static void local_cprop_find_used_regs (rtx *, void *);
707 static bool do_local_cprop (rtx, rtx, int, rtx*);
708 static bool adjust_libcall_notes (rtx, rtx, rtx, rtx*);
709 static void local_cprop_pass (int);
710 static bool is_too_expensive (const char *);
711
712
713 /* Entry point for global common subexpression elimination.
714 F is the first instruction in the function. */
715
716 int
717 gcse_main (rtx f, FILE *file)
718 {
719 int changed, pass;
720 /* Bytes used at start of pass. */
721 int initial_bytes_used;
722 /* Maximum number of bytes used by a pass. */
723 int max_pass_bytes;
724 /* Point to release obstack data from for each pass. */
725 char *gcse_obstack_bottom;
726
727 /* We do not construct an accurate cfg in functions which call
728 setjmp, so just punt to be safe. */
729 if (current_function_calls_setjmp)
730 return 0;
731
732 /* Assume that we do not need to run jump optimizations after gcse. */
733 run_jump_opt_after_gcse = 0;
734
735 /* For calling dump_foo fns from gdb. */
736 debug_stderr = stderr;
737 gcse_file = file;
738
739 /* Identify the basic block information for this function, including
740 successors and predecessors. */
741 max_gcse_regno = max_reg_num ();
742
743 if (file)
744 dump_flow_info (file);
745
746 /* Return if there's nothing to do, or it is too expensive. */
747 if (n_basic_blocks <= 1 || is_too_expensive (_("GCSE disabled")))
748 return 0;
749
750 gcc_obstack_init (&gcse_obstack);
751 bytes_used = 0;
752
753 /* We need alias. */
754 init_alias_analysis ();
755 /* Record where pseudo-registers are set. This data is kept accurate
756 during each pass. ??? We could also record hard-reg information here
757 [since it's unchanging], however it is currently done during hash table
758 computation.
759
760 It may be tempting to compute MEM set information here too, but MEM sets
761 will be subject to code motion one day and thus we need to compute
762 information about memory sets when we build the hash tables. */
763
764 alloc_reg_set_mem (max_gcse_regno);
765 compute_sets (f);
766
767 pass = 0;
768 initial_bytes_used = bytes_used;
769 max_pass_bytes = 0;
770 gcse_obstack_bottom = gcse_alloc (1);
771 changed = 1;
772 while (changed && pass < MAX_GCSE_PASSES)
773 {
774 changed = 0;
775 if (file)
776 fprintf (file, "GCSE pass %d\n\n", pass + 1);
777
778 /* Initialize bytes_used to the space for the pred/succ lists,
779 and the reg_set_table data. */
780 bytes_used = initial_bytes_used;
781
782 /* Each pass may create new registers, so recalculate each time. */
783 max_gcse_regno = max_reg_num ();
784
785 alloc_gcse_mem (f);
786
787 /* Don't allow constant propagation to modify jumps
788 during this pass. */
789 changed = one_cprop_pass (pass + 1, 0, 0);
790
791 if (optimize_size)
792 changed |= one_classic_gcse_pass (pass + 1);
793 else
794 {
795 changed |= one_pre_gcse_pass (pass + 1);
796 /* We may have just created new basic blocks. Release and
797 recompute various things which are sized on the number of
798 basic blocks. */
799 if (changed)
800 {
801 free_modify_mem_tables ();
802 modify_mem_list = gcalloc (last_basic_block, sizeof (rtx));
803 canon_modify_mem_list = gcalloc (last_basic_block, sizeof (rtx));
804 }
805 free_reg_set_mem ();
806 alloc_reg_set_mem (max_reg_num ());
807 compute_sets (f);
808 run_jump_opt_after_gcse = 1;
809 }
810
811 if (max_pass_bytes < bytes_used)
812 max_pass_bytes = bytes_used;
813
814 /* Free up memory, then reallocate for code hoisting. We cannot
815 reuse the existing allocated memory because the tables
816 will not have info for the insns or registers created by
817 partial redundancy elimination. */
818 free_gcse_mem ();
819
820 /* It does not make sense to run code hoisting unless we are optimizing
821 for code size -- it rarely makes programs faster, and can make
822 them bigger if we did partial redundancy elimination (when optimizing
823 for space, we use a classic gcse algorithm instead of partial
824 redundancy algorithms). */
825 if (optimize_size)
826 {
827 max_gcse_regno = max_reg_num ();
828 alloc_gcse_mem (f);
829 changed |= one_code_hoisting_pass ();
830 free_gcse_mem ();
831
832 if (max_pass_bytes < bytes_used)
833 max_pass_bytes = bytes_used;
834 }
835
836 if (file)
837 {
838 fprintf (file, "\n");
839 fflush (file);
840 }
841
842 obstack_free (&gcse_obstack, gcse_obstack_bottom);
843 pass++;
844 }
845
846 /* Do one last pass of copy propagation, including cprop into
847 conditional jumps. */
848
849 max_gcse_regno = max_reg_num ();
850 alloc_gcse_mem (f);
851 /* This time, go ahead and allow cprop to alter jumps. */
852 one_cprop_pass (pass + 1, 1, 0);
853 free_gcse_mem ();
854
855 if (file)
856 {
857 fprintf (file, "GCSE of %s: %d basic blocks, ",
858 current_function_name (), n_basic_blocks);
859 fprintf (file, "%d pass%s, %d bytes\n\n",
860 pass, pass > 1 ? "es" : "", max_pass_bytes);
861 }
862
863 obstack_free (&gcse_obstack, NULL);
864 free_reg_set_mem ();
865 /* We are finished with alias. */
866 end_alias_analysis ();
867 allocate_reg_info (max_reg_num (), FALSE, FALSE);
868
869 if (!optimize_size && flag_gcse_sm)
870 store_motion ();
871
872 /* Tell our caller whether jump optimization should be rerun. */
873 return run_jump_opt_after_gcse;
874 }
875
876 /* Misc. utilities. */
877
878 /* Nonzero for each mode that supports (set (reg) (reg)).
879 This is trivially true for integer and floating point values.
880 It may or may not be true for condition codes. */
881 static char can_copy[(int) NUM_MACHINE_MODES];
882
883 /* Compute which modes support reg/reg copy operations. */
884
885 static void
886 compute_can_copy (void)
887 {
888 int i;
889 #ifndef AVOID_CCMODE_COPIES
890 rtx reg, insn;
891 #endif
892 memset (can_copy, 0, NUM_MACHINE_MODES);
893
894 start_sequence ();
895 for (i = 0; i < NUM_MACHINE_MODES; i++)
896 if (GET_MODE_CLASS (i) == MODE_CC)
897 {
898 #ifdef AVOID_CCMODE_COPIES
899 can_copy[i] = 0;
900 #else
901 reg = gen_rtx_REG ((enum machine_mode) i, LAST_VIRTUAL_REGISTER + 1);
902 insn = emit_insn (gen_rtx_SET (VOIDmode, reg, reg));
903 if (recog (PATTERN (insn), insn, NULL) >= 0)
904 can_copy[i] = 1;
905 #endif
906 }
907 else
908 can_copy[i] = 1;
909
910 end_sequence ();
911 }
912
913 /* Returns whether the mode supports reg/reg copy operations. */
914
915 bool
916 can_copy_p (enum machine_mode mode)
917 {
918 static bool can_copy_init_p = false;
919
920 if (! can_copy_init_p)
921 {
922 compute_can_copy ();
923 can_copy_init_p = true;
924 }
925
926 return can_copy[mode] != 0;
927 }
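
/* Typical use is to guard creation of a reg-to-reg copy. A minimal sketch,
   where DEST and SRC are illustrative rtx values and emit_move_insn is the
   generic way to emit such a copy:

     if (can_copy_p (GET_MODE (dest)))
       emit_move_insn (dest, src);  */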
928
929 /* Cover function to xmalloc to record bytes allocated. */
930
931 static void *
932 gmalloc (size_t size)
933 {
934 bytes_used += size;
935 return xmalloc (size);
936 }
937
938 /* Cover function to xcalloc to record bytes allocated. */
939
940 static void *
941 gcalloc (size_t nelem, size_t elsize)
942 {
943 bytes_used += nelem * elsize;
944 return xcalloc (nelem, elsize);
945 }
946
947 /* Cover function to xrealloc.
948 We don't record the additional size since we don't know it.
949 It won't affect memory usage stats much anyway. */
950
951 static void *
952 grealloc (void *ptr, size_t size)
953 {
954 return xrealloc (ptr, size);
955 }
956
957 /* Cover function to obstack_alloc. */
958
959 static void *
960 gcse_alloc (unsigned long size)
961 {
962 bytes_used += size;
963 return obstack_alloc (&gcse_obstack, size);
964 }
965
966 /* Allocate memory for the cuid mapping array,
967 and reg/memory set tracking tables.
968
969 This is called at the start of each pass. */
970
971 static void
972 alloc_gcse_mem (rtx f)
973 {
974 int i;
975 rtx insn;
976
977 /* Find the largest UID and create a mapping from UIDs to CUIDs.
978 CUIDs are like UIDs except they increase monotonically, have no gaps,
979 and only apply to real insns. */
980
981 max_uid = get_max_uid ();
982 uid_cuid = gcalloc (max_uid + 1, sizeof (int));
983 for (insn = f, i = 0; insn; insn = NEXT_INSN (insn))
984 {
985 if (INSN_P (insn))
986 uid_cuid[INSN_UID (insn)] = i++;
987 else
988 uid_cuid[INSN_UID (insn)] = i;
989 }
990
991 /* Create a table mapping cuids to insns. */
992
993 max_cuid = i;
994 cuid_insn = gcalloc (max_cuid + 1, sizeof (rtx));
995 for (insn = f, i = 0; insn; insn = NEXT_INSN (insn))
996 if (INSN_P (insn))
997 CUID_INSN (i++) = insn;
998
999 /* Allocate vars to track sets of regs. */
1000 reg_set_bitmap = BITMAP_XMALLOC ();
1001
1002 /* Allocate vars to track sets of regs, memory per block. */
1003 reg_set_in_block = sbitmap_vector_alloc (last_basic_block, max_gcse_regno);
1004 /* Allocate array to keep a list of insns which modify memory in each
1005 basic block. */
1006 modify_mem_list = gcalloc (last_basic_block, sizeof (rtx));
1007 canon_modify_mem_list = gcalloc (last_basic_block, sizeof (rtx));
1008 modify_mem_list_set = BITMAP_XMALLOC ();
1009 canon_modify_mem_list_set = BITMAP_XMALLOC ();
1010 }
1011
1012 /* Free memory allocated by alloc_gcse_mem. */
1013
1014 static void
1015 free_gcse_mem (void)
1016 {
1017 free (uid_cuid);
1018 free (cuid_insn);
1019
1020 BITMAP_XFREE (reg_set_bitmap);
1021
1022 sbitmap_vector_free (reg_set_in_block);
1023 free_modify_mem_tables ();
1024 BITMAP_XFREE (modify_mem_list_set);
1025 BITMAP_XFREE (canon_modify_mem_list_set);
1026 }
1027
1028 /* Many of the global optimization algorithms work by solving dataflow
1029 equations for various expressions. Initially, some local value is
1030 computed for each expression in each block. Then, the values across the
1031 various blocks are combined (by following flow graph edges) to arrive at
1032 global values. Conceptually, each set of equations is independent. We
1033 may therefore solve all the equations in parallel, solve them one at a
1034 time, or pick any intermediate approach.
1035
1036 When you're going to need N two-dimensional bitmaps, each X (say, the
1037 number of blocks) by Y (say, the number of expressions), call this
1038 function. It's not important what X and Y represent; only that Y
1039 correspond to the things that can be done in parallel. This function will
1040 return an appropriate chunking factor C; you should solve C sets of
1041 equations in parallel. By going through this function, we can easily
1042 trade space against time; by solving fewer equations in parallel we use
1043 less space. */
1044
1045 static int
1046 get_bitmap_width (int n, int x, int y)
1047 {
1048 /* It's not really worth figuring out *exactly* how much memory will
1049 be used by a particular choice. The important thing is to get
1050 something approximately right. */
1051 size_t max_bitmap_memory = 10 * 1024 * 1024;
1052
1053 /* The number of bytes we'd use for a single column of minimum
1054 width. */
1055 size_t column_size = n * x * sizeof (SBITMAP_ELT_TYPE);
1056
1057 /* Often, it's reasonable just to solve all the equations in
1058 parallel. */
1059 if (column_size * SBITMAP_SET_SIZE (y) <= max_bitmap_memory)
1060 return y;
1061
1062 /* Otherwise, pick the largest width we can, without going over the
1063 limit. */
1064 return SBITMAP_ELT_BITS * ((max_bitmap_memory + column_size - 1)
1065 / column_size);
1066 }
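
/* A hypothetical caller needing, say, 4 bitmap sets, each last_basic_block
   wide by n_exprs long, might chunk its dataflow problem like so
   (solve_chunk is an illustrative name, not a function in this file):

     int first, chunk = get_bitmap_width (4, last_basic_block, n_exprs);
     for (first = 0; first < n_exprs; first += chunk)
       solve_chunk (first, MIN (first + chunk, n_exprs));  */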
1067
1068 /* Compute the local properties of each recorded expression.
1069
1070 Local properties are those that are defined by the block, irrespective of
1071 other blocks.
1072
1073 An expression is transparent in a block if its operands are not modified
1074 in the block.
1075
1076 An expression is computed (locally available) in a block if it is computed
1077 at least once and the expression would contain the same value if the
1078 computation were moved to the end of the block.
1079
1080 An expression is locally anticipatable in a block if it is computed at
1081 least once and the expression would contain the same value if the
1082 computation were moved to the beginning of the block.
1083
1084 We call this routine for cprop, pre and code hoisting. They all compute
1085 basically the same information and thus can easily share this code.
1086
1087 TRANSP, COMP, and ANTLOC are destination sbitmaps for recording local
1088 properties. If NULL, then it is not necessary to compute or record that
1089 particular property.
1090
1091 TABLE controls which hash table to look at. If it is the set hash table,
1092 additionally TRANSP is computed as ~TRANSP, since this is really cprop's
1093 ABSALTERED. */
1094
1095 static void
1096 compute_local_properties (sbitmap *transp, sbitmap *comp, sbitmap *antloc, struct hash_table *table)
1097 {
1098 unsigned int i;
1099
1100 /* Initialize any bitmaps that were passed in. */
1101 if (transp)
1102 {
1103 if (table->set_p)
1104 sbitmap_vector_zero (transp, last_basic_block);
1105 else
1106 sbitmap_vector_ones (transp, last_basic_block);
1107 }
1108
1109 if (comp)
1110 sbitmap_vector_zero (comp, last_basic_block);
1111 if (antloc)
1112 sbitmap_vector_zero (antloc, last_basic_block);
1113
1114 for (i = 0; i < table->size; i++)
1115 {
1116 struct expr *expr;
1117
1118 for (expr = table->table[i]; expr != NULL; expr = expr->next_same_hash)
1119 {
1120 int indx = expr->bitmap_index;
1121 struct occr *occr;
1122
1123 /* The expression is transparent in this block if it is not killed.
1124 We start by assuming all are transparent [none are killed], and
1125 then reset the bits for those that are. */
1126 if (transp)
1127 compute_transp (expr->expr, indx, transp, table->set_p);
1128
1129 /* The occurrences recorded in antic_occr are exactly those that
1130 we want to set to nonzero in ANTLOC. */
1131 if (antloc)
1132 for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
1133 {
1134 SET_BIT (antloc[BLOCK_NUM (occr->insn)], indx);
1135
1136 /* While we're scanning the table, this is a good place to
1137 initialize this. */
1138 occr->deleted_p = 0;
1139 }
1140
1141 /* The occurrences recorded in avail_occr are exactly those that
1142 we want to set to nonzero in COMP. */
1143 if (comp)
1144 for (occr = expr->avail_occr; occr != NULL; occr = occr->next)
1145 {
1146 SET_BIT (comp[BLOCK_NUM (occr->insn)], indx);
1147
1148 /* While we're scanning the table, this is a good place to
1149 initialize this. */
1150 occr->copied_p = 0;
1151 }
1152
1153 /* While we're scanning the table, this is a good place to
1154 initialize this. */
1155 expr->reaching_reg = 0;
1156 }
1157 }
1158 }
1159
1160 /* Register set information.
1161
1162 `reg_set_table' records where each register is set or otherwise
1163 modified. */
1164
1165 static struct obstack reg_set_obstack;
1166
1167 static void
1168 alloc_reg_set_mem (int n_regs)
1169 {
1170 reg_set_table_size = n_regs + REG_SET_TABLE_SLOP;
1171 reg_set_table = gcalloc (reg_set_table_size, sizeof (struct reg_set *));
1172
1173 gcc_obstack_init (&reg_set_obstack);
1174 }
1175
1176 static void
1177 free_reg_set_mem (void)
1178 {
1179 free (reg_set_table);
1180 obstack_free (&reg_set_obstack, NULL);
1181 }
1182
1183 /* An OLD_INSN that used to set REGNO was replaced by NEW_INSN.
1184 Update the corresponding `reg_set_table' entry accordingly.
1185 We assume that NEW_INSN is not already recorded in reg_set_table[regno]. */
1186
1187 static void
1188 replace_one_set (int regno, rtx old_insn, rtx new_insn)
1189 {
1190 struct reg_set *reg_info;
1191 if (regno >= reg_set_table_size)
1192 return;
1193 for (reg_info = reg_set_table[regno]; reg_info; reg_info = reg_info->next)
1194 if (reg_info->insn == old_insn)
1195 {
1196 reg_info->insn = new_insn;
1197 break;
1198 }
1199 }
1200
1201 /* Record REGNO in the reg_set table. */
1202
1203 static void
1204 record_one_set (int regno, rtx insn)
1205 {
1206 /* Allocate a new reg_set element and link it onto the list. */
1207 struct reg_set *new_reg_info;
1208
1209 /* If the table isn't big enough, enlarge it. */
1210 if (regno >= reg_set_table_size)
1211 {
1212 int new_size = regno + REG_SET_TABLE_SLOP;
1213
1214 reg_set_table = grealloc (reg_set_table,
1215 new_size * sizeof (struct reg_set *));
1216 memset (reg_set_table + reg_set_table_size, 0,
1217 (new_size - reg_set_table_size) * sizeof (struct reg_set *));
1218 reg_set_table_size = new_size;
1219 }
1220
1221 new_reg_info = obstack_alloc (&reg_set_obstack, sizeof (struct reg_set));
1222 bytes_used += sizeof (struct reg_set);
1223 new_reg_info->insn = insn;
1224 new_reg_info->next = reg_set_table[regno];
1225 reg_set_table[regno] = new_reg_info;
1226 }
1227
1228 /* Called from compute_sets via note_stores to handle one SET or CLOBBER in
1229 an insn. The DATA is really the instruction in which the SET is
1230 occurring. */
1231
1232 static void
1233 record_set_info (rtx dest, rtx setter ATTRIBUTE_UNUSED, void *data)
1234 {
1235 rtx record_set_insn = (rtx) data;
1236
1237 if (GET_CODE (dest) == REG && REGNO (dest) >= FIRST_PSEUDO_REGISTER)
1238 record_one_set (REGNO (dest), record_set_insn);
1239 }
1240
1241 /* Scan the function and record each set of each pseudo-register.
1242
1243 This is called once, at the start of the gcse pass. See the comments for
1244 `reg_set_table' for further documentation. */
1245
1246 static void
1247 compute_sets (rtx f)
1248 {
1249 rtx insn;
1250
1251 for (insn = f; insn != 0; insn = NEXT_INSN (insn))
1252 if (INSN_P (insn))
1253 note_stores (PATTERN (insn), record_set_info, insn);
1254 }
1255
1256 /* Hash table support. */
1257
1258 struct reg_avail_info
1259 {
1260 basic_block last_bb;
1261 int first_set;
1262 int last_set;
1263 };
1264
1265 static struct reg_avail_info *reg_avail_info;
1266 static basic_block current_bb;
1267
1268
1269 /* See whether X, the source of a set, is something we want to consider for
1270 GCSE. */
1271
1272 static GTY(()) rtx test_insn;
1273 static int
1274 want_to_gcse_p (rtx x)
1275 {
1276 int num_clobbers = 0;
1277 int icode;
1278
1279 switch (GET_CODE (x))
1280 {
1281 case REG:
1282 case SUBREG:
1283 case CONST_INT:
1284 case CONST_DOUBLE:
1285 case CONST_VECTOR:
1286 case CALL:
1287 case CONSTANT_P_RTX:
1288 return 0;
1289
1290 default:
1291 break;
1292 }
1293
1294 /* If this is a valid operand, we are OK. If it's VOIDmode, we aren't. */
1295 if (general_operand (x, GET_MODE (x)))
1296 return 1;
1297 else if (GET_MODE (x) == VOIDmode)
1298 return 0;
1299
1300 /* Otherwise, check if we can make a valid insn from it. First initialize
1301 our test insn if we haven't already. */
1302 if (test_insn == 0)
1303 {
1304 test_insn
1305 = make_insn_raw (gen_rtx_SET (VOIDmode,
1306 gen_rtx_REG (word_mode,
1307 FIRST_PSEUDO_REGISTER * 2),
1308 const0_rtx));
1309 NEXT_INSN (test_insn) = PREV_INSN (test_insn) = 0;
1310 }
1311
1312 /* Now make an insn like the one we would make when GCSE'ing and see if
1313 valid. */
1314 PUT_MODE (SET_DEST (PATTERN (test_insn)), GET_MODE (x));
1315 SET_SRC (PATTERN (test_insn)) = x;
1316 return ((icode = recog (PATTERN (test_insn), test_insn, &num_clobbers)) >= 0
1317 && (num_clobbers == 0 || ! added_clobbers_hard_reg_p (icode)));
1318 }
1319
1320 /* Return nonzero if the operands of expression X are unchanged from the
1321 start of INSN's basic block up to but not including INSN (if AVAIL_P == 0),
1322 or from INSN to the end of INSN's basic block (if AVAIL_P != 0). */
1323
1324 static int
1325 oprs_unchanged_p (rtx x, rtx insn, int avail_p)
1326 {
1327 int i, j;
1328 enum rtx_code code;
1329 const char *fmt;
1330
1331 if (x == 0)
1332 return 1;
1333
1334 code = GET_CODE (x);
1335 switch (code)
1336 {
1337 case REG:
1338 {
1339 struct reg_avail_info *info = &reg_avail_info[REGNO (x)];
1340
1341 if (info->last_bb != current_bb)
1342 return 1;
1343 if (avail_p)
1344 return info->last_set < INSN_CUID (insn);
1345 else
1346 return info->first_set >= INSN_CUID (insn);
1347 }
1348
1349 case MEM:
1350 if (load_killed_in_block_p (current_bb, INSN_CUID (insn),
1351 x, avail_p))
1352 return 0;
1353 else
1354 return oprs_unchanged_p (XEXP (x, 0), insn, avail_p);
1355
1356 case PRE_DEC:
1357 case PRE_INC:
1358 case POST_DEC:
1359 case POST_INC:
1360 case PRE_MODIFY:
1361 case POST_MODIFY:
1362 return 0;
1363
1364 case PC:
1365 case CC0: /*FIXME*/
1366 case CONST:
1367 case CONST_INT:
1368 case CONST_DOUBLE:
1369 case CONST_VECTOR:
1370 case SYMBOL_REF:
1371 case LABEL_REF:
1372 case ADDR_VEC:
1373 case ADDR_DIFF_VEC:
1374 return 1;
1375
1376 default:
1377 break;
1378 }
1379
1380 for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
1381 {
1382 if (fmt[i] == 'e')
1383 {
1384 /* If we are about to do the last recursive call needed at this
1385 level, change it into iteration. This function is called enough
1386 to be worth it. */
1387 if (i == 0)
1388 return oprs_unchanged_p (XEXP (x, i), insn, avail_p);
1389
1390 else if (! oprs_unchanged_p (XEXP (x, i), insn, avail_p))
1391 return 0;
1392 }
1393 else if (fmt[i] == 'E')
1394 for (j = 0; j < XVECLEN (x, i); j++)
1395 if (! oprs_unchanged_p (XVECEXP (x, i, j), insn, avail_p))
1396 return 0;
1397 }
1398
1399 return 1;
1400 }
1401
1402 /* Used for communication between mems_conflict_for_gcse_p and
1403 load_killed_in_block_p. Nonzero if mems_conflict_for_gcse_p finds a
1404 conflict between two memory references. */
1405 static int gcse_mems_conflict_p;
1406
1407 /* Used for communication between mems_conflict_for_gcse_p and
1408 load_killed_in_block_p. A memory reference for a load instruction;
1409 mems_conflict_for_gcse_p will see if a memory store conflicts with
1410 this memory load. */
1411 static rtx gcse_mem_operand;
1412
1413 /* DEST is the output of an instruction. If it is a memory reference, and
1414 possibly conflicts with the load found in gcse_mem_operand, then set
1415 gcse_mems_conflict_p to a nonzero value. */
1416
1417 static void
1418 mems_conflict_for_gcse_p (rtx dest, rtx setter ATTRIBUTE_UNUSED,
1419 void *data ATTRIBUTE_UNUSED)
1420 {
1421 while (GET_CODE (dest) == SUBREG
1422 || GET_CODE (dest) == ZERO_EXTRACT
1423 || GET_CODE (dest) == SIGN_EXTRACT
1424 || GET_CODE (dest) == STRICT_LOW_PART)
1425 dest = XEXP (dest, 0);
1426
1427 /* If DEST is not a MEM, then it will not conflict with the load. Note
1428 that function calls are assumed to clobber memory, but are handled
1429 elsewhere. */
1430 if (GET_CODE (dest) != MEM)
1431 return;
1432
1433 /* If we are setting a MEM in our list of specially recognized MEMs,
1434 don't mark it as killed this time. */
1435
1436 if (expr_equiv_p (dest, gcse_mem_operand) && pre_ldst_mems != NULL)
1437 {
1438 if (!find_rtx_in_ldst (dest))
1439 gcse_mems_conflict_p = 1;
1440 return;
1441 }
1442
1443 if (true_dependence (dest, GET_MODE (dest), gcse_mem_operand,
1444 rtx_addr_varies_p))
1445 gcse_mems_conflict_p = 1;
1446 }
1447
1448 /* Return nonzero if the expression in X (a memory reference) is killed
1449 in block BB before or after the insn with the CUID in UID_LIMIT.
1450 AVAIL_P is nonzero for kills after UID_LIMIT, and zero for kills
1451 before UID_LIMIT.
1452
1453 To check the entire block, set UID_LIMIT to max_uid + 1 and
1454 AVAIL_P to 0. */
1455
1456 static int
1457 load_killed_in_block_p (basic_block bb, int uid_limit, rtx x, int avail_p)
1458 {
1459 rtx list_entry = modify_mem_list[bb->index];
1460 while (list_entry)
1461 {
1462 rtx setter;
1463 /* Ignore entries in the list that do not apply. */
1464 if ((avail_p
1465 && INSN_CUID (XEXP (list_entry, 0)) < uid_limit)
1466 || (! avail_p
1467 && INSN_CUID (XEXP (list_entry, 0)) > uid_limit))
1468 {
1469 list_entry = XEXP (list_entry, 1);
1470 continue;
1471 }
1472
1473 setter = XEXP (list_entry, 0);
1474
1475 /* If SETTER is a call everything is clobbered. Note that calls
1476 to pure functions are never put on the list, so we need not
1477 worry about them. */
1478 if (GET_CODE (setter) == CALL_INSN)
1479 return 1;
1480
1481 /* SETTER must be an INSN of some kind that sets memory. Call
1482 note_stores to examine each hunk of memory that is modified.
1483
1484 The note_stores interface is pretty limited, so we have to
1485 communicate via global variables. Yuk. */
1486 gcse_mem_operand = x;
1487 gcse_mems_conflict_p = 0;
1488 note_stores (PATTERN (setter), mems_conflict_for_gcse_p, NULL);
1489 if (gcse_mems_conflict_p)
1490 return 1;
1491 list_entry = XEXP (list_entry, 1);
1492 }
1493 return 0;
1494 }
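
/* Per the comment before this function, checking whether load X survives
   all of block BB is then (a sketch):
     ! load_killed_in_block_p (bb, max_uid + 1, x, 0)  */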
1495
1496 /* Return nonzero if the operands of expression X are unchanged from
1497 the start of INSN's basic block up to but not including INSN. */
1498
1499 static int
1500 oprs_anticipatable_p (rtx x, rtx insn)
1501 {
1502 return oprs_unchanged_p (x, insn, 0);
1503 }
1504
1505 /* Return nonzero if the operands of expression X are unchanged from
1506 INSN to the end of INSN's basic block. */
1507
1508 static int
1509 oprs_available_p (rtx x, rtx insn)
1510 {
1511 return oprs_unchanged_p (x, insn, 1);
1512 }
1513
1514 /* Hash expression X.
1515
1516 MODE is only used if X is a CONST_INT. DO_NOT_RECORD_P is a boolean
1517 indicating if a volatile operand is found or if the expression contains
1518 something we don't want to insert in the table. HASH_TABLE_SIZE is
1519 the current size of the hash table to be probed.
1520
1521 ??? One might want to merge this with canon_hash. Later. */
1522
1523 static unsigned int
1524 hash_expr (rtx x, enum machine_mode mode, int *do_not_record_p,
1525 int hash_table_size)
1526 {
1527 unsigned int hash;
1528
1529 *do_not_record_p = 0;
1530
1531 hash = hash_expr_1 (x, mode, do_not_record_p);
1532 return hash % hash_table_size;
1533 }
1534
1535 /* Hash a string. Just add its bytes up. */
1536
1537 static inline unsigned
1538 hash_string_1 (const char *ps)
1539 {
1540 unsigned hash = 0;
1541 const unsigned char *p = (const unsigned char *) ps;
1542
1543 if (p)
1544 while (*p)
1545 hash += *p++;
1546
1547 return hash;
1548 }
1549
1550 /* Subroutine of hash_expr to do the actual work. */
1551
1552 static unsigned int
1553 hash_expr_1 (rtx x, enum machine_mode mode, int *do_not_record_p)
1554 {
1555 int i, j;
1556 unsigned hash = 0;
1557 enum rtx_code code;
1558 const char *fmt;
1559
1560 /* Used to turn recursion into iteration. We can't rely on GCC's
1561 tail-recursion elimination since we need to keep accumulating values
1562 in HASH. */
1563
1564 if (x == 0)
1565 return hash;
1566
1567 repeat:
1568 code = GET_CODE (x);
1569 switch (code)
1570 {
1571 case REG:
1572 hash += ((unsigned int) REG << 7) + REGNO (x);
1573 return hash;
1574
1575 case CONST_INT:
1576 hash += (((unsigned int) CONST_INT << 7) + (unsigned int) mode
1577 + (unsigned int) INTVAL (x));
1578 return hash;
1579
1580 case CONST_DOUBLE:
1581 /* This is like the general case, except that it only counts
1582 the integers representing the constant. */
1583 hash += (unsigned int) code + (unsigned int) GET_MODE (x);
1584 if (GET_MODE (x) != VOIDmode)
1585 for (i = 2; i < GET_RTX_LENGTH (CONST_DOUBLE); i++)
1586 hash += (unsigned int) XWINT (x, i);
1587 else
1588 hash += ((unsigned int) CONST_DOUBLE_LOW (x)
1589 + (unsigned int) CONST_DOUBLE_HIGH (x));
1590 return hash;
1591
1592 case CONST_VECTOR:
1593 {
1594 int units;
1595 rtx elt;
1596
1597 units = CONST_VECTOR_NUNITS (x);
1598
1599 for (i = 0; i < units; ++i)
1600 {
1601 elt = CONST_VECTOR_ELT (x, i);
1602 hash += hash_expr_1 (elt, GET_MODE (elt), do_not_record_p);
1603 }
1604
1605 return hash;
1606 }
1607
1608 /* Assume there is only one rtx object for any given label. */
1609 case LABEL_REF:
1610 /* We don't hash on the address of the CODE_LABEL to avoid bootstrap
1611 differences and differences between each stage's debugging dumps. */
1612 hash += (((unsigned int) LABEL_REF << 7)
1613 + CODE_LABEL_NUMBER (XEXP (x, 0)));
1614 return hash;
1615
1616 case SYMBOL_REF:
1617 {
1618 /* Don't hash on the symbol's address to avoid bootstrap differences.
1619 Different hash values may cause expressions to be recorded in
1620 different orders and thus different registers to be used in the
1621 final assembler. This also avoids differences in the dump files
1622 between various stages. */
1623 unsigned int h = 0;
1624 const unsigned char *p = (const unsigned char *) XSTR (x, 0);
1625
1626 while (*p)
1627 h += (h << 7) + *p++; /* ??? revisit */
1628
1629 hash += ((unsigned int) SYMBOL_REF << 7) + h;
1630 return hash;
1631 }
1632
1633 case MEM:
1634 if (MEM_VOLATILE_P (x))
1635 {
1636 *do_not_record_p = 1;
1637 return 0;
1638 }
1639
1640 hash += (unsigned int) MEM;
1641 /* We used to use the alias set for hashing, but this is not good, since
1642 the alias set may differ between -fprofile-arcs and -fbranch-probabilities
1643 compilations, causing the profiles to fail to match. */
1644 x = XEXP (x, 0);
1645 goto repeat;
1646
1647 case PRE_DEC:
1648 case PRE_INC:
1649 case POST_DEC:
1650 case POST_INC:
1651 case PC:
1652 case CC0:
1653 case CALL:
1654 case UNSPEC_VOLATILE:
1655 *do_not_record_p = 1;
1656 return 0;
1657
1658 case ASM_OPERANDS:
1659 if (MEM_VOLATILE_P (x))
1660 {
1661 *do_not_record_p = 1;
1662 return 0;
1663 }
1664 else
1665 {
1666 /* We don't want to take the filename and line into account. */
1667 hash += (unsigned) code + (unsigned) GET_MODE (x)
1668 + hash_string_1 (ASM_OPERANDS_TEMPLATE (x))
1669 + hash_string_1 (ASM_OPERANDS_OUTPUT_CONSTRAINT (x))
1670 + (unsigned) ASM_OPERANDS_OUTPUT_IDX (x);
1671
1672 if (ASM_OPERANDS_INPUT_LENGTH (x))
1673 {
1674 for (i = 1; i < ASM_OPERANDS_INPUT_LENGTH (x); i++)
1675 {
1676 hash += (hash_expr_1 (ASM_OPERANDS_INPUT (x, i),
1677 GET_MODE (ASM_OPERANDS_INPUT (x, i)),
1678 do_not_record_p)
1679 + hash_string_1 (ASM_OPERANDS_INPUT_CONSTRAINT
1680 (x, i)));
1681 }
1682
1683 hash += hash_string_1 (ASM_OPERANDS_INPUT_CONSTRAINT (x, 0));
1684 x = ASM_OPERANDS_INPUT (x, 0);
1685 mode = GET_MODE (x);
1686 goto repeat;
1687 }
1688 return hash;
1689 }
1690
1691 default:
1692 break;
1693 }
1694
1695 hash += (unsigned) code + (unsigned) GET_MODE (x);
1696 for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
1697 {
1698 if (fmt[i] == 'e')
1699 {
1700 /* If we are about to do the last recursive call
1701 needed at this level, change it into iteration.
1702 This function is called enough to be worth it. */
1703 if (i == 0)
1704 {
1705 x = XEXP (x, i);
1706 goto repeat;
1707 }
1708
1709 hash += hash_expr_1 (XEXP (x, i), 0, do_not_record_p);
1710 if (*do_not_record_p)
1711 return 0;
1712 }
1713
1714 else if (fmt[i] == 'E')
1715 for (j = 0; j < XVECLEN (x, i); j++)
1716 {
1717 hash += hash_expr_1 (XVECEXP (x, i, j), 0, do_not_record_p);
1718 if (*do_not_record_p)
1719 return 0;
1720 }
1721
1722 else if (fmt[i] == 's')
1723 hash += hash_string_1 (XSTR (x, i));
1724 else if (fmt[i] == 'i')
1725 hash += (unsigned int) XINT (x, i);
1726 else
1727 abort ();
1728 }
1729
1730 return hash;
1731 }
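
/* An illustration (symbolic, since the numeric values of the rtx codes are
   target- and version-specific): hashing (plus:SI (reg:SI 100) (const_int 4))
   falls through to the default case and accumulates

     hash = PLUS + SImode
            + ((CONST_INT << 7) + 0 + 4)   for XEXP (x, 1), hashed with mode 0
            + ((REG << 7) + 100)           for XEXP (x, 0), handled last via
                                           the `repeat' label since the i == 0
                                           recursion is turned into iteration.  */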
1732
1733 /* Hash a set of register REGNO.
1734
1735 Sets are hashed on the register that is set. This simplifies the PRE copy
1736 propagation code.
1737
1738 ??? May need to make things more elaborate. Later, as necessary. */
1739
1740 static unsigned int
1741 hash_set (int regno, int hash_table_size)
1742 {
1743 unsigned int hash;
1744
1745 hash = regno;
1746 return hash % hash_table_size;
1747 }
1748
1749 /* Return nonzero if exp1 is equivalent to exp2.
1750 ??? Borrowed from cse.c. Might want to remerge with cse.c. Later. */
1751
1752 static int
1753 expr_equiv_p (rtx x, rtx y)
1754 {
1755 int i, j;
1756 enum rtx_code code;
1757 const char *fmt;
1758
1759 if (x == y)
1760 return 1;
1761
1762 if (x == 0 || y == 0)
1763 return 0;
1764
1765 code = GET_CODE (x);
1766 if (code != GET_CODE (y))
1767 return 0;
1768
1769 /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent. */
1770 if (GET_MODE (x) != GET_MODE (y))
1771 return 0;
1772
1773 switch (code)
1774 {
1775 case PC:
1776 case CC0:
1777 case CONST_INT:
1778 return 0;
1779
1780 case LABEL_REF:
1781 return XEXP (x, 0) == XEXP (y, 0);
1782
1783 case SYMBOL_REF:
1784 return XSTR (x, 0) == XSTR (y, 0);
1785
1786 case REG:
1787 return REGNO (x) == REGNO (y);
1788
1789 case MEM:
1790 /* Can't merge two expressions in different alias sets, since we can
1791 decide that the expression is transparent in a block when it isn't,
1792 due to it being set with a different alias set. */
1793 if (MEM_ALIAS_SET (x) != MEM_ALIAS_SET (y))
1794 return 0;
1795
1796 /* A volatile mem should not be considered equivalent to any other. */
1797 if (MEM_VOLATILE_P (x) || MEM_VOLATILE_P (y))
1798 return 0;
1799 break;
1800
1801 /* For commutative operations, check both orders. */
1802 case PLUS:
1803 case MULT:
1804 case AND:
1805 case IOR:
1806 case XOR:
1807 case NE:
1808 case EQ:
1809 return ((expr_equiv_p (XEXP (x, 0), XEXP (y, 0))
1810 && expr_equiv_p (XEXP (x, 1), XEXP (y, 1)))
1811 || (expr_equiv_p (XEXP (x, 0), XEXP (y, 1))
1812 && expr_equiv_p (XEXP (x, 1), XEXP (y, 0))));
1813
1814 case ASM_OPERANDS:
1815 /* We don't use the generic code below because we want to
1816 disregard filename and line numbers. */
1817
1818 /* A volatile asm isn't equivalent to any other. */
1819 if (MEM_VOLATILE_P (x) || MEM_VOLATILE_P (y))
1820 return 0;
1821
1822 if (GET_MODE (x) != GET_MODE (y)
1823 || strcmp (ASM_OPERANDS_TEMPLATE (x), ASM_OPERANDS_TEMPLATE (y))
1824 || strcmp (ASM_OPERANDS_OUTPUT_CONSTRAINT (x),
1825 ASM_OPERANDS_OUTPUT_CONSTRAINT (y))
1826 || ASM_OPERANDS_OUTPUT_IDX (x) != ASM_OPERANDS_OUTPUT_IDX (y)
1827 || ASM_OPERANDS_INPUT_LENGTH (x) != ASM_OPERANDS_INPUT_LENGTH (y))
1828 return 0;
1829
1830 if (ASM_OPERANDS_INPUT_LENGTH (x))
1831 {
1832 for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; i--)
1833 if (! expr_equiv_p (ASM_OPERANDS_INPUT (x, i),
1834 ASM_OPERANDS_INPUT (y, i))
1835 || strcmp (ASM_OPERANDS_INPUT_CONSTRAINT (x, i),
1836 ASM_OPERANDS_INPUT_CONSTRAINT (y, i)))
1837 return 0;
1838 }
1839
1840 return 1;
1841
1842 default:
1843 break;
1844 }
1845
1846 /* Compare the elements. If any pair of corresponding elements
1847 fail to match, return 0 for the whole thing. */
1848
1849 fmt = GET_RTX_FORMAT (code);
1850 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1851 {
1852 switch (fmt[i])
1853 {
1854 case 'e':
1855 if (! expr_equiv_p (XEXP (x, i), XEXP (y, i)))
1856 return 0;
1857 break;
1858
1859 case 'E':
1860 if (XVECLEN (x, i) != XVECLEN (y, i))
1861 return 0;
1862 for (j = 0; j < XVECLEN (x, i); j++)
1863 if (! expr_equiv_p (XVECEXP (x, i, j), XVECEXP (y, i, j)))
1864 return 0;
1865 break;
1866
1867 case 's':
1868 if (strcmp (XSTR (x, i), XSTR (y, i)))
1869 return 0;
1870 break;
1871
1872 case 'i':
1873 if (XINT (x, i) != XINT (y, i))
1874 return 0;
1875 break;
1876
1877 case 'w':
1878 if (XWINT (x, i) != XWINT (y, i))
1879 return 0;
1880 break;
1881
1882 case '0':
1883 break;
1884
1885 default:
1886 abort ();
1887 }
1888 }
1889
1890 return 1;
1891 }
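
/* For instance (register numbers made up for illustration),
   (plus:SI (reg:SI 100) (reg:SI 101)) is equivalent to
   (plus:SI (reg:SI 101) (reg:SI 100)) thanks to the commutative cases
   above, while the SImode and HImode versions of the same product are
   never equivalent because of the mode check.  */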
1892
1893 /* Insert expression X in INSN in the hash TABLE.
1894 If it is already present, record it as the last occurrence in INSN's
1895 basic block.
1896
1897 MODE is the mode of the value X is being stored into.
1898 It is only used if X is a CONST_INT.
1899
1900 ANTIC_P is nonzero if X is an anticipatable expression.
1901 AVAIL_P is nonzero if X is an available expression. */
1902
1903 static void
1904 insert_expr_in_table (rtx x, enum machine_mode mode, rtx insn, int antic_p,
1905 int avail_p, struct hash_table *table)
1906 {
1907 int found, do_not_record_p;
1908 unsigned int hash;
1909 struct expr *cur_expr, *last_expr = NULL;
1910 struct occr *antic_occr, *avail_occr;
1911 struct occr *last_occr = NULL;
1912
1913 hash = hash_expr (x, mode, &do_not_record_p, table->size);
1914
1915 /* Do not insert expression in table if it contains volatile operands,
1916 or if hash_expr determines the expression is something we don't want
1917 to or can't handle. */
1918 if (do_not_record_p)
1919 return;
1920
1921 cur_expr = table->table[hash];
1922 found = 0;
1923
1924 while (cur_expr && 0 == (found = expr_equiv_p (cur_expr->expr, x)))
1925 {
1926 /* If the expression isn't found, save a pointer to the end of
1927 the list. */
1928 last_expr = cur_expr;
1929 cur_expr = cur_expr->next_same_hash;
1930 }
1931
1932 if (! found)
1933 {
1934 cur_expr = gcse_alloc (sizeof (struct expr));
1935 bytes_used += sizeof (struct expr);
1936 if (table->table[hash] == NULL)
1937 /* This is the first pattern that hashed to this index. */
1938 table->table[hash] = cur_expr;
1939 else
1940 /* Add EXPR to end of this hash chain. */
1941 last_expr->next_same_hash = cur_expr;
1942
1943 /* Set the fields of the expr element. */
1944 cur_expr->expr = x;
1945 cur_expr->bitmap_index = table->n_elems++;
1946 cur_expr->next_same_hash = NULL;
1947 cur_expr->antic_occr = NULL;
1948 cur_expr->avail_occr = NULL;
1949 }
1950
1951 /* Now record the occurrence(s). */
1952 if (antic_p)
1953 {
1954 antic_occr = cur_expr->antic_occr;
1955
1956 /* Search for another occurrence in the same basic block. */
1957 while (antic_occr && BLOCK_NUM (antic_occr->insn) != BLOCK_NUM (insn))
1958 {
1959 /* If an occurrence isn't found, save a pointer to the end of
1960 the list. */
1961 last_occr = antic_occr;
1962 antic_occr = antic_occr->next;
1963 }
1964
1965 if (antic_occr)
1966 /* Found another instance of the expression in the same basic block.
1967 Prefer the currently recorded one. We want the first one in the
1968 block and the block is scanned from start to end. */
1969 ; /* nothing to do */
1970 else
1971 {
1972 /* First occurrence of this expression in this basic block. */
1973 antic_occr = gcse_alloc (sizeof (struct occr));
1974 bytes_used += sizeof (struct occr);
1975 /* First occurrence of this expression in any block? */
1976 if (cur_expr->antic_occr == NULL)
1977 cur_expr->antic_occr = antic_occr;
1978 else
1979 last_occr->next = antic_occr;
1980
1981 antic_occr->insn = insn;
1982 antic_occr->next = NULL;
1983 }
1984 }
1985
1986 if (avail_p)
1987 {
1988 avail_occr = cur_expr->avail_occr;
1989
1990 /* Search for another occurrence in the same basic block. */
1991 while (avail_occr && BLOCK_NUM (avail_occr->insn) != BLOCK_NUM (insn))
1992 {
1993 /* If an occurrence isn't found, save a pointer to the end of
1994 the list. */
1995 last_occr = avail_occr;
1996 avail_occr = avail_occr->next;
1997 }
1998
1999 if (avail_occr)
2000 /* Found another instance of the expression in the same basic block.
2001 Prefer this occurrence to the currently recorded one. We want
2002 the last one in the block and the block is scanned from start
2003 to end. */
2004 avail_occr->insn = insn;
2005 else
2006 {
2007 /* First occurrence of this expression in this basic block. */
2008 avail_occr = gcse_alloc (sizeof (struct occr));
2009 bytes_used += sizeof (struct occr);
2010
2011 /* First occurrence of this expression in any block? */
2012 if (cur_expr->avail_occr == NULL)
2013 cur_expr->avail_occr = avail_occr;
2014 else
2015 last_occr->next = avail_occr;
2016
2017 avail_occr->insn = insn;
2018 avail_occr->next = NULL;
2019 }
2020 }
2021 }
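
/* After scanning, an expression seen in more than one block ends up in a
   structure roughly like this (illustrative):

     table->table[hash] --> expr { expr = (plus (reg 100) (reg 101)),
                                   bitmap_index = N,
                                   antic_occr --> occr (bb 2) --> nil,
                                   avail_occr --> occr (bb 2) --> occr (bb 5) }

   with at most one occurrence recorded per block on each list: the first
   in the block for antic_occr, the last for avail_occr.  */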
2022
2023 /* Insert pattern X in INSN in the hash table.
2024 X is a SET of a reg to either another reg or a constant.
2025 If it is already present, record it as the last occurrence in INSN's
2026 basic block. */
2027
2028 static void
2029 insert_set_in_table (rtx x, rtx insn, struct hash_table *table)
2030 {
2031 int found;
2032 unsigned int hash;
2033 struct expr *cur_expr, *last_expr = NULL;
2034 struct occr *cur_occr, *last_occr = NULL;
2035
2036 if (GET_CODE (x) != SET
2037 || GET_CODE (SET_DEST (x)) != REG)
2038 abort ();
2039
2040 hash = hash_set (REGNO (SET_DEST (x)), table->size);
2041
2042 cur_expr = table->table[hash];
2043 found = 0;
2044
2045 while (cur_expr && 0 == (found = expr_equiv_p (cur_expr->expr, x)))
2046 {
2047 /* If the expression isn't found, save a pointer to the end of
2048 the list. */
2049 last_expr = cur_expr;
2050 cur_expr = cur_expr->next_same_hash;
2051 }
2052
2053 if (! found)
2054 {
2055 cur_expr = gcse_alloc (sizeof (struct expr));
2056 bytes_used += sizeof (struct expr);
2057 if (table->table[hash] == NULL)
2058 /* This is the first pattern that hashed to this index. */
2059 table->table[hash] = cur_expr;
2060 else
2061 /* Add EXPR to end of this hash chain. */
2062 last_expr->next_same_hash = cur_expr;
2063
2064 /* Set the fields of the expr element.
2065 We must copy X because it can be modified when copy propagation is
2066 performed on its operands. */
2067 cur_expr->expr = copy_rtx (x);
2068 cur_expr->bitmap_index = table->n_elems++;
2069 cur_expr->next_same_hash = NULL;
2070 cur_expr->antic_occr = NULL;
2071 cur_expr->avail_occr = NULL;
2072 }
2073
2074 /* Now record the occurrence. */
2075 cur_occr = cur_expr->avail_occr;
2076
2077 /* Search for another occurrence in the same basic block. */
2078 while (cur_occr && BLOCK_NUM (cur_occr->insn) != BLOCK_NUM (insn))
2079 {
2080 /* If an occurrence isn't found, save a pointer to the end of
2081 the list. */
2082 last_occr = cur_occr;
2083 cur_occr = cur_occr->next;
2084 }
2085
2086 if (cur_occr)
2087 /* Found another instance of the expression in the same basic block.
2088 Prefer this occurrence to the currently recorded one. We want the
2089 last one in the block and the block is scanned from start to end. */
2090 cur_occr->insn = insn;
2091 else
2092 {
2093 /* First occurrence of this expression in this basic block. */
2094 cur_occr = gcse_alloc (sizeof (struct occr));
2095 bytes_used += sizeof (struct occr);
2096
2097 /* First occurrence of this expression in any block? */
2098 if (cur_expr->avail_occr == NULL)
2099 cur_expr->avail_occr = cur_occr;
2100 else
2101 last_occr->next = cur_occr;
2102
2103 cur_occr->insn = insn;
2104 cur_occr->next = NULL;
2105 }
2106 }
2107
2108 /* Determine whether the rtx X should be treated as a constant for
2109 the purposes of GCSE's constant propagation. */
2110
2111 static bool
2112 gcse_constant_p (rtx x)
2113 {
2114 /* Consider a COMPARE of two integers constant. */
2115 if (GET_CODE (x) == COMPARE
2116 && GET_CODE (XEXP (x, 0)) == CONST_INT
2117 && GET_CODE (XEXP (x, 1)) == CONST_INT)
2118 return true;
2119
2120
2121 /* Consider a COMPARE of the same registers to be a constant
2122 if they are not floating point registers. */
2123 if (GET_CODE (x) == COMPARE
2124 && GET_CODE (XEXP (x, 0)) == REG
2125 && GET_CODE (XEXP (x, 1)) == REG
2126 && REGNO (XEXP (x, 0)) == REGNO (XEXP (x, 1))
2127 && ! FLOAT_MODE_P (GET_MODE (XEXP (x, 0)))
2128 && ! FLOAT_MODE_P (GET_MODE (XEXP (x, 1))))
2129 return true;
2130
2131 if (GET_CODE (x) == CONSTANT_P_RTX)
2132 return false;
2133
2134 return CONSTANT_P (x);
2135 }
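
/* Examples (illustrative): (const_int 42), (symbol_ref "x") and
   (compare (const_int 1) (const_int 2)) are all treated as constants here;
   (compare (reg:SF 100) (reg:SF 100)) is not, since a NaN does not compare
   equal to itself; and a plain (reg 100) is rejected by CONSTANT_P.  */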
2136
2137 /* Scan pattern PAT of INSN and add an entry to the hash TABLE (set or
2138 expression one). */
2139
2140 static void
2141 hash_scan_set (rtx pat, rtx insn, struct hash_table *table)
2142 {
2143 rtx src = SET_SRC (pat);
2144 rtx dest = SET_DEST (pat);
2145 rtx note;
2146
2147 if (GET_CODE (src) == CALL)
2148 hash_scan_call (src, insn, table);
2149
2150 else if (GET_CODE (dest) == REG)
2151 {
2152 unsigned int regno = REGNO (dest);
2153 rtx tmp;
2154
2155 /* If this is a single set and we are doing constant propagation,
2156 see if a REG_NOTE shows this equivalent to a constant. */
2157 if (table->set_p && (note = find_reg_equal_equiv_note (insn)) != 0
2158 && gcse_constant_p (XEXP (note, 0)))
2159 src = XEXP (note, 0), pat = gen_rtx_SET (VOIDmode, dest, src);
2160
2161 /* Only record sets of pseudo-regs in the hash table. */
2162 if (! table->set_p
2163 && regno >= FIRST_PSEUDO_REGISTER
2164 /* Don't GCSE something if we can't do a reg/reg copy. */
2165 && can_copy_p (GET_MODE (dest))
2166 /* GCSE commonly inserts instructions after the insn. We can't
2167 do that easily for EH_REGION notes so disable GCSE on these
2168 for now. */
2169 && !find_reg_note (insn, REG_EH_REGION, NULL_RTX)
2170 /* Is SET_SRC something we want to gcse? */
2171 && want_to_gcse_p (src)
2172 /* Don't CSE a nop. */
2173 && ! set_noop_p (pat)
2174 /* Don't GCSE if it has an attached REG_EQUIV note.
2175 At this point only function parameters should have
2176 REG_EQUIV notes, and if the argument slot is used somewhere
2177 explicitly, it means the address of the parameter has been taken,
2178 so we should not extend the lifetime of the pseudo. */
2179 && ((note = find_reg_note (insn, REG_EQUIV, NULL_RTX)) == 0
2180 || GET_CODE (XEXP (note, 0)) != MEM))
2181 {
2182 /* An expression is not anticipatable if its operands are
2183 modified before this insn or if this is not the only SET in
2184 this insn. */
2185 int antic_p = oprs_anticipatable_p (src, insn) && single_set (insn);
2186 /* An expression is not available if its operands are
2187 subsequently modified, including this insn. It's also not
2188 available if this is a branch, because we can't insert
2189 a set after the branch. */
2190 int avail_p = (oprs_available_p (src, insn)
2191 && ! JUMP_P (insn));
2192
2193 insert_expr_in_table (src, GET_MODE (dest), insn, antic_p, avail_p, table);
2194 }
2195
2196 /* Record sets for constant/copy propagation. */
2197 else if (table->set_p
2198 && regno >= FIRST_PSEUDO_REGISTER
2199 && ((GET_CODE (src) == REG
2200 && REGNO (src) >= FIRST_PSEUDO_REGISTER
2201 && can_copy_p (GET_MODE (dest))
2202 && REGNO (src) != regno)
2203 || gcse_constant_p (src))
2204 /* A copy is not available if its src or dest is subsequently
2205 modified. Here we want to search from INSN+1 on, but
2206 oprs_available_p searches from INSN on. */
2207 && (insn == BB_END (BLOCK_FOR_INSN (insn))
2208 || ((tmp = next_nonnote_insn (insn)) != NULL_RTX
2209 && oprs_available_p (pat, tmp))))
2210 insert_set_in_table (pat, insn, table);
2211 }
2212 /* In the case of a store we want to consider the memory value as available
2213 in the REG stored in that memory. This makes it possible to remove
2214 redundant loads due to stores to the same location. */
2215 else if (flag_gcse_las && GET_CODE (src) == REG && GET_CODE (dest) == MEM)
2216 {
2217 unsigned int regno = REGNO (src);
2218
2219 /* Do not do this for constant/copy propagation. */
2220 if (! table->set_p
2221 /* Only record sets of pseudo-regs in the hash table. */
2222 && regno >= FIRST_PSEUDO_REGISTER
2223 /* Don't GCSE something if we can't do a reg/reg copy. */
2224 && can_copy_p (GET_MODE (src))
2225 /* GCSE commonly inserts instructions after the insn. We can't
2226 do that easily for EH_REGION notes so disable GCSE on these
2227 for now. */
2228 && ! find_reg_note (insn, REG_EH_REGION, NULL_RTX)
2229 /* Is SET_DEST something we want to gcse? */
2230 && want_to_gcse_p (dest)
2231 /* Don't CSE a nop. */
2232 && ! set_noop_p (pat)
2233 /* Don't GCSE if it has an attached REG_EQUIV note.
2234 At this point only function parameters should have
2235 REG_EQUIV notes, and if the argument slot is used somewhere
2236 explicitly, it means the address of the parameter has been taken,
2237 so we should not extend the lifetime of the pseudo. */
2238 && ((note = find_reg_note (insn, REG_EQUIV, NULL_RTX)) == 0
2239 || GET_CODE (XEXP (note, 0)) != MEM))
2240 {
2241 /* Stores are never anticipatable. */
2242 int antic_p = 0;
2243 /* An expression is not available if its operands are
2244 subsequently modified, including this insn. It's also not
2245 available if this is a branch, because we can't insert
2246 a set after the branch. */
2247 int avail_p = oprs_available_p (dest, insn)
2248 && ! JUMP_P (insn);
2249
2250 /* Record the memory expression (DEST) in the hash table. */
2251 insert_expr_in_table (dest, GET_MODE (dest), insn,
2252 antic_p, avail_p, table);
2253 }
2254 }
2255 }
2256
2257 static void
2258 hash_scan_clobber (rtx x ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED,
2259 struct hash_table *table ATTRIBUTE_UNUSED)
2260 {
2261 /* Currently nothing to do. */
2262 }
2263
2264 static void
2265 hash_scan_call (rtx x ATTRIBUTE_UNUSED, rtx insn ATTRIBUTE_UNUSED,
2266 struct hash_table *table ATTRIBUTE_UNUSED)
2267 {
2268 /* Currently nothing to do. */
2269 }
2270
2271 /* Process INSN and add hash table entries as appropriate.
2272
2273 Only available expressions that set a single pseudo-reg are recorded.
2274
2275 Single sets in a PARALLEL could be handled, but it's an extra complication
2276 that isn't dealt with right now. The trick is handling the CLOBBERs that
2277 are also in the PARALLEL. Later.
2278
2279 If SET_P is nonzero, this is for the assignment hash table,
2280 otherwise it is for the expression hash table.
2281 If IN_LIBCALL_BLOCK is nonzero, we are in a libcall block and should
2282 not record any expressions. */
2283
2284 static void
2285 hash_scan_insn (rtx insn, struct hash_table *table, int in_libcall_block)
2286 {
2287 rtx pat = PATTERN (insn);
2288 int i;
2289
2290 if (in_libcall_block)
2291 return;
2292
2293 /* Pick out the sets of INSN and for other forms of instructions record
2294 what's been modified. */
2295
2296 if (GET_CODE (pat) == SET)
2297 hash_scan_set (pat, insn, table);
2298 else if (GET_CODE (pat) == PARALLEL)
2299 for (i = 0; i < XVECLEN (pat, 0); i++)
2300 {
2301 rtx x = XVECEXP (pat, 0, i);
2302
2303 if (GET_CODE (x) == SET)
2304 hash_scan_set (x, insn, table);
2305 else if (GET_CODE (x) == CLOBBER)
2306 hash_scan_clobber (x, insn, table);
2307 else if (GET_CODE (x) == CALL)
2308 hash_scan_call (x, insn, table);
2309 }
2310
2311 else if (GET_CODE (pat) == CLOBBER)
2312 hash_scan_clobber (pat, insn, table);
2313 else if (GET_CODE (pat) == CALL)
2314 hash_scan_call (pat, insn, table);
2315 }
2316
2317 static void
2318 dump_hash_table (FILE *file, const char *name, struct hash_table *table)
2319 {
2320 int i;
2321 /* Flattened out table, so it's printed in proper order. */
2322 struct expr **flat_table;
2323 unsigned int *hash_val;
2324 struct expr *expr;
2325
2326 flat_table = xcalloc (table->n_elems, sizeof (struct expr *));
2327 hash_val = xmalloc (table->n_elems * sizeof (unsigned int));
2328
2329 for (i = 0; i < (int) table->size; i++)
2330 for (expr = table->table[i]; expr != NULL; expr = expr->next_same_hash)
2331 {
2332 flat_table[expr->bitmap_index] = expr;
2333 hash_val[expr->bitmap_index] = i;
2334 }
2335
2336 fprintf (file, "%s hash table (%d buckets, %d entries)\n",
2337 name, table->size, table->n_elems);
2338
2339 for (i = 0; i < (int) table->n_elems; i++)
2340 if (flat_table[i] != 0)
2341 {
2342 expr = flat_table[i];
2343 fprintf (file, "Index %d (hash value %d)\n ",
2344 expr->bitmap_index, hash_val[i]);
2345 print_rtl (file, expr->expr);
2346 fprintf (file, "\n");
2347 }
2348
2349 fprintf (file, "\n");
2350
2351 free (flat_table);
2352 free (hash_val);
2353 }
2354
2355 /* Record register first/last/block set information for REGNO in INSN.
2356
2357 first_set records the first place in the block where the register
2358 is set and is used to compute "anticipatability".
2359
2360 last_set records the last place in the block where the register
2361 is set and is used to compute "availability".
2362
2363 last_bb records the block for which first_set and last_set are
2364 valid, as a quick test to invalidate them.
2365
2366 reg_set_in_block records whether the register is set in the block
2367 and is used to compute "transparency". */
2368
2369 static void
2370 record_last_reg_set_info (rtx insn, int regno)
2371 {
2372 struct reg_avail_info *info = &reg_avail_info[regno];
2373 int cuid = INSN_CUID (insn);
2374
2375 info->last_set = cuid;
2376 if (info->last_bb != current_bb)
2377 {
2378 info->last_bb = current_bb;
2379 info->first_set = cuid;
2380 SET_BIT (reg_set_in_block[current_bb->index], regno);
2381 }
2382 }
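
/* For example, given a block containing (insn numbers hypothetical)

     insn 10: (set (reg 100) ...)    <- first_set = CUID of insn 10
     insn 12: (use (reg 100))
     insn 14: (set (reg 100) ...)    <- last_set  = CUID of insn 14

   an expression using reg 100 is, roughly speaking, anticipatable only in
   insns before insn 10 and available only in insns after insn 14.  */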
2383
2384
2385 /* Record all of the canonicalized MEMs of record_last_mem_set_info's insn.
2386 Note we store a pair of elements in the list, so they have to be
2387 taken off pairwise. */
2388
2389 static void
2390 canon_list_insert (rtx dest ATTRIBUTE_UNUSED, rtx unused1 ATTRIBUTE_UNUSED,
2391 void * v_insn)
2392 {
2393 rtx dest_addr, insn;
2394 int bb;
2395
2396 while (GET_CODE (dest) == SUBREG
2397 || GET_CODE (dest) == ZERO_EXTRACT
2398 || GET_CODE (dest) == SIGN_EXTRACT
2399 || GET_CODE (dest) == STRICT_LOW_PART)
2400 dest = XEXP (dest, 0);
2401
2402 /* If DEST is not a MEM, then it will not conflict with a load. Note
2403 that function calls are assumed to clobber memory, but are handled
2404 elsewhere. */
2405
2406 if (GET_CODE (dest) != MEM)
2407 return;
2408
2409 dest_addr = get_addr (XEXP (dest, 0));
2410 dest_addr = canon_rtx (dest_addr);
2411 insn = (rtx) v_insn;
2412 bb = BLOCK_NUM (insn);
2413
2414 canon_modify_mem_list[bb] =
2415 alloc_EXPR_LIST (VOIDmode, dest_addr, canon_modify_mem_list[bb]);
2416 canon_modify_mem_list[bb] =
2417 alloc_EXPR_LIST (VOIDmode, dest, canon_modify_mem_list[bb]);
2418 bitmap_set_bit (canon_modify_mem_list_set, bb);
2419 }
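
/* So after two stores in block BB the list reads, newest first,

     canon_modify_mem_list[bb]:  mem2 -> addr2 -> mem1 -> addr1 -> nil

   which is why consumers of this list must pop elements two at a time.  */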
2420
2421 /* Record memory modification information for INSN. We do not actually care
2422 about the memory location(s) that are set, or even how they are set (consider
2423 a CALL_INSN). We merely need to record which insns modify memory. */
2424
2425 static void
2426 record_last_mem_set_info (rtx insn)
2427 {
2428 int bb = BLOCK_NUM (insn);
2429
2430 /* load_killed_in_block_p will handle the case of calls clobbering
2431 everything. */
2432 modify_mem_list[bb] = alloc_INSN_LIST (insn, modify_mem_list[bb]);
2433 bitmap_set_bit (modify_mem_list_set, bb);
2434
2435 if (GET_CODE (insn) == CALL_INSN)
2436 {
2437 /* Note that traversals of this loop (other than for freeing)
2438 will break after encountering a CALL_INSN. So, there's no
2439 need to insert a pair of items, as canon_list_insert does. */
2440 canon_modify_mem_list[bb] =
2441 alloc_INSN_LIST (insn, canon_modify_mem_list[bb]);
2442 bitmap_set_bit (canon_modify_mem_list_set, bb);
2443 }
2444 else
2445 note_stores (PATTERN (insn), canon_list_insert, (void*) insn);
2446 }
2447
2448 /* Called from compute_hash_table via note_stores to handle one
2449 SET or CLOBBER in an insn. DATA is really the instruction in which
2450 the SET is taking place. */
2451
2452 static void
2453 record_last_set_info (rtx dest, rtx setter ATTRIBUTE_UNUSED, void *data)
2454 {
2455 rtx last_set_insn = (rtx) data;
2456
2457 if (GET_CODE (dest) == SUBREG)
2458 dest = SUBREG_REG (dest);
2459
2460 if (GET_CODE (dest) == REG)
2461 record_last_reg_set_info (last_set_insn, REGNO (dest));
2462 else if (GET_CODE (dest) == MEM
2463 /* Ignore pushes, they clobber nothing. */
2464 && ! push_operand (dest, GET_MODE (dest)))
2465 record_last_mem_set_info (last_set_insn);
2466 }
2467
2468 /* Top level function to create an expression or assignment hash table.
2469
2470 Expression entries are placed in the hash table if
2471 - they are of the form (set (pseudo-reg) src),
2472 - src is something we want to perform GCSE on,
2473 - none of the operands are subsequently modified in the block
2474
2475 Assignment entries are placed in the hash table if
2476 - they are of the form (set (pseudo-reg) src),
2477 - src is something we want to perform const/copy propagation on,
2478 - none of the operands or target are subsequently modified in the block
2479
2480 Currently src must be a pseudo-reg or a const_int.
2481
2482 TABLE is the table computed. */
2483
2484 static void
2485 compute_hash_table_work (struct hash_table *table)
2486 {
2487 unsigned int i;
2488
2489 /* While we compute the hash table we also compute a bit array of which
2490 registers are set in which blocks.
2491 ??? This isn't needed during const/copy propagation, but it's cheap to
2492 compute. Later. */
2493 sbitmap_vector_zero (reg_set_in_block, last_basic_block);
2494
2495 /* Re-cache any INSN_LIST nodes we have allocated. */
2496 clear_modify_mem_tables ();
2497 /* Some working arrays used to track first and last set in each block. */
2498 reg_avail_info = gmalloc (max_gcse_regno * sizeof (struct reg_avail_info));
2499
2500 for (i = 0; i < max_gcse_regno; ++i)
2501 reg_avail_info[i].last_bb = NULL;
2502
2503 FOR_EACH_BB (current_bb)
2504 {
2505 rtx insn;
2506 unsigned int regno;
2507 int in_libcall_block;
2508
2509 /* First pass over the instructions records information used to
2510 determine when registers and memory are first and last set.
2511 ??? hard-reg reg_set_in_block computation
2512 could be moved to compute_sets since they currently don't change. */
2513
2514 for (insn = BB_HEAD (current_bb);
2515 insn && insn != NEXT_INSN (BB_END (current_bb));
2516 insn = NEXT_INSN (insn))
2517 {
2518 if (! INSN_P (insn))
2519 continue;
2520
2521 if (GET_CODE (insn) == CALL_INSN)
2522 {
2523 bool clobbers_all = false;
2524 #ifdef NON_SAVING_SETJMP
2525 if (NON_SAVING_SETJMP
2526 && find_reg_note (insn, REG_SETJMP, NULL_RTX))
2527 clobbers_all = true;
2528 #endif
2529
2530 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
2531 if (clobbers_all
2532 || TEST_HARD_REG_BIT (regs_invalidated_by_call, regno))
2533 record_last_reg_set_info (insn, regno);
2534
2535 mark_call (insn);
2536 }
2537
2538 note_stores (PATTERN (insn), record_last_set_info, insn);
2539 }
2540
2541 /* Insert implicit sets in the hash table. */
2542 if (table->set_p
2543 && implicit_sets[current_bb->index] != NULL_RTX)
2544 hash_scan_set (implicit_sets[current_bb->index],
2545 BB_HEAD (current_bb), table);
2546
2547 /* The next pass builds the hash table. */
2548
2549 for (insn = BB_HEAD (current_bb), in_libcall_block = 0;
2550 insn && insn != NEXT_INSN (BB_END (current_bb));
2551 insn = NEXT_INSN (insn))
2552 if (INSN_P (insn))
2553 {
2554 if (find_reg_note (insn, REG_LIBCALL, NULL_RTX))
2555 in_libcall_block = 1;
2556 else if (table->set_p && find_reg_note (insn, REG_RETVAL, NULL_RTX))
2557 in_libcall_block = 0;
2558 hash_scan_insn (insn, table, in_libcall_block);
2559 if (!table->set_p && find_reg_note (insn, REG_RETVAL, NULL_RTX))
2560 in_libcall_block = 0;
2561 }
2562 }
2563
2564 free (reg_avail_info);
2565 reg_avail_info = NULL;
2566 }
2567
2568 /* Allocate space for the set/expr hash TABLE.
2569 N_INSNS is the number of instructions in the function.
2570 It is used to determine the number of buckets to use.
2571 SET_P determines whether a set or an expression table will
2572 be created. */
2573
2574 static void
2575 alloc_hash_table (int n_insns, struct hash_table *table, int set_p)
2576 {
2577 int n;
2578
2579 table->size = n_insns / 4;
2580 if (table->size < 11)
2581 table->size = 11;
2582
2583 /* Attempt to maintain efficient use of hash table.
2584 Making it an odd number is simplest for now.
2585 ??? Later take some measurements. */
2586 table->size |= 1;
2587 n = table->size * sizeof (struct expr *);
2588 table->table = gmalloc (n);
2589 table->set_p = set_p;
2590 }
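
/* E.g. a 1000-insn function gets 1000 / 4 = 250, then 250 | 1 = 251
   buckets, while a 30-insn function bottoms out at the 11-bucket
   minimum.  */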
2591
2592 /* Free things allocated by alloc_hash_table. */
2593
2594 static void
2595 free_hash_table (struct hash_table *table)
2596 {
2597 free (table->table);
2598 }
2599
2600 /* Compute the hash TABLE, used either for copy/const propagation
2601 or as the expression hash table. */
2602
2603 static void
2604 compute_hash_table (struct hash_table *table)
2605 {
2606 /* Initialize count of number of entries in hash table. */
2607 table->n_elems = 0;
2608 memset (table->table, 0, table->size * sizeof (struct expr *));
2609
2610 compute_hash_table_work (table);
2611 }
2612
2613 /* Expression tracking support. */
2614
2615 /* Lookup pattern PAT in the expression TABLE.
2616 The result is a pointer to the table entry, or NULL if not found. */
2617
2618 static struct expr *
2619 lookup_expr (rtx pat, struct hash_table *table)
2620 {
2621 int do_not_record_p;
2622 unsigned int hash = hash_expr (pat, GET_MODE (pat), &do_not_record_p,
2623 table->size);
2624 struct expr *expr;
2625
2626 if (do_not_record_p)
2627 return NULL;
2628
2629 expr = table->table[hash];
2630
2631 while (expr && ! expr_equiv_p (expr->expr, pat))
2632 expr = expr->next_same_hash;
2633
2634 return expr;
2635 }
2636
2637 /* Lookup REGNO in the set TABLE. The result is a pointer to the
2638 table entry, or NULL if not found. */
2639
2640 static struct expr *
2641 lookup_set (unsigned int regno, struct hash_table *table)
2642 {
2643 unsigned int hash = hash_set (regno, table->size);
2644 struct expr *expr;
2645
2646 expr = table->table[hash];
2647
2648 while (expr && REGNO (SET_DEST (expr->expr)) != regno)
2649 expr = expr->next_same_hash;
2650
2651 return expr;
2652 }
2653
2654 /* Return the next entry for REGNO in list EXPR. */
2655
2656 static struct expr *
2657 next_set (unsigned int regno, struct expr *expr)
2658 {
2659 do
2660 expr = expr->next_same_hash;
2661 while (expr && REGNO (SET_DEST (expr->expr)) != regno);
2662
2663 return expr;
2664 }
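
/* A sketch of the intended iteration idiom, walking every recorded set of
   register REGNO (assuming the pass's set table is named set_hash_table;
   `process' is a hypothetical consumer):  */
#if 0
  for (expr = lookup_set (regno, &set_hash_table);
       expr != NULL;
       expr = next_set (regno, expr))
    process (expr);
#endif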
2665
2666 /* Like free_INSN_LIST_list or free_EXPR_LIST_list, except that the node
2667 types may be mixed. */
2668
2669 static void
2670 free_insn_expr_list_list (rtx *listp)
2671 {
2672 rtx list, next;
2673
2674 for (list = *listp; list ; list = next)
2675 {
2676 next = XEXP (list, 1);
2677 if (GET_CODE (list) == EXPR_LIST)
2678 free_EXPR_LIST_node (list);
2679 else
2680 free_INSN_LIST_node (list);
2681 }
2682
2683 *listp = NULL;
2684 }
2685
2686 /* Clear canon_modify_mem_list and modify_mem_list tables. */
2687 static void
2688 clear_modify_mem_tables (void)
2689 {
2690 int i;
2691
2692 EXECUTE_IF_SET_IN_BITMAP
2693 (modify_mem_list_set, 0, i, free_INSN_LIST_list (modify_mem_list + i));
2694 bitmap_clear (modify_mem_list_set);
2695
2696 EXECUTE_IF_SET_IN_BITMAP
2697 (canon_modify_mem_list_set, 0, i,
2698 free_insn_expr_list_list (canon_modify_mem_list + i));
2699 bitmap_clear (canon_modify_mem_list_set);
2700 }
2701
2702 /* Release memory used by modify_mem_list_set and canon_modify_mem_list_set. */
2703
2704 static void
2705 free_modify_mem_tables (void)
2706 {
2707 clear_modify_mem_tables ();
2708 free (modify_mem_list);
2709 free (canon_modify_mem_list);
2710 modify_mem_list = 0;
2711 canon_modify_mem_list = 0;
2712 }
2713
2714 /* Reset tables used to keep track of what's still available [since the
2715 start of the block]. */
2716
2717 static void
2718 reset_opr_set_tables (void)
2719 {
2720 /* Maintain a bitmap of which regs have been set since beginning of
2721 the block. */
2722 CLEAR_REG_SET (reg_set_bitmap);
2723
2724 /* Also keep a record of the last instruction to modify memory.
2725 For now this is very trivial, we only record whether any memory
2726 location has been modified. */
2727 clear_modify_mem_tables ();
2728 }
2729
2730 /* Return nonzero if the operands of X are not set before INSN in
2731 INSN's basic block. */
2732
2733 static int
2734 oprs_not_set_p (rtx x, rtx insn)
2735 {
2736 int i, j;
2737 enum rtx_code code;
2738 const char *fmt;
2739
2740 if (x == 0)
2741 return 1;
2742
2743 code = GET_CODE (x);
2744 switch (code)
2745 {
2746 case PC:
2747 case CC0:
2748 case CONST:
2749 case CONST_INT:
2750 case CONST_DOUBLE:
2751 case CONST_VECTOR:
2752 case SYMBOL_REF:
2753 case LABEL_REF:
2754 case ADDR_VEC:
2755 case ADDR_DIFF_VEC:
2756 return 1;
2757
2758 case MEM:
2759 if (load_killed_in_block_p (BLOCK_FOR_INSN (insn),
2760 INSN_CUID (insn), x, 0))
2761 return 0;
2762 else
2763 return oprs_not_set_p (XEXP (x, 0), insn);
2764
2765 case REG:
2766 return ! REGNO_REG_SET_P (reg_set_bitmap, REGNO (x));
2767
2768 default:
2769 break;
2770 }
2771
2772 for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
2773 {
2774 if (fmt[i] == 'e')
2775 {
2776 /* If we are about to do the last recursive call
2777 needed at this level, change it into iteration.
2778 This function is called enough to be worth it. */
2779 if (i == 0)
2780 return oprs_not_set_p (XEXP (x, i), insn);
2781
2782 if (! oprs_not_set_p (XEXP (x, i), insn))
2783 return 0;
2784 }
2785 else if (fmt[i] == 'E')
2786 for (j = 0; j < XVECLEN (x, i); j++)
2787 if (! oprs_not_set_p (XVECEXP (x, i, j), insn))
2788 return 0;
2789 }
2790
2791 return 1;
2792 }
2793
2794 /* Mark things set by a CALL. */
2795
2796 static void
2797 mark_call (rtx insn)
2798 {
2799 if (! CONST_OR_PURE_CALL_P (insn))
2800 record_last_mem_set_info (insn);
2801 }
2802
2803 /* Mark things set by a SET. */
2804
2805 static void
2806 mark_set (rtx pat, rtx insn)
2807 {
2808 rtx dest = SET_DEST (pat);
2809
2810 while (GET_CODE (dest) == SUBREG
2811 || GET_CODE (dest) == ZERO_EXTRACT
2812 || GET_CODE (dest) == SIGN_EXTRACT
2813 || GET_CODE (dest) == STRICT_LOW_PART)
2814 dest = XEXP (dest, 0);
2815
2816 if (GET_CODE (dest) == REG)
2817 SET_REGNO_REG_SET (reg_set_bitmap, REGNO (dest));
2818 else if (GET_CODE (dest) == MEM)
2819 record_last_mem_set_info (insn);
2820
2821 if (GET_CODE (SET_SRC (pat)) == CALL)
2822 mark_call (insn);
2823 }
2824
2825 /* Record things set by a CLOBBER. */
2826
2827 static void
2828 mark_clobber (rtx pat, rtx insn)
2829 {
2830 rtx clob = XEXP (pat, 0);
2831
2832 while (GET_CODE (clob) == SUBREG || GET_CODE (clob) == STRICT_LOW_PART)
2833 clob = XEXP (clob, 0);
2834
2835 if (GET_CODE (clob) == REG)
2836 SET_REGNO_REG_SET (reg_set_bitmap, REGNO (clob));
2837 else
2838 record_last_mem_set_info (insn);
2839 }
2840
2841 /* Record things set by INSN.
2842 This data is used by oprs_not_set_p. */
2843
2844 static void
2845 mark_oprs_set (rtx insn)
2846 {
2847 rtx pat = PATTERN (insn);
2848 int i;
2849
2850 if (GET_CODE (pat) == SET)
2851 mark_set (pat, insn);
2852 else if (GET_CODE (pat) == PARALLEL)
2853 for (i = 0; i < XVECLEN (pat, 0); i++)
2854 {
2855 rtx x = XVECEXP (pat, 0, i);
2856
2857 if (GET_CODE (x) == SET)
2858 mark_set (x, insn);
2859 else if (GET_CODE (x) == CLOBBER)
2860 mark_clobber (x, insn);
2861 else if (GET_CODE (x) == CALL)
2862 mark_call (insn);
2863 }
2864
2865 else if (GET_CODE (pat) == CLOBBER)
2866 mark_clobber (pat, insn);
2867 else if (GET_CODE (pat) == CALL)
2868 mark_call (insn);
2869 }
2870
2871
2872 /* Classic GCSE reaching definition support. */
2873
2874 /* Allocate reaching def variables. */
2875
2876 static void
2877 alloc_rd_mem (int n_blocks, int n_insns)
2878 {
2879 rd_kill = sbitmap_vector_alloc (n_blocks, n_insns);
2880 sbitmap_vector_zero (rd_kill, n_blocks);
2881
2882 rd_gen = sbitmap_vector_alloc (n_blocks, n_insns);
2883 sbitmap_vector_zero (rd_gen, n_blocks);
2884
2885 reaching_defs = sbitmap_vector_alloc (n_blocks, n_insns);
2886 sbitmap_vector_zero (reaching_defs, n_blocks);
2887
2888 rd_out = sbitmap_vector_alloc (n_blocks, n_insns);
2889 sbitmap_vector_zero (rd_out, n_blocks);
2890 }
2891
2892 /* Free reaching def variables. */
2893
2894 static void
2895 free_rd_mem (void)
2896 {
2897 sbitmap_vector_free (rd_kill);
2898 sbitmap_vector_free (rd_gen);
2899 sbitmap_vector_free (reaching_defs);
2900 sbitmap_vector_free (rd_out);
2901 }
2902
2903 /* Add INSN to the kills of BB. REGNO, set in BB, is killed by INSN. */
2904
2905 static void
2906 handle_rd_kill_set (rtx insn, int regno, basic_block bb)
2907 {
2908 struct reg_set *this_reg;
2909
2910 for (this_reg = reg_set_table[regno]; this_reg; this_reg = this_reg ->next)
2911 if (BLOCK_NUM (this_reg->insn) != BLOCK_NUM (insn))
2912 SET_BIT (rd_kill[bb->index], INSN_CUID (this_reg->insn));
2913 }
2914
2915 /* Compute the set of kills for reaching definitions. */
2916
2917 static void
2918 compute_kill_rd (void)
2919 {
2920 int cuid;
2921 unsigned int regno;
2922 int i;
2923 basic_block bb;
2924
2925 /* For each block
2926 For each set bit in `gen' of the block (i.e each insn which
2927 generates a definition in the block)
2928 Call the reg set by the insn corresponding to that bit regx
2929 Look at the linked list starting at reg_set_table[regx]
2930 For each setting of regx in the linked list, which is not in
2931 this block
2932 Set the bit in `kill' corresponding to that insn. */
2933 FOR_EACH_BB (bb)
2934 for (cuid = 0; cuid < max_cuid; cuid++)
2935 if (TEST_BIT (rd_gen[bb->index], cuid))
2936 {
2937 rtx insn = CUID_INSN (cuid);
2938 rtx pat = PATTERN (insn);
2939
2940 if (GET_CODE (insn) == CALL_INSN)
2941 {
2942 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
2943 if (TEST_HARD_REG_BIT (regs_invalidated_by_call, regno))
2944 handle_rd_kill_set (insn, regno, bb);
2945 }
2946
2947 if (GET_CODE (pat) == PARALLEL)
2948 {
2949 for (i = XVECLEN (pat, 0) - 1; i >= 0; i--)
2950 {
2951 enum rtx_code code = GET_CODE (XVECEXP (pat, 0, i));
2952
2953 if ((code == SET || code == CLOBBER)
2954 && GET_CODE (XEXP (XVECEXP (pat, 0, i), 0)) == REG)
2955 handle_rd_kill_set (insn,
2956 REGNO (XEXP (XVECEXP (pat, 0, i), 0)),
2957 bb);
2958 }
2959 }
2960 else if (GET_CODE (pat) == SET && GET_CODE (SET_DEST (pat)) == REG)
2961 /* Each setting of this register outside of this block
2962 must be marked in the set of kills in this block. */
2963 handle_rd_kill_set (insn, REGNO (SET_DEST (pat)), bb);
2964 }
2965 }
2966
2967 /* Compute the reaching definitions as in
2968 Compilers Principles, Techniques, and Tools. Aho, Sethi, Ullman,
2969 Chapter 10. It is the same algorithm as used for computing available
2970 expressions but applied to the gens and kills of reaching definitions. */
2971
2972 static void
2973 compute_rd (void)
2974 {
2975 int changed, passes;
2976 basic_block bb;
2977
2978 FOR_EACH_BB (bb)
2979 sbitmap_copy (rd_out[bb->index] /*dst*/, rd_gen[bb->index] /*src*/);
2980
2981 passes = 0;
2982 changed = 1;
2983 while (changed)
2984 {
2985 changed = 0;
2986 FOR_EACH_BB (bb)
2987 {
2988 sbitmap_union_of_preds (reaching_defs[bb->index], rd_out, bb->index);
2989 changed |= sbitmap_union_of_diff_cg (rd_out[bb->index], rd_gen[bb->index],
2990 reaching_defs[bb->index], rd_kill[bb->index]);
2991 }
2992 passes++;
2993 }
2994
2995 if (gcse_file)
2996 fprintf (gcse_file, "reaching def computation: %d passes\n", passes);
2997 }
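
/* The loop above solves the standard forward dataflow equations

     reaching_defs[bb] = UNION over preds p of rd_out[p]
     rd_out[bb]        = rd_gen[bb] U (reaching_defs[bb] - rd_kill[bb])

   iterated until no rd_out bitmap changes.  */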
2998
2999 /* Classic GCSE available expression support. */
3000
3001 /* Allocate memory for available expression computation. */
3002
3003 static void
3004 alloc_avail_expr_mem (int n_blocks, int n_exprs)
3005 {
3006 ae_kill = sbitmap_vector_alloc (n_blocks, n_exprs);
3007 sbitmap_vector_zero (ae_kill, n_blocks);
3008
3009 ae_gen = sbitmap_vector_alloc (n_blocks, n_exprs);
3010 sbitmap_vector_zero (ae_gen, n_blocks);
3011
3012 ae_in = sbitmap_vector_alloc (n_blocks, n_exprs);
3013 sbitmap_vector_zero (ae_in, n_blocks);
3014
3015 ae_out = sbitmap_vector_alloc (n_blocks, n_exprs);
3016 sbitmap_vector_zero (ae_out, n_blocks);
3017 }
3018
3019 static void
3020 free_avail_expr_mem (void)
3021 {
3022 sbitmap_vector_free (ae_kill);
3023 sbitmap_vector_free (ae_gen);
3024 sbitmap_vector_free (ae_in);
3025 sbitmap_vector_free (ae_out);
3026 }
3027
3028 /* Compute the set of available expressions generated in each basic block. */
3029
3030 static void
3031 compute_ae_gen (struct hash_table *expr_hash_table)
3032 {
3033 unsigned int i;
3034 struct expr *expr;
3035 struct occr *occr;
3036
3037 /* For each recorded occurrence of each expression, set ae_gen[bb][expr].
3038 This is all we have to do because an expression is not recorded if it
3039 is not available, and the only expressions we want to work with are the
3040 ones that are recorded. */
3041 for (i = 0; i < expr_hash_table->size; i++)
3042 for (expr = expr_hash_table->table[i]; expr != 0; expr = expr->next_same_hash)
3043 for (occr = expr->avail_occr; occr != 0; occr = occr->next)
3044 SET_BIT (ae_gen[BLOCK_NUM (occr->insn)], expr->bitmap_index);
3045 }
3046
3047 /* Return nonzero if expression X is killed in BB. */
3048
3049 static int
3050 expr_killed_p (rtx x, basic_block bb)
3051 {
3052 int i, j;
3053 enum rtx_code code;
3054 const char *fmt;
3055
3056 if (x == 0)
3057 return 1;
3058
3059 code = GET_CODE (x);
3060 switch (code)
3061 {
3062 case REG:
3063 return TEST_BIT (reg_set_in_block[bb->index], REGNO (x));
3064
3065 case MEM:
3066 if (load_killed_in_block_p (bb, get_max_uid () + 1, x, 0))
3067 return 1;
3068 else
3069 return expr_killed_p (XEXP (x, 0), bb);
3070
3071 case PC:
3072 case CC0: /*FIXME*/
3073 case CONST:
3074 case CONST_INT:
3075 case CONST_DOUBLE:
3076 case CONST_VECTOR:
3077 case SYMBOL_REF:
3078 case LABEL_REF:
3079 case ADDR_VEC:
3080 case ADDR_DIFF_VEC:
3081 return 0;
3082
3083 default:
3084 break;
3085 }
3086
3087 for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
3088 {
3089 if (fmt[i] == 'e')
3090 {
3091 /* If we are about to do the last recursive call
3092 needed at this level, change it into iteration.
3093 This function is called enough to be worth it. */
3094 if (i == 0)
3095 return expr_killed_p (XEXP (x, i), bb);
3096 else if (expr_killed_p (XEXP (x, i), bb))
3097 return 1;
3098 }
3099 else if (fmt[i] == 'E')
3100 for (j = 0; j < XVECLEN (x, i); j++)
3101 if (expr_killed_p (XVECEXP (x, i, j), bb))
3102 return 1;
3103 }
3104
3105 return 0;
3106 }
3107
3108 /* Compute the set of available expressions killed in each basic block. */
3109
3110 static void
3111 compute_ae_kill (sbitmap *ae_gen, sbitmap *ae_kill,
3112 struct hash_table *expr_hash_table)
3113 {
3114 basic_block bb;
3115 unsigned int i;
3116 struct expr *expr;
3117
3118 FOR_EACH_BB (bb)
3119 for (i = 0; i < expr_hash_table->size; i++)
3120 for (expr = expr_hash_table->table[i]; expr; expr = expr->next_same_hash)
3121 {
3122 /* Skip EXPR if generated in this block. */
3123 if (TEST_BIT (ae_gen[bb->index], expr->bitmap_index))
3124 continue;
3125
3126 if (expr_killed_p (expr->expr, bb))
3127 SET_BIT (ae_kill[bb->index], expr->bitmap_index);
3128 }
3129 }
3130
3131 /* Actually perform the Classic GCSE optimizations. */
3132
3133 /* Return nonzero if occurrence OCCR of expression EXPR reaches block BB.
3134
3135 CHECK_SELF_LOOP is nonzero if we should consider a block reaching itself
3136 as a positive reach. We want to do this when there are two computations
3137 of the expression in the block.
3138
3139 VISITED is a pointer to a working buffer for tracking which BB's have
3140 been visited. It is NULL for the top-level call.
3141
3142 We treat reaching expressions that go through blocks containing the same
3143 reaching expression as "not reaching". E.g. if EXPR is generated in blocks
3144 2 and 3, INSN is in block 4, and 2->3->4, we treat the expression in block
3145 2 as not reaching. The intent is to improve the probability of finding
3146 only one reaching expression and to reduce register lifetimes by picking
3147 the closest such expression. */
3148
3149 static int
3150 expr_reaches_here_p_work (struct occr *occr, struct expr *expr,
3151 basic_block bb, int check_self_loop, char *visited)
3152 {
3153 edge pred;
3154
3155 for (pred = bb->pred; pred != NULL; pred = pred->pred_next)
3156 {
3157 basic_block pred_bb = pred->src;
3158
3159 if (visited[pred_bb->index])
3160 /* This predecessor has already been visited. Nothing to do. */
3161 ;
3162 else if (pred_bb == bb)
3163 {
3164 /* BB loops on itself. */
3165 if (check_self_loop
3166 && TEST_BIT (ae_gen[pred_bb->index], expr->bitmap_index)
3167 && BLOCK_NUM (occr->insn) == pred_bb->index)
3168 return 1;
3169
3170 visited[pred_bb->index] = 1;
3171 }
3172
3173 /* Ignore this predecessor if it kills the expression. */
3174 else if (TEST_BIT (ae_kill[pred_bb->index], expr->bitmap_index))
3175 visited[pred_bb->index] = 1;
3176
3177 /* Does this predecessor generate this expression? */
3178 else if (TEST_BIT (ae_gen[pred_bb->index], expr->bitmap_index))
3179 {
3180 /* Is this the occurrence we're looking for?
3181 Note that there's only one generating occurrence per block
3182 so we just need to check the block number. */
3183 if (BLOCK_NUM (occr->insn) == pred_bb->index)
3184 return 1;
3185
3186 visited[pred_bb->index] = 1;
3187 }
3188
3189 /* Neither gen nor kill. */
3190 else
3191 {
3192 visited[pred_bb->index] = 1;
3193 if (expr_reaches_here_p_work (occr, expr, pred_bb, check_self_loop,
3194 visited))
3195
3196 return 1;
3197 }
3198 }
3199
3200 /* All paths have been checked. */
3201 return 0;
3202 }
3203
3204 /* This wrapper for expr_reaches_here_p_work() is to ensure that any
3205 memory allocated for that function is returned. */
3206
3207 static int
3208 expr_reaches_here_p (struct occr *occr, struct expr *expr, basic_block bb,
3209 int check_self_loop)
3210 {
3211 int rval;
3212 char *visited = xcalloc (last_basic_block, 1);
3213
3214 rval = expr_reaches_here_p_work (occr, expr, bb, check_self_loop, visited);
3215
3216 free (visited);
3217 return rval;
3218 }
3219
3220 /* Return the instruction that computes EXPR that reaches INSN's basic block.
3221 If there is more than one such instruction, return NULL.
3222
3223 Called only by handle_avail_expr. */
3224
3225 static rtx
3226 computing_insn (struct expr *expr, rtx insn)
3227 {
3228 basic_block bb = BLOCK_FOR_INSN (insn);
3229
3230 if (expr->avail_occr->next == NULL)
3231 {
3232 if (BLOCK_FOR_INSN (expr->avail_occr->insn) == bb)
3233 /* The available expression is actually itself
3234 (i.e. a loop in the flow graph) so do nothing. */
3235 return NULL;
3236
3237 /* (FIXME) Case where we found a pattern that was created by
3238 an earlier substitution. */
3239 return expr->avail_occr->insn;
3240 }
3241 else
3242 {
3243 /* Pattern is computed more than once.
3244 Search backwards from this insn to see how many of these
3245 computations actually reach this insn. */
3246 struct occr *occr;
3247 rtx insn_computes_expr = NULL;
3248 int can_reach = 0;
3249
3250 for (occr = expr->avail_occr; occr != NULL; occr = occr->next)
3251 {
3252 if (BLOCK_FOR_INSN (occr->insn) == bb)
3253 {
3254 /* The expression is generated in this block.
3255 The only time we care about this is when the expression
3256 is generated later in the block [and thus there's a loop].
3257 We let the normal cse pass handle the other cases. */
3258 if (INSN_CUID (insn) < INSN_CUID (occr->insn)
3259 && expr_reaches_here_p (occr, expr, bb, 1))
3260 {
3261 can_reach++;
3262 if (can_reach > 1)
3263 return NULL;
3264
3265 insn_computes_expr = occr->insn;
3266 }
3267 }
3268 else if (expr_reaches_here_p (occr, expr, bb, 0))
3269 {
3270 can_reach++;
3271 if (can_reach > 1)
3272 return NULL;
3273
3274 insn_computes_expr = occr->insn;
3275 }
3276 }
3277
3278 if (insn_computes_expr == NULL)
3279 abort ();
3280
3281 return insn_computes_expr;
3282 }
3283 }
3284
3285 /* Return nonzero if the definition in DEF_INSN can reach INSN.
3286 Only called by can_disregard_other_sets. */
3287
3288 static int
3289 def_reaches_here_p (rtx insn, rtx def_insn)
3290 {
3291 rtx reg;
3292
3293 if (TEST_BIT (reaching_defs[BLOCK_NUM (insn)], INSN_CUID (def_insn)))
3294 return 1;
3295
3296 if (BLOCK_NUM (insn) == BLOCK_NUM (def_insn))
3297 {
3298 if (INSN_CUID (def_insn) < INSN_CUID (insn))
3299 {
3300 if (GET_CODE (PATTERN (def_insn)) == PARALLEL)
3301 return 1;
3302 else if (GET_CODE (PATTERN (def_insn)) == CLOBBER)
3303 reg = XEXP (PATTERN (def_insn), 0);
3304 else if (GET_CODE (PATTERN (def_insn)) == SET)
3305 reg = SET_DEST (PATTERN (def_insn));
3306 else
3307 abort ();
3308
3309 return ! reg_set_between_p (reg, NEXT_INSN (def_insn), insn);
3310 }
3311 else
3312 return 0;
3313 }
3314
3315 return 0;
3316 }
3317
3318 /* Return nonzero if *ADDR_THIS_REG can only have one value at INSN. The
3319 value returned is the number of definitions that reach INSN. Returning a
3320 value of zero means that [maybe] more than one definition reaches INSN and
3321 the caller can't perform whatever optimization it is trying to do; i.e.
3322 it is always safe to return zero. */
3323
3324 static int
3325 can_disregard_other_sets (struct reg_set **addr_this_reg, rtx insn, int for_combine)
3326 {
3327 int number_of_reaching_defs = 0;
3328 struct reg_set *this_reg;
3329
3330 for (this_reg = *addr_this_reg; this_reg != 0; this_reg = this_reg->next)
3331 if (def_reaches_here_p (insn, this_reg->insn))
3332 {
3333 number_of_reaching_defs++;
3334 /* Ignore parallels for now. */
3335 if (GET_CODE (PATTERN (this_reg->insn)) == PARALLEL)
3336 return 0;
3337
3338 if (!for_combine
3339 && (GET_CODE (PATTERN (this_reg->insn)) == CLOBBER
3340 || ! rtx_equal_p (SET_SRC (PATTERN (this_reg->insn)),
3341 SET_SRC (PATTERN (insn)))))
3342 /* A setting of the reg to a different value reaches INSN. */
3343 return 0;
3344
3345 if (number_of_reaching_defs > 1)
3346 {
3347 /* If in this setting the value the register is being set to is
3348 equal to the previous value the register was set to and this
3349 setting reaches the insn we are trying to do the substitution
3350 on then we are ok. */
3351 if (GET_CODE (PATTERN (this_reg->insn)) == CLOBBER)
3352 return 0;
3353 else if (! rtx_equal_p (SET_SRC (PATTERN (this_reg->insn)),
3354 SET_SRC (PATTERN (insn))))
3355 return 0;
3356 }
3357
3358 *addr_this_reg = this_reg;
3359 }
3360
3361 return number_of_reaching_defs;
3362 }
3363
3364 /* Expression computed by insn is available and the substitution is legal,
3365 so try to perform the substitution.
3366
3367 The result is nonzero if any changes were made. */
3368
3369 static int
3370 handle_avail_expr (rtx insn, struct expr *expr)
3371 {
3372 rtx pat, insn_computes_expr, expr_set;
3373 rtx to;
3374 struct reg_set *this_reg;
3375 int found_setting, use_src;
3376 int changed = 0;
3377
3378 /* We only handle the case where one computation of the expression
3379 reaches this instruction. */
3380 insn_computes_expr = computing_insn (expr, insn);
3381 if (insn_computes_expr == NULL)
3382 return 0;
3383 expr_set = single_set (insn_computes_expr);
3384 /* The set might be in a parallel with multiple sets; we could
3385 probably handle that, but there's currently no easy way to find
3386 the relevant sub-expression. */
3387 if (!expr_set)
3388 return 0;
3389
3390 found_setting = 0;
3391 use_src = 0;
3392
3393 /* At this point we know only one computation of EXPR outside of this
3394 block reaches this insn. Now try to find a register that the
3395 expression is computed into. */
3396 if (GET_CODE (SET_SRC (expr_set)) == REG)
3397 {
3398 /* This is the case when the available expression that reaches
3399 here has already been handled as an available expression. */
3400 unsigned int regnum_for_replacing
3401 = REGNO (SET_SRC (expr_set));
3402
3403 /* If the register was created by GCSE we can't use `reg_set_table',
3404 however we know it's set only once. */
3405 if (regnum_for_replacing >= max_gcse_regno
3406 /* If the register the expression is computed into is set only once,
3407 or only one set reaches this insn, we can use it. */
3408 || (((this_reg = reg_set_table[regnum_for_replacing]),
3409 this_reg->next == NULL)
3410 || can_disregard_other_sets (&this_reg, insn, 0)))
3411 {
3412 use_src = 1;
3413 found_setting = 1;
3414 }
3415 }
3416
3417 if (!found_setting)
3418 {
3419 unsigned int regnum_for_replacing
3420 = REGNO (SET_DEST (expr_set));
3421
3422 /* This shouldn't happen. */
3423 if (regnum_for_replacing >= max_gcse_regno)
3424 abort ();
3425
3426 this_reg = reg_set_table[regnum_for_replacing];
3427
3428 /* If the register the expression is computed into is set only once,
3429 or only one set reaches this insn, use it. */
3430 if (this_reg->next == NULL
3431 || can_disregard_other_sets (&this_reg, insn, 0))
3432 found_setting = 1;
3433 }
3434
3435 if (found_setting)
3436 {
3437 pat = PATTERN (insn);
3438 if (use_src)
3439 to = SET_SRC (expr_set);
3440 else
3441 to = SET_DEST (expr_set);
3442 changed = validate_change (insn, &SET_SRC (pat), to, 0);
3443
3444 /* We should be able to ignore the return code from validate_change but
3445 to play it safe we check. */
3446 if (changed)
3447 {
3448 gcse_subst_count++;
3449 if (gcse_file != NULL)
3450 {
3451 fprintf (gcse_file, "GCSE: Replacing the source in insn %d with",
3452 INSN_UID (insn));
3453 fprintf (gcse_file, " reg %d %s insn %d\n",
3454 REGNO (to), use_src ? "from" : "set in",
3455 INSN_UID (insn_computes_expr));
3456 }
3457 }
3458 }
3459
3460 /* The register that the expr is computed into is set more than once. */
3461 else if (1 /*expensive_op(this_pattern->op) && do_expensive_gcse)*/)
3462 {
3463 /* Insert an insn after INSNX (the insn computing the expression) that
3464 copies the reg set in INSNX into a new pseudo register; call it REGN.
3465 From there until the end of the basic block, or until REGB (the old
3466 register) is set again, replace all uses of REGB with REGN. */
3467 rtx new_insn;
3468
3469 to = gen_reg_rtx (GET_MODE (SET_DEST (expr_set)));
3470
3471 /* Generate the new insn. */
3472 /* ??? If the change fails, we return 0, even though we created
3473 an insn. I think this is ok. */
3474 new_insn
3475 = emit_insn_after (gen_rtx_SET (VOIDmode, to,
3476 SET_DEST (expr_set)),
3477 insn_computes_expr);
3478
3479 /* Keep register set table up to date. */
3480 record_one_set (REGNO (to), new_insn);
3481
3482 gcse_create_count++;
3483 if (gcse_file != NULL)
3484 {
3485 fprintf (gcse_file, "GCSE: Creating insn %d to copy value of reg %d",
3486 INSN_UID (NEXT_INSN (insn_computes_expr)),
3487 REGNO (SET_SRC (PATTERN (NEXT_INSN (insn_computes_expr)))));
3488 fprintf (gcse_file, ", computed in insn %d,\n",
3489 INSN_UID (insn_computes_expr));
3490 fprintf (gcse_file, " into newly allocated reg %d\n",
3491 REGNO (to));
3492 }
3493
3494 pat = PATTERN (insn);
3495
3496 /* Do register replacement for INSN. */
3497 changed = validate_change (insn, &SET_SRC (pat),
3498 SET_DEST (PATTERN
3499 (NEXT_INSN (insn_computes_expr))),
3500 0);
3501
3502 /* We should be able to ignore the return code from validate_change but
3503 to play it safe we check. */
3504 if (changed)
3505 {
3506 gcse_subst_count++;
3507 if (gcse_file != NULL)
3508 {
3509 fprintf (gcse_file,
3510 "GCSE: Replacing the source in insn %d with reg %d ",
3511 INSN_UID (insn),
3512 REGNO (SET_DEST (PATTERN (NEXT_INSN
3513 (insn_computes_expr)))));
3514 fprintf (gcse_file, "set in insn %d\n",
3515 INSN_UID (insn_computes_expr));
3516 }
3517 }
3518 }
3519
3520 return changed;
3521 }
3522
3523 /* Perform classic GCSE. This is called by one_classic_gcse_pass after all
3524 the dataflow analysis has been done.
3525
3526 The result is nonzero if a change was made. */
3527
3528 static int
3529 classic_gcse (void)
3530 {
3531 int changed;
3532 rtx insn;
3533 basic_block bb;
3534
3535 /* Note we start at block 1. */
3536
3537 if (ENTRY_BLOCK_PTR->next_bb == EXIT_BLOCK_PTR)
3538 return 0;
3539
3540 changed = 0;
3541 FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR->next_bb->next_bb, EXIT_BLOCK_PTR, next_bb)
3542 {
3543 /* Reset tables used to keep track of what's still valid [since the
3544 start of the block]. */
3545 reset_opr_set_tables ();
3546
3547 for (insn = BB_HEAD (bb);
3548 insn != NULL && insn != NEXT_INSN (BB_END (bb));
3549 insn = NEXT_INSN (insn))
3550 {
3551 /* Is insn of form (set (pseudo-reg) ...)? */
3552 if (GET_CODE (insn) == INSN
3553 && GET_CODE (PATTERN (insn)) == SET
3554 && GET_CODE (SET_DEST (PATTERN (insn))) == REG
3555 && REGNO (SET_DEST (PATTERN (insn))) >= FIRST_PSEUDO_REGISTER)
3556 {
3557 rtx pat = PATTERN (insn);
3558 rtx src = SET_SRC (pat);
3559 struct expr *expr;
3560
3561 if (want_to_gcse_p (src)
3562 /* Is the expression recorded? */
3563 && ((expr = lookup_expr (src, &expr_hash_table)) != NULL)
3564 /* Is the expression available [at the start of the
3565 block]? */
3566 && TEST_BIT (ae_in[bb->index], expr->bitmap_index)
3567 /* Are the operands unchanged since the start of the
3568 block? */
3569 && oprs_not_set_p (src, insn))
3570 changed |= handle_avail_expr (insn, expr);
3571 }
3572
3573 /* Keep track of everything modified by this insn. */
3574 /* ??? Need to be careful w.r.t. mods done to INSN. */
3575 if (INSN_P (insn))
3576 mark_oprs_set (insn);
3577 }
3578 }
3579
3580 return changed;
3581 }
3582
3583 /* Top level routine to perform one classic GCSE pass.
3584
3585 Return nonzero if a change was made. */
3586
3587 static int
3588 one_classic_gcse_pass (int pass)
3589 {
3590 int changed = 0;
3591
3592 gcse_subst_count = 0;
3593 gcse_create_count = 0;
3594
3595 alloc_hash_table (max_cuid, &expr_hash_table, 0);
3596 alloc_rd_mem (last_basic_block, max_cuid);
3597 compute_hash_table (&expr_hash_table);
3598 if (gcse_file)
3599 dump_hash_table (gcse_file, "Expression", &expr_hash_table);
3600
3601 if (expr_hash_table.n_elems > 0)
3602 {
3603 compute_kill_rd ();
3604 compute_rd ();
3605 alloc_avail_expr_mem (last_basic_block, expr_hash_table.n_elems);
3606 compute_ae_gen (&expr_hash_table);
3607 compute_ae_kill (ae_gen, ae_kill, &expr_hash_table);
3608 compute_available (ae_gen, ae_kill, ae_out, ae_in);
3609 changed = classic_gcse ();
3610 free_avail_expr_mem ();
3611 }
3612
3613 free_rd_mem ();
3614 free_hash_table (&expr_hash_table);
3615
3616 if (gcse_file)
3617 {
3618 fprintf (gcse_file, "\n");
3619 fprintf (gcse_file, "GCSE of %s, pass %d: %d bytes needed, %d substs,",
3620 current_function_name (), pass, bytes_used, gcse_subst_count);
3621 fprintf (gcse_file, "%d insns created\n", gcse_create_count);
3622 }
3623
3624 return changed;
3625 }
3626
3627 /* Compute copy/constant propagation working variables. */
3628
3629 /* Local properties of assignments. */
3630 static sbitmap *cprop_pavloc;
3631 static sbitmap *cprop_absaltered;
3632
3633 /* Global properties of assignments (computed from the local properties). */
3634 static sbitmap *cprop_avin;
3635 static sbitmap *cprop_avout;
3636
3637 /* Allocate vars used for copy/const propagation. N_BLOCKS is the number of
3638 basic blocks. N_SETS is the number of sets. */
3639
3640 static void
3641 alloc_cprop_mem (int n_blocks, int n_sets)
3642 {
3643 cprop_pavloc = sbitmap_vector_alloc (n_blocks, n_sets);
3644 cprop_absaltered = sbitmap_vector_alloc (n_blocks, n_sets);
3645
3646 cprop_avin = sbitmap_vector_alloc (n_blocks, n_sets);
3647 cprop_avout = sbitmap_vector_alloc (n_blocks, n_sets);
3648 }
3649
3650 /* Free vars used by copy/const propagation. */
3651
3652 static void
3653 free_cprop_mem (void)
3654 {
3655 sbitmap_vector_free (cprop_pavloc);
3656 sbitmap_vector_free (cprop_absaltered);
3657 sbitmap_vector_free (cprop_avin);
3658 sbitmap_vector_free (cprop_avout);
3659 }
3660
3661 /* For each block, compute whether X is transparent. X is either an
3662 expression or an assignment [though we don't care which, for this context
3663 an assignment is treated as an expression]. For each block where an
3664 element of X is modified, set (SET_P == 1) or reset (SET_P == 0) the INDX
3665 bit in BMAP. */
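/* An illustrative example: if expression INDX is
   (plus (reg 100) (const_int 1)), then in every block containing a
   set of reg 100 this function sets (SET_P == 1) or resets
   (SET_P == 0) bit INDX of that block's BMAP row; with SET_P == 0
   and BMAP == transp, as in the PRE setup below, this records that
   the expression is not transparent in those blocks. */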
3666
3667 static void
3668 compute_transp (rtx x, int indx, sbitmap *bmap, int set_p)
3669 {
3670 int i, j;
3671 basic_block bb;
3672 enum rtx_code code;
3673 reg_set *r;
3674 const char *fmt;
3675
3676 /* repeat is used to turn tail-recursion into iteration since GCC
3677 can't do it when there's no return value. */
3678 repeat:
3679
3680 if (x == 0)
3681 return;
3682
3683 code = GET_CODE (x);
3684 switch (code)
3685 {
3686 case REG:
3687 if (set_p)
3688 {
3689 if (REGNO (x) < FIRST_PSEUDO_REGISTER)
3690 {
3691 FOR_EACH_BB (bb)
3692 if (TEST_BIT (reg_set_in_block[bb->index], REGNO (x)))
3693 SET_BIT (bmap[bb->index], indx);
3694 }
3695 else
3696 {
3697 for (r = reg_set_table[REGNO (x)]; r != NULL; r = r->next)
3698 SET_BIT (bmap[BLOCK_NUM (r->insn)], indx);
3699 }
3700 }
3701 else
3702 {
3703 if (REGNO (x) < FIRST_PSEUDO_REGISTER)
3704 {
3705 FOR_EACH_BB (bb)
3706 if (TEST_BIT (reg_set_in_block[bb->index], REGNO (x)))
3707 RESET_BIT (bmap[bb->index], indx);
3708 }
3709 else
3710 {
3711 for (r = reg_set_table[REGNO (x)]; r != NULL; r = r->next)
3712 RESET_BIT (bmap[BLOCK_NUM (r->insn)], indx);
3713 }
3714 }
3715
3716 return;
3717
3718 case MEM:
3719 FOR_EACH_BB (bb)
3720 {
3721 rtx list_entry = canon_modify_mem_list[bb->index];
3722
3723 while (list_entry)
3724 {
3725 rtx dest, dest_addr;
3726
3727 if (GET_CODE (XEXP (list_entry, 0)) == CALL_INSN)
3728 {
3729 if (set_p)
3730 SET_BIT (bmap[bb->index], indx);
3731 else
3732 RESET_BIT (bmap[bb->index], indx);
3733 break;
3734 }
3735 /* LIST_ENTRY must be an INSN of some kind that sets memory.
3736 Examine each hunk of memory that is modified. */
3737
3738 dest = XEXP (list_entry, 0);
3739 list_entry = XEXP (list_entry, 1);
3740 dest_addr = XEXP (list_entry, 0);
3741
3742 if (canon_true_dependence (dest, GET_MODE (dest), dest_addr,
3743 x, rtx_addr_varies_p))
3744 {
3745 if (set_p)
3746 SET_BIT (bmap[bb->index], indx);
3747 else
3748 RESET_BIT (bmap[bb->index], indx);
3749 break;
3750 }
3751 list_entry = XEXP (list_entry, 1);
3752 }
3753 }
3754
3755 x = XEXP (x, 0);
3756 goto repeat;
3757
3758 case PC:
3759 case CC0: /*FIXME*/
3760 case CONST:
3761 case CONST_INT:
3762 case CONST_DOUBLE:
3763 case CONST_VECTOR:
3764 case SYMBOL_REF:
3765 case LABEL_REF:
3766 case ADDR_VEC:
3767 case ADDR_DIFF_VEC:
3768 return;
3769
3770 default:
3771 break;
3772 }
3773
3774 for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
3775 {
3776 if (fmt[i] == 'e')
3777 {
3778 /* If we are about to do the last recursive call
3779 needed at this level, change it into iteration.
3780 This function is called enough to be worth it. */
3781 if (i == 0)
3782 {
3783 x = XEXP (x, i);
3784 goto repeat;
3785 }
3786
3787 compute_transp (XEXP (x, i), indx, bmap, set_p);
3788 }
3789 else if (fmt[i] == 'E')
3790 for (j = 0; j < XVECLEN (x, i); j++)
3791 compute_transp (XVECEXP (x, i, j), indx, bmap, set_p);
3792 }
3793 }
3794
3795 /* Top level routine to do the dataflow analysis needed by copy/const
3796 propagation. */
3797
3798 static void
3799 compute_cprop_data (void)
3800 {
3801 compute_local_properties (cprop_absaltered, cprop_pavloc, NULL, &set_hash_table);
3802 compute_available (cprop_pavloc, cprop_absaltered,
3803 cprop_avout, cprop_avin);
3804 }
3805
3806 /* Copy/constant propagation. */
3807
3808 /* Maximum number of register uses in an insn that we handle. */
3809 #define MAX_USES 8
3810
3811 /* Table of uses found in an insn.
3812 Allocated statically to avoid alloc/free complexity and overhead. */
3813 static struct reg_use reg_use_table[MAX_USES];
3814
3815 /* Index into `reg_use_table' while building it. */
3816 static int reg_use_count;
3817
3818 /* Set up a list of register numbers used in INSN. The found uses are stored
3819 in `reg_use_table'. `reg_use_count' is initialized to zero before entry,
3820 and contains the number of uses in the table upon exit.
3821
3822 ??? If a register appears multiple times we will record it multiple times.
3823 This doesn't hurt anything but it will slow things down. */
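/* A typical use, as in cprop_insn below (sketch):

     reg_use_count = 0;
     note_uses (&PATTERN (insn), find_used_regs, NULL);

   after which reg_use_table[0 .. reg_use_count - 1] holds the
   registers used by INSN, up to MAX_USES of them. */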
3824
3825 static void
3826 find_used_regs (rtx *xptr, void *data ATTRIBUTE_UNUSED)
3827 {
3828 int i, j;
3829 enum rtx_code code;
3830 const char *fmt;
3831 rtx x = *xptr;
3832
3833 /* repeat is used to turn tail-recursion into iteration since GCC
3834 can't do it when there's no return value. */
3835 repeat:
3836 if (x == 0)
3837 return;
3838
3839 code = GET_CODE (x);
3840 if (REG_P (x))
3841 {
3842 if (reg_use_count == MAX_USES)
3843 return;
3844
3845 reg_use_table[reg_use_count].reg_rtx = x;
3846 reg_use_count++;
3847 }
3848
3849 /* Recursively scan the operands of this expression. */
3850
3851 for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
3852 {
3853 if (fmt[i] == 'e')
3854 {
3855 /* If we are about to do the last recursive call
3856 needed at this level, change it into iteration.
3857 This function is called enough to be worth it. */
3858 if (i == 0)
3859 {
3860 x = XEXP (x, 0);
3861 goto repeat;
3862 }
3863
3864 find_used_regs (&XEXP (x, i), data);
3865 }
3866 else if (fmt[i] == 'E')
3867 for (j = 0; j < XVECLEN (x, i); j++)
3868 find_used_regs (&XVECEXP (x, i, j), data);
3869 }
3870 }
3871
3872 /* Try to replace all non-SET_DEST occurrences of FROM in INSN with TO.
3873 Returns nonzero if successful. */
3874
3875 static int
3876 try_replace_reg (rtx from, rtx to, rtx insn)
3877 {
3878 rtx note = find_reg_equal_equiv_note (insn);
3879 rtx src = 0;
3880 int success = 0;
3881 rtx set = single_set (insn);
3882
3883 validate_replace_src_group (from, to, insn);
3884 if (num_changes_pending () && apply_change_group ())
3885 success = 1;
3886
3887 /* Try to simplify SET_SRC if we have substituted a constant. */
3888 if (success && set && CONSTANT_P (to))
3889 {
3890 src = simplify_rtx (SET_SRC (set));
3891
3892 if (src)
3893 validate_change (insn, &SET_SRC (set), src, 0);
3894 }
3895
3896 /* If there is already a NOTE, update the expression in it with our
3897 replacement. */
3898 if (note != 0)
3899 XEXP (note, 0) = simplify_replace_rtx (XEXP (note, 0), from, to);
3900
3901 if (!success && set && reg_mentioned_p (from, SET_SRC (set)))
3902 {
3903 /* If the above failed and this is a single set, try to simplify the source of
3904 the set given our substitution. We could perhaps try this for multiple
3905 SETs, but it probably won't buy us anything. */
3906 src = simplify_replace_rtx (SET_SRC (set), from, to);
3907
3908 if (!rtx_equal_p (src, SET_SRC (set))
3909 && validate_change (insn, &SET_SRC (set), src, 0))
3910 success = 1;
3911
3912 /* If we've failed to do replacement, have a single SET, don't already
3913 have a note, and the SET isn't special (no extract destination),
3914 add a REG_EQUAL note so we don't lose information. */
3915 if (!success && note == 0 && set != 0
3916 && GET_CODE (XEXP (set, 0)) != ZERO_EXTRACT
3917 && GET_CODE (XEXP (set, 0)) != SIGN_EXTRACT)
3918 note = set_unique_reg_note (insn, REG_EQUAL, copy_rtx (src));
3919 }
3920
3921 /* The REG_EQUAL note may get simplified into a plain register.
3922 We don't allow that, so remove the note. This ought not to
3923 happen, because the previous code ought to have synthesized a
3924 reg-reg move, but be on the safe side. */
3925 if (note && REG_P (XEXP (note, 0)))
3926 remove_note (insn, note);
3927
3928 return success;
3929 }
3930
3931 /* Find a set of register REGNO that is available on entry to INSN's block.
3932 Returns NULL if no such set is found. */
3933
3934 static struct expr *
3935 find_avail_set (int regno, rtx insn)
3936 {
3937 /* SET1 contains the last set found that can be returned to the caller for
3938 use in a substitution. */
3939 struct expr *set1 = 0;
3940
3941 /* Loops are not possible here. To get a loop we would need two sets
3942 available at the start of the block containing INSN, i.e. we would
3943 need two sets like this available at the start of the block:
3944
3945 (set (reg X) (reg Y))
3946 (set (reg Y) (reg X))
3947
3948 This cannot happen since the set of (reg Y) would have killed the
3949 set of (reg X) making it unavailable at the start of this block. */
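/* For example (an illustrative chain): if

     (set (reg Y) (const_int 3))
     (set (reg X) (reg Y))

   are both available at the start of the block, a use of (reg X)
   follows the chain X -> Y; the set of Y is the last one recorded,
   so the use of X can ultimately be replaced by the constant 3. */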
3950 while (1)
3951 {
3952 rtx src;
3953 struct expr *set = lookup_set (regno, &set_hash_table);
3954
3955 /* Find a set that is available at the start of the block
3956 which contains INSN. */
3957 while (set)
3958 {
3959 if (TEST_BIT (cprop_avin[BLOCK_NUM (insn)], set->bitmap_index))
3960 break;
3961 set = next_set (regno, set);
3962 }
3963
3964 /* If no available set was found we've reached the end of the
3965 (possibly empty) copy chain. */
3966 if (set == 0)
3967 break;
3968
3969 if (GET_CODE (set->expr) != SET)
3970 abort ();
3971
3972 src = SET_SRC (set->expr);
3973
3974 /* We know the set is available.
3975 Now check that SRC is ANTLOC (i.e. none of the source operands
3976 have changed since the start of the block).
3977
3978 If the source operand changed, we may still use it for the next
3979 iteration of this loop, but we may not use it for substitutions. */
3980
3981 if (gcse_constant_p (src) || oprs_not_set_p (src, insn))
3982 set1 = set;
3983
3984 /* If the source of the set is anything except a register, then
3985 we have reached the end of the copy chain. */
3986 if (GET_CODE (src) != REG)
3987 break;
3988
3989 /* Follow the copy chain, i.e. start another iteration of the loop
3990 and see if we have an available copy into SRC. */
3991 regno = REGNO (src);
3992 }
3993
3994 /* SET1 holds the last set that was available and anticipatable at
3995 INSN. */
3996 return set1;
3997 }
3998
3999 /* Subroutine of cprop_insn that tries to propagate constants into
4000 JUMP_INSNS. JUMP must be a conditional jump. If SETCC is non-NULL
4001 it is the instruction that immediately precedes JUMP, and must be a
4002 single SET of a register. FROM is what we will try to replace,
4003 SRC is the constant we will try to substitute for it. Returns nonzero
4004 if a change was made. */
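/* A hypothetical RTL sketch of the transformation: with SETCC

     (set (reg 100) (eq (reg 101) (const_int 0)))

   and JUMP

     (set (pc) (if_then_else (ne (reg 100) (const_int 0))
                             (label_ref 23) (pc)))

   substituting a known constant for (reg 101) lets the condition fold,
   so SET_SRC of the jump simplifies to either (label_ref 23), making
   the jump unconditional, or to (pc), allowing the jump's deletion. */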
4005
4006 static int
4007 cprop_jump (basic_block bb, rtx setcc, rtx jump, rtx from, rtx src)
4008 {
4009 rtx new, set_src, note_src;
4010 rtx set = pc_set (jump);
4011 rtx note = find_reg_equal_equiv_note (jump);
4012
4013 if (note)
4014 {
4015 note_src = XEXP (note, 0);
4016 if (GET_CODE (note_src) == EXPR_LIST)
4017 note_src = NULL_RTX;
4018 }
4019 else note_src = NULL_RTX;
4020
4021 /* Prefer REG_EQUAL notes except those containing EXPR_LISTs. */
4022 set_src = note_src ? note_src : SET_SRC (set);
4023
4024 /* First substitute the SETCC condition into the JUMP instruction,
4025 then substitute FROM's value (SRC) into this expanded JUMP. */
4026 if (setcc != NULL_RTX
4027 && !modified_between_p (from, setcc, jump)
4028 && !modified_between_p (src, setcc, jump))
4029 {
4030 rtx setcc_src;
4031 rtx setcc_set = single_set (setcc);
4032 rtx setcc_note = find_reg_equal_equiv_note (setcc);
4033 setcc_src = (setcc_note && GET_CODE (XEXP (setcc_note, 0)) != EXPR_LIST)
4034 ? XEXP (setcc_note, 0) : SET_SRC (setcc_set);
4035 set_src = simplify_replace_rtx (set_src, SET_DEST (setcc_set),
4036 setcc_src);
4037 }
4038 else
4039 setcc = NULL_RTX;
4040
4041 new = simplify_replace_rtx (set_src, from, src);
4042
4043 /* If no simplification can be made, then try the next register. */
4044 if (rtx_equal_p (new, SET_SRC (set)))
4045 return 0;
4046
4047 /* If this is now a no-op, delete it; otherwise this must be a valid insn. */
4048 if (new == pc_rtx)
4049 delete_insn (jump);
4050 else
4051 {
4052 /* Ensure the value computed inside the jump insn is equivalent
4053 to the one computed by SETCC. */
4054 if (setcc && modified_in_p (new, setcc))
4055 return 0;
4056 if (! validate_change (jump, &SET_SRC (set), new, 0))
4057 {
4058 /* When (some) constants are not valid in a comparison, and there
4059 are two registers to be replaced by constants before the entire
4060 comparison can be folded into a constant, we need to keep
4061 intermediate information in REG_EQUAL notes. For targets with
4062 separate compare insns, such notes are added by try_replace_reg.
4063 When we have a combined compare-and-branch instruction, however,
4064 we need to attach a note to the branch itself to make this
4065 optimization work. */
4066
4067 if (!rtx_equal_p (new, note_src))
4068 set_unique_reg_note (jump, REG_EQUAL, copy_rtx (new));
4069 return 0;
4070 }
4071
4072 /* Remove REG_EQUAL note after simplification. */
4073 if (note_src)
4074 remove_note (jump, note);
4075
4076 /* If this has turned into an unconditional jump,
4077 then put a barrier after it so that the unreachable
4078 code will be deleted. */
4079 if (GET_CODE (SET_SRC (set)) == LABEL_REF)
4080 emit_barrier_after (jump);
4081 }
4082
4083 #ifdef HAVE_cc0
4084 /* Delete the cc0 setter. */
4085 if (setcc != NULL && CC0_P (SET_DEST (single_set (setcc))))
4086 delete_insn (setcc);
4087 #endif
4088
4089 run_jump_opt_after_gcse = 1;
4090
4091 const_prop_count++;
4092 if (gcse_file != NULL)
4093 {
4094 fprintf (gcse_file,
4095 "CONST-PROP: Replacing reg %d in jump_insn %d with constant ",
4096 REGNO (from), INSN_UID (jump));
4097 print_rtl (gcse_file, src);
4098 fprintf (gcse_file, "\n");
4099 }
4100 purge_dead_edges (bb);
4101
4102 return 1;
4103 }
4104
4105 static bool
4106 constprop_register (rtx insn, rtx from, rtx to, int alter_jumps)
4107 {
4108 rtx sset;
4109
4110 /* Check for reg or cc0 setting instructions followed by
4111 conditional branch instructions first. */
4112 if (alter_jumps
4113 && (sset = single_set (insn)) != NULL
4114 && NEXT_INSN (insn)
4115 && any_condjump_p (NEXT_INSN (insn)) && onlyjump_p (NEXT_INSN (insn)))
4116 {
4117 rtx dest = SET_DEST (sset);
4118 if ((REG_P (dest) || CC0_P (dest))
4119 && cprop_jump (BLOCK_FOR_INSN (insn), insn, NEXT_INSN (insn), from, to))
4120 return 1;
4121 }
4122
4123 /* Handle normal insns next. */
4124 if (GET_CODE (insn) == INSN
4125 && try_replace_reg (from, to, insn))
4126 return 1;
4127
4128 /* Try to propagate a CONST_INT into a conditional jump.
4129 We're pretty specific about what we will handle in this
4130 code; we can extend it as necessary over time.
4131
4132 Right now the insn in question must look like
4133 (set (pc) (if_then_else ...)) */
4134 else if (alter_jumps && any_condjump_p (insn) && onlyjump_p (insn))
4135 return cprop_jump (BLOCK_FOR_INSN (insn), NULL, insn, from, to);
4136 return 0;
4137 }
4138
4139 /* Perform constant and copy propagation on INSN.
4140 The result is nonzero if a change was made. */
4141
4142 static int
4143 cprop_insn (rtx insn, int alter_jumps)
4144 {
4145 struct reg_use *reg_used;
4146 int changed = 0;
4147 rtx note;
4148
4149 if (!INSN_P (insn))
4150 return 0;
4151
4152 reg_use_count = 0;
4153 note_uses (&PATTERN (insn), find_used_regs, NULL);
4154
4155 note = find_reg_equal_equiv_note (insn);
4156
4157 /* We may win even when propagating constants into notes. */
4158 if (note)
4159 find_used_regs (&XEXP (note, 0), NULL);
4160
4161 for (reg_used = &reg_use_table[0]; reg_use_count > 0;
4162 reg_used++, reg_use_count--)
4163 {
4164 unsigned int regno = REGNO (reg_used->reg_rtx);
4165 rtx pat, src;
4166 struct expr *set;
4167
4168 /* Ignore registers created by GCSE.
4169 We do this because ... */
4170 if (regno >= max_gcse_regno)
4171 continue;
4172
4173 /* If the register has already been set in this block, there's
4174 nothing we can do. */
4175 if (! oprs_not_set_p (reg_used->reg_rtx, insn))
4176 continue;
4177
4178 /* Find an assignment that sets reg_used and is available
4179 at the start of the block. */
4180 set = find_avail_set (regno, insn);
4181 if (! set)
4182 continue;
4183
4184 pat = set->expr;
4185 /* ??? We might be able to handle PARALLELs. Later. */
4186 if (GET_CODE (pat) != SET)
4187 abort ();
4188
4189 src = SET_SRC (pat);
4190
4191 /* Constant propagation. */
4192 if (gcse_constant_p (src))
4193 {
4194 if (constprop_register (insn, reg_used->reg_rtx, src, alter_jumps))
4195 {
4196 changed = 1;
4197 const_prop_count++;
4198 if (gcse_file != NULL)
4199 {
4200 fprintf (gcse_file, "GLOBAL CONST-PROP: Replacing reg %d in ", regno);
4201 fprintf (gcse_file, "insn %d with constant ", INSN_UID (insn));
4202 print_rtl (gcse_file, src);
4203 fprintf (gcse_file, "\n");
4204 }
4205 if (INSN_DELETED_P (insn))
4206 return 1;
4207 }
4208 }
4209 else if (GET_CODE (src) == REG
4210 && REGNO (src) >= FIRST_PSEUDO_REGISTER
4211 && REGNO (src) != regno)
4212 {
4213 if (try_replace_reg (reg_used->reg_rtx, src, insn))
4214 {
4215 changed = 1;
4216 copy_prop_count++;
4217 if (gcse_file != NULL)
4218 {
4219 fprintf (gcse_file, "GLOBAL COPY-PROP: Replacing reg %d in insn %d",
4220 regno, INSN_UID (insn));
4221 fprintf (gcse_file, " with reg %d\n", REGNO (src));
4222 }
4223
4224 /* The original insn setting reg_used may or may not now be
4225 deletable. We leave the deletion to flow. */
4226 /* FIXME: If it turns out that the insn isn't deletable,
4227 then we may have unnecessarily extended register lifetimes
4228 and made things worse. */
4229 }
4230 }
4231 }
4232
4233 return changed;
4234 }
4235
4236 /* Like find_used_regs, but avoid recording uses that appear in
4237 input-output contexts such as zero_extract or pre_dec. This
4238 restricts the cases we consider to those for which local cprop
4239 can legitimately make replacements. */
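/* For example (illustrative): in

     (set (zero_extract (reg 100) ...) ...)

   the use of (reg 100) is really an input-output operand, so it is
   deliberately not recorded here; substituting a constant for it
   would produce invalid RTL. */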
4240
4241 static void
4242 local_cprop_find_used_regs (rtx *xptr, void *data)
4243 {
4244 rtx x = *xptr;
4245
4246 if (x == 0)
4247 return;
4248
4249 switch (GET_CODE (x))
4250 {
4251 case ZERO_EXTRACT:
4252 case SIGN_EXTRACT:
4253 case STRICT_LOW_PART:
4254 return;
4255
4256 case PRE_DEC:
4257 case PRE_INC:
4258 case POST_DEC:
4259 case POST_INC:
4260 case PRE_MODIFY:
4261 case POST_MODIFY:
4262 /* These can only legitimately appear this early in the context of
4263 stack pushes for function arguments, but handle all of the
4264 codes nonetheless. */
4265 return;
4266
4267 case SUBREG:
4268 /* Setting a subreg of a register larger than word_mode leaves
4269 the non-written words unchanged. */
4270 if (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x))) > BITS_PER_WORD)
4271 return;
4272 break;
4273
4274 default:
4275 break;
4276 }
4277
4278 find_used_regs (xptr, data);
4279 }
4280
4281 /* LIBCALL_SP is a zero-terminated array of insns at the end of a libcall;
4282 their REG_EQUAL notes need updating. */
4283
4284 static bool
4285 do_local_cprop (rtx x, rtx insn, int alter_jumps, rtx *libcall_sp)
4286 {
4287 rtx newreg = NULL, newcnst = NULL;
4288
4289 /* Rule out USE instructions and ASM statements as we don't want to
4290 change the hard registers mentioned. */
4291 if (GET_CODE (x) == REG
4292 && (REGNO (x) >= FIRST_PSEUDO_REGISTER
4293 || (GET_CODE (PATTERN (insn)) != USE
4294 && asm_noperands (PATTERN (insn)) < 0)))
4295 {
4296 cselib_val *val = cselib_lookup (x, GET_MODE (x), 0);
4297 struct elt_loc_list *l;
4298
4299 if (!val)
4300 return false;
4301 for (l = val->locs; l; l = l->next)
4302 {
4303 rtx this_rtx = l->loc;
4304 rtx note;
4305
4306 if (l->in_libcall)
4307 continue;
4308
4309 if (gcse_constant_p (this_rtx))
4310 newcnst = this_rtx;
4311 if (REG_P (this_rtx) && REGNO (this_rtx) >= FIRST_PSEUDO_REGISTER
4312 /* Don't copy propagate if the register has a REG_EQUIV note attached.
4313 At this point only function parameters should have
4314 REG_EQUIV notes, and if the argument slot is used somewhere
4315 explicitly, the address of the parameter has been taken,
4316 so we should not extend the lifetime of the pseudo. */
4317 && (!(note = find_reg_note (l->setting_insn, REG_EQUIV, NULL_RTX))
4318 || GET_CODE (XEXP (note, 0)) != MEM))
4319 newreg = this_rtx;
4320 }
4321 if (newcnst && constprop_register (insn, x, newcnst, alter_jumps))
4322 {
4323 /* If we find a case where we can't make the retval REG_EQUAL notes
4324 match the new register, we either have to abandon this replacement,
4325 fix delete_trivially_dead_insns to preserve the setting insn,
4326 or make it delete the REG_EQUAL note, and fix up all passes that
4327 require the REG_EQUAL note there. */
4328 if (!adjust_libcall_notes (x, newcnst, insn, libcall_sp))
4329 abort ();
4330 if (gcse_file != NULL)
4331 {
4332 fprintf (gcse_file, "LOCAL CONST-PROP: Replacing reg %d in ",
4333 REGNO (x));
4334 fprintf (gcse_file, "insn %d with constant ",
4335 INSN_UID (insn));
4336 print_rtl (gcse_file, newcnst);
4337 fprintf (gcse_file, "\n");
4338 }
4339 const_prop_count++;
4340 return true;
4341 }
4342 else if (newreg && newreg != x && try_replace_reg (x, newreg, insn))
4343 {
4344 adjust_libcall_notes (x, newreg, insn, libcall_sp);
4345 if (gcse_file != NULL)
4346 {
4347 fprintf (gcse_file,
4348 "LOCAL COPY-PROP: Replacing reg %d in insn %d",
4349 REGNO (x), INSN_UID (insn));
4350 fprintf (gcse_file, " with reg %d\n", REGNO (newreg));
4351 }
4352 copy_prop_count++;
4353 return true;
4354 }
4355 }
4356 return false;
4357 }
4358
4359 /* LIBCALL_SP is a zero-terminated array of insns at the end of a libcall;
4360 their REG_EQUAL notes need updating to reflect that OLDREG has been
4361 replaced with NEWVAL in INSN. Return true if all substitutions could
4362 be made. */
4363 static bool
4364 adjust_libcall_notes (rtx oldreg, rtx newval, rtx insn, rtx *libcall_sp)
4365 {
4366 rtx end;
4367
4368 while ((end = *libcall_sp++))
4369 {
4370 rtx note = find_reg_equal_equiv_note (end);
4371
4372 if (! note)
4373 continue;
4374
4375 if (REG_P (newval))
4376 {
4377 if (reg_set_between_p (newval, PREV_INSN (insn), end))
4378 {
4379 do
4380 {
4381 note = find_reg_equal_equiv_note (end);
4382 if (! note)
4383 continue;
4384 if (reg_mentioned_p (newval, XEXP (note, 0)))
4385 return false;
4386 }
4387 while ((end = *libcall_sp++));
4388 return true;
4389 }
4390 }
4391 XEXP (note, 0) = replace_rtx (XEXP (note, 0), oldreg, newval);
4392 insn = end;
4393 }
4394 return true;
4395 }
4396
4397 #define MAX_NESTED_LIBCALLS 9
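/* Hedged note on the stack below: libcall sequences are delimited by
   REG_LIBCALL/REG_RETVAL note pairs, and local_cprop_pass pushes an
   entry at each REG_LIBCALL and pops it at the matching REG_RETVAL,
   so this constant bounds the nesting depth we can track. */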
4398
4399 static void
4400 local_cprop_pass (int alter_jumps)
4401 {
4402 rtx insn;
4403 struct reg_use *reg_used;
4404 rtx libcall_stack[MAX_NESTED_LIBCALLS + 1], *libcall_sp;
4405 bool changed = false;
4406
4407 cselib_init ();
4408 libcall_sp = &libcall_stack[MAX_NESTED_LIBCALLS];
4409 *libcall_sp = 0;
4410 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
4411 {
4412 if (INSN_P (insn))
4413 {
4414 rtx note = find_reg_note (insn, REG_LIBCALL, NULL_RTX);
4415
4416 if (note)
4417 {
4418 if (libcall_sp == libcall_stack)
4419 abort ();
4420 *--libcall_sp = XEXP (note, 0);
4421 }
4422 note = find_reg_note (insn, REG_RETVAL, NULL_RTX);
4423 if (note)
4424 libcall_sp++;
4425 note = find_reg_equal_equiv_note (insn);
4426 do
4427 {
4428 reg_use_count = 0;
4429 note_uses (&PATTERN (insn), local_cprop_find_used_regs, NULL);
4430 if (note)
4431 local_cprop_find_used_regs (&XEXP (note, 0), NULL);
4432
4433 for (reg_used = &reg_use_table[0]; reg_use_count > 0;
4434 reg_used++, reg_use_count--)
4435 if (do_local_cprop (reg_used->reg_rtx, insn, alter_jumps,
4436 libcall_sp))
4437 {
4438 changed = true;
4439 break;
4440 }
4441 if (INSN_DELETED_P (insn))
4442 break;
4443 }
4444 while (reg_use_count);
4445 }
4446 cselib_process_insn (insn);
4447 }
4448 cselib_finish ();
4449 /* Global analysis may get into infinite loops for unreachable blocks. */
4450 if (changed && alter_jumps)
4451 {
4452 delete_unreachable_blocks ();
4453 free_reg_set_mem ();
4454 alloc_reg_set_mem (max_reg_num ());
4455 compute_sets (get_insns ());
4456 }
4457 }
4458
4459 /* Forward propagate copies. This includes copies and constants. Return
4460 nonzero if a change was made. */
4461
4462 static int
4463 cprop (int alter_jumps)
4464 {
4465 int changed;
4466 basic_block bb;
4467 rtx insn;
4468
4469 /* Note we start at block 1. */
4470 if (ENTRY_BLOCK_PTR->next_bb == EXIT_BLOCK_PTR)
4471 {
4472 if (gcse_file != NULL)
4473 fprintf (gcse_file, "\n");
4474 return 0;
4475 }
4476
4477 changed = 0;
4478 FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR->next_bb->next_bb, EXIT_BLOCK_PTR, next_bb)
4479 {
4480 /* Reset tables used to keep track of what's still valid [since the
4481 start of the block]. */
4482 reset_opr_set_tables ();
4483
4484 for (insn = BB_HEAD (bb);
4485 insn != NULL && insn != NEXT_INSN (BB_END (bb));
4486 insn = NEXT_INSN (insn))
4487 if (INSN_P (insn))
4488 {
4489 changed |= cprop_insn (insn, alter_jumps);
4490
4491 /* Keep track of everything modified by this insn. */
4492 /* ??? Need to be careful w.r.t. mods done to INSN. Don't
4493 call mark_oprs_set if we turned the insn into a NOTE. */
4494 if (GET_CODE (insn) != NOTE)
4495 mark_oprs_set (insn);
4496 }
4497 }
4498
4499 if (gcse_file != NULL)
4500 fprintf (gcse_file, "\n");
4501
4502 return changed;
4503 }
4504
4505 /* Similar to get_condition, only the resulting condition must be
4506 valid at JUMP, instead of at EARLIEST.
4507
4508 This differs from noce_get_condition in ifcvt.c in that we prefer not to
4509 settle for the condition variable in the jump instruction being integral.
4510 We prefer to be able to record the value of a user variable, rather than
4511 the value of a temporary used in a condition. This could be solved by
4512 recording the value of *every* register scanned by canonicalize_condition,
4513 but this would require some code reorganization. */
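/* For example (a hedged sketch): for source code "if (x == 2)" the
   jump often tests a compiler temporary, as in

     (set (reg 100) (eq (reg x) (const_int 2)))
     (set (pc) (if_then_else (ne (reg 100) (const_int 0)) ...))

   and canonicalize_condition lets us recover (eq (reg x) (const_int 2));
   the loops below then verify that the recovered condition is still
   valid at the jump itself. */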
4514
4515 rtx
4516 fis_get_condition (rtx jump)
4517 {
4518 rtx cond, set, tmp, insn, earliest;
4519 bool reverse;
4520
4521 if (! any_condjump_p (jump))
4522 return NULL_RTX;
4523
4524 set = pc_set (jump);
4525 cond = XEXP (SET_SRC (set), 0);
4526
4527 /* If this branches to JUMP_LABEL when the condition is false,
4528 reverse the condition. */
4529 reverse = (GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF
4530 && XEXP (XEXP (SET_SRC (set), 2), 0) == JUMP_LABEL (jump));
4531
4532 /* Use canonicalize_condition to do the dirty work of manipulating
4533 MODE_CC values and COMPARE rtx codes. */
4534 tmp = canonicalize_condition (jump, cond, reverse, &earliest, NULL_RTX,
4535 false);
4536 if (!tmp)
4537 return NULL_RTX;
4538
4539 /* Verify that the given condition is valid at JUMP by virtue of not
4540 having been modified since EARLIEST. */
4541 for (insn = earliest; insn != jump; insn = NEXT_INSN (insn))
4542 if (INSN_P (insn) && modified_in_p (tmp, insn))
4543 break;
4544 if (insn == jump)
4545 return tmp;
4546
4547 /* The condition was modified. See if we can get a partial result
4548 that doesn't follow all the reversals. Perhaps combine can fold
4549 them together later. */
4550 tmp = XEXP (tmp, 0);
4551 if (!REG_P (tmp) || GET_MODE_CLASS (GET_MODE (tmp)) != MODE_INT)
4552 return NULL_RTX;
4553 tmp = canonicalize_condition (jump, cond, reverse, &earliest, tmp,
4554 false);
4555 if (!tmp)
4556 return NULL_RTX;
4557
4558 /* For sanity's sake, re-validate the new result. */
4559 for (insn = earliest; insn != jump; insn = NEXT_INSN (insn))
4560 if (INSN_P (insn) && modified_in_p (tmp, insn))
4561 return NULL_RTX;
4562
4563 return tmp;
4564 }
4565
4566 /* Check the comparison COND to see if we can safely form an implicit set from
4567 it. COND is either an EQ or NE comparison. */
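/* For example (illustrative): following "if (x == 0.0)" on a target
   that honors signed zeros, we may not treat the then-branch as
   though x were +0.0, because x could equally be -0.0 there: the
   comparison 0.0 == -0.0 is true even though the values differ. */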
4568
4569 static bool
4570 implicit_set_cond_p (rtx cond)
4571 {
4572 enum machine_mode mode = GET_MODE (XEXP (cond, 0));
4573 rtx cst = XEXP (cond, 1);
4574
4575 /* We can't perform this optimization if either operand might be or might
4576 contain a signed zero. */
4577 if (HONOR_SIGNED_ZEROS (mode))
4578 {
4579 /* It is sufficient to check if CST is or contains a zero. We must
4580 handle float, complex, and vector. If any subpart is a zero, then
4581 the optimization can't be performed. */
4582 /* ??? The complex and vector checks are not implemented yet. We just
4583 always return zero for them. */
4584 if (GET_CODE (cst) == CONST_DOUBLE)
4585 {
4586 REAL_VALUE_TYPE d;
4587 REAL_VALUE_FROM_CONST_DOUBLE (d, cst);
4588 if (REAL_VALUES_EQUAL (d, dconst0))
4589 return 0;
4590 }
4591 else
4592 return 0;
4593 }
4594
4595 return gcse_constant_p (cst);
4596 }
4597
4598 /* Find the implicit sets of a function. An "implicit set" is a constraint
4599 on the value of a variable, implied by a conditional jump. For example,
4600 following "if (x == 2)", the then branch may be optimized as though the
4601 conditional performed an "explicit set", in this example, "x = 2". This
4602 function records the set patterns that are implicit at the start of each
4603 basic block. */
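/* Concretely (an illustrative sketch): for "if (x == 2)" the pattern
   recorded for the branch destination is

     (set (reg x) (const_int 2))

   provided that destination has no predecessor other than this
   conditional edge. */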
4604
4605 static void
4606 find_implicit_sets (void)
4607 {
4608 basic_block bb, dest;
4609 unsigned int count;
4610 rtx cond, new;
4611
4612 count = 0;
4613 FOR_EACH_BB (bb)
4614 /* Check for more than one successor. */
4615 if (bb->succ && bb->succ->succ_next)
4616 {
4617 cond = fis_get_condition (BB_END (bb));
4618
4619 if (cond
4620 && (GET_CODE (cond) == EQ || GET_CODE (cond) == NE)
4621 && GET_CODE (XEXP (cond, 0)) == REG
4622 && REGNO (XEXP (cond, 0)) >= FIRST_PSEUDO_REGISTER
4623 && implicit_set_cond_p (cond))
4624 {
4625 dest = GET_CODE (cond) == EQ ? BRANCH_EDGE (bb)->dest
4626 : FALLTHRU_EDGE (bb)->dest;
4627
4628 if (dest && ! dest->pred->pred_next
4629 && dest != EXIT_BLOCK_PTR)
4630 {
4631 new = gen_rtx_SET (VOIDmode, XEXP (cond, 0),
4632 XEXP (cond, 1));
4633 implicit_sets[dest->index] = new;
4634 if (gcse_file)
4635 {
4636 fprintf (gcse_file, "Implicit set of reg %d in ",
4637 REGNO (XEXP (cond, 0)));
4638 fprintf (gcse_file, "basic block %d\n", dest->index);
4639 }
4640 count++;
4641 }
4642 }
4643 }
4644
4645 if (gcse_file)
4646 fprintf (gcse_file, "Found %d implicit sets\n", count);
4647 }
4648
4649 /* Perform one copy/constant propagation pass.
4650 PASS is the pass count. If CPROP_JUMPS is true, perform constant
4651 propagation into conditional jumps. If BYPASS_JUMPS is true,
4652 perform conditional jump bypassing optimizations. */
4653
4654 static int
4655 one_cprop_pass (int pass, int cprop_jumps, int bypass_jumps)
4656 {
4657 int changed = 0;
4658
4659 const_prop_count = 0;
4660 copy_prop_count = 0;
4661
4662 local_cprop_pass (cprop_jumps);
4663
4664 /* Determine implicit sets. */
4665 implicit_sets = xcalloc (last_basic_block, sizeof (rtx));
4666 find_implicit_sets ();
4667
4668 alloc_hash_table (max_cuid, &set_hash_table, 1);
4669 compute_hash_table (&set_hash_table);
4670
4671 /* Free implicit_sets before peak usage. */
4672 free (implicit_sets);
4673 implicit_sets = NULL;
4674
4675 if (gcse_file)
4676 dump_hash_table (gcse_file, "SET", &set_hash_table);
4677 if (set_hash_table.n_elems > 0)
4678 {
4679 alloc_cprop_mem (last_basic_block, set_hash_table.n_elems);
4680 compute_cprop_data ();
4681 changed = cprop (cprop_jumps);
4682 if (bypass_jumps)
4683 changed |= bypass_conditional_jumps ();
4684 free_cprop_mem ();
4685 }
4686
4687 free_hash_table (&set_hash_table);
4688
4689 if (gcse_file)
4690 {
4691 fprintf (gcse_file, "CPROP of %s, pass %d: %d bytes needed, ",
4692 current_function_name (), pass, bytes_used);
4693 fprintf (gcse_file, "%d const props, %d copy props\n\n",
4694 const_prop_count, copy_prop_count);
4695 }
4696 /* Global analysis may get into infinite loops for unreachable blocks. */
4697 if (changed && cprop_jumps)
4698 delete_unreachable_blocks ();
4699
4700 return changed;
4701 }
4702
4703 /* Bypass conditional jumps. */
4704
4705 /* The value of last_basic_block at the beginning of the jump_bypass
4706 pass. The use of redirect_edge_and_branch_force may introduce new
4707 basic blocks, but the data flow analysis is only valid for basic
4708 block indices less than bypass_last_basic_block. */
4709
4710 static int bypass_last_basic_block;
4711
4712 /* Find a set of REGNO to a constant that is available at the end of basic
4713 block BB. Returns NULL if no such set is found. Based heavily upon
4714 find_avail_set. */
4715
4716 static struct expr *
4717 find_bypass_set (int regno, int bb)
4718 {
4719 struct expr *result = 0;
4720
4721 for (;;)
4722 {
4723 rtx src;
4724 struct expr *set = lookup_set (regno, &set_hash_table);
4725
4726 while (set)
4727 {
4728 if (TEST_BIT (cprop_avout[bb], set->bitmap_index))
4729 break;
4730 set = next_set (regno, set);
4731 }
4732
4733 if (set == 0)
4734 break;
4735
4736 if (GET_CODE (set->expr) != SET)
4737 abort ();
4738
4739 src = SET_SRC (set->expr);
4740 if (gcse_constant_p (src))
4741 result = set;
4742
4743 if (GET_CODE (src) != REG)
4744 break;
4745
4746 regno = REGNO (src);
4747 }
4748 return result;
4749 }
4750
4751
4752 /* Subroutine of bypass_block that checks whether a pseudo is killed by
4753 any of the instructions inserted on an edge. Jump bypassing places
4754 condition code setters on CFG edges using insert_insn_on_edge. This
4755 function is required to check that our data flow analysis is still
4756 valid prior to commit_edge_insertions. */
4757
4758 static bool
4759 reg_killed_on_edge (rtx reg, edge e)
4760 {
4761 rtx insn;
4762
4763 for (insn = e->insns; insn; insn = NEXT_INSN (insn))
4764 if (INSN_P (insn) && reg_set_p (reg, insn))
4765 return true;
4766
4767 return false;
4768 }
4769
4770 /* Subroutine of bypass_conditional_jumps that attempts to bypass the given
4771 basic block BB which has more than one predecessor. If not NULL, SETCC
4772 is the first instruction of BB, which is immediately followed by JUMP_INSN
4773 JUMP. Otherwise, SETCC is NULL, and JUMP is the first insn of BB.
4774 Returns nonzero if a change was made.
4775
4776 During the jump bypassing pass, we may place copies of SETCC instructions
4777 on CFG edges. The following routine must be careful to pay attention to
4778 these inserted insns when performing its transformations. */
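/* A hedged example: suppose BB contains only

     (set (reg 100) (eq (reg 101) (const_int 0)))
     (set (pc) (if_then_else (ne (reg 100) (const_int 0))
                             (label_ref L1) (pc)))

   and on some incoming edge reg 101 is known to be 0. Then the jump
   is always taken when control arrives via that edge, so the edge can
   be redirected straight to L1's block, with a copy of the SETCC
   placed on the redirected edge. */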
4779
4780 static int
4781 bypass_block (basic_block bb, rtx setcc, rtx jump)
4782 {
4783 rtx insn, note;
4784 edge e, enext, edest;
4785 int i, change;
4786 int may_be_loop_header;
4787
4788 insn = (setcc != NULL) ? setcc : jump;
4789
4790 /* Determine set of register uses in INSN. */
4791 reg_use_count = 0;
4792 note_uses (&PATTERN (insn), find_used_regs, NULL);
4793 note = find_reg_equal_equiv_note (insn);
4794 if (note)
4795 find_used_regs (&XEXP (note, 0), NULL);
4796
4797 may_be_loop_header = false;
4798 for (e = bb->pred; e; e = e->pred_next)
4799 if (e->flags & EDGE_DFS_BACK)
4800 {
4801 may_be_loop_header = true;
4802 break;
4803 }
4804
4805 change = 0;
4806 for (e = bb->pred; e; e = enext)
4807 {
4808 enext = e->pred_next;
4809 if (e->flags & EDGE_COMPLEX)
4810 continue;
4811
4812 /* We can't redirect edges from new basic blocks. */
4813 if (e->src->index >= bypass_last_basic_block)
4814 continue;
4815
4816 /* The irreducible loops created by redirecting edges entering the
4817 loop from outside would decrease the effectiveness of some of the
4818 following optimizations, so prevent this. */
4819 if (may_be_loop_header
4820 && !(e->flags & EDGE_DFS_BACK))
4821 continue;
4822
4823 for (i = 0; i < reg_use_count; i++)
4824 {
4825 struct reg_use *reg_used = &reg_use_table[i];
4826 unsigned int regno = REGNO (reg_used->reg_rtx);
4827 basic_block dest, old_dest;
4828 struct expr *set;
4829 rtx src, new;
4830
4831 if (regno >= max_gcse_regno)
4832 continue;
4833
4834 set = find_bypass_set (regno, e->src->index);
4835
4836 if (! set)
4837 continue;
4838
4839 /* Check the data flow is valid after edge insertions. */
4840 if (e->insns && reg_killed_on_edge (reg_used->reg_rtx, e))
4841 continue;
4842
4843 src = SET_SRC (pc_set (jump));
4844
4845 if (setcc != NULL)
4846 src = simplify_replace_rtx (src,
4847 SET_DEST (PATTERN (setcc)),
4848 SET_SRC (PATTERN (setcc)));
4849
4850 new = simplify_replace_rtx (src, reg_used->reg_rtx,
4851 SET_SRC (set->expr));
4852
4853 /* Jump bypassing may have already placed instructions on
4854 edges of the CFG. We can't bypass an outgoing edge that
4855 has instructions associated with it, as these insns won't
4856 get executed if the incoming edge is redirected. */
4857
4858 if (new == pc_rtx)
4859 {
4860 edest = FALLTHRU_EDGE (bb);
4861 dest = edest->insns ? NULL : edest->dest;
4862 }
4863 else if (GET_CODE (new) == LABEL_REF)
4864 {
4865 dest = BLOCK_FOR_INSN (XEXP (new, 0));
4866 /* Don't bypass edges containing instructions. */
4867 for (edest = bb->succ; edest; edest = edest->succ_next)
4868 if (edest->dest == dest && edest->insns)
4869 {
4870 dest = NULL;
4871 break;
4872 }
4873 }
4874 else
4875 dest = NULL;
4876
4877 /* Avoid unification of the edge with other edges from the original
4878 branch. We would end up emitting the instruction on "both"
4879 edges. */
4880
4881 if (dest && setcc && !CC0_P (SET_DEST (PATTERN (setcc))))
4882 {
4883 edge e2;
4884 for (e2 = e->src->succ; e2; e2 = e2->succ_next)
4885 if (e2->dest == dest)
4886 break;
4887 if (e2)
4888 dest = NULL;
4889 }
4890
4891 old_dest = e->dest;
4892 if (dest != NULL
4893 && dest != old_dest
4894 && dest != EXIT_BLOCK_PTR)
4895 {
4896 redirect_edge_and_branch_force (e, dest);
4897
4898 /* Copy the register setter to the redirected edge.
4899 Don't copy CC0 setters, as CC0 is dead after jump. */
4900 if (setcc)
4901 {
4902 rtx pat = PATTERN (setcc);
4903 if (!CC0_P (SET_DEST (pat)))
4904 insert_insn_on_edge (copy_insn (pat), e);
4905 }
4906
4907 if (gcse_file != NULL)
4908 {
4909 fprintf (gcse_file, "JUMP-BYPASS: Proved reg %d in jump_insn %d equals constant ",
4910 regno, INSN_UID (jump));
4911 print_rtl (gcse_file, SET_SRC (set->expr));
4912 fprintf (gcse_file, "\nBypass edge from %d->%d to %d\n",
4913 e->src->index, old_dest->index, dest->index);
4914 }
4915 change = 1;
4916 break;
4917 }
4918 }
4919 }
4920 return change;
4921 }
4922
4923 /* Find basic blocks with more than one predecessor that only contain a
4924 single conditional jump. If the result of the comparison is known at
4925 compile-time from any incoming edge, redirect that edge to the
4926 appropriate target. Returns nonzero if a change was made.
4927
4928 This function is now mis-named, because we also handle indirect jumps. */
4929
4930 static int
4931 bypass_conditional_jumps (void)
4932 {
4933 basic_block bb;
4934 int changed;
4935 rtx setcc;
4936 rtx insn;
4937 rtx dest;
4938
4939 /* Note we start at block 1. */
4940 if (ENTRY_BLOCK_PTR->next_bb == EXIT_BLOCK_PTR)
4941 return 0;
4942
4943 bypass_last_basic_block = last_basic_block;
4944 mark_dfs_back_edges ();
4945
4946 changed = 0;
4947 FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR->next_bb->next_bb,
4948 EXIT_BLOCK_PTR, next_bb)
4949 {
4950 /* Check for more than one predecessor. */
4951 if (bb->pred && bb->pred->pred_next)
4952 {
4953 setcc = NULL_RTX;
4954 for (insn = BB_HEAD (bb);
4955 insn != NULL && insn != NEXT_INSN (BB_END (bb));
4956 insn = NEXT_INSN (insn))
4957 if (GET_CODE (insn) == INSN)
4958 {
4959 if (setcc)
4960 break;
4961 if (GET_CODE (PATTERN (insn)) != SET)
4962 break;
4963
4964 dest = SET_DEST (PATTERN (insn));
4965 if (REG_P (dest) || CC0_P (dest))
4966 setcc = insn;
4967 else
4968 break;
4969 }
4970 else if (GET_CODE (insn) == JUMP_INSN)
4971 {
4972 if ((any_condjump_p (insn) || computed_jump_p (insn))
4973 && onlyjump_p (insn))
4974 changed |= bypass_block (bb, setcc, insn);
4975 break;
4976 }
4977 else if (INSN_P (insn))
4978 break;
4979 }
4980 }
4981
4982 /* If we bypassed any register setting insns, we inserted a
4983 copy on the redirected edge. These need to be committed. */
4984 if (changed)
4985 commit_edge_insertions ();
4986
4987 return changed;
4988 }
4989
4990 /* Compute PRE+LCM working variables. */
4991
4992 /* Local properties of expressions. */
4993 /* Nonzero for expressions that are transparent in the block. */
4994 static sbitmap *transp;
4995
4996 /* Nonzero for expressions that are transparent at the end of the block.
4997 This is only zero for expressions killed by an abnormal critical edge
4998 created by a call. */
4999 static sbitmap *transpout;
5000
5001 /* Nonzero for expressions that are computed (available) in the block. */
5002 static sbitmap *comp;
5003
5004 /* Nonzero for expressions that are locally anticipatable in the block. */
5005 static sbitmap *antloc;
5006
5007 /* Nonzero for expressions where this block is an optimal computation
5008 point. */
5009 static sbitmap *pre_optimal;
5010
5011 /* Nonzero for expressions which are redundant in a particular block. */
5012 static sbitmap *pre_redundant;
5013
5014 /* Nonzero for expressions which should be inserted on a specific edge. */
5015 static sbitmap *pre_insert_map;
5016
5017 /* Nonzero for expressions which should be deleted in a specific block. */
5018 static sbitmap *pre_delete_map;
5019
5020 /* Contains the edge_list returned by pre_edge_lcm. */
5021 static struct edge_list *edge_list;
5022
5023 /* Redundant insns. */
5024 static sbitmap pre_redundant_insns;
5025
5026 /* Allocate vars used for PRE analysis. */
5027
5028 static void
5029 alloc_pre_mem (int n_blocks, int n_exprs)
5030 {
5031 transp = sbitmap_vector_alloc (n_blocks, n_exprs);
5032 comp = sbitmap_vector_alloc (n_blocks, n_exprs);
5033 antloc = sbitmap_vector_alloc (n_blocks, n_exprs);
5034
5035 pre_optimal = NULL;
5036 pre_redundant = NULL;
5037 pre_insert_map = NULL;
5038 pre_delete_map = NULL;
5039 ae_in = NULL;
5040 ae_out = NULL;
5041 ae_kill = sbitmap_vector_alloc (n_blocks, n_exprs);
5042
5043 /* pre_insert and pre_delete are allocated later. */
5044 }
5045
5046 /* Free vars used for PRE analysis. */
5047
5048 static void
5049 free_pre_mem (void)
5050 {
5051 sbitmap_vector_free (transp);
5052 sbitmap_vector_free (comp);
5053
5054 /* ANTLOC and AE_KILL are freed just after pre_lcm finishes. */
5055
5056 if (pre_optimal)
5057 sbitmap_vector_free (pre_optimal);
5058 if (pre_redundant)
5059 sbitmap_vector_free (pre_redundant);
5060 if (pre_insert_map)
5061 sbitmap_vector_free (pre_insert_map);
5062 if (pre_delete_map)
5063 sbitmap_vector_free (pre_delete_map);
5064 if (ae_in)
5065 sbitmap_vector_free (ae_in);
5066 if (ae_out)
5067 sbitmap_vector_free (ae_out);
5068
5069 transp = comp = NULL;
5070 pre_optimal = pre_redundant = pre_insert_map = pre_delete_map = NULL;
5071 ae_in = ae_out = NULL;
5072 }
5073
5074 /* Top level routine to do the dataflow analysis needed by PRE. */
5075
5076 static void
5077 compute_pre_data (void)
5078 {
5079 sbitmap trapping_expr;
5080 basic_block bb;
5081 unsigned int ui;
5082
5083 compute_local_properties (transp, comp, antloc, &expr_hash_table);
5084 sbitmap_vector_zero (ae_kill, last_basic_block);
5085
5086 /* Collect expressions which might trap. */
5087 trapping_expr = sbitmap_alloc (expr_hash_table.n_elems);
5088 sbitmap_zero (trapping_expr);
5089 for (ui = 0; ui < expr_hash_table.size; ui++)
5090 {
5091 struct expr *e;
5092 for (e = expr_hash_table.table[ui]; e != NULL; e = e->next_same_hash)
5093 if (may_trap_p (e->expr))
5094 SET_BIT (trapping_expr, e->bitmap_index);
5095 }
5096
5097 /* Compute ae_kill for each basic block using:
5098
5099 ~(TRANSP | COMP)
5100
5101 This is significantly faster than compute_ae_kill. */
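/* In other words (a restatement): an expression is killed at the end
   of a block iff its operands are modified in the block (not TRANSP)
   and the block does not compute it afresh (not COMP). */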
5102
5103 FOR_EACH_BB (bb)
5104 {
5105 edge e;
5106
5107 /* If the current block is the destination of an abnormal edge, we
5108 kill all trapping expressions because we won't be able to properly
5109 place the instruction on the edge. So make them neither
5110 anticipatable nor transparent. This is fairly conservative. */
5111 for (e = bb->pred; e ; e = e->pred_next)
5112 if (e->flags & EDGE_ABNORMAL)
5113 {
5114 sbitmap_difference (antloc[bb->index], antloc[bb->index], trapping_expr);
5115 sbitmap_difference (transp[bb->index], transp[bb->index], trapping_expr);
5116 break;
5117 }
5118
5119 sbitmap_a_or_b (ae_kill[bb->index], transp[bb->index], comp[bb->index]);
5120 sbitmap_not (ae_kill[bb->index], ae_kill[bb->index]);
5121 }
5122
5123 edge_list = pre_edge_lcm (gcse_file, expr_hash_table.n_elems, transp, comp, antloc,
5124 ae_kill, &pre_insert_map, &pre_delete_map);
5125 sbitmap_vector_free (antloc);
5126 antloc = NULL;
5127 sbitmap_vector_free (ae_kill);
5128 ae_kill = NULL;
5129 sbitmap_free (trapping_expr);
5130 }
5131
5132 /* PRE utilities */
5133
5134 /* Return nonzero if an occurrence of expression EXPR in OCCR_BB would reach
5135 block BB.
5136
5137 VISITED is a pointer to a working buffer for tracking which BB's have
5138 been visited. It is NULL for the top-level call.
5139
5140 We treat reaching expressions that go through blocks containing the same
5141 reaching expression as "not reaching". E.g. if EXPR is generated in blocks
5142 2 and 3, INSN is in block 4, and 2->3->4, we treat the expression in block
5143 2 as not reaching. The intent is to improve the probability of finding
5144 only one reaching expression and to reduce register lifetimes by picking
5145 the closest such expression. */
5146
5147 static int
5148 pre_expr_reaches_here_p_work (basic_block occr_bb, struct expr *expr, basic_block bb, char *visited)
5149 {
5150 edge pred;
5151
5152 for (pred = bb->pred; pred != NULL; pred = pred->pred_next)
5153 {
5154 basic_block pred_bb = pred->src;
5155
5156 if (pred->src == ENTRY_BLOCK_PTR
5157 /* Has this predecessor already been visited? */
5158 || visited[pred_bb->index])
5159 ;/* Nothing to do. */
5160
5161 /* Does this predecessor generate this expression? */
5162 else if (TEST_BIT (comp[pred_bb->index], expr->bitmap_index))
5163 {
5164 /* Is this the occurrence we're looking for?
5165 Note that there's only one generating occurrence per block
5166 so we just need to check the block number. */
5167 if (occr_bb == pred_bb)
5168 return 1;
5169
5170 visited[pred_bb->index] = 1;
5171 }
5172 /* Ignore this predecessor if it kills the expression. */
5173 else if (! TEST_BIT (transp[pred_bb->index], expr->bitmap_index))
5174 visited[pred_bb->index] = 1;
5175
5176 /* Neither gen nor kill. */
5177 else
5178 {
5179 visited[pred_bb->index] = 1;
5180 if (pre_expr_reaches_here_p_work (occr_bb, expr, pred_bb, visited))
5181 return 1;
5182 }
5183 }
5184
5185 /* All paths have been checked. */
5186 return 0;
5187 }
5188
5189 /* The wrapper for pre_expr_reaches_here_p_work that ensures that any
5190 memory allocated by that function is freed. */
5191
5192 static int
5193 pre_expr_reaches_here_p (basic_block occr_bb, struct expr *expr, basic_block bb)
5194 {
5195 int rval;
5196 char *visited = xcalloc (last_basic_block, 1);
5197
5198 rval = pre_expr_reaches_here_p_work (occr_bb, expr, bb, visited);
5199
5200 free (visited);
5201 return rval;
5202 }
5203
5204
5205 /* Given an expr, generate RTL which we can insert at the end of a BB,
5206 or on an edge. Set the block number of any insns generated to
5207 the value of BB. */
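/* A sketch of the two cases handled here (illustrative): if EXPR is
   a general operand such as (const_int 42), the result is the simple
   move

     (set (reg <reaching_reg>) (const_int 42))

   whereas for, say, (plus (reg 100) (reg 101)) we emit the SET and
   rely on recognition to add any CLOBBERs the target requires. */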
5208
5209 static rtx
5210 process_insert_insn (struct expr *expr)
5211 {
5212 rtx reg = expr->reaching_reg;
5213 rtx exp = copy_rtx (expr->expr);
5214 rtx pat;
5215
5216 start_sequence ();
5217
5218 /* If the expression is something that's an operand, like a constant,
5219 just copy it to a register. */
5220 if (general_operand (exp, GET_MODE (reg)))
5221 emit_move_insn (reg, exp);
5222
5223 /* Otherwise, make a new insn to compute this expression and make sure the
5224 insn will be recognized (this also adds any needed CLOBBERs). Copy the
5225 expression to make sure we don't have any sharing issues. */
5226 else if (insn_invalid_p (emit_insn (gen_rtx_SET (VOIDmode, reg, exp))))
5227 abort ();
5228
5229 pat = get_insns ();
5230 end_sequence ();
5231
5232 return pat;
5233 }
5234
5235 /* Add EXPR to the end of basic block BB.
5236
5237 This is used by both PRE and code hoisting.
5238
5239 For PRE, we want to verify that the expr is either transparent
5240 or locally anticipatable in the target block. This check makes
5241 no sense for code hoisting. */
5242
5243 static void
5244 insert_insn_end_bb (struct expr *expr, basic_block bb, int pre)
5245 {
5246 rtx insn = BB_END (bb);
5247 rtx new_insn;
5248 rtx reg = expr->reaching_reg;
5249 int regno = REGNO (reg);
5250 rtx pat, pat_end;
5251
5252 pat = process_insert_insn (expr);
5253 if (pat == NULL_RTX || ! INSN_P (pat))
5254 abort ();
5255
5256 pat_end = pat;
5257 while (NEXT_INSN (pat_end) != NULL_RTX)
5258 pat_end = NEXT_INSN (pat_end);
5259
5260 /* If the last insn is a jump, insert EXPR in front [taking care to
5261 handle cc0, etc. properly]. Similarly we need to take care of
5262 trapping instructions in the presence of non-call exceptions. */
5263
5264 if (GET_CODE (insn) == JUMP_INSN
5265 || (GET_CODE (insn) == INSN
5266 && (bb->succ->succ_next || (bb->succ->flags & EDGE_ABNORMAL))))
5267 {
5268 #ifdef HAVE_cc0
5269 rtx note;
5270 #endif
5271 /* It should always be the case that we can put these instructions
5272 anywhere in the basic block when performing PRE optimizations.
5273 Check this. */
5274 if (GET_CODE (insn) == INSN && pre
5275 && !TEST_BIT (antloc[bb->index], expr->bitmap_index)
5276 && !TEST_BIT (transp[bb->index], expr->bitmap_index))
5277 abort ();
5278
5279 /* If this is a jump table, then we can't insert stuff here. Since
5280 we know the previous real insn must be the tablejump, we insert
5281 the new instruction just before the tablejump. */
5282 if (GET_CODE (PATTERN (insn)) == ADDR_VEC
5283 || GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC)
5284 insn = prev_real_insn (insn);
5285
5286 #ifdef HAVE_cc0
5287 /* FIXME: 'twould be nice to call prev_cc0_setter here but it aborts
5288 if cc0 isn't set. */
5289 note = find_reg_note (insn, REG_CC_SETTER, NULL_RTX);
5290 if (note)
5291 insn = XEXP (note, 0);
5292 else
5293 {
5294 rtx maybe_cc0_setter = prev_nonnote_insn (insn);
5295 if (maybe_cc0_setter
5296 && INSN_P (maybe_cc0_setter)
5297 && sets_cc0_p (PATTERN (maybe_cc0_setter)))
5298 insn = maybe_cc0_setter;
5299 }
5300 #endif
5301 /* FIXME: What if something in cc0/jump uses value set in new insn? */
5302 new_insn = emit_insn_before_noloc (pat, insn);
5303 }
5304
5305 /* Likewise if the last insn is a call, as will happen in the presence
5306 of exception handling. */
5307 else if (GET_CODE (insn) == CALL_INSN
5308 && (bb->succ->succ_next || (bb->succ->flags & EDGE_ABNORMAL)))
5309 {
5310 /* Keeping in mind SMALL_REGISTER_CLASSES and parameters in registers,
5311 we search backward and place the instructions before the first
5312 parameter is loaded. Do this for everyone for consistency and a
5313 presumption that we'll get better code elsewhere as well.
5314
5315 It should always be the case that we can put these instructions
5316 anywhere in the basic block when performing PRE optimizations.
5317 Check this. */
5318
5319 if (pre
5320 && !TEST_BIT (antloc[bb->index], expr->bitmap_index)
5321 && !TEST_BIT (transp[bb->index], expr->bitmap_index))
5322 abort ();
5323
5324 /* Since different machines initialize their parameter registers
5325 in different orders, assume nothing. Collect the set of all
5326 parameter registers. */
5327 insn = find_first_parameter_load (insn, BB_HEAD (bb));
5328
5329 /* If we found all the parameter loads, then we want to insert
5330 before the first parameter load.
5331
5332 If we did not find all the parameter loads, then we might have
5333 stopped on the head of the block, which could be a CODE_LABEL.
5334 If we inserted before the CODE_LABEL, then we would be putting
5335 the insn in the wrong basic block. In that case, put the insn
5336 after the CODE_LABEL. Also, respect NOTE_INSN_BASIC_BLOCK. */
5337 while (GET_CODE (insn) == CODE_LABEL
5338 || NOTE_INSN_BASIC_BLOCK_P (insn))
5339 insn = NEXT_INSN (insn);
5340
5341 new_insn = emit_insn_before_noloc (pat, insn);
5342 }
5343 else
5344 new_insn = emit_insn_after_noloc (pat, insn);
5345
5346 while (1)
5347 {
5348 if (INSN_P (pat))
5349 {
5350 add_label_notes (PATTERN (pat), new_insn);
5351 note_stores (PATTERN (pat), record_set_info, pat);
5352 }
5353 if (pat == pat_end)
5354 break;
5355 pat = NEXT_INSN (pat);
5356 }
5357
5358 gcse_create_count++;
5359
5360 if (gcse_file)
5361 {
5362 fprintf (gcse_file, "PRE/HOIST: end of bb %d, insn %d, ",
5363 bb->index, INSN_UID (new_insn));
5364 fprintf (gcse_file, "copying expression %d to reg %d\n",
5365 expr->bitmap_index, regno);
5366 }
5367 }
5368
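/* As an illustrative example of the jump handling above: on a cc0
   target where the block ends in

     (set (cc0) (compare (reg:SI 70) (const_int 0)))
     (jump_insn ...)

   the new computation is emitted before the compare, since nothing may
   be placed between a cc0 setter and its user.  Likewise, for a
   tablejump the insn is placed before the jump, not the dispatch
   table.  */
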
5369 /* Insert partially redundant expressions on edges in the CFG to make
5370 the expressions fully redundant. */
5371
5372 static int
5373 pre_edge_insert (struct edge_list *edge_list, struct expr **index_map)
5374 {
5375 int e, i, j, num_edges, set_size, did_insert = 0;
5376 sbitmap *inserted;
5377
5378 /* Where PRE_INSERT_MAP is nonzero, we add the expression on that edge
5379 if it reaches any of the deleted expressions. */
5380
5381 set_size = pre_insert_map[0]->size;
5382 num_edges = NUM_EDGES (edge_list);
5383 inserted = sbitmap_vector_alloc (num_edges, expr_hash_table.n_elems);
5384 sbitmap_vector_zero (inserted, num_edges);
5385
5386 for (e = 0; e < num_edges; e++)
5387 {
5388 int indx;
5389 basic_block bb = INDEX_EDGE_PRED_BB (edge_list, e);
5390
5391 for (i = indx = 0; i < set_size; i++, indx += SBITMAP_ELT_BITS)
5392 {
5393 SBITMAP_ELT_TYPE insert = pre_insert_map[e]->elms[i];
5394
5395 for (j = indx; insert && j < (int) expr_hash_table.n_elems; j++, insert >>= 1)
5396 if ((insert & 1) != 0 && index_map[j]->reaching_reg != NULL_RTX)
5397 {
5398 struct expr *expr = index_map[j];
5399 struct occr *occr;
5400
5401 /* Now look at each deleted occurrence of this expression. */
5402 for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
5403 {
5404 if (! occr->deleted_p)
5405 continue;
5406
5407 /* Insert this expression on this edge if it would
5408 reach the deleted occurrence in BB. */
5409 if (!TEST_BIT (inserted[e], j))
5410 {
5411 rtx insn;
5412 edge eg = INDEX_EDGE (edge_list, e);
5413
5414 /* We can't insert anything on an abnormal and
5415 critical edge, so we insert the insn at the end of
5416 the previous block. There are several alternatives
5417 detailed in Morgan's book, P277 (sec 10.5), for
5418 handling this situation. This one is easiest for
5419 now. */
5420
5421 if ((eg->flags & EDGE_ABNORMAL) == EDGE_ABNORMAL)
5422 insert_insn_end_bb (index_map[j], bb, 0);
5423 else
5424 {
5425 insn = process_insert_insn (index_map[j]);
5426 insert_insn_on_edge (insn, eg);
5427 }
5428
5429 if (gcse_file)
5430 {
5431 fprintf (gcse_file, "PRE/HOIST: edge (%d,%d), ",
5432 bb->index,
5433 INDEX_EDGE_SUCC_BB (edge_list, e)->index);
5434 fprintf (gcse_file, "copy expression %d\n",
5435 expr->bitmap_index);
5436 }
5437
5438 update_ld_motion_stores (expr);
5439 SET_BIT (inserted[e], j);
5440 did_insert = 1;
5441 gcse_create_count++;
5442 }
5443 }
5444 }
5445 }
5446 }
5447
5448 sbitmap_vector_free (inserted);
5449 return did_insert;
5450 }
5451
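/* A sketch of the insertion step (illustrative CFG): suppose a+b is
   computed in block B1 and again in join block B4, whose other
   predecessor B2 computes nothing.  LCM marks the bit for a+b in
   PRE_INSERT_MAP on the edge B2->B4; we emit "reaching_reg <- a+b"
   there (committed later by commit_edge_insertions, which splits the
   edge if necessary), unless the edge is abnormal, in which case the
   insn goes at the end of B2 instead.  */
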
5452 /* Copy the result of EXPR->EXPR generated by INSN to EXPR->REACHING_REG.
5453 Given "old_reg <- expr" (INSN), instead of adding after it
5454 reaching_reg <- old_reg
5455 it's better to do the following:
5456 reaching_reg <- expr
5457 old_reg <- reaching_reg
5458 because this way copy propagation can discover additional PRE
5459 opportunities. But if this fails, we try the old way.
5460 When "expr" is a store, i.e.
5461 given "MEM <- old_reg", instead of adding after it
5462 reaching_reg <- old_reg
5463 it's better to add it before as follows:
5464 reaching_reg <- old_reg
5465 MEM <- reaching_reg. */
5466
5467 static void
5468 pre_insert_copy_insn (struct expr *expr, rtx insn)
5469 {
5470 rtx reg = expr->reaching_reg;
5471 int regno = REGNO (reg);
5472 int indx = expr->bitmap_index;
5473 rtx pat = PATTERN (insn);
5474 rtx set, new_insn;
5475 rtx old_reg;
5476 int i;
5477
5478 /* This block matches the logic in hash_scan_insn. */
5479 if (GET_CODE (pat) == SET)
5480 set = pat;
5481 else if (GET_CODE (pat) == PARALLEL)
5482 {
5483 /* Search through the parallel looking for the set whose
5484 source was the expression that we're interested in. */
5485 set = NULL_RTX;
5486 for (i = 0; i < XVECLEN (pat, 0); i++)
5487 {
5488 rtx x = XVECEXP (pat, 0, i);
5489 if (GET_CODE (x) == SET
5490 && expr_equiv_p (SET_SRC (x), expr->expr))
5491 {
5492 set = x;
5493 break;
5494 }
5495 }
5496 }
5497 else
5498 abort ();
5499
5500 if (GET_CODE (SET_DEST (set)) == REG)
5501 {
5502 old_reg = SET_DEST (set);
5503 /* Check if we can modify the set destination in the original insn. */
5504 if (validate_change (insn, &SET_DEST (set), reg, 0))
5505 {
5506 new_insn = gen_move_insn (old_reg, reg);
5507 new_insn = emit_insn_after (new_insn, insn);
5508
5509 /* Keep register set table up to date. */
5510 replace_one_set (REGNO (old_reg), insn, new_insn);
5511 record_one_set (regno, insn);
5512 }
5513 else
5514 {
5515 new_insn = gen_move_insn (reg, old_reg);
5516 new_insn = emit_insn_after (new_insn, insn);
5517
5518 /* Keep register set table up to date. */
5519 record_one_set (regno, new_insn);
5520 }
5521 }
5522 else /* This is possible only in case of a store to memory. */
5523 {
5524 old_reg = SET_SRC (set);
5525 new_insn = gen_move_insn (reg, old_reg);
5526
5527 /* Check if we can modify the set source in the original insn. */
5528 if (validate_change (insn, &SET_SRC (set), reg, 0))
5529 new_insn = emit_insn_before (new_insn, insn);
5530 else
5531 new_insn = emit_insn_after (new_insn, insn);
5532
5533 /* Keep register set table up to date. */
5534 record_one_set (regno, new_insn);
5535 }
5536
5537 gcse_create_count++;
5538
5539 if (gcse_file)
5540 fprintf (gcse_file,
5541 "PRE: bb %d, insn %d, copy expression %d in insn %d to reg %d\n",
5542 BLOCK_NUM (insn), INSN_UID (new_insn), indx,
5543 INSN_UID (insn), regno);
5544 }
5545
5546 /* Copy available expressions that reach the redundant expression
5547 to `reaching_reg'. */
5548
5549 static void
5550 pre_insert_copies (void)
5551 {
5552 unsigned int i, added_copy;
5553 struct expr *expr;
5554 struct occr *occr;
5555 struct occr *avail;
5556
5557 /* For each available expression in the table, copy the result to
5558 `reaching_reg' if the expression reaches a deleted one.
5559
5560 ??? The current algorithm is rather brute force.
5561 Need to do some profiling. */
5562
5563 for (i = 0; i < expr_hash_table.size; i++)
5564 for (expr = expr_hash_table.table[i]; expr != NULL; expr = expr->next_same_hash)
5565 {
5566 /* If the basic block isn't reachable, PPOUT will be TRUE. However,
5567 we don't want to insert a copy here because the expression may not
5568 really be redundant. So only insert an insn if the expression was
5569 deleted. This test also avoids further processing if the
5570 expression wasn't deleted anywhere. */
5571 if (expr->reaching_reg == NULL)
5572 continue;
5573
5574 /* Set when we add a copy for that expression. */
5575 added_copy = 0;
5576
5577 for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
5578 {
5579 if (! occr->deleted_p)
5580 continue;
5581
5582 for (avail = expr->avail_occr; avail != NULL; avail = avail->next)
5583 {
5584 rtx insn = avail->insn;
5585
5586 /* No need to handle this one if handled already. */
5587 if (avail->copied_p)
5588 continue;
5589
5590 /* Don't handle this one if it's a redundant one. */
5591 if (TEST_BIT (pre_redundant_insns, INSN_CUID (insn)))
5592 continue;
5593
5594 /* Or if the expression doesn't reach the deleted one. */
5595 if (! pre_expr_reaches_here_p (BLOCK_FOR_INSN (avail->insn),
5596 expr,
5597 BLOCK_FOR_INSN (occr->insn)))
5598 continue;
5599
5600 added_copy = 1;
5601
5602 /* Copy the result of avail to reaching_reg. */
5603 pre_insert_copy_insn (expr, insn);
5604 avail->copied_p = 1;
5605 }
5606 }
5607
5608 if (added_copy)
5609 update_ld_motion_stores (expr);
5610 }
5611 }
5612
5613 /* Emit move from SRC to DEST noting the equivalence with expression computed
5614 in INSN. */
5615 static rtx
5616 gcse_emit_move_after (rtx src, rtx dest, rtx insn)
5617 {
5618 rtx new;
5619 rtx set = single_set (insn), set2;
5620 rtx note;
5621 rtx eqv;
5622
5623 /* This should never fail since we're creating a reg->reg copy
5624 we've verified to be valid. */
5625
5626 new = emit_insn_after (gen_move_insn (dest, src), insn);
5627
5628 /* Note the equivalence for local CSE pass. */
5629 set2 = single_set (new);
5630 if (!set2 || !rtx_equal_p (SET_DEST (set2), dest))
5631 return new;
5632 if ((note = find_reg_equal_equiv_note (insn)))
5633 eqv = XEXP (note, 0);
5634 else
5635 eqv = SET_SRC (set);
5636
5637 set_unique_reg_note (new, REG_EQUAL, copy_insn_1 (eqv));
5638
5639 return new;
5640 }
5641
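/* For instance (illustrative), when pre_delete replaces the insn
   (set (reg 65) (plus (reg 70) (const_int 4))) by a copy from the
   reaching register (reg 90), the copy emitted here,

     (set (reg 65) (reg 90)),

   carries a REG_EQUAL note recording (plus (reg 70) (const_int 4)),
   so the later CSE passes can still see what value the register
   holds.  */
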
5642 /* Delete redundant computations.
5643 Deletion is done by changing the insn to copy the `reaching_reg' of
5644 the expression into the result of the SET. It is left to later passes
5645 (cprop, cse2, flow, combine, regmove) to propagate the copy or eliminate it.
5646
5647 Returns nonzero if a change is made. */
5648
5649 static int
5650 pre_delete (void)
5651 {
5652 unsigned int i;
5653 int changed;
5654 struct expr *expr;
5655 struct occr *occr;
5656
5657 changed = 0;
5658 for (i = 0; i < expr_hash_table.size; i++)
5659 for (expr = expr_hash_table.table[i];
5660 expr != NULL;
5661 expr = expr->next_same_hash)
5662 {
5663 int indx = expr->bitmap_index;
5664
5665 /* We only need to search antic_occr since we require
5666 ANTLOC != 0. */
5667
5668 for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
5669 {
5670 rtx insn = occr->insn;
5671 rtx set;
5672 basic_block bb = BLOCK_FOR_INSN (insn);
5673
5674 /* We only delete insns that have a single_set. */
5675 if (TEST_BIT (pre_delete_map[bb->index], indx)
5676 && (set = single_set (insn)) != 0)
5677 {
5678 /* Create a pseudo-reg to store the result of reaching
5679 expressions into. Get the mode for the new pseudo from
5680 the mode of the original destination pseudo. */
5681 if (expr->reaching_reg == NULL)
5682 expr->reaching_reg
5683 = gen_reg_rtx (GET_MODE (SET_DEST (set)));
5684
5685 gcse_emit_move_after (expr->reaching_reg, SET_DEST (set), insn);
5686 delete_insn (insn);
5687 occr->deleted_p = 1;
5688 SET_BIT (pre_redundant_insns, INSN_CUID (insn));
5689 changed = 1;
5690 gcse_subst_count++;
5691
5692 if (gcse_file)
5693 {
5694 fprintf (gcse_file,
5695 "PRE: redundant insn %d (expression %d) in ",
5696 INSN_UID (insn), indx);
5697 fprintf (gcse_file, "bb %d, reaching reg is %d\n",
5698 bb->index, REGNO (expr->reaching_reg));
5699 }
5700 }
5701 }
5702 }
5703
5704 return changed;
5705 }
5706
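/* A source-level sketch of the overall PRE transformation
   (illustrative C, not from any test case):

     if (cond)
       x = a + b;
     ...
     y = a + b;

   Here a + b is partially redundant at the assignment to y.  Insertion
   adds a computation on the path where it was missing, and deletion
   rewrites both occurrences to use a single pseudo:

     if (cond)
       { t = a + b; x = t; }
     else
       t = a + b;
     ...
     y = t;  */
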
5707 /* Perform GCSE optimizations using PRE.
5708 This is called by one_pre_gcse_pass after all the dataflow analysis
5709 has been done.
5710
5711 This is based on the original Morel-Renvoise paper, Fred Chow's thesis, and
5712 lazy code motion from Knoop, Ruthing and Steffen as described in Advanced
5713 Compiler Design and Implementation.
5714
5715 ??? A new pseudo reg is created to hold the reaching expression. The nice
5716 thing about the classical approach is that it would try to use an existing
5717 reg. If the register can't be adequately optimized [i.e. we introduce
5718 reload problems], one could add a pass here to propagate the new register
5719 through the block.
5720
5721 ??? We don't handle single sets in PARALLELs because we're [currently] not
5722 able to copy the rest of the parallel when we insert copies to create full
5723 redundancies from partial redundancies. However, there's no reason why we
5724 can't handle PARALLELs in the cases where there are no partial
5725 redundancies. */
5726
5727 static int
5728 pre_gcse (void)
5729 {
5730 unsigned int i;
5731 int did_insert, changed;
5732 struct expr **index_map;
5733 struct expr *expr;
5734
5735 /* Compute a mapping from expression number (`bitmap_index') to
5736 hash table entry. */
5737
5738 index_map = xcalloc (expr_hash_table.n_elems, sizeof (struct expr *));
5739 for (i = 0; i < expr_hash_table.size; i++)
5740 for (expr = expr_hash_table.table[i]; expr != NULL; expr = expr->next_same_hash)
5741 index_map[expr->bitmap_index] = expr;
5742
5743 /* Reset bitmap used to track which insns are redundant. */
5744 pre_redundant_insns = sbitmap_alloc (max_cuid);
5745 sbitmap_zero (pre_redundant_insns);
5746
5747 /* Delete the redundant insns first so that
5748 - we know what register to use for the new insns and for the other
5749 ones with reaching expressions
5750 - we know which insns are redundant when we go to create copies */
5751
5752 changed = pre_delete ();
5753
5754 did_insert = pre_edge_insert (edge_list, index_map);
5755
5756 /* In other places with reaching expressions, copy the expression to the
5757 specially allocated pseudo-reg that reaches the redundant expr. */
5758 pre_insert_copies ();
5759 if (did_insert)
5760 {
5761 commit_edge_insertions ();
5762 changed = 1;
5763 }
5764
5765 free (index_map);
5766 sbitmap_free (pre_redundant_insns);
5767 return changed;
5768 }
5769
5770 /* Top level routine to perform one PRE GCSE pass.
5771
5772 Return nonzero if a change was made. */
5773
5774 static int
5775 one_pre_gcse_pass (int pass)
5776 {
5777 int changed = 0;
5778
5779 gcse_subst_count = 0;
5780 gcse_create_count = 0;
5781
5782 alloc_hash_table (max_cuid, &expr_hash_table, 0);
5783 add_noreturn_fake_exit_edges ();
5784 if (flag_gcse_lm)
5785 compute_ld_motion_mems ();
5786
5787 compute_hash_table (&expr_hash_table);
5788 trim_ld_motion_mems ();
5789 if (gcse_file)
5790 dump_hash_table (gcse_file, "Expression", &expr_hash_table);
5791
5792 if (expr_hash_table.n_elems > 0)
5793 {
5794 alloc_pre_mem (last_basic_block, expr_hash_table.n_elems);
5795 compute_pre_data ();
5796 changed |= pre_gcse ();
5797 free_edge_list (edge_list);
5798 free_pre_mem ();
5799 }
5800
5801 free_ldst_mems ();
5802 remove_fake_edges ();
5803 free_hash_table (&expr_hash_table);
5804
5805 if (gcse_file)
5806 {
5807 fprintf (gcse_file, "\nPRE GCSE of %s, pass %d: %d bytes needed, ",
5808 current_function_name (), pass, bytes_used);
5809 fprintf (gcse_file, "%d substs, %d insns created\n",
5810 gcse_subst_count, gcse_create_count);
5811 }
5812
5813 return changed;
5814 }
5815
5816 /* If X contains any LABEL_REF's, add REG_LABEL notes for them to INSN.
5817 If notes are added to an insn which references a CODE_LABEL, the
5818 LABEL_NUSES count is incremented. We have to add REG_LABEL notes,
5819 because the following loop optimization pass requires them. */
5820
5821 /* ??? This is very similar to the loop.c add_label_notes function. We
5822 could probably share code here. */
5823
5824 /* ??? If there was a jump optimization pass after gcse and before loop,
5825 then we would not need to do this here, because jump would add the
5826 necessary REG_LABEL notes. */
5827
5828 static void
5829 add_label_notes (rtx x, rtx insn)
5830 {
5831 enum rtx_code code = GET_CODE (x);
5832 int i, j;
5833 const char *fmt;
5834
5835 if (code == LABEL_REF && !LABEL_REF_NONLOCAL_P (x))
5836 {
5837 /* This code used to ignore labels that referred to dispatch tables to
5838 avoid flow generating (slightly) worse code.
5839
5840 We no longer ignore such label references (see LABEL_REF handling in
5841 mark_jump_label for additional information). */
5842
5843 REG_NOTES (insn) = gen_rtx_INSN_LIST (REG_LABEL, XEXP (x, 0),
5844 REG_NOTES (insn));
5845 if (LABEL_P (XEXP (x, 0)))
5846 LABEL_NUSES (XEXP (x, 0))++;
5847 return;
5848 }
5849
5850 for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
5851 {
5852 if (fmt[i] == 'e')
5853 add_label_notes (XEXP (x, i), insn);
5854 else if (fmt[i] == 'E')
5855 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
5856 add_label_notes (XVECEXP (x, i, j), insn);
5857 }
5858 }
5859
5860 /* Compute transparent outgoing information for each block.
5861
5862 An expression is transparent to an edge unless it is killed by
5863 the edge itself. This can only happen with abnormal control flow,
5864 when the edge is traversed through a call. This happens with
5865 non-local labels and exceptions.
5866
5867 This would not be necessary if we split the edge. While this is
5868 normally impossible for abnormal critical edges, with some effort
5869 it should be possible with exception handling, since we still have
5870 control over which handler should be invoked. But due to increased
5871 EH table sizes, this may not be worthwhile. */
5872
5873 static void
5874 compute_transpout (void)
5875 {
5876 basic_block bb;
5877 unsigned int i;
5878 struct expr *expr;
5879
5880 sbitmap_vector_ones (transpout, last_basic_block);
5881
5882 FOR_EACH_BB (bb)
5883 {
5884 /* Note that flow inserted a nop at the end of basic blocks that
5885 end in call instructions for reasons other than abnormal
5886 control flow. */
5887 if (GET_CODE (BB_END (bb)) != CALL_INSN)
5888 continue;
5889
5890 for (i = 0; i < expr_hash_table.size; i++)
5891 for (expr = expr_hash_table.table[i]; expr ; expr = expr->next_same_hash)
5892 if (GET_CODE (expr->expr) == MEM)
5893 {
5894 if (GET_CODE (XEXP (expr->expr, 0)) == SYMBOL_REF
5895 && CONSTANT_POOL_ADDRESS_P (XEXP (expr->expr, 0)))
5896 continue;
5897
5898 /* ??? Optimally, we would use interprocedural alias
5899 analysis to determine if this mem is actually killed
5900 by this call. */
5901 RESET_BIT (transpout[bb->index], expr->bitmap_index);
5902 }
5903 }
5904 }
5905
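/* As an illustrative reading of the loop above: an expression such as
   (mem (symbol_ref "a")) loses its transpout bit in a block ending in
   a call, since the callee may store to "a", whereas a load from the
   constant pool survives the call and stays transparent.  */
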
5906 /* Removal of useless null pointer checks */
5907
5908 /* Called via note_stores. X is set by SETTER. If X is a register we must
5909 invalidate nonnull_local and set nonnull_killed. DATA is really a
5910 `null_pointer_info *'.
5911
5912 We ignore hard registers. */
5913
5914 static void
5915 invalidate_nonnull_info (rtx x, rtx setter ATTRIBUTE_UNUSED, void *data)
5916 {
5917 unsigned int regno;
5918 struct null_pointer_info *npi = (struct null_pointer_info *) data;
5919
5920 while (GET_CODE (x) == SUBREG)
5921 x = SUBREG_REG (x);
5922
5923 /* Ignore anything that is not a register or is a hard register. */
5924 if (GET_CODE (x) != REG
5925 || REGNO (x) < npi->min_reg
5926 || REGNO (x) >= npi->max_reg)
5927 return;
5928
5929 regno = REGNO (x) - npi->min_reg;
5930
5931 RESET_BIT (npi->nonnull_local[npi->current_block->index], regno);
5932 SET_BIT (npi->nonnull_killed[npi->current_block->index], regno);
5933 }
5934
5935 /* Do null-pointer check elimination for the registers indicated in
5936 NPI. NONNULL_AVIN and NONNULL_AVOUT are pre-allocated sbitmaps;
5937 they are not our responsibility to free. */
5938
5939 static int
5940 delete_null_pointer_checks_1 (unsigned int *block_reg, sbitmap *nonnull_avin,
5941 sbitmap *nonnull_avout,
5942 struct null_pointer_info *npi)
5943 {
5944 basic_block bb, current_block;
5945 sbitmap *nonnull_local = npi->nonnull_local;
5946 sbitmap *nonnull_killed = npi->nonnull_killed;
5947 int something_changed = 0;
5948
5949 /* Compute local properties, nonnull and killed. A register will have
5950 the nonnull property if at the end of the current block its value is
5951 known to be nonnull. The killed property indicates that somewhere in
5952 the block any information we had about the register is killed.
5953
5954 Note that a register can have both properties in a single block. That
5955 indicates that it's killed, then later in the block a new value is
5956 computed. */
5957 sbitmap_vector_zero (nonnull_local, last_basic_block);
5958 sbitmap_vector_zero (nonnull_killed, last_basic_block);
5959
5960 FOR_EACH_BB (current_block)
5961 {
5962 rtx insn, stop_insn;
5963
5964 /* Set the current block for invalidate_nonnull_info. */
5965 npi->current_block = current_block;
5966
5967 /* Scan each insn in the basic block looking for memory references and
5968 register sets. */
5969 stop_insn = NEXT_INSN (BB_END (current_block));
5970 for (insn = BB_HEAD (current_block);
5971 insn != stop_insn;
5972 insn = NEXT_INSN (insn))
5973 {
5974 rtx set;
5975 rtx reg;
5976
5977 /* Ignore anything that is not a normal insn. */
5978 if (! INSN_P (insn))
5979 continue;
5980
5981 /* Basically ignore anything that is not a simple SET. We do have
5982 to make sure to invalidate nonnull_local and set nonnull_killed
5983 for such insns though. */
5984 set = single_set (insn);
5985 if (!set)
5986 {
5987 note_stores (PATTERN (insn), invalidate_nonnull_info, npi);
5988 continue;
5989 }
5990
5991 /* See if we've got a usable memory load. We handle it first
5992 in case it uses its address register as a dest (which kills
5993 the nonnull property). */
5994 if (GET_CODE (SET_SRC (set)) == MEM
5995 && GET_CODE ((reg = XEXP (SET_SRC (set), 0))) == REG
5996 && REGNO (reg) >= npi->min_reg
5997 && REGNO (reg) < npi->max_reg)
5998 SET_BIT (nonnull_local[current_block->index],
5999 REGNO (reg) - npi->min_reg);
6000
6001 /* Now invalidate stuff clobbered by this insn. */
6002 note_stores (PATTERN (insn), invalidate_nonnull_info, npi);
6003
6004 /* And handle stores; we do these last since any sets in INSN can
6005 not kill the nonnull property if it is derived from a MEM
6006 appearing in a SET_DEST. */
6007 if (GET_CODE (SET_DEST (set)) == MEM
6008 && GET_CODE ((reg = XEXP (SET_DEST (set), 0))) == REG
6009 && REGNO (reg) >= npi->min_reg
6010 && REGNO (reg) < npi->max_reg)
6011 SET_BIT (nonnull_local[current_block->index],
6012 REGNO (reg) - npi->min_reg);
6013 }
6014 }
6015
6016 /* Now compute global properties based on the local properties. This
6017 is a classic global availability algorithm. */
6018 compute_available (nonnull_local, nonnull_killed,
6019 nonnull_avout, nonnull_avin);
6020
6021 /* Now look at each bb and see if it ends with a compare of a value
6022 against zero. */
6023 FOR_EACH_BB (bb)
6024 {
6025 rtx last_insn = BB_END (bb);
6026 rtx condition, earliest;
6027 int compare_and_branch;
6028
6029 /* Since MIN_REG is always at least FIRST_PSEUDO_REGISTER, and
6030 since BLOCK_REG[BB] is zero if this block did not end with a
6031 comparison against zero, this condition works. */
6032 if (block_reg[bb->index] < npi->min_reg
6033 || block_reg[bb->index] >= npi->max_reg)
6034 continue;
6035
6036 /* LAST_INSN is a conditional jump. Get its condition. */
6037 condition = get_condition (last_insn, &earliest, false);
6038
6039 /* If we can't determine the condition then skip. */
6040 if (! condition)
6041 continue;
6042
6043 /* Is the register known to have a nonzero value? */
6044 if (!TEST_BIT (nonnull_avout[bb->index], block_reg[bb->index] - npi->min_reg))
6045 continue;
6046
6047 /* Try to compute whether the compare/branch at the loop end is one or
6048 two instructions. */
6049 if (earliest == last_insn)
6050 compare_and_branch = 1;
6051 else if (earliest == prev_nonnote_insn (last_insn))
6052 compare_and_branch = 2;
6053 else
6054 continue;
6055
6056 /* We know the register in this comparison is nonnull at exit from
6057 this block. We can optimize this comparison. */
6058 if (GET_CODE (condition) == NE)
6059 {
6060 rtx new_jump;
6061
6062 new_jump = emit_jump_insn_after (gen_jump (JUMP_LABEL (last_insn)),
6063 last_insn);
6064 JUMP_LABEL (new_jump) = JUMP_LABEL (last_insn);
6065 LABEL_NUSES (JUMP_LABEL (new_jump))++;
6066 emit_barrier_after (new_jump);
6067 }
6068
6069 something_changed = 1;
6070 delete_insn (last_insn);
6071 #ifdef HAVE_cc0
6072 if (compare_and_branch == 2)
6073 delete_insn (earliest);
6074 #endif
6075 purge_dead_edges (bb);
6076
6077 /* Don't check this block again. (Note that BB_END is
6078 invalid here; we deleted the last instruction in the
6079 block.) */
6080 block_reg[bb->index] = 0;
6081 }
6082
6083 return something_changed;
6084 }
6085
6086 /* Find EQ/NE comparisons against zero which can be (indirectly) evaluated
6087 at compile time.
6088
6089 This is conceptually similar to global constant/copy propagation and
6090 classic global CSE (it even uses the same dataflow equations as cprop).
6091
6092 If a register is used as a memory address with the form (mem (reg)), then we
6093 know that REG can not be zero at that point in the program. Any instruction
6094 which sets REG "kills" this property.
6095
6096 So, if every path leading to a conditional branch has an available memory
6097 reference of that form, then we know the register can not have the value
6098 zero at the conditional branch.
6099
6100 So we merely need to compute the local properties and propagate that data
6101 around the cfg, then optimize where possible.
6102
6103 We run this pass two times. Once before CSE, then again after CSE. This
6104 has proven to be the most profitable approach. It is rare for new
6105 optimization opportunities of this nature to appear after the first CSE
6106 pass.
6107
6108 This could probably be integrated with global cprop with a little work. */
6109
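/* A minimal source-level sketch of the transformation (illustrative C):

     x = *p;         p is dereferenced, hence known nonnull here
     ...             no assignment to p on any path
     if (p != 0)     this test must succeed
       ...

   The conditional branch is replaced by an unconditional jump when the
   condition is NE, or simply deleted when it is EQ, so the dead arm
   becomes unreachable.  */
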
6110 int
6111 delete_null_pointer_checks (rtx f ATTRIBUTE_UNUSED)
6112 {
6113 sbitmap *nonnull_avin, *nonnull_avout;
6114 unsigned int *block_reg;
6115 basic_block bb;
6116 int reg;
6117 int regs_per_pass;
6118 int max_reg = max_reg_num ();
6119 struct null_pointer_info npi;
6120 int something_changed = 0;
6121
6122 /* If we have only a single block, or it is too expensive, give up. */
6123 if (n_basic_blocks <= 1
6124 || is_too_expensive (_ ("NULL pointer checks disabled")))
6125 return 0;
6126
6127 /* We need four bitmaps, each with a bit for each register in each
6128 basic block. */
6129 regs_per_pass = get_bitmap_width (4, last_basic_block, max_reg);
6130
6131 /* Allocate bitmaps to hold local and global properties. */
6132 npi.nonnull_local = sbitmap_vector_alloc (last_basic_block, regs_per_pass);
6133 npi.nonnull_killed = sbitmap_vector_alloc (last_basic_block, regs_per_pass);
6134 nonnull_avin = sbitmap_vector_alloc (last_basic_block, regs_per_pass);
6135 nonnull_avout = sbitmap_vector_alloc (last_basic_block, regs_per_pass);
6136
6137 /* Go through the basic blocks, seeing whether or not each block
6138 ends with a conditional branch whose condition is a comparison
6139 against zero. Record the register compared in BLOCK_REG. */
6140 block_reg = xcalloc (last_basic_block, sizeof (int));
6141 FOR_EACH_BB (bb)
6142 {
6143 rtx last_insn = BB_END (bb);
6144 rtx condition, earliest, reg;
6145
6146 /* We only want conditional branches. */
6147 if (GET_CODE (last_insn) != JUMP_INSN
6148 || !any_condjump_p (last_insn)
6149 || !onlyjump_p (last_insn))
6150 continue;
6151
6152 /* LAST_INSN is a conditional jump. Get its condition. */
6153 condition = get_condition (last_insn, &earliest, false);
6154
6155 /* If we were unable to get the condition, or it is not an equality
6156 comparison against zero then there's nothing we can do. */
6157 if (!condition
6158 || (GET_CODE (condition) != NE && GET_CODE (condition) != EQ)
6159 || GET_CODE (XEXP (condition, 1)) != CONST_INT
6160 || (XEXP (condition, 1)
6161 != CONST0_RTX (GET_MODE (XEXP (condition, 0)))))
6162 continue;
6163
6164 /* We must be checking a register against zero. */
6165 reg = XEXP (condition, 0);
6166 if (GET_CODE (reg) != REG)
6167 continue;
6168
6169 block_reg[bb->index] = REGNO (reg);
6170 }
6171
6172 /* Go through the algorithm for each block of registers. */
6173 for (reg = FIRST_PSEUDO_REGISTER; reg < max_reg; reg += regs_per_pass)
6174 {
6175 npi.min_reg = reg;
6176 npi.max_reg = MIN (reg + regs_per_pass, max_reg);
6177 something_changed |= delete_null_pointer_checks_1 (block_reg,
6178 nonnull_avin,
6179 nonnull_avout,
6180 &npi);
6181 }
6182
6183 /* Free the table of registers compared at the end of every block. */
6184 free (block_reg);
6185
6186 /* Free bitmaps. */
6187 sbitmap_vector_free (npi.nonnull_local);
6188 sbitmap_vector_free (npi.nonnull_killed);
6189 sbitmap_vector_free (nonnull_avin);
6190 sbitmap_vector_free (nonnull_avout);
6191
6192 return something_changed;
6193 }
6194
6195 /* Code Hoisting variables and subroutines. */
6196
6197 /* Very busy expressions. */
6198 static sbitmap *hoist_vbein;
6199 static sbitmap *hoist_vbeout;
6200
6201 /* Hoistable expressions. */
6202 static sbitmap *hoist_exprs;
6203
6204 /* ??? We could compute post dominators and run this algorithm in
6205 reverse to perform tail merging, doing so would probably be
6206 more effective than the tail merging code in jump.c.
6207
6208 It's unclear if tail merging could be run in parallel with
6209 code hoisting. It would be nice. */
6210
6211 /* Allocate vars used for code hoisting analysis. */
6212
6213 static void
6214 alloc_code_hoist_mem (int n_blocks, int n_exprs)
6215 {
6216 antloc = sbitmap_vector_alloc (n_blocks, n_exprs);
6217 transp = sbitmap_vector_alloc (n_blocks, n_exprs);
6218 comp = sbitmap_vector_alloc (n_blocks, n_exprs);
6219
6220 hoist_vbein = sbitmap_vector_alloc (n_blocks, n_exprs);
6221 hoist_vbeout = sbitmap_vector_alloc (n_blocks, n_exprs);
6222 hoist_exprs = sbitmap_vector_alloc (n_blocks, n_exprs);
6223 transpout = sbitmap_vector_alloc (n_blocks, n_exprs);
6224 }
6225
6226 /* Free vars used for code hoisting analysis. */
6227
6228 static void
6229 free_code_hoist_mem (void)
6230 {
6231 sbitmap_vector_free (antloc);
6232 sbitmap_vector_free (transp);
6233 sbitmap_vector_free (comp);
6234
6235 sbitmap_vector_free (hoist_vbein);
6236 sbitmap_vector_free (hoist_vbeout);
6237 sbitmap_vector_free (hoist_exprs);
6238 sbitmap_vector_free (transpout);
6239
6240 free_dominance_info (CDI_DOMINATORS);
6241 }
6242
6243 /* Compute the very busy expressions at entry/exit from each block.
6244
6245 An expression is very busy if all paths from a given point
6246 compute the expression. */
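
/* A sketch of the equations solved below (standard very-busy-expression
   analysis; notation informal):

     VBEIN(b)  = ANTLOC(b) | (VBEOUT(b) & TRANSP(b))
     VBEOUT(b) = intersection of VBEIN(s) over all successors s of b,
                 taken as empty at the exit

   iterated to a fixed point over the blocks in reverse order.  */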
6247
6248 static void
6249 compute_code_hoist_vbeinout (void)
6250 {
6251 int changed, passes;
6252 basic_block bb;
6253
6254 sbitmap_vector_zero (hoist_vbeout, last_basic_block);
6255 sbitmap_vector_zero (hoist_vbein, last_basic_block);
6256
6257 passes = 0;
6258 changed = 1;
6259
6260 while (changed)
6261 {
6262 changed = 0;
6263
6264 /* We scan the blocks in the reverse order to speed up
6265 the convergence. */
6266 FOR_EACH_BB_REVERSE (bb)
6267 {
6268 changed |= sbitmap_a_or_b_and_c_cg (hoist_vbein[bb->index], antloc[bb->index],
6269 hoist_vbeout[bb->index], transp[bb->index]);
6270 if (bb->next_bb != EXIT_BLOCK_PTR)
6271 sbitmap_intersection_of_succs (hoist_vbeout[bb->index], hoist_vbein, bb->index);
6272 }
6273
6274 passes++;
6275 }
6276
6277 if (gcse_file)
6278 fprintf (gcse_file, "hoisting vbeinout computation: %d passes\n", passes);
6279 }
6280
6281 /* Top level routine to do the dataflow analysis needed by code hoisting. */
6282
6283 static void
6284 compute_code_hoist_data (void)
6285 {
6286 compute_local_properties (transp, comp, antloc, &expr_hash_table);
6287 compute_transpout ();
6288 compute_code_hoist_vbeinout ();
6289 calculate_dominance_info (CDI_DOMINATORS);
6290 if (gcse_file)
6291 fprintf (gcse_file, "\n");
6292 }
6293
6294 /* Determine if the expression identified by EXPR_INDEX would
6295 reach BB unimpaired if it was placed at the end of EXPR_BB.
6296
6297 It's unclear exactly what Muchnick meant by "unimpaired". It seems
6298 to me that the expression must either be computed or transparent in
6299 *every* block in the path(s) from EXPR_BB to BB. Any other definition
6300 would allow the expression to be hoisted out of loops, even if
6301 the expression wasn't a loop invariant.
6302
6303 Contrast this to reachability for PRE where an expression is
6304 considered reachable if *any* path reaches instead of *all*
6305 paths. */
6306
6307 static int
6308 hoist_expr_reaches_here_p (basic_block expr_bb, int expr_index, basic_block bb, char *visited)
6309 {
6310 edge pred;
6311 int visited_allocated_locally = 0;
6312
6314 if (visited == NULL)
6315 {
6316 visited_allocated_locally = 1;
6317 visited = xcalloc (last_basic_block, 1);
6318 }
6319
6320 for (pred = bb->pred; pred != NULL; pred = pred->pred_next)
6321 {
6322 basic_block pred_bb = pred->src;
6323
6324 if (pred->src == ENTRY_BLOCK_PTR)
6325 break;
6326 else if (pred_bb == expr_bb)
6327 continue;
6328 else if (visited[pred_bb->index])
6329 continue;
6330
6331 /* Does this predecessor generate this expression? */
6332 else if (TEST_BIT (comp[pred_bb->index], expr_index))
6333 break;
6334 else if (! TEST_BIT (transp[pred_bb->index], expr_index))
6335 break;
6336
6337 /* Not killed. */
6338 else
6339 {
6340 visited[pred_bb->index] = 1;
6341 if (! hoist_expr_reaches_here_p (expr_bb, expr_index,
6342 pred_bb, visited))
6343 break;
6344 }
6345 }
6346 if (visited_allocated_locally)
6347 free (visited);
6348
6349 return (pred == NULL);
6350 }
6351
6352 /* Actually perform code hoisting. */
6353
6354 static void
6355 hoist_code (void)
6356 {
6357 basic_block bb, dominated;
6358 basic_block *domby;
6359 unsigned int domby_len;
6360 unsigned int i,j;
6361 struct expr **index_map;
6362 struct expr *expr;
6363
6364 sbitmap_vector_zero (hoist_exprs, last_basic_block);
6365
6366 /* Compute a mapping from expression number (`bitmap_index') to
6367 hash table entry. */
6368
6369 index_map = xcalloc (expr_hash_table.n_elems, sizeof (struct expr *));
6370 for (i = 0; i < expr_hash_table.size; i++)
6371 for (expr = expr_hash_table.table[i]; expr != NULL; expr = expr->next_same_hash)
6372 index_map[expr->bitmap_index] = expr;
6373
6374 /* Walk over each basic block looking for potentially hoistable
6375 expressions; nothing gets hoisted from the entry block. */
6376 FOR_EACH_BB (bb)
6377 {
6378 int found = 0;
6379 int insn_inserted_p;
6380
6381 domby_len = get_dominated_by (CDI_DOMINATORS, bb, &domby);
6382 /* Examine each expression that is very busy at the exit of this
6383 block. These are the potentially hoistable expressions. */
6384 for (i = 0; i < hoist_vbeout[bb->index]->n_bits; i++)
6385 {
6386 int hoistable = 0;
6387
6388 if (TEST_BIT (hoist_vbeout[bb->index], i)
6389 && TEST_BIT (transpout[bb->index], i))
6390 {
6391 /* We've found a potentially hoistable expression, now
6392 we look at every block BB dominates to see if it
6393 computes the expression. */
6394 for (j = 0; j < domby_len; j++)
6395 {
6396 dominated = domby[j];
6397 /* Ignore self dominance. */
6398 if (bb == dominated)
6399 continue;
6400 /* We've found a dominated block, now see if it computes
6401 the busy expression and whether or not moving that
6402 expression to the "beginning" of that block is safe. */
6403 if (!TEST_BIT (antloc[dominated->index], i))
6404 continue;
6405
6406 /* Note if the expression would reach the dominated block
6407 unimpaired if it was placed at the end of BB.
6408
6409 Keep track of how many times this expression is hoistable
6410 from a dominated block into BB. */
6411 if (hoist_expr_reaches_here_p (bb, i, dominated, NULL))
6412 hoistable++;
6413 }
6414
6415 /* If we found more than one hoistable occurrence of this
6416 expression, then note it in the bitmap of expressions to
6417 hoist. It makes no sense to hoist things which are computed
6418 in only one BB, and doing so tends to pessimize register
6419 allocation. One could increase this value to try harder
6420 to avoid any possible code expansion due to register
6421 allocation issues; however experiments have shown that
6422 the vast majority of hoistable expressions are only movable
6423 from two successors, so raising this threshold is likely
6424 to nullify any benefit we get from code hoisting. */
6425 if (hoistable > 1)
6426 {
6427 SET_BIT (hoist_exprs[bb->index], i);
6428 found = 1;
6429 }
6430 }
6431 }
6432 /* If we found nothing to hoist, then quit now. */
6433 if (! found)
6434 {
6435 free (domby);
6436 continue;
6437 }
6438
6439 /* Loop over all the hoistable expressions. */
6440 for (i = 0; i < hoist_exprs[bb->index]->n_bits; i++)
6441 {
6442 /* We want to insert the expression into BB only once, so
6443 note when we've inserted it. */
6444 insn_inserted_p = 0;
6445
6446 /* These tests should be the same as the tests above. */
6447 if (TEST_BIT (hoist_vbeout[bb->index], i))
6448 {
6449 /* We've found a potentially hoistable expression, now
6450 we look at every block BB dominates to see if it
6451 computes the expression. */
6452 for (j = 0; j < domby_len; j++)
6453 {
6454 dominated = domby[j];
6455 /* Ignore self dominance. */
6456 if (bb == dominated)
6457 continue;
6458
6459 /* We've found a dominated block, now see if it computes
6460 the busy expression and whether or not moving that
6461 expression to the "beginning" of that block is safe. */
6462 if (!TEST_BIT (antloc[dominated->index], i))
6463 continue;
6464
6465 /* The expression is computed in the dominated block and
6466 it would be safe to compute it at the start of the
6467 dominated block. Now we have to determine if the
6468 expression would reach the dominated block if it was
6469 placed at the end of BB. */
6470 if (hoist_expr_reaches_here_p (bb, i, dominated, NULL))
6471 {
6472 struct expr *expr = index_map[i];
6473 struct occr *occr = expr->antic_occr;
6474 rtx insn;
6475 rtx set;
6476
6477 /* Find the right occurrence of this expression. */
6478 while (occr && BLOCK_FOR_INSN (occr->insn) != dominated)
6479 occr = occr->next;
6480
6481 /* Should never happen. */
6482 if (!occr)
6483 abort ();
6484
6485 insn = occr->insn;
6486
6487 set = single_set (insn);
6488 if (! set)
6489 abort ();
6490
6491 /* Create a pseudo-reg to store the result of reaching
6492 expressions into. Get the mode for the new pseudo
6493 from the mode of the original destination pseudo. */
6494 if (expr->reaching_reg == NULL)
6495 expr->reaching_reg
6496 = gen_reg_rtx (GET_MODE (SET_DEST (set)));
6497
6498 gcse_emit_move_after (expr->reaching_reg, SET_DEST (set), insn);
6499 delete_insn (insn);
6500 occr->deleted_p = 1;
6501 if (!insn_inserted_p)
6502 {
6503 insert_insn_end_bb (index_map[i], bb, 0);
6504 insn_inserted_p = 1;
6505 }
6506 }
6507 }
6508 }
6509 }
6510 free (domby);
6511 }
6512
6513 free (index_map);
6514 }
6515
6516 /* Top level routine to perform one code hoisting (aka unification) pass
6517
6518 Return nonzero if a change was made. */
6519
6520 static int
6521 one_code_hoisting_pass (void)
6522 {
6523 int changed = 0;
6524
6525 alloc_hash_table (max_cuid, &expr_hash_table, 0);
6526 compute_hash_table (&expr_hash_table);
6527 if (gcse_file)
6528 dump_hash_table (gcse_file, "Code Hoisting Expressions", &expr_hash_table);
6529
6530 if (expr_hash_table.n_elems > 0)
6531 {
6532 alloc_code_hoist_mem (last_basic_block, expr_hash_table.n_elems);
6533 compute_code_hoist_data ();
6534 hoist_code ();
6535 free_code_hoist_mem ();
6536 }
6537
6538 free_hash_table (&expr_hash_table);
6539
6540 return changed;
6541 }
6542
6543 /* Here we provide the things required to do store motion towards
6544 the exit. In order for this to be effective, gcse also needed to
6545 be taught how to move a load when it is killed only by a store to itself.
6546
6547 int i;
6548 float a[10];
6549
6550 void foo(float scale)
6551 {
6552 for (i=0; i<10; i++)
6553 a[i] *= scale;
6554 }
6555
6556 'i' is both loaded and stored to in the loop. Normally, gcse cannot move
6557 the load out since it's live around the loop, and stored at the bottom
6558 of the loop.
6559
6560 The 'Load Motion' referred to and implemented in this file is
6561 an enhancement to gcse which, when using edge based lcm, recognizes
6562 this situation and allows gcse to move the load out of the loop.
6563
6564 Once gcse has hoisted the load, store motion can then push this
6565 load towards the exit, and we end up with no loads or stores of 'i'
6566 in the loop. */
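
/* Continuing the example (illustrative only): after load motion hoists
   the load of 'i' and store motion sinks the store, the function
   effectively becomes

     reg = i;                  hoisted load, may later prove dead
     for (reg = 0; reg < 10; reg++)
       a[reg] *= scale;
     i = reg;                  sunk store

   with no memory reference to 'i' left inside the loop.  */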
6567
6568 /* This will search the ldst list for a matching expression. If it
6569 doesn't find one, we create one and initialize it. */
6570
6571 static struct ls_expr *
6572 ldst_entry (rtx x)
6573 {
6574 int do_not_record_p = 0;
6575 struct ls_expr * ptr;
6576 unsigned int hash;
6577
6578 hash = hash_expr_1 (x, GET_MODE (x), & do_not_record_p);
6579
6580 for (ptr = pre_ldst_mems; ptr != NULL; ptr = ptr->next)
6581 if (ptr->hash_index == hash && expr_equiv_p (ptr->pattern, x))
6582 return ptr;
6583
6584 ptr = xmalloc (sizeof (struct ls_expr));
6585
6586 ptr->next = pre_ldst_mems;
6587 ptr->expr = NULL;
6588 ptr->pattern = x;
6589 ptr->pattern_regs = NULL_RTX;
6590 ptr->loads = NULL_RTX;
6591 ptr->stores = NULL_RTX;
6592 ptr->reaching_reg = NULL_RTX;
6593 ptr->invalid = 0;
6594 ptr->index = 0;
6595 ptr->hash_index = hash;
6596 pre_ldst_mems = ptr;
6597
6598 return ptr;
6599 }
6600
6601 /* Free up an individual ldst entry. */
6602
6603 static void
6604 free_ldst_entry (struct ls_expr * ptr)
6605 {
6606 free_INSN_LIST_list (& ptr->loads);
6607 free_INSN_LIST_list (& ptr->stores);
6608
6609 free (ptr);
6610 }
6611
6612 /* Free up all memory associated with the ldst list. */
6613
6614 static void
6615 free_ldst_mems (void)
6616 {
6617 while (pre_ldst_mems)
6618 {
6619 struct ls_expr * tmp = pre_ldst_mems;
6620
6621 pre_ldst_mems = pre_ldst_mems->next;
6622
6623 free_ldst_entry (tmp);
6624 }
6625
6626 pre_ldst_mems = NULL;
6627 }
6628
6629 /* Dump debugging info about the ldst list. */
6630
6631 static void
6632 print_ldst_list (FILE * file)
6633 {
6634 struct ls_expr * ptr;
6635
6636 fprintf (file, "LDST list: \n");
6637
6638 for (ptr = first_ls_expr(); ptr != NULL; ptr = next_ls_expr (ptr))
6639 {
6640 fprintf (file, " Pattern (%3d): ", ptr->index);
6641
6642 print_rtl (file, ptr->pattern);
6643
6644 fprintf (file, "\n Loads : ");
6645
6646 if (ptr->loads)
6647 print_rtl (file, ptr->loads);
6648 else
6649 fprintf (file, "(nil)");
6650
6651 fprintf (file, "\n Stores : ");
6652
6653 if (ptr->stores)
6654 print_rtl (file, ptr->stores);
6655 else
6656 fprintf (file, "(nil)");
6657
6658 fprintf (file, "\n\n");
6659 }
6660
6661 fprintf (file, "\n");
6662 }
6663
6664 /* Return the entry for X in the list of ldst only expressions, or NULL. */
6665
6666 static struct ls_expr *
6667 find_rtx_in_ldst (rtx x)
6668 {
6669 struct ls_expr * ptr;
6670
6671 for (ptr = pre_ldst_mems; ptr != NULL; ptr = ptr->next)
6672 if (expr_equiv_p (ptr->pattern, x) && ! ptr->invalid)
6673 return ptr;
6674
6675 return NULL;
6676 }
6677
6678 /* Assign each element of the list of mems a monotonically increasing value. */
6679
6680 static int
6681 enumerate_ldsts (void)
6682 {
6683 struct ls_expr * ptr;
6684 int n = 0;
6685
6686 for (ptr = pre_ldst_mems; ptr != NULL; ptr = ptr->next)
6687 ptr->index = n++;
6688
6689 return n;
6690 }
6691
6692 /* Return first item in the list. */
6693
6694 static inline struct ls_expr *
6695 first_ls_expr (void)
6696 {
6697 return pre_ldst_mems;
6698 }
6699
6700 /* Return the next item in the list after the specified one. */
6701
6702 static inline struct ls_expr *
6703 next_ls_expr (struct ls_expr * ptr)
6704 {
6705 return ptr->next;
6706 }
6707
6708 /* Load Motion for loads which only kill themselves. */
6709
6710 /* Return true if x is a simple MEM operation, with no registers or
6711 side effects. These are the types of loads we consider for the
6712 ld_motion list, otherwise we let the usual aliasing take care of it. */
6713
6714 static int
6715 simple_mem (rtx x)
6716 {
6717 if (GET_CODE (x) != MEM)
6718 return 0;
6719
6720 if (MEM_VOLATILE_P (x))
6721 return 0;
6722
6723 if (GET_MODE (x) == BLKmode)
6724 return 0;
6725
6726 /* If we are handling exceptions, we must be careful with memory references
6727 that may trap. If we are not, the behavior is undefined, so we may just
6728 continue. */
6729 if (flag_non_call_exceptions && may_trap_p (x))
6730 return 0;
6731
6732 if (side_effects_p (x))
6733 return 0;
6734
6735 /* Do not consider function arguments passed on stack. */
6736 if (reg_mentioned_p (stack_pointer_rtx, x))
6737 return 0;
6738
6739 if (flag_float_store && FLOAT_MODE_P (GET_MODE (x)))
6740 return 0;
6741
6742 return 1;
6743 }
6744
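/* For instance, (mem:SI (symbol_ref "a")) passes the tests above,
   while a volatile MEM, a BLKmode MEM, or one whose address mentions
   the stack pointer is rejected.  (An illustrative restatement of the
   checks, not an additional rule.)  */
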
6745 /* Make sure there isn't a buried reference in this pattern anywhere.
6746 If there is, invalidate the entry for it since we're not capable
6747 of fixing it up just yet. We have to be sure we know about ALL
6748 loads since the aliasing code will allow all entries in the
6749 ld_motion list to not alias one another. If we miss a load, we will get
6750 the wrong value since gcse might common it and we won't know to
6751 fix it up. */
6752
6753 static void
6754 invalidate_any_buried_refs (rtx x)
6755 {
6756 const char * fmt;
6757 int i, j;
6758 struct ls_expr * ptr;
6759
6760 /* Invalidate it in the list. */
6761 if (GET_CODE (x) == MEM && simple_mem (x))
6762 {
6763 ptr = ldst_entry (x);
6764 ptr->invalid = 1;
6765 }
6766
6767 /* Recursively process the insn. */
6768 fmt = GET_RTX_FORMAT (GET_CODE (x));
6769
6770 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
6771 {
6772 if (fmt[i] == 'e')
6773 invalidate_any_buried_refs (XEXP (x, i));
6774 else if (fmt[i] == 'E')
6775 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
6776 invalidate_any_buried_refs (XVECEXP (x, i, j));
6777 }
6778 }
6779
6780 /* Find all the 'simple' MEMs which are used in LOADs and STORES. Simple
6781 being defined as MEM loads and stores to symbols, with no side effects
6782 and no registers in the expression. For a MEM destination, we also
6783 check that the insn is still valid if we replace the destination with a
6784 REG, as is done in update_ld_motion_stores. If there are any uses/defs
6785 which don't match this criteria, they are invalidated and trimmed out
6786 later. */
6787
6788 static void
6789 compute_ld_motion_mems (void)
6790 {
6791 struct ls_expr * ptr;
6792 basic_block bb;
6793 rtx insn;
6794
6795 pre_ldst_mems = NULL;
6796
6797 FOR_EACH_BB (bb)
6798 {
6799 for (insn = BB_HEAD (bb);
6800 insn && insn != NEXT_INSN (BB_END (bb));
6801 insn = NEXT_INSN (insn))
6802 {
6803 if (INSN_P (insn))
6804 {
6805 if (GET_CODE (PATTERN (insn)) == SET)
6806 {
6807 rtx src = SET_SRC (PATTERN (insn));
6808 rtx dest = SET_DEST (PATTERN (insn));
6809
6810 /* Check for a simple LOAD... */
6811 if (GET_CODE (src) == MEM && simple_mem (src))
6812 {
6813 ptr = ldst_entry (src);
6814 if (GET_CODE (dest) == REG)
6815 ptr->loads = alloc_INSN_LIST (insn, ptr->loads);
6816 else
6817 ptr->invalid = 1;
6818 }
6819 else
6820 {
6821 /* Make sure there isn't a buried load somewhere. */
6822 invalidate_any_buried_refs (src);
6823 }
6824
6825 /* Check for stores. Don't worry about aliased ones, they
6826 will block any movement we might do later. We only care
6827 about this exact pattern since those are the only
6828 circumstance in which we will ignore the aliasing info. */
6829 if (GET_CODE (dest) == MEM && simple_mem (dest))
6830 {
6831 ptr = ldst_entry (dest);
6832
6833 if (GET_CODE (src) != MEM
6834 && GET_CODE (src) != ASM_OPERANDS
6835 /* Check for REG manually since want_to_gcse_p
6836 returns 0 for all REGs. */
6837 && (REG_P (src) || want_to_gcse_p (src)))
6838 ptr->stores = alloc_INSN_LIST (insn, ptr->stores);
6839 else
6840 ptr->invalid = 1;
6841 }
6842 }
6843 else
6844 invalidate_any_buried_refs (PATTERN (insn));
6845 }
6846 }
6847 }
6848 }
6849
6850 /* Remove any references that have been either invalidated or are not in the
6851 expression list for pre gcse. */
6852
6853 static void
6854 trim_ld_motion_mems (void)
6855 {
6856 struct ls_expr * * last = & pre_ldst_mems;
6857 struct ls_expr * ptr = pre_ldst_mems;
6858
6859 while (ptr != NULL)
6860 {
6861 struct expr * expr;
6862
6863 /* Delete if entry has been made invalid. */
6864 if (! ptr->invalid)
6865 {
6866 /* Delete if we cannot find this mem in the expression list. */
6867 unsigned int hash = ptr->hash_index % expr_hash_table.size;
6868
6869 for (expr = expr_hash_table.table[hash];
6870 expr != NULL;
6871 expr = expr->next_same_hash)
6872 if (expr_equiv_p (expr->expr, ptr->pattern))
6873 break;
6874 }
6875 else
6876 expr = (struct expr *) 0;
6877
6878 if (expr)
6879 {
6880 /* Set the expression field if we are keeping it. */
6881 ptr->expr = expr;
6882 last = & ptr->next;
6883 ptr = ptr->next;
6884 }
6885 else
6886 {
6887 *last = ptr->next;
6888 free_ldst_entry (ptr);
6889 ptr = * last;
6890 }
6891 }
6892
6893 /* Show the world what we've found. */
6894 if (gcse_file && pre_ldst_mems != NULL)
6895 print_ldst_list (gcse_file);
6896 }
6897
6898 /* This routine will take an expression which we are replacing with
6899 a reaching register, and update any stores that are needed if
6900 that expression is in the ld_motion list. Stores are updated by
6901 copying their SRC to the reaching register, and then storing
6902 the reaching register into the store location. This keeps the
6903 correct value in the reaching register for the loads. */
6904
6905 static void
6906 update_ld_motion_stores (struct expr * expr)
6907 {
6908 struct ls_expr * mem_ptr;
6909
6910 if ((mem_ptr = find_rtx_in_ldst (expr->expr)))
6911 {
6912 /* We can try to find just the REACHED stores, but it shouldn't
6913 matter to set the reaching reg everywhere... some might be
6914 dead and should be eliminated later. */
6915
6916 /* We replace (set mem expr) with (set reg expr) (set mem reg)
6917 where reg is the reaching reg used in the load. We checked in
6918 compute_ld_motion_mems that we can replace (set mem expr) with
6919 (set reg expr) in that insn. */
6920 rtx list = mem_ptr->stores;
6921
6922 for ( ; list != NULL_RTX; list = XEXP (list, 1))
6923 {
6924 rtx insn = XEXP (list, 0);
6925 rtx pat = PATTERN (insn);
6926 rtx src = SET_SRC (pat);
6927 rtx reg = expr->reaching_reg;
6928 rtx copy, new;
6929
6930 /* If we've already copied it, continue. */
6931 if (expr->reaching_reg == src)
6932 continue;
6933
6934 if (gcse_file)
6935 {
6936 fprintf (gcse_file, "PRE: store updated with reaching reg ");
6937 print_rtl (gcse_file, expr->reaching_reg);
6938 fprintf (gcse_file, ":\n ");
6939 print_inline_rtx (gcse_file, insn, 8);
6940 fprintf (gcse_file, "\n");
6941 }
6942
6943 copy = gen_move_insn ( reg, copy_rtx (SET_SRC (pat)));
6944 new = emit_insn_before (copy, insn);
6945 record_one_set (REGNO (reg), new);
6946 SET_SRC (pat) = reg;
6947
6948 /* Un-recognize this pattern since it's probably different now. */
6949 INSN_CODE (insn) = -1;
6950 gcse_create_count++;
6951 }
6952 }
6953 }
6954
6955 /* Store motion code. */
6956
6957 #define ANTIC_STORE_LIST(x) ((x)->loads)
6958 #define AVAIL_STORE_LIST(x) ((x)->stores)
6959 #define LAST_AVAIL_CHECK_FAILURE(x) ((x)->reaching_reg)
6960
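/* Note that these macros reuse ls_expr fields whose load motion
   meaning no longer applies: during store motion, "loads" holds the
   anticipatable stores, "stores" the available ones, and
   "reaching_reg" the insn recording the last availability failure.  */
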
6961 /* This is used to communicate the target bitvector we want to use in the
6962 reg_set_info routine when called via the note_stores mechanism. */
6963 static int * regvec;
6964
6965 /* And current insn, for the same routine. */
6966 static rtx compute_store_table_current_insn;
6967
6968 /* Used in computing the reverse edge graph bit vectors. */
6969 static sbitmap * st_antloc;
6970
6971 /* Global holding the number of store expressions we are dealing with. */
6972 static int num_stores;
6973
6974 /* Checks to set if we need to mark a register set. Called from
6975 note_stores. */
6976
6977 static void
6978 reg_set_info (rtx dest, rtx setter ATTRIBUTE_UNUSED,
6979 void *data)
6980 {
6981 sbitmap bb_reg = data;
6982
6983 if (GET_CODE (dest) == SUBREG)
6984 dest = SUBREG_REG (dest);
6985
6986 if (GET_CODE (dest) == REG)
6987 {
6988 regvec[REGNO (dest)] = INSN_UID (compute_store_table_current_insn);
6989 if (bb_reg)
6990 SET_BIT (bb_reg, REGNO (dest));
6991 }
6992 }
6993
6994 /* Clear any mark that says that this insn sets dest. Called from
6995 note_stores. */
6996
6997 static void
6998 reg_clear_last_set (rtx dest, rtx setter ATTRIBUTE_UNUSED,
6999 void *data)
7000 {
7001 int *dead_vec = data;
7002
7003 if (GET_CODE (dest) == SUBREG)
7004 dest = SUBREG_REG (dest);
7005
7006 if (GET_CODE (dest) == REG &&
7007 dead_vec[REGNO (dest)] == INSN_UID (compute_store_table_current_insn))
7008 dead_vec[REGNO (dest)] = 0;
7009 }
7010
7011 /* Return false if any of the registers in list X are killed
7012 by a set recorded in the array REGS_SET. */
7013
7014 static bool
7015 store_ops_ok (rtx x, int *regs_set)
7016 {
7017 rtx reg;
7018
7019 for (; x; x = XEXP (x, 1))
7020 {
7021 reg = XEXP (x, 0);
7022 if (regs_set[REGNO(reg)])
7023 return false;
7024 }
7025
7026 return true;
7027 }
7028
7029 /* Returns a list of registers mentioned in X. */
7030 static rtx
7031 extract_mentioned_regs (rtx x)
7032 {
7033 return extract_mentioned_regs_helper (x, NULL_RTX);
7034 }
7035
7036 /* Helper for extract_mentioned_regs; ACCUM is used to accumulate used
7037 registers. */
7038 static rtx
7039 extract_mentioned_regs_helper (rtx x, rtx accum)
7040 {
7041 int i;
7042 enum rtx_code code;
7043 const char * fmt;
7044
7045 /* Repeat is used to turn tail-recursion into iteration. */
7046 repeat:
7047
7048 if (x == 0)
7049 return accum;
7050
7051 code = GET_CODE (x);
7052 switch (code)
7053 {
7054 case REG:
7055 return alloc_EXPR_LIST (0, x, accum);
7056
7057 case MEM:
7058 x = XEXP (x, 0);
7059 goto repeat;
7060
7061 case PRE_DEC:
7062 case PRE_INC:
7063 case POST_DEC:
7064 case POST_INC:
7065 /* We do not run this function with arguments having side effects. */
7066 abort ();
7067
7068 case PC:
7069 case CC0: /*FIXME*/
7070 case CONST:
7071 case CONST_INT:
7072 case CONST_DOUBLE:
7073 case CONST_VECTOR:
7074 case SYMBOL_REF:
7075 case LABEL_REF:
7076 case ADDR_VEC:
7077 case ADDR_DIFF_VEC:
7078 return accum;
7079
7080 default:
7081 break;
7082 }
7083
7084 i = GET_RTX_LENGTH (code) - 1;
7085 fmt = GET_RTX_FORMAT (code);
7086
7087 for (; i >= 0; i--)
7088 {
7089 if (fmt[i] == 'e')
7090 {
7091 rtx tem = XEXP (x, i);
7092
7093 /* If we are about to do the last recursive call
7094 needed at this level, change it into iteration. */
7095 if (i == 0)
7096 {
7097 x = tem;
7098 goto repeat;
7099 }
7100
7101 accum = extract_mentioned_regs_helper (tem, accum);
7102 }
7103 else if (fmt[i] == 'E')
7104 {
7105 int j;
7106
7107 for (j = 0; j < XVECLEN (x, i); j++)
7108 accum = extract_mentioned_regs_helper (XVECEXP (x, i, j), accum);
7109 }
7110 }
7111
7112 return accum;
7113 }
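
/* For example (hypothetical register numbers), for the store destination
   (mem:SI (plus:SI (reg:SI 100) (reg:SI 101))), the MEM case unwraps to
   the address and the result is
   (expr_list (reg:SI 100) (expr_list (reg:SI 101) nil)).  */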

/* Determine whether INSN is a MEM store pattern that we will consider moving.
   REGS_SET_BEFORE is nonzero for registers set before (and including) the
   current insn; REGS_SET_AFTER is nonzero for registers set after (and
   including) the insn in this basic block.  We must be passing through BB
   from head to end, as we are using this fact to speed things up.

   The results are stored this way:

   -- the first anticipatable expression is added into ANTIC_STORE_LIST
   -- if the processed expression is not anticipatable, NULL_RTX is added
      there instead, so that we can use it as an indicator that no further
      expression of this type may be anticipatable
   -- if the expression is available, it is added as the head of
      AVAIL_STORE_LIST; consequently, all of them but this head are dead
      and may be deleted
   -- if the expression is not available, the insn that causes it to be
      unavailable is stored in LAST_AVAIL_CHECK_FAILURE.

   Things are complicated a bit by the fact that there may already be stores
   to the same MEM from other blocks; also the caller must take care of the
   necessary cleanup of the temporary markers after the end of the basic
   block.  */
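
/* For example, in a hypothetical block containing two stores to the same
   location:

       insn 1:   MEM <- a
       insn 2:   MEM <- b

   only insn 1 is tested for anticipatability (insn 2 finds a list entry
   from this block and skips the test), and after the block is processed
   AVAIL_STORE_LIST has insn 2 at its head; the earlier store is redundant
   at the block end and is removed later, in build_store_vectors.  */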

static void
find_moveable_store (rtx insn, int *regs_set_before, int *regs_set_after)
{
  struct ls_expr * ptr;
  rtx dest, set, tmp;
  int check_anticipatable, check_available;
  basic_block bb = BLOCK_FOR_INSN (insn);

  set = single_set (insn);
  if (!set)
    return;

  dest = SET_DEST (set);

  if (GET_CODE (dest) != MEM || MEM_VOLATILE_P (dest)
      || GET_MODE (dest) == BLKmode)
    return;

  if (side_effects_p (dest))
    return;

  /* If we are handling exceptions, we must be careful with memory references
     that may trap.  If we are not, the behavior is undefined, so we may just
     continue.  */
  if (flag_non_call_exceptions && may_trap_p (dest))
    return;

  ptr = ldst_entry (dest);
  if (!ptr->pattern_regs)
    ptr->pattern_regs = extract_mentioned_regs (dest);

  /* Do not check for anticipatability if we either found one anticipatable
     store already, or tested for one and found out that it was killed.  */
  check_anticipatable = 0;
  if (!ANTIC_STORE_LIST (ptr))
    check_anticipatable = 1;
  else
    {
      tmp = XEXP (ANTIC_STORE_LIST (ptr), 0);
      if (tmp != NULL_RTX
          && BLOCK_FOR_INSN (tmp) != bb)
        check_anticipatable = 1;
    }
  if (check_anticipatable)
    {
      if (store_killed_before (dest, ptr->pattern_regs, insn, bb,
                               regs_set_before))
        tmp = NULL_RTX;
      else
        tmp = insn;
      ANTIC_STORE_LIST (ptr) = alloc_INSN_LIST (tmp,
                                                ANTIC_STORE_LIST (ptr));
    }

  /* It is not necessary to check whether the store is available if we did
     it successfully before; if we failed before, do not bother to check
     until we reach the insn that caused us to fail.  */
  check_available = 0;
  if (!AVAIL_STORE_LIST (ptr))
    check_available = 1;
  else
    {
      tmp = XEXP (AVAIL_STORE_LIST (ptr), 0);
      if (BLOCK_FOR_INSN (tmp) != bb)
        check_available = 1;
    }
  if (check_available)
    {
      /* Check whether we have already reached the insn at which the check
         failed last time.  */
      if (LAST_AVAIL_CHECK_FAILURE (ptr))
        {
          for (tmp = BB_END (bb);
               tmp != insn && tmp != LAST_AVAIL_CHECK_FAILURE (ptr);
               tmp = PREV_INSN (tmp))
            continue;
          if (tmp == insn)
            check_available = 0;
        }
      else
        check_available = store_killed_after (dest, ptr->pattern_regs, insn,
                                              bb, regs_set_after,
                                              &LAST_AVAIL_CHECK_FAILURE (ptr));
    }
  if (!check_available)
    AVAIL_STORE_LIST (ptr) = alloc_INSN_LIST (insn, AVAIL_STORE_LIST (ptr));
}

/* Find available and anticipatable stores.  */

static int
compute_store_table (void)
{
  int ret;
  basic_block bb;
  unsigned regno;
  rtx insn, pat, tmp;
  int *last_set_in, *already_set;
  struct ls_expr * ptr, **prev_next_ptr_ptr;

  max_gcse_regno = max_reg_num ();

  reg_set_in_block = sbitmap_vector_alloc (last_basic_block,
                                           max_gcse_regno);
  sbitmap_vector_zero (reg_set_in_block, last_basic_block);
  pre_ldst_mems = 0;
  last_set_in = xcalloc (max_gcse_regno, sizeof (int));
  already_set = xmalloc (sizeof (int) * max_gcse_regno);
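
  /* The two passes below use LAST_SET_IN and ALREADY_SET as follows:
     after the first pass, LAST_SET_IN[R] is the UID of the last insn in
     the block that sets register R; the second pass gradually clears it,
     so that when an insn is being examined it describes the sets at or
     after that insn, while ALREADY_SET[R] marks the registers set at or
     before it.  */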

  /* Find all the stores we care about.  */
  FOR_EACH_BB (bb)
    {
      /* First compute the registers set in this block.  */
      regvec = last_set_in;

      for (insn = BB_HEAD (bb);
           insn != NEXT_INSN (BB_END (bb));
           insn = NEXT_INSN (insn))
        {
          if (! INSN_P (insn))
            continue;

          if (GET_CODE (insn) == CALL_INSN)
            {
              bool clobbers_all = false;
#ifdef NON_SAVING_SETJMP
              if (NON_SAVING_SETJMP
                  && find_reg_note (insn, REG_SETJMP, NULL_RTX))
                clobbers_all = true;
#endif

              for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
                if (clobbers_all
                    || TEST_HARD_REG_BIT (regs_invalidated_by_call, regno))
                  {
                    last_set_in[regno] = INSN_UID (insn);
                    SET_BIT (reg_set_in_block[bb->index], regno);
                  }
            }

          pat = PATTERN (insn);
          compute_store_table_current_insn = insn;
          note_stores (pat, reg_set_info, reg_set_in_block[bb->index]);
        }

      /* Now find the stores.  */
      memset (already_set, 0, sizeof (int) * max_gcse_regno);
      regvec = already_set;
      for (insn = BB_HEAD (bb);
           insn != NEXT_INSN (BB_END (bb));
           insn = NEXT_INSN (insn))
        {
          if (! INSN_P (insn))
            continue;

          if (GET_CODE (insn) == CALL_INSN)
            {
              bool clobbers_all = false;
#ifdef NON_SAVING_SETJMP
              if (NON_SAVING_SETJMP
                  && find_reg_note (insn, REG_SETJMP, NULL_RTX))
                clobbers_all = true;
#endif

              for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
                if (clobbers_all
                    || TEST_HARD_REG_BIT (regs_invalidated_by_call, regno))
                  already_set[regno] = 1;
            }

          pat = PATTERN (insn);
          note_stores (pat, reg_set_info, NULL);

          /* Now that we've marked regs, look for stores.  */
          find_moveable_store (insn, already_set, last_set_in);

          /* Unmark regs that are no longer set.  */
          compute_store_table_current_insn = insn;
          note_stores (pat, reg_clear_last_set, last_set_in);
          if (GET_CODE (insn) == CALL_INSN)
            {
              bool clobbers_all = false;
#ifdef NON_SAVING_SETJMP
              if (NON_SAVING_SETJMP
                  && find_reg_note (insn, REG_SETJMP, NULL_RTX))
                clobbers_all = true;
#endif

              for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
                if ((clobbers_all
                     || TEST_HARD_REG_BIT (regs_invalidated_by_call, regno))
                    && last_set_in[regno] == INSN_UID (insn))
                  last_set_in[regno] = 0;
            }
        }

#ifdef ENABLE_CHECKING
      /* last_set_in should now be all-zero.  */
      for (regno = 0; regno < max_gcse_regno; regno++)
        if (last_set_in[regno] != 0)
          abort ();
#endif

      /* Clear temporary marks.  */
      for (ptr = first_ls_expr (); ptr != NULL; ptr = next_ls_expr (ptr))
        {
          LAST_AVAIL_CHECK_FAILURE (ptr) = NULL_RTX;
          if (ANTIC_STORE_LIST (ptr)
              && (tmp = XEXP (ANTIC_STORE_LIST (ptr), 0)) == NULL_RTX)
            ANTIC_STORE_LIST (ptr) = XEXP (ANTIC_STORE_LIST (ptr), 1);
        }
    }

  /* Remove the stores that are not available anywhere, as there will
     be no opportunity to optimize them.  */
  for (ptr = pre_ldst_mems, prev_next_ptr_ptr = &pre_ldst_mems;
       ptr != NULL;
       ptr = *prev_next_ptr_ptr)
    {
      if (!AVAIL_STORE_LIST (ptr))
        {
          *prev_next_ptr_ptr = ptr->next;
          free_ldst_entry (ptr);
        }
      else
        prev_next_ptr_ptr = &ptr->next;
    }

  ret = enumerate_ldsts ();

  if (gcse_file)
    {
      fprintf (gcse_file, "ST_avail and ST_antic (shown under loads..)\n");
      print_ldst_list (gcse_file);
    }

  free (last_set_in);
  free (already_set);
  return ret;
}

/* Check to see if the load X is aliased with STORE_PATTERN.
   AFTER is true if we are checking the case when STORE_PATTERN occurs
   after the load X.  */
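
/* For example, if X loads from the location that STORE_PATTERN writes,
   a store placed after the load is a write-after-read (anti) dependence,
   while a store placed before it would supply the value the load reads
   (a true dependence).  */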

static bool
load_kills_store (rtx x, rtx store_pattern, int after)
{
  if (after)
    return anti_dependence (x, store_pattern);
  else
    return true_dependence (store_pattern, GET_MODE (store_pattern), x,
                            rtx_addr_varies_p);
}

/* Go through the entire insn X, looking for any loads which might alias
   STORE_PATTERN.  Return true if found.
   AFTER is true if we are checking the case when STORE_PATTERN occurs
   after the insn X.  */

static bool
find_loads (rtx x, rtx store_pattern, int after)
{
  const char * fmt;
  int i, j;
  int ret = false;

  if (!x)
    return false;

  if (GET_CODE (x) == SET)
    x = SET_SRC (x);

  if (GET_CODE (x) == MEM)
    {
      if (load_kills_store (x, store_pattern, after))
        return true;
    }

  /* Recursively process the insn.  */
  fmt = GET_RTX_FORMAT (GET_CODE (x));

  for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0 && !ret; i--)
    {
      if (fmt[i] == 'e')
        ret |= find_loads (XEXP (x, i), store_pattern, after);
      else if (fmt[i] == 'E')
        for (j = XVECLEN (x, i) - 1; j >= 0; j--)
          ret |= find_loads (XVECEXP (x, i, j), store_pattern, after);
    }
  return ret;
}

/* Check if INSN kills the store pattern X (is aliased with it).
   AFTER is true if we are checking the case when store X occurs
   after the insn.  Return true if it does.  */

static bool
store_killed_in_insn (rtx x, rtx x_regs, rtx insn, int after)
{
  rtx reg, base, note;

  if (!INSN_P (insn))
    return false;

  if (GET_CODE (insn) == CALL_INSN)
    {
      /* A normal or pure call might read from the pattern,
         but a const call will not.  */
      if (! CONST_OR_PURE_CALL_P (insn) || pure_call_p (insn))
        return true;

      /* But even a const call reads its parameters.  Check whether the
         base of some register used in the mem is the stack pointer.  */
      for (reg = x_regs; reg; reg = XEXP (reg, 1))
        {
          base = find_base_term (XEXP (reg, 0));
          if (!base
              || (GET_CODE (base) == ADDRESS
                  && GET_MODE (base) == Pmode
                  && XEXP (base, 0) == stack_pointer_rtx))
            return true;
        }

      return false;
    }

  if (GET_CODE (PATTERN (insn)) == SET)
    {
      rtx pat = PATTERN (insn);
      rtx dest = SET_DEST (pat);

      if (GET_CODE (dest) == SIGN_EXTRACT
          || GET_CODE (dest) == ZERO_EXTRACT)
        dest = XEXP (dest, 0);

      /* Check for memory stores to aliased objects.  */
      if (GET_CODE (dest) == MEM
          && !expr_equiv_p (dest, x))
        {
          if (after)
            {
              if (output_dependence (dest, x))
                return true;
            }
          else
            {
              if (output_dependence (x, dest))
                return true;
            }
        }
      if (find_loads (SET_SRC (pat), x, after))
        return true;
    }
  else if (find_loads (PATTERN (insn), x, after))
    return true;

  /* If this insn has a REG_EQUAL or REG_EQUIV note referencing a memory
     location aliased with X, then this insn kills X.  */
  note = find_reg_equal_equiv_note (insn);
  if (! note)
    return false;
  note = XEXP (note, 0);

  /* However, if the note represents a must alias rather than a may
     alias relationship, then it does not kill X.  */
  if (expr_equiv_p (note, x))
    return false;

  /* See if there are any aliased loads in the note.  */
  return find_loads (note, x, after);
}

/* Returns true if the expression X is loaded or clobbered on or after INSN
   within basic block BB.  REGS_SET_AFTER is nonzero for registers set in
   or after the insn.  X_REGS is the list of registers mentioned in X.  If
   the store is killed, return in FAIL_INSN the insn in which the kill
   occurs, or NULL_RTX if we cannot tell where it happens.  */

static bool
store_killed_after (rtx x, rtx x_regs, rtx insn, basic_block bb,
                    int *regs_set_after, rtx *fail_insn)
{
  rtx last = BB_END (bb), act;

  if (!store_ops_ok (x_regs, regs_set_after))
    {
      /* We do not know where it will happen.  */
      if (fail_insn)
        *fail_insn = NULL_RTX;
      return true;
    }

  /* Scan from the end, so that fail_insn is determined correctly.  */
  for (act = last; act != PREV_INSN (insn); act = PREV_INSN (act))
    if (store_killed_in_insn (x, x_regs, act, false))
      {
        if (fail_insn)
          *fail_insn = act;
        return true;
      }

  return false;
}

/* Returns true if the expression X is loaded or clobbered on or before INSN
   within basic block BB.  X_REGS is the list of registers mentioned in X.
   REGS_SET_BEFORE is nonzero for registers set before or in this insn.  */
static bool
store_killed_before (rtx x, rtx x_regs, rtx insn, basic_block bb,
                     int *regs_set_before)
{
  rtx first = BB_HEAD (bb);

  if (!store_ops_ok (x_regs, regs_set_before))
    return true;

  for ( ; insn != PREV_INSN (first); insn = PREV_INSN (insn))
    if (store_killed_in_insn (x, x_regs, insn, true))
      return true;

  return false;
}

/* Fill in available, anticipatable, transparent and kill vectors in
   STORE_DATA, based on lists of available and anticipatable stores.  */
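
/* Roughly, in LCM terms (applied to the reversed flowgraph):
   ST_ANTLOC[B] -- the store occurs in block B and is anticipatable at the
                   start of B (no kill precedes it within B);
   AE_GEN[B]    -- the store occurs in B and reaches the end of B;
   AE_KILL[B]   -- something in B kills the store;
   TRANSP[B]    -- B is transparent: nothing in B kills the store.  */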

static void
build_store_vectors (void)
{
  basic_block bb;
  int *regs_set_in_block;
  rtx insn, st;
  struct ls_expr * ptr;
  unsigned regno;

  /* Build the gen_vector.  This is any store in the table which is not killed
     by aliasing later in its block.  */
  ae_gen = sbitmap_vector_alloc (last_basic_block, num_stores);
  sbitmap_vector_zero (ae_gen, last_basic_block);

  st_antloc = sbitmap_vector_alloc (last_basic_block, num_stores);
  sbitmap_vector_zero (st_antloc, last_basic_block);

  for (ptr = first_ls_expr (); ptr != NULL; ptr = next_ls_expr (ptr))
    {
      for (st = AVAIL_STORE_LIST (ptr); st != NULL; st = XEXP (st, 1))
        {
          insn = XEXP (st, 0);
          bb = BLOCK_FOR_INSN (insn);

          /* If we've already seen an available expression in this block,
             we can delete this one (it occurs earlier in the block).  We'll
             copy the SRC expression to an unused register in case there
             are any side effects.  */
          if (TEST_BIT (ae_gen[bb->index], ptr->index))
            {
              rtx r = gen_reg_rtx (GET_MODE (ptr->pattern));
              if (gcse_file)
                fprintf (gcse_file, "Removing redundant store:\n");
              replace_store_insn (r, XEXP (st, 0), bb, ptr);
              continue;
            }
          SET_BIT (ae_gen[bb->index], ptr->index);
        }

      for (st = ANTIC_STORE_LIST (ptr); st != NULL; st = XEXP (st, 1))
        {
          insn = XEXP (st, 0);
          bb = BLOCK_FOR_INSN (insn);
          SET_BIT (st_antloc[bb->index], ptr->index);
        }
    }

  ae_kill = sbitmap_vector_alloc (last_basic_block, num_stores);
  sbitmap_vector_zero (ae_kill, last_basic_block);

  transp = sbitmap_vector_alloc (last_basic_block, num_stores);
  sbitmap_vector_zero (transp, last_basic_block);
  regs_set_in_block = xmalloc (sizeof (int) * max_gcse_regno);

  FOR_EACH_BB (bb)
    {
      for (regno = 0; regno < max_gcse_regno; regno++)
        regs_set_in_block[regno] = TEST_BIT (reg_set_in_block[bb->index],
                                             regno);

      for (ptr = first_ls_expr (); ptr != NULL; ptr = next_ls_expr (ptr))
        {
          if (store_killed_after (ptr->pattern, ptr->pattern_regs,
                                  BB_HEAD (bb), bb, regs_set_in_block, NULL))
            {
              /* It should not be necessary to consider the expression
                 killed if it is both anticipatable and available.  */
              if (!TEST_BIT (st_antloc[bb->index], ptr->index)
                  || !TEST_BIT (ae_gen[bb->index], ptr->index))
                SET_BIT (ae_kill[bb->index], ptr->index);
            }
          else
            SET_BIT (transp[bb->index], ptr->index);
        }
    }

  free (regs_set_in_block);

  if (gcse_file)
    {
      dump_sbitmap_vector (gcse_file, "st_antloc", "", st_antloc,
                           last_basic_block);
      dump_sbitmap_vector (gcse_file, "st_kill", "", ae_kill,
                           last_basic_block);
      dump_sbitmap_vector (gcse_file, "Transpt", "", transp,
                           last_basic_block);
      dump_sbitmap_vector (gcse_file, "st_avloc", "", ae_gen,
                           last_basic_block);
    }
}

/* Insert an instruction at the beginning of a basic block, and update
   the BB_HEAD if needed.  */

static void
insert_insn_start_bb (rtx insn, basic_block bb)
{
  /* Insert at start of successor block.  */
  rtx prev = PREV_INSN (BB_HEAD (bb));
  rtx before = BB_HEAD (bb);
  while (before != 0)
    {
      if (GET_CODE (before) != CODE_LABEL
          && (GET_CODE (before) != NOTE
              || NOTE_LINE_NUMBER (before) != NOTE_INSN_BASIC_BLOCK))
        break;
      prev = before;
      if (prev == BB_END (bb))
        break;
      before = NEXT_INSN (before);
    }

  insn = emit_insn_after_noloc (insn, prev);

  if (gcse_file)
    {
      fprintf (gcse_file, "STORE_MOTION insert store at start of BB %d:\n",
               bb->index);
      print_inline_rtx (gcse_file, insn, 6);
      fprintf (gcse_file, "\n");
    }
}

/* This routine will insert a store on an edge.  EXPR is the ldst entry for
   the memory reference, and E is the edge to insert it on.  Returns nonzero
   if an edge insertion was performed.  */
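
/* Note on the interplay with the rest of the pass: insert_store is called
   for each (expression, edge) bit set in PRE_INSERT_MAP by the LCM
   computation, and insns queued here with insert_insn_on_edge are only
   emitted once store_motion calls commit_edge_insertions, which splits
   the edges as needed.  */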

static int
insert_store (struct ls_expr * expr, edge e)
{
  rtx reg, insn;
  basic_block bb;
  edge tmp;

  /* We did all the deletes before this insert, so if we didn't delete a
     store, then we haven't set the reaching reg yet either.  */
  if (expr->reaching_reg == NULL_RTX)
    return 0;

  if (e->flags & EDGE_FAKE)
    return 0;

  reg = expr->reaching_reg;
  insn = gen_move_insn (copy_rtx (expr->pattern), reg);

  /* If we are inserting this expression on ALL predecessor edges of a BB,
     insert it at the start of the BB, and reset the insert bits on the other
     edges so we don't try to insert it on the other edges.  */
  bb = e->dest;
  for (tmp = e->dest->pred; tmp ; tmp = tmp->pred_next)
    if (!(tmp->flags & EDGE_FAKE))
      {
        int index = EDGE_INDEX (edge_list, tmp->src, tmp->dest);
        if (index == EDGE_INDEX_NO_EDGE)
          abort ();
        if (! TEST_BIT (pre_insert_map[index], expr->index))
          break;
      }

  /* If tmp is NULL, we found an insertion on every edge, blank the
     insertion vector for these edges, and insert at the start of the BB.  */
  if (!tmp && bb != EXIT_BLOCK_PTR)
    {
      for (tmp = e->dest->pred; tmp ; tmp = tmp->pred_next)
        {
          int index = EDGE_INDEX (edge_list, tmp->src, tmp->dest);
          RESET_BIT (pre_insert_map[index], expr->index);
        }
      insert_insn_start_bb (insn, bb);
      return 0;
    }

  /* We can't insert on this edge, so we'll insert at the head of the
     successor's block.  See Morgan, sec 10.5.  */
  if ((e->flags & EDGE_ABNORMAL) == EDGE_ABNORMAL)
    {
      insert_insn_start_bb (insn, bb);
      return 0;
    }

  insert_insn_on_edge (insn, e);

  if (gcse_file)
    {
      fprintf (gcse_file, "STORE_MOTION insert insn on edge (%d, %d):\n",
               e->src->index, e->dest->index);
      print_inline_rtx (gcse_file, insn, 6);
      fprintf (gcse_file, "\n");
    }

  return 1;
}

/* Remove, in blocks reachable from BB, any REG_EQUAL or REG_EQUIV notes
   that reference the memory location of SMEXPR, whose store was set in
   basic block BB.

   This could be rather expensive.  */
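
/* The loop below is a depth-first walk over the CFG using an explicit
   stack of edges rather than recursion; VISITED prevents rescanning a
   block.  In a block where the store is anticipatable, the scan stops at
   the store insn itself, since from that point on the recorded
   equivalence is presumably re-established by the store.  */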

static void
remove_reachable_equiv_notes (basic_block bb, struct ls_expr *smexpr)
{
  edge *stack = xmalloc (sizeof (edge) * n_basic_blocks), act;
  sbitmap visited = sbitmap_alloc (last_basic_block);
  int stack_top = 0;
  rtx last, insn, note;
  rtx mem = smexpr->pattern;

  sbitmap_zero (visited);
  act = bb->succ;

  while (1)
    {
      if (!act)
        {
          if (!stack_top)
            {
              free (stack);
              sbitmap_free (visited);
              return;
            }
          act = stack[--stack_top];
        }
      bb = act->dest;

      /* We used to continue the loop without scanning this block if the
         store expression was killed in it.  That is wrong: we could have
         had a REG_EQUAL note with the store expression appear in the block
         before the insn that killed the store expression, and that
         REG_EQUAL note needs to be removed, as it is invalid.  */
      if (bb == EXIT_BLOCK_PTR
          || TEST_BIT (visited, bb->index))
        {
          act = act->succ_next;
          continue;
        }
      SET_BIT (visited, bb->index);

      if (TEST_BIT (st_antloc[bb->index], smexpr->index))
        {
          for (last = ANTIC_STORE_LIST (smexpr);
               BLOCK_FOR_INSN (XEXP (last, 0)) != bb;
               last = XEXP (last, 1))
            continue;
          last = XEXP (last, 0);
        }
      else
        last = NEXT_INSN (BB_END (bb));

      for (insn = BB_HEAD (bb); insn != last; insn = NEXT_INSN (insn))
        if (INSN_P (insn))
          {
            note = find_reg_equal_equiv_note (insn);
            if (!note || !expr_equiv_p (XEXP (note, 0), mem))
              continue;

            if (gcse_file)
              fprintf (gcse_file,
                       "STORE_MOTION drop REG_EQUAL note at insn %d:\n",
                       INSN_UID (insn));
            remove_note (insn, note);
          }
      act = act->succ_next;
      if (bb->succ)
        {
          if (act)
            stack[stack_top++] = act;
          act = bb->succ;
        }
    }
}

/* This routine will replace a store with a SET to a specified register.  */

static void
replace_store_insn (rtx reg, rtx del, basic_block bb, struct ls_expr *smexpr)
{
  rtx insn, mem, note, set, ptr, pair;

  mem = smexpr->pattern;
  insn = gen_move_insn (reg, SET_SRC (single_set (del)));
  insn = emit_insn_after (insn, del);

  if (gcse_file)
    {
      fprintf (gcse_file,
               "STORE_MOTION delete insn in BB %d:\n ", bb->index);
      print_inline_rtx (gcse_file, del, 6);
      fprintf (gcse_file, "\nSTORE_MOTION replaced with insn:\n ");
      print_inline_rtx (gcse_file, insn, 6);
      fprintf (gcse_file, "\n");
    }

  for (ptr = ANTIC_STORE_LIST (smexpr); ptr; ptr = XEXP (ptr, 1))
    if (XEXP (ptr, 0) == del)
      {
        XEXP (ptr, 0) = insn;
        break;
      }

  /* Move the notes from the deleted insn to its replacement, and patch
     up the LIBCALL notes.  */
  REG_NOTES (insn) = REG_NOTES (del);

  note = find_reg_note (insn, REG_RETVAL, NULL_RTX);
  if (note)
    {
      pair = XEXP (note, 0);
      note = find_reg_note (pair, REG_LIBCALL, NULL_RTX);
      XEXP (note, 0) = insn;
    }
  note = find_reg_note (insn, REG_LIBCALL, NULL_RTX);
  if (note)
    {
      pair = XEXP (note, 0);
      note = find_reg_note (pair, REG_RETVAL, NULL_RTX);
      XEXP (note, 0) = insn;
    }

  delete_insn (del);

  /* Now we must handle REG_EQUAL notes whose contents are equal to the mem;
     they are no longer accurate provided that they are reached by this
     definition, so drop them.  */
  for (; insn != NEXT_INSN (BB_END (bb)); insn = NEXT_INSN (insn))
    if (INSN_P (insn))
      {
        set = single_set (insn);
        if (!set)
          continue;
        if (expr_equiv_p (SET_DEST (set), mem))
          return;
        note = find_reg_equal_equiv_note (insn);
        if (!note || !expr_equiv_p (XEXP (note, 0), mem))
          continue;

        if (gcse_file)
          fprintf (gcse_file,
                   "STORE_MOTION drop REG_EQUAL note at insn %d:\n",
                   INSN_UID (insn));
        remove_note (insn, note);
      }
  remove_reachable_equiv_notes (bb, smexpr);
}


/* Delete a store, but copy the value that would have been stored into
   the reaching_reg for later storing.  */

static void
delete_store (struct ls_expr * expr, basic_block bb)
{
  rtx reg, i, del;

  if (expr->reaching_reg == NULL_RTX)
    expr->reaching_reg = gen_reg_rtx (GET_MODE (expr->pattern));

  reg = expr->reaching_reg;

  for (i = AVAIL_STORE_LIST (expr); i; i = XEXP (i, 1))
    {
      del = XEXP (i, 0);
      if (BLOCK_FOR_INSN (del) == bb)
        {
          /* We know there is only one since we deleted redundant
             ones during the available computation.  */
          replace_store_insn (reg, del, bb, expr);
          break;
        }
    }
}

/* Free memory used by store motion.  */

static void
free_store_memory (void)
{
  free_ldst_mems ();

  if (ae_gen)
    sbitmap_vector_free (ae_gen);
  if (ae_kill)
    sbitmap_vector_free (ae_kill);
  if (transp)
    sbitmap_vector_free (transp);
  if (st_antloc)
    sbitmap_vector_free (st_antloc);
  if (pre_insert_map)
    sbitmap_vector_free (pre_insert_map);
  if (pre_delete_map)
    sbitmap_vector_free (pre_delete_map);
  if (reg_set_in_block)
    sbitmap_vector_free (reg_set_in_block);

  ae_gen = ae_kill = transp = st_antloc = NULL;
  pre_insert_map = pre_delete_map = reg_set_in_block = NULL;
}

/* Perform store motion.  Much like gcse, except we move expressions the
   other way by looking at the flowgraph in reverse.  */
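
/* The overall flow, roughly:
   1. compute_store_table  -- per-block lists of anticipatable and
                              available stores;
   2. build_store_vectors  -- st_antloc, ae_gen, ae_kill and transp;
   3. pre_edge_rev_lcm     -- lazy code motion on the reverse flowgraph,
                              yielding pre_insert_map and pre_delete_map;
   4. delete_store and insert_store rewrite the insn stream;
   5. commit_edge_insertions materializes the queued edge insertions.  */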

static void
store_motion (void)
{
  basic_block bb;
  int x;
  struct ls_expr * ptr;
  int update_flow = 0;

  if (gcse_file)
    {
      fprintf (gcse_file, "before store motion\n");
      print_rtl (gcse_file, get_insns ());
    }

  init_alias_analysis ();

  /* Find all the available and anticipatable stores.  */
  num_stores = compute_store_table ();
  if (num_stores == 0)
    {
      sbitmap_vector_free (reg_set_in_block);
      end_alias_analysis ();
      return;
    }

  /* Now compute kill & transp vectors.  */
  build_store_vectors ();
  add_noreturn_fake_exit_edges ();
  connect_infinite_loops_to_exit ();

  edge_list = pre_edge_rev_lcm (gcse_file, num_stores, transp, ae_gen,
                                st_antloc, ae_kill, &pre_insert_map,
                                &pre_delete_map);

  /* Now we want to insert the new stores which are going to be needed.  */
  for (ptr = first_ls_expr (); ptr != NULL; ptr = next_ls_expr (ptr))
    {
      FOR_EACH_BB (bb)
        if (TEST_BIT (pre_delete_map[bb->index], ptr->index))
          delete_store (ptr, bb);

      for (x = 0; x < NUM_EDGES (edge_list); x++)
        if (TEST_BIT (pre_insert_map[x], ptr->index))
          update_flow |= insert_store (ptr, INDEX_EDGE (edge_list, x));
    }

  if (update_flow)
    commit_edge_insertions ();

  free_store_memory ();
  free_edge_list (edge_list);
  remove_fake_edges ();
  end_alias_analysis ();
}


/* Entry point for jump bypassing optimization pass.  */

int
bypass_jumps (FILE *file)
{
  int changed;

  /* We do not construct an accurate cfg in functions which call
     setjmp, so just punt to be safe.  */
  if (current_function_calls_setjmp)
    return 0;

  /* For calling dump_foo fns from gdb.  */
  debug_stderr = stderr;
  gcse_file = file;

  /* Identify the basic block information for this function, including
     successors and predecessors.  */
  max_gcse_regno = max_reg_num ();

  if (file)
    dump_flow_info (file);

  /* Return if there's nothing to do, or it is too expensive.  */
  if (n_basic_blocks <= 1 || is_too_expensive (_("jump bypassing disabled")))
    return 0;

  gcc_obstack_init (&gcse_obstack);
  bytes_used = 0;

  /* We need alias.  */
  init_alias_analysis ();

  /* Record where pseudo-registers are set.  This data is kept accurate
     during each pass.  ??? We could also record hard-reg information here
     [since it's unchanging], however it is currently done during hash table
     computation.

     It may be tempting to compute MEM set information here too, but MEM sets
     will be subject to code motion one day and thus we need to compute
     information about memory sets when we build the hash tables.  */

  alloc_reg_set_mem (max_gcse_regno);
  compute_sets (get_insns ());

  max_gcse_regno = max_reg_num ();
  alloc_gcse_mem (get_insns ());
  changed = one_cprop_pass (1, 1, 1);
  free_gcse_mem ();

  if (file)
    {
      fprintf (file, "BYPASS of %s: %d basic blocks, ",
               current_function_name (), n_basic_blocks);
      fprintf (file, "%d bytes\n\n", bytes_used);
    }

  obstack_free (&gcse_obstack, NULL);
  free_reg_set_mem ();

  /* We are finished with alias.  */
  end_alias_analysis ();
  allocate_reg_info (max_reg_num (), FALSE, FALSE);

  return changed;
}

/* Return true if the graph is too expensive to optimize.  PASS is the
   optimization about to be performed.  */

static bool
is_too_expensive (const char *pass)
{
  /* Trying to perform global optimizations on flow graphs which have
     a high connectivity will take a long time and is unlikely to be
     particularly useful.

     In normal circumstances a cfg should have about twice as many
     edges as blocks.  But we do not want to punish small functions
     which have a couple of switch statements.  Rather than simply
     thresholding the number of blocks, we use something with a more
     graceful degradation.  */
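
  /* For example, a function with 1000 basic blocks is only punted on
     once it has more than 20000 + 1000 * 4 = 24000 edges, i.e. more
     than 24 edges per block on average.  */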
  if (n_edges > 20000 + n_basic_blocks * 4)
    {
      if (warn_disabled_optimization)
        warning ("%s: %d basic blocks and %d edges/basic block",
                 pass, n_basic_blocks, n_edges / n_basic_blocks);

      return true;
    }

  /* If allocating memory for the cprop bitmap would take up too much
     storage it's better just to disable the optimization.  */
  if ((n_basic_blocks
       * SBITMAP_SET_SIZE (max_reg_num ())
       * sizeof (SBITMAP_ELT_TYPE)) > MAX_GCSE_MEMORY)
    {
      if (warn_disabled_optimization)
        warning ("%s: %d basic blocks and %d registers",
                 pass, n_basic_blocks, max_reg_num ());

      return true;
    }

  return false;
}

#include "gt-gcse.h"