1 /* Global common subexpression elimination/Partial redundancy elimination
2 and global constant/copy propagation for GNU compiler.
3 Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002
4 Free Software Foundation, Inc.
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 2, or (at your option) any later
11 version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING. If not, write to the Free
20 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
21 02111-1307, USA. */
22
23 /* TODO
24 - reordering of memory allocation and freeing to be more space efficient
25 - do rough calc of how many regs are needed in each block, and a rough
26 calc of how many regs are available in each class and use that to
27 throttle back the code in cases where RTX_COST is minimal.
28 - a store to the same address as a load does not kill the load if the
29 source of the store is also the destination of the load. Handling this
30 allows more load motion, particularly out of loops (see the sketch below).
31 - ability to realloc sbitmap vectors would allow one initial computation
32 of reg_set_in_block with only subsequent additions, rather than
33 recomputing it for each pass
34
35 */
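
/* Illustrative sketch for the store/load item in the TODO list above
   (hypothetical insns; the register numbers are invented).  A store whose
   source is the destination of an earlier load from the same address

       (set (reg:SI 100) (mem:SI (reg:SI 101)))      load
       ...
       (set (mem:SI (reg:SI 101)) (reg:SI 100))      store the value back

   leaves memory unchanged, so the loaded value is still valid afterwards
   and the load could be moved (e.g. out of a loop) even though a naive
   kill computation treats any store to that address as a kill.  */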
36
37 /* References searched while implementing this.
38
39 Compilers: Principles, Techniques, and Tools
40 Aho, Sethi, Ullman
41 Addison-Wesley, 1988
42
43 Global Optimization by Suppression of Partial Redundancies
44 E. Morel, C. Renvoise
45 Communications of the ACM, Vol. 22, Num. 2, Feb. 1979
46
47 A Portable Machine-Independent Global Optimizer - Design and Measurements
48 Frederick Chow
49 Stanford Ph.D. thesis, Dec. 1983
50
51 A Fast Algorithm for Code Movement Optimization
52 D.M. Dhamdhere
53 SIGPLAN Notices, Vol. 23, Num. 10, Oct. 1988
54
55 A Solution to a Problem with Morel and Renvoise's
56 Global Optimization by Suppression of Partial Redundancies
57 K-H Drechsler, M.P. Stadel
58 ACM TOPLAS, Vol. 10, Num. 4, Oct. 1988
59
60 Practical Adaptation of the Global Optimization
61 Algorithm of Morel and Renvoise
62 D.M. Dhamdhere
63 ACM TOPLAS, Vol. 13, Num. 2. Apr. 1991
64
65 Efficiently Computing Static Single Assignment Form and the Control
66 Dependence Graph
67 R. Cytron, J. Ferrante, B.K. Rosen, M.N. Wegman, and F.K. Zadeck
68 ACM TOPLAS, Vol. 13, Num. 4, Oct. 1991
69
70 Lazy Code Motion
71 J. Knoop, O. Ruthing, B. Steffen
72 ACM SIGPLAN Notices Vol. 27, Num. 7, Jul. 1992, '92 Conference on PLDI
73
74 What's In a Region? Or Computing Control Dependence Regions in Near-Linear
75 Time for Reducible Flow Control
76 Thomas Ball
77 ACM Letters on Programming Languages and Systems,
78 Vol. 2, Num. 1-4, Mar-Dec 1993
79
80 An Efficient Representation for Sparse Sets
81 Preston Briggs, Linda Torczon
82 ACM Letters on Programming Languages and Systems,
83 Vol. 2, Num. 1-4, Mar-Dec 1993
84
85 A Variation of Knoop, Ruthing, and Steffen's Lazy Code Motion
86 K-H Drechsler, M.P. Stadel
87 ACM SIGPLAN Notices, Vol. 28, Num. 5, May 1993
88
89 Partial Dead Code Elimination
90 J. Knoop, O. Ruthing, B. Steffen
91 ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994
92
93 Effective Partial Redundancy Elimination
94 P. Briggs, K.D. Cooper
95 ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994
96
97 The Program Structure Tree: Computing Control Regions in Linear Time
98 R. Johnson, D. Pearson, K. Pingali
99 ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994
100
101 Optimal Code Motion: Theory and Practice
102 J. Knoop, O. Ruthing, B. Steffen
103 ACM TOPLAS, Vol. 16, Num. 4, Jul. 1994
104
105 The power of assignment motion
106 J. Knoop, O. Ruthing, B. Steffen
107 ACM SIGPLAN Notices Vol. 30, Num. 6, Jun. 1995, '95 Conference on PLDI
108
109 Global code motion / global value numbering
110 C. Click
111 ACM SIGPLAN Notices Vol. 30, Num. 6, Jun. 1995, '95 Conference on PLDI
112
113 Value Driven Redundancy Elimination
114 L.T. Simpson
115 Rice University Ph.D. thesis, Apr. 1996
116
117 Value Numbering
118 L.T. Simpson
119 Massively Scalar Compiler Project, Rice University, Sep. 1996
120
121 High Performance Compilers for Parallel Computing
122 Michael Wolfe
123 Addison-Wesley, 1996
124
125 Advanced Compiler Design and Implementation
126 Steven Muchnick
127 Morgan Kaufmann, 1997
128
129 Building an Optimizing Compiler
130 Robert Morgan
131 Digital Press, 1998
132
133 People wishing to speed up the code here should read:
134 Elimination Algorithms for Data Flow Analysis
135 B.G. Ryder, M.C. Paull
136 ACM Computing Surveys, Vol. 18, Num. 3, Sep. 1986
137
138 How to Analyze Large Programs Efficiently and Informatively
139 D.M. Dhamdhere, B.K. Rosen, F.K. Zadeck
140 ACM SIGPLAN Notices Vol. 27, Num. 7, Jul. 1992, '92 Conference on PLDI
141
142 People wishing to do something different can find various possibilities
143 in the above papers and elsewhere.
144 */
145
146 #include "config.h"
147 #include "system.h"
148 #include "toplev.h"
149
150 #include "rtl.h"
151 #include "tm_p.h"
152 #include "regs.h"
153 #include "hard-reg-set.h"
154 #include "flags.h"
155 #include "real.h"
156 #include "insn-config.h"
157 #include "recog.h"
158 #include "basic-block.h"
159 #include "output.h"
160 #include "function.h"
161 #include "expr.h"
162 #include "except.h"
163 #include "ggc.h"
164 #include "params.h"
165 #include "cselib.h"
166
167 #include "obstack.h"
168
169 /* Propagate flow information through back edges and thus enable PRE's
170 moving loop invariant calculations out of loops.
171
172 Originally this tended to create worse overall code, but several
173 improvements during the development of PRE seem to have made following
174 back edges generally a win.
175
176 Note much of the loop invariant code motion done here would normally
177 be done by loop.c, which has more heuristics for when to move invariants
178 out of loops. At some point we might need to move some of those
179 heuristics into gcse.c. */
180
181 /* We support GCSE via Partial Redundancy Elimination. PRE optimizations
182 are a superset of those done by GCSE.
183
184 We perform the following steps:
185
186 1) Compute basic block information.
187
188 2) Compute table of places where registers are set.
189
190 3) Perform copy/constant propagation.
191
192 4) Perform global cse.
193
194 5) Perform another pass of copy/constant propagation.
195
196 Two passes of copy/constant propagation are done because the first one
197 enables more GCSE and the second one helps to clean up the copies that
198 GCSE creates. This is needed more for PRE than for Classic because Classic
199 GCSE will try to use an existing register containing the common
200 subexpression rather than create a new one. This is harder to do for PRE
201 because of the code motion (which Classic GCSE doesn't do).
202
203 Expressions we are interested in GCSE-ing are of the form
204 (set (pseudo-reg) (expression)).
205 Function want_to_gcse_p says what these are.
206
207 PRE handles moving invariant expressions out of loops (by treating them as
208 partially redundant).
209
210 Eventually it would be nice to replace cse.c/gcse.c with SSA (static single
211 assignment) based GVN (global value numbering). L. T. Simpson's paper
212 (Rice University) on value numbering is a useful reference for this.
213
214 **********************
215
216 We used to support multiple passes but there are diminishing returns in
217 doing so. The first pass usually makes 90% of the changes that are doable.
218 A second pass can make a few more changes made possible by the first pass.
219 Experiments show any further passes don't make enough changes to justify
220 the expense.
221
222 A study of spec92 using an unlimited number of passes:
223 [1 pass] = 1208 substitutions, [2] = 577, [3] = 202, [4] = 192, [5] = 83,
224 [6] = 34, [7] = 17, [8] = 9, [9] = 4, [10] = 4, [11] = 2,
225 [12] = 2, [13] = 1, [15] = 1, [16] = 2, [41] = 1
226
227 It was found that doing copy propagation between each pass enables further
228 substitutions.
229
230 PRE is quite expensive in complicated functions because the DFA can take
231 a while to converge. Hence we only perform one pass. The parameter
232 max-gcse-passes can be modified if one wants to experiment.
233
234 **********************
235
236 The steps for PRE are:
237
238 1) Build the hash table of expressions we wish to GCSE (expr_hash_table).
239
240 2) Perform the data flow analysis for PRE.
241
242 3) Delete the redundant instructions
243
244 4) Insert the required copies [if any] that make the partially
245 redundant instructions fully redundant.
246
247 5) For other reaching expressions, insert an instruction to copy the value
248 to a newly created pseudo that will reach the redundant instruction.
249
250 The deletion is done first so that when we do insertions we
251 know which pseudo reg to use.
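
   As a tiny illustration of steps 3-5 (a sketch only; the source fragment
   and the pseudo below are invented, not taken from a real dump), suppose
   "a + b" is computed on one arm of a conditional and again after the join:

       if (p)              if (p)
         x = a + b;          { x = a + b; t = x; }    [step 5: copy to new pseudo]
       else          ==>   else
         ...                 t = a + b;               [step 4: insert copy]
       y = a + b;          y = t;                     [step 3: delete redundancy]

   The partially redundant computation after the join becomes fully
   redundant once the insertion on the "else" path is made, and is then
   replaced by a copy from the new pseudo T.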
252
253 Various papers have argued that PRE DFA is expensive (O(n^2)) and others
254 argue it is not. The number of iterations for the algorithm to converge
255 is typically 2-4 so I don't view it as that expensive (relatively speaking).
256
257 PRE GCSE depends heavily on the second CSE pass to clean up the copies
258 we create. To make an expression reach the place where it's redundant,
259 the result of the expression is copied to a new register, and the redundant
260 expression is deleted by replacing it with this new register. Classic GCSE
261 doesn't have this problem as much, because it computes the reaching defs of
262 each register in each block and thus can try to use an existing register.
263
264 **********************
265
266 The code is kept simple by using small functions for simple tasks, even
267 when a function is only called in one place. This may [or may not]
268 measurably slow things down by creating more function call overhead than
269 is strictly necessary. The source is laid out so that it's trivial to
270 make the affected functions inline, so one can measure what speedup, if
271 any, can be achieved; later, when things settle, the code can be
272 rearranged.
273
274 Help stamp out big monolithic functions! */
275
276 /* GCSE global vars. */
277
278 /* -dG dump file. */
279 static FILE *gcse_file;
280
281 /* Note whether or not we should run jump optimization after gcse. We
282 want to do this for two cases.
283
284 * If we changed any jumps via cprop.
285
286 * If we added any labels via edge splitting. */
287
288 static int run_jump_opt_after_gcse;
289
290 /* Bitmaps are normally not included in debugging dumps.
291 However it's useful to be able to print them from GDB.
292 We could create special functions for this, but it's simpler to
293 just allow passing stderr to the dump_foo fns. Since stderr can
294 be a macro, we store a copy here. */
295 static FILE *debug_stderr;
296
297 /* An obstack for our working variables. */
298 static struct obstack gcse_obstack;
299
300 /* Nonzero for each mode that supports (set (reg) (reg)).
301 This is trivially true for integer and floating point values.
302 It may or may not be true for condition codes. */
303 static char can_copy_p[(int) NUM_MACHINE_MODES];
304
305 /* Nonzero if can_copy_p has been initialized. */
306 static int can_copy_init_p;
307
308 struct reg_use {rtx reg_rtx; };
309
310 /* Hash table of expressions. */
311
312 struct expr
313 {
314 /* The expression (SET_SRC for expressions, PATTERN for assignments). */
315 rtx expr;
316 /* Index in the available expression bitmaps. */
317 int bitmap_index;
318 /* Next entry with the same hash. */
319 struct expr *next_same_hash;
320 /* List of anticipatable occurrences in basic blocks in the function.
321 An "anticipatable occurrence" is one that is the first occurrence in the
322 basic block, the operands are not modified in the basic block prior
323 to the occurrence and the output is not used between the start of
324 the block and the occurrence. */
325 struct occr *antic_occr;
326 /* List of available occurrences in basic blocks in the function.
327 An "available occurrence" is one that is the last occurrence in the
328 basic block and the operands are not modified by following statements in
329 the basic block [including this insn]. */
330 struct occr *avail_occr;
331 /* Non-null if the computation is PRE redundant.
332 The value is the newly created pseudo-reg to record a copy of the
333 expression in all the places that reach the redundant copy. */
334 rtx reaching_reg;
335 };
336
337 /* Occurrence of an expression.
338 There is one per basic block. If a pattern appears more than once, the
339 last appearance is used [or the first, for anticipatable expressions]. */
340
341 struct occr
342 {
343 /* Next occurrence of this expression. */
344 struct occr *next;
345 /* The insn that computes the expression. */
346 rtx insn;
347 /* Nonzero if this [anticipatable] occurrence has been deleted. */
348 char deleted_p;
349 /* Nonzero if this [available] occurrence has been copied to
350 reaching_reg. */
351 /* ??? This is mutually exclusive with deleted_p, so they could share
352 the same byte. */
353 char copied_p;
354 };
355
356 /* Expression and copy propagation hash tables.
357 Each hash table is an array of buckets.
358 ??? It is known that if it were an array of entries, structure elements
359 `next_same_hash' and `bitmap_index' wouldn't be necessary. However, it is
360 not clear whether in the final analysis a sufficient amount of memory would
361 be saved as the size of the available expression bitmaps would be larger
362 [one could build a mapping table without holes afterwards though].
363 Someday I'll perform the computation and figure it out. */
364
365 struct hash_table
366 {
367 /* The table itself.
368 This is an array of `expr_hash_table_size' elements. */
369 struct expr **table;
370
371 /* Size of the hash table, in elements. */
372 unsigned int size;
373
374 /* Number of hash table elements. */
375 unsigned int n_elems;
376
377 /* Whether the table is the expression hash table or the copy propagation one. */
378 int set_p;
379 };
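
/* A sketch of how an entry is found in one of these tables; lookup_expr
   below works along these lines (the snippet is illustrative, not a
   quotation of it):

       unsigned hash = hash_expr (x, GET_MODE (x), &do_not_record, table->size);
       struct expr *e;
       for (e = table->table[hash]; e != NULL; e = e->next_same_hash)
         if (expr_equiv_p (e->expr, x))
           break;

   i.e. hash to a bucket, then walk the `next_same_hash' chain comparing
   entries with expr_equiv_p.  */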
380
381 /* Expression hash table. */
382 static struct hash_table expr_hash_table;
383
384 /* Copy propagation hash table. */
385 static struct hash_table set_hash_table;
386
387 /* Mapping of uids to cuids.
388 Only real insns get cuids. */
389 static int *uid_cuid;
390
391 /* Highest UID in UID_CUID. */
392 static int max_uid;
393
394 /* Get the cuid of an insn. */
395 #ifdef ENABLE_CHECKING
396 #define INSN_CUID(INSN) (INSN_UID (INSN) > max_uid ? (abort (), 0) : uid_cuid[INSN_UID (INSN)])
397 #else
398 #define INSN_CUID(INSN) (uid_cuid[INSN_UID (INSN)])
399 #endif
400
401 /* Number of cuids. */
402 static int max_cuid;
403
404 /* Mapping of cuids to insns. */
405 static rtx *cuid_insn;
406
407 /* Get insn from cuid. */
408 #define CUID_INSN(CUID) (cuid_insn[CUID])
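
/* An illustrative picture of the mapping (the insn stream and all numbers
   here are invented): UIDs may have gaps and are assigned to notes as
   well, while CUIDs are dense and only meaningful for real insns:

       insn stream:  NOTE   insn   insn   NOTE   jump_insn
       INSN_UID:       7      9     12     13       20
       INSN_CUID:      0      0      1      2        2

   A non-insn simply inherits the cuid that the next real insn will get
   (see alloc_gcse_mem), so INSN_CUID should only be asked of real insns.  */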
409
410 /* Maximum register number in function prior to doing gcse + 1.
411 Registers created during this pass have regno >= max_gcse_regno.
412 This is named with "gcse" so as not to collide with a global of the same name. */
413 static unsigned int max_gcse_regno;
414
415 /* Table of registers that are modified.
416
417 For each register, each element is a list of places where the pseudo-reg
418 is set.
419
420 For simplicity, GCSE is done on sets of pseudo-regs only. PRE GCSE only
421 requires knowledge of which blocks kill which regs [and thus could use
422 a bitmap instead of the lists `reg_set_table' uses].
423
424 `reg_set_table' could be turned into an array of bitmaps (num-bbs x
425 num-regs) [however perhaps it may be useful to keep the data as is]. One
426 advantage of recording things this way is that `reg_set_table' is fairly
427 sparse with respect to pseudo regs but for hard regs could be fairly dense
428 [relatively speaking]. And recording sets of pseudo-regs in lists speeds
429 up functions like compute_transp since in the case of pseudo-regs we only
430 need to iterate over the number of times a pseudo-reg is set, not over the
431 number of basic blocks [clearly there is a bit of a slow down in the cases
432 where a pseudo is set more than once in a block, however it is believed
433 that the net effect is to speed things up]. This isn't done for hard-regs
434 because recording call-clobbered hard-regs in `reg_set_table' at each
435 function call can consume a fair bit of memory, and iterating over
436 hard-regs stored this way in compute_transp will be more expensive. */
437
438 typedef struct reg_set
439 {
440 /* The next setting of this register. */
441 struct reg_set *next;
442 /* The insn where it was set. */
443 rtx insn;
444 } reg_set;
445
446 static reg_set **reg_set_table;
447
448 /* Size of `reg_set_table'.
449 The table starts out at max_gcse_regno + slop, and is enlarged as
450 necessary. */
451 static int reg_set_table_size;
452
453 /* Amount to grow `reg_set_table' by when it's full. */
454 #define REG_SET_TABLE_SLOP 100
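
/* A sketch of the access pattern described above (roughly what
   compute_transp does for a pseudo-reg; TRANSP and INDX here stand for
   that function's arguments, and the snippet itself is only illustrative):

       reg_set *r;
       for (r = reg_set_table[regno]; r != NULL; r = r->next)
         RESET_BIT (transp[BLOCK_NUM (r->insn)], indx);

   i.e. one list element is visited per set of the pseudo, instead of
   scanning every basic block for it.  */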
455
456 /* This is a list of expressions which are MEMs and will be used by load
457 or store motion.
458 Load motion tracks MEMs which aren't killed by
459 anything except themselves (i.e., loads and stores to a single location).
460 We can then allow movement of these MEM refs with a little special
461 allowance (all stores copy the same value to the reaching reg used
462 for the loads). This means all values used to store into memory must have
463 no side effects so we can re-issue the setter value.
464 Store motion uses this structure as an expression table to track stores
465 which look interesting, and might be movable towards the exit block. */
466
467 struct ls_expr
468 {
469 struct expr * expr; /* Gcse expression reference for LM. */
470 rtx pattern; /* Pattern of this mem. */
471 rtx loads; /* INSN list of loads seen. */
472 rtx stores; /* INSN list of stores seen. */
473 struct ls_expr * next; /* Next in the list. */
474 int invalid; /* Invalid for some reason. */
475 int index; /* If it maps to a bitmap index. */
476 int hash_index; /* Index when in a hash table. */
477 rtx reaching_reg; /* Register to use when re-writing. */
478 };
479
480 /* Head of the list of load/store memory refs. */
481 static struct ls_expr * pre_ldst_mems = NULL;
482
483 /* Bitmap containing one bit for each register in the program.
484 Used when performing GCSE to track which registers have been set since
485 the start of the basic block. */
486 static regset reg_set_bitmap;
487
488 /* For each block, a bitmap of registers set in the block.
489 This is used by expr_killed_p and compute_transp.
490 It is computed during hash table computation and not by compute_sets
491 as it includes registers added since the last pass (or between cprop and
492 gcse) and it's currently not easy to realloc sbitmap vectors. */
493 static sbitmap *reg_set_in_block;
494
495 /* Array, indexed by basic block number for a list of insns which modify
496 memory within that block. */
497 static rtx * modify_mem_list;
498 bitmap modify_mem_list_set;
499
500 /* This array parallels modify_mem_list, but is kept canonicalized. */
501 static rtx * canon_modify_mem_list;
502 bitmap canon_modify_mem_list_set;
503 /* Various variables for statistics gathering. */
504
505 /* Memory used in a pass.
506 This isn't intended to be absolutely precise. Its intent is only
507 to keep an eye on memory usage. */
508 static int bytes_used;
509
510 /* GCSE substitutions made. */
511 static int gcse_subst_count;
512 /* Number of copy instructions created. */
513 static int gcse_create_count;
514 /* Number of constants propagated. */
515 static int const_prop_count;
516 /* Number of copies propagated. */
517 static int copy_prop_count;
518
519 /* These variables are used by classic GCSE.
520 Normally they'd be defined a bit later, but `rd_gen' needs to
521 be declared sooner. */
522
523 /* Each block has a bitmap of each type.
524 The length of each block's bitmap is:
525
526 max_cuid - for reaching definitions
527 n_exprs - for available expressions
528
529 Thus we view the bitmaps as 2 dimensional arrays. i.e.
530 rd_kill[block_num][cuid_num]
531 ae_kill[block_num][expr_num] */
532
533 /* For reaching defs */
534 static sbitmap *rd_kill, *rd_gen, *reaching_defs, *rd_out;
535
536 /* for available exprs */
537 static sbitmap *ae_kill, *ae_gen, *ae_in, *ae_out;
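
/* For example (purely illustrative), under this two-dimensional view,
   testing whether expression EXPR_NUM is generated in block BB is just

       TEST_BIT (ae_gen[bb->index], expr_num)

   and rd_gen[bb->index] is indexed by cuid in the same way for reaching
   definitions.  */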
538
539 /* Objects of this type are passed around by the null-pointer check
540 removal routines. */
541 struct null_pointer_info
542 {
543 /* The basic block being processed. */
544 basic_block current_block;
545 /* The first register to be handled in this pass. */
546 unsigned int min_reg;
547 /* One greater than the last register to be handled in this pass. */
548 unsigned int max_reg;
549 sbitmap *nonnull_local;
550 sbitmap *nonnull_killed;
551 };
552
553 static void compute_can_copy PARAMS ((void));
554 static char *gmalloc PARAMS ((unsigned int));
555 static char *grealloc PARAMS ((char *, unsigned int));
556 static char *gcse_alloc PARAMS ((unsigned long));
557 static void alloc_gcse_mem PARAMS ((rtx));
558 static void free_gcse_mem PARAMS ((void));
559 static void alloc_reg_set_mem PARAMS ((int));
560 static void free_reg_set_mem PARAMS ((void));
561 static int get_bitmap_width PARAMS ((int, int, int));
562 static void record_one_set PARAMS ((int, rtx));
563 static void record_set_info PARAMS ((rtx, rtx, void *));
564 static void compute_sets PARAMS ((rtx));
565 static void hash_scan_insn PARAMS ((rtx, struct hash_table *, int));
566 static void hash_scan_set PARAMS ((rtx, rtx, struct hash_table *));
567 static void hash_scan_clobber PARAMS ((rtx, rtx, struct hash_table *));
568 static void hash_scan_call PARAMS ((rtx, rtx, struct hash_table *));
569 static int want_to_gcse_p PARAMS ((rtx));
570 static int oprs_unchanged_p PARAMS ((rtx, rtx, int));
571 static int oprs_anticipatable_p PARAMS ((rtx, rtx));
572 static int oprs_available_p PARAMS ((rtx, rtx));
573 static void insert_expr_in_table PARAMS ((rtx, enum machine_mode, rtx,
574 int, int, struct hash_table *));
575 static void insert_set_in_table PARAMS ((rtx, rtx, struct hash_table *));
576 static unsigned int hash_expr PARAMS ((rtx, enum machine_mode, int *, int));
577 static unsigned int hash_expr_1 PARAMS ((rtx, enum machine_mode, int *));
578 static unsigned int hash_string_1 PARAMS ((const char *));
579 static unsigned int hash_set PARAMS ((int, int));
580 static int expr_equiv_p PARAMS ((rtx, rtx));
581 static void record_last_reg_set_info PARAMS ((rtx, int));
582 static void record_last_mem_set_info PARAMS ((rtx));
583 static void record_last_set_info PARAMS ((rtx, rtx, void *));
584 static void compute_hash_table PARAMS ((struct hash_table *));
585 static void alloc_hash_table PARAMS ((int, struct hash_table *, int));
586 static void free_hash_table PARAMS ((struct hash_table *));
587 static void compute_hash_table_work PARAMS ((struct hash_table *));
588 static void dump_hash_table PARAMS ((FILE *, const char *,
589 struct hash_table *));
590 static struct expr *lookup_expr PARAMS ((rtx, struct hash_table *));
591 static struct expr *lookup_set PARAMS ((unsigned int, rtx, struct hash_table *));
592 static struct expr *next_set PARAMS ((unsigned int, struct expr *));
593 static void reset_opr_set_tables PARAMS ((void));
594 static int oprs_not_set_p PARAMS ((rtx, rtx));
595 static void mark_call PARAMS ((rtx));
596 static void mark_set PARAMS ((rtx, rtx));
597 static void mark_clobber PARAMS ((rtx, rtx));
598 static void mark_oprs_set PARAMS ((rtx));
599 static void alloc_cprop_mem PARAMS ((int, int));
600 static void free_cprop_mem PARAMS ((void));
601 static void compute_transp PARAMS ((rtx, int, sbitmap *, int));
602 static void compute_transpout PARAMS ((void));
603 static void compute_local_properties PARAMS ((sbitmap *, sbitmap *, sbitmap *,
604 struct hash_table *));
605 static void compute_cprop_data PARAMS ((void));
606 static void find_used_regs PARAMS ((rtx *, void *));
607 static int try_replace_reg PARAMS ((rtx, rtx, rtx));
608 static struct expr *find_avail_set PARAMS ((int, rtx));
609 static int cprop_jump PARAMS ((basic_block, rtx, rtx, rtx, rtx));
610 static void mems_conflict_for_gcse_p PARAMS ((rtx, rtx, void *));
611 static int load_killed_in_block_p PARAMS ((basic_block, int, rtx, int));
612 static void canon_list_insert PARAMS ((rtx, rtx, void *));
613 static int cprop_insn PARAMS ((rtx, int));
614 static int cprop PARAMS ((int));
615 static int one_cprop_pass PARAMS ((int, int));
616 static bool constprop_register PARAMS ((rtx, rtx, rtx, int));
617 static struct expr *find_bypass_set PARAMS ((int, int));
618 static bool reg_killed_on_edge PARAMS ((rtx, edge));
619 static int bypass_block PARAMS ((basic_block, rtx, rtx));
620 static int bypass_conditional_jumps PARAMS ((void));
621 static void alloc_pre_mem PARAMS ((int, int));
622 static void free_pre_mem PARAMS ((void));
623 static void compute_pre_data PARAMS ((void));
624 static int pre_expr_reaches_here_p PARAMS ((basic_block, struct expr *,
625 basic_block));
626 static void insert_insn_end_bb PARAMS ((struct expr *, basic_block, int));
627 static void pre_insert_copy_insn PARAMS ((struct expr *, rtx));
628 static void pre_insert_copies PARAMS ((void));
629 static int pre_delete PARAMS ((void));
630 static int pre_gcse PARAMS ((void));
631 static int one_pre_gcse_pass PARAMS ((int));
632 static void add_label_notes PARAMS ((rtx, rtx));
633 static void alloc_code_hoist_mem PARAMS ((int, int));
634 static void free_code_hoist_mem PARAMS ((void));
635 static void compute_code_hoist_vbeinout PARAMS ((void));
636 static void compute_code_hoist_data PARAMS ((void));
637 static int hoist_expr_reaches_here_p PARAMS ((basic_block, int, basic_block,
638 char *));
639 static void hoist_code PARAMS ((void));
640 static int one_code_hoisting_pass PARAMS ((void));
641 static void alloc_rd_mem PARAMS ((int, int));
642 static void free_rd_mem PARAMS ((void));
643 static void handle_rd_kill_set PARAMS ((rtx, int, basic_block));
644 static void compute_kill_rd PARAMS ((void));
645 static void compute_rd PARAMS ((void));
646 static void alloc_avail_expr_mem PARAMS ((int, int));
647 static void free_avail_expr_mem PARAMS ((void));
648 static void compute_ae_gen PARAMS ((struct hash_table *));
649 static int expr_killed_p PARAMS ((rtx, basic_block));
650 static void compute_ae_kill PARAMS ((sbitmap *, sbitmap *, struct hash_table *));
651 static int expr_reaches_here_p PARAMS ((struct occr *, struct expr *,
652 basic_block, int));
653 static rtx computing_insn PARAMS ((struct expr *, rtx));
654 static int def_reaches_here_p PARAMS ((rtx, rtx));
655 static int can_disregard_other_sets PARAMS ((struct reg_set **, rtx, int));
656 static int handle_avail_expr PARAMS ((rtx, struct expr *));
657 static int classic_gcse PARAMS ((void));
658 static int one_classic_gcse_pass PARAMS ((int));
659 static void invalidate_nonnull_info PARAMS ((rtx, rtx, void *));
660 static int delete_null_pointer_checks_1 PARAMS ((unsigned int *,
661 sbitmap *, sbitmap *,
662 struct null_pointer_info *));
663 static rtx process_insert_insn PARAMS ((struct expr *));
664 static int pre_edge_insert PARAMS ((struct edge_list *, struct expr **));
665 static int expr_reaches_here_p_work PARAMS ((struct occr *, struct expr *,
666 basic_block, int, char *));
667 static int pre_expr_reaches_here_p_work PARAMS ((basic_block, struct expr *,
668 basic_block, char *));
669 static struct ls_expr * ldst_entry PARAMS ((rtx));
670 static void free_ldst_entry PARAMS ((struct ls_expr *));
671 static void free_ldst_mems PARAMS ((void));
672 static void print_ldst_list PARAMS ((FILE *));
673 static struct ls_expr * find_rtx_in_ldst PARAMS ((rtx));
674 static int enumerate_ldsts PARAMS ((void));
675 static inline struct ls_expr * first_ls_expr PARAMS ((void));
676 static inline struct ls_expr * next_ls_expr PARAMS ((struct ls_expr *));
677 static int simple_mem PARAMS ((rtx));
678 static void invalidate_any_buried_refs PARAMS ((rtx));
679 static void compute_ld_motion_mems PARAMS ((void));
680 static void trim_ld_motion_mems PARAMS ((void));
681 static void update_ld_motion_stores PARAMS ((struct expr *));
682 static void reg_set_info PARAMS ((rtx, rtx, void *));
683 static int store_ops_ok PARAMS ((rtx, basic_block));
684 static void find_moveable_store PARAMS ((rtx));
685 static int compute_store_table PARAMS ((void));
686 static int load_kills_store PARAMS ((rtx, rtx));
687 static int find_loads PARAMS ((rtx, rtx));
688 static int store_killed_in_insn PARAMS ((rtx, rtx));
689 static int store_killed_after PARAMS ((rtx, rtx, basic_block));
690 static int store_killed_before PARAMS ((rtx, rtx, basic_block));
691 static void build_store_vectors PARAMS ((void));
692 static void insert_insn_start_bb PARAMS ((rtx, basic_block));
693 static int insert_store PARAMS ((struct ls_expr *, edge));
694 static void replace_store_insn PARAMS ((rtx, rtx, basic_block));
695 static void delete_store PARAMS ((struct ls_expr *,
696 basic_block));
697 static void free_store_memory PARAMS ((void));
698 static void store_motion PARAMS ((void));
699 static void free_insn_expr_list_list PARAMS ((rtx *));
700 static void clear_modify_mem_tables PARAMS ((void));
701 static void free_modify_mem_tables PARAMS ((void));
702 static rtx gcse_emit_move_after PARAMS ((rtx, rtx, rtx));
703 static void local_cprop_find_used_regs PARAMS ((rtx *, void *));
704 static bool do_local_cprop PARAMS ((rtx, rtx, int, rtx*));
705 static bool adjust_libcall_notes PARAMS ((rtx, rtx, rtx, rtx*));
706 static void local_cprop_pass PARAMS ((int));
707
708 /* Entry point for global common subexpression elimination.
709 F is the first instruction in the function. */
710
711 int
712 gcse_main (f, file)
713 rtx f;
714 FILE *file;
715 {
716 int changed, pass;
717 /* Bytes used at start of pass. */
718 int initial_bytes_used;
719 /* Maximum number of bytes used by a pass. */
720 int max_pass_bytes;
721 /* Point to release obstack data from for each pass. */
722 char *gcse_obstack_bottom;
723
724 /* Insertion of instructions on edges can create new basic blocks; we
725 need the original basic block count so that we can properly deallocate
726 arrays sized on the number of basic blocks originally in the cfg. */
727 int orig_bb_count;
728 /* We do not construct an accurate cfg in functions which call
729 setjmp, so just punt to be safe. */
730 if (current_function_calls_setjmp)
731 return 0;
732
733 /* Assume that we do not need to run jump optimizations after gcse. */
734 run_jump_opt_after_gcse = 0;
735
736 /* For calling dump_foo fns from gdb. */
737 debug_stderr = stderr;
738 gcse_file = file;
739
740 /* Identify the basic block information for this function, including
741 successors and predecessors. */
742 max_gcse_regno = max_reg_num ();
743
744 if (file)
745 dump_flow_info (file);
746
747 orig_bb_count = n_basic_blocks;
748 /* Return if there's nothing to do. */
749 if (n_basic_blocks <= 1)
750 return 0;
751
752 /* Trying to perform global optimizations on flow graphs which have
753 a high connectivity will take a long time and is unlikely to be
754 particularly useful.
755
756 In normal circumstances a cfg should have about twice as many edges
757 as blocks. But we do not want to punish small functions which have
758 a couple switch statements. So we require a relatively large number
759 of basic blocks and the ratio of edges to blocks to be high. */
760 if (n_basic_blocks > 1000 && n_edges / n_basic_blocks >= 20)
761 {
762 if (warn_disabled_optimization)
763 warning ("GCSE disabled: %d > 1000 basic blocks and %d >= 20 edges/basic block",
764 n_basic_blocks, n_edges / n_basic_blocks);
765 return 0;
766 }
767
768 /* If allocating memory for the cprop bitmap would take up too much
769 storage it's better just to disable the optimization. */
770 if ((n_basic_blocks
771 * SBITMAP_SET_SIZE (max_gcse_regno)
772 * sizeof (SBITMAP_ELT_TYPE)) > MAX_GCSE_MEMORY)
773 {
774 if (warn_disabled_optimization)
775 warning ("GCSE disabled: %d basic blocks and %d registers",
776 n_basic_blocks, max_gcse_regno);
777
778 return 0;
779 }
780
781 /* See what modes support reg/reg copy operations. */
782 if (! can_copy_init_p)
783 {
784 compute_can_copy ();
785 can_copy_init_p = 1;
786 }
787
788 gcc_obstack_init (&gcse_obstack);
789 bytes_used = 0;
790
791 /* We need alias. */
792 init_alias_analysis ();
793 /* Record where pseudo-registers are set. This data is kept accurate
794 during each pass. ??? We could also record hard-reg information here
795 [since it's unchanging], however it is currently done during hash table
796 computation.
797
798 It may be tempting to compute MEM set information here too, but MEM sets
799 will be subject to code motion one day and thus we need to compute
800 information about memory sets when we build the hash tables. */
801
802 alloc_reg_set_mem (max_gcse_regno);
803 compute_sets (f);
804
805 pass = 0;
806 initial_bytes_used = bytes_used;
807 max_pass_bytes = 0;
808 gcse_obstack_bottom = gcse_alloc (1);
809 changed = 1;
810 while (changed && pass < MAX_GCSE_PASSES)
811 {
812 changed = 0;
813 if (file)
814 fprintf (file, "GCSE pass %d\n\n", pass + 1);
815
816 /* Initialize bytes_used to the space for the pred/succ lists,
817 and the reg_set_table data. */
818 bytes_used = initial_bytes_used;
819
820 /* Each pass may create new registers, so recalculate each time. */
821 max_gcse_regno = max_reg_num ();
822
823 alloc_gcse_mem (f);
824
825 /* Don't allow constant propagation to modify jumps
826 during this pass. */
827 changed = one_cprop_pass (pass + 1, 0);
828
829 if (optimize_size)
830 changed |= one_classic_gcse_pass (pass + 1);
831 else
832 {
833 changed |= one_pre_gcse_pass (pass + 1);
834 /* We may have just created new basic blocks. Release and
835 recompute various things which are sized on the number of
836 basic blocks. */
837 if (changed)
838 {
839 free_modify_mem_tables ();
840 modify_mem_list
841 = (rtx *) gmalloc (last_basic_block * sizeof (rtx));
842 canon_modify_mem_list
843 = (rtx *) gmalloc (last_basic_block * sizeof (rtx));
844 memset ((char *) modify_mem_list, 0, last_basic_block * sizeof (rtx));
845 memset ((char *) canon_modify_mem_list, 0, last_basic_block * sizeof (rtx));
846 orig_bb_count = n_basic_blocks;
847 }
848 free_reg_set_mem ();
849 alloc_reg_set_mem (max_reg_num ());
850 compute_sets (f);
851 run_jump_opt_after_gcse = 1;
852 }
853
854 if (max_pass_bytes < bytes_used)
855 max_pass_bytes = bytes_used;
856
857 /* Free up memory, then reallocate for code hoisting. We can
858 not re-use the existing allocated memory because the tables
859 will not have info for the insns or registers created by
860 partial redundancy elimination. */
861 free_gcse_mem ();
862
863 /* It does not make sense to run code hoisting unless we are optimizing
864 for code size -- it rarely makes programs faster, and can make
865 them bigger if we did partial redundancy elimination (when optimizing
866 for space, we use a classic gcse algorithm instead of partial
867 redundancy algorithms). */
868 if (optimize_size)
869 {
870 max_gcse_regno = max_reg_num ();
871 alloc_gcse_mem (f);
872 changed |= one_code_hoisting_pass ();
873 free_gcse_mem ();
874
875 if (max_pass_bytes < bytes_used)
876 max_pass_bytes = bytes_used;
877 }
878
879 if (file)
880 {
881 fprintf (file, "\n");
882 fflush (file);
883 }
884
885 obstack_free (&gcse_obstack, gcse_obstack_bottom);
886 pass++;
887 }
888
889 /* Do one last pass of copy propagation, including cprop into
890 conditional jumps. */
891
892 max_gcse_regno = max_reg_num ();
893 alloc_gcse_mem (f);
894 /* This time, go ahead and allow cprop to alter jumps. */
895 one_cprop_pass (pass + 1, 1);
896 free_gcse_mem ();
897
898 if (file)
899 {
900 fprintf (file, "GCSE of %s: %d basic blocks, ",
901 current_function_name, n_basic_blocks);
902 fprintf (file, "%d pass%s, %d bytes\n\n",
903 pass, pass > 1 ? "es" : "", max_pass_bytes);
904 }
905
906 obstack_free (&gcse_obstack, NULL);
907 free_reg_set_mem ();
908 /* We are finished with alias. */
909 end_alias_analysis ();
910 allocate_reg_info (max_reg_num (), FALSE, FALSE);
911
912 /* Store motion disabled until it is fixed. */
913 if (0 && !optimize_size && flag_gcse_sm)
914 store_motion ();
916 return run_jump_opt_after_gcse;
917 }
918
919 /* Misc. utilities. */
920
921 /* Compute which modes support reg/reg copy operations. */
922
923 static void
924 compute_can_copy ()
925 {
926 int i;
927 #ifndef AVOID_CCMODE_COPIES
928 rtx reg, insn;
929 #endif
930 memset (can_copy_p, 0, NUM_MACHINE_MODES);
931
932 start_sequence ();
933 for (i = 0; i < NUM_MACHINE_MODES; i++)
934 if (GET_MODE_CLASS (i) == MODE_CC)
935 {
936 #ifdef AVOID_CCMODE_COPIES
937 can_copy_p[i] = 0;
938 #else
939 reg = gen_rtx_REG ((enum machine_mode) i, LAST_VIRTUAL_REGISTER + 1);
940 insn = emit_insn (gen_rtx_SET (VOIDmode, reg, reg));
941 if (recog (PATTERN (insn), insn, NULL) >= 0)
942 can_copy_p[i] = 1;
943 #endif
944 }
945 else
946 can_copy_p[i] = 1;
947
948 end_sequence ();
949 }
950
951 /* Cover function to xmalloc to record bytes allocated. */
952
953 static char *
954 gmalloc (size)
955 unsigned int size;
956 {
957 bytes_used += size;
958 return xmalloc (size);
959 }
960
961 /* Cover function to xrealloc.
962 We don't record the additional size since we don't know it.
963 It won't affect memory usage stats much anyway. */
964
965 static char *
966 grealloc (ptr, size)
967 char *ptr;
968 unsigned int size;
969 {
970 return xrealloc (ptr, size);
971 }
972
973 /* Cover function to obstack_alloc. */
974
975 static char *
976 gcse_alloc (size)
977 unsigned long size;
978 {
979 bytes_used += size;
980 return (char *) obstack_alloc (&gcse_obstack, size);
981 }
982
983 /* Allocate memory for the cuid mapping array,
984 and reg/memory set tracking tables.
985
986 This is called at the start of each pass. */
987
988 static void
989 alloc_gcse_mem (f)
990 rtx f;
991 {
992 int i, n;
993 rtx insn;
994
995 /* Find the largest UID and create a mapping from UIDs to CUIDs.
996 CUIDs are like UIDs except they increase monotonically, have no gaps,
997 and only apply to real insns. */
998
999 max_uid = get_max_uid ();
1000 n = (max_uid + 1) * sizeof (int);
1001 uid_cuid = (int *) gmalloc (n);
1002 memset ((char *) uid_cuid, 0, n);
1003 for (insn = f, i = 0; insn; insn = NEXT_INSN (insn))
1004 {
1005 if (INSN_P (insn))
1006 uid_cuid[INSN_UID (insn)] = i++;
1007 else
1008 uid_cuid[INSN_UID (insn)] = i;
1009 }
1010
1011 /* Create a table mapping cuids to insns. */
1012
1013 max_cuid = i;
1014 n = (max_cuid + 1) * sizeof (rtx);
1015 cuid_insn = (rtx *) gmalloc (n);
1016 memset ((char *) cuid_insn, 0, n);
1017 for (insn = f, i = 0; insn; insn = NEXT_INSN (insn))
1018 if (INSN_P (insn))
1019 CUID_INSN (i++) = insn;
1020
1021 /* Allocate vars to track sets of regs. */
1022 reg_set_bitmap = BITMAP_XMALLOC ();
1023
1024 /* Allocate vars to track sets of regs, memory per block. */
1025 reg_set_in_block = (sbitmap *) sbitmap_vector_alloc (last_basic_block,
1026 max_gcse_regno);
1027 /* Allocate array to keep a list of insns which modify memory in each
1028 basic block. */
1029 modify_mem_list = (rtx *) gmalloc (last_basic_block * sizeof (rtx));
1030 canon_modify_mem_list = (rtx *) gmalloc (last_basic_block * sizeof (rtx));
1031 memset ((char *) modify_mem_list, 0, last_basic_block * sizeof (rtx));
1032 memset ((char *) canon_modify_mem_list, 0, last_basic_block * sizeof (rtx));
1033 modify_mem_list_set = BITMAP_XMALLOC ();
1034 canon_modify_mem_list_set = BITMAP_XMALLOC ();
1035 }
1036
1037 /* Free memory allocated by alloc_gcse_mem. */
1038
1039 static void
1040 free_gcse_mem ()
1041 {
1042 free (uid_cuid);
1043 free (cuid_insn);
1044
1045 BITMAP_XFREE (reg_set_bitmap);
1046
1047 sbitmap_vector_free (reg_set_in_block);
1048 free_modify_mem_tables ();
1049 BITMAP_XFREE (modify_mem_list_set);
1050 BITMAP_XFREE (canon_modify_mem_list_set);
1051 }
1052
1053 /* Many of the global optimization algorithms work by solving dataflow
1054 equations for various expressions. Initially, some local value is
1055 computed for each expression in each block. Then, the values across the
1056 various blocks are combined (by following flow graph edges) to arrive at
1057 global values. Conceptually, each set of equations is independent. We
1058 may therefore solve all the equations in parallel, solve them one at a
1059 time, or pick any intermediate approach.
1060
1061 When you're going to need N two-dimensional bitmaps, each X (say, the
1062 number of blocks) by Y (say, the number of expressions), call this
1063 function. It's not important what X and Y represent; only that Y
1064 correspond to the things that can be done in parallel. This function will
1065 return an appropriate chunking factor C; you should solve C sets of
1066 equations in parallel. By going through this function, we can easily
1067 trade space against time; by solving fewer equations in parallel we use
1068 less space. */
1069
1070 static int
1071 get_bitmap_width (n, x, y)
1072 int n;
1073 int x;
1074 int y;
1075 {
1076 /* It's not really worth figuring out *exactly* how much memory will
1077 be used by a particular choice. The important thing is to get
1078 something approximately right. */
1079 size_t max_bitmap_memory = 10 * 1024 * 1024;
1080
1081 /* The number of bytes we'd use for a single column of minimum
1082 width. */
1083 size_t column_size = n * x * sizeof (SBITMAP_ELT_TYPE);
1084
1085 /* Often, it's reasonable just to solve all the equations in
1086 parallel. */
1087 if (column_size * SBITMAP_SET_SIZE (y) <= max_bitmap_memory)
1088 return y;
1089
1090 /* Otherwise, pick the largest width we can, without going over the
1091 limit. */
1092 return SBITMAP_ELT_BITS * ((max_bitmap_memory + column_size - 1)
1093 / column_size);
1094 }
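
/* A hypothetical caller would use the returned chunking factor along these
   lines (a sketch only; the existing callers are not quoted here):

       int width = get_bitmap_width (n, n_basic_blocks, n_exprs);
       int start, end;

       for (start = 0; start < n_exprs; start = end)
         {
           end = MIN (start + width, n_exprs);
           ... solve the equations for columns [start, end) ...
         }

   i.e. solve at most WIDTH of the Y-indexed equations at a time so that
   the working bitmaps stay within the intended memory budget.  */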
1095
1096 /* Compute the local properties of each recorded expression.
1097
1098 Local properties are those that are defined by the block, irrespective of
1099 other blocks.
1100
1101 An expression is transparent in a block if its operands are not modified
1102 in the block.
1103
1104 An expression is computed (locally available) in a block if it is computed
1105 at least once and the expression would contain the same value if the
1106 computation were moved to the end of the block.
1107
1108 An expression is locally anticipatable in a block if it is computed at
1109 least once and the expression would contain the same value if the
1110 computation were moved to the beginning of the block.
1111
1112 We call this routine for cprop, pre and code hoisting. They all compute
1113 basically the same information and thus can easily share this code.
1114
1115 TRANSP, COMP, and ANTLOC are destination sbitmaps for recording local
1116 properties. If NULL, then it is not necessary to compute or record that
1117 particular property.
1118
1119 TABLE controls which hash table to look at. If it is the set hash table,
1120 TRANSP is additionally computed as ~TRANSP, since this is really cprop's
1121 ABSALTERED. */
1122
1123 static void
1124 compute_local_properties (transp, comp, antloc, table)
1125 sbitmap *transp;
1126 sbitmap *comp;
1127 sbitmap *antloc;
1128 struct hash_table *table;
1129 {
1130 unsigned int i;
1131
1132 /* Initialize any bitmaps that were passed in. */
1133 if (transp)
1134 {
1135 if (table->set_p)
1136 sbitmap_vector_zero (transp, last_basic_block);
1137 else
1138 sbitmap_vector_ones (transp, last_basic_block);
1139 }
1140
1141 if (comp)
1142 sbitmap_vector_zero (comp, last_basic_block);
1143 if (antloc)
1144 sbitmap_vector_zero (antloc, last_basic_block);
1145
1146 for (i = 0; i < table->size; i++)
1147 {
1148 struct expr *expr;
1149
1150 for (expr = table->table[i]; expr != NULL; expr = expr->next_same_hash)
1151 {
1152 int indx = expr->bitmap_index;
1153 struct occr *occr;
1154
1155 /* The expression is transparent in this block if it is not killed.
1156 We start by assuming all are transparent [none are killed], and
1157 then reset the bits for those that are. */
1158 if (transp)
1159 compute_transp (expr->expr, indx, transp, table->set_p);
1160
1161 /* The occurrences recorded in antic_occr are exactly those that
1162 we want to set to nonzero in ANTLOC. */
1163 if (antloc)
1164 for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
1165 {
1166 SET_BIT (antloc[BLOCK_NUM (occr->insn)], indx);
1167
1168 /* While we're scanning the table, this is a good place to
1169 initialize this. */
1170 occr->deleted_p = 0;
1171 }
1172
1173 /* The occurrences recorded in avail_occr are exactly those that
1174 we want to set to nonzero in COMP. */
1175 if (comp)
1176 for (occr = expr->avail_occr; occr != NULL; occr = occr->next)
1177 {
1178 SET_BIT (comp[BLOCK_NUM (occr->insn)], indx);
1179
1180 /* While we're scanning the table, this is a good place to
1181 initialize this. */
1182 occr->copied_p = 0;
1183 }
1184
1185 /* While we're scanning the table, this is a good place to
1186 initialize this. */
1187 expr->reaching_reg = 0;
1188 }
1189 }
1190 }
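
/* As a usage sketch (not a quotation of the callers): cprop's setup does
   roughly

       compute_local_properties (cprop_absaltered, cprop_pavloc, NULL,
                                 &set_hash_table);

   with no ANTLOC and with TRANSP delivered in its complemented ABSALTERED
   form because set_hash_table has set_p nonzero, while PRE passes all
   three arrays together with &expr_hash_table.  */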
1191
1192 /* Register set information.
1193
1194 `reg_set_table' records where each register is set or otherwise
1195 modified. */
1196
1197 static struct obstack reg_set_obstack;
1198
1199 static void
1200 alloc_reg_set_mem (n_regs)
1201 int n_regs;
1202 {
1203 unsigned int n;
1204
1205 reg_set_table_size = n_regs + REG_SET_TABLE_SLOP;
1206 n = reg_set_table_size * sizeof (struct reg_set *);
1207 reg_set_table = (struct reg_set **) gmalloc (n);
1208 memset ((char *) reg_set_table, 0, n);
1209
1210 gcc_obstack_init (&reg_set_obstack);
1211 }
1212
1213 static void
1214 free_reg_set_mem ()
1215 {
1216 free (reg_set_table);
1217 obstack_free (&reg_set_obstack, NULL);
1218 }
1219
1220 /* Record REGNO in the reg_set table. */
1221
1222 static void
1223 record_one_set (regno, insn)
1224 int regno;
1225 rtx insn;
1226 {
1227 /* Allocate a new reg_set element and link it onto the list. */
1228 struct reg_set *new_reg_info;
1229
1230 /* If the table isn't big enough, enlarge it. */
1231 if (regno >= reg_set_table_size)
1232 {
1233 int new_size = regno + REG_SET_TABLE_SLOP;
1234
1235 reg_set_table
1236 = (struct reg_set **) grealloc ((char *) reg_set_table,
1237 new_size * sizeof (struct reg_set *));
1238 memset ((char *) (reg_set_table + reg_set_table_size), 0,
1239 (new_size - reg_set_table_size) * sizeof (struct reg_set *));
1240 reg_set_table_size = new_size;
1241 }
1242
1243 new_reg_info = (struct reg_set *) obstack_alloc (&reg_set_obstack,
1244 sizeof (struct reg_set));
1245 bytes_used += sizeof (struct reg_set);
1246 new_reg_info->insn = insn;
1247 new_reg_info->next = reg_set_table[regno];
1248 reg_set_table[regno] = new_reg_info;
1249 }
1250
1251 /* Called from compute_sets via note_stores to handle one SET or CLOBBER in
1252 an insn. The DATA is really the instruction in which the SET is
1253 occurring. */
1254
1255 static void
1256 record_set_info (dest, setter, data)
1257 rtx dest, setter ATTRIBUTE_UNUSED;
1258 void *data;
1259 {
1260 rtx record_set_insn = (rtx) data;
1261
1262 if (GET_CODE (dest) == REG && REGNO (dest) >= FIRST_PSEUDO_REGISTER)
1263 record_one_set (REGNO (dest), record_set_insn);
1264 }
1265
1266 /* Scan the function and record each set of each pseudo-register.
1267
1268 This is called once, at the start of the gcse pass. See the comments for
1269 `reg_set_table' for further documentation. */
1270
1271 static void
1272 compute_sets (f)
1273 rtx f;
1274 {
1275 rtx insn;
1276
1277 for (insn = f; insn != 0; insn = NEXT_INSN (insn))
1278 if (INSN_P (insn))
1279 note_stores (PATTERN (insn), record_set_info, insn);
1280 }
1281
1282 /* Hash table support. */
1283
1284 struct reg_avail_info
1285 {
1286 basic_block last_bb;
1287 int first_set;
1288 int last_set;
1289 };
1290
1291 static struct reg_avail_info *reg_avail_info;
1292 static basic_block current_bb;
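
/* Illustrative summary of how this record is consulted (it mirrors the
   REG case of oprs_unchanged_p below; the snippet is a sketch, not a
   quotation):

       struct reg_avail_info *info = &reg_avail_info[REGNO (x)];

       if (info->last_bb != current_bb)
         the reg is not set in this block, so the operand test succeeds;
       else if (avail_p)
         it is unchanged to the end of the block iff
           info->last_set < INSN_CUID (insn);
       else
         it is unchanged from the start of the block iff
           info->first_set >= INSN_CUID (insn);
*/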
1293
1294
1295 /* See whether X, the source of a set, is something we want to consider for
1296 GCSE. */
1297
1298 static GTY(()) rtx test_insn;
1299 static int
1300 want_to_gcse_p (x)
1301 rtx x;
1302 {
1303 int num_clobbers = 0;
1304 int icode;
1305
1306 switch (GET_CODE (x))
1307 {
1308 case REG:
1309 case SUBREG:
1310 case CONST_INT:
1311 case CONST_DOUBLE:
1312 case CONST_VECTOR:
1313 case CALL:
1314 return 0;
1315
1316 default:
1317 break;
1318 }
1319
1320 /* If this is a valid operand, we are OK. If it's VOIDmode, we aren't. */
1321 if (general_operand (x, GET_MODE (x)))
1322 return 1;
1323 else if (GET_MODE (x) == VOIDmode)
1324 return 0;
1325
1326 /* Otherwise, check if we can make a valid insn from it. First initialize
1327 our test insn if we haven't already. */
1328 if (test_insn == 0)
1329 {
1330 test_insn
1331 = make_insn_raw (gen_rtx_SET (VOIDmode,
1332 gen_rtx_REG (word_mode,
1333 FIRST_PSEUDO_REGISTER * 2),
1334 const0_rtx));
1335 NEXT_INSN (test_insn) = PREV_INSN (test_insn) = 0;
1336 }
1337
1338 /* Now make an insn like the one we would make when GCSE'ing and see if
1339 valid. */
1340 PUT_MODE (SET_DEST (PATTERN (test_insn)), GET_MODE (x));
1341 SET_SRC (PATTERN (test_insn)) = x;
1342 return ((icode = recog (PATTERN (test_insn), test_insn, &num_clobbers)) >= 0
1343 && (num_clobbers == 0 || ! added_clobbers_hard_reg_p (icode)));
1344 }
1345
1346 /* Return nonzero if the operands of expression X are unchanged from the
1347 start of INSN's basic block up to but not including INSN (if AVAIL_P == 0),
1348 or from INSN to the end of INSN's basic block (if AVAIL_P != 0). */
1349
1350 static int
1351 oprs_unchanged_p (x, insn, avail_p)
1352 rtx x, insn;
1353 int avail_p;
1354 {
1355 int i, j;
1356 enum rtx_code code;
1357 const char *fmt;
1358
1359 if (x == 0)
1360 return 1;
1361
1362 code = GET_CODE (x);
1363 switch (code)
1364 {
1365 case REG:
1366 {
1367 struct reg_avail_info *info = &reg_avail_info[REGNO (x)];
1368
1369 if (info->last_bb != current_bb)
1370 return 1;
1371 if (avail_p)
1372 return info->last_set < INSN_CUID (insn);
1373 else
1374 return info->first_set >= INSN_CUID (insn);
1375 }
1376
1377 case MEM:
1378 if (load_killed_in_block_p (current_bb, INSN_CUID (insn),
1379 x, avail_p))
1380 return 0;
1381 else
1382 return oprs_unchanged_p (XEXP (x, 0), insn, avail_p);
1383
1384 case PRE_DEC:
1385 case PRE_INC:
1386 case POST_DEC:
1387 case POST_INC:
1388 case PRE_MODIFY:
1389 case POST_MODIFY:
1390 return 0;
1391
1392 case PC:
1393 case CC0: /*FIXME*/
1394 case CONST:
1395 case CONST_INT:
1396 case CONST_DOUBLE:
1397 case CONST_VECTOR:
1398 case SYMBOL_REF:
1399 case LABEL_REF:
1400 case ADDR_VEC:
1401 case ADDR_DIFF_VEC:
1402 return 1;
1403
1404 default:
1405 break;
1406 }
1407
1408 for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
1409 {
1410 if (fmt[i] == 'e')
1411 {
1412 /* If we are about to do the last recursive call needed at this
1413 level, change it into iteration. This function is called enough
1414 to be worth it. */
1415 if (i == 0)
1416 return oprs_unchanged_p (XEXP (x, i), insn, avail_p);
1417
1418 else if (! oprs_unchanged_p (XEXP (x, i), insn, avail_p))
1419 return 0;
1420 }
1421 else if (fmt[i] == 'E')
1422 for (j = 0; j < XVECLEN (x, i); j++)
1423 if (! oprs_unchanged_p (XVECEXP (x, i, j), insn, avail_p))
1424 return 0;
1425 }
1426
1427 return 1;
1428 }
1429
1430 /* Used for communication between mems_conflict_for_gcse_p and
1431 load_killed_in_block_p. Nonzero if mems_conflict_for_gcse_p finds a
1432 conflict between two memory references. */
1433 static int gcse_mems_conflict_p;
1434
1435 /* Used for communication between mems_conflict_for_gcse_p and
1436 load_killed_in_block_p. This is the memory reference of a load
1437 instruction; mems_conflict_for_gcse_p checks whether a memory store
1438 conflicts with this load. */
1439 static rtx gcse_mem_operand;
1440
1441 /* DEST is the output of an instruction. If it is a memory reference, and
1442 possibly conflicts with the load found in gcse_mem_operand, then set
1443 gcse_mems_conflict_p to a nonzero value. */
1444
1445 static void
1446 mems_conflict_for_gcse_p (dest, setter, data)
1447 rtx dest, setter ATTRIBUTE_UNUSED;
1448 void *data ATTRIBUTE_UNUSED;
1449 {
1450 while (GET_CODE (dest) == SUBREG
1451 || GET_CODE (dest) == ZERO_EXTRACT
1452 || GET_CODE (dest) == SIGN_EXTRACT
1453 || GET_CODE (dest) == STRICT_LOW_PART)
1454 dest = XEXP (dest, 0);
1455
1456 /* If DEST is not a MEM, then it will not conflict with the load. Note
1457 that function calls are assumed to clobber memory, but are handled
1458 elsewhere. */
1459 if (GET_CODE (dest) != MEM)
1460 return;
1461
1462 /* If we are setting a MEM in our list of specially recognized MEMs,
1463 don't mark as killed this time. */
1464
1465 if (dest == gcse_mem_operand && pre_ldst_mems != NULL)
1466 {
1467 if (!find_rtx_in_ldst (dest))
1468 gcse_mems_conflict_p = 1;
1469 return;
1470 }
1471
1472 if (true_dependence (dest, GET_MODE (dest), gcse_mem_operand,
1473 rtx_addr_varies_p))
1474 gcse_mems_conflict_p = 1;
1475 }
1476
1477 /* Return nonzero if the expression in X (a memory reference) is killed
1478 in block BB before or after the insn with the CUID in UID_LIMIT.
1479 AVAIL_P is nonzero for kills after UID_LIMIT, and zero for kills
1480 before UID_LIMIT.
1481
1482 To check the entire block, set UID_LIMIT to max_uid + 1 and
1483 AVAIL_P to 0. */
1484
1485 static int
1486 load_killed_in_block_p (bb, uid_limit, x, avail_p)
1487 basic_block bb;
1488 int uid_limit;
1489 rtx x;
1490 int avail_p;
1491 {
1492 rtx list_entry = modify_mem_list[bb->index];
1493 while (list_entry)
1494 {
1495 rtx setter;
1496 /* Ignore entries in the list that do not apply. */
1497 if ((avail_p
1498 && INSN_CUID (XEXP (list_entry, 0)) < uid_limit)
1499 || (! avail_p
1500 && INSN_CUID (XEXP (list_entry, 0)) > uid_limit))
1501 {
1502 list_entry = XEXP (list_entry, 1);
1503 continue;
1504 }
1505
1506 setter = XEXP (list_entry, 0);
1507
1508 /* If SETTER is a call everything is clobbered. Note that calls
1509 to pure functions are never put on the list, so we need not
1510 worry about them. */
1511 if (GET_CODE (setter) == CALL_INSN)
1512 return 1;
1513
1514 /* SETTER must be an INSN of some kind that sets memory. Call
1515 note_stores to examine each hunk of memory that is modified.
1516
1517 The note_stores interface is pretty limited, so we have to
1518 communicate via global variables. Yuk. */
1519 gcse_mem_operand = x;
1520 gcse_mems_conflict_p = 0;
1521 note_stores (PATTERN (setter), mems_conflict_for_gcse_p, NULL);
1522 if (gcse_mems_conflict_p)
1523 return 1;
1524 list_entry = XEXP (list_entry, 1);
1525 }
1526 return 0;
1527 }
1528
1529 /* Return nonzero if the operands of expression X are unchanged from
1530 the start of INSN's basic block up to but not including INSN. */
1531
1532 static int
1533 oprs_anticipatable_p (x, insn)
1534 rtx x, insn;
1535 {
1536 return oprs_unchanged_p (x, insn, 0);
1537 }
1538
1539 /* Return nonzero if the operands of expression X are unchanged from
1540 INSN to the end of INSN's basic block. */
1541
1542 static int
1543 oprs_available_p (x, insn)
1544 rtx x, insn;
1545 {
1546 return oprs_unchanged_p (x, insn, 1);
1547 }
1548
1549 /* Hash expression X.
1550
1551 MODE is only used if X is a CONST_INT. DO_NOT_RECORD_P is a boolean
1552 indicating if a volatile operand is found or if the expression contains
1553 something we don't want to insert in the table.
1554
1555 ??? One might want to merge this with canon_hash. Later. */
1556
1557 static unsigned int
1558 hash_expr (x, mode, do_not_record_p, hash_table_size)
1559 rtx x;
1560 enum machine_mode mode;
1561 int *do_not_record_p;
1562 int hash_table_size;
1563 {
1564 unsigned int hash;
1565
1566 *do_not_record_p = 0;
1567
1568 hash = hash_expr_1 (x, mode, do_not_record_p);
1569 return hash % hash_table_size;
1570 }
1571
1572 /* Hash a string. Just add its bytes up. */
1573
1574 static inline unsigned
1575 hash_string_1 (ps)
1576 const char *ps;
1577 {
1578 unsigned hash = 0;
1579 const unsigned char *p = (const unsigned char *) ps;
1580
1581 if (p)
1582 while (*p)
1583 hash += *p++;
1584
1585 return hash;
1586 }
1587
1588 /* Subroutine of hash_expr to do the actual work. */
1589
1590 static unsigned int
1591 hash_expr_1 (x, mode, do_not_record_p)
1592 rtx x;
1593 enum machine_mode mode;
1594 int *do_not_record_p;
1595 {
1596 int i, j;
1597 unsigned hash = 0;
1598 enum rtx_code code;
1599 const char *fmt;
1600
1601 /* Used to turn recursion into iteration. We can't rely on GCC's
1602 tail-recursion elimination since we need to keep accumulating values
1603 in HASH. */
1604
1605 if (x == 0)
1606 return hash;
1607
1608 repeat:
1609 code = GET_CODE (x);
1610 switch (code)
1611 {
1612 case REG:
1613 hash += ((unsigned int) REG << 7) + REGNO (x);
1614 return hash;
1615
1616 case CONST_INT:
1617 hash += (((unsigned int) CONST_INT << 7) + (unsigned int) mode
1618 + (unsigned int) INTVAL (x));
1619 return hash;
1620
1621 case CONST_DOUBLE:
1622 /* This is like the general case, except that it only counts
1623 the integers representing the constant. */
1624 hash += (unsigned int) code + (unsigned int) GET_MODE (x);
1625 if (GET_MODE (x) != VOIDmode)
1626 for (i = 2; i < GET_RTX_LENGTH (CONST_DOUBLE); i++)
1627 hash += (unsigned int) XWINT (x, i);
1628 else
1629 hash += ((unsigned int) CONST_DOUBLE_LOW (x)
1630 + (unsigned int) CONST_DOUBLE_HIGH (x));
1631 return hash;
1632
1633 case CONST_VECTOR:
1634 {
1635 int units;
1636 rtx elt;
1637
1638 units = CONST_VECTOR_NUNITS (x);
1639
1640 for (i = 0; i < units; ++i)
1641 {
1642 elt = CONST_VECTOR_ELT (x, i);
1643 hash += hash_expr_1 (elt, GET_MODE (elt), do_not_record_p);
1644 }
1645
1646 return hash;
1647 }
1648
1649 /* Assume there is only one rtx object for any given label. */
1650 case LABEL_REF:
1651 /* We don't hash on the address of the CODE_LABEL to avoid bootstrap
1652 differences and differences between each stage's debugging dumps. */
1653 hash += (((unsigned int) LABEL_REF << 7)
1654 + CODE_LABEL_NUMBER (XEXP (x, 0)));
1655 return hash;
1656
1657 case SYMBOL_REF:
1658 {
1659 /* Don't hash on the symbol's address to avoid bootstrap differences.
1660 Different hash values may cause expressions to be recorded in
1661 different orders and thus different registers to be used in the
1662 final assembler. This also avoids differences in the dump files
1663 between various stages. */
1664 unsigned int h = 0;
1665 const unsigned char *p = (const unsigned char *) XSTR (x, 0);
1666
1667 while (*p)
1668 h += (h << 7) + *p++; /* ??? revisit */
1669
1670 hash += ((unsigned int) SYMBOL_REF << 7) + h;
1671 return hash;
1672 }
1673
1674 case MEM:
1675 if (MEM_VOLATILE_P (x))
1676 {
1677 *do_not_record_p = 1;
1678 return 0;
1679 }
1680
1681 hash += (unsigned int) MEM;
1682 /* We used to use the alias set for hashing, but this is not good, since
1683 the alias set may differ between -fprofile-arcs and -fbranch-probabilities
1684 compilations, causing the profiles to fail to match. */
1685 x = XEXP (x, 0);
1686 goto repeat;
1687
1688 case PRE_DEC:
1689 case PRE_INC:
1690 case POST_DEC:
1691 case POST_INC:
1692 case PC:
1693 case CC0:
1694 case CALL:
1695 case UNSPEC_VOLATILE:
1696 *do_not_record_p = 1;
1697 return 0;
1698
1699 case ASM_OPERANDS:
1700 if (MEM_VOLATILE_P (x))
1701 {
1702 *do_not_record_p = 1;
1703 return 0;
1704 }
1705 else
1706 {
1707 /* We don't want to take the filename and line into account. */
1708 hash += (unsigned) code + (unsigned) GET_MODE (x)
1709 + hash_string_1 (ASM_OPERANDS_TEMPLATE (x))
1710 + hash_string_1 (ASM_OPERANDS_OUTPUT_CONSTRAINT (x))
1711 + (unsigned) ASM_OPERANDS_OUTPUT_IDX (x);
1712
1713 if (ASM_OPERANDS_INPUT_LENGTH (x))
1714 {
1715 for (i = 1; i < ASM_OPERANDS_INPUT_LENGTH (x); i++)
1716 {
1717 hash += (hash_expr_1 (ASM_OPERANDS_INPUT (x, i),
1718 GET_MODE (ASM_OPERANDS_INPUT (x, i)),
1719 do_not_record_p)
1720 + hash_string_1 (ASM_OPERANDS_INPUT_CONSTRAINT
1721 (x, i)));
1722 }
1723
1724 hash += hash_string_1 (ASM_OPERANDS_INPUT_CONSTRAINT (x, 0));
1725 x = ASM_OPERANDS_INPUT (x, 0);
1726 mode = GET_MODE (x);
1727 goto repeat;
1728 }
1729 return hash;
1730 }
1731
1732 default:
1733 break;
1734 }
1735
1736 hash += (unsigned) code + (unsigned) GET_MODE (x);
1737 for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
1738 {
1739 if (fmt[i] == 'e')
1740 {
1741 /* If we are about to do the last recursive call
1742 needed at this level, change it into iteration.
1743 This function is called enough to be worth it. */
1744 if (i == 0)
1745 {
1746 x = XEXP (x, i);
1747 goto repeat;
1748 }
1749
1750 hash += hash_expr_1 (XEXP (x, i), 0, do_not_record_p);
1751 if (*do_not_record_p)
1752 return 0;
1753 }
1754
1755 else if (fmt[i] == 'E')
1756 for (j = 0; j < XVECLEN (x, i); j++)
1757 {
1758 hash += hash_expr_1 (XVECEXP (x, i, j), 0, do_not_record_p);
1759 if (*do_not_record_p)
1760 return 0;
1761 }
1762
1763 else if (fmt[i] == 's')
1764 hash += hash_string_1 (XSTR (x, i));
1765 else if (fmt[i] == 'i')
1766 hash += (unsigned int) XINT (x, i);
1767 else
1768 abort ();
1769 }
1770
1771 return hash;
1772 }
1773
1774 /* Hash a set of register REGNO.
1775
1776 Sets are hashed on the register that is set. This simplifies the PRE copy
1777 propagation code.
1778
1779 ??? May need to make things more elaborate. Later, as necessary. */
1780
1781 static unsigned int
1782 hash_set (regno, hash_table_size)
1783 int regno;
1784 int hash_table_size;
1785 {
1786 unsigned int hash;
1787
1788 hash = regno;
1789 return hash % hash_table_size;
1790 }
1791
1792 /* Return nonzero if exp1 is equivalent to exp2.
1793 ??? Borrowed from cse.c. Might want to remerge with cse.c. Later. */
1794
1795 static int
1796 expr_equiv_p (x, y)
1797 rtx x, y;
1798 {
1799 int i, j;
1800 enum rtx_code code;
1801 const char *fmt;
1802
1803 if (x == y)
1804 return 1;
1805
1806 if (x == 0 || y == 0)
1807 return x == y;
1808
1809 code = GET_CODE (x);
1810 if (code != GET_CODE (y))
1811 return 0;
1812
1813 /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent. */
1814 if (GET_MODE (x) != GET_MODE (y))
1815 return 0;
1816
1817 switch (code)
1818 {
1819 case PC:
1820 case CC0:
1821 return x == y;
1822
1823 case CONST_INT:
1824 return INTVAL (x) == INTVAL (y);
1825
1826 case LABEL_REF:
1827 return XEXP (x, 0) == XEXP (y, 0);
1828
1829 case SYMBOL_REF:
1830 return XSTR (x, 0) == XSTR (y, 0);
1831
1832 case REG:
1833 return REGNO (x) == REGNO (y);
1834
1835 case MEM:
1836 /* Can't merge two expressions in different alias sets, since we can
1837 decide that the expression is transparent in a block when it isn't,
1838 due to it being set with a different alias set. */
1839 if (MEM_ALIAS_SET (x) != MEM_ALIAS_SET (y))
1840 return 0;
1841 break;
1842
1843 /* For commutative operations, check both orders. */
1844 case PLUS:
1845 case MULT:
1846 case AND:
1847 case IOR:
1848 case XOR:
1849 case NE:
1850 case EQ:
1851 return ((expr_equiv_p (XEXP (x, 0), XEXP (y, 0))
1852 && expr_equiv_p (XEXP (x, 1), XEXP (y, 1)))
1853 || (expr_equiv_p (XEXP (x, 0), XEXP (y, 1))
1854 && expr_equiv_p (XEXP (x, 1), XEXP (y, 0))));
1855
1856 case ASM_OPERANDS:
1857 /* We don't use the generic code below because we want to
1858 disregard filename and line numbers. */
1859
1860 /* A volatile asm isn't equivalent to any other. */
1861 if (MEM_VOLATILE_P (x) || MEM_VOLATILE_P (y))
1862 return 0;
1863
1864 if (GET_MODE (x) != GET_MODE (y)
1865 || strcmp (ASM_OPERANDS_TEMPLATE (x), ASM_OPERANDS_TEMPLATE (y))
1866 || strcmp (ASM_OPERANDS_OUTPUT_CONSTRAINT (x),
1867 ASM_OPERANDS_OUTPUT_CONSTRAINT (y))
1868 || ASM_OPERANDS_OUTPUT_IDX (x) != ASM_OPERANDS_OUTPUT_IDX (y)
1869 || ASM_OPERANDS_INPUT_LENGTH (x) != ASM_OPERANDS_INPUT_LENGTH (y))
1870 return 0;
1871
1872 if (ASM_OPERANDS_INPUT_LENGTH (x))
1873 {
1874 for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; i--)
1875 if (! expr_equiv_p (ASM_OPERANDS_INPUT (x, i),
1876 ASM_OPERANDS_INPUT (y, i))
1877 || strcmp (ASM_OPERANDS_INPUT_CONSTRAINT (x, i),
1878 ASM_OPERANDS_INPUT_CONSTRAINT (y, i)))
1879 return 0;
1880 }
1881
1882 return 1;
1883
1884 default:
1885 break;
1886 }
1887
1888 /* Compare the elements. If any pair of corresponding elements
1889 fail to match, return 0 for the whole thing. */
1890
1891 fmt = GET_RTX_FORMAT (code);
1892 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1893 {
1894 switch (fmt[i])
1895 {
1896 case 'e':
1897 if (! expr_equiv_p (XEXP (x, i), XEXP (y, i)))
1898 return 0;
1899 break;
1900
1901 case 'E':
1902 if (XVECLEN (x, i) != XVECLEN (y, i))
1903 return 0;
1904 for (j = 0; j < XVECLEN (x, i); j++)
1905 if (! expr_equiv_p (XVECEXP (x, i, j), XVECEXP (y, i, j)))
1906 return 0;
1907 break;
1908
1909 case 's':
1910 if (strcmp (XSTR (x, i), XSTR (y, i)))
1911 return 0;
1912 break;
1913
1914 case 'i':
1915 if (XINT (x, i) != XINT (y, i))
1916 return 0;
1917 break;
1918
1919 case 'w':
1920 if (XWINT (x, i) != XWINT (y, i))
1921 return 0;
1922 break;
1923
1924 case '0':
1925 break;
1926
1927 default:
1928 abort ();
1929 }
1930 }
1931
1932 return 1;
1933 }
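
/* Illustrative note (added for exposition, not from the original sources):
   because the commutative cases above try both operand orders,
   (plus:SI (reg 100) (const_int 4)) and (plus:SI (const_int 4) (reg 100))
   compare as equivalent, whereas (mult:SI (reg 100) (reg 101)) and
   (mult:HI (reg 100) (reg 101)) do not, since the mode check rejects them
   before the operands are examined.  */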
1934
1935 /* Insert expression X in INSN in the hash TABLE.
1936 If it is already present, record it as the last occurrence in INSN's
1937 basic block.
1938
1939 MODE is the mode of the value X is being stored into.
1940 It is only used if X is a CONST_INT.
1941
1942 ANTIC_P is nonzero if X is an anticipatable expression.
1943 AVAIL_P is nonzero if X is an available expression. */
1944
1945 static void
1946 insert_expr_in_table (x, mode, insn, antic_p, avail_p, table)
1947 rtx x;
1948 enum machine_mode mode;
1949 rtx insn;
1950 int antic_p, avail_p;
1951 struct hash_table *table;
1952 {
1953 int found, do_not_record_p;
1954 unsigned int hash;
1955 struct expr *cur_expr, *last_expr = NULL;
1956 struct occr *antic_occr, *avail_occr;
1957 struct occr *last_occr = NULL;
1958
1959 hash = hash_expr (x, mode, &do_not_record_p, table->size);
1960
1961 /* Do not insert expression in table if it contains volatile operands,
1962 or if hash_expr determines the expression is something we don't want
1963 to or can't handle. */
1964 if (do_not_record_p)
1965 return;
1966
1967 cur_expr = table->table[hash];
1968 found = 0;
1969
1970 while (cur_expr && 0 == (found = expr_equiv_p (cur_expr->expr, x)))
1971 {
1972 /* If the expression isn't found, save a pointer to the end of
1973 the list. */
1974 last_expr = cur_expr;
1975 cur_expr = cur_expr->next_same_hash;
1976 }
1977
1978 if (! found)
1979 {
1980 cur_expr = (struct expr *) gcse_alloc (sizeof (struct expr));
1981 bytes_used += sizeof (struct expr);
1982 if (table->table[hash] == NULL)
1983 /* This is the first pattern that hashed to this index. */
1984 table->table[hash] = cur_expr;
1985 else
1986 /* Add EXPR to end of this hash chain. */
1987 last_expr->next_same_hash = cur_expr;
1988
1989 /* Set the fields of the expr element. */
1990 cur_expr->expr = x;
1991 cur_expr->bitmap_index = table->n_elems++;
1992 cur_expr->next_same_hash = NULL;
1993 cur_expr->antic_occr = NULL;
1994 cur_expr->avail_occr = NULL;
1995 }
1996
1997 /* Now record the occurrence(s). */
1998 if (antic_p)
1999 {
2000 antic_occr = cur_expr->antic_occr;
2001
2002 /* Search for another occurrence in the same basic block. */
2003 while (antic_occr && BLOCK_NUM (antic_occr->insn) != BLOCK_NUM (insn))
2004 {
2005 /* If an occurrence isn't found, save a pointer to the end of
2006 the list. */
2007 last_occr = antic_occr;
2008 antic_occr = antic_occr->next;
2009 }
2010
2011 if (antic_occr)
2012 /* Found another instance of the expression in the same basic block.
2013 Prefer the currently recorded one. We want the first one in the
2014 block and the block is scanned from start to end. */
2015 ; /* nothing to do */
2016 else
2017 {
2018 /* First occurrence of this expression in this basic block. */
2019 antic_occr = (struct occr *) gcse_alloc (sizeof (struct occr));
2020 bytes_used += sizeof (struct occr);
2021 /* First occurrence of this expression in any block? */
2022 if (cur_expr->antic_occr == NULL)
2023 cur_expr->antic_occr = antic_occr;
2024 else
2025 last_occr->next = antic_occr;
2026
2027 antic_occr->insn = insn;
2028 antic_occr->next = NULL;
2029 }
2030 }
2031
2032 if (avail_p)
2033 {
2034 avail_occr = cur_expr->avail_occr;
2035
2036 /* Search for another occurrence in the same basic block. */
2037 while (avail_occr && BLOCK_NUM (avail_occr->insn) != BLOCK_NUM (insn))
2038 {
2039 /* If an occurrence isn't found, save a pointer to the end of
2040 the list. */
2041 last_occr = avail_occr;
2042 avail_occr = avail_occr->next;
2043 }
2044
2045 if (avail_occr)
2046 /* Found another instance of the expression in the same basic block.
2047 Prefer this occurrence to the currently recorded one. We want
2048 the last one in the block and the block is scanned from start
2049 to end. */
2050 avail_occr->insn = insn;
2051 else
2052 {
2053 /* First occurrence of this expression in this basic block. */
2054 avail_occr = (struct occr *) gcse_alloc (sizeof (struct occr));
2055 bytes_used += sizeof (struct occr);
2056
2057 /* First occurrence of this expression in any block? */
2058 if (cur_expr->avail_occr == NULL)
2059 cur_expr->avail_occr = avail_occr;
2060 else
2061 last_occr->next = avail_occr;
2062
2063 avail_occr->insn = insn;
2064 avail_occr->next = NULL;
2065 }
2066 }
2067 }
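
/* Worked example (a sketch for exposition, not part of the original
   sources): if a block computes (plus:SI (reg 100) (reg 101)) twice, the
   first anticipatable call records the first insn in antic_occr and later
   calls leave it alone (anticipatability wants the first occurrence in the
   block), while avail_occr->insn is overwritten each time so that it ends
   up naming the last occurrence, which is what availability wants.  */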
2068
2069 /* Insert pattern X in INSN in the hash table.
2070 X is a SET of a reg to either another reg or a constant.
2071 If it is already present, record it as the last occurrence in INSN's
2072 basic block. */
2073
2074 static void
2075 insert_set_in_table (x, insn, table)
2076 rtx x;
2077 rtx insn;
2078 struct hash_table *table;
2079 {
2080 int found;
2081 unsigned int hash;
2082 struct expr *cur_expr, *last_expr = NULL;
2083 struct occr *cur_occr, *last_occr = NULL;
2084
2085 if (GET_CODE (x) != SET
2086 || GET_CODE (SET_DEST (x)) != REG)
2087 abort ();
2088
2089 hash = hash_set (REGNO (SET_DEST (x)), table->size);
2090
2091 cur_expr = table->table[hash];
2092 found = 0;
2093
2094 while (cur_expr && 0 == (found = expr_equiv_p (cur_expr->expr, x)))
2095 {
2096 /* If the expression isn't found, save a pointer to the end of
2097 the list. */
2098 last_expr = cur_expr;
2099 cur_expr = cur_expr->next_same_hash;
2100 }
2101
2102 if (! found)
2103 {
2104 cur_expr = (struct expr *) gcse_alloc (sizeof (struct expr));
2105 bytes_used += sizeof (struct expr);
2106 if (table->table[hash] == NULL)
2107 /* This is the first pattern that hashed to this index. */
2108 table->table[hash] = cur_expr;
2109 else
2110 /* Add EXPR to end of this hash chain. */
2111 last_expr->next_same_hash = cur_expr;
2112
2113 /* Set the fields of the expr element.
2114 We must copy X because it can be modified when copy propagation is
2115 performed on its operands. */
2116 cur_expr->expr = copy_rtx (x);
2117 cur_expr->bitmap_index = table->n_elems++;
2118 cur_expr->next_same_hash = NULL;
2119 cur_expr->antic_occr = NULL;
2120 cur_expr->avail_occr = NULL;
2121 }
2122
2123 /* Now record the occurrence. */
2124 cur_occr = cur_expr->avail_occr;
2125
2126 /* Search for another occurrence in the same basic block. */
2127 while (cur_occr && BLOCK_NUM (cur_occr->insn) != BLOCK_NUM (insn))
2128 {
2129 /* If an occurrence isn't found, save a pointer to the end of
2130 the list. */
2131 last_occr = cur_occr;
2132 cur_occr = cur_occr->next;
2133 }
2134
2135 if (cur_occr)
2136 /* Found another instance of the expression in the same basic block.
2137 Prefer this occurrence to the currently recorded one. We want the
2138 last one in the block and the block is scanned from start to end. */
2139 cur_occr->insn = insn;
2140 else
2141 {
2142 /* First occurrence of this expression in this basic block. */
2143 cur_occr = (struct occr *) gcse_alloc (sizeof (struct occr));
2144 bytes_used += sizeof (struct occr);
2145
2146 /* First occurrence of this expression in any block? */
2147 if (cur_expr->avail_occr == NULL)
2148 cur_expr->avail_occr = cur_occr;
2149 else
2150 last_occr->next = cur_occr;
2151
2152 cur_occr->insn = insn;
2153 cur_occr->next = NULL;
2154 }
2155 }
2156
2157 /* Scan pattern PAT of INSN and add an entry to the hash TABLE (set or
2158 expression one). */
2159
2160 static void
2161 hash_scan_set (pat, insn, table)
2162 rtx pat, insn;
2163 struct hash_table *table;
2164 {
2165 rtx src = SET_SRC (pat);
2166 rtx dest = SET_DEST (pat);
2167 rtx note;
2168
2169 if (GET_CODE (src) == CALL)
2170 hash_scan_call (src, insn, table);
2171
2172 else if (GET_CODE (dest) == REG)
2173 {
2174 unsigned int regno = REGNO (dest);
2175 rtx tmp;
2176
2177 /* If this is a single set and we are doing constant propagation,
2178 see if a REG_NOTE shows this equivalent to a constant. */
2179 if (table->set_p && (note = find_reg_equal_equiv_note (insn)) != 0
2180 && CONSTANT_P (XEXP (note, 0)))
2181 src = XEXP (note, 0), pat = gen_rtx_SET (VOIDmode, dest, src);
2182
2183 /* Only record sets of pseudo-regs in the hash table. */
2184 if (! table->set_p
2185 && regno >= FIRST_PSEUDO_REGISTER
2186 /* Don't GCSE something if we can't do a reg/reg copy. */
2187 && can_copy_p [GET_MODE (dest)]
2188 /* GCSE commonly inserts instructions after the insn. We can't
2189 do that easily for EH_REGION notes, so disable GCSE on these
2190 for now. */
2191 && !find_reg_note (insn, REG_EH_REGION, NULL_RTX)
2192 /* Is SET_SRC something we want to gcse? */
2193 && want_to_gcse_p (src)
2194 /* Don't CSE a nop. */
2195 && ! set_noop_p (pat)
2196 /* Don't GCSE if it has an attached REG_EQUIV note.
2197 At this point only function parameters should have
2198 REG_EQUIV notes, and if the argument slot is used somewhere
2199 explicitly, it means the address of the parameter has been taken,
2200 so we should not extend the lifetime of the pseudo. */
2201 && ((note = find_reg_note (insn, REG_EQUIV, NULL_RTX)) == 0
2202 || GET_CODE (XEXP (note, 0)) != MEM))
2203 {
2204 /* An expression is not anticipatable if its operands are
2205 modified before this insn or if this is not the only SET in
2206 this insn. */
2207 int antic_p = oprs_anticipatable_p (src, insn) && single_set (insn);
2208 /* An expression is not available if its operands are
2209 subsequently modified, including this insn. It's also not
2210 available if this is a branch, because we can't insert
2211 a set after the branch. */
2212 int avail_p = (oprs_available_p (src, insn)
2213 && ! JUMP_P (insn));
2214
2215 insert_expr_in_table (src, GET_MODE (dest), insn, antic_p, avail_p, table);
2216 }
2217
2218 /* Record sets for constant/copy propagation. */
2219 else if (table->set_p
2220 && regno >= FIRST_PSEUDO_REGISTER
2221 && ((GET_CODE (src) == REG
2222 && REGNO (src) >= FIRST_PSEUDO_REGISTER
2223 && can_copy_p [GET_MODE (dest)]
2224 && REGNO (src) != regno)
2225 || CONSTANT_P (src))
2226 /* A copy is not available if its src or dest is subsequently
2227 modified. Here we want to search from INSN+1 on, but
2228 oprs_available_p searches from INSN on. */
2229 && (insn == BLOCK_END (BLOCK_NUM (insn))
2230 || ((tmp = next_nonnote_insn (insn)) != NULL_RTX
2231 && oprs_available_p (pat, tmp))))
2232 insert_set_in_table (pat, insn, table);
2233 }
2234 }
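
/* Illustrative sketch (added for exposition, not from the original
   sources): with table->set_p zero, an insn such as
   (set (reg 120) (plus:SI (reg 100) (const_int 8))) records the PLUS as an
   expression via insert_expr_in_table; with table->set_p nonzero, an insn
   such as (set (reg 121) (const_int 42)) records the whole SET via
   insert_set_in_table for later const/copy propagation.  */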
2235
2236 static void
2237 hash_scan_clobber (x, insn, table)
2238 rtx x ATTRIBUTE_UNUSED, insn ATTRIBUTE_UNUSED;
2239 struct hash_table *table ATTRIBUTE_UNUSED;
2240 {
2241 /* Currently nothing to do. */
2242 }
2243
2244 static void
2245 hash_scan_call (x, insn, table)
2246 rtx x ATTRIBUTE_UNUSED, insn ATTRIBUTE_UNUSED;
2247 struct hash_table *table ATTRIBUTE_UNUSED;
2248 {
2249 /* Currently nothing to do. */
2250 }
2251
2252 /* Process INSN and add hash table entries as appropriate.
2253
2254 Only available expressions that set a single pseudo-reg are recorded.
2255
2256 Single sets in a PARALLEL could be handled, but it's an extra complication
2257 that isn't dealt with right now. The trick is handling the CLOBBERs that
2258 are also in the PARALLEL. Later.
2259
2260 If SET_P is nonzero, this is for the assignment hash table,
2261 otherwise it is for the expression hash table.
2262 If IN_LIBCALL_BLOCK is nonzero, we are in a libcall block and should
2263 not record any expressions. */
2264
2265 static void
2266 hash_scan_insn (insn, table, in_libcall_block)
2267 rtx insn;
2268 struct hash_table *table;
2269 int in_libcall_block;
2270 {
2271 rtx pat = PATTERN (insn);
2272 int i;
2273
2274 if (in_libcall_block)
2275 return;
2276
2277 /* Pick out the sets of INSN and for other forms of instructions record
2278 what's been modified. */
2279
2280 if (GET_CODE (pat) == SET)
2281 hash_scan_set (pat, insn, table);
2282 else if (GET_CODE (pat) == PARALLEL)
2283 for (i = 0; i < XVECLEN (pat, 0); i++)
2284 {
2285 rtx x = XVECEXP (pat, 0, i);
2286
2287 if (GET_CODE (x) == SET)
2288 hash_scan_set (x, insn, table);
2289 else if (GET_CODE (x) == CLOBBER)
2290 hash_scan_clobber (x, insn, table);
2291 else if (GET_CODE (x) == CALL)
2292 hash_scan_call (x, insn, table);
2293 }
2294
2295 else if (GET_CODE (pat) == CLOBBER)
2296 hash_scan_clobber (pat, insn, table);
2297 else if (GET_CODE (pat) == CALL)
2298 hash_scan_call (pat, insn, table);
2299 }
2300
2301 static void
2302 dump_hash_table (file, name, table)
2303 FILE *file;
2304 const char *name;
2305 struct hash_table *table;
2306 {
2307 int i;
2308 /* Flattened out table, so it's printed in proper order. */
2309 struct expr **flat_table;
2310 unsigned int *hash_val;
2311 struct expr *expr;
2312
2313 flat_table
2314 = (struct expr **) xcalloc (table->n_elems, sizeof (struct expr *));
2315 hash_val = (unsigned int *) xmalloc (table->n_elems * sizeof (unsigned int));
2316
2317 for (i = 0; i < (int) table->size; i++)
2318 for (expr = table->table[i]; expr != NULL; expr = expr->next_same_hash)
2319 {
2320 flat_table[expr->bitmap_index] = expr;
2321 hash_val[expr->bitmap_index] = i;
2322 }
2323
2324 fprintf (file, "%s hash table (%d buckets, %d entries)\n",
2325 name, table->size, table->n_elems);
2326
2327 for (i = 0; i < (int) table->n_elems; i++)
2328 if (flat_table[i] != 0)
2329 {
2330 expr = flat_table[i];
2331 fprintf (file, "Index %d (hash value %d)\n ",
2332 expr->bitmap_index, hash_val[i]);
2333 print_rtl (file, expr->expr);
2334 fprintf (file, "\n");
2335 }
2336
2337 fprintf (file, "\n");
2338
2339 free (flat_table);
2340 free (hash_val);
2341 }
2342
2343 /* Record register first/last/block set information for REGNO in INSN.
2344
2345 first_set records the first place in the block where the register
2346 is set and is used to compute "anticipatability".
2347
2348 last_set records the last place in the block where the register
2349 is set and is used to compute "availability".
2350
2351 last_bb records the block for which first_set and last_set are
2352 valid, as a quick test to invalidate them.
2353
2354 reg_set_in_block records whether the register is set in the block
2355 and is used to compute "transparency". */
2356
2357 static void
2358 record_last_reg_set_info (insn, regno)
2359 rtx insn;
2360 int regno;
2361 {
2362 struct reg_avail_info *info = &reg_avail_info[regno];
2363 int cuid = INSN_CUID (insn);
2364
2365 info->last_set = cuid;
2366 if (info->last_bb != current_bb)
2367 {
2368 info->last_bb = current_bb;
2369 info->first_set = cuid;
2370 SET_BIT (reg_set_in_block[current_bb->index], regno);
2371 }
2372 }
2373
2374
2375 /* Record all of the canonicalized MEMs of record_last_mem_set_info's insn.
2376 Note we store a pair of elements in the list, so they have to be
2377 taken off pairwise. */
2378
2379 static void
2380 canon_list_insert (dest, unused1, v_insn)
2381 rtx dest ATTRIBUTE_UNUSED;
2382 rtx unused1 ATTRIBUTE_UNUSED;
2383 void * v_insn;
2384 {
2385 rtx dest_addr, insn;
2386 int bb;
2387
2388 while (GET_CODE (dest) == SUBREG
2389 || GET_CODE (dest) == ZERO_EXTRACT
2390 || GET_CODE (dest) == SIGN_EXTRACT
2391 || GET_CODE (dest) == STRICT_LOW_PART)
2392 dest = XEXP (dest, 0);
2393
2394 /* If DEST is not a MEM, then it will not conflict with a load. Note
2395 that function calls are assumed to clobber memory, but are handled
2396 elsewhere. */
2397
2398 if (GET_CODE (dest) != MEM)
2399 return;
2400
2401 dest_addr = get_addr (XEXP (dest, 0));
2402 dest_addr = canon_rtx (dest_addr);
2403 insn = (rtx) v_insn;
2404 bb = BLOCK_NUM (insn);
2405
2406 canon_modify_mem_list[bb] =
2407 alloc_EXPR_LIST (VOIDmode, dest_addr, canon_modify_mem_list[bb]);
2408 canon_modify_mem_list[bb] =
2409 alloc_EXPR_LIST (VOIDmode, dest, canon_modify_mem_list[bb]);
2410 bitmap_set_bit (canon_modify_mem_list_set, bb);
2411 }
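
/* Sketch of the resulting layout (illustrative, not from the original
   sources): after a store to (mem (reg 100)) the head of
   canon_modify_mem_list[bb] is the MEM itself and the next element is its
   canonicalized address, so consumers walking the list must take the
   entries off two at a time, MEM first.  */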
2412
2413 /* Record memory modification information for INSN. We do not actually care
2414 about the memory location(s) that are set, or even how they are set (consider
2415 a CALL_INSN). We merely need to record which insns modify memory. */
2416
2417 static void
2418 record_last_mem_set_info (insn)
2419 rtx insn;
2420 {
2421 int bb = BLOCK_NUM (insn);
2422
2423 /* load_killed_in_block_p will handle the case of calls clobbering
2424 everything. */
2425 modify_mem_list[bb] = alloc_INSN_LIST (insn, modify_mem_list[bb]);
2426 bitmap_set_bit (modify_mem_list_set, bb);
2427
2428 if (GET_CODE (insn) == CALL_INSN)
2429 {
2430 /* Note that traversals of this loop (other than for free-ing)
2431 will break after encountering a CALL_INSN. So, there's no
2432 need to insert a pair of items, as canon_list_insert does. */
2433 canon_modify_mem_list[bb] =
2434 alloc_INSN_LIST (insn, canon_modify_mem_list[bb]);
2435 bitmap_set_bit (canon_modify_mem_list_set, bb);
2436 }
2437 else
2438 note_stores (PATTERN (insn), canon_list_insert, (void*) insn);
2439 }
2440
2441 /* Called from compute_hash_table via note_stores to handle one
2442 SET or CLOBBER in an insn. DATA is really the instruction in which
2443 the SET is taking place. */
2444
2445 static void
2446 record_last_set_info (dest, setter, data)
2447 rtx dest, setter ATTRIBUTE_UNUSED;
2448 void *data;
2449 {
2450 rtx last_set_insn = (rtx) data;
2451
2452 if (GET_CODE (dest) == SUBREG)
2453 dest = SUBREG_REG (dest);
2454
2455 if (GET_CODE (dest) == REG)
2456 record_last_reg_set_info (last_set_insn, REGNO (dest));
2457 else if (GET_CODE (dest) == MEM
2458 /* Ignore pushes, they clobber nothing. */
2459 && ! push_operand (dest, GET_MODE (dest)))
2460 record_last_mem_set_info (last_set_insn);
2461 }
2462
2463 /* Top level function to create an expression or assignment hash table.
2464
2465 Expression entries are placed in the hash table if
2466 - they are of the form (set (pseudo-reg) src),
2467 - src is something we want to perform GCSE on,
2468 - none of the operands are subsequently modified in the block
2469
2470 Assignment entries are placed in the hash table if
2471 - they are of the form (set (pseudo-reg) src),
2472 - src is something we want to perform const/copy propagation on,
2473 - none of the operands or target are subsequently modified in the block
2474
2475 Currently src must be a pseudo-reg or a const_int.
2476
2477 F is the first insn.
2478 TABLE is the table computed. */
2479
2480 static void
2481 compute_hash_table_work (table)
2482 struct hash_table *table;
2483 {
2484 unsigned int i;
2485
2486 /* While we compute the hash table we also compute a bit array of which
2487 registers are set in which blocks.
2488 ??? This isn't needed during const/copy propagation, but it's cheap to
2489 compute. Later. */
2490 sbitmap_vector_zero (reg_set_in_block, last_basic_block);
2491
2492 /* Re-cache any INSN_LIST nodes we have allocated. */
2493 clear_modify_mem_tables ();
2494 /* Some working arrays used to track first and last set in each block. */
2495 reg_avail_info = (struct reg_avail_info*)
2496 gmalloc (max_gcse_regno * sizeof (struct reg_avail_info));
2497
2498 for (i = 0; i < max_gcse_regno; ++i)
2499 reg_avail_info[i].last_bb = NULL;
2500
2501 FOR_EACH_BB (current_bb)
2502 {
2503 rtx insn;
2504 unsigned int regno;
2505 int in_libcall_block;
2506
2507 /* First pass over the instructions records information used to
2508 determine when registers and memory are first and last set.
2509 ??? hard-reg reg_set_in_block computation
2510 could be moved to compute_sets since they currently don't change. */
2511
2512 for (insn = current_bb->head;
2513 insn && insn != NEXT_INSN (current_bb->end);
2514 insn = NEXT_INSN (insn))
2515 {
2516 if (! INSN_P (insn))
2517 continue;
2518
2519 if (GET_CODE (insn) == CALL_INSN)
2520 {
2521 bool clobbers_all = false;
2522 #ifdef NON_SAVING_SETJMP
2523 if (NON_SAVING_SETJMP
2524 && find_reg_note (insn, REG_SETJMP, NULL_RTX))
2525 clobbers_all = true;
2526 #endif
2527
2528 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
2529 if (clobbers_all
2530 || TEST_HARD_REG_BIT (regs_invalidated_by_call, regno))
2531 record_last_reg_set_info (insn, regno);
2532
2533 mark_call (insn);
2534 }
2535
2536 note_stores (PATTERN (insn), record_last_set_info, insn);
2537 }
2538
2539 /* The next pass builds the hash table. */
2540
2541 for (insn = current_bb->head, in_libcall_block = 0;
2542 insn && insn != NEXT_INSN (current_bb->end);
2543 insn = NEXT_INSN (insn))
2544 if (INSN_P (insn))
2545 {
2546 if (find_reg_note (insn, REG_LIBCALL, NULL_RTX))
2547 in_libcall_block = 1;
2548 else if (table->set_p && find_reg_note (insn, REG_RETVAL, NULL_RTX))
2549 in_libcall_block = 0;
2550 hash_scan_insn (insn, table, in_libcall_block);
2551 if (!table->set_p && find_reg_note (insn, REG_RETVAL, NULL_RTX))
2552 in_libcall_block = 0;
2553 }
2554 }
2555
2556 free (reg_avail_info);
2557 reg_avail_info = NULL;
2558 }
2559
2560 /* Allocate space for the set/expr hash TABLE.
2561 N_INSNS is the number of instructions in the function.
2562 It is used to determine the number of buckets to use.
2563 SET_P determines whether set or expression table will
2564 be created. */
2565
2566 static void
2567 alloc_hash_table (n_insns, table, set_p)
2568 int n_insns;
2569 struct hash_table *table;
2570 int set_p;
2571 {
2572 int n;
2573
2574 table->size = n_insns / 4;
2575 if (table->size < 11)
2576 table->size = 11;
2577
2578 /* Attempt to maintain efficient use of hash table.
2579 Making it an odd number is simplest for now.
2580 ??? Later take some measurements. */
2581 table->size |= 1;
2582 n = table->size * sizeof (struct expr *);
2583 table->table = (struct expr **) gmalloc (n);
2584 table->set_p = set_p;
2585 }
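
/* Sizing example (illustrative only): with n_insns == 40 the initial size
   10 is raised to the minimum of 11, and 11 | 1 leaves it at 11 buckets;
   with n_insns == 400 the size becomes 100 | 1 == 101.  */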
2586
2587 /* Free things allocated by alloc_hash_table. */
2588
2589 static void
2590 free_hash_table (table)
2591 struct hash_table *table;
2592 {
2593 free (table->table);
2594 }
2595
2596 /* Compute the hash TABLE, either for copy/const propagation or
2597 for the expression hash table. */
2598
2599 static void
2600 compute_hash_table (table)
2601 struct hash_table *table;
2602 {
2603 /* Initialize count of number of entries in hash table. */
2604 table->n_elems = 0;
2605 memset ((char *) table->table, 0,
2606 table->size * sizeof (struct expr *));
2607
2608 compute_hash_table_work (table);
2609 }
2610
2611 /* Expression tracking support. */
2612
2613 /* Lookup pattern PAT in the expression TABLE.
2614 The result is a pointer to the table entry, or NULL if not found. */
2615
2616 static struct expr *
2617 lookup_expr (pat, table)
2618 rtx pat;
2619 struct hash_table *table;
2620 {
2621 int do_not_record_p;
2622 unsigned int hash = hash_expr (pat, GET_MODE (pat), &do_not_record_p,
2623 table->size);
2624 struct expr *expr;
2625
2626 if (do_not_record_p)
2627 return NULL;
2628
2629 expr = table->table[hash];
2630
2631 while (expr && ! expr_equiv_p (expr->expr, pat))
2632 expr = expr->next_same_hash;
2633
2634 return expr;
2635 }
2636
2637 /* Lookup REGNO in the set TABLE. If PAT is non-NULL look for the entry that
2638 matches it, otherwise return the first entry for REGNO. The result is a
2639 pointer to the table entry, or NULL if not found. */
2640
2641 static struct expr *
2642 lookup_set (regno, pat, table)
2643 unsigned int regno;
2644 rtx pat;
2645 struct hash_table *table;
2646 {
2647 unsigned int hash = hash_set (regno, table->size);
2648 struct expr *expr;
2649
2650 expr = table->table[hash];
2651
2652 if (pat)
2653 {
2654 while (expr && ! expr_equiv_p (expr->expr, pat))
2655 expr = expr->next_same_hash;
2656 }
2657 else
2658 {
2659 while (expr && REGNO (SET_DEST (expr->expr)) != regno)
2660 expr = expr->next_same_hash;
2661 }
2662
2663 return expr;
2664 }
2665
2666 /* Return the next entry for REGNO in list EXPR. */
2667
2668 static struct expr *
2669 next_set (regno, expr)
2670 unsigned int regno;
2671 struct expr *expr;
2672 {
2673 do
2674 expr = expr->next_same_hash;
2675 while (expr && REGNO (SET_DEST (expr->expr)) != regno);
2676
2677 return expr;
2678 }
2679
2680 /* Like free_INSN_LIST_list or free_EXPR_LIST_list, except that the node
2681 types may be mixed. */
2682
2683 static void
2684 free_insn_expr_list_list (listp)
2685 rtx *listp;
2686 {
2687 rtx list, next;
2688
2689 for (list = *listp; list ; list = next)
2690 {
2691 next = XEXP (list, 1);
2692 if (GET_CODE (list) == EXPR_LIST)
2693 free_EXPR_LIST_node (list);
2694 else
2695 free_INSN_LIST_node (list);
2696 }
2697
2698 *listp = NULL;
2699 }
2700
2701 /* Clear canon_modify_mem_list and modify_mem_list tables. */
2702 static void
2703 clear_modify_mem_tables ()
2704 {
2705 int i;
2706
2707 EXECUTE_IF_SET_IN_BITMAP
2708 (modify_mem_list_set, 0, i, free_INSN_LIST_list (modify_mem_list + i));
2709 bitmap_clear (modify_mem_list_set);
2710
2711 EXECUTE_IF_SET_IN_BITMAP
2712 (canon_modify_mem_list_set, 0, i,
2713 free_insn_expr_list_list (canon_modify_mem_list + i));
2714 bitmap_clear (canon_modify_mem_list_set);
2715 }
2716
2717 /* Release memory used by modify_mem_list_set and canon_modify_mem_list_set. */
2718
2719 static void
2720 free_modify_mem_tables ()
2721 {
2722 clear_modify_mem_tables ();
2723 free (modify_mem_list);
2724 free (canon_modify_mem_list);
2725 modify_mem_list = 0;
2726 canon_modify_mem_list = 0;
2727 }
2728
2729 /* Reset tables used to keep track of what's still available [since the
2730 start of the block]. */
2731
2732 static void
2733 reset_opr_set_tables ()
2734 {
2735 /* Maintain a bitmap of which regs have been set since beginning of
2736 the block. */
2737 CLEAR_REG_SET (reg_set_bitmap);
2738
2739 /* Also keep a record of the last instruction to modify memory.
2740 For now this is very trivial, we only record whether any memory
2741 location has been modified. */
2742 clear_modify_mem_tables ();
2743 }
2744
2745 /* Return nonzero if the operands of X are not set before INSN in
2746 INSN's basic block. */
2747
2748 static int
2749 oprs_not_set_p (x, insn)
2750 rtx x, insn;
2751 {
2752 int i, j;
2753 enum rtx_code code;
2754 const char *fmt;
2755
2756 if (x == 0)
2757 return 1;
2758
2759 code = GET_CODE (x);
2760 switch (code)
2761 {
2762 case PC:
2763 case CC0:
2764 case CONST:
2765 case CONST_INT:
2766 case CONST_DOUBLE:
2767 case CONST_VECTOR:
2768 case SYMBOL_REF:
2769 case LABEL_REF:
2770 case ADDR_VEC:
2771 case ADDR_DIFF_VEC:
2772 return 1;
2773
2774 case MEM:
2775 if (load_killed_in_block_p (BLOCK_FOR_INSN (insn),
2776 INSN_CUID (insn), x, 0))
2777 return 0;
2778 else
2779 return oprs_not_set_p (XEXP (x, 0), insn);
2780
2781 case REG:
2782 return ! REGNO_REG_SET_P (reg_set_bitmap, REGNO (x));
2783
2784 default:
2785 break;
2786 }
2787
2788 for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
2789 {
2790 if (fmt[i] == 'e')
2791 {
2792 /* If we are about to do the last recursive call
2793 needed at this level, change it into iteration.
2794 This function is called enough to be worth it. */
2795 if (i == 0)
2796 return oprs_not_set_p (XEXP (x, i), insn);
2797
2798 if (! oprs_not_set_p (XEXP (x, i), insn))
2799 return 0;
2800 }
2801 else if (fmt[i] == 'E')
2802 for (j = 0; j < XVECLEN (x, i); j++)
2803 if (! oprs_not_set_p (XVECEXP (x, i, j), insn))
2804 return 0;
2805 }
2806
2807 return 1;
2808 }
2809
2810 /* Mark things set by a CALL. */
2811
2812 static void
2813 mark_call (insn)
2814 rtx insn;
2815 {
2816 if (! CONST_OR_PURE_CALL_P (insn))
2817 record_last_mem_set_info (insn);
2818 }
2819
2820 /* Mark things set by a SET. */
2821
2822 static void
2823 mark_set (pat, insn)
2824 rtx pat, insn;
2825 {
2826 rtx dest = SET_DEST (pat);
2827
2828 while (GET_CODE (dest) == SUBREG
2829 || GET_CODE (dest) == ZERO_EXTRACT
2830 || GET_CODE (dest) == SIGN_EXTRACT
2831 || GET_CODE (dest) == STRICT_LOW_PART)
2832 dest = XEXP (dest, 0);
2833
2834 if (GET_CODE (dest) == REG)
2835 SET_REGNO_REG_SET (reg_set_bitmap, REGNO (dest));
2836 else if (GET_CODE (dest) == MEM)
2837 record_last_mem_set_info (insn);
2838
2839 if (GET_CODE (SET_SRC (pat)) == CALL)
2840 mark_call (insn);
2841 }
2842
2843 /* Record things set by a CLOBBER. */
2844
2845 static void
2846 mark_clobber (pat, insn)
2847 rtx pat, insn;
2848 {
2849 rtx clob = XEXP (pat, 0);
2850
2851 while (GET_CODE (clob) == SUBREG || GET_CODE (clob) == STRICT_LOW_PART)
2852 clob = XEXP (clob, 0);
2853
2854 if (GET_CODE (clob) == REG)
2855 SET_REGNO_REG_SET (reg_set_bitmap, REGNO (clob));
2856 else
2857 record_last_mem_set_info (insn);
2858 }
2859
2860 /* Record things set by INSN.
2861 This data is used by oprs_not_set_p. */
2862
2863 static void
2864 mark_oprs_set (insn)
2865 rtx insn;
2866 {
2867 rtx pat = PATTERN (insn);
2868 int i;
2869
2870 if (GET_CODE (pat) == SET)
2871 mark_set (pat, insn);
2872 else if (GET_CODE (pat) == PARALLEL)
2873 for (i = 0; i < XVECLEN (pat, 0); i++)
2874 {
2875 rtx x = XVECEXP (pat, 0, i);
2876
2877 if (GET_CODE (x) == SET)
2878 mark_set (x, insn);
2879 else if (GET_CODE (x) == CLOBBER)
2880 mark_clobber (x, insn);
2881 else if (GET_CODE (x) == CALL)
2882 mark_call (insn);
2883 }
2884
2885 else if (GET_CODE (pat) == CLOBBER)
2886 mark_clobber (pat, insn);
2887 else if (GET_CODE (pat) == CALL)
2888 mark_call (insn);
2889 }
2890
2891
2892 /* Classic GCSE reaching definition support. */
2893
2894 /* Allocate reaching def variables. */
2895
2896 static void
2897 alloc_rd_mem (n_blocks, n_insns)
2898 int n_blocks, n_insns;
2899 {
2900 rd_kill = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_insns);
2901 sbitmap_vector_zero (rd_kill, n_blocks);
2902
2903 rd_gen = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_insns);
2904 sbitmap_vector_zero (rd_gen, n_blocks);
2905
2906 reaching_defs = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_insns);
2907 sbitmap_vector_zero (reaching_defs, n_blocks);
2908
2909 rd_out = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_insns);
2910 sbitmap_vector_zero (rd_out, n_blocks);
2911 }
2912
2913 /* Free reaching def variables. */
2914
2915 static void
2916 free_rd_mem ()
2917 {
2918 sbitmap_vector_free (rd_kill);
2919 sbitmap_vector_free (rd_gen);
2920 sbitmap_vector_free (reaching_defs);
2921 sbitmap_vector_free (rd_out);
2922 }
2923
2924 /* Add INSN to the kills of BB. REGNO, set in BB, is killed by INSN. */
2925
2926 static void
2927 handle_rd_kill_set (insn, regno, bb)
2928 rtx insn;
2929 int regno;
2930 basic_block bb;
2931 {
2932 struct reg_set *this_reg;
2933
2934 for (this_reg = reg_set_table[regno]; this_reg; this_reg = this_reg->next)
2935 if (BLOCK_NUM (this_reg->insn) != BLOCK_NUM (insn))
2936 SET_BIT (rd_kill[bb->index], INSN_CUID (this_reg->insn));
2937 }
2938
2939 /* Compute the set of kills for reaching definitions. */
2940
2941 static void
2942 compute_kill_rd ()
2943 {
2944 int cuid;
2945 unsigned int regno;
2946 int i;
2947 basic_block bb;
2948
2949 /* For each block
2950 For each set bit in `gen' of the block (i.e each insn which
2951 generates a definition in the block)
2952 Call the reg set by the insn corresponding to that bit regx
2953 Look at the linked list starting at reg_set_table[regx]
2954 For each setting of regx in the linked list, which is not in
2955 this block
2956 Set the bit in `kill' corresponding to that insn. */
2957 FOR_EACH_BB (bb)
2958 for (cuid = 0; cuid < max_cuid; cuid++)
2959 if (TEST_BIT (rd_gen[bb->index], cuid))
2960 {
2961 rtx insn = CUID_INSN (cuid);
2962 rtx pat = PATTERN (insn);
2963
2964 if (GET_CODE (insn) == CALL_INSN)
2965 {
2966 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
2967 if (TEST_HARD_REG_BIT (regs_invalidated_by_call, regno))
2968 handle_rd_kill_set (insn, regno, bb);
2969 }
2970
2971 if (GET_CODE (pat) == PARALLEL)
2972 {
2973 for (i = XVECLEN (pat, 0) - 1; i >= 0; i--)
2974 {
2975 enum rtx_code code = GET_CODE (XVECEXP (pat, 0, i));
2976
2977 if ((code == SET || code == CLOBBER)
2978 && GET_CODE (XEXP (XVECEXP (pat, 0, i), 0)) == REG)
2979 handle_rd_kill_set (insn,
2980 REGNO (XEXP (XVECEXP (pat, 0, i), 0)),
2981 bb);
2982 }
2983 }
2984 else if (GET_CODE (pat) == SET && GET_CODE (SET_DEST (pat)) == REG)
2985 /* Each setting of this register outside of this block
2986 must be marked in the set of kills in this block. */
2987 handle_rd_kill_set (insn, REGNO (SET_DEST (pat)), bb);
2988 }
2989 }
2990
2991 /* Compute the reaching definitions as in
2992 Compilers Principles, Techniques, and Tools. Aho, Sethi, Ullman,
2993 Chapter 10. It is the same algorithm as used for computing available
2994 expressions but applied to the gens and kills of reaching definitions. */
2995
2996 static void
2997 compute_rd ()
2998 {
2999 int changed, passes;
3000 basic_block bb;
3001
3002 FOR_EACH_BB (bb)
3003 sbitmap_copy (rd_out[bb->index] /*dst*/, rd_gen[bb->index] /*src*/);
3004
3005 passes = 0;
3006 changed = 1;
3007 while (changed)
3008 {
3009 changed = 0;
3010 FOR_EACH_BB (bb)
3011 {
3012 sbitmap_union_of_preds (reaching_defs[bb->index], rd_out, bb->index);
3013 changed |= sbitmap_union_of_diff_cg (rd_out[bb->index], rd_gen[bb->index],
3014 reaching_defs[bb->index], rd_kill[bb->index]);
3015 }
3016 passes++;
3017 }
3018
3019 if (gcse_file)
3020 fprintf (gcse_file, "reaching def computation: %d passes\n", passes);
3021 }
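
/* In sketch form (notation added for exposition), the loop above iterates
   reaching_defs[bb] = union over predecessors p of rd_out[p]
   rd_out[bb] = rd_gen[bb] | (reaching_defs[bb] & ~rd_kill[bb])
   until no rd_out bitmap changes.  */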
3022
3023 /* Classic GCSE available expression support. */
3024
3025 /* Allocate memory for available expression computation. */
3026
3027 static void
3028 alloc_avail_expr_mem (n_blocks, n_exprs)
3029 int n_blocks, n_exprs;
3030 {
3031 ae_kill = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_exprs);
3032 sbitmap_vector_zero (ae_kill, n_blocks);
3033
3034 ae_gen = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_exprs);
3035 sbitmap_vector_zero (ae_gen, n_blocks);
3036
3037 ae_in = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_exprs);
3038 sbitmap_vector_zero (ae_in, n_blocks);
3039
3040 ae_out = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_exprs);
3041 sbitmap_vector_zero (ae_out, n_blocks);
3042 }
3043
3044 static void
3045 free_avail_expr_mem ()
3046 {
3047 sbitmap_vector_free (ae_kill);
3048 sbitmap_vector_free (ae_gen);
3049 sbitmap_vector_free (ae_in);
3050 sbitmap_vector_free (ae_out);
3051 }
3052
3053 /* Compute the set of available expressions generated in each basic block. */
3054
3055 static void
3056 compute_ae_gen (expr_hash_table)
3057 struct hash_table *expr_hash_table;
3058 {
3059 unsigned int i;
3060 struct expr *expr;
3061 struct occr *occr;
3062
3063 /* For each recorded occurrence of each expression, set ae_gen[bb][expr].
3064 This is all we have to do because an expression is not recorded if it
3065 is not available, and the only expressions we want to work with are the
3066 ones that are recorded. */
3067 for (i = 0; i < expr_hash_table->size; i++)
3068 for (expr = expr_hash_table->table[i]; expr != 0; expr = expr->next_same_hash)
3069 for (occr = expr->avail_occr; occr != 0; occr = occr->next)
3070 SET_BIT (ae_gen[BLOCK_NUM (occr->insn)], expr->bitmap_index);
3071 }
3072
3073 /* Return nonzero if expression X is killed in BB. */
3074
3075 static int
3076 expr_killed_p (x, bb)
3077 rtx x;
3078 basic_block bb;
3079 {
3080 int i, j;
3081 enum rtx_code code;
3082 const char *fmt;
3083
3084 if (x == 0)
3085 return 1;
3086
3087 code = GET_CODE (x);
3088 switch (code)
3089 {
3090 case REG:
3091 return TEST_BIT (reg_set_in_block[bb->index], REGNO (x));
3092
3093 case MEM:
3094 if (load_killed_in_block_p (bb, get_max_uid () + 1, x, 0))
3095 return 1;
3096 else
3097 return expr_killed_p (XEXP (x, 0), bb);
3098
3099 case PC:
3100 case CC0: /*FIXME*/
3101 case CONST:
3102 case CONST_INT:
3103 case CONST_DOUBLE:
3104 case CONST_VECTOR:
3105 case SYMBOL_REF:
3106 case LABEL_REF:
3107 case ADDR_VEC:
3108 case ADDR_DIFF_VEC:
3109 return 0;
3110
3111 default:
3112 break;
3113 }
3114
3115 for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
3116 {
3117 if (fmt[i] == 'e')
3118 {
3119 /* If we are about to do the last recursive call
3120 needed at this level, change it into iteration.
3121 This function is called enough to be worth it. */
3122 if (i == 0)
3123 return expr_killed_p (XEXP (x, i), bb);
3124 else if (expr_killed_p (XEXP (x, i), bb))
3125 return 1;
3126 }
3127 else if (fmt[i] == 'E')
3128 for (j = 0; j < XVECLEN (x, i); j++)
3129 if (expr_killed_p (XVECEXP (x, i, j), bb))
3130 return 1;
3131 }
3132
3133 return 0;
3134 }
3135
3136 /* Compute the set of available expressions killed in each basic block. */
3137
3138 static void
3139 compute_ae_kill (ae_gen, ae_kill, expr_hash_table)
3140 sbitmap *ae_gen, *ae_kill;
3141 struct hash_table *expr_hash_table;
3142 {
3143 basic_block bb;
3144 unsigned int i;
3145 struct expr *expr;
3146
3147 FOR_EACH_BB (bb)
3148 for (i = 0; i < expr_hash_table->size; i++)
3149 for (expr = expr_hash_table->table[i]; expr; expr = expr->next_same_hash)
3150 {
3151 /* Skip EXPR if generated in this block. */
3152 if (TEST_BIT (ae_gen[bb->index], expr->bitmap_index))
3153 continue;
3154
3155 if (expr_killed_p (expr->expr, bb))
3156 SET_BIT (ae_kill[bb->index], expr->bitmap_index);
3157 }
3158 }
3159
3160 /* Actually perform the Classic GCSE optimizations. */
3161
3162 /* Return nonzero if occurrence OCCR of expression EXPR reaches block BB.
3163
3164 CHECK_SELF_LOOP is nonzero if we should consider a block reaching itself
3165 as a positive reach. We want to do this when there are two computations
3166 of the expression in the block.
3167
3168 VISITED is a pointer to a working buffer for tracking which BB's have
3169 been visited. It is NULL for the top-level call.
3170
3171 We treat reaching expressions that go through blocks containing the same
3172 reaching expression as "not reaching". E.g. if EXPR is generated in blocks
3173 2 and 3, INSN is in block 4, and 2->3->4, we treat the expression in block
3174 2 as not reaching. The intent is to improve the probability of finding
3175 only one reaching expression and to reduce register lifetimes by picking
3176 the closest such expression. */
3177
3178 static int
3179 expr_reaches_here_p_work (occr, expr, bb, check_self_loop, visited)
3180 struct occr *occr;
3181 struct expr *expr;
3182 basic_block bb;
3183 int check_self_loop;
3184 char *visited;
3185 {
3186 edge pred;
3187
3188 for (pred = bb->pred; pred != NULL; pred = pred->pred_next)
3189 {
3190 basic_block pred_bb = pred->src;
3191
3192 if (visited[pred_bb->index])
3193 /* This predecessor has already been visited. Nothing to do. */
3194 ;
3195 else if (pred_bb == bb)
3196 {
3197 /* BB loops on itself. */
3198 if (check_self_loop
3199 && TEST_BIT (ae_gen[pred_bb->index], expr->bitmap_index)
3200 && BLOCK_NUM (occr->insn) == pred_bb->index)
3201 return 1;
3202
3203 visited[pred_bb->index] = 1;
3204 }
3205
3206 /* Ignore this predecessor if it kills the expression. */
3207 else if (TEST_BIT (ae_kill[pred_bb->index], expr->bitmap_index))
3208 visited[pred_bb->index] = 1;
3209
3210 /* Does this predecessor generate this expression? */
3211 else if (TEST_BIT (ae_gen[pred_bb->index], expr->bitmap_index))
3212 {
3213 /* Is this the occurrence we're looking for?
3214 Note that there's only one generating occurrence per block
3215 so we just need to check the block number. */
3216 if (BLOCK_NUM (occr->insn) == pred_bb->index)
3217 return 1;
3218
3219 visited[pred_bb->index] = 1;
3220 }
3221
3222 /* Neither gen nor kill. */
3223 else
3224 {
3225 visited[pred_bb->index] = 1;
3226 if (expr_reaches_here_p_work (occr, expr, pred_bb, check_self_loop,
3227 visited))
3228
3229 return 1;
3230 }
3231 }
3232
3233 /* All paths have been checked. */
3234 return 0;
3235 }
3236
3237 /* This wrapper for expr_reaches_here_p_work() is to ensure that any
3238 memory allocated for that function is returned. */
3239
3240 static int
3241 expr_reaches_here_p (occr, expr, bb, check_self_loop)
3242 struct occr *occr;
3243 struct expr *expr;
3244 basic_block bb;
3245 int check_self_loop;
3246 {
3247 int rval;
3248 char *visited = (char *) xcalloc (last_basic_block, 1);
3249
3250 rval = expr_reaches_here_p_work (occr, expr, bb, check_self_loop, visited);
3251
3252 free (visited);
3253 return rval;
3254 }
3255
3256 /* Return the instruction that computes EXPR that reaches INSN's basic block.
3257 If there is more than one such instruction, return NULL.
3258
3259 Called only by handle_avail_expr. */
3260
3261 static rtx
3262 computing_insn (expr, insn)
3263 struct expr *expr;
3264 rtx insn;
3265 {
3266 basic_block bb = BLOCK_FOR_INSN (insn);
3267
3268 if (expr->avail_occr->next == NULL)
3269 {
3270 if (BLOCK_FOR_INSN (expr->avail_occr->insn) == bb)
3271 /* The available expression is actually itself
3272 (i.e. a loop in the flow graph) so do nothing. */
3273 return NULL;
3274
3275 /* (FIXME) This is the case where we found a pattern that was
3276 created by a substitution that took place. */
3277 return expr->avail_occr->insn;
3278 }
3279 else
3280 {
3281 /* Pattern is computed more than once.
3282 Search backwards from this insn to see how many of these
3283 computations actually reach this insn. */
3284 struct occr *occr;
3285 rtx insn_computes_expr = NULL;
3286 int can_reach = 0;
3287
3288 for (occr = expr->avail_occr; occr != NULL; occr = occr->next)
3289 {
3290 if (BLOCK_FOR_INSN (occr->insn) == bb)
3291 {
3292 /* The expression is generated in this block.
3293 The only time we care about this is when the expression
3294 is generated later in the block [and thus there's a loop].
3295 We let the normal cse pass handle the other cases. */
3296 if (INSN_CUID (insn) < INSN_CUID (occr->insn)
3297 && expr_reaches_here_p (occr, expr, bb, 1))
3298 {
3299 can_reach++;
3300 if (can_reach > 1)
3301 return NULL;
3302
3303 insn_computes_expr = occr->insn;
3304 }
3305 }
3306 else if (expr_reaches_here_p (occr, expr, bb, 0))
3307 {
3308 can_reach++;
3309 if (can_reach > 1)
3310 return NULL;
3311
3312 insn_computes_expr = occr->insn;
3313 }
3314 }
3315
3316 if (insn_computes_expr == NULL)
3317 abort ();
3318
3319 return insn_computes_expr;
3320 }
3321 }
3322
3323 /* Return nonzero if the definition in DEF_INSN can reach INSN.
3324 Only called by can_disregard_other_sets. */
3325
3326 static int
3327 def_reaches_here_p (insn, def_insn)
3328 rtx insn, def_insn;
3329 {
3330 rtx reg;
3331
3332 if (TEST_BIT (reaching_defs[BLOCK_NUM (insn)], INSN_CUID (def_insn)))
3333 return 1;
3334
3335 if (BLOCK_NUM (insn) == BLOCK_NUM (def_insn))
3336 {
3337 if (INSN_CUID (def_insn) < INSN_CUID (insn))
3338 {
3339 if (GET_CODE (PATTERN (def_insn)) == PARALLEL)
3340 return 1;
3341 else if (GET_CODE (PATTERN (def_insn)) == CLOBBER)
3342 reg = XEXP (PATTERN (def_insn), 0);
3343 else if (GET_CODE (PATTERN (def_insn)) == SET)
3344 reg = SET_DEST (PATTERN (def_insn));
3345 else
3346 abort ();
3347
3348 return ! reg_set_between_p (reg, NEXT_INSN (def_insn), insn);
3349 }
3350 else
3351 return 0;
3352 }
3353
3354 return 0;
3355 }
3356
3357 /* Return nonzero if *ADDR_THIS_REG can only have one value at INSN. The
3358 value returned is the number of definitions that reach INSN. Returning a
3359 value of zero means that [maybe] more than one definition reaches INSN and
3360 the caller can't perform whatever optimization it is trying to do; i.e. it
3361 is always safe to return zero. */
3362
3363 static int
3364 can_disregard_other_sets (addr_this_reg, insn, for_combine)
3365 struct reg_set **addr_this_reg;
3366 rtx insn;
3367 int for_combine;
3368 {
3369 int number_of_reaching_defs = 0;
3370 struct reg_set *this_reg;
3371
3372 for (this_reg = *addr_this_reg; this_reg != 0; this_reg = this_reg->next)
3373 if (def_reaches_here_p (insn, this_reg->insn))
3374 {
3375 number_of_reaching_defs++;
3376 /* Ignore parallels for now. */
3377 if (GET_CODE (PATTERN (this_reg->insn)) == PARALLEL)
3378 return 0;
3379
3380 if (!for_combine
3381 && (GET_CODE (PATTERN (this_reg->insn)) == CLOBBER
3382 || ! rtx_equal_p (SET_SRC (PATTERN (this_reg->insn)),
3383 SET_SRC (PATTERN (insn)))))
3384 /* A setting of the reg to a different value reaches INSN. */
3385 return 0;
3386
3387 if (number_of_reaching_defs > 1)
3388 {
3389 /* If, in this setting, the value the register is being set to is
3390 equal to the previous value the register was set to, and this
3391 setting reaches the insn we are trying to do the substitution
3392 on, then we are ok. */
3393 if (GET_CODE (PATTERN (this_reg->insn)) == CLOBBER)
3394 return 0;
3395 else if (! rtx_equal_p (SET_SRC (PATTERN (this_reg->insn)),
3396 SET_SRC (PATTERN (insn))))
3397 return 0;
3398 }
3399
3400 *addr_this_reg = this_reg;
3401 }
3402
3403 return number_of_reaching_defs;
3404 }
3405
3406 /* Expression computed by insn is available and the substitution is legal,
3407 so try to perform the substitution.
3408
3409 The result is nonzero if any changes were made. */
3410
3411 static int
3412 handle_avail_expr (insn, expr)
3413 rtx insn;
3414 struct expr *expr;
3415 {
3416 rtx pat, insn_computes_expr, expr_set;
3417 rtx to;
3418 struct reg_set *this_reg;
3419 int found_setting, use_src;
3420 int changed = 0;
3421
3422 /* We only handle the case where one computation of the expression
3423 reaches this instruction. */
3424 insn_computes_expr = computing_insn (expr, insn);
3425 if (insn_computes_expr == NULL)
3426 return 0;
3427 expr_set = single_set (insn_computes_expr);
3428 if (!expr_set)
3429 abort ();
3430
3431 found_setting = 0;
3432 use_src = 0;
3433
3434 /* At this point we know only one computation of EXPR outside of this
3435 block reaches this insn. Now try to find a register that the
3436 expression is computed into. */
3437 if (GET_CODE (SET_SRC (expr_set)) == REG)
3438 {
3439 /* This is the case when the available expression that reaches
3440 here has already been handled as an available expression. */
3441 unsigned int regnum_for_replacing
3442 = REGNO (SET_SRC (expr_set));
3443
3444 /* If the register was created by GCSE we can't use `reg_set_table',
3445 however we know it's set only once. */
3446 if (regnum_for_replacing >= max_gcse_regno
3447 /* If the register the expression is computed into is set only once,
3448 or only one set reaches this insn, we can use it. */
3449 || (((this_reg = reg_set_table[regnum_for_replacing]),
3450 this_reg->next == NULL)
3451 || can_disregard_other_sets (&this_reg, insn, 0)))
3452 {
3453 use_src = 1;
3454 found_setting = 1;
3455 }
3456 }
3457
3458 if (!found_setting)
3459 {
3460 unsigned int regnum_for_replacing
3461 = REGNO (SET_DEST (expr_set));
3462
3463 /* This shouldn't happen. */
3464 if (regnum_for_replacing >= max_gcse_regno)
3465 abort ();
3466
3467 this_reg = reg_set_table[regnum_for_replacing];
3468
3469 /* If the register the expression is computed into is set only once,
3470 or only one set reaches this insn, use it. */
3471 if (this_reg->next == NULL
3472 || can_disregard_other_sets (&this_reg, insn, 0))
3473 found_setting = 1;
3474 }
3475
3476 if (found_setting)
3477 {
3478 pat = PATTERN (insn);
3479 if (use_src)
3480 to = SET_SRC (expr_set);
3481 else
3482 to = SET_DEST (expr_set);
3483 changed = validate_change (insn, &SET_SRC (pat), to, 0);
3484
3485 /* We should be able to ignore the return code from validate_change but
3486 to play it safe we check. */
3487 if (changed)
3488 {
3489 gcse_subst_count++;
3490 if (gcse_file != NULL)
3491 {
3492 fprintf (gcse_file, "GCSE: Replacing the source in insn %d with",
3493 INSN_UID (insn));
3494 fprintf (gcse_file, " reg %d %s insn %d\n",
3495 REGNO (to), use_src ? "from" : "set in",
3496 INSN_UID (insn_computes_expr));
3497 }
3498 }
3499 }
3500
3501 /* The register that the expr is computed into is set more than once. */
3502 else if (1 /*expensive_op(this_pattern->op) && do_expensive_gcse)*/)
3503 {
3504 /* Insert an insn after INSN_COMPUTES_EXPR that copies the register
3505 it sets into a new pseudo register; call this new register REGN.
3506 Then replace the use of the old register in INSN (the insn we are
3507 substituting into) with REGN. */
3508 rtx new_insn;
3509
3510 to = gen_reg_rtx (GET_MODE (SET_DEST (expr_set)));
3511
3512 /* Generate the new insn. */
3513 /* ??? If the change fails, we return 0, even though we created
3514 an insn. I think this is ok. */
3515 new_insn
3516 = emit_insn_after (gen_rtx_SET (VOIDmode, to,
3517 SET_DEST (expr_set)),
3518 insn_computes_expr);
3519
3520 /* Keep register set table up to date. */
3521 record_one_set (REGNO (to), new_insn);
3522
3523 gcse_create_count++;
3524 if (gcse_file != NULL)
3525 {
3526 fprintf (gcse_file, "GCSE: Creating insn %d to copy value of reg %d",
3527 INSN_UID (NEXT_INSN (insn_computes_expr)),
3528 REGNO (SET_SRC (PATTERN (NEXT_INSN (insn_computes_expr)))));
3529 fprintf (gcse_file, ", computed in insn %d,\n",
3530 INSN_UID (insn_computes_expr));
3531 fprintf (gcse_file, " into newly allocated reg %d\n",
3532 REGNO (to));
3533 }
3534
3535 pat = PATTERN (insn);
3536
3537 /* Do register replacement for INSN. */
3538 changed = validate_change (insn, &SET_SRC (pat),
3539 SET_DEST (PATTERN
3540 (NEXT_INSN (insn_computes_expr))),
3541 0);
3542
3543 /* We should be able to ignore the return code from validate_change but
3544 to play it safe we check. */
3545 if (changed)
3546 {
3547 gcse_subst_count++;
3548 if (gcse_file != NULL)
3549 {
3550 fprintf (gcse_file,
3551 "GCSE: Replacing the source in insn %d with reg %d ",
3552 INSN_UID (insn),
3553 REGNO (SET_DEST (PATTERN (NEXT_INSN
3554 (insn_computes_expr)))));
3555 fprintf (gcse_file, "set in insn %d\n",
3556 INSN_UID (insn_computes_expr));
3557 }
3558 }
3559 }
3560
3561 return changed;
3562 }
3563
3564 /* Perform classic GCSE. This is called by one_classic_gcse_pass after all
3565 the dataflow analysis has been done.
3566
3567 The result is nonzero if a change was made. */
3568
3569 static int
3570 classic_gcse ()
3571 {
3572 int changed;
3573 rtx insn;
3574 basic_block bb;
3575
3576 /* Note we start at block 1. */
3577
3578 if (ENTRY_BLOCK_PTR->next_bb == EXIT_BLOCK_PTR)
3579 return 0;
3580
3581 changed = 0;
3582 FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR->next_bb->next_bb, EXIT_BLOCK_PTR, next_bb)
3583 {
3584 /* Reset tables used to keep track of what's still valid [since the
3585 start of the block]. */
3586 reset_opr_set_tables ();
3587
3588 for (insn = bb->head;
3589 insn != NULL && insn != NEXT_INSN (bb->end);
3590 insn = NEXT_INSN (insn))
3591 {
3592 /* Is insn of form (set (pseudo-reg) ...)? */
3593 if (GET_CODE (insn) == INSN
3594 && GET_CODE (PATTERN (insn)) == SET
3595 && GET_CODE (SET_DEST (PATTERN (insn))) == REG
3596 && REGNO (SET_DEST (PATTERN (insn))) >= FIRST_PSEUDO_REGISTER)
3597 {
3598 rtx pat = PATTERN (insn);
3599 rtx src = SET_SRC (pat);
3600 struct expr *expr;
3601
3602 if (want_to_gcse_p (src)
3603 /* Is the expression recorded? */
3604 && ((expr = lookup_expr (src, &expr_hash_table)) != NULL)
3605 /* Is the expression available [at the start of the
3606 block]? */
3607 && TEST_BIT (ae_in[bb->index], expr->bitmap_index)
3608 /* Are the operands unchanged since the start of the
3609 block? */
3610 && oprs_not_set_p (src, insn))
3611 changed |= handle_avail_expr (insn, expr);
3612 }
3613
3614 /* Keep track of everything modified by this insn. */
3615 /* ??? Need to be careful w.r.t. mods done to INSN. */
3616 if (INSN_P (insn))
3617 mark_oprs_set (insn);
3618 }
3619 }
3620
3621 return changed;
3622 }
3623
3624 /* Top level routine to perform one classic GCSE pass.
3625
3626 Return nonzero if a change was made. */
3627
3628 static int
3629 one_classic_gcse_pass (pass)
3630 int pass;
3631 {
3632 int changed = 0;
3633
3634 gcse_subst_count = 0;
3635 gcse_create_count = 0;
3636
3637 alloc_hash_table (max_cuid, &expr_hash_table, 0);
3638 alloc_rd_mem (last_basic_block, max_cuid);
3639 compute_hash_table (&expr_hash_table);
3640 if (gcse_file)
3641 dump_hash_table (gcse_file, "Expression", &expr_hash_table);
3642
3643 if (expr_hash_table.n_elems > 0)
3644 {
3645 compute_kill_rd ();
3646 compute_rd ();
3647 alloc_avail_expr_mem (last_basic_block, expr_hash_table.n_elems);
3648 compute_ae_gen (&expr_hash_table);
3649 compute_ae_kill (ae_gen, ae_kill, &expr_hash_table);
3650 compute_available (ae_gen, ae_kill, ae_out, ae_in);
3651 changed = classic_gcse ();
3652 free_avail_expr_mem ();
3653 }
3654
3655 free_rd_mem ();
3656 free_hash_table (&expr_hash_table);
3657
3658 if (gcse_file)
3659 {
3660 fprintf (gcse_file, "\n");
3661 fprintf (gcse_file, "GCSE of %s, pass %d: %d bytes needed, %d substs,",
3662 current_function_name, pass, bytes_used, gcse_subst_count);
3663 fprintf (gcse_file, "%d insns created\n", gcse_create_count);
3664 }
3665
3666 return changed;
3667 }
3668
3669 /* Compute copy/constant propagation working variables. */
3670
3671 /* Local properties of assignments. */
3672 static sbitmap *cprop_pavloc;
3673 static sbitmap *cprop_absaltered;
3674
3675 /* Global properties of assignments (computed from the local properties). */
3676 static sbitmap *cprop_avin;
3677 static sbitmap *cprop_avout;
3678
3679 /* Allocate vars used for copy/const propagation. N_BLOCKS is the number of
3680 basic blocks. N_SETS is the number of sets. */
3681
3682 static void
3683 alloc_cprop_mem (n_blocks, n_sets)
3684 int n_blocks, n_sets;
3685 {
3686 cprop_pavloc = sbitmap_vector_alloc (n_blocks, n_sets);
3687 cprop_absaltered = sbitmap_vector_alloc (n_blocks, n_sets);
3688
3689 cprop_avin = sbitmap_vector_alloc (n_blocks, n_sets);
3690 cprop_avout = sbitmap_vector_alloc (n_blocks, n_sets);
3691 }
3692
3693 /* Free vars used by copy/const propagation. */
3694
3695 static void
3696 free_cprop_mem ()
3697 {
3698 sbitmap_vector_free (cprop_pavloc);
3699 sbitmap_vector_free (cprop_absaltered);
3700 sbitmap_vector_free (cprop_avin);
3701 sbitmap_vector_free (cprop_avout);
3702 }
3703
3704 /* For each block, compute whether X is transparent. X is either an
3705 expression or an assignment [though we don't care which, for this context
3706 an assignment is treated as an expression]. For each block where an
3707 element of X is modified, set (SET_P == 1) or reset (SET_P == 0) the INDX
3708 bit in BMAP. */
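/* For example (illustrative only): if X is (plus (reg 65) (const_int 4))
   and SET_P is zero, every basic block that contains a store to (reg 65)
   gets bit INDX cleared in its BMAP entry, i.e. the expression is not
   transparent through that block.  MEM operands are handled by walking
   canon_modify_mem_list below.  */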
3709
3710 static void
3711 compute_transp (x, indx, bmap, set_p)
3712 rtx x;
3713 int indx;
3714 sbitmap *bmap;
3715 int set_p;
3716 {
3717 int i, j;
3718 basic_block bb;
3719 enum rtx_code code;
3720 reg_set *r;
3721 const char *fmt;
3722
3723 /* repeat is used to turn tail-recursion into iteration since GCC
3724 can't do it when there's no return value. */
3725 repeat:
3726
3727 if (x == 0)
3728 return;
3729
3730 code = GET_CODE (x);
3731 switch (code)
3732 {
3733 case REG:
3734 if (set_p)
3735 {
3736 if (REGNO (x) < FIRST_PSEUDO_REGISTER)
3737 {
3738 FOR_EACH_BB (bb)
3739 if (TEST_BIT (reg_set_in_block[bb->index], REGNO (x)))
3740 SET_BIT (bmap[bb->index], indx);
3741 }
3742 else
3743 {
3744 for (r = reg_set_table[REGNO (x)]; r != NULL; r = r->next)
3745 SET_BIT (bmap[BLOCK_NUM (r->insn)], indx);
3746 }
3747 }
3748 else
3749 {
3750 if (REGNO (x) < FIRST_PSEUDO_REGISTER)
3751 {
3752 FOR_EACH_BB (bb)
3753 if (TEST_BIT (reg_set_in_block[bb->index], REGNO (x)))
3754 RESET_BIT (bmap[bb->index], indx);
3755 }
3756 else
3757 {
3758 for (r = reg_set_table[REGNO (x)]; r != NULL; r = r->next)
3759 RESET_BIT (bmap[BLOCK_NUM (r->insn)], indx);
3760 }
3761 }
3762
3763 return;
3764
3765 case MEM:
3766 FOR_EACH_BB (bb)
3767 {
3768 rtx list_entry = canon_modify_mem_list[bb->index];
3769
3770 while (list_entry)
3771 {
3772 rtx dest, dest_addr;
3773
3774 if (GET_CODE (XEXP (list_entry, 0)) == CALL_INSN)
3775 {
3776 if (set_p)
3777 SET_BIT (bmap[bb->index], indx);
3778 else
3779 RESET_BIT (bmap[bb->index], indx);
3780 break;
3781 }
3782 /* LIST_ENTRY must be an INSN of some kind that sets memory.
3783 Examine each hunk of memory that is modified. */
3784
3785 dest = XEXP (list_entry, 0);
3786 list_entry = XEXP (list_entry, 1);
3787 dest_addr = XEXP (list_entry, 0);
3788
3789 if (canon_true_dependence (dest, GET_MODE (dest), dest_addr,
3790 x, rtx_addr_varies_p))
3791 {
3792 if (set_p)
3793 SET_BIT (bmap[bb->index], indx);
3794 else
3795 RESET_BIT (bmap[bb->index], indx);
3796 break;
3797 }
3798 list_entry = XEXP (list_entry, 1);
3799 }
3800 }
3801
3802 x = XEXP (x, 0);
3803 goto repeat;
3804
3805 case PC:
3806 case CC0: /*FIXME*/
3807 case CONST:
3808 case CONST_INT:
3809 case CONST_DOUBLE:
3810 case CONST_VECTOR:
3811 case SYMBOL_REF:
3812 case LABEL_REF:
3813 case ADDR_VEC:
3814 case ADDR_DIFF_VEC:
3815 return;
3816
3817 default:
3818 break;
3819 }
3820
3821 for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
3822 {
3823 if (fmt[i] == 'e')
3824 {
3825 /* If we are about to do the last recursive call
3826 needed at this level, change it into iteration.
3827 This function is called enough to be worth it. */
3828 if (i == 0)
3829 {
3830 x = XEXP (x, i);
3831 goto repeat;
3832 }
3833
3834 compute_transp (XEXP (x, i), indx, bmap, set_p);
3835 }
3836 else if (fmt[i] == 'E')
3837 for (j = 0; j < XVECLEN (x, i); j++)
3838 compute_transp (XVECEXP (x, i, j), indx, bmap, set_p);
3839 }
3840 }
3841
3842 /* Top level routine to do the dataflow analysis needed by copy/const
3843 propagation. */
3844
3845 static void
3846 compute_cprop_data ()
3847 {
3848 compute_local_properties (cprop_absaltered, cprop_pavloc, NULL, &set_hash_table);
3849 compute_available (cprop_pavloc, cprop_absaltered,
3850 cprop_avout, cprop_avin);
3851 }
3852
3853 /* Copy/constant propagation. */
3854
3855 /* Maximum number of register uses in an insn that we handle. */
3856 #define MAX_USES 8
3857
3858 /* Table of uses found in an insn.
3859 Allocated statically to avoid alloc/free complexity and overhead. */
3860 static struct reg_use reg_use_table[MAX_USES];
3861
3862 /* Index into `reg_use_table' while building it. */
3863 static int reg_use_count;
3864
3865 /* Set up a list of register numbers used in INSN. The found uses are stored
3866 in `reg_use_table'. `reg_use_count' is initialized to zero before entry,
3867 and contains the number of uses in the table upon exit.
3868
3869 ??? If a register appears multiple times we will record it multiple times.
3870 This doesn't hurt anything but it will slow things down. */
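/* Example (illustrative register numbers): for an insn whose pattern is
   (set (reg 60) (plus (reg 55) (mem (reg 56)))), calling this through
   note_uses on the pattern records (reg 55) and (reg 56) in
   reg_use_table; the plain register destination (reg 60) is not a use
   and is skipped by note_uses.  */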
3871
3872 static void
3873 find_used_regs (xptr, data)
3874 rtx *xptr;
3875 void *data ATTRIBUTE_UNUSED;
3876 {
3877 int i, j;
3878 enum rtx_code code;
3879 const char *fmt;
3880 rtx x = *xptr;
3881
3882 /* repeat is used to turn tail-recursion into iteration since GCC
3883 can't do it when there's no return value. */
3884 repeat:
3885 if (x == 0)
3886 return;
3887
3888 code = GET_CODE (x);
3889 if (REG_P (x))
3890 {
3891 if (reg_use_count == MAX_USES)
3892 return;
3893
3894 reg_use_table[reg_use_count].reg_rtx = x;
3895 reg_use_count++;
3896 }
3897
3898 /* Recursively scan the operands of this expression. */
3899
3900 for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
3901 {
3902 if (fmt[i] == 'e')
3903 {
3904 /* If we are about to do the last recursive call
3905 needed at this level, change it into iteration.
3906 This function is called enough to be worth it. */
3907 if (i == 0)
3908 {
3909 x = XEXP (x, 0);
3910 goto repeat;
3911 }
3912
3913 find_used_regs (&XEXP (x, i), data);
3914 }
3915 else if (fmt[i] == 'E')
3916 for (j = 0; j < XVECLEN (x, i); j++)
3917 find_used_regs (&XVECEXP (x, i, j), data);
3918 }
3919 }
3920
3921 /* Try to replace all non-SET_DEST occurrences of FROM in INSN with TO.
3922 Returns nonzero if successful. */
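/* Worked example (hypothetical register numbers): replacing FROM = (reg 55)
   with TO = (const_int 4) in (set (reg 60) (plus (reg 55) (const_int 1)))
   first yields (plus (const_int 4) (const_int 1)); since TO is a constant,
   the SET_SRC is then re-simplified, giving (set (reg 60) (const_int 5)).  */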
3923
3924 static int
3925 try_replace_reg (from, to, insn)
3926 rtx from, to, insn;
3927 {
3928 rtx note = find_reg_equal_equiv_note (insn);
3929 rtx src = 0;
3930 int success = 0;
3931 rtx set = single_set (insn);
3932
3933 validate_replace_src_group (from, to, insn);
3934 if (num_changes_pending () && apply_change_group ())
3935 success = 1;
3936
3937 /* Try to simplify SET_SRC if we have substituted a constant. */
3938 if (success && set && CONSTANT_P (to))
3939 {
3940 src = simplify_rtx (SET_SRC (set));
3941
3942 if (src)
3943 validate_change (insn, &SET_SRC (set), src, 0);
3944 }
3945
3946 if (!success && set && reg_mentioned_p (from, SET_SRC (set)))
3947 {
3948 /* If above failed and this is a single set, try to simplify the source of
3949 the set given our substitution. We could perhaps try this for multiple
3950 SETs, but it probably won't buy us anything. */
3951 src = simplify_replace_rtx (SET_SRC (set), from, to);
3952
3953 if (!rtx_equal_p (src, SET_SRC (set))
3954 && validate_change (insn, &SET_SRC (set), src, 0))
3955 success = 1;
3956
3957 /* If we've failed to do replacement, have a single SET, don't already
3958 have a note, and have no special SET, add a REG_EQUAL note to not
3959 lose information. */
3960 if (!success && note == 0 && set != 0
3961 && GET_CODE (XEXP (set, 0)) != ZERO_EXTRACT
3962 && GET_CODE (XEXP (set, 0)) != SIGN_EXTRACT)
3963 note = set_unique_reg_note (insn, REG_EQUAL, copy_rtx (src));
3964 }
3965
3966 /* If there is already a NOTE, update the expression in it with our
3967 replacement. */
3968 else if (note != 0)
3969 XEXP (note, 0) = simplify_replace_rtx (XEXP (note, 0), from, to);
3970
3971 /* REG_EQUAL may get simplified into a register.
3972 We don't allow that. Remove that note. This case ought
3973 not to happen, because the previous code ought to synthesize a
3974 reg-reg move, but be on the safe side. */
3975 if (note && REG_P (XEXP (note, 0)))
3976 remove_note (insn, note);
3977
3978 return success;
3979 }
3980
3981 /* Find a set of register REGNO that is available on entry to INSN's block.
3982 Returns NULL if no such set is found. */
3983
3984 static struct expr *
3985 find_avail_set (regno, insn)
3986 int regno;
3987 rtx insn;
3988 {
3989 /* SET1 contains the last set found that can be returned to the caller for
3990 use in a substitution. */
3991 struct expr *set1 = 0;
3992
3993 /* Loops are not possible here. To get a loop we would need two sets
3994 available at the start of the block containing INSN, i.e. we would
3995 need two sets like this available at the start of the block:
3996
3997 (set (reg X) (reg Y))
3998 (set (reg Y) (reg X))
3999
4000 This cannot happen since the set of (reg Y) would have killed the
4001 set of (reg X) making it unavailable at the start of this block. */
4002 while (1)
4003 {
4004 rtx src;
4005 struct expr *set = lookup_set (regno, NULL_RTX, &set_hash_table);
4006
4007 /* Find a set that is available at the start of the block
4008 which contains INSN. */
4009 while (set)
4010 {
4011 if (TEST_BIT (cprop_avin[BLOCK_NUM (insn)], set->bitmap_index))
4012 break;
4013 set = next_set (regno, set);
4014 }
4015
4016 /* If no available set was found we've reached the end of the
4017 (possibly empty) copy chain. */
4018 if (set == 0)
4019 break;
4020
4021 if (GET_CODE (set->expr) != SET)
4022 abort ();
4023
4024 src = SET_SRC (set->expr);
4025
4026 /* We know the set is available.
4027 Now check that SRC is ANTLOC (i.e. none of the source operands
4028 have changed since the start of the block).
4029
4030 If the source operand changed, we may still use it for the next
4031 iteration of this loop, but we may not use it for substitutions. */
4032
4033 if (CONSTANT_P (src) || oprs_not_set_p (src, insn))
4034 set1 = set;
4035
4036 /* If the source of the set is anything except a register, then
4037 we have reached the end of the copy chain. */
4038 if (GET_CODE (src) != REG)
4039 break;
4040
4041 /* Follow the copy chain, i.e. start another iteration of the loop
4042 and see if we have an available copy into SRC. */
4043 regno = REGNO (src);
4044 }
4045
4046 /* SET1 holds the last set that was available and anticipatable at
4047 INSN. */
4048 return set1;
4049 }
4050
4051 /* Subroutine of cprop_insn that tries to propagate constants into
4052 JUMP_INSNS. JUMP must be a conditional jump. If SETCC is non-NULL
4053 it is the instruction that immediately precedes JUMP, and must be a
4054 single SET of a register. FROM is what we will try to replace,
4055 SRC is the constant we will try to substitute for it. Returns nonzero
4056 if a change was made. */
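/* A hedged example of the transformation (insn numbers and registers are
   made up): with SETCC = (set (reg 100) (eq (reg 50) (const_int 0))) and
   JUMP = (set (pc) (if_then_else (ne (reg 100) (const_int 0))
   (label_ref 23) (pc))), substituting the SETCC condition and then
   FROM = (reg 50), SRC = (const_int 0) folds the condition to a constant.
   The result is either pc_rtx (the jump is a no-op and is deleted) or
   (label_ref 23) (the jump becomes unconditional and gets a barrier).  */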
4057
4058 static int
4059 cprop_jump (bb, setcc, jump, from, src)
4060 basic_block bb;
4061 rtx setcc;
4062 rtx jump;
4063 rtx from;
4064 rtx src;
4065 {
4066 rtx new;
4067 rtx set = pc_set (jump);
4068 rtx set_src = SET_SRC (set);
4069
4070 /* First substitute the SETCC condition into the SET_SRC of the JUMP,
4071 then substitute the given values into this expanded JUMP. */
4072 if (setcc != NULL_RTX
4073 && !modified_between_p (from, setcc, jump)
4074 && !modified_between_p (src, setcc, jump))
4075 {
4076 rtx setcc_set = single_set (setcc);
4077 set_src = simplify_replace_rtx (set_src,
4078 SET_DEST (setcc_set),
4079 SET_SRC (setcc_set));
4080 }
4081 else
4082 setcc = NULL_RTX;
4083
4084 new = simplify_replace_rtx (set_src, from, src);
4085
4086 /* If no simplification can be made, then try the next
4087 register. */
4088 if (rtx_equal_p (new, SET_SRC (set)))
4089 return 0;
4090
4091 /* If this is now a no-op delete it, otherwise this must be a valid insn. */
4092 if (new == pc_rtx)
4093 delete_insn (jump);
4094 else
4095 {
4096 /* Ensure that the value computed inside the jump insn is equivalent
4097 to the one computed by setcc. */
4098 if (setcc
4099 && modified_in_p (new, setcc))
4100 return 0;
4101 if (! validate_change (jump, &SET_SRC (set), new, 0))
4102 return 0;
4103
4104 /* If this has turned into an unconditional jump,
4105 then put a barrier after it so that the unreachable
4106 code will be deleted. */
4107 if (GET_CODE (SET_SRC (set)) == LABEL_REF)
4108 emit_barrier_after (jump);
4109 }
4110
4111 #ifdef HAVE_cc0
4112 /* Delete the cc0 setter. */
4113 if (setcc != NULL && CC0_P (SET_DEST (single_set (setcc))))
4114 delete_insn (setcc);
4115 #endif
4116
4117 run_jump_opt_after_gcse = 1;
4118
4119 const_prop_count++;
4120 if (gcse_file != NULL)
4121 {
4122 fprintf (gcse_file,
4123 "CONST-PROP: Replacing reg %d in jump_insn %d with constant ",
4124 REGNO (from), INSN_UID (jump));
4125 print_rtl (gcse_file, src);
4126 fprintf (gcse_file, "\n");
4127 }
4128 purge_dead_edges (bb);
4129
4130 return 1;
4131 }
4132
4133 static bool
4134 constprop_register (insn, from, to, alter_jumps)
4135 rtx insn;
4136 rtx from;
4137 rtx to;
4138 int alter_jumps;
4139 {
4140 rtx sset;
4141
4142 /* Check for reg or cc0 setting instructions followed by
4143 conditional branch instructions first. */
4144 if (alter_jumps
4145 && (sset = single_set (insn)) != NULL
4146 && NEXT_INSN (insn)
4147 && any_condjump_p (NEXT_INSN (insn)) && onlyjump_p (NEXT_INSN (insn)))
4148 {
4149 rtx dest = SET_DEST (sset);
4150 if ((REG_P (dest) || CC0_P (dest))
4151 && cprop_jump (BLOCK_FOR_INSN (insn), insn, NEXT_INSN (insn), from, to))
4152 return 1;
4153 }
4154
4155 /* Handle normal insns next. */
4156 if (GET_CODE (insn) == INSN
4157 && try_replace_reg (from, to, insn))
4158 return 1;
4159
4160 /* Try to propagate a CONST_INT into a conditional jump.
4161 We're pretty specific about what we will handle in this
4162 code, we can extend this as necessary over time.
4163
4164 Right now the insn in question must look like
4165 (set (pc) (if_then_else ...)) */
4166 else if (alter_jumps && any_condjump_p (insn) && onlyjump_p (insn))
4167 return cprop_jump (BLOCK_FOR_INSN (insn), NULL, insn, from, to);
4168 return 0;
4169 }
4170
4171 /* Perform constant and copy propagation on INSN.
4172 The result is nonzero if a change was made. */
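/* Sketch of a typical case (illustrative only): if the set
   (set (reg 55) (const_int 8)) is available at the start of the block,
   an insn (set (reg 60) (plus (reg 55) (const_int 4))) is rewritten via
   constprop_register/try_replace_reg into (set (reg 60) (const_int 12)).
   A copy such as (set (reg 55) (reg 70)) would instead be propagated with
   try_replace_reg, replacing reg 55 by reg 70.  */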
4173
4174 static int
4175 cprop_insn (insn, alter_jumps)
4176 rtx insn;
4177 int alter_jumps;
4178 {
4179 struct reg_use *reg_used;
4180 int changed = 0;
4181 rtx note;
4182
4183 if (!INSN_P (insn))
4184 return 0;
4185
4186 reg_use_count = 0;
4187 note_uses (&PATTERN (insn), find_used_regs, NULL);
4188
4189 note = find_reg_equal_equiv_note (insn);
4190
4191 /* We may win even when propagating constants into notes. */
4192 if (note)
4193 find_used_regs (&XEXP (note, 0), NULL);
4194
4195 for (reg_used = &reg_use_table[0]; reg_use_count > 0;
4196 reg_used++, reg_use_count--)
4197 {
4198 unsigned int regno = REGNO (reg_used->reg_rtx);
4199 rtx pat, src;
4200 struct expr *set;
4201
4202 /* Ignore registers created by GCSE; they are not covered by the
4203 dataflow tables computed above, so there is nothing we can do. */
4204 if (regno >= max_gcse_regno)
4205 continue;
4206
4207 /* If the register has already been set in this block, there's
4208 nothing we can do. */
4209 if (! oprs_not_set_p (reg_used->reg_rtx, insn))
4210 continue;
4211
4212 /* Find an assignment that sets reg_used and is available
4213 at the start of the block. */
4214 set = find_avail_set (regno, insn);
4215 if (! set || set->expr->volatil)
4216 continue;
4217
4218 pat = set->expr;
4219 /* ??? We might be able to handle PARALLELs. Later. */
4220 if (GET_CODE (pat) != SET)
4221 abort ();
4222
4223 src = SET_SRC (pat);
4224
4225 /* Constant propagation. */
4226 if (CONSTANT_P (src))
4227 {
4228 if (constprop_register (insn, reg_used->reg_rtx, src, alter_jumps))
4229 {
4230 changed = 1;
4231 const_prop_count++;
4232 if (gcse_file != NULL)
4233 {
4234 fprintf (gcse_file, "GLOBAL CONST-PROP: Replacing reg %d in ", regno);
4235 fprintf (gcse_file, "insn %d with constant ", INSN_UID (insn));
4236 print_rtl (gcse_file, src);
4237 fprintf (gcse_file, "\n");
4238 }
4239 if (INSN_DELETED_P (insn))
4240 return 1;
4241 }
4242 }
4243 else if (GET_CODE (src) == REG
4244 && REGNO (src) >= FIRST_PSEUDO_REGISTER
4245 && REGNO (src) != regno)
4246 {
4247 if (try_replace_reg (reg_used->reg_rtx, src, insn))
4248 {
4249 changed = 1;
4250 copy_prop_count++;
4251 if (gcse_file != NULL)
4252 {
4253 fprintf (gcse_file, "GLOBAL COPY-PROP: Replacing reg %d in insn %d",
4254 regno, INSN_UID (insn));
4255 fprintf (gcse_file, " with reg %d\n", REGNO (src));
4256 }
4257
4258 /* The original insn setting reg_used may or may not now be
4259 deletable. We leave the deletion to flow. */
4260 /* FIXME: If it turns out that the insn isn't deletable,
4261 then we may have unnecessarily extended register lifetimes
4262 and made things worse. */
4263 }
4264 }
4265 }
4266
4267 return changed;
4268 }
4269
4270 /* Like find_used_regs, but avoid recording uses that appear in
4271 input-output contexts such as zero_extract or pre_dec. This
4272 restricts the cases we consider to those for which local cprop
4273 can legitimately make replacements. */
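/* For instance (illustrative): registers that appear only inside such
   input-output contexts, e.g. the address register of a pre_dec push or
   the destination of a zero_extract, are both read and written, so they
   are not recorded as plain uses here and local cprop never tries to
   substitute into them.  */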
4274
4275 static void
4276 local_cprop_find_used_regs (xptr, data)
4277 rtx *xptr;
4278 void *data;
4279 {
4280 rtx x = *xptr;
4281
4282 if (x == 0)
4283 return;
4284
4285 switch (GET_CODE (x))
4286 {
4287 case ZERO_EXTRACT:
4288 case SIGN_EXTRACT:
4289 case STRICT_LOW_PART:
4290 return;
4291
4292 case PRE_DEC:
4293 case PRE_INC:
4294 case POST_DEC:
4295 case POST_INC:
4296 case PRE_MODIFY:
4297 case POST_MODIFY:
4298 /* Can only legitimately appear this early in the context of
4299 stack pushes for function arguments, but handle all of the
4300 codes nonetheless. */
4301 return;
4302
4303 case SUBREG:
4304 /* Setting a subreg of a register larger than word_mode leaves
4305 the non-written words unchanged. */
4306 if (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x))) > BITS_PER_WORD)
4307 return;
4308 break;
4309
4310 default:
4311 break;
4312 }
4313
4314 find_used_regs (xptr, data);
4315 }
4316
4317 /* LIBCALL_SP is a zero-terminated array of insns at the end of a libcall;
4318 their REG_EQUAL notes need updating. */
4319
4320 static bool
4321 do_local_cprop (x, insn, alter_jumps, libcall_sp)
4322 rtx x;
4323 rtx insn;
4324 int alter_jumps;
4325 rtx *libcall_sp;
4326 {
4327 rtx newreg = NULL, newcnst = NULL;
4328
4329 /* Rule out USE instructions and ASM statements as we don't want to
4330 change the hard registers mentioned. */
4331 if (GET_CODE (x) == REG
4332 && (REGNO (x) >= FIRST_PSEUDO_REGISTER
4333 || (GET_CODE (PATTERN (insn)) != USE
4334 && asm_noperands (PATTERN (insn)) < 0)))
4335 {
4336 cselib_val *val = cselib_lookup (x, GET_MODE (x), 0);
4337 struct elt_loc_list *l;
4338
4339 if (!val)
4340 return false;
4341 for (l = val->locs; l; l = l->next)
4342 {
4343 rtx this_rtx = l->loc;
4344 rtx note;
4345
4346 if (l->in_libcall)
4347 continue;
4348
4349 if (CONSTANT_P (this_rtx))
4350 newcnst = this_rtx;
4351 if (REG_P (this_rtx) && REGNO (this_rtx) >= FIRST_PSEUDO_REGISTER
4352 /* Don't copy propagate if it has an attached REG_EQUIV note.
4353 At this point only function parameters should have
4354 REG_EQUIV notes, and if the argument slot is used somewhere
4355 explicitly, it means the address of the parameter has been taken,
4356 so we should not extend the lifetime of the pseudo. */
4357 && (!(note = find_reg_note (l->setting_insn, REG_EQUIV, NULL_RTX))
4358 || GET_CODE (XEXP (note, 0)) != MEM))
4359 newreg = this_rtx;
4360 }
4361 if (newcnst && constprop_register (insn, x, newcnst, alter_jumps))
4362 {
4363 /* If we find a case where we can't fix the retval REG_EQUAL notes to
4364 match the new register, we either have to abandon this replacement
4365 or fix delete_trivially_dead_insns to preserve the setting insn,
4366 or make it delete the REG_EQUAL note, and fix up all passes that
4367 require the REG_EQUAL note there. */
4368 if (!adjust_libcall_notes (x, newcnst, insn, libcall_sp))
4369 abort ();
4370 if (gcse_file != NULL)
4371 {
4372 fprintf (gcse_file, "LOCAL CONST-PROP: Replacing reg %d in ",
4373 REGNO (x));
4374 fprintf (gcse_file, "insn %d with constant ",
4375 INSN_UID (insn));
4376 print_rtl (gcse_file, newcnst);
4377 fprintf (gcse_file, "\n");
4378 }
4379 const_prop_count++;
4380 return true;
4381 }
4382 else if (newreg && newreg != x && try_replace_reg (x, newreg, insn))
4383 {
4384 adjust_libcall_notes (x, newreg, insn, libcall_sp);
4385 if (gcse_file != NULL)
4386 {
4387 fprintf (gcse_file,
4388 "LOCAL COPY-PROP: Replacing reg %d in insn %d",
4389 REGNO (x), INSN_UID (insn));
4390 fprintf (gcse_file, " with reg %d\n", REGNO (newreg));
4391 }
4392 copy_prop_count++;
4393 return true;
4394 }
4395 }
4396 return false;
4397 }
4398
4399 /* LIBCALL_SP is a zero-terminated array of insns at the end of a libcall;
4400 their REG_EQUAL notes need updating to reflect that OLDREG has been
4401 replaced with NEWVAL in INSN. Return true if all substitutions could
4402 be made. */
4403 static bool
4404 adjust_libcall_notes (oldreg, newval, insn, libcall_sp)
4405 rtx oldreg, newval, insn, *libcall_sp;
4406 {
4407 rtx end;
4408
4409 while ((end = *libcall_sp++))
4410 {
4411 rtx note = find_reg_equal_equiv_note (end);
4412
4413 if (! note)
4414 continue;
4415
4416 if (REG_P (newval))
4417 {
4418 if (reg_set_between_p (newval, PREV_INSN (insn), end))
4419 {
4420 do
4421 {
4422 note = find_reg_equal_equiv_note (end);
4423 if (! note)
4424 continue;
4425 if (reg_mentioned_p (newval, XEXP (note, 0)))
4426 return false;
4427 }
4428 while ((end = *libcall_sp++));
4429 return true;
4430 }
4431 }
4432 XEXP (note, 0) = replace_rtx (XEXP (note, 0), oldreg, newval);
4433 insn = end;
4434 }
4435 return true;
4436 }
4437
4438 #define MAX_NESTED_LIBCALLS 9
4439
4440 static void
4441 local_cprop_pass (alter_jumps)
4442 int alter_jumps;
4443 {
4444 rtx insn;
4445 struct reg_use *reg_used;
4446 rtx libcall_stack[MAX_NESTED_LIBCALLS + 1], *libcall_sp;
4447 bool changed = false;
4448
4449 cselib_init ();
4450 libcall_sp = &libcall_stack[MAX_NESTED_LIBCALLS];
4451 *libcall_sp = 0;
4452 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
4453 {
4454 if (INSN_P (insn))
4455 {
4456 rtx note = find_reg_note (insn, REG_LIBCALL, NULL_RTX);
4457
4458 if (note)
4459 {
4460 if (libcall_sp == libcall_stack)
4461 abort ();
4462 *--libcall_sp = XEXP (note, 0);
4463 }
4464 note = find_reg_note (insn, REG_RETVAL, NULL_RTX);
4465 if (note)
4466 libcall_sp++;
4467 note = find_reg_equal_equiv_note (insn);
4468 do
4469 {
4470 reg_use_count = 0;
4471 note_uses (&PATTERN (insn), local_cprop_find_used_regs, NULL);
4472 if (note)
4473 local_cprop_find_used_regs (&XEXP (note, 0), NULL);
4474
4475 for (reg_used = &reg_use_table[0]; reg_use_count > 0;
4476 reg_used++, reg_use_count--)
4477 if (do_local_cprop (reg_used->reg_rtx, insn, alter_jumps,
4478 libcall_sp))
4479 {
4480 changed = true;
4481 break;
4482 }
4483 if (INSN_DELETED_P (insn))
4484 break;
4485 }
4486 while (reg_use_count);
4487 }
4488 cselib_process_insn (insn);
4489 }
4490 cselib_finish ();
4491 /* Global analysis may get into infinite loops for unreachable blocks. */
4492 if (changed && alter_jumps)
4493 {
4494 delete_unreachable_blocks ();
4495 free_reg_set_mem ();
4496 alloc_reg_set_mem (max_reg_num ());
4497 compute_sets (get_insns ());
4498 }
4499 }
4500
4501 /* Forward propagate copies. This includes copies and constants. Return
4502 nonzero if a change was made. */
4503
4504 static int
4505 cprop (alter_jumps)
4506 int alter_jumps;
4507 {
4508 int changed;
4509 basic_block bb;
4510 rtx insn;
4511
4512 /* Note we start at block 1. */
4513 if (ENTRY_BLOCK_PTR->next_bb == EXIT_BLOCK_PTR)
4514 {
4515 if (gcse_file != NULL)
4516 fprintf (gcse_file, "\n");
4517 return 0;
4518 }
4519
4520 changed = 0;
4521 FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR->next_bb->next_bb, EXIT_BLOCK_PTR, next_bb)
4522 {
4523 /* Reset tables used to keep track of what's still valid [since the
4524 start of the block]. */
4525 reset_opr_set_tables ();
4526
4527 for (insn = bb->head;
4528 insn != NULL && insn != NEXT_INSN (bb->end);
4529 insn = NEXT_INSN (insn))
4530 if (INSN_P (insn))
4531 {
4532 changed |= cprop_insn (insn, alter_jumps);
4533
4534 /* Keep track of everything modified by this insn. */
4535 /* ??? Need to be careful w.r.t. mods done to INSN. Don't
4536 call mark_oprs_set if we turned the insn into a NOTE. */
4537 if (GET_CODE (insn) != NOTE)
4538 mark_oprs_set (insn);
4539 }
4540 }
4541
4542 if (gcse_file != NULL)
4543 fprintf (gcse_file, "\n");
4544
4545 return changed;
4546 }
4547
4548 /* Perform one copy/constant propagation pass.
4549 F is the first insn in the function.
4550 PASS is the pass count. */
4551
4552 static int
4553 one_cprop_pass (pass, alter_jumps)
4554 int pass;
4555 int alter_jumps;
4556 {
4557 int changed = 0;
4558
4559 const_prop_count = 0;
4560 copy_prop_count = 0;
4561
4562 local_cprop_pass (alter_jumps);
4563
4564 alloc_hash_table (max_cuid, &set_hash_table, 1);
4565 compute_hash_table (&set_hash_table);
4566 if (gcse_file)
4567 dump_hash_table (gcse_file, "SET", &set_hash_table);
4568 if (set_hash_table.n_elems > 0)
4569 {
4570 alloc_cprop_mem (last_basic_block, set_hash_table.n_elems);
4571 compute_cprop_data ();
4572 changed = cprop (alter_jumps);
4573 if (alter_jumps)
4574 changed |= bypass_conditional_jumps ();
4575 free_cprop_mem ();
4576 }
4577
4578 free_hash_table (&set_hash_table);
4579
4580 if (gcse_file)
4581 {
4582 fprintf (gcse_file, "CPROP of %s, pass %d: %d bytes needed, ",
4583 current_function_name, pass, bytes_used);
4584 fprintf (gcse_file, "%d const props, %d copy props\n\n",
4585 const_prop_count, copy_prop_count);
4586 }
4587 /* Global analysis may get into infinite loops for unreachable blocks. */
4588 if (changed && alter_jumps)
4589 delete_unreachable_blocks ();
4590
4591 return changed;
4592 }
4593
4594 /* Bypass conditional jumps. */
4595
4596 /* Find a set of REGNO to a constant that is available at the end of basic
4597 block BB. Returns NULL if no such set is found. Based heavily upon
4598 find_avail_set. */
4599
4600 static struct expr *
4601 find_bypass_set (regno, bb)
4602 int regno;
4603 int bb;
4604 {
4605 struct expr *result = 0;
4606
4607 for (;;)
4608 {
4609 rtx src;
4610 struct expr *set = lookup_set (regno, NULL_RTX, &set_hash_table);
4611
4612 while (set)
4613 {
4614 if (TEST_BIT (cprop_avout[bb], set->bitmap_index))
4615 break;
4616 set = next_set (regno, set);
4617 }
4618
4619 if (set == 0)
4620 break;
4621
4622 if (GET_CODE (set->expr) != SET)
4623 abort ();
4624
4625 src = SET_SRC (set->expr);
4626 if (CONSTANT_P (src))
4627 result = set;
4628
4629 if (GET_CODE (src) != REG)
4630 break;
4631
4632 regno = REGNO (src);
4633 }
4634 return result;
4635 }
4636
4637
4638 /* Subroutine of bypass_block that checks whether a pseudo is killed by
4639 any of the instructions inserted on an edge. Jump bypassing places
4640 condition code setters on CFG edges using insert_insn_on_edge. This
4641 function is required to check that our data flow analysis is still
4642 valid prior to commit_edge_insertions. */
4643
4644 static bool
4645 reg_killed_on_edge (reg, e)
4646 rtx reg;
4647 edge e;
4648 {
4649 rtx insn;
4650
4651 for (insn = e->insns; insn; insn = NEXT_INSN (insn))
4652 if (INSN_P (insn) && reg_set_p (reg, insn))
4653 return true;
4654
4655 return false;
4656 }
4657
4658 /* Subroutine of bypass_conditional_jumps that attempts to bypass the given
4659 basic block BB which has more than one predecessor. If not NULL, SETCC
4660 is the first instruction of BB, which is immediately followed by JUMP_INSN
4661 JUMP. Otherwise, SETCC is NULL, and JUMP is the first insn of BB.
4662 Returns nonzero if a change was made.
4663
4664 During the jump bypassing pass, we may place copies of SETCC instructions
4665 on CFG edges. The following routine must be careful to pay attention to
4666 these inserted insns when performing its transformations. */
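/* Illustrative scenario (block and register numbers are invented): BB
   contains only SETCC = (set (reg 100) (gt (reg 50) (const_int 10))) and
   JUMP = (set (pc) (if_then_else (ne (reg 100) (const_int 0))
   (label_ref 42) (pc))).  If an incoming edge comes from a block on whose
   exit (set (reg 50) (const_int 0)) is available, the condition is known
   to be false along that edge, so the edge is redirected straight to the
   fall-through target and a copy of SETCC is inserted on it (unless it
   sets cc0).  */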
4667
4668 static int
4669 bypass_block (bb, setcc, jump)
4670 basic_block bb;
4671 rtx setcc, jump;
4672 {
4673 rtx insn, note;
4674 edge e, enext, edest;
4675 int i, change;
4676
4677 insn = (setcc != NULL) ? setcc : jump;
4678
4679 /* Determine set of register uses in INSN. */
4680 reg_use_count = 0;
4681 note_uses (&PATTERN (insn), find_used_regs, NULL);
4682 note = find_reg_equal_equiv_note (insn);
4683 if (note)
4684 find_used_regs (&XEXP (note, 0), NULL);
4685
4686 change = 0;
4687 for (e = bb->pred; e; e = enext)
4688 {
4689 enext = e->pred_next;
4690 for (i = 0; i < reg_use_count; i++)
4691 {
4692 struct reg_use *reg_used = &reg_use_table[i];
4693 unsigned int regno = REGNO (reg_used->reg_rtx);
4694 basic_block dest, old_dest;
4695 struct expr *set;
4696 rtx src, new;
4697
4698 if (regno >= max_gcse_regno)
4699 continue;
4700
4701 set = find_bypass_set (regno, e->src->index);
4702
4703 if (! set)
4704 continue;
4705
4706 /* Check the data flow is valid after edge insertions. */
4707 if (e->insns && reg_killed_on_edge (reg_used->reg_rtx, e))
4708 continue;
4709
4710 src = SET_SRC (pc_set (jump));
4711
4712 if (setcc != NULL)
4713 src = simplify_replace_rtx (src,
4714 SET_DEST (PATTERN (setcc)),
4715 SET_SRC (PATTERN (setcc)));
4716
4717 new = simplify_replace_rtx (src, reg_used->reg_rtx,
4718 SET_SRC (set->expr));
4719
4720 /* Jump bypassing may have already placed instructions on
4721 edges of the CFG. We can't bypass an outgoing edge that
4722 has instructions associated with it, as these insns won't
4723 get executed if the incoming edge is redirected. */
4724
4725 if (new == pc_rtx)
4726 {
4727 edest = FALLTHRU_EDGE (bb);
4728 dest = edest->insns ? NULL : edest->dest;
4729 }
4730 else if (GET_CODE (new) == LABEL_REF)
4731 {
4732 dest = BLOCK_FOR_INSN (XEXP (new, 0));
4733 /* Don't bypass edges containing instructions. */
4734 for (edest = bb->succ; edest; edest = edest->succ_next)
4735 if (edest->dest == dest && edest->insns)
4736 {
4737 dest = NULL;
4738 break;
4739 }
4740 }
4741 else
4742 dest = NULL;
4743
4744 /* Once basic block indices are stable, we should be able
4745 to use redirect_edge_and_branch_force instead. */
4746 old_dest = e->dest;
4747 if (dest != NULL && dest != old_dest
4748 && redirect_edge_and_branch (e, dest))
4749 {
4750 /* Copy the register setter to the redirected edge.
4751 Don't copy CC0 setters, as CC0 is dead after jump. */
4752 if (setcc)
4753 {
4754 rtx pat = PATTERN (setcc);
4755 if (!CC0_P (SET_DEST (pat)))
4756 insert_insn_on_edge (copy_insn (pat), e);
4757 }
4758
4759 if (gcse_file != NULL)
4760 {
4761 fprintf (gcse_file, "JUMP-BYPASS: Proved reg %d in jump_insn %d equals constant ",
4762 regno, INSN_UID (jump));
4763 print_rtl (gcse_file, SET_SRC (set->expr));
4764 fprintf (gcse_file, "\nBypass edge from %d->%d to %d\n",
4765 e->src->index, old_dest->index, dest->index);
4766 }
4767 change = 1;
4768 break;
4769 }
4770 }
4771 }
4772 return change;
4773 }
4774
4775 /* Find basic blocks with more than one predecessor that only contain a
4776 single conditional jump. If the result of the comparison is known at
4777 compile-time from any incoming edge, redirect that edge to the
4778 appropriate target. Returns nonzero if a change was made. */
4779
4780 static int
4781 bypass_conditional_jumps ()
4782 {
4783 basic_block bb;
4784 int changed;
4785 rtx setcc;
4786 rtx insn;
4787 rtx dest;
4788
4789 /* Note we start at block 1. */
4790 if (ENTRY_BLOCK_PTR->next_bb == EXIT_BLOCK_PTR)
4791 return 0;
4792
4793 changed = 0;
4794 FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR->next_bb->next_bb,
4795 EXIT_BLOCK_PTR, next_bb)
4796 {
4797 /* Check for more than one predecessor. */
4798 if (bb->pred && bb->pred->pred_next)
4799 {
4800 setcc = NULL_RTX;
4801 for (insn = bb->head;
4802 insn != NULL && insn != NEXT_INSN (bb->end);
4803 insn = NEXT_INSN (insn))
4804 if (GET_CODE (insn) == INSN)
4805 {
4806 if (setcc)
4807 break;
4808 if (GET_CODE (PATTERN (insn)) != SET)
4809 break;
4810
4811 dest = SET_DEST (PATTERN (insn));
4812 if (REG_P (dest) || CC0_P (dest))
4813 setcc = insn;
4814 else
4815 break;
4816 }
4817 else if (GET_CODE (insn) == JUMP_INSN)
4818 {
4819 if (any_condjump_p (insn) && onlyjump_p (insn))
4820 changed |= bypass_block (bb, setcc, insn);
4821 break;
4822 }
4823 else if (INSN_P (insn))
4824 break;
4825 }
4826 }
4827
4828 /* If we bypassed any register setting insns, we inserted a
4829 copy on the redirected edge. These need to be committed. */
4830 if (changed)
4831 commit_edge_insertions();
4832
4833 return changed;
4834 }
4835
4836 /* Compute PRE+LCM working variables. */
4837
4838 /* Local properties of expressions. */
4839 /* Nonzero for expressions that are transparent in the block. */
4840 static sbitmap *transp;
4841
4842 /* Nonzero for expressions that are transparent at the end of the block.
4843 This is only zero for expressions killed by an abnormal critical edge
4844 created by a call. */
4845 static sbitmap *transpout;
4846
4847 /* Nonzero for expressions that are computed (available) in the block. */
4848 static sbitmap *comp;
4849
4850 /* Nonzero for expressions that are locally anticipatable in the block. */
4851 static sbitmap *antloc;
4852
4853 /* Nonzero for expressions where this block is an optimal computation
4854 point. */
4855 static sbitmap *pre_optimal;
4856
4857 /* Nonzero for expressions which are redundant in a particular block. */
4858 static sbitmap *pre_redundant;
4859
4860 /* Nonzero for expressions which should be inserted on a specific edge. */
4861 static sbitmap *pre_insert_map;
4862
4863 /* Nonzero for expressions which should be deleted in a specific block. */
4864 static sbitmap *pre_delete_map;
4865
4866 /* Contains the edge_list returned by pre_edge_lcm. */
4867 static struct edge_list *edge_list;
4868
4869 /* Redundant insns. */
4870 static sbitmap pre_redundant_insns;
4871
4872 /* Allocate vars used for PRE analysis. */
4873
4874 static void
4875 alloc_pre_mem (n_blocks, n_exprs)
4876 int n_blocks, n_exprs;
4877 {
4878 transp = sbitmap_vector_alloc (n_blocks, n_exprs);
4879 comp = sbitmap_vector_alloc (n_blocks, n_exprs);
4880 antloc = sbitmap_vector_alloc (n_blocks, n_exprs);
4881
4882 pre_optimal = NULL;
4883 pre_redundant = NULL;
4884 pre_insert_map = NULL;
4885 pre_delete_map = NULL;
4886 ae_in = NULL;
4887 ae_out = NULL;
4888 ae_kill = sbitmap_vector_alloc (n_blocks, n_exprs);
4889
4890 /* pre_insert and pre_delete are allocated later. */
4891 }
4892
4893 /* Free vars used for PRE analysis. */
4894
4895 static void
4896 free_pre_mem ()
4897 {
4898 sbitmap_vector_free (transp);
4899 sbitmap_vector_free (comp);
4900
4901 /* ANTLOC and AE_KILL are freed just after pre_lcm finishes. */
4902
4903 if (pre_optimal)
4904 sbitmap_vector_free (pre_optimal);
4905 if (pre_redundant)
4906 sbitmap_vector_free (pre_redundant);
4907 if (pre_insert_map)
4908 sbitmap_vector_free (pre_insert_map);
4909 if (pre_delete_map)
4910 sbitmap_vector_free (pre_delete_map);
4911 if (ae_in)
4912 sbitmap_vector_free (ae_in);
4913 if (ae_out)
4914 sbitmap_vector_free (ae_out);
4915
4916 transp = comp = NULL;
4917 pre_optimal = pre_redundant = pre_insert_map = pre_delete_map = NULL;
4918 ae_in = ae_out = NULL;
4919 }
4920
4921 /* Top level routine to do the dataflow analysis needed by PRE. */
4922
4923 static void
4924 compute_pre_data ()
4925 {
4926 sbitmap trapping_expr;
4927 basic_block bb;
4928 unsigned int ui;
4929
4930 compute_local_properties (transp, comp, antloc, &expr_hash_table);
4931 sbitmap_vector_zero (ae_kill, last_basic_block);
4932
4933 /* Collect expressions which might trap. */
4934 trapping_expr = sbitmap_alloc (expr_hash_table.n_elems);
4935 sbitmap_zero (trapping_expr);
4936 for (ui = 0; ui < expr_hash_table.size; ui++)
4937 {
4938 struct expr *e;
4939 for (e = expr_hash_table.table[ui]; e != NULL; e = e->next_same_hash)
4940 if (may_trap_p (e->expr))
4941 SET_BIT (trapping_expr, e->bitmap_index);
4942 }
4943
4944 /* Compute ae_kill for each basic block using:
4945
4946 ~(TRANSP | COMP)
4947
4948 This is significantly faster than compute_ae_kill. */
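/* Worked bitmap example (illustrative): with three expressions and, for
   some block B, TRANSP[B] = 101 and COMP[B] = 001, we get
   AE_KILL[B] = ~(101 | 001) = 010, i.e. only the middle expression is
   killed by B: it is neither transparent through B nor computed in B,
   so it cannot be available at B's exit.  */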
4949
4950 FOR_EACH_BB (bb)
4951 {
4952 edge e;
4953
4954 /* If the current block is the destination of an abnormal edge, we
4955 kill all trapping expressions because we won't be able to properly
4956 place the instruction on the edge. So make them neither
4957 anticipatable nor transparent. This is fairly conservative. */
4958 for (e = bb->pred; e ; e = e->pred_next)
4959 if (e->flags & EDGE_ABNORMAL)
4960 {
4961 sbitmap_difference (antloc[bb->index], antloc[bb->index], trapping_expr);
4962 sbitmap_difference (transp[bb->index], transp[bb->index], trapping_expr);
4963 break;
4964 }
4965
4966 sbitmap_a_or_b (ae_kill[bb->index], transp[bb->index], comp[bb->index]);
4967 sbitmap_not (ae_kill[bb->index], ae_kill[bb->index]);
4968 }
4969
4970 edge_list = pre_edge_lcm (gcse_file, expr_hash_table.n_elems, transp, comp, antloc,
4971 ae_kill, &pre_insert_map, &pre_delete_map);
4972 sbitmap_vector_free (antloc);
4973 antloc = NULL;
4974 sbitmap_vector_free (ae_kill);
4975 ae_kill = NULL;
4976 sbitmap_free (trapping_expr);
4977 }
4978
4979 /* PRE utilities */
4980
4981 /* Return nonzero if an occurrence of expression EXPR in OCCR_BB would reach
4982 block BB.
4983
4984 VISITED is a pointer to a working buffer for tracking which BB's have
4985 been visited. It is NULL for the top-level call.
4986
4987 We treat reaching expressions that go through blocks containing the same
4988 reaching expression as "not reaching". E.g. if EXPR is generated in blocks
4989 2 and 3, INSN is in block 4, and 2->3->4, we treat the expression in block
4990 2 as not reaching. The intent is to improve the probability of finding
4991 only one reaching expression and to reduce register lifetimes by picking
4992 the closest such expression. */
4993
4994 static int
4995 pre_expr_reaches_here_p_work (occr_bb, expr, bb, visited)
4996 basic_block occr_bb;
4997 struct expr *expr;
4998 basic_block bb;
4999 char *visited;
5000 {
5001 edge pred;
5002
5003 for (pred = bb->pred; pred != NULL; pred = pred->pred_next)
5004 {
5005 basic_block pred_bb = pred->src;
5006
5007 if (pred->src == ENTRY_BLOCK_PTR
5008 /* Has this predecessor already been visited? */
5009 || visited[pred_bb->index])
5010 ;/* Nothing to do. */
5011
5012 /* Does this predecessor generate this expression? */
5013 else if (TEST_BIT (comp[pred_bb->index], expr->bitmap_index))
5014 {
5015 /* Is this the occurrence we're looking for?
5016 Note that there's only one generating occurrence per block
5017 so we just need to check the block number. */
5018 if (occr_bb == pred_bb)
5019 return 1;
5020
5021 visited[pred_bb->index] = 1;
5022 }
5023 /* Ignore this predecessor if it kills the expression. */
5024 else if (! TEST_BIT (transp[pred_bb->index], expr->bitmap_index))
5025 visited[pred_bb->index] = 1;
5026
5027 /* Neither gen nor kill. */
5028 else
5029 {
5030 visited[pred_bb->index] = 1;
5031 if (pre_expr_reaches_here_p_work (occr_bb, expr, pred_bb, visited))
5032 return 1;
5033 }
5034 }
5035
5036 /* All paths have been checked. */
5037 return 0;
5038 }
5039
5040 /* The wrapper for pre_expr_reaches_here_p_work that ensures that any
5041 memory allocated for that function is returned. */
5042
5043 static int
5044 pre_expr_reaches_here_p (occr_bb, expr, bb)
5045 basic_block occr_bb;
5046 struct expr *expr;
5047 basic_block bb;
5048 {
5049 int rval;
5050 char *visited = (char *) xcalloc (last_basic_block, 1);
5051
5052 rval = pre_expr_reaches_here_p_work (occr_bb, expr, bb, visited);
5053
5054 free (visited);
5055 return rval;
5056 }
5057
5058
5059 /* Given an expr, generate RTL which we can insert at the end of a BB,
5060 or on an edge. Set the block number of any insns generated to
5061 the value of BB. */
5062
5063 static rtx
5064 process_insert_insn (expr)
5065 struct expr *expr;
5066 {
5067 rtx reg = expr->reaching_reg;
5068 rtx exp = copy_rtx (expr->expr);
5069 rtx pat;
5070
5071 start_sequence ();
5072
5073 /* If the expression is something that's an operand, like a constant,
5074 just copy it to a register. */
5075 if (general_operand (exp, GET_MODE (reg)))
5076 emit_move_insn (reg, exp);
5077
5078 /* Otherwise, make a new insn to compute this expression and make sure the
5079 insn will be recognized (this also adds any needed CLOBBERs). Copy the
5080 expression to make sure we don't have any sharing issues. */
5081 else if (insn_invalid_p (emit_insn (gen_rtx_SET (VOIDmode, reg, exp))))
5082 abort ();
5083
5084 pat = get_insns ();
5085 end_sequence ();
5086
5087 return pat;
5088 }
5089
5090 /* Add EXPR to the end of basic block BB.
5091
5092 This is used by both the PRE and code hoisting.
5093
5094 For PRE, we want to verify that the expr is either transparent
5095 or locally anticipatable in the target block. This check makes
5096 no sense for code hoisting. */
5097
5098 static void
5099 insert_insn_end_bb (expr, bb, pre)
5100 struct expr *expr;
5101 basic_block bb;
5102 int pre;
5103 {
5104 rtx insn = bb->end;
5105 rtx new_insn;
5106 rtx reg = expr->reaching_reg;
5107 int regno = REGNO (reg);
5108 rtx pat, pat_end;
5109
5110 pat = process_insert_insn (expr);
5111 if (pat == NULL_RTX || ! INSN_P (pat))
5112 abort ();
5113
5114 pat_end = pat;
5115 while (NEXT_INSN (pat_end) != NULL_RTX)
5116 pat_end = NEXT_INSN (pat_end);
5117
5118 /* If the last insn is a jump, insert EXPR in front [taking care to
5119 handle cc0, etc. properly]. Similarly we need to take care of trapping
5120 instructions in the presence of non-call exceptions. */
5121
5122 if (GET_CODE (insn) == JUMP_INSN
5123 || (GET_CODE (insn) == INSN
5124 && (bb->succ->succ_next || (bb->succ->flags & EDGE_ABNORMAL))))
5125 {
5126 #ifdef HAVE_cc0
5127 rtx note;
5128 #endif
5129 /* It should always be the case that we can put these instructions
5130 anywhere in the basic block when performing PRE optimizations.
5131 Check this. */
5132 if (GET_CODE (insn) == INSN && pre
5133 && !TEST_BIT (antloc[bb->index], expr->bitmap_index)
5134 && !TEST_BIT (transp[bb->index], expr->bitmap_index))
5135 abort ();
5136
5137 /* If this is a jump table, then we can't insert stuff here. Since
5138 we know the previous real insn must be the tablejump, we insert
5139 the new instruction just before the tablejump. */
5140 if (GET_CODE (PATTERN (insn)) == ADDR_VEC
5141 || GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC)
5142 insn = prev_real_insn (insn);
5143
5144 #ifdef HAVE_cc0
5145 /* FIXME: 'twould be nice to call prev_cc0_setter here but it aborts
5146 if cc0 isn't set. */
5147 note = find_reg_note (insn, REG_CC_SETTER, NULL_RTX);
5148 if (note)
5149 insn = XEXP (note, 0);
5150 else
5151 {
5152 rtx maybe_cc0_setter = prev_nonnote_insn (insn);
5153 if (maybe_cc0_setter
5154 && INSN_P (maybe_cc0_setter)
5155 && sets_cc0_p (PATTERN (maybe_cc0_setter)))
5156 insn = maybe_cc0_setter;
5157 }
5158 #endif
5159 /* FIXME: What if something in cc0/jump uses value set in new insn? */
5160 new_insn = emit_insn_before (pat, insn);
5161 }
5162
5163 /* Likewise if the last insn is a call, as will happen in the presence
5164 of exception handling. */
5165 else if (GET_CODE (insn) == CALL_INSN
5166 && (bb->succ->succ_next || (bb->succ->flags & EDGE_ABNORMAL)))
5167 {
5168 /* Keeping in mind SMALL_REGISTER_CLASSES and parameters in registers,
5169 we search backward and place the instructions before the first
5170 parameter is loaded. Do this for everyone for consistency and a
5171 presumption that we'll get better code elsewhere as well.
5172
5173 It should always be the case that we can put these instructions
5174 anywhere in the basic block when performing PRE optimizations.
5175 Check this. */
5176
5177 if (pre
5178 && !TEST_BIT (antloc[bb->index], expr->bitmap_index)
5179 && !TEST_BIT (transp[bb->index], expr->bitmap_index))
5180 abort ();
5181
5182 /* Since different machines initialize their parameter registers
5183 in different orders, assume nothing. Collect the set of all
5184 parameter registers. */
5185 insn = find_first_parameter_load (insn, bb->head);
5186
5187 /* If we found all the parameter loads, then we want to insert
5188 before the first parameter load.
5189
5190 If we did not find all the parameter loads, then we might have
5191 stopped on the head of the block, which could be a CODE_LABEL.
5192 If we inserted before the CODE_LABEL, then we would be putting
5193 the insn in the wrong basic block. In that case, put the insn
5194 after the CODE_LABEL. Also, respect NOTE_INSN_BASIC_BLOCK. */
5195 while (GET_CODE (insn) == CODE_LABEL
5196 || NOTE_INSN_BASIC_BLOCK_P (insn))
5197 insn = NEXT_INSN (insn);
5198
5199 new_insn = emit_insn_before (pat, insn);
5200 }
5201 else
5202 new_insn = emit_insn_after (pat, insn);
5203
5204 while (1)
5205 {
5206 if (INSN_P (pat))
5207 {
5208 add_label_notes (PATTERN (pat), new_insn);
5209 note_stores (PATTERN (pat), record_set_info, pat);
5210 }
5211 if (pat == pat_end)
5212 break;
5213 pat = NEXT_INSN (pat);
5214 }
5215
5216 gcse_create_count++;
5217
5218 if (gcse_file)
5219 {
5220 fprintf (gcse_file, "PRE/HOIST: end of bb %d, insn %d, ",
5221 bb->index, INSN_UID (new_insn));
5222 fprintf (gcse_file, "copying expression %d to reg %d\n",
5223 expr->bitmap_index, regno);
5224 }
5225 }
5226
5227 /* Insert partially redundant expressions on edges in the CFG to make
5228 the expressions fully redundant. */
5229
5230 static int
5231 pre_edge_insert (edge_list, index_map)
5232 struct edge_list *edge_list;
5233 struct expr **index_map;
5234 {
5235 int e, i, j, num_edges, set_size, did_insert = 0;
5236 sbitmap *inserted;
5237
5238 /* Where PRE_INSERT_MAP is nonzero, we add the expression on that edge
5239 if it reaches any of the deleted expressions. */
5240
5241 set_size = pre_insert_map[0]->size;
5242 num_edges = NUM_EDGES (edge_list);
5243 inserted = sbitmap_vector_alloc (num_edges, expr_hash_table.n_elems);
5244 sbitmap_vector_zero (inserted, num_edges);
5245
5246 for (e = 0; e < num_edges; e++)
5247 {
5248 int indx;
5249 basic_block bb = INDEX_EDGE_PRED_BB (edge_list, e);
5250
5251 for (i = indx = 0; i < set_size; i++, indx += SBITMAP_ELT_BITS)
5252 {
5253 SBITMAP_ELT_TYPE insert = pre_insert_map[e]->elms[i];
5254
5255 for (j = indx; insert && j < (int) expr_hash_table.n_elems; j++, insert >>= 1)
5256 if ((insert & 1) != 0 && index_map[j]->reaching_reg != NULL_RTX)
5257 {
5258 struct expr *expr = index_map[j];
5259 struct occr *occr;
5260
5261 /* Now look at each deleted occurrence of this expression. */
5262 for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
5263 {
5264 if (! occr->deleted_p)
5265 continue;
5266
5267 /* Insert this expression on this edge if it would
5268 reach the deleted occurrence in BB. */
5269 if (!TEST_BIT (inserted[e], j))
5270 {
5271 rtx insn;
5272 edge eg = INDEX_EDGE (edge_list, e);
5273
5274 /* We can't insert anything on an abnormal and
5275 critical edge, so we insert the insn at the end of
5276 the previous block. There are several alternatives
5277 detailed in Morgan's book, p. 277 (sec. 10.5), for
5278 handling this situation. This one is easiest for
5279 now. */
5280
5281 if ((eg->flags & EDGE_ABNORMAL) == EDGE_ABNORMAL)
5282 insert_insn_end_bb (index_map[j], bb, 0);
5283 else
5284 {
5285 insn = process_insert_insn (index_map[j]);
5286 insert_insn_on_edge (insn, eg);
5287 }
5288
5289 if (gcse_file)
5290 {
5291 fprintf (gcse_file, "PRE/HOIST: edge (%d,%d), ",
5292 bb->index,
5293 INDEX_EDGE_SUCC_BB (edge_list, e)->index);
5294 fprintf (gcse_file, "copy expression %d\n",
5295 expr->bitmap_index);
5296 }
5297
5298 update_ld_motion_stores (expr);
5299 SET_BIT (inserted[e], j);
5300 did_insert = 1;
5301 gcse_create_count++;
5302 }
5303 }
5304 }
5305 }
5306 }
5307
5308 sbitmap_vector_free (inserted);
5309 return did_insert;
5310 }
5311
5312 /* Copy the result of INSN to REG. INDX is the expression number. */
5313
5314 static void
5315 pre_insert_copy_insn (expr, insn)
5316 struct expr *expr;
5317 rtx insn;
5318 {
5319 rtx reg = expr->reaching_reg;
5320 int regno = REGNO (reg);
5321 int indx = expr->bitmap_index;
5322 rtx pat = PATTERN (insn);
5323 rtx set, new_insn;
5324 int i;
5325
5326 /* This block matches the logic in hash_scan_insn. */
5327 if (GET_CODE (pat) == SET)
5328 set = pat;
5329 else if (GET_CODE (pat) == PARALLEL)
5330 {
5331 /* Search through the parallel looking for the set whose
5332 source was the expression that we're interested in. */
5333 set = NULL_RTX;
5334 for (i = 0; i < XVECLEN (pat, 0); i++)
5335 {
5336 rtx x = XVECEXP (pat, 0, i);
5337 if (GET_CODE (x) == SET
5338 && expr_equiv_p (SET_SRC (x), expr->expr))
5339 {
5340 set = x;
5341 break;
5342 }
5343 }
5344 if (! set)
5345 abort ();
5346 }
5347 else
5348 abort ();
5349
5350 new_insn = gen_move_insn (reg, copy_rtx (SET_DEST (set)));
5351 new_insn = emit_insn_after (new_insn, insn);
5352
5353 /* Keep register set table up to date. */
5354 record_one_set (regno, new_insn);
5355
5356 gcse_create_count++;
5357
5358 if (gcse_file)
5359 fprintf (gcse_file,
5360 "PRE: bb %d, insn %d, copy expression %d in insn %d to reg %d\n",
5361 BLOCK_NUM (insn), INSN_UID (new_insn), indx,
5362 INSN_UID (insn), regno);
5363 update_ld_motion_stores (expr);
5364 }
5365
5366 /* Copy available expressions that reach the redundant expression
5367 to `reaching_reg'. */
5368
5369 static void
pre_insert_copies()5370 pre_insert_copies ()
5371 {
5372 unsigned int i;
5373 struct expr *expr;
5374 struct occr *occr;
5375 struct occr *avail;
5376
5377 /* For each available expression in the table, copy the result to
5378 `reaching_reg' if the expression reaches a deleted one.
5379
5380 ??? The current algorithm is rather brute force.
5381 Need to do some profiling. */
5382
5383 for (i = 0; i < expr_hash_table.size; i++)
5384 for (expr = expr_hash_table.table[i]; expr != NULL; expr = expr->next_same_hash)
5385 {
5386 /* If the basic block isn't reachable, PPOUT will be TRUE. However,
5387 we don't want to insert a copy here because the expression may not
5388 really be redundant. So only insert an insn if the expression was
5389 deleted. This test also avoids further processing if the
5390 expression wasn't deleted anywhere. */
5391 if (expr->reaching_reg == NULL)
5392 continue;
5393
5394 for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
5395 {
5396 if (! occr->deleted_p)
5397 continue;
5398
5399 for (avail = expr->avail_occr; avail != NULL; avail = avail->next)
5400 {
5401 rtx insn = avail->insn;
5402
5403 /* No need to handle this one if handled already. */
5404 if (avail->copied_p)
5405 continue;
5406
5407 /* Don't handle this one if it's a redundant one. */
5408 if (TEST_BIT (pre_redundant_insns, INSN_CUID (insn)))
5409 continue;
5410
5411 /* Or if the expression doesn't reach the deleted one. */
5412 if (! pre_expr_reaches_here_p (BLOCK_FOR_INSN (avail->insn),
5413 expr,
5414 BLOCK_FOR_INSN (occr->insn)))
5415 continue;
5416
5417 /* Copy the result of avail to reaching_reg. */
5418 pre_insert_copy_insn (expr, insn);
5419 avail->copied_p = 1;
5420 }
5421 }
5422 }
5423 }
5424
5425 /* Emit move from SRC to DEST noting the equivalence with expression computed
5426 in INSN. */
5427 static rtx
5428 gcse_emit_move_after (src, dest, insn)
5429 rtx src, dest, insn;
5430 {
5431 rtx new;
5432 rtx set = single_set (insn), set2;
5433 rtx note;
5434 rtx eqv;
5435
5436 /* This should never fail since we're creating a reg->reg copy
5437 we've verified to be valid. */
5438
5439 new = emit_insn_after (gen_move_insn (dest, src), insn);
5440
5441 /* Note the equivalence for local CSE pass. */
5442 set2 = single_set (new);
5443 if (!set2 || !rtx_equal_p (SET_DEST (set2), dest))
5444 return new;
5445 if ((note = find_reg_equal_equiv_note (insn)))
5446 eqv = XEXP (note, 0);
5447 else
5448 eqv = SET_SRC (set);
5449
5450 set_unique_reg_note (new, REG_EQUAL, copy_insn_1 (eqv));
5451
5452 return new;
5453 }
5454
5455 /* Delete redundant computations.
5456 Deletion is done by changing the insn to copy the `reaching_reg' of
5457 the expression into the result of the SET. It is left to later passes
5458 (cprop, cse2, flow, combine, regmove) to propagate the copy or eliminate it.
5459
5460 Returns nonzero if a change is made. */
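
/* For instance (an illustrative sketch, not taken from any test case):
   if the computation

       y = a + b;

   has its bit set in pre_delete_map for its block, the insn is deleted
   and replaced by

       y = reaching_reg;

   where reaching_reg is the pseudo allocated below; pre_edge_insert and
   pre_insert_copies separately arrange for reaching_reg to hold a + b
   on every path into this point.  */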
5461
5462 static int
5463 pre_delete ()
5464 {
5465 unsigned int i;
5466 int changed;
5467 struct expr *expr;
5468 struct occr *occr;
5469
5470 changed = 0;
5471 for (i = 0; i < expr_hash_table.size; i++)
5472 for (expr = expr_hash_table.table[i]; expr != NULL; expr = expr->next_same_hash)
5473 {
5474 int indx = expr->bitmap_index;
5475
5476 /* We only need to search antic_occr since we require
5477 ANTLOC != 0. */
5478
5479 for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
5480 {
5481 rtx insn = occr->insn;
5482 rtx set;
5483 basic_block bb = BLOCK_FOR_INSN (insn);
5484
5485 if (TEST_BIT (pre_delete_map[bb->index], indx))
5486 {
5487 set = single_set (insn);
5488 if (! set)
5489 abort ();
5490
5491 /* Create a pseudo-reg to store the result of reaching
5492 expressions into. Get the mode for the new pseudo from
5493 the mode of the original destination pseudo. */
5494 if (expr->reaching_reg == NULL)
5495 expr->reaching_reg
5496 = gen_reg_rtx (GET_MODE (SET_DEST (set)));
5497
5498 gcse_emit_move_after (expr->reaching_reg, SET_DEST (set), insn);
5499 delete_insn (insn);
5500 occr->deleted_p = 1;
5501 SET_BIT (pre_redundant_insns, INSN_CUID (insn));
5502 changed = 1;
5503 gcse_subst_count++;
5504
5505 if (gcse_file)
5506 {
5507 fprintf (gcse_file,
5508 "PRE: redundant insn %d (expression %d) in ",
5509 INSN_UID (insn), indx);
5510 fprintf (gcse_file, "bb %d, reaching reg is %d\n",
5511 bb->index, REGNO (expr->reaching_reg));
5512 }
5513 }
5514 }
5515 }
5516
5517 return changed;
5518 }
5519
5520 /* Perform GCSE optimizations using PRE.
5521 This is called by one_pre_gcse_pass after all the dataflow analysis
5522 has been done.
5523
5524 This is based on the original Morel-Renvoise paper, Fred Chow's thesis, and
5525 lazy code motion from Knoop, Ruthing and Steffen as described in Advanced
5526 Compiler Design and Implementation.
5527
5528 ??? A new pseudo reg is created to hold the reaching expression. The nice
5529 thing about the classical approach is that it would try to use an existing
5530 reg. If the register can't be adequately optimized [i.e. we introduce
5531 reload problems], one could add a pass here to propagate the new register
5532 through the block.
5533
5534 ??? We don't handle single sets in PARALLELs because we're [currently] not
5535 able to copy the rest of the parallel when we insert copies to create full
5536 redundancies from partial redundancies. However, there's no reason why we
5537 can't handle PARALLELs in the cases where there are no partial
5538 redundancies. */
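
/* A hypothetical source-level sketch of the overall effect (`t' stands
   for the new reaching_reg pseudo):

       if (cond)                       if (cond)
         x = a + b;                      { x = a + b; t = x; }
       ...                 ==>         else
       y = a + b;                        t = a + b;   <- inserted on the edge
                                        ...
                                        y = t;        <- redundancy deleted

   pre_delete removes the fully redundant computation, pre_edge_insert
   adds the computation on the edge that lacked it, and pre_insert_copies
   adds the copy after the occurrence that was already available.  */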
5539
5540 static int
5541 pre_gcse ()
5542 {
5543 unsigned int i;
5544 int did_insert, changed;
5545 struct expr **index_map;
5546 struct expr *expr;
5547
5548 /* Compute a mapping from expression number (`bitmap_index') to
5549 hash table entry. */
5550
5551 index_map = (struct expr **) xcalloc (expr_hash_table.n_elems, sizeof (struct expr *));
5552 for (i = 0; i < expr_hash_table.size; i++)
5553 for (expr = expr_hash_table.table[i]; expr != NULL; expr = expr->next_same_hash)
5554 index_map[expr->bitmap_index] = expr;
5555
5556 /* Reset bitmap used to track which insns are redundant. */
5557 pre_redundant_insns = sbitmap_alloc (max_cuid);
5558 sbitmap_zero (pre_redundant_insns);
5559
5560 /* Delete the redundant insns first so that
5561 - we know what register to use for the new insns and for the other
5562 ones with reaching expressions
5563 - we know which insns are redundant when we go to create copies */
5564
5565 changed = pre_delete ();
5566
5567 did_insert = pre_edge_insert (edge_list, index_map);
5568
5569 /* In other places with reaching expressions, copy the expression to the
5570 specially allocated pseudo-reg that reaches the redundant expr. */
5571 pre_insert_copies ();
5572 if (did_insert)
5573 {
5574 commit_edge_insertions ();
5575 changed = 1;
5576 }
5577
5578 free (index_map);
5579 sbitmap_free (pre_redundant_insns);
5580 return changed;
5581 }
5582
5583 /* Top level routine to perform one PRE GCSE pass.
5584
5585 Return nonzero if a change was made. */
5586
5587 static int
5588 one_pre_gcse_pass (pass)
5589 int pass;
5590 {
5591 int changed = 0;
5592
5593 gcse_subst_count = 0;
5594 gcse_create_count = 0;
5595
5596 alloc_hash_table (max_cuid, &expr_hash_table, 0);
5597 add_noreturn_fake_exit_edges ();
5598 if (flag_gcse_lm)
5599 compute_ld_motion_mems ();
5600
5601 compute_hash_table (&expr_hash_table);
5602 trim_ld_motion_mems ();
5603 if (gcse_file)
5604 dump_hash_table (gcse_file, "Expression", &expr_hash_table);
5605
5606 if (expr_hash_table.n_elems > 0)
5607 {
5608 alloc_pre_mem (last_basic_block, expr_hash_table.n_elems);
5609 compute_pre_data ();
5610 changed |= pre_gcse ();
5611 free_edge_list (edge_list);
5612 free_pre_mem ();
5613 }
5614
5615 free_ldst_mems ();
5616 remove_fake_edges ();
5617 free_hash_table (&expr_hash_table);
5618
5619 if (gcse_file)
5620 {
5621 fprintf (gcse_file, "\nPRE GCSE of %s, pass %d: %d bytes needed, ",
5622 current_function_name, pass, bytes_used);
5623 fprintf (gcse_file, "%d substs, %d insns created\n",
5624 gcse_subst_count, gcse_create_count);
5625 }
5626
5627 return changed;
5628 }
5629
5630 /* If X contains any LABEL_REF's, add REG_LABEL notes for them to INSN.
5631 If notes are added to an insn which references a CODE_LABEL, the
5632 LABEL_NUSES count is incremented. We have to add REG_LABEL notes,
5633 because the following loop optimization pass requires them. */
5634
5635 /* ??? This is very similar to the loop.c add_label_notes function. We
5636 could probably share code here. */
5637
5638 /* ??? If there was a jump optimization pass after gcse and before loop,
5639 then we would not need to do this here, because jump would add the
5640 necessary REG_LABEL notes. */
5641
5642 static void
5643 add_label_notes (x, insn)
5644 rtx x;
5645 rtx insn;
5646 {
5647 enum rtx_code code = GET_CODE (x);
5648 int i, j;
5649 const char *fmt;
5650
5651 if (code == LABEL_REF && !LABEL_REF_NONLOCAL_P (x))
5652 {
5653 /* This code used to ignore labels that referred to dispatch tables to
5654 avoid flow generating (slightly) worse code.
5655
5656 We no longer ignore such label references (see LABEL_REF handling in
5657 mark_jump_label for additional information). */
5658
5659 REG_NOTES (insn) = gen_rtx_INSN_LIST (REG_LABEL, XEXP (x, 0),
5660 REG_NOTES (insn));
5661 if (LABEL_P (XEXP (x, 0)))
5662 LABEL_NUSES (XEXP (x, 0))++;
5663 return;
5664 }
5665
5666 for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
5667 {
5668 if (fmt[i] == 'e')
5669 add_label_notes (XEXP (x, i), insn);
5670 else if (fmt[i] == 'E')
5671 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
5672 add_label_notes (XVECEXP (x, i, j), insn);
5673 }
5674 }
5675
5676 /* Compute transparent outgoing information for each block.
5677
5678 An expression is transparent to an edge unless it is killed by
5679 the edge itself. This can only happen with abnormal control flow,
5680 when the edge is traversed through a call. This happens with
5681 non-local labels and exceptions.
5682
5683 This would not be necessary if we split the edge. While this is
5684 normally impossible for abnormal critical edges, with some effort
5685 it should be possible with exception handling, since we still have
5686 control over which handler should be invoked. But due to increased
5687 EH table sizes, this may not be worthwhile. */
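
/* A hedged example of the situation this guards against:

       x = *p;
       foo ();       <- the call has an abnormal outgoing edge (exception
                        or nonlocal goto); foo may store through p before
                        control leaves along that edge

   so the MEM expression is treated as killed by the edge itself.  We
   approximate this by clearing TRANSPOUT for every MEM in any block
   that ends in a CALL_INSN.  */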
5688
5689 static void
5690 compute_transpout ()
5691 {
5692 basic_block bb;
5693 unsigned int i;
5694 struct expr *expr;
5695
5696 sbitmap_vector_ones (transpout, last_basic_block);
5697
5698 FOR_EACH_BB (bb)
5699 {
5700 /* Note that flow inserted a nop at the end of basic blocks that
5701 end in call instructions for reasons other than abnormal
5702 control flow. */
5703 if (GET_CODE (bb->end) != CALL_INSN)
5704 continue;
5705
5706 for (i = 0; i < expr_hash_table.size; i++)
5707 for (expr = expr_hash_table.table[i]; expr ; expr = expr->next_same_hash)
5708 if (GET_CODE (expr->expr) == MEM)
5709 {
5710 if (GET_CODE (XEXP (expr->expr, 0)) == SYMBOL_REF
5711 && CONSTANT_POOL_ADDRESS_P (XEXP (expr->expr, 0)))
5712 continue;
5713
5714 /* ??? Optimally, we would use interprocedural alias
5715 analysis to determine if this mem is actually killed
5716 by this call. */
5717 RESET_BIT (transpout[bb->index], expr->bitmap_index);
5718 }
5719 }
5720 }
5721
5722 /* Removal of useless null pointer checks */
5723
5724 /* Called via note_stores. X is set by SETTER. If X is a register we must
5725 invalidate nonnull_local and set nonnull_killed. DATA is really a
5726 `null_pointer_info *'.
5727
5728 We ignore hard registers. */
5729
5730 static void
5731 invalidate_nonnull_info (x, setter, data)
5732 rtx x;
5733 rtx setter ATTRIBUTE_UNUSED;
5734 void *data;
5735 {
5736 unsigned int regno;
5737 struct null_pointer_info *npi = (struct null_pointer_info *) data;
5738
5739 while (GET_CODE (x) == SUBREG)
5740 x = SUBREG_REG (x);
5741
5742 /* Ignore anything that is not a register or is a hard register. */
5743 if (GET_CODE (x) != REG
5744 || REGNO (x) < npi->min_reg
5745 || REGNO (x) >= npi->max_reg)
5746 return;
5747
5748 regno = REGNO (x) - npi->min_reg;
5749
5750 RESET_BIT (npi->nonnull_local[npi->current_block->index], regno);
5751 SET_BIT (npi->nonnull_killed[npi->current_block->index], regno);
5752 }
5753
5754 /* Do null-pointer check elimination for the registers indicated in
5755 NPI. NONNULL_AVIN and NONNULL_AVOUT are pre-allocated sbitmaps;
5756 they are not our responsibility to free. */
5757
5758 static int
5759 delete_null_pointer_checks_1 (block_reg, nonnull_avin,
5760 nonnull_avout, npi)
5761 unsigned int *block_reg;
5762 sbitmap *nonnull_avin;
5763 sbitmap *nonnull_avout;
5764 struct null_pointer_info *npi;
5765 {
5766 basic_block bb, current_block;
5767 sbitmap *nonnull_local = npi->nonnull_local;
5768 sbitmap *nonnull_killed = npi->nonnull_killed;
5769 int something_changed = 0;
5770
5771 /* Compute local properties, nonnull and killed. A register will have
5772 the nonnull property if at the end of the current block its value is
5773 known to be nonnull. The killed property indicates that somewhere in
5774 the block any information we had about the register is killed.
5775
5776 Note that a register can have both properties in a single block. That
5777 indicates that it's killed, then later in the block a new value is
5778 computed. */
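
/* A small illustration (hypothetical pseudo source):

       p = q;      <- any earlier nonnull information for p is killed
       x = *p;     <- p is dereferenced, so p is known nonnull from
                      here to the end of the block

   Such a block ends up with both the killed bit and the nonnull bit
   set for p.  */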
5779 sbitmap_vector_zero (nonnull_local, last_basic_block);
5780 sbitmap_vector_zero (nonnull_killed, last_basic_block);
5781
5782 FOR_EACH_BB (current_block)
5783 {
5784 rtx insn, stop_insn;
5785
5786 /* Set the current block for invalidate_nonnull_info. */
5787 npi->current_block = current_block;
5788
5789 /* Scan each insn in the basic block looking for memory references and
5790 register sets. */
5791 stop_insn = NEXT_INSN (current_block->end);
5792 for (insn = current_block->head;
5793 insn != stop_insn;
5794 insn = NEXT_INSN (insn))
5795 {
5796 rtx set;
5797 rtx reg;
5798
5799 /* Ignore anything that is not a normal insn. */
5800 if (! INSN_P (insn))
5801 continue;
5802
5803 /* Basically ignore anything that is not a simple SET. We do have
5804 to make sure to invalidate nonnull_local and set nonnull_killed
5805 for such insns though. */
5806 set = single_set (insn);
5807 if (!set)
5808 {
5809 note_stores (PATTERN (insn), invalidate_nonnull_info, npi);
5810 continue;
5811 }
5812
5813 /* See if we've got a usable memory load. We handle it first
5814 in case it uses its address register as a dest (which kills
5815 the nonnull property). */
5816 if (GET_CODE (SET_SRC (set)) == MEM
5817 && GET_CODE ((reg = XEXP (SET_SRC (set), 0))) == REG
5818 && REGNO (reg) >= npi->min_reg
5819 && REGNO (reg) < npi->max_reg)
5820 SET_BIT (nonnull_local[current_block->index],
5821 REGNO (reg) - npi->min_reg);
5822
5823 /* Now invalidate stuff clobbered by this insn. */
5824 note_stores (PATTERN (insn), invalidate_nonnull_info, npi);
5825
5826 /* And handle stores; we do these last since any sets in INSN can
5827 not kill the nonnull property if it is derived from a MEM
5828 appearing in a SET_DEST. */
5829 if (GET_CODE (SET_DEST (set)) == MEM
5830 && GET_CODE ((reg = XEXP (SET_DEST (set), 0))) == REG
5831 && REGNO (reg) >= npi->min_reg
5832 && REGNO (reg) < npi->max_reg)
5833 SET_BIT (nonnull_local[current_block->index],
5834 REGNO (reg) - npi->min_reg);
5835 }
5836 }
5837
5838 /* Now compute global properties based on the local properties. This
5839 is a classic global availability algorithm. */
5840 compute_available (nonnull_local, nonnull_killed,
5841 nonnull_avout, nonnull_avin);
5842
5843 /* Now look at each bb and see if it ends with a compare of a value
5844 against zero. */
5845 FOR_EACH_BB (bb)
5846 {
5847 rtx last_insn = bb->end;
5848 rtx condition, earliest;
5849 int compare_and_branch;
5850
5851 /* Since MIN_REG is always at least FIRST_PSEUDO_REGISTER, and
5852 since BLOCK_REG[BB] is zero if this block did not end with a
5853 comparison against zero, this condition works. */
5854 if (block_reg[bb->index] < npi->min_reg
5855 || block_reg[bb->index] >= npi->max_reg)
5856 continue;
5857
5858 /* LAST_INSN is a conditional jump. Get its condition. */
5859 condition = get_condition (last_insn, &earliest);
5860
5861 /* If we can't determine the condition then skip. */
5862 if (! condition)
5863 continue;
5864
5865 /* Is the register known to have a nonzero value? */
5866 if (!TEST_BIT (nonnull_avout[bb->index], block_reg[bb->index] - npi->min_reg))
5867 continue;
5868
5869 /* Try to compute whether the compare/branch at the end of this block is one
5870 or two instructions. */
5871 if (earliest == last_insn)
5872 compare_and_branch = 1;
5873 else if (earliest == prev_nonnote_insn (last_insn))
5874 compare_and_branch = 2;
5875 else
5876 continue;
5877
5878 /* We know the register in this comparison is nonnull at exit from
5879 this block. We can optimize this comparison. */
5880 if (GET_CODE (condition) == NE)
5881 {
5882 rtx new_jump;
5883
5884 new_jump = emit_jump_insn_after (gen_jump (JUMP_LABEL (last_insn)),
5885 last_insn);
5886 JUMP_LABEL (new_jump) = JUMP_LABEL (last_insn);
5887 LABEL_NUSES (JUMP_LABEL (new_jump))++;
5888 emit_barrier_after (new_jump);
5889 }
5890
5891 something_changed = 1;
5892 delete_insn (last_insn);
5893 if (compare_and_branch == 2)
5894 delete_insn (earliest);
5895 purge_dead_edges (bb);
5896
5897 /* Don't check this block again. (Note that BLOCK_END is
5898 invalid here; we deleted the last instruction in the
5899 block.) */
5900 block_reg[bb->index] = 0;
5901 }
5902
5903 return something_changed;
5904 }
5905
5906 /* Find EQ/NE comparisons against zero which can be (indirectly) evaluated
5907 at compile time.
5908
5909 This is conceptually similar to global constant/copy propagation and
5910 classic global CSE (it even uses the same dataflow equations as cprop).
5911
5912 If a register is used as a memory address in the form (mem (reg)), then we
5913 know that REG can not be zero at that point in the program. Any instruction
5914 which sets REG "kills" this property.
5915
5916 So, if every path leading to a conditional branch has an available memory
5917 reference of that form, then we know the register can not have the value
5918 zero at the conditional branch.
5919
5920 So we merely need to compute the local properties and propagate that data
5921 around the cfg, then optimize where possible.
5922
5923 We run this pass two times. Once before CSE, then again after CSE. This
5924 has proven to be the most profitable approach. It is rare for new
5925 optimization opportunities of this nature to appear after the first CSE
5926 pass.
5927
5928 This could probably be integrated with global cprop with a little work. */
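
/* A typical case this pass removes (illustrative only):

       tmp = *p;          <- p appears as (mem (reg)), so p is known
       ...                   to be nonnull from here on
       if (p == 0)        <- provably false if every path to the branch
         abort ();           contains such a dereference; the compare
                             and branch are deleted below

   Nothing is known about p on paths that lack the dereference, which is
   why global availability, not just local information, is required.  */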
5929
5930 int
5931 delete_null_pointer_checks (f)
5932 rtx f ATTRIBUTE_UNUSED;
5933 {
5934 sbitmap *nonnull_avin, *nonnull_avout;
5935 unsigned int *block_reg;
5936 basic_block bb;
5937 int reg;
5938 int regs_per_pass;
5939 int max_reg;
5940 struct null_pointer_info npi;
5941 int something_changed = 0;
5942
5943 /* If we have only a single block, then there's nothing to do. */
5944 if (n_basic_blocks <= 1)
5945 return 0;
5946
5947 /* Trying to perform global optimizations on flow graphs which have
5948 a high connectivity will take a long time and is unlikely to be
5949 particularly useful.
5950
5951 In normal circumstances a cfg should have about twice as many edges
5952 as blocks. But we do not want to punish small functions which have
5953 a couple switch statements. So we require a relatively large number
5954 of basic blocks and the ratio of edges to blocks to be high. */
5955 if (n_basic_blocks > 1000 && n_edges / n_basic_blocks >= 20)
5956 return 0;
5957
5958 /* We need four bitmaps, each with a bit for each register in each
5959 basic block. */
5960 max_reg = max_reg_num ();
5961 regs_per_pass = get_bitmap_width (4, last_basic_block, max_reg);
5962
5963 /* Allocate bitmaps to hold local and global properties. */
5964 npi.nonnull_local = sbitmap_vector_alloc (last_basic_block, regs_per_pass);
5965 npi.nonnull_killed = sbitmap_vector_alloc (last_basic_block, regs_per_pass);
5966 nonnull_avin = sbitmap_vector_alloc (last_basic_block, regs_per_pass);
5967 nonnull_avout = sbitmap_vector_alloc (last_basic_block, regs_per_pass);
5968
5969 /* Go through the basic blocks, seeing whether or not each block
5970 ends with a conditional branch whose condition is a comparison
5971 against zero. Record the register compared in BLOCK_REG. */
5972 block_reg = (unsigned int *) xcalloc (last_basic_block, sizeof (int));
5973 FOR_EACH_BB (bb)
5974 {
5975 rtx last_insn = bb->end;
5976 rtx condition, earliest, reg;
5977
5978 /* We only want conditional branches. */
5979 if (GET_CODE (last_insn) != JUMP_INSN
5980 || !any_condjump_p (last_insn)
5981 || !onlyjump_p (last_insn))
5982 continue;
5983
5984 /* LAST_INSN is a conditional jump. Get its condition. */
5985 condition = get_condition (last_insn, &earliest);
5986
5987 /* If we were unable to get the condition, or it is not an equality
5988 comparison against zero then there's nothing we can do. */
5989 if (!condition
5990 || (GET_CODE (condition) != NE && GET_CODE (condition) != EQ)
5991 || GET_CODE (XEXP (condition, 1)) != CONST_INT
5992 || (XEXP (condition, 1)
5993 != CONST0_RTX (GET_MODE (XEXP (condition, 0)))))
5994 continue;
5995
5996 /* We must be checking a register against zero. */
5997 reg = XEXP (condition, 0);
5998 if (GET_CODE (reg) != REG)
5999 continue;
6000
6001 block_reg[bb->index] = REGNO (reg);
6002 }
6003
6004 /* Go through the algorithm for each block of registers. */
6005 for (reg = FIRST_PSEUDO_REGISTER; reg < max_reg; reg += regs_per_pass)
6006 {
6007 npi.min_reg = reg;
6008 npi.max_reg = MIN (reg + regs_per_pass, max_reg);
6009 something_changed |= delete_null_pointer_checks_1 (block_reg,
6010 nonnull_avin,
6011 nonnull_avout,
6012 &npi);
6013 }
6014
6015 /* Free the table of registers compared at the end of every block. */
6016 free (block_reg);
6017
6018 /* Free bitmaps. */
6019 sbitmap_vector_free (npi.nonnull_local);
6020 sbitmap_vector_free (npi.nonnull_killed);
6021 sbitmap_vector_free (nonnull_avin);
6022 sbitmap_vector_free (nonnull_avout);
6023
6024 return something_changed;
6025 }
6026
6027 /* Code Hoisting variables and subroutines. */
6028
6029 /* Very busy expressions. */
6030 static sbitmap *hoist_vbein;
6031 static sbitmap *hoist_vbeout;
6032
6033 /* Hoistable expressions. */
6034 static sbitmap *hoist_exprs;
6035
6036 /* Dominator bitmaps. */
6037 dominance_info dominators;
6038
6039 /* ??? We could compute post dominators and run this algorithm in
6040 reverse to perform tail merging, doing so would probably be
6041 more effective than the tail merging code in jump.c.
6042
6043 It's unclear if tail merging could be run in parallel with
6044 code hoisting. It would be nice. */
6045
6046 /* Allocate vars used for code hoisting analysis. */
6047
6048 static void
6049 alloc_code_hoist_mem (n_blocks, n_exprs)
6050 int n_blocks, n_exprs;
6051 {
6052 antloc = sbitmap_vector_alloc (n_blocks, n_exprs);
6053 transp = sbitmap_vector_alloc (n_blocks, n_exprs);
6054 comp = sbitmap_vector_alloc (n_blocks, n_exprs);
6055
6056 hoist_vbein = sbitmap_vector_alloc (n_blocks, n_exprs);
6057 hoist_vbeout = sbitmap_vector_alloc (n_blocks, n_exprs);
6058 hoist_exprs = sbitmap_vector_alloc (n_blocks, n_exprs);
6059 transpout = sbitmap_vector_alloc (n_blocks, n_exprs);
6060 }
6061
6062 /* Free vars used for code hoisting analysis. */
6063
6064 static void
6065 free_code_hoist_mem ()
6066 {
6067 sbitmap_vector_free (antloc);
6068 sbitmap_vector_free (transp);
6069 sbitmap_vector_free (comp);
6070
6071 sbitmap_vector_free (hoist_vbein);
6072 sbitmap_vector_free (hoist_vbeout);
6073 sbitmap_vector_free (hoist_exprs);
6074 sbitmap_vector_free (transpout);
6075
6076 free_dominance_info (dominators);
6077 }
6078
6079 /* Compute the very busy expressions at entry/exit from each block.
6080
6081 An expression is very busy if all paths from a given point
6082 compute the expression. */
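
/* Informally, the fixed point computed below satisfies

       VBEIN(bb)  = ANTLOC(bb) | (VBEOUT(bb) & TRANSP(bb))
       VBEOUT(bb) = intersection over all successors S of VBEIN(S)

   (VBEOUT of the block laid out last, just before the exit block, is
   left empty); these are exactly the sbitmap operations applied in the
   loop below.  */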
6083
6084 static void
6085 compute_code_hoist_vbeinout ()
6086 {
6087 int changed, passes;
6088 basic_block bb;
6089
6090 sbitmap_vector_zero (hoist_vbeout, last_basic_block);
6091 sbitmap_vector_zero (hoist_vbein, last_basic_block);
6092
6093 passes = 0;
6094 changed = 1;
6095
6096 while (changed)
6097 {
6098 changed = 0;
6099
6100 /* We scan the blocks in the reverse order to speed up
6101 the convergence. */
6102 FOR_EACH_BB_REVERSE (bb)
6103 {
6104 changed |= sbitmap_a_or_b_and_c_cg (hoist_vbein[bb->index], antloc[bb->index],
6105 hoist_vbeout[bb->index], transp[bb->index]);
6106 if (bb->next_bb != EXIT_BLOCK_PTR)
6107 sbitmap_intersection_of_succs (hoist_vbeout[bb->index], hoist_vbein, bb->index);
6108 }
6109
6110 passes++;
6111 }
6112
6113 if (gcse_file)
6114 fprintf (gcse_file, "hoisting vbeinout computation: %d passes\n", passes);
6115 }
6116
6117 /* Top level routine to do the dataflow analysis needed by code hoisting. */
6118
6119 static void
6120 compute_code_hoist_data ()
6121 {
6122 compute_local_properties (transp, comp, antloc, &expr_hash_table);
6123 compute_transpout ();
6124 compute_code_hoist_vbeinout ();
6125 dominators = calculate_dominance_info (CDI_DOMINATORS);
6126 if (gcse_file)
6127 fprintf (gcse_file, "\n");
6128 }
6129
6130 /* Determine if the expression identified by EXPR_INDEX would
6131 reach BB unimpaired if it was placed at the end of EXPR_BB.
6132
6133 It's unclear exactly what Muchnick meant by "unimpaired". It seems
6134 to me that the expression must either be computed or transparent in
6135 *every* block in the path(s) from EXPR_BB to BB. Any other definition
6136 would allow the expression to be hoisted out of loops, even if
6137 the expression wasn't a loop invariant.
6138
6139 Contrast this to reachability for PRE where an expression is
6140 considered reachable if *any* path reaches instead of *all*
6141 paths. */
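
/* An illustrative sketch of the transformation this enables (`t' stands
   for the reaching_reg pseudo created in hoist_code):

       if (cond)                       t = a + b;    <- hoisted into the
         x = a + b;        ==>         if (cond)        dominating block
       else                              x = t;
         y = a + b;                     else
                                          y = t;

   Both arms compute a + b and neither kills its operands, so the
   expression reaches both dominated blocks unimpaired from the end of
   the dominating block.  */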
6142
6143 static int
6144 hoist_expr_reaches_here_p (expr_bb, expr_index, bb, visited)
6145 basic_block expr_bb;
6146 int expr_index;
6147 basic_block bb;
6148 char *visited;
6149 {
6150 edge pred;
6151 int visited_allocated_locally = 0;
6152
6153
6154 if (visited == NULL)
6155 {
6156 visited_allocated_locally = 1;
6157 visited = xcalloc (last_basic_block, 1);
6158 }
6159
6160 for (pred = bb->pred; pred != NULL; pred = pred->pred_next)
6161 {
6162 basic_block pred_bb = pred->src;
6163
6164 if (pred->src == ENTRY_BLOCK_PTR)
6165 break;
6166 else if (pred_bb == expr_bb)
6167 continue;
6168 else if (visited[pred_bb->index])
6169 continue;
6170
6171 /* Does this predecessor generate this expression? */
6172 else if (TEST_BIT (comp[pred_bb->index], expr_index))
6173 break;
6174 else if (! TEST_BIT (transp[pred_bb->index], expr_index))
6175 break;
6176
6177 /* Not killed. */
6178 else
6179 {
6180 visited[pred_bb->index] = 1;
6181 if (! hoist_expr_reaches_here_p (expr_bb, expr_index,
6182 pred_bb, visited))
6183 break;
6184 }
6185 }
6186 if (visited_allocated_locally)
6187 free (visited);
6188
6189 return (pred == NULL);
6190 }
6191
6192 /* Actually perform code hoisting. */
6193
6194 static void
6195 hoist_code ()
6196 {
6197 basic_block bb, dominated;
6198 basic_block *domby;
6199 unsigned int domby_len;
6200 unsigned int i,j;
6201 struct expr **index_map;
6202 struct expr *expr;
6203
6204 sbitmap_vector_zero (hoist_exprs, last_basic_block);
6205
6206 /* Compute a mapping from expression number (`bitmap_index') to
6207 hash table entry. */
6208
6209 index_map = (struct expr **) xcalloc (expr_hash_table.n_elems, sizeof (struct expr *));
6210 for (i = 0; i < expr_hash_table.size; i++)
6211 for (expr = expr_hash_table.table[i]; expr != NULL; expr = expr->next_same_hash)
6212 index_map[expr->bitmap_index] = expr;
6213
6214 /* Walk over each basic block looking for potentially hoistable
6215 expressions; nothing gets hoisted from the entry block. */
6216 FOR_EACH_BB (bb)
6217 {
6218 int found = 0;
6219 int insn_inserted_p;
6220
6221 domby_len = get_dominated_by (dominators, bb, &domby);
6222 /* Examine each expression that is very busy at the exit of this
6223 block. These are the potentially hoistable expressions. */
6224 for (i = 0; i < hoist_vbeout[bb->index]->n_bits; i++)
6225 {
6226 int hoistable = 0;
6227
6228 if (TEST_BIT (hoist_vbeout[bb->index], i)
6229 && TEST_BIT (transpout[bb->index], i))
6230 {
6231 /* We've found a potentially hoistable expression, now
6232 we look at every block BB dominates to see if it
6233 computes the expression. */
6234 for (j = 0; j < domby_len; j++)
6235 {
6236 dominated = domby[j];
6237 /* Ignore self dominance. */
6238 if (bb == dominated)
6239 continue;
6240 /* We've found a dominated block; now see if it computes
6241 the busy expression and whether or not moving that
6242 expression to the "beginning" of that block is safe. */
6243 if (!TEST_BIT (antloc[dominated->index], i))
6244 continue;
6245
6246 /* Note if the expression would reach the dominated block
6247 unimpaired if it was placed at the end of BB.
6248
6249 Keep track of how many times this expression is hoistable
6250 from a dominated block into BB. */
6251 if (hoist_expr_reaches_here_p (bb, i, dominated, NULL))
6252 hoistable++;
6253 }
6254
6255 /* If we found more than one hoistable occurrence of this
6256 expression, then note it in the bitmap of expressions to
6257 hoist. It makes no sense to hoist things which are computed
6258 in only one BB, and doing so tends to pessimize register
6259 allocation. One could increase this value to try harder
6260 to avoid any possible code expansion due to register
6261 allocation issues; however experiments have shown that
6262 the vast majority of hoistable expressions are only movable
6263 from two successors, so raising this threshold is likely
6264 to nullify any benefit we get from code hoisting. */
6265 if (hoistable > 1)
6266 {
6267 SET_BIT (hoist_exprs[bb->index], i);
6268 found = 1;
6269 }
6270 }
6271 }
6272 /* If we found nothing to hoist, then quit now. */
6273 if (! found)
6274 {
6275 free (domby);
6276 continue;
6277 }
6278
6279 /* Loop over all the hoistable expressions. */
6280 for (i = 0; i < hoist_exprs[bb->index]->n_bits; i++)
6281 {
6282 /* We want to insert the expression into BB only once, so
6283 note when we've inserted it. */
6284 insn_inserted_p = 0;
6285
6286 /* These tests should be the same as the tests above. */
6287 if (TEST_BIT (hoist_vbeout[bb->index], i))
6288 {
6289 /* We've found a potentially hoistable expression, now
6290 we look at every block BB dominates to see if it
6291 computes the expression. */
6292 for (j = 0; j < domby_len; j++)
6293 {
6294 dominated = domby[j];
6295 /* Ignore self dominance. */
6296 if (bb == dominated)
6297 continue;
6298
6299 /* We've found a dominated block; now see if it computes
6300 the busy expression and whether or not moving that
6301 expression to the "beginning" of that block is safe. */
6302 if (!TEST_BIT (antloc[dominated->index], i))
6303 continue;
6304
6305 /* The expression is computed in the dominated block and
6306 it would be safe to compute it at the start of the
6307 dominated block. Now we have to determine if the
6308 expression would reach the dominated block if it was
6309 placed at the end of BB. */
6310 if (hoist_expr_reaches_here_p (bb, i, dominated, NULL))
6311 {
6312 struct expr *expr = index_map[i];
6313 struct occr *occr = expr->antic_occr;
6314 rtx insn;
6315 rtx set;
6316
6317 /* Find the right occurrence of this expression. */
6318 while (occr && BLOCK_FOR_INSN (occr->insn) != dominated)
6319 occr = occr->next;
6320
6321 /* Should never happen. */
6322 if (!occr)
6323 abort ();
6324
6325 insn = occr->insn;
6326
6327 set = single_set (insn);
6328 if (! set)
6329 abort ();
6330
6331 /* Create a pseudo-reg to store the result of reaching
6332 expressions into. Get the mode for the new pseudo
6333 from the mode of the original destination pseudo. */
6334 if (expr->reaching_reg == NULL)
6335 expr->reaching_reg
6336 = gen_reg_rtx (GET_MODE (SET_DEST (set)));
6337
6338 gcse_emit_move_after (expr->reaching_reg, SET_DEST (set), insn);
6339 delete_insn (insn);
6340 occr->deleted_p = 1;
6341 if (!insn_inserted_p)
6342 {
6343 insert_insn_end_bb (index_map[i], bb, 0);
6344 insn_inserted_p = 1;
6345 }
6346 }
6347 }
6348 }
6349 }
6350 free (domby);
6351 }
6352
6353 free (index_map);
6354 }
6355
6356 /* Top level routine to perform one code hoisting (aka unification) pass
6357
6358 Return nonzero if a change was made. */
6359
6360 static int
6361 one_code_hoisting_pass ()
6362 {
6363 int changed = 0;
6364
6365 alloc_hash_table (max_cuid, &expr_hash_table, 0);
6366 compute_hash_table (&expr_hash_table);
6367 if (gcse_file)
6368 dump_hash_table (gcse_file, "Code Hoisting Expressions", &expr_hash_table);
6369
6370 if (expr_hash_table.n_elems > 0)
6371 {
6372 alloc_code_hoist_mem (last_basic_block, expr_hash_table.n_elems);
6373 compute_code_hoist_data ();
6374 hoist_code ();
6375 free_code_hoist_mem ();
6376 }
6377
6378 free_hash_table (&expr_hash_table);
6379
6380 return changed;
6381 }
6382
6383 /* Here we provide the things required to do store motion towards
6384 the exit. In order for this to be effective, gcse also needed to
6385 be taught how to move a load when it is killed only by a store to itself.
6386
6387 int i;
6388 float a[10];
6389
6390 void foo(float scale)
6391 {
6392 for (i=0; i<10; i++)
6393 a[i] *= scale;
6394 }
6395
6396 'i' is both loaded and stored to in the loop. Normally, gcse cannot move
6397 the load out since it's live around the loop, and stored at the bottom
6398 of the loop.
6399
6400 The 'Load Motion' referred to and implemented in this file is
6401 an enhancement to gcse which, when using edge-based LCM, recognizes
6402 this situation and allows gcse to move the load out of the loop.
6403
6404 Once gcse has hoisted the load, store motion can then push this
6405 store towards the exit, and we end up with no loads or stores of 'i'
6406 in the loop. */
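
/* Roughly, the combined effect on the example above is (illustrative
   sketch only; `t' stands for the reaching register):

       void foo(float scale)
       {
         int t;
         for (t = 0; t < 10; t++)      <- 'i' kept in a register
           a[t] *= scale;
         i = t;                        <- single store pushed to the exit
       }
 */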
6407
6408 /* This will search the ldst list for a matching expression. If it
6409 doesn't find one, we create one and initialize it. */
6410
6411 static struct ls_expr *
6412 ldst_entry (x)
6413 rtx x;
6414 {
6415 struct ls_expr * ptr;
6416
6417 for (ptr = first_ls_expr(); ptr != NULL; ptr = next_ls_expr (ptr))
6418 if (expr_equiv_p (ptr->pattern, x))
6419 break;
6420
6421 if (!ptr)
6422 {
6423 ptr = (struct ls_expr *) xmalloc (sizeof (struct ls_expr));
6424
6425 ptr->next = pre_ldst_mems;
6426 ptr->expr = NULL;
6427 ptr->pattern = x;
6428 ptr->loads = NULL_RTX;
6429 ptr->stores = NULL_RTX;
6430 ptr->reaching_reg = NULL_RTX;
6431 ptr->invalid = 0;
6432 ptr->index = 0;
6433 ptr->hash_index = 0;
6434 pre_ldst_mems = ptr;
6435 }
6436
6437 return ptr;
6438 }
6439
6440 /* Free up an individual ldst entry. */
6441
6442 static void
6443 free_ldst_entry (ptr)
6444 struct ls_expr * ptr;
6445 {
6446 free_INSN_LIST_list (& ptr->loads);
6447 free_INSN_LIST_list (& ptr->stores);
6448
6449 free (ptr);
6450 }
6451
6452 /* Free up all memory associated with the ldst list. */
6453
6454 static void
6455 free_ldst_mems ()
6456 {
6457 while (pre_ldst_mems)
6458 {
6459 struct ls_expr * tmp = pre_ldst_mems;
6460
6461 pre_ldst_mems = pre_ldst_mems->next;
6462
6463 free_ldst_entry (tmp);
6464 }
6465
6466 pre_ldst_mems = NULL;
6467 }
6468
6469 /* Dump debugging info about the ldst list. */
6470
6471 static void
6472 print_ldst_list (file)
6473 FILE * file;
6474 {
6475 struct ls_expr * ptr;
6476
6477 fprintf (file, "LDST list: \n");
6478
6479 for (ptr = first_ls_expr(); ptr != NULL; ptr = next_ls_expr (ptr))
6480 {
6481 fprintf (file, " Pattern (%3d): ", ptr->index);
6482
6483 print_rtl (file, ptr->pattern);
6484
6485 fprintf (file, "\n Loads : ");
6486
6487 if (ptr->loads)
6488 print_rtl (file, ptr->loads);
6489 else
6490 fprintf (file, "(nil)");
6491
6492 fprintf (file, "\n Stores : ");
6493
6494 if (ptr->stores)
6495 print_rtl (file, ptr->stores);
6496 else
6497 fprintf (file, "(nil)");
6498
6499 fprintf (file, "\n\n");
6500 }
6501
6502 fprintf (file, "\n");
6503 }
6504
6505 /* Return the ldst list entry for X, or NULL if X is not in the list or has been invalidated. */
6506
6507 static struct ls_expr *
6508 find_rtx_in_ldst (x)
6509 rtx x;
6510 {
6511 struct ls_expr * ptr;
6512
6513 for (ptr = pre_ldst_mems; ptr != NULL; ptr = ptr->next)
6514 if (expr_equiv_p (ptr->pattern, x) && ! ptr->invalid)
6515 return ptr;
6516
6517 return NULL;
6518 }
6519
6520 /* Assign each element of the list of mems a monotonically increasing value. */
6521
6522 static int
6523 enumerate_ldsts ()
6524 {
6525 struct ls_expr * ptr;
6526 int n = 0;
6527
6528 for (ptr = pre_ldst_mems; ptr != NULL; ptr = ptr->next)
6529 ptr->index = n++;
6530
6531 return n;
6532 }
6533
6534 /* Return first item in the list. */
6535
6536 static inline struct ls_expr *
6537 first_ls_expr ()
6538 {
6539 return pre_ldst_mems;
6540 }
6541
6542 /* Return the next item in the list after the specified one. */
6543
6544 static inline struct ls_expr *
6545 next_ls_expr (ptr)
6546 struct ls_expr * ptr;
6547 {
6548 return ptr->next;
6549 }
6550
6551 /* Load Motion for loads which only kill themselves. */
6552
6553 /* Return true if x is a simple MEM operation, with no registers or
6554 side effects. These are the types of loads we consider for the
6555 ld_motion list; otherwise we let the usual aliasing take care of it. */
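
/* For instance (informal examples): a fixed-address reference such as
   (mem (symbol_ref "a")) qualifies, while (mem (reg)) does not because
   its address can vary, and any volatile or BLKmode MEM is rejected
   outright.  */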
6556
6557 static int
6558 simple_mem (x)
6559 rtx x;
6560 {
6561 if (GET_CODE (x) != MEM)
6562 return 0;
6563
6564 if (MEM_VOLATILE_P (x))
6565 return 0;
6566
6567 if (GET_MODE (x) == BLKmode)
6568 return 0;
6569
6570 if (!rtx_varies_p (XEXP (x, 0), 0))
6571 return 1;
6572
6573 return 0;
6574 }
6575
6576 /* Make sure there isn't a buried reference in this pattern anywhere.
6577 If there is, invalidate the entry for it since we're not capable
6578 of fixing it up just yet. We have to be sure we know about ALL
6579 loads since the aliasing code will allow all entries in the
6580 ld_motion list to not-alias themselves. If we miss a load, we will get
6581 the wrong value since gcse might common it and we won't know to
6582 fix it up. */
6583
6584 static void
6585 invalidate_any_buried_refs (x)
6586 rtx x;
6587 {
6588 const char * fmt;
6589 int i, j;
6590 struct ls_expr * ptr;
6591
6592 /* Invalidate it in the list. */
6593 if (GET_CODE (x) == MEM && simple_mem (x))
6594 {
6595 ptr = ldst_entry (x);
6596 ptr->invalid = 1;
6597 }
6598
6599 /* Recursively process the insn. */
6600 fmt = GET_RTX_FORMAT (GET_CODE (x));
6601
6602 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
6603 {
6604 if (fmt[i] == 'e')
6605 invalidate_any_buried_refs (XEXP (x, i));
6606 else if (fmt[i] == 'E')
6607 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
6608 invalidate_any_buried_refs (XVECEXP (x, i, j));
6609 }
6610 }
6611
6612 /* Find all the 'simple' MEMs which are used in LOADs and STORES. 'Simple'
6613 means MEM loads and stores to symbols, with no
6614 side effects and no registers in the expression. Any
6615 uses/defs which don't match these criteria are invalidated and
6616 trimmed out later. */
6617
6618 static void
6619 compute_ld_motion_mems ()
6620 {
6621 struct ls_expr * ptr;
6622 basic_block bb;
6623 rtx insn;
6624
6625 pre_ldst_mems = NULL;
6626
6627 FOR_EACH_BB (bb)
6628 {
6629 for (insn = bb->head;
6630 insn && insn != NEXT_INSN (bb->end);
6631 insn = NEXT_INSN (insn))
6632 {
6633 if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
6634 {
6635 if (GET_CODE (PATTERN (insn)) == SET)
6636 {
6637 rtx src = SET_SRC (PATTERN (insn));
6638 rtx dest = SET_DEST (PATTERN (insn));
6639
6640 /* Check for a simple LOAD... */
6641 if (GET_CODE (src) == MEM && simple_mem (src))
6642 {
6643 ptr = ldst_entry (src);
6644 if (GET_CODE (dest) == REG)
6645 ptr->loads = alloc_INSN_LIST (insn, ptr->loads);
6646 else
6647 ptr->invalid = 1;
6648 }
6649 else
6650 {
6651 /* Make sure there isn't a buried load somewhere. */
6652 invalidate_any_buried_refs (src);
6653 }
6654
6655 /* Check for stores. Don't worry about aliased ones; they
6656 will block any movement we might do later. We only care
6657 about this exact pattern since those are the only
6658 circumstance in which we will ignore the aliasing info. */
6659 if (GET_CODE (dest) == MEM && simple_mem (dest))
6660 {
6661 ptr = ldst_entry (dest);
6662
6663 if (GET_CODE (src) != MEM
6664 && GET_CODE (src) != ASM_OPERANDS)
6665 ptr->stores = alloc_INSN_LIST (insn, ptr->stores);
6666 else
6667 ptr->invalid = 1;
6668 }
6669 }
6670 else
6671 invalidate_any_buried_refs (PATTERN (insn));
6672 }
6673 }
6674 }
6675 }
6676
6677 /* Remove any references that have been either invalidated or are not in the
6678 expression list for pre gcse. */
6679
6680 static void
6681 trim_ld_motion_mems ()
6682 {
6683 struct ls_expr * last = NULL;
6684 struct ls_expr * ptr = first_ls_expr ();
6685
6686 while (ptr != NULL)
6687 {
6688 int del = ptr->invalid;
6689 struct expr * expr = NULL;
6690
6691 /* Delete if entry has been made invalid. */
6692 if (!del)
6693 {
6694 unsigned int i;
6695
6696 del = 1;
6697 /* Delete if we cannot find this mem in the expression list. */
6698 for (i = 0; i < expr_hash_table.size && del; i++)
6699 {
6700 for (expr = expr_hash_table.table[i];
6701 expr != NULL;
6702 expr = expr->next_same_hash)
6703 if (expr_equiv_p (expr->expr, ptr->pattern))
6704 {
6705 del = 0;
6706 break;
6707 }
6708 }
6709 }
6710
6711 if (del)
6712 {
6713 if (last != NULL)
6714 {
6715 last->next = ptr->next;
6716 free_ldst_entry (ptr);
6717 ptr = last->next;
6718 }
6719 else
6720 {
6721 pre_ldst_mems = pre_ldst_mems->next;
6722 free_ldst_entry (ptr);
6723 ptr = pre_ldst_mems;
6724 }
6725 }
6726 else
6727 {
6728 /* Set the expression field if we are keeping it. */
6729 last = ptr;
6730 ptr->expr = expr;
6731 ptr = ptr->next;
6732 }
6733 }
6734
6735 /* Show the world what we've found. */
6736 if (gcse_file && pre_ldst_mems != NULL)
6737 print_ldst_list (gcse_file);
6738 }
6739
6740 /* This routine will take an expression which we are replacing with
6741 a reaching register, and update any stores that are needed if
6742 that expression is in the ld_motion list. Stores are updated by
6743 copying their SRC to the reaching register, and then storing
6744 the reaching register into the store location. This keeps the
6745 correct value in the reaching register for the loads. */
6746
6747 static void
6748 update_ld_motion_stores (expr)
6749 struct expr * expr;
6750 {
6751 struct ls_expr * mem_ptr;
6752
6753 if ((mem_ptr = find_rtx_in_ldst (expr->expr)))
6754 {
6755 /* We can try to find just the REACHED stores, but it shouldn't
6756 matter if we set the reaching reg everywhere... some might be
6757 dead and should be eliminated later. */
6758
6759 /* We replace SET mem = expr with
6760 SET reg = expr
6761 SET mem = reg , where reg is the
6762 reaching reg used in the load. */
6763 rtx list = mem_ptr->stores;
6764
6765 for ( ; list != NULL_RTX; list = XEXP (list, 1))
6766 {
6767 rtx insn = XEXP (list, 0);
6768 rtx pat = PATTERN (insn);
6769 rtx src = SET_SRC (pat);
6770 rtx reg = expr->reaching_reg;
6771 rtx copy, new;
6772
6773 /* If we've already copied it, continue. */
6774 if (expr->reaching_reg == src)
6775 continue;
6776
6777 if (gcse_file)
6778 {
6779 fprintf (gcse_file, "PRE: store updated with reaching reg ");
6780 print_rtl (gcse_file, expr->reaching_reg);
6781 fprintf (gcse_file, ":\n ");
6782 print_inline_rtx (gcse_file, insn, 8);
6783 fprintf (gcse_file, "\n");
6784 }
6785
6786 copy = gen_move_insn (reg, SET_SRC (pat));
6787 new = emit_insn_before (copy, insn);
6788 record_one_set (REGNO (reg), new);
6789 SET_SRC (pat) = reg;
6790
6791 /* un-recognize this pattern since it's probably different now. */
6792 INSN_CODE (insn) = -1;
6793 gcse_create_count++;
6794 }
6795 }
6796 }
6797
6798 /* Store motion code. */
6799
6800 /* This is used to communicate the target bitvector we want to use in the
6801 reg_set_info routine when called via the note_stores mechanism. */
6802 static sbitmap * regvec;
6803
6804 /* Used in computing the reverse edge graph bit vectors. */
6805 static sbitmap * st_antloc;
6806
6807 /* Global holding the number of store expressions we are dealing with. */
6808 static int num_stores;
6809
6810 /* Check to see if we need to mark a register set. Called from note_stores. */
6811
6812 static void
6813 reg_set_info (dest, setter, data)
6814 rtx dest, setter ATTRIBUTE_UNUSED;
6815 void * data ATTRIBUTE_UNUSED;
6816 {
6817 if (GET_CODE (dest) == SUBREG)
6818 dest = SUBREG_REG (dest);
6819
6820 if (GET_CODE (dest) == REG)
6821 SET_BIT (*regvec, REGNO (dest));
6822 }
6823
6824 /* Return nonzero if the register operands of expression X are not killed
6825 anywhere in basic block BB. */
6826
6827 static int
6828 store_ops_ok (x, bb)
6829 rtx x;
6830 basic_block bb;
6831 {
6832 int i;
6833 enum rtx_code code;
6834 const char * fmt;
6835
6836 /* Repeat is used to turn tail-recursion into iteration. */
6837 repeat:
6838
6839 if (x == 0)
6840 return 1;
6841
6842 code = GET_CODE (x);
6843 switch (code)
6844 {
6845 case REG:
6846 /* If a reg has been set anywhere in this
6847 block, the operand has been killed. */
6848 return ! TEST_BIT (reg_set_in_block[bb->index], REGNO (x));
6849
6850 case MEM:
6851 x = XEXP (x, 0);
6852 goto repeat;
6853
6854 case PRE_DEC:
6855 case PRE_INC:
6856 case POST_DEC:
6857 case POST_INC:
6858 return 0;
6859
6860 case PC:
6861 case CC0: /*FIXME*/
6862 case CONST:
6863 case CONST_INT:
6864 case CONST_DOUBLE:
6865 case CONST_VECTOR:
6866 case SYMBOL_REF:
6867 case LABEL_REF:
6868 case ADDR_VEC:
6869 case ADDR_DIFF_VEC:
6870 return 1;
6871
6872 default:
6873 break;
6874 }
6875
6876 i = GET_RTX_LENGTH (code) - 1;
6877 fmt = GET_RTX_FORMAT (code);
6878
6879 for (; i >= 0; i--)
6880 {
6881 if (fmt[i] == 'e')
6882 {
6883 rtx tem = XEXP (x, i);
6884
6885 /* If we are about to do the last recursive call
6886 needed at this level, change it into iteration.
6887 This function is called enough to be worth it. */
6888 if (i == 0)
6889 {
6890 x = tem;
6891 goto repeat;
6892 }
6893
6894 if (! store_ops_ok (tem, bb))
6895 return 0;
6896 }
6897 else if (fmt[i] == 'E')
6898 {
6899 int j;
6900
6901 for (j = 0; j < XVECLEN (x, i); j++)
6902 {
6903 if (! store_ops_ok (XVECEXP (x, i, j), bb))
6904 return 0;
6905 }
6906 }
6907 }
6908
6909 return 1;
6910 }
6911
6912 /* Determine whether INSN is a MEM store pattern that we will consider moving. */
6913
6914 static void
6915 find_moveable_store (insn)
6916 rtx insn;
6917 {
6918 struct ls_expr * ptr;
6919 rtx dest = PATTERN (insn);
6920
6921 if (GET_CODE (dest) != SET
6922 || GET_CODE (SET_SRC (dest)) == ASM_OPERANDS)
6923 return;
6924
6925 dest = SET_DEST (dest);
6926
6927 if (GET_CODE (dest) != MEM || MEM_VOLATILE_P (dest)
6928 || GET_MODE (dest) == BLKmode)
6929 return;
6930
6931 if (GET_CODE (XEXP (dest, 0)) != SYMBOL_REF)
6932 return;
6933
6934 if (rtx_varies_p (XEXP (dest, 0), 0))
6935 return;
6936
6937 ptr = ldst_entry (dest);
6938 ptr->stores = alloc_INSN_LIST (insn, ptr->stores);
6939 }
6940
6941 /* Perform store motion. Much like gcse, except we move expressions the
6942 other way by looking at the flowgraph in reverse. */
6943
6944 static int
6945 compute_store_table ()
6946 {
6947 int ret;
6948 basic_block bb;
6949 unsigned regno;
6950 rtx insn, pat;
6951
6952 max_gcse_regno = max_reg_num ();
6953
6954 reg_set_in_block = (sbitmap *) sbitmap_vector_alloc (last_basic_block,
6955 max_gcse_regno);
6956 sbitmap_vector_zero (reg_set_in_block, last_basic_block);
6957 pre_ldst_mems = 0;
6958
6959 /* Find all the stores we care about. */
6960 FOR_EACH_BB (bb)
6961 {
6962 regvec = & (reg_set_in_block[bb->index]);
6963 for (insn = bb->end;
6964 insn && insn != PREV_INSN (bb->end);
6965 insn = PREV_INSN (insn))
6966 {
6967 /* Ignore anything that is not a normal insn. */
6968 if (! INSN_P (insn))
6969 continue;
6970
6971 if (GET_CODE (insn) == CALL_INSN)
6972 {
6973 bool clobbers_all = false;
6974 #ifdef NON_SAVING_SETJMP
6975 if (NON_SAVING_SETJMP
6976 && find_reg_note (insn, REG_SETJMP, NULL_RTX))
6977 clobbers_all = true;
6978 #endif
6979
6980 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
6981 if (clobbers_all
6982 || TEST_HARD_REG_BIT (regs_invalidated_by_call, regno))
6983 SET_BIT (reg_set_in_block[bb->index], regno);
6984 }
6985
6986 pat = PATTERN (insn);
6987 note_stores (pat, reg_set_info, NULL);
6988
6989 /* Now that we've marked regs, look for stores. */
6990 if (GET_CODE (pat) == SET)
6991 find_moveable_store (insn);
6992 }
6993 }
6994
6995 ret = enumerate_ldsts ();
6996
6997 if (gcse_file)
6998 {
6999 fprintf (gcse_file, "Store Motion Expressions.\n");
7000 print_ldst_list (gcse_file);
7001 }
7002
7003 return ret;
7004 }
7005
7006 /* Check to see if the load X is aliased with STORE_PATTERN. */
7007
7008 static int
7009 load_kills_store (x, store_pattern)
7010 rtx x, store_pattern;
7011 {
7012 if (true_dependence (x, GET_MODE (x), store_pattern, rtx_addr_varies_p))
7013 return 1;
7014 return 0;
7015 }
7016
7017 /* Go through the entire insn X, looking for any loads which might alias
7018 STORE_PATTERN. Return 1 if found. */
7019
7020 static int
7021 find_loads (x, store_pattern)
7022 rtx x, store_pattern;
7023 {
7024 const char * fmt;
7025 int i, j;
7026 int ret = 0;
7027
7028 if (!x)
7029 return 0;
7030
7031 if (GET_CODE (x) == SET)
7032 x = SET_SRC (x);
7033
7034 if (GET_CODE (x) == MEM)
7035 {
7036 if (load_kills_store (x, store_pattern))
7037 return 1;
7038 }
7039
7040 /* Recursively process the insn. */
7041 fmt = GET_RTX_FORMAT (GET_CODE (x));
7042
7043 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0 && !ret; i--)
7044 {
7045 if (fmt[i] == 'e')
7046 ret |= find_loads (XEXP (x, i), store_pattern);
7047 else if (fmt[i] == 'E')
7048 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
7049 ret |= find_loads (XVECEXP (x, i, j), store_pattern);
7050 }
7051 return ret;
7052 }
7053
7054 /* Check if INSN kills the store pattern X (is aliased with it).
7055 Return 1 if it does. */
7056
7057 static int
7058 store_killed_in_insn (x, insn)
7059 rtx x, insn;
7060 {
7061 if (GET_RTX_CLASS (GET_CODE (insn)) != 'i')
7062 return 0;
7063
7064 if (GET_CODE (insn) == CALL_INSN)
7065 {
7066 /* A normal or pure call might read from pattern,
7067 but a const call will not. */
7068 return ! CONST_OR_PURE_CALL_P (insn) || pure_call_p (insn);
7069 }
7070
7071 if (GET_CODE (PATTERN (insn)) == SET)
7072 {
7073 rtx pat = PATTERN (insn);
7074 /* Check for memory stores to aliased objects. */
7075 if (GET_CODE (SET_DEST (pat)) == MEM && !expr_equiv_p (SET_DEST (pat), x))
7076 /* Pretend it's a load and check for aliasing. */
7077 if (find_loads (SET_DEST (pat), x))
7078 return 1;
7079 return find_loads (SET_SRC (pat), x);
7080 }
7081 else
7082 return find_loads (PATTERN (insn), x);
7083 }
7084
7085 /* Returns 1 if the expression X is loaded or clobbered on or after INSN
7086 within basic block BB. */
7087
7088 static int
7089 store_killed_after (x, insn, bb)
7090 rtx x, insn;
7091 basic_block bb;
7092 {
7093 rtx last = bb->end;
7094
7095 if (insn == last)
7096 return 0;
7097
7098 /* Check if the register operands of the store are OK in this block.
7099 Note that if registers are changed ANYWHERE in the block, we'll
7100 decide we can't move it, regardless of whether it changed above
7101 or below the store. This could be improved by checking the register
7102 operands while looking for aliasing in each insn. */
7103 if (!store_ops_ok (XEXP (x, 0), bb))
7104 return 1;
7105
7106 for ( ; insn && insn != NEXT_INSN (last); insn = NEXT_INSN (insn))
7107 if (store_killed_in_insn (x, insn))
7108 return 1;
7109
7110 return 0;
7111 }
7112
7113 /* Returns 1 if the expression X is loaded or clobbered on or before INSN
7114 within basic block BB. */
7115 static int
7116 store_killed_before (x, insn, bb)
7117 rtx x, insn;
7118 basic_block bb;
7119 {
7120 rtx first = bb->head;
7121
7122 if (insn == first)
7123 return store_killed_in_insn (x, insn);
7124
7125 /* Check if the register operands of the store are OK in this block.
7126 Note that if registers are changed ANYWHERE in the block, we'll
7127 decide we can't move it, regardless of whether it changed above
7128 or below the store. This could be improved by checking the register
7129 operands while looking for aliasing in each insn. */
7130 if (!store_ops_ok (XEXP (x, 0), bb))
7131 return 1;
7132
7133 for ( ; insn && insn != PREV_INSN (first); insn = PREV_INSN (insn))
7134 if (store_killed_in_insn (x, insn))
7135 return 1;
7136
7137 return 0;
7138 }
7139
7140 #define ANTIC_STORE_LIST(x) ((x)->loads)
7141 #define AVAIL_STORE_LIST(x) ((x)->stores)
7142
7143 /* Given the table of available store insns at the end of blocks,
7144 determine which ones are not killed by aliasing, and generate
7145 the appropriate vectors for gen and killed. */
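
/* Informally, for a store expression S and block B the vectors built
   here mean:

       ae_gen[B]      a store to S occurs in B and nothing after it in B
                      loads from or clobbers S (S is available out of B)
       st_antloc[B]   a store to S occurs in B and nothing before it in B
                      loads from or clobbers S (S is anticipatable in B)
       ae_kill[B]     some insn in B may load from or clobber S, or sets
                      a register used in S's address
       transp[B]      nothing in B touches S at all

   These drive the store-sinking analysis, which looks at the flow graph
   in reverse, just as the expression vectors drive PRE.  */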
7146 static void
7147 build_store_vectors ()
7148 {
7149 basic_block bb, b;
7150 rtx insn, st;
7151 struct ls_expr * ptr;
7152
7153 /* Build the gen_vector. This is any store in the table which is not killed
7154 by aliasing later in its block. */
7155 ae_gen = (sbitmap *) sbitmap_vector_alloc (last_basic_block, num_stores);
7156 sbitmap_vector_zero (ae_gen, last_basic_block);
7157
7158 st_antloc = (sbitmap *) sbitmap_vector_alloc (last_basic_block, num_stores);
7159 sbitmap_vector_zero (st_antloc, last_basic_block);
7160
7161 for (ptr = first_ls_expr (); ptr != NULL; ptr = next_ls_expr (ptr))
7162 {
7163 /* Put all the stores into either the antic list, or the avail list,
7164 or both. */
7165 rtx store_list = ptr->stores;
7166 ptr->stores = NULL_RTX;
7167
7168 for (st = store_list; st != NULL; st = XEXP (st, 1))
7169 {
7170 insn = XEXP (st, 0);
7171 bb = BLOCK_FOR_INSN (insn);
7172
7173 if (!store_killed_after (ptr->pattern, insn, bb))
7174 {
7175 /* If we've already seen an available expression in this block,
7176 we can delete the one we saw already (it occurs earlier in
7177 the block) and replace it with this one. We'll copy the
7178 old SRC expression to an unused register in case there
7179 are any side effects. */
7180 if (TEST_BIT (ae_gen[bb->index], ptr->index))
7181 {
7182 /* Find previous store. */
7183 rtx st;
7184 for (st = AVAIL_STORE_LIST (ptr); st ; st = XEXP (st, 1))
7185 if (BLOCK_FOR_INSN (XEXP (st, 0)) == bb)
7186 break;
7187 if (st)
7188 {
7189 rtx r = gen_reg_rtx (GET_MODE (ptr->pattern));
7190 if (gcse_file)
7191 fprintf (gcse_file, "Removing redundant store:\n");
7192 replace_store_insn (r, XEXP (st, 0), bb);
7193 XEXP (st, 0) = insn;
7194 continue;
7195 }
7196 }
7197 SET_BIT (ae_gen[bb->index], ptr->index);
7198 AVAIL_STORE_LIST (ptr) = alloc_INSN_LIST (insn,
7199 AVAIL_STORE_LIST (ptr));
7200 }
7201
7202 if (!store_killed_before (ptr->pattern, insn, bb))
7203 {
7204 SET_BIT (st_antloc[BLOCK_NUM (insn)], ptr->index);
7205 ANTIC_STORE_LIST (ptr) = alloc_INSN_LIST (insn,
7206 ANTIC_STORE_LIST (ptr));
7207 }
7208 }
7209
7210 /* Free the original list of store insns. */
7211 free_INSN_LIST_list (&store_list);
7212 }
7213
7214 ae_kill = (sbitmap *) sbitmap_vector_alloc (last_basic_block, num_stores);
7215 sbitmap_vector_zero (ae_kill, last_basic_block);
7216
7217 transp = (sbitmap *) sbitmap_vector_alloc (last_basic_block, num_stores);
7218 sbitmap_vector_zero (transp, last_basic_block);
7219
7220 for (ptr = first_ls_expr (); ptr != NULL; ptr = next_ls_expr (ptr))
7221 FOR_EACH_BB (b)
7222 {
7223 if (store_killed_after (ptr->pattern, b->head, b))
7224 {
7225 /* The anticipatable expression is not killed if it's gen'd. */
7226 /*
7227 We leave this check out for now. If we have a code sequence
7228 in a block which looks like:
7229 ST MEMa = x
7230 L y = MEMa
7231 ST MEMa = z
7232 We should flag this as having an ANTIC expression, NOT
7233 transparent, NOT killed, and AVAIL.
7234 Unfortunately, since we haven't re-written all loads to
7235 use the reaching reg, we'll end up doing an incorrect
7236 Load in the middle here if we push the store down. It happens in
7237 gcc.c-torture/execute/960311-1.c with -O3
7238 If we always kill it in this case, we'll sometimes do
7239 unnecessary work, but it shouldn't actually hurt anything.
7240 if (!TEST_BIT (ae_gen[b], ptr->index)). */
7241 SET_BIT (ae_kill[b->index], ptr->index);
7242 }
7243 else
7244 SET_BIT (transp[b->index], ptr->index);
7245 }
7246
7247 /* Any block with no exits calls some non-returning function, so
7248 we better mark the store killed here, or we might not store to
7249 it at all. If we knew it was abort, we wouldn't have to store,
7250 but we don't know that for sure. */
7251 if (gcse_file)
7252 {
7253 fprintf (gcse_file, "ST_avail and ST_antic (shown under loads..)\n");
7254 print_ldst_list (gcse_file);
7255 dump_sbitmap_vector (gcse_file, "st_antloc", "", st_antloc, last_basic_block);
7256 dump_sbitmap_vector (gcse_file, "st_kill", "", ae_kill, last_basic_block);
7257 dump_sbitmap_vector (gcse_file, "Transpt", "", transp, last_basic_block);
7258 dump_sbitmap_vector (gcse_file, "st_avloc", "", ae_gen, last_basic_block);
7259 }
7260 }
7261
7262 /* Insert an instruction at the beginning of a basic block, and update
7263 the BLOCK_HEAD if needed. */
7264
7265 static void
7266 insert_insn_start_bb (insn, bb)
7267 rtx insn;
7268 basic_block bb;
7269 {
7270 /* Insert at start of successor block. */
7271 rtx prev = PREV_INSN (bb->head);
7272 rtx before = bb->head;
7273 while (before != 0)
7274 {
7275 if (GET_CODE (before) != CODE_LABEL
7276 && (GET_CODE (before) != NOTE
7277 || NOTE_LINE_NUMBER (before) != NOTE_INSN_BASIC_BLOCK))
7278 break;
7279 prev = before;
7280 if (prev == bb->end)
7281 break;
7282 before = NEXT_INSN (before);
7283 }
7284
7285 insn = emit_insn_after (insn, prev);
7286
7287 if (gcse_file)
7288 {
7289 fprintf (gcse_file, "STORE_MOTION insert store at start of BB %d:\n",
7290 bb->index);
7291 print_inline_rtx (gcse_file, insn, 6);
7292 fprintf (gcse_file, "\n");
7293 }
7294 }
7295
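/* A compiled-out sketch of the scan in insert_insn_start_bb: walk forward
   from the block head, stepping over the CODE_LABEL and the
   NOTE_INSN_BASIC_BLOCK note, and remember the insn after which the new
   store should be emitted.  The node type and kinds below are simplified
   stand-ins for the rtl insn chain, not the real representation.  */
#if 0
#include <stdio.h>

enum node_kind { NK_LABEL, NK_BB_NOTE, NK_INSN };
struct node { enum node_kind kind; const char *name; struct node *next; };

/* Return the node after which an insn inserted "at the start" of the block
   should go: past any leading label or basic-block note, but before the
   first real insn.  Returns NULL when the block head is already a real
   insn, in which case the caller would emit before the head.  */
static struct node *
start_of_block_anchor (struct node *head, struct node *end)
{
  struct node *prev = NULL;
  struct node *before = head;

  while (before != NULL)
    {
      if (before->kind != NK_LABEL && before->kind != NK_BB_NOTE)
        break;
      prev = before;
      if (prev == end)
        break;
      before = before->next;
    }
  return prev;
}

int
main (void)
{
  struct node i2 = { NK_INSN, "first real insn", NULL };
  struct node i1 = { NK_BB_NOTE, "basic block note", &i2 };
  struct node i0 = { NK_LABEL, "label", &i1 };
  struct node *anchor = start_of_block_anchor (&i0, &i2);

  /* Prints "emit after: basic block note", mirroring
     emit_insn_after (insn, prev) above.  */
  printf ("emit after: %s\n", anchor ? anchor->name : "(before block head)");
  return 0;
}
#endif
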
/* This routine will insert a store on an edge.  EXPR is the ldst entry for
   the memory reference, and E is the edge to insert it on.  Returns nonzero
   if an edge insertion was performed.  */

static int
insert_store (expr, e)
     struct ls_expr * expr;
     edge e;
{
  rtx reg, insn;
  basic_block bb;
  edge tmp;

  /* We did all the deletes before this insert, so if we didn't delete a
     store, then we haven't set the reaching reg yet either.  */
  if (expr->reaching_reg == NULL_RTX)
    return 0;

  reg = expr->reaching_reg;
  insn = gen_move_insn (expr->pattern, reg);

  /* If we are inserting this expression on ALL predecessor edges of a BB,
     insert it at the start of the BB instead, and reset the insert bits on
     those edges so we don't try to insert it on them again.  */
  bb = e->dest;
  for (tmp = e->dest->pred; tmp; tmp = tmp->pred_next)
    {
      int index = EDGE_INDEX (edge_list, tmp->src, tmp->dest);
      if (index == EDGE_INDEX_NO_EDGE)
        abort ();
      if (! TEST_BIT (pre_insert_map[index], expr->index))
        break;
    }

  /* If TMP is NULL, we found an insertion on every edge: blank the
     insertion vector for these edges, and insert at the start of the BB.  */
  if (!tmp && bb != EXIT_BLOCK_PTR)
    {
      for (tmp = e->dest->pred; tmp; tmp = tmp->pred_next)
        {
          int index = EDGE_INDEX (edge_list, tmp->src, tmp->dest);
          RESET_BIT (pre_insert_map[index], expr->index);
        }
      insert_insn_start_bb (insn, bb);
      return 0;
    }

  /* We can't insert on an abnormal edge, so we'll insert at the head of
     the successor block instead.  See Morgan, sec 10.5.  */
  if ((e->flags & EDGE_ABNORMAL) == EDGE_ABNORMAL)
    {
      insert_insn_start_bb (insn, bb);
      return 0;
    }

  insert_insn_on_edge (insn, e);

  if (gcse_file)
    {
      fprintf (gcse_file, "STORE_MOTION insert insn on edge (%d, %d):\n",
               e->src->index, e->dest->index);
      print_inline_rtx (gcse_file, insn, 6);
      fprintf (gcse_file, "\n");
    }

  return 1;
}

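/* A compiled-out sketch of the edge-vs-block-head decision in insert_store:
   when LCM requests the insertion on every incoming edge of a block, one
   copy at the start of the block is equivalent and avoids splitting each
   edge.  The bit mask below stands in for pre_insert_map, and the edge
   counts are made up for illustration.  */
#if 0
#include <stdio.h>

/* Decide where to place the store for one destination block.  SELECTED has
   bit I set when edge I into the block wants the insertion.  Returns the
   number of per-edge insertions actually performed.  */
static int
place_insertions (unsigned int *selected, int num_pred_edges)
{
  unsigned int all = (num_pred_edges >= 32
                      ? ~0u : (1u << num_pred_edges) - 1u);

  if ((*selected & all) == all)
    {
      /* Every predecessor edge was chosen: clear the per-edge requests and
         emit a single store at the start of the block instead.  */
      *selected = 0;
      printf ("insert once at block head\n");
      return 0;
    }
  else
    {
      int i, n = 0;

      for (i = 0; i < num_pred_edges; i++)
        if (*selected & (1u << i))
          {
            printf ("insert on edge %d\n", i);
            n++;
          }
      return n;
    }
}

int
main (void)
{
  unsigned int all_edges = 0x7;   /* edges 0, 1 and 2 all selected */
  unsigned int some_edges = 0x5;  /* only edges 0 and 2 selected   */

  place_insertions (&all_edges, 3);   /* -> insert once at block head   */
  place_insertions (&some_edges, 3);  /* -> insert on edge 0, on edge 2 */
  return 0;
}
#endif
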
/* This routine will replace a store with a SET to a specified register.  */

static void
replace_store_insn (reg, del, bb)
     rtx reg, del;
     basic_block bb;
{
  rtx insn;

  insn = gen_move_insn (reg, SET_SRC (PATTERN (del)));
  insn = emit_insn_after (insn, del);

  if (gcse_file)
    {
      fprintf (gcse_file,
               "STORE_MOTION delete insn in BB %d:\n ", bb->index);
      print_inline_rtx (gcse_file, del, 6);
      fprintf (gcse_file, "\nSTORE MOTION replaced with insn:\n ");
      print_inline_rtx (gcse_file, insn, 6);
      fprintf (gcse_file, "\n");
    }

  delete_insn (del);
}

/* Delete a store, but copy the value that would have been stored into
   the reaching_reg for later storing.  */

static void
delete_store (expr, bb)
     struct ls_expr * expr;
     basic_block bb;
{
  rtx reg, i, del;

  if (expr->reaching_reg == NULL_RTX)
    expr->reaching_reg = gen_reg_rtx (GET_MODE (expr->pattern));

  /* If there is more than 1 store, the earlier ones will be dead,
     but it doesn't hurt to replace them here.  */
  reg = expr->reaching_reg;

  for (i = AVAIL_STORE_LIST (expr); i; i = XEXP (i, 1))
    {
      del = XEXP (i, 0);
      if (BLOCK_FOR_INSN (del) == bb)
        {
          /* We know there is only one since we deleted redundant
             ones during the available computation.  */
          replace_store_insn (reg, del, bb);
          break;
        }
    }
}

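/* A compiled-out sketch of the two-step rewrite that delete_store and
   insert_store perform together: the original store becomes a copy into the
   expression's reaching register, and the real store is re-emitted at the
   insertion points chosen by the reverse LCM problem.  The string-based
   "insns" and the register name are purely illustrative.  */
#if 0
#include <stdio.h>

int
main (void)
{
  const char *pattern = "MEM[a]";    /* expr->pattern             */
  const char *src = "x";             /* SET_SRC of the old store  */
  const char *reaching_reg = "r99";  /* expr->reaching_reg        */

  /* delete_store / replace_store_insn: keep the stored value alive in a
     register where the store used to be.  */
  printf ("deleted:  %s = %s\n", pattern, src);
  printf ("kept:     %s = %s\n", reaching_reg, src);

  /* insert_store: materialize the store from the reaching register on the
     chosen edge (or at the start of the destination block).  */
  printf ("inserted: %s = %s  (at the LCM insertion point)\n",
          pattern, reaching_reg);
  return 0;
}
#endif
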
/* Free memory used by store motion.  */

static void
free_store_memory ()
{
  free_ldst_mems ();

  if (ae_gen)
    sbitmap_vector_free (ae_gen);
  if (ae_kill)
    sbitmap_vector_free (ae_kill);
  if (transp)
    sbitmap_vector_free (transp);
  if (st_antloc)
    sbitmap_vector_free (st_antloc);
  if (pre_insert_map)
    sbitmap_vector_free (pre_insert_map);
  if (pre_delete_map)
    sbitmap_vector_free (pre_delete_map);
  if (reg_set_in_block)
    sbitmap_vector_free (reg_set_in_block);

  ae_gen = ae_kill = transp = st_antloc = NULL;
  pre_insert_map = pre_delete_map = reg_set_in_block = NULL;
}

/* Perform store motion.  Much like gcse, except we move expressions the
   other way by looking at the flowgraph in reverse.  */

static void
store_motion ()
{
  basic_block bb;
  int x;
  struct ls_expr * ptr;
  int update_flow = 0;

  if (gcse_file)
    {
      fprintf (gcse_file, "before store motion\n");
      print_rtl (gcse_file, get_insns ());
    }

  init_alias_analysis ();

  /* Find all the stores that are live to the end of their block.  */
  num_stores = compute_store_table ();
  if (num_stores == 0)
    {
      sbitmap_vector_free (reg_set_in_block);
      end_alias_analysis ();
      return;
    }

  /* Now compute what's actually available to move.  */
  add_noreturn_fake_exit_edges ();
  build_store_vectors ();

  edge_list = pre_edge_rev_lcm (gcse_file, num_stores, transp, ae_gen,
                                st_antloc, ae_kill, &pre_insert_map,
                                &pre_delete_map);

  /* Now we want to insert the new stores which are going to be needed.  */
  for (ptr = first_ls_expr (); ptr != NULL; ptr = next_ls_expr (ptr))
    {
      FOR_EACH_BB (bb)
        if (TEST_BIT (pre_delete_map[bb->index], ptr->index))
          delete_store (ptr, bb);

      for (x = 0; x < NUM_EDGES (edge_list); x++)
        if (TEST_BIT (pre_insert_map[x], ptr->index))
          update_flow |= insert_store (ptr, INDEX_EDGE (edge_list, x));
    }

  if (update_flow)
    commit_edge_insertions ();

  free_store_memory ();
  free_edge_list (edge_list);
  remove_fake_edges ();
  end_alias_analysis ();
}

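/* A compiled-out sketch of the "flowgraph in reverse" remark above: the
   global problems handed to pre_edge_rev_lcm propagate store information
   against the control-flow edges.  The loop below solves a one-bit backward
   anticipatability problem on a tiny straight-line CFG; it is a simplified
   stand-in for the real LCM machinery, not a reimplementation of it.  */
#if 0
#include <stdio.h>
#include <string.h>

#define NBLOCKS 3

int
main (void)
{
  /* Block 1 contains the tracked store; blocks 0 and 2 are transparent.  */
  int antloc[NBLOCKS] = { 0, 1, 0 };
  int transp[NBLOCKS] = { 1, 0, 1 };
  /* succ[b] is the single successor of block B, or -1 for the exit.  */
  int succ[NBLOCKS] = { 1, 2, -1 };

  int antic_in[NBLOCKS], antic_out[NBLOCKS];
  int changed = 1, b;

  memset (antic_in, 0, sizeof antic_in);
  memset (antic_out, 0, sizeof antic_out);

  /* Iterate to a fixed point, visiting blocks last-to-first so information
     flows backward along the edges.  */
  while (changed)
    {
      changed = 0;
      for (b = NBLOCKS - 1; b >= 0; b--)
        {
          int out = (succ[b] >= 0) ? antic_in[succ[b]] : 0;
          int in = antloc[b] | (out & transp[b]);

          if (out != antic_out[b] || in != antic_in[b])
            changed = 1;
          antic_out[b] = out;
          antic_in[b] = in;
        }
    }

  /* Expected: block 0: in=1 out=1, block 1: in=1 out=0, block 2: in=0
     out=0 -- the store is anticipatable from the entry down to the block
     that contains it.  */
  for (b = 0; b < NBLOCKS; b++)
    printf ("block %d: antic_in=%d antic_out=%d\n",
            b, antic_in[b], antic_out[b]);
  return 0;
}
#endif
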
#include "gt-gcse.h"