/* Dead store elimination
   Copyright (C) 2004-2019 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "tree-pass.h"
#include "ssa.h"
#include "gimple-pretty-print.h"
#include "fold-const.h"
#include "gimple-iterator.h"
#include "tree-cfg.h"
#include "tree-dfa.h"
#include "domwalk.h"
#include "tree-cfgcleanup.h"
#include "params.h"
#include "alias.h"
#include "tree-ssa-loop.h"
#include "gimplify.h"

/* This file implements dead store elimination.

   A dead store is a store into a memory location which will later be
   overwritten by another store without any intervening loads.  In this
   case the earlier store can be deleted.

   In our SSA + virtual operand world we use immediate uses of virtual
   operands to detect dead stores.  If a store's virtual definition
   is used precisely once by a later store to the same location which
   post dominates the first store, then the first store is dead.

   The single use of the store's virtual definition ensures that
   there are no intervening aliased loads, and the requirement that
   the second store post dominates the first ensures that if the earlier
   store executes, then the later store will execute before the function
   exits.

   It may help to think of this as first moving the earlier store to
   the point immediately before the later store.  Again, the single
   use of the virtual definition and the post-dominance relationship
   ensure that such movement would be safe.  Clearly if there are
   back-to-back stores, then the second is redundant.

   Reviewing section 10.7.2 in Morgan's "Building an Optimizing Compiler"
   may also help in understanding this code since it discusses the
   relationship between dead store and redundant load elimination.  In
   fact, they are the same transformation applied to different views of
   the CFG.  */
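
/* An illustrative example (not from the original sources): in

     *p_1 = 1;   <-- earlier store, virtual definition used exactly once
     *p_1 = 2;   <-- later store to the same location, post-dominates

   no load of *p_1 intervenes, so the first store is dead and can be
   deleted.  */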


/* Bitmap of blocks that have had EH statements cleaned.  We should
   remove their dead edges eventually.  */
static bitmap need_eh_cleanup;

/* Return value from dse_classify_store.  */
enum dse_store_status
{
  DSE_STORE_LIVE,
  DSE_STORE_MAYBE_PARTIAL_DEAD,
  DSE_STORE_DEAD
};

/* STMT is a statement that may write into memory.  Analyze it and
   initialize WRITE to describe how STMT affects memory.

   Return TRUE if the statement was analyzed, FALSE otherwise.

   It is always safe to return FALSE.  But typically better optimization
   can be achieved by analyzing more statements.  */

static bool
initialize_ao_ref_for_dse (gimple *stmt, ao_ref *write)
{
  /* It's advantageous to handle certain mem* functions.  */
  if (gimple_call_builtin_p (stmt, BUILT_IN_NORMAL))
    {
      switch (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt)))
        {
        case BUILT_IN_MEMCPY:
        case BUILT_IN_MEMMOVE:
        case BUILT_IN_MEMSET:
          {
            tree size = NULL_TREE;
            if (gimple_call_num_args (stmt) == 3)
              size = gimple_call_arg (stmt, 2);
            tree ptr = gimple_call_arg (stmt, 0);
            ao_ref_init_from_ptr_and_size (write, ptr, size);
            return true;
          }
        default:
          break;
        }
    }
  else if (is_gimple_assign (stmt))
    {
      ao_ref_init (write, gimple_assign_lhs (stmt));
      return true;
    }
  return false;
}

/* Given REF from the alias oracle, return TRUE if it is a valid
   memory reference for dead store elimination, false otherwise.

   In particular, the reference must have a known base, known maximum
   size, start at a byte offset and have a size that is one or more
   bytes.  */
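
/* Illustrative examples (assumptions, not from the original sources):
   a store such as s.f = x_1 to a field at a fixed offset produces a ref
   with a known base, offset and size and is accepted here, while a
   memset with a variable length produces a ref whose maximum size is
   unknown and is rejected.  */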

static bool
valid_ao_ref_for_dse (ao_ref *ref)
{
  return (ao_ref_base (ref)
          && known_size_p (ref->max_size)
          && maybe_ne (ref->size, 0)
          && known_eq (ref->max_size, ref->size)
          && known_ge (ref->offset, 0)
          && multiple_p (ref->offset, BITS_PER_UNIT)
          && multiple_p (ref->size, BITS_PER_UNIT));
}

/* Try to normalize COPY (an ao_ref) relative to REF.  Essentially when we are
   done COPY will only refer to bytes found within REF.  Return true if COPY
   is known to intersect at least one byte of REF.  */
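
/* A worked example (illustrative; offsets and sizes are in bits): if REF
   covers bits [64, 192) and COPY covers bits [32, 160), the first clause
   below moves COPY's start up to bit 64 and shrinks its size to 96 bits.
   COPY then ends at bit 160, within REF, so no tail adjustment is needed
   and the function returns true with COPY describing the intersection
   [64, 160).  */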

static bool
normalize_ref (ao_ref *copy, ao_ref *ref)
{
  if (!ordered_p (copy->offset, ref->offset))
    return false;

  /* If COPY starts before REF, then reset the beginning of
     COPY to match REF and decrease the size of COPY by the
     number of bytes removed from COPY.  */
  if (maybe_lt (copy->offset, ref->offset))
    {
      poly_int64 diff = ref->offset - copy->offset;
      if (maybe_le (copy->size, diff))
        return false;
      copy->size -= diff;
      copy->offset = ref->offset;
    }

  poly_int64 diff = copy->offset - ref->offset;
  if (maybe_le (ref->size, diff))
    return false;

  /* If COPY extends beyond REF, chop off its size appropriately.  */
  poly_int64 limit = ref->size - diff;
  if (!ordered_p (limit, copy->size))
    return false;

  if (maybe_gt (copy->size, limit))
    copy->size = limit;
  return true;
}

/* Clear any bytes written by STMT from the bitmap LIVE_BYTES.  The base
   address written by STMT must match the one found in REF, which must
   have its base address previously initialized.

   This routine must be conservative.  If we don't know the offset or
   actual size written, assume nothing was written.  */

static void
clear_bytes_written_by (sbitmap live_bytes, gimple *stmt, ao_ref *ref)
{
  ao_ref write;
  if (!initialize_ao_ref_for_dse (stmt, &write))
    return;

  /* Verify we have the same base memory address, the write
     has a known size and overlaps with REF.  */
  HOST_WIDE_INT start, size;
  if (valid_ao_ref_for_dse (&write)
      && operand_equal_p (write.base, ref->base, OEP_ADDRESS_OF)
      && known_eq (write.size, write.max_size)
      && normalize_ref (&write, ref)
      && (write.offset - ref->offset).is_constant (&start)
      && write.size.is_constant (&size))
    bitmap_clear_range (live_bytes, start / BITS_PER_UNIT,
                        size / BITS_PER_UNIT);
}

/* REF is a memory write.  Extract relevant information from it and
   initialize the LIVE_BYTES bitmap.  If successful, return TRUE.
   Otherwise return FALSE.  */

static bool
setup_live_bytes_from_ref (ao_ref *ref, sbitmap live_bytes)
{
  HOST_WIDE_INT const_size;
  if (valid_ao_ref_for_dse (ref)
      && ref->size.is_constant (&const_size)
      && (const_size / BITS_PER_UNIT
          <= PARAM_VALUE (PARAM_DSE_MAX_OBJECT_SIZE)))
    {
      bitmap_clear (live_bytes);
      bitmap_set_range (live_bytes, 0, const_size / BITS_PER_UNIT);
      return true;
    }
  return false;
}

/* Compute the number of elements that we can trim from the head and
   tail of ORIG resulting in a bitmap that is a superset of LIVE.

   Store the number of elements trimmed from the head and tail in
   TRIM_HEAD and TRIM_TAIL.

   STMT is the statement being trimmed and is used for debugging dump
   output only.  */
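
/* A worked example (illustrative): for a 16-byte store where only bytes
   4..11 are live, last_orig is 15 and last_live is 11, so TRIM_TAIL is 4;
   first_live is 4, so TRIM_HEAD is 4.  On a target with 8-byte words the
   remaining 8 bytes do not exceed a word, so TRIM_HEAD is not rounded
   down to word alignment.  */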

static void
compute_trims (ao_ref *ref, sbitmap live, int *trim_head, int *trim_tail,
               gimple *stmt)
{
  /* We use sbitmaps biased such that ref->offset is bit zero and the bitmap
     extends through ref->size.  So we know that in the original bitmap
     bits 0..ref->size were true.  We don't actually need the bitmap, just
     the REF to compute the trims.  */

  /* Now identify how much, if any of the tail we can chop off.  */
  HOST_WIDE_INT const_size;
  int last_live = bitmap_last_set_bit (live);
  if (ref->size.is_constant (&const_size))
    {
      int last_orig = (const_size / BITS_PER_UNIT) - 1;
      /* We can leave inconvenient amounts on the tail as
         residual handling in mem* and str* functions is usually
         reasonably efficient.  */
      *trim_tail = last_orig - last_live;

      /* But don't trim away out of bounds accesses, as this defeats
         proper warnings.

         We could have a type with no TYPE_SIZE_UNIT or we could have a VLA
         where TYPE_SIZE_UNIT is not a constant.  */
      if (*trim_tail
          && TYPE_SIZE_UNIT (TREE_TYPE (ref->base))
          && TREE_CODE (TYPE_SIZE_UNIT (TREE_TYPE (ref->base))) == INTEGER_CST
          && compare_tree_int (TYPE_SIZE_UNIT (TREE_TYPE (ref->base)),
                               last_orig) <= 0)
        *trim_tail = 0;
    }
  else
    *trim_tail = 0;

  /* Identify how much, if any of the head we can chop off.  */
  int first_orig = 0;
  int first_live = bitmap_first_set_bit (live);
  *trim_head = first_live - first_orig;

  /* If more than a word remains, then make sure to keep the
     starting point at least word aligned.  */
  if (last_live - first_live > UNITS_PER_WORD)
    *trim_head &= ~(UNITS_PER_WORD - 1);

  if ((*trim_head || *trim_tail)
      && dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "  Trimming statement (head = %d, tail = %d): ",
               *trim_head, *trim_tail);
      print_gimple_stmt (dump_file, stmt, 0, dump_flags);
      fprintf (dump_file, "\n");
    }
}

/* STMT initializes an object from COMPLEX_CST where one or more of the
   bytes written may be dead stores.  REF is a representation of the
   memory written.  LIVE is the bitmap of stores that are actually live.

   Attempt to rewrite STMT so that only the real or imaginary part of
   the object is actually stored.  */
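
/* An illustrative example (not from the original sources): for a store

     c = __complex__ (1.0, 2.0);

   where byte tracking shows the imaginary half is dead (trim_tail covers
   half the object), the statement is rewritten to store only

     REALPART_EXPR <c> = 1.0;  */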

static void
maybe_trim_complex_store (ao_ref *ref, sbitmap live, gimple *stmt)
{
  int trim_head, trim_tail;
  compute_trims (ref, live, &trim_head, &trim_tail, stmt);

  /* The amount of data trimmed from the head or tail must be at
     least half the size of the object to ensure we're trimming
     the entire real or imaginary half.  By writing things this
     way we avoid more O(n) bitmap operations.  */
  if (known_ge (trim_tail * 2 * BITS_PER_UNIT, ref->size))
    {
      /* TREE_REALPART is live.  */
      tree x = TREE_REALPART (gimple_assign_rhs1 (stmt));
      tree y = gimple_assign_lhs (stmt);
      y = build1 (REALPART_EXPR, TREE_TYPE (x), y);
      gimple_assign_set_lhs (stmt, y);
      gimple_assign_set_rhs1 (stmt, x);
    }
  else if (known_ge (trim_head * 2 * BITS_PER_UNIT, ref->size))
    {
      /* TREE_IMAGPART is live.  */
      tree x = TREE_IMAGPART (gimple_assign_rhs1 (stmt));
      tree y = gimple_assign_lhs (stmt);
      y = build1 (IMAGPART_EXPR, TREE_TYPE (x), y);
      gimple_assign_set_lhs (stmt, y);
      gimple_assign_set_rhs1 (stmt, x);
    }

  /* Other cases indicate parts of both the real and imag subobjects
     are live.  We do not try to optimize those cases.  */
}

/* STMT initializes an object using a CONSTRUCTOR where one or more of the
   bytes written are dead stores.  REF is a representation of the memory
   written.  LIVE is the bitmap of stores that are actually live.

   Attempt to rewrite STMT so that it stores only the bytes that are
   still live.

   The most common case for getting here is a CONSTRUCTOR with no elements
   being used to zero initialize an object.  We do not try to handle other
   cases as those would force us to fully cover the object with the
   CONSTRUCTOR node except for the components that are dead.  */
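
/* An illustrative example (not from the original sources): for a 16-byte
   zero initialization

     s = {};

   where the last 8 bytes are dead, head_trim is 0 and tail_trim is 8,
   and the store is rewritten along the lines of

     MEM <char[8]> [(struct S *)&s] = {};

   i.e. an empty CONSTRUCTOR of a char array covering only the live
   bytes.  */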

static void
maybe_trim_constructor_store (ao_ref *ref, sbitmap live, gimple *stmt)
{
  tree ctor = gimple_assign_rhs1 (stmt);

  /* This is the only case we currently handle.  It actually seems to
     catch most cases of actual interest.  */
  gcc_assert (CONSTRUCTOR_NELTS (ctor) == 0);

  int head_trim = 0;
  int tail_trim = 0;
  compute_trims (ref, live, &head_trim, &tail_trim, stmt);

  /* Now we want to replace the constructor initializer
     with memset (object + head_trim, 0, size - head_trim - tail_trim).  */
  if (head_trim || tail_trim)
    {
      /* We want &lhs for the MEM_REF expression.  */
      tree lhs_addr = build_fold_addr_expr (gimple_assign_lhs (stmt));

      if (! is_gimple_min_invariant (lhs_addr))
        return;

      /* The number of bytes for the new constructor.  */
      poly_int64 ref_bytes = exact_div (ref->size, BITS_PER_UNIT);
      poly_int64 count = ref_bytes - head_trim - tail_trim;

      /* And the new type for the CONSTRUCTOR.  Essentially it's just
         a char array large enough to cover the non-trimmed parts of
         the original CONSTRUCTOR.  Note we want explicit bounds here
         so that we know how many bytes to clear when expanding the
         CONSTRUCTOR.  */
      tree type = build_array_type_nelts (char_type_node, count);

      /* Build a suitable alias type rather than using alias set zero
         to avoid pessimizing.  */
      tree alias_type = reference_alias_ptr_type (gimple_assign_lhs (stmt));

      /* Build a MEM_REF representing the whole accessed area, starting
         at the first byte not trimmed.  */
      tree exp = fold_build2 (MEM_REF, type, lhs_addr,
                              build_int_cst (alias_type, head_trim));

      /* Now update STMT with a new RHS and LHS.  */
      gimple_assign_set_lhs (stmt, exp);
      gimple_assign_set_rhs1 (stmt, build_constructor (type, NULL));
    }
}

/* STMT is a memcpy, memmove or memset.  Decrement the number of bytes
   copied/set by DECREMENT.  */
static void
decrement_count (gimple *stmt, int decrement)
{
  tree *countp = gimple_call_arg_ptr (stmt, 2);
  gcc_assert (TREE_CODE (*countp) == INTEGER_CST);
  *countp = wide_int_to_tree (TREE_TYPE (*countp), (TREE_INT_CST_LOW (*countp)
                                                    - decrement));
}

/* STMT is a memcpy, memmove or memset.  *WHERE is one of its address
   arguments; advance it by INCREMENT bytes.  If the call's return value
   is used, first capture the original destination address in the LHS so
   later uses remain correct.  */

static void
increment_start_addr (gimple *stmt, tree *where, int increment)
{
  if (tree lhs = gimple_call_lhs (stmt))
    if (where == gimple_call_arg_ptr (stmt, 0))
      {
        gassign *newop = gimple_build_assign (lhs, unshare_expr (*where));
        gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
        gsi_insert_after (&gsi, newop, GSI_SAME_STMT);
        gimple_call_set_lhs (stmt, NULL_TREE);
        update_stmt (stmt);
      }

  if (TREE_CODE (*where) == SSA_NAME)
    {
      tree tem = make_ssa_name (TREE_TYPE (*where));
      gassign *newop
        = gimple_build_assign (tem, POINTER_PLUS_EXPR, *where,
                               build_int_cst (sizetype, increment));
      gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
      gsi_insert_before (&gsi, newop, GSI_SAME_STMT);
      *where = tem;
      update_stmt (stmt);
      return;
    }

  *where = build_fold_addr_expr (fold_build2 (MEM_REF, char_type_node,
                                              *where,
                                              build_int_cst (ptr_type_node,
                                                             increment)));
}

/* STMT is a builtin call that writes the bytes in bitmap ORIG; those in
   ORIG & ~NEW are dead and need not be stored.  Try to rewrite STMT to
   reduce the amount of data it actually writes.

   Right now we only support trimming from the head or the tail of the
   memory region.  In theory we could split the mem* call, but it's
   likely of marginal value.  */
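
/* An illustrative example (not from the original sources): if the first
   4 and last 4 bytes of

     memset (p_1, 0, 32);

   are dead, the count is decremented by the tail trim, then the start
   address is advanced and the count decremented again for the head trim,
   yielding

     memset (p_1 + 4, 0, 24);  */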

static void
maybe_trim_memstar_call (ao_ref *ref, sbitmap live, gimple *stmt)
{
  switch (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt)))
    {
    case BUILT_IN_MEMCPY:
    case BUILT_IN_MEMMOVE:
      {
        int head_trim, tail_trim;
        compute_trims (ref, live, &head_trim, &tail_trim, stmt);

        /* Tail trimming is easy, we can just reduce the count.  */
        if (tail_trim)
          decrement_count (stmt, tail_trim);

        /* Head trimming requires adjusting all the arguments.  */
        if (head_trim)
          {
            tree *dst = gimple_call_arg_ptr (stmt, 0);
            increment_start_addr (stmt, dst, head_trim);
            tree *src = gimple_call_arg_ptr (stmt, 1);
            increment_start_addr (stmt, src, head_trim);
            decrement_count (stmt, head_trim);
          }
        break;
      }

    case BUILT_IN_MEMSET:
      {
        int head_trim, tail_trim;
        compute_trims (ref, live, &head_trim, &tail_trim, stmt);

        /* Tail trimming is easy, we can just reduce the count.  */
        if (tail_trim)
          decrement_count (stmt, tail_trim);

        /* Head trimming requires adjusting all the arguments.  */
        if (head_trim)
          {
            tree *dst = gimple_call_arg_ptr (stmt, 0);
            increment_start_addr (stmt, dst, head_trim);
            decrement_count (stmt, head_trim);
          }
        break;
      }

    default:
      break;
    }
}

/* STMT is a memory write where one or more bytes written are dead
   stores.  REF is a representation of the memory written.  LIVE is
   the bitmap of stores that are actually live.

   Attempt to rewrite STMT so that it writes fewer memory locations.  Right
   now we only support trimming at the start or end of the memory region.
   It's not clear how much there is to be gained by trimming from the middle
   of the region.  */

static void
maybe_trim_partially_dead_store (ao_ref *ref, sbitmap live, gimple *stmt)
{
  if (is_gimple_assign (stmt)
      && TREE_CODE (gimple_assign_lhs (stmt)) != TARGET_MEM_REF)
    {
      switch (gimple_assign_rhs_code (stmt))
        {
        case CONSTRUCTOR:
          maybe_trim_constructor_store (ref, live, stmt);
          break;
        case COMPLEX_CST:
          maybe_trim_complex_store (ref, live, stmt);
          break;
        default:
          break;
        }
    }
}

/* Return TRUE if USE_REF reads bytes that are live according to LIVE,
   where LIVE is derived from REF, a write reference.

   While this routine may modify USE_REF, it's passed by value, not
   location.  So callers do not see those modifications.  */
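
/* An illustrative example (not from the original sources): if REF covers
   bytes 0..15 and LIVE has only bytes 8..15 set, a USE_REF reading bytes
   0..3 hits no live bytes and the function returns false, while a USE_REF
   reading bytes 4..11 overlaps live byte 8 and the function returns
   true.  */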

static bool
live_bytes_read (ao_ref use_ref, ao_ref *ref, sbitmap live)
{
  /* We have already verified that USE_REF and REF hit the same object.
     Now verify that there's actually an overlap between USE_REF and REF.  */
  HOST_WIDE_INT start, size;
  if (normalize_ref (&use_ref, ref)
      && (use_ref.offset - ref->offset).is_constant (&start)
      && use_ref.size.is_constant (&size))
    {
      /* If USE_REF covers all of REF, then it will hit one or more
         live bytes.  This avoids useless iteration over the bitmap
         below.  */
      if (start == 0 && known_eq (size, ref->size))
        return true;

      /* Now check if any of the remaining bits in use_ref are set in LIVE.  */
      return bitmap_bit_in_range_p (live, start / BITS_PER_UNIT,
                                    (start + size - 1) / BITS_PER_UNIT);
    }
  return true;
}

/* Callback for dse_classify_store calling for_each_index.  Verify that
   indices are invariant in the loop with backedge PHI in basic-block DATA.  */

static bool
check_name (tree, tree *idx, void *data)
{
  basic_block phi_bb = (basic_block) data;
  if (TREE_CODE (*idx) == SSA_NAME
      && !SSA_NAME_IS_DEFAULT_DEF (*idx)
      && dominated_by_p (CDI_DOMINATORS, gimple_bb (SSA_NAME_DEF_STMT (*idx)),
                         phi_bb))
    return false;
  return true;
}

/* A helper of dse_optimize_stmt.
   Given a store in STMT that writes to REF, classify it according to
   downstream uses and defs.  Sets *BY_CLOBBER_P to true if only clobber
   statements influenced the classification result.  Returns the
   classification.  */
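
/* An illustrative example (not from the original sources): classifying
   STMT in

     a = 1;    <-- STMT
     b = 2;    <-- no use of a; walked through as a further def
     a = 3;    <-- kills the ref

   follows the virtual def-use chain from STMT, finds no intervening read
   of a, reaches the killing store and returns DSE_STORE_DEAD.  */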

static dse_store_status
dse_classify_store (ao_ref *ref, gimple *stmt,
                    bool byte_tracking_enabled, sbitmap live_bytes,
                    bool *by_clobber_p = NULL)
{
  gimple *temp;
  int cnt = 0;
  auto_bitmap visited;

  if (by_clobber_p)
    *by_clobber_p = true;

  /* Find the first dominated statement that clobbers (part of) the
     memory stmt stores to with no intermediate statement that may use
     part of the memory stmt stores.  That is, find a store that may
     prove stmt to be a dead store.  */
  temp = stmt;
  do
    {
      gimple *use_stmt;
      imm_use_iterator ui;
      bool fail = false;
      tree defvar;

      if (gimple_code (temp) == GIMPLE_PHI)
        {
          /* If we visit this PHI by following a backedge then we have to
             make sure ref->ref only refers to SSA names that are invariant
             with respect to the loop represented by this PHI node.  */
          if (dominated_by_p (CDI_DOMINATORS, gimple_bb (stmt),
                              gimple_bb (temp))
              && !for_each_index (ref->ref ? &ref->ref : &ref->base,
                                  check_name, gimple_bb (temp)))
            return DSE_STORE_LIVE;
          defvar = PHI_RESULT (temp);
          bitmap_set_bit (visited, SSA_NAME_VERSION (defvar));
        }
      else
        defvar = gimple_vdef (temp);
      auto_vec<gimple *, 10> defs;
      gimple *phi_def = NULL;
      FOR_EACH_IMM_USE_STMT (use_stmt, ui, defvar)
        {
          /* Limit stmt walking.  */
          if (++cnt > PARAM_VALUE (PARAM_DSE_MAX_ALIAS_QUERIES_PER_STORE))
            {
              fail = true;
              BREAK_FROM_IMM_USE_STMT (ui);
            }

          /* We have visited ourselves already so ignore STMT for the
             purpose of chaining.  */
          if (use_stmt == stmt)
            ;
          /* In simple cases we can look through PHI nodes, but we
             have to be careful with loops and with memory references
             containing operands that are also operands of PHI nodes.
             See gcc.c-torture/execute/20051110-*.c.  */
          else if (gimple_code (use_stmt) == GIMPLE_PHI)
            {
              /* If we already visited this PHI ignore it for further
                 processing.  */
              if (!bitmap_bit_p (visited,
                                 SSA_NAME_VERSION (PHI_RESULT (use_stmt))))
                {
                  defs.safe_push (use_stmt);
                  phi_def = use_stmt;
                }
            }
          /* If the statement is a use the store is not dead.  */
          else if (ref_maybe_used_by_stmt_p (use_stmt, ref))
            {
              /* Handle common cases where we can easily build an ao_ref
                 structure for USE_STMT and in doing so we find that the
                 references hit non-live bytes and thus can be ignored.  */
              if (byte_tracking_enabled
                  && is_gimple_assign (use_stmt))
                {
                  ao_ref use_ref;
                  ao_ref_init (&use_ref, gimple_assign_rhs1 (use_stmt));
                  if (valid_ao_ref_for_dse (&use_ref)
                      && use_ref.base == ref->base
                      && known_eq (use_ref.size, use_ref.max_size)
                      && !live_bytes_read (use_ref, ref, live_bytes))
                    {
                      /* If this is a store, remember it as we possibly
                         need to walk the defs uses.  */
                      if (gimple_vdef (use_stmt))
                        defs.safe_push (use_stmt);
                      continue;
                    }
                }

              fail = true;
              BREAK_FROM_IMM_USE_STMT (ui);
            }
          /* If this is a store, remember it as we possibly need to walk the
             defs uses.  */
          else if (gimple_vdef (use_stmt))
            defs.safe_push (use_stmt);
        }

      if (fail)
        {
          /* STMT might be partially dead and we may be able to reduce
             how many memory locations it stores into.  */
          if (byte_tracking_enabled && !gimple_clobber_p (stmt))
            return DSE_STORE_MAYBE_PARTIAL_DEAD;
          return DSE_STORE_LIVE;
        }

      /* If we didn't find any definition this means the store is dead
         if it isn't a store to global reachable memory.  In this case
         just pretend the stmt makes itself dead.  Otherwise fail.  */
      if (defs.is_empty ())
        {
          if (ref_may_alias_global_p (ref))
            return DSE_STORE_LIVE;

          if (by_clobber_p)
            *by_clobber_p = false;
          return DSE_STORE_DEAD;
        }

      /* Process defs and remove those we need not process further.  */
      for (unsigned i = 0; i < defs.length ();)
        {
          gimple *def = defs[i];
          gimple *use_stmt;
          use_operand_p use_p;
          /* If the path to check starts with a kill we do not need to
             process it further.
             ???  With byte tracking we need only kill the bytes currently
             live.  */
          if (stmt_kills_ref_p (def, ref))
            {
              if (by_clobber_p && !gimple_clobber_p (def))
                *by_clobber_p = false;
              defs.unordered_remove (i);
            }
          /* In addition to kills we can remove defs whose only use
             is another def in defs.  That can only ever be PHIs of which
             we track a single for simplicity reasons (we fail for multiple
             PHIs anyways).  We can also ignore defs that feed only into
             already visited PHIs.  */
          else if (gimple_code (def) != GIMPLE_PHI
                   && single_imm_use (gimple_vdef (def), &use_p, &use_stmt)
                   && (use_stmt == phi_def
                       || (gimple_code (use_stmt) == GIMPLE_PHI
                           && bitmap_bit_p (visited,
                                            SSA_NAME_VERSION
                                              (PHI_RESULT (use_stmt))))))
            defs.unordered_remove (i);
          else
            ++i;
        }

      /* If all defs kill the ref we are done.  */
      if (defs.is_empty ())
        return DSE_STORE_DEAD;
      /* If more than one def survives fail.  */
      if (defs.length () > 1)
        {
          /* STMT might be partially dead and we may be able to reduce
             how many memory locations it stores into.  */
          if (byte_tracking_enabled && !gimple_clobber_p (stmt))
            return DSE_STORE_MAYBE_PARTIAL_DEAD;
          return DSE_STORE_LIVE;
        }
      temp = defs[0];

      /* Track partial kills.  */
      if (byte_tracking_enabled)
        {
          clear_bytes_written_by (live_bytes, temp, ref);
          if (bitmap_empty_p (live_bytes))
            {
              if (by_clobber_p && !gimple_clobber_p (temp))
                *by_clobber_p = false;
              return DSE_STORE_DEAD;
            }
        }
    }
  /* Continue walking until there are no more live bytes.  */
  while (1);
}


class dse_dom_walker : public dom_walker
{
public:
  dse_dom_walker (cdi_direction direction)
    : dom_walker (direction),
      m_live_bytes (PARAM_VALUE (PARAM_DSE_MAX_OBJECT_SIZE)),
      m_byte_tracking_enabled (false) {}

  virtual edge before_dom_children (basic_block);

private:
  auto_sbitmap m_live_bytes;
  bool m_byte_tracking_enabled;
  void dse_optimize_stmt (gimple_stmt_iterator *);
};

/* Delete a dead call at GSI, which is a mem* call of some kind.  */
static void
delete_dead_call (gimple_stmt_iterator *gsi)
{
  gimple *stmt = gsi_stmt (*gsi);
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "  Deleted dead call: ");
      print_gimple_stmt (dump_file, stmt, 0, dump_flags);
      fprintf (dump_file, "\n");
    }

  tree lhs = gimple_call_lhs (stmt);
  if (lhs)
    {
      tree ptr = gimple_call_arg (stmt, 0);
      gimple *new_stmt = gimple_build_assign (lhs, ptr);
      unlink_stmt_vdef (stmt);
      if (gsi_replace (gsi, new_stmt, true))
        bitmap_set_bit (need_eh_cleanup, gimple_bb (stmt)->index);
    }
  else
    {
      /* Then we need to fix the operand of the consuming stmt.  */
      unlink_stmt_vdef (stmt);

      /* Remove the dead store.  */
      if (gsi_remove (gsi, true))
        bitmap_set_bit (need_eh_cleanup, gimple_bb (stmt)->index);
      release_defs (stmt);
    }
}

/* Delete a dead store at GSI, which is a gimple assignment.  */

static void
delete_dead_assignment (gimple_stmt_iterator *gsi)
{
  gimple *stmt = gsi_stmt (*gsi);
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "  Deleted dead store: ");
      print_gimple_stmt (dump_file, stmt, 0, dump_flags);
      fprintf (dump_file, "\n");
    }

  /* Then we need to fix the operand of the consuming stmt.  */
  unlink_stmt_vdef (stmt);

  /* Remove the dead store.  */
  basic_block bb = gimple_bb (stmt);
  if (gsi_remove (gsi, true))
    bitmap_set_bit (need_eh_cleanup, bb->index);

  /* And release any SSA_NAMEs set in this statement back to the
     SSA_NAME manager.  */
  release_defs (stmt);
}

/* Attempt to eliminate dead stores in the statement referenced by GSI.

   A dead store is a store into a memory location which will later be
   overwritten by another store without any intervening loads.  In this
   case the earlier store can be deleted.

   In our SSA + virtual operand world we use immediate uses of virtual
   operands to detect dead stores.  If a store's virtual definition
   is used precisely once by a later store to the same location which
   post dominates the first store, then the first store is dead.  */

void
dse_dom_walker::dse_optimize_stmt (gimple_stmt_iterator *gsi)
{
  gimple *stmt = gsi_stmt (*gsi);

  /* If this statement has no virtual defs, then there is nothing
     to do.  */
  if (!gimple_vdef (stmt))
    return;

  /* Don't return early on *this_2(D) ={v} {CLOBBER}.  */
  if (gimple_has_volatile_ops (stmt)
      && (!gimple_clobber_p (stmt)
          || TREE_CODE (gimple_assign_lhs (stmt)) != MEM_REF))
    return;

  ao_ref ref;
  if (!initialize_ao_ref_for_dse (stmt, &ref))
    return;

  /* We know we have virtual definitions.  We can handle assignments and
     some builtin calls.  */
  if (gimple_call_builtin_p (stmt, BUILT_IN_NORMAL))
    {
      switch (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt)))
        {
        case BUILT_IN_MEMCPY:
        case BUILT_IN_MEMMOVE:
        case BUILT_IN_MEMSET:
          {
            /* Occasionally calls with an explicit length of zero
               show up in the IL.  It's pointless to do analysis
               on them, they're trivially dead.  */
            tree size = gimple_call_arg (stmt, 2);
            if (integer_zerop (size))
              {
                delete_dead_call (gsi);
                return;
              }

            enum dse_store_status store_status;
            m_byte_tracking_enabled
              = setup_live_bytes_from_ref (&ref, m_live_bytes);
            store_status = dse_classify_store (&ref, stmt,
                                               m_byte_tracking_enabled,
                                               m_live_bytes);
            if (store_status == DSE_STORE_LIVE)
              return;

            if (store_status == DSE_STORE_MAYBE_PARTIAL_DEAD)
              {
                maybe_trim_memstar_call (&ref, m_live_bytes, stmt);
                return;
              }

            if (store_status == DSE_STORE_DEAD)
              delete_dead_call (gsi);
            return;
          }

        default:
          return;
        }
    }

  if (is_gimple_assign (stmt))
    {
      bool by_clobber_p = false;

      /* Self-assignments are zombies.  */
      if (operand_equal_p (gimple_assign_rhs1 (stmt),
                           gimple_assign_lhs (stmt), 0))
        ;
      else
        {
          m_byte_tracking_enabled
            = setup_live_bytes_from_ref (&ref, m_live_bytes);
          enum dse_store_status store_status;
          store_status = dse_classify_store (&ref, stmt,
                                             m_byte_tracking_enabled,
                                             m_live_bytes, &by_clobber_p);
          if (store_status == DSE_STORE_LIVE)
            return;

          if (store_status == DSE_STORE_MAYBE_PARTIAL_DEAD)
            {
              maybe_trim_partially_dead_store (&ref, m_live_bytes, stmt);
              return;
            }
        }

      /* Now we know that use_stmt kills the LHS of stmt.  */

      /* But only remove *this_2(D) ={v} {CLOBBER} if killed by
         another clobber stmt.  */
      if (gimple_clobber_p (stmt)
          && !by_clobber_p)
        return;

      delete_dead_assignment (gsi);
    }
}

edge
dse_dom_walker::before_dom_children (basic_block bb)
{
  gimple_stmt_iterator gsi;

  for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi);)
    {
      dse_optimize_stmt (&gsi);
      if (gsi_end_p (gsi))
        gsi = gsi_last_bb (bb);
      else
        gsi_prev (&gsi);
    }
  return NULL;
}

namespace {

const pass_data pass_data_dse =
{
  GIMPLE_PASS, /* type */
  "dse", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_DSE, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_dse : public gimple_opt_pass
{
public:
  pass_dse (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_dse, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_dse (m_ctxt); }
  virtual bool gate (function *) { return flag_tree_dse != 0; }
  virtual unsigned int execute (function *);

}; // class pass_dse

unsigned int
pass_dse::execute (function *fun)
{
  need_eh_cleanup = BITMAP_ALLOC (NULL);

  renumber_gimple_stmt_uids (cfun);

  /* We might consider making this a property of each pass so that it
     can be [re]computed on an as-needed basis.  Particularly since
     this pass could be seen as an extension of DCE which needs post
     dominators.  */
  calculate_dominance_info (CDI_POST_DOMINATORS);
  calculate_dominance_info (CDI_DOMINATORS);

  /* Dead store elimination is fundamentally a walk of the post-dominator
     tree and a backwards walk of statements within each block.  */
  dse_dom_walker (CDI_POST_DOMINATORS).walk (fun->cfg->x_exit_block_ptr);

  /* Removal of stores may make some EH edges dead.  Purge such edges from
     the CFG as needed.  */
  if (!bitmap_empty_p (need_eh_cleanup))
    {
      gimple_purge_all_dead_eh_edges (need_eh_cleanup);
      cleanup_tree_cfg ();
    }

  BITMAP_FREE (need_eh_cleanup);

  /* For now, just wipe the post-dominator information.  */
  free_dominance_info (CDI_POST_DOMINATORS);
  return 0;
}

} // anon namespace

gimple_opt_pass *
make_pass_dse (gcc::context *ctxt)
{
  return new pass_dse (ctxt);
}
