1 /* Global, SSA-based optimizations using mathematical identities.
2 Copyright (C) 2005-2016 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the
8 Free Software Foundation; either version 3, or (at your option) any
9 later version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 /* The first mini-pass in this file tries to CSE reciprocal
21 operations. These are common in sequences such as this one:
22
23 modulus = sqrt(x*x + y*y + z*z);
24 x = x / modulus;
25 y = y / modulus;
26 z = z / modulus;
27
28 that can be optimized to
29
30 modulus = sqrt(x*x + y*y + z*z);
31 rmodulus = 1.0 / modulus;
32 x = x * rmodulus;
33 y = y * rmodulus;
34 z = z * rmodulus;
35
36 We do this for loop invariant divisors, and with this pass whenever
37 we notice that a division has the same divisor multiple times.
38
39 Of course, like in PRE, we don't insert a division if a dominator
40 already has one. However, this cannot be done as an extension of
41 PRE for several reasons.
42
43 First of all, experiments showed that the transformation is not
44 always useful if there are only two divisions by the same divisor.
45 This is probably because modern processors can pipeline divisions;
46 on older, in-order processors it should still be worthwhile to
47 optimize two divisions by the same number.
48 We make this a param, and it shall be called N in the remainder of
49 this comment.
50
51 Second, if trapping math is active, we have less freedom on where
52 to insert divisions: we can only do so in basic blocks that already
53 contain one. (If divisions don't trap, we can instead insert
54 divisions elsewhere, namely in blocks that are common dominators
55 of those that have the division.)
56
57 We really don't want to compute the reciprocal unless a division will
58 be found. To do this, we won't insert the division in a basic block
59 that has fewer than N divisions *post-dominating* it.
60
61 The algorithm constructs a subset of the dominator tree, holding the
62 blocks containing the divisions and their common dominators,
63 and walks it twice. The first walk is in post-order, and it annotates
64 each block with the number of divisions that post-dominate it: this
65 gives information on where divisions can be inserted profitably.
66 The second walk is in pre-order, and it inserts divisions as explained
67 above, and replaces divisions by multiplications.
68
69 In the best case, the cost of the pass is O(n_statements). In the
70 worst case, the cost is due to creating the dominator tree subset,
71 with a cost of O(n_basic_blocks ^ 2); however, this can only happen
72 for n_statements / n_basic_blocks statements. So, the amortized cost
73 of creating the dominator tree subset is O(n_basic_blocks) and the
74 worst-case cost of the pass is O(n_statements * n_basic_blocks).
75
76 More practically, the cost will be small because there are few
77 divisions, and they tend to be in the same basic block, so insert_bb
78 is called very few times.
79
80 If we did this using domwalk.c, an efficient implementation would have
81 to work on all the variables in a single pass, because we could not
82 work on just a subset of the dominator tree, as we do now, and the
83 cost would also be something like O(n_statements * n_basic_blocks).
84 The data structures would be more complex in order to work on all the
85 variables in a single pass. */
86
87 #include "config.h"
88 #include "system.h"
89 #include "coretypes.h"
90 #include "backend.h"
91 #include "target.h"
92 #include "rtl.h"
93 #include "tree.h"
94 #include "gimple.h"
95 #include "predict.h"
96 #include "alloc-pool.h"
97 #include "tree-pass.h"
98 #include "ssa.h"
99 #include "optabs-tree.h"
100 #include "gimple-pretty-print.h"
101 #include "alias.h"
102 #include "fold-const.h"
103 #include "gimple-fold.h"
104 #include "gimple-iterator.h"
105 #include "gimplify.h"
106 #include "gimplify-me.h"
107 #include "stor-layout.h"
108 #include "tree-cfg.h"
109 #include "tree-dfa.h"
110 #include "tree-ssa.h"
111 #include "builtins.h"
112 #include "params.h"
113 #include "internal-fn.h"
114 #include "case-cfn-macros.h"
115
116 /* This structure represents one basic block that either computes a
117 division, or is a common dominator for basic blocks that compute a
118 division. */
119 struct occurrence {
120 /* The basic block represented by this structure. */
121 basic_block bb;
122
123 /* If non-NULL, the SSA_NAME holding the definition for a reciprocal
124 inserted in BB. */
125 tree recip_def;
126
127 /* If non-NULL, the GIMPLE_ASSIGN for a reciprocal computation that
128 was inserted in BB. */
129 gimple *recip_def_stmt;
130
131 /* Pointer to a list of "struct occurrence"s for blocks dominated
132 by BB. */
133 struct occurrence *children;
134
135 /* Pointer to the next "struct occurrence"s in the list of blocks
136 sharing a common dominator. */
137 struct occurrence *next;
138
139 /* The number of divisions that are in BB before compute_merit. The
140 number of divisions that are in BB or post-dominate it after
141 compute_merit. */
142 int num_divisions;
143
144 /* True if the basic block has a division, false if it is a common
145 dominator for basic blocks that do. If it is false and trapping
146 math is active, BB is not a candidate for inserting a reciprocal. */
147 bool bb_has_division;
148 };
149
150 static struct
151 {
152 /* Number of 1.0/X ops inserted. */
153 int rdivs_inserted;
154
155 /* Number of 1.0/FUNC ops inserted. */
156 int rfuncs_inserted;
157 } reciprocal_stats;
158
159 static struct
160 {
161 /* Number of cexpi calls inserted. */
162 int inserted;
163 } sincos_stats;
164
165 static struct
166 {
167 /* Number of hand-written 16-bit nop / bswaps found. */
168 int found_16bit;
169
170 /* Number of hand-written 32-bit nop / bswaps found. */
171 int found_32bit;
172
173 /* Number of hand-written 64-bit nop / bswaps found. */
174 int found_64bit;
175 } nop_stats, bswap_stats;
176
177 static struct
178 {
179 /* Number of widening multiplication ops inserted. */
180 int widen_mults_inserted;
181
182 /* Number of integer multiply-and-accumulate ops inserted. */
183 int maccs_inserted;
184
185 /* Number of fp fused multiply-add ops inserted. */
186 int fmas_inserted;
187 } widen_mul_stats;
188
189 /* The instance of "struct occurrence" representing the highest
190 interesting block in the dominator tree. */
191 static struct occurrence *occ_head;
192
193 /* Allocation pool for getting instances of "struct occurrence". */
194 static object_allocator<occurrence> *occ_pool;
195
196
197
198 /* Allocate and return a new struct occurrence for basic block BB,
199 whose children list is headed by CHILDREN. */
200 static struct occurrence *
201 occ_new (basic_block bb, struct occurrence *children)
202 {
203 struct occurrence *occ;
204
205 bb->aux = occ = occ_pool->allocate ();
206 memset (occ, 0, sizeof (struct occurrence));
207
208 occ->bb = bb;
209 occ->children = children;
210 return occ;
211 }
212
213
214 /* Insert NEW_OCC into our subset of the dominator tree. P_HEAD points to a
215 list of "struct occurrence"s, one per basic block, having IDOM as
216 their common dominator.
217
218 We try to insert NEW_OCC as deep as possible in the tree, and we also
219 insert any other block that is a common dominator for BB and one
220 block already in the tree. */
221
222 static void
223 insert_bb (struct occurrence *new_occ, basic_block idom,
224 struct occurrence **p_head)
225 {
226 struct occurrence *occ, **p_occ;
227
228 for (p_occ = p_head; (occ = *p_occ) != NULL; )
229 {
230 basic_block bb = new_occ->bb, occ_bb = occ->bb;
231 basic_block dom = nearest_common_dominator (CDI_DOMINATORS, occ_bb, bb);
232 if (dom == bb)
233 {
234 /* BB dominates OCC_BB. OCC becomes NEW_OCC's child: remove OCC
235 from its list. */
236 *p_occ = occ->next;
237 occ->next = new_occ->children;
238 new_occ->children = occ;
239
240 /* Try the next block (it may also be dominated by BB). */
241 }
242
243 else if (dom == occ_bb)
244 {
245 /* OCC_BB dominates BB. Tail recurse to look deeper. */
246 insert_bb (new_occ, dom, &occ->children);
247 return;
248 }
249
250 else if (dom != idom)
251 {
252 gcc_assert (!dom->aux);
253
254 /* There is a dominator between IDOM and BB, add it and make
255 two children out of NEW_OCC and OCC. First, remove OCC from
256 its list. */
257 *p_occ = occ->next;
258 new_occ->next = occ;
259 occ->next = NULL;
260
261 /* None of the previous blocks has DOM as a dominator: if we tail
262 recursed, we would reexamine them uselessly. Just switch BB with
263 DOM, and go on looking for blocks dominated by DOM. */
264 new_occ = occ_new (dom, new_occ);
265 }
266
267 else
268 {
269 /* Nothing special, go on with the next element. */
270 p_occ = &occ->next;
271 }
272 }
273
274 /* No place was found deeper in the tree. Make BB a child of IDOM, a sibling of the occurrences already on the list. */
275 new_occ->next = *p_head;
276 *p_head = new_occ;
277 }
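
/* An illustrative trace (a sketch, not from the sources): suppose
   divisions were registered in blocks B2 and B3 whose nearest common
   dominator is B1, and that B1 contains no division itself.  Registering
   B2 makes occ(B2) the head of the list.  When occ(B3) is inserted,
   nearest_common_dominator (B2, B3) is B1, which differs from both
   blocks and from IDOM, so the third branch above fires: occ(B2) is
   unlinked, a fresh occ(B1) is created with children occ(B3) and
   occ(B2), and occ(B1) ends up on IDOM's list.  B1 thus becomes a
   candidate insertion point for the reciprocal even though it computes
   no division itself.  */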
278
279 /* Register that we found a division in BB. */
280
281 static inline void
282 register_division_in (basic_block bb)
283 {
284 struct occurrence *occ;
285
286 occ = (struct occurrence *) bb->aux;
287 if (!occ)
288 {
289 occ = occ_new (bb, NULL);
290 insert_bb (occ, ENTRY_BLOCK_PTR_FOR_FN (cfun), &occ_head);
291 }
292
293 occ->bb_has_division = true;
294 occ->num_divisions++;
295 }
296
297
298 /* Compute the number of divisions that postdominate each block in OCC and
299 its children. */
300
301 static void
302 compute_merit (struct occurrence *occ)
303 {
304 struct occurrence *occ_child;
305 basic_block dom = occ->bb;
306
307 for (occ_child = occ->children; occ_child; occ_child = occ_child->next)
308 {
309 basic_block bb;
310 if (occ_child->children)
311 compute_merit (occ_child);
312
313 if (flag_exceptions)
314 bb = single_noncomplex_succ (dom);
315 else
316 bb = dom;
317
318 if (dominated_by_p (CDI_POST_DOMINATORS, bb, occ_child->bb))
319 occ->num_divisions += occ_child->num_divisions;
320 }
321 }
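
/* For example (a hypothetical CFG, not a testcase): if occ(B1) has a
   child occ(B2) with num_divisions == 2 and B2 post-dominates B1, then
   after compute_merit B1's num_divisions includes those 2, so with a
   target threshold N == 2 the pre-order walk in insert_reciprocals may
   place the reciprocal as high as B1.  If B2 does not post-dominate B1,
   its divisions do not count towards B1, which avoids computing a
   reciprocal on paths that never reach a division.  */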
322
323
324 /* Return whether USE_STMT is a floating-point division by DEF. */
325 static inline bool
326 is_division_by (gimple *use_stmt, tree def)
327 {
328 return is_gimple_assign (use_stmt)
329 && gimple_assign_rhs_code (use_stmt) == RDIV_EXPR
330 && gimple_assign_rhs2 (use_stmt) == def
331 /* Do not recognize x / x as a valid division, as we would get
332 confused later by replacing all immediate uses of x in such
333 a stmt. */
334 && gimple_assign_rhs1 (use_stmt) != def;
335 }
336
337 /* Walk the subset of the dominator tree rooted at OCC, setting the
338 RECIP_DEF field to a definition of 1.0 / DEF that can be used in
339 the given basic block. The field may be left NULL, of course,
340 if it is not possible or profitable to do the optimization.
341
342 DEF_BSI is an iterator pointing at the statement defining DEF.
343 If RECIP_DEF is set, a dominator already has a computation that can
344 be used. */
345
346 static void
347 insert_reciprocals (gimple_stmt_iterator *def_gsi, struct occurrence *occ,
348 tree def, tree recip_def, int threshold)
349 {
350 tree type;
351 gassign *new_stmt;
352 gimple_stmt_iterator gsi;
353 struct occurrence *occ_child;
354
355 if (!recip_def
356 && (occ->bb_has_division || !flag_trapping_math)
357 && occ->num_divisions >= threshold)
358 {
359 /* Make a variable with the replacement and substitute it. */
360 type = TREE_TYPE (def);
361 recip_def = create_tmp_reg (type, "reciptmp");
362 new_stmt = gimple_build_assign (recip_def, RDIV_EXPR,
363 build_one_cst (type), def);
364
365 if (occ->bb_has_division)
366 {
367 /* Case 1: insert before an existing division. */
368 gsi = gsi_after_labels (occ->bb);
369 while (!gsi_end_p (gsi) && !is_division_by (gsi_stmt (gsi), def))
370 gsi_next (&gsi);
371
372 gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
373 }
374 else if (def_gsi && occ->bb == def_gsi->bb)
375 {
376 /* Case 2: insert right after the definition. Note that this will
377 never happen if the definition statement can throw, because in
378 that case the sole successor of the statement's basic block will
379 dominate all the uses as well. */
380 gsi_insert_after (def_gsi, new_stmt, GSI_NEW_STMT);
381 }
382 else
383 {
384 /* Case 3: insert in a basic block not containing defs/uses. */
385 gsi = gsi_after_labels (occ->bb);
386 gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
387 }
388
389 reciprocal_stats.rdivs_inserted++;
390
391 occ->recip_def_stmt = new_stmt;
392 }
393
394 occ->recip_def = recip_def;
395 for (occ_child = occ->children; occ_child; occ_child = occ_child->next)
396 insert_reciprocals (def_gsi, occ_child, def, recip_def, threshold);
397 }
398
399
400 /* Replace the division at USE_P with a multiplication by the reciprocal, if
401 possible. */
402
403 static inline void
404 replace_reciprocal (use_operand_p use_p)
405 {
406 gimple *use_stmt = USE_STMT (use_p);
407 basic_block bb = gimple_bb (use_stmt);
408 struct occurrence *occ = (struct occurrence *) bb->aux;
409
410 if (optimize_bb_for_speed_p (bb)
411 && occ->recip_def && use_stmt != occ->recip_def_stmt)
412 {
413 gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
414 gimple_assign_set_rhs_code (use_stmt, MULT_EXPR);
415 SET_USE (use_p, occ->recip_def);
416 fold_stmt_inplace (&gsi);
417 update_stmt (use_stmt);
418 }
419 }
420
421
422 /* Free OCC and return one more "struct occurrence" to be freed. */
423
424 static struct occurrence *
425 free_bb (struct occurrence *occ)
426 {
427 struct occurrence *child, *next;
428
429 /* First get the two pointers hanging off OCC. */
430 next = occ->next;
431 child = occ->children;
432 occ->bb->aux = NULL;
433 occ_pool->remove (occ);
434
435 /* Now ensure that we don't recurse unless it is necessary. */
436 if (!child)
437 return next;
438 else
439 {
440 while (next)
441 next = free_bb (next);
442
443 return child;
444 }
445 }
446
447
448 /* Look for floating-point divisions among DEF's uses, and try to
449 replace them by multiplications with the reciprocal. Add
450 as many statements computing the reciprocal as needed.
451
452 DEF must be a GIMPLE register of a floating-point type. */
453
454 static void
455 execute_cse_reciprocals_1 (gimple_stmt_iterator *def_gsi, tree def)
456 {
457 use_operand_p use_p;
458 imm_use_iterator use_iter;
459 struct occurrence *occ;
460 int count = 0, threshold;
461
462 gcc_assert (FLOAT_TYPE_P (TREE_TYPE (def)) && is_gimple_reg (def));
463
464 FOR_EACH_IMM_USE_FAST (use_p, use_iter, def)
465 {
466 gimple *use_stmt = USE_STMT (use_p);
467 if (is_division_by (use_stmt, def))
468 {
469 register_division_in (gimple_bb (use_stmt));
470 count++;
471 }
472 }
473
474 /* Do the expensive part only if we can hope to optimize something. */
475 threshold = targetm.min_divisions_for_recip_mul (TYPE_MODE (TREE_TYPE (def)));
476 if (count >= threshold)
477 {
478 gimple *use_stmt;
479 for (occ = occ_head; occ; occ = occ->next)
480 {
481 compute_merit (occ);
482 insert_reciprocals (def_gsi, occ, def, NULL, threshold);
483 }
484
485 FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, def)
486 {
487 if (is_division_by (use_stmt, def))
488 {
489 FOR_EACH_IMM_USE_ON_STMT (use_p, use_iter)
490 replace_reciprocal (use_p);
491 }
492 }
493 }
494
495 for (occ = occ_head; occ; )
496 occ = free_bb (occ);
497
498 occ_head = NULL;
499 }
500
501 /* Return an internal function that implements the reciprocal of CALL,
502 or IFN_LAST if there is no such function that the target supports. */
503
504 internal_fn
505 internal_fn_reciprocal (gcall *call)
506 {
507 internal_fn ifn;
508
509 switch (gimple_call_combined_fn (call))
510 {
511 CASE_CFN_SQRT:
512 ifn = IFN_RSQRT;
513 break;
514
515 default:
516 return IFN_LAST;
517 }
518
519 tree_pair types = direct_internal_fn_types (ifn, call);
520 if (!direct_internal_fn_supported_p (ifn, types, OPTIMIZE_FOR_SPEED))
521 return IFN_LAST;
522
523 return ifn;
524 }
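
/* For instance, on a target that supports IFN_RSQRT for the mode in
   question, returning IFN_RSQRT here lets pass_cse_reciprocals::execute
   below rewrite (a sketch of the a/func(b) -> a*rfunc(b) transform)

     t = sqrt (b);  x = a / t;

   into

     t = .RSQRT (b);  x = a * t;  */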
525
526 /* Go through all the floating-point SSA_NAMEs, and call
527 execute_cse_reciprocals_1 on each of them. */
528 namespace {
529
530 const pass_data pass_data_cse_reciprocals =
531 {
532 GIMPLE_PASS, /* type */
533 "recip", /* name */
534 OPTGROUP_NONE, /* optinfo_flags */
535 TV_NONE, /* tv_id */
536 PROP_ssa, /* properties_required */
537 0, /* properties_provided */
538 0, /* properties_destroyed */
539 0, /* todo_flags_start */
540 TODO_update_ssa, /* todo_flags_finish */
541 };
542
543 class pass_cse_reciprocals : public gimple_opt_pass
544 {
545 public:
546 pass_cse_reciprocals (gcc::context *ctxt)
547 : gimple_opt_pass (pass_data_cse_reciprocals, ctxt)
548 {}
549
550 /* opt_pass methods: */
551 virtual bool gate (function *) { return optimize && flag_reciprocal_math; }
552 virtual unsigned int execute (function *);
553
554 }; // class pass_cse_reciprocals
555
556 unsigned int
557 pass_cse_reciprocals::execute (function *fun)
558 {
559 basic_block bb;
560 tree arg;
561
562 occ_pool = new object_allocator<occurrence> ("dominators for recip");
563
564 memset (&reciprocal_stats, 0, sizeof (reciprocal_stats));
565 calculate_dominance_info (CDI_DOMINATORS);
566 calculate_dominance_info (CDI_POST_DOMINATORS);
567
568 if (flag_checking)
569 FOR_EACH_BB_FN (bb, fun)
570 gcc_assert (!bb->aux);
571
572 for (arg = DECL_ARGUMENTS (fun->decl); arg; arg = DECL_CHAIN (arg))
573 if (FLOAT_TYPE_P (TREE_TYPE (arg))
574 && is_gimple_reg (arg))
575 {
576 tree name = ssa_default_def (fun, arg);
577 if (name)
578 execute_cse_reciprocals_1 (NULL, name);
579 }
580
581 FOR_EACH_BB_FN (bb, fun)
582 {
583 tree def;
584
585 for (gphi_iterator gsi = gsi_start_phis (bb); !gsi_end_p (gsi);
586 gsi_next (&gsi))
587 {
588 gphi *phi = gsi.phi ();
589 def = PHI_RESULT (phi);
590 if (! virtual_operand_p (def)
591 && FLOAT_TYPE_P (TREE_TYPE (def)))
592 execute_cse_reciprocals_1 (NULL, def);
593 }
594
595 for (gimple_stmt_iterator gsi = gsi_after_labels (bb); !gsi_end_p (gsi);
596 gsi_next (&gsi))
597 {
598 gimple *stmt = gsi_stmt (gsi);
599
600 if (gimple_has_lhs (stmt)
601 && (def = SINGLE_SSA_TREE_OPERAND (stmt, SSA_OP_DEF)) != NULL
602 && FLOAT_TYPE_P (TREE_TYPE (def))
603 && TREE_CODE (def) == SSA_NAME)
604 execute_cse_reciprocals_1 (&gsi, def);
605 }
606
607 if (optimize_bb_for_size_p (bb))
608 continue;
609
610 /* Scan for a/func(b) and convert it to reciprocal a*rfunc(b). */
611 for (gimple_stmt_iterator gsi = gsi_after_labels (bb); !gsi_end_p (gsi);
612 gsi_next (&gsi))
613 {
614 gimple *stmt = gsi_stmt (gsi);
615
616 if (is_gimple_assign (stmt)
617 && gimple_assign_rhs_code (stmt) == RDIV_EXPR)
618 {
619 tree arg1 = gimple_assign_rhs2 (stmt);
620 gimple *stmt1;
621
622 if (TREE_CODE (arg1) != SSA_NAME)
623 continue;
624
625 stmt1 = SSA_NAME_DEF_STMT (arg1);
626
627 if (is_gimple_call (stmt1)
628 && gimple_call_lhs (stmt1))
629 {
630 bool fail;
631 imm_use_iterator ui;
632 use_operand_p use_p;
633 tree fndecl = NULL_TREE;
634
635 gcall *call = as_a <gcall *> (stmt1);
636 internal_fn ifn = internal_fn_reciprocal (call);
637 if (ifn == IFN_LAST)
638 {
639 fndecl = gimple_call_fndecl (call);
640 if (!fndecl
641 || DECL_BUILT_IN_CLASS (fndecl) != BUILT_IN_MD)
642 continue;
643 fndecl = targetm.builtin_reciprocal (fndecl);
644 if (!fndecl)
645 continue;
646 }
647
648 /* Check that all uses of the SSA name are divisions,
649 otherwise replacing the defining statement will do
650 the wrong thing. */
651 fail = false;
652 FOR_EACH_IMM_USE_FAST (use_p, ui, arg1)
653 {
654 gimple *stmt2 = USE_STMT (use_p);
655 if (is_gimple_debug (stmt2))
656 continue;
657 if (!is_gimple_assign (stmt2)
658 || gimple_assign_rhs_code (stmt2) != RDIV_EXPR
659 || gimple_assign_rhs1 (stmt2) == arg1
660 || gimple_assign_rhs2 (stmt2) != arg1)
661 {
662 fail = true;
663 break;
664 }
665 }
666 if (fail)
667 continue;
668
669 gimple_replace_ssa_lhs (call, arg1);
670 if (gimple_call_internal_p (call) != (ifn != IFN_LAST))
671 {
672 auto_vec<tree, 4> args;
673 for (unsigned int i = 0;
674 i < gimple_call_num_args (call); i++)
675 args.safe_push (gimple_call_arg (call, i));
676 gcall *stmt2;
677 if (ifn == IFN_LAST)
678 stmt2 = gimple_build_call_vec (fndecl, args);
679 else
680 stmt2 = gimple_build_call_internal_vec (ifn, args);
681 gimple_call_set_lhs (stmt2, arg1);
682 if (gimple_vdef (call))
683 {
684 gimple_set_vdef (stmt2, gimple_vdef (call));
685 SSA_NAME_DEF_STMT (gimple_vdef (stmt2)) = stmt2;
686 }
687 gimple_set_vuse (stmt2, gimple_vuse (call));
688 gimple_stmt_iterator gsi2 = gsi_for_stmt (call);
689 gsi_replace (&gsi2, stmt2, true);
690 }
691 else
692 {
693 if (ifn == IFN_LAST)
694 gimple_call_set_fndecl (call, fndecl);
695 else
696 gimple_call_set_internal_fn (call, ifn);
697 update_stmt (call);
698 }
699 reciprocal_stats.rfuncs_inserted++;
700
701 FOR_EACH_IMM_USE_STMT (stmt, ui, arg1)
702 {
703 gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
704 gimple_assign_set_rhs_code (stmt, MULT_EXPR);
705 fold_stmt_inplace (&gsi);
706 update_stmt (stmt);
707 }
708 }
709 }
710 }
711 }
712
713 statistics_counter_event (fun, "reciprocal divs inserted",
714 reciprocal_stats.rdivs_inserted);
715 statistics_counter_event (fun, "reciprocal functions inserted",
716 reciprocal_stats.rfuncs_inserted);
717
718 free_dominance_info (CDI_DOMINATORS);
719 free_dominance_info (CDI_POST_DOMINATORS);
720 delete occ_pool;
721 return 0;
722 }
723
724 } // anon namespace
725
726 gimple_opt_pass *
727 make_pass_cse_reciprocals (gcc::context *ctxt)
728 {
729 return new pass_cse_reciprocals (ctxt);
730 }
731
732 /* Records an occurrence at statement USE_STMT in the vector STMTS if
733 its basic block is dominated by *TOP_BB, dominates it, or *TOP_BB is
734 not yet initialized. Returns true if the occurrence was pushed on
735 the vector. Adjusts *TOP_BB to be the basic block dominating all
736 statements in the vector. */
737
738 static bool
739 maybe_record_sincos (vec<gimple *> *stmts,
740 basic_block *top_bb, gimple *use_stmt)
741 {
742 basic_block use_bb = gimple_bb (use_stmt);
743 if (*top_bb
744 && (*top_bb == use_bb
745 || dominated_by_p (CDI_DOMINATORS, use_bb, *top_bb)))
746 stmts->safe_push (use_stmt);
747 else if (!*top_bb
748 || dominated_by_p (CDI_DOMINATORS, *top_bb, use_bb))
749 {
750 stmts->safe_push (use_stmt);
751 *top_bb = use_bb;
752 }
753 else
754 return false;
755
756 return true;
757 }
758
759 /* Look for sin, cos and cexpi calls with the same argument NAME and,
760 in that case, create a single call to cexpi, CSEing the result.
761 We first walk over all immediate uses of the argument, collecting in
762 a vector the statements that we can CSE; in a second pass we replace
763 each statement's rhs with a REALPART or IMAGPART expression on the
764 result of the cexpi call we insert before the use statement that
765 dominates all other candidates. */
766
767 static bool
768 execute_cse_sincos_1 (tree name)
769 {
770 gimple_stmt_iterator gsi;
771 imm_use_iterator use_iter;
772 tree fndecl, res, type;
773 gimple *def_stmt, *use_stmt, *stmt;
774 int seen_cos = 0, seen_sin = 0, seen_cexpi = 0;
775 auto_vec<gimple *> stmts;
776 basic_block top_bb = NULL;
777 int i;
778 bool cfg_changed = false;
779
780 type = TREE_TYPE (name);
781 FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, name)
782 {
783 if (gimple_code (use_stmt) != GIMPLE_CALL
784 || !gimple_call_lhs (use_stmt))
785 continue;
786
787 switch (gimple_call_combined_fn (use_stmt))
788 {
789 CASE_CFN_COS:
790 seen_cos |= maybe_record_sincos (&stmts, &top_bb, use_stmt) ? 1 : 0;
791 break;
792
793 CASE_CFN_SIN:
794 seen_sin |= maybe_record_sincos (&stmts, &top_bb, use_stmt) ? 1 : 0;
795 break;
796
797 CASE_CFN_CEXPI:
798 seen_cexpi |= maybe_record_sincos (&stmts, &top_bb, use_stmt) ? 1 : 0;
799 break;
800
801 default:;
802 }
803 }
804
805 if (seen_cos + seen_sin + seen_cexpi <= 1)
806 return false;
807
808 /* Simply insert cexpi at the beginning of top_bb but not earlier than
809 the name def statement. */
810 fndecl = mathfn_built_in (type, BUILT_IN_CEXPI);
811 if (!fndecl)
812 return false;
813 stmt = gimple_build_call (fndecl, 1, name);
814 res = make_temp_ssa_name (TREE_TYPE (TREE_TYPE (fndecl)), stmt, "sincostmp");
815 gimple_call_set_lhs (stmt, res);
816
817 def_stmt = SSA_NAME_DEF_STMT (name);
818 if (!SSA_NAME_IS_DEFAULT_DEF (name)
819 && gimple_code (def_stmt) != GIMPLE_PHI
820 && gimple_bb (def_stmt) == top_bb)
821 {
822 gsi = gsi_for_stmt (def_stmt);
823 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
824 }
825 else
826 {
827 gsi = gsi_after_labels (top_bb);
828 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
829 }
830 sincos_stats.inserted++;
831
832 /* And adjust the recorded old call sites. */
833 for (i = 0; stmts.iterate (i, &use_stmt); ++i)
834 {
835 tree rhs = NULL;
836
837 switch (gimple_call_combined_fn (use_stmt))
838 {
839 CASE_CFN_COS:
840 rhs = fold_build1 (REALPART_EXPR, type, res);
841 break;
842
843 CASE_CFN_SIN:
844 rhs = fold_build1 (IMAGPART_EXPR, type, res);
845 break;
846
847 CASE_CFN_CEXPI:
848 rhs = res;
849 break;
850
851 default:;
852 gcc_unreachable ();
853 }
854
855 /* Replace call with a copy. */
856 stmt = gimple_build_assign (gimple_call_lhs (use_stmt), rhs);
857
858 gsi = gsi_for_stmt (use_stmt);
859 gsi_replace (&gsi, stmt, true);
860 if (gimple_purge_dead_eh_edges (gimple_bb (stmt)))
861 cfg_changed = true;
862 }
863
864 return cfg_changed;
865 }
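
/* As an illustration (a sketch, assuming a cexpi implementation is
   available for the type), execute_cse_sincos_1 rewrites

     s = sin (a);  c = cos (a);

   into

     sincostmp = cexpi (a);
     s = IMAGPART_EXPR <sincostmp>;
     c = REALPART_EXPR <sincostmp>;

   so that a single libcall computes both results.  */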
866
867 /* To evaluate powi(x,n), the floating point value x raised to the
868 constant integer exponent n, we use a hybrid algorithm that
869 combines the "window method" with look-up tables. For an
870 introduction to exponentiation algorithms and "addition chains",
871 see section 4.6.3, "Evaluation of Powers" of Donald E. Knuth,
872 "Seminumerical Algorithms", Vol. 2, "The Art of Computer Programming",
873 3rd Edition, 1998, and Daniel M. Gordon, "A Survey of Fast Exponentiation
874 Methods", Journal of Algorithms, Vol. 27, pp. 129-146, 1998. */
875
876 /* Provide a default value for POWI_MAX_MULTS, the maximum number of
877 multiplications to inline before calling the system library's pow
878 function. powi(x,n) requires at worst 2*bits(n)-2 multiplications,
879 so this default never requires calling pow, powf or powl. */
880
881 #ifndef POWI_MAX_MULTS
882 #define POWI_MAX_MULTS (2*HOST_BITS_PER_WIDE_INT-2)
883 #endif
884
885 /* The size of the "optimal power tree" lookup table. All
886 exponents less than this value are simply looked up in the
887 powi_table below. This threshold is also used to size the
888 cache of pseudo registers that hold intermediate results. */
889 #define POWI_TABLE_SIZE 256
890
891 /* The size, in bits, of the window used in the "window method"
892 exponentiation algorithm. This is equivalent to a radix of
893 (1<<POWI_WINDOW_SIZE) in the corresponding "m-ary method". */
894 #define POWI_WINDOW_SIZE 3
895
896 /* The following table is an efficient representation of an
897 "optimal power tree". For each value, i, the corresponding
898 value, j, in the table states that an optimal evaluation
899 sequence for calculating pow(x,i) can be found by evaluating
900 pow(x,j)*pow(x,i-j). An optimal power tree for the first
901 100 integers is given in Knuth's "Seminumerical algorithms". */
902
903 static const unsigned char powi_table[POWI_TABLE_SIZE] =
904 {
905 0, 1, 1, 2, 2, 3, 3, 4, /* 0 - 7 */
906 4, 6, 5, 6, 6, 10, 7, 9, /* 8 - 15 */
907 8, 16, 9, 16, 10, 12, 11, 13, /* 16 - 23 */
908 12, 17, 13, 18, 14, 24, 15, 26, /* 24 - 31 */
909 16, 17, 17, 19, 18, 33, 19, 26, /* 32 - 39 */
910 20, 25, 21, 40, 22, 27, 23, 44, /* 40 - 47 */
911 24, 32, 25, 34, 26, 29, 27, 44, /* 48 - 55 */
912 28, 31, 29, 34, 30, 60, 31, 36, /* 56 - 63 */
913 32, 64, 33, 34, 34, 46, 35, 37, /* 64 - 71 */
914 36, 65, 37, 50, 38, 48, 39, 69, /* 72 - 79 */
915 40, 49, 41, 43, 42, 51, 43, 58, /* 80 - 87 */
916 44, 64, 45, 47, 46, 59, 47, 76, /* 88 - 95 */
917 48, 65, 49, 66, 50, 67, 51, 66, /* 96 - 103 */
918 52, 70, 53, 74, 54, 104, 55, 74, /* 104 - 111 */
919 56, 64, 57, 69, 58, 78, 59, 68, /* 112 - 119 */
920 60, 61, 61, 80, 62, 75, 63, 68, /* 120 - 127 */
921 64, 65, 65, 128, 66, 129, 67, 90, /* 128 - 135 */
922 68, 73, 69, 131, 70, 94, 71, 88, /* 136 - 143 */
923 72, 128, 73, 98, 74, 132, 75, 121, /* 144 - 151 */
924 76, 102, 77, 124, 78, 132, 79, 106, /* 152 - 159 */
925 80, 97, 81, 160, 82, 99, 83, 134, /* 160 - 167 */
926 84, 86, 85, 95, 86, 160, 87, 100, /* 168 - 175 */
927 88, 113, 89, 98, 90, 107, 91, 122, /* 176 - 183 */
928 92, 111, 93, 102, 94, 126, 95, 150, /* 184 - 191 */
929 96, 128, 97, 130, 98, 133, 99, 195, /* 192 - 199 */
930 100, 128, 101, 123, 102, 164, 103, 138, /* 200 - 207 */
931 104, 145, 105, 146, 106, 109, 107, 149, /* 208 - 215 */
932 108, 200, 109, 146, 110, 170, 111, 157, /* 216 - 223 */
933 112, 128, 113, 130, 114, 182, 115, 132, /* 224 - 231 */
934 116, 200, 117, 132, 118, 158, 119, 206, /* 232 - 239 */
935 120, 240, 121, 162, 122, 147, 123, 152, /* 240 - 247 */
936 124, 166, 125, 214, 126, 138, 127, 153, /* 248 - 255 */
937 };
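
/* A worked example of the table: powi_table[15] == 9, so
   pow (x, 15) = pow (x, 9) * pow (x, 6).  Recursing,
   pow (x, 9) = pow (x, 6) * pow (x, 3),
   pow (x, 6) = pow (x, 3) * pow (x, 3) and
   pow (x, 3) = pow (x, 2) * x, giving the chain
   x^2, x^3, x^6, x^9, x^15 in five multiplications, one fewer than
   the six used by simple binary exponentiation.  */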
938
939
940 /* Return the number of multiplications required to calculate
941 powi(x,n) where n is less than POWI_TABLE_SIZE. This is a
942 subroutine of powi_cost. CACHE is an array indicating
943 which exponents have already been calculated. */
944
945 static int
946 powi_lookup_cost (unsigned HOST_WIDE_INT n, bool *cache)
947 {
948 /* If we've already calculated this exponent, then this evaluation
949 doesn't require any additional multiplications. */
950 if (cache[n])
951 return 0;
952
953 cache[n] = true;
954 return powi_lookup_cost (n - powi_table[n], cache)
955 + powi_lookup_cost (powi_table[n], cache) + 1;
956 }
957
958 /* Return the number of multiplications required to calculate
959 powi(x,n) for an arbitrary x, given the exponent N. This
960 function needs to be kept in sync with powi_as_mults below. */
961
962 static int
963 powi_cost (HOST_WIDE_INT n)
964 {
965 bool cache[POWI_TABLE_SIZE];
966 unsigned HOST_WIDE_INT digit;
967 unsigned HOST_WIDE_INT val;
968 int result;
969
970 if (n == 0)
971 return 0;
972
973 /* Ignore the reciprocal when calculating the cost. */
974 val = (n < 0) ? -n : n;
975
976 /* Initialize the exponent cache. */
977 memset (cache, 0, POWI_TABLE_SIZE * sizeof (bool));
978 cache[1] = true;
979
980 result = 0;
981
982 while (val >= POWI_TABLE_SIZE)
983 {
984 if (val & 1)
985 {
986 digit = val & ((1 << POWI_WINDOW_SIZE) - 1);
987 result += powi_lookup_cost (digit, cache)
988 + POWI_WINDOW_SIZE + 1;
989 val >>= POWI_WINDOW_SIZE;
990 }
991 else
992 {
993 val >>= 1;
994 result++;
995 }
996 }
997
998 return result + powi_lookup_cost (val, cache);
999 }
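
/* A short trace for illustration: powi_cost (257) enters the loop with
   val == 257.  val is odd, so the low POWI_WINDOW_SIZE bits are peeled
   off as digit == 1, costing powi_lookup_cost (1) == 0 for the digit
   plus POWI_WINDOW_SIZE squarings and one multiply to recombine; val
   becomes 32.  32 is below POWI_TABLE_SIZE and its lookup chain
   x^2, x^4, x^8, x^16, x^32 costs 5, for a total of 9 -- the same as
   computing x^256 by eight squarings and multiplying by x once.  */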
1000
1001 /* Recursive subroutine of powi_as_mults. This function takes the
1002 array, CACHE, of already calculated exponents and an exponent N and
1003 returns a tree that corresponds to CACHE[1]**N, with type TYPE. */
1004
1005 static tree
1006 powi_as_mults_1 (gimple_stmt_iterator *gsi, location_t loc, tree type,
1007 HOST_WIDE_INT n, tree *cache)
1008 {
1009 tree op0, op1, ssa_target;
1010 unsigned HOST_WIDE_INT digit;
1011 gassign *mult_stmt;
1012
1013 if (n < POWI_TABLE_SIZE && cache[n])
1014 return cache[n];
1015
1016 ssa_target = make_temp_ssa_name (type, NULL, "powmult");
1017
1018 if (n < POWI_TABLE_SIZE)
1019 {
1020 cache[n] = ssa_target;
1021 op0 = powi_as_mults_1 (gsi, loc, type, n - powi_table[n], cache);
1022 op1 = powi_as_mults_1 (gsi, loc, type, powi_table[n], cache);
1023 }
1024 else if (n & 1)
1025 {
1026 digit = n & ((1 << POWI_WINDOW_SIZE) - 1);
1027 op0 = powi_as_mults_1 (gsi, loc, type, n - digit, cache);
1028 op1 = powi_as_mults_1 (gsi, loc, type, digit, cache);
1029 }
1030 else
1031 {
1032 op0 = powi_as_mults_1 (gsi, loc, type, n >> 1, cache);
1033 op1 = op0;
1034 }
1035
1036 mult_stmt = gimple_build_assign (ssa_target, MULT_EXPR, op0, op1);
1037 gimple_set_location (mult_stmt, loc);
1038 gsi_insert_before (gsi, mult_stmt, GSI_SAME_STMT);
1039
1040 return ssa_target;
1041 }
1042
1043 /* Convert ARG0**N to a tree of multiplications of ARG0 with itself.
1044 This function needs to be kept in sync with powi_cost above. */
1045
1046 static tree
1047 powi_as_mults (gimple_stmt_iterator *gsi, location_t loc,
1048 tree arg0, HOST_WIDE_INT n)
1049 {
1050 tree cache[POWI_TABLE_SIZE], result, type = TREE_TYPE (arg0);
1051 gassign *div_stmt;
1052 tree target;
1053
1054 if (n == 0)
1055 return build_real (type, dconst1);
1056
1057 memset (cache, 0, sizeof (cache));
1058 cache[1] = arg0;
1059
1060 result = powi_as_mults_1 (gsi, loc, type, (n < 0) ? -n : n, cache);
1061 if (n >= 0)
1062 return result;
1063
1064 /* If the original exponent was negative, reciprocate the result. */
1065 target = make_temp_ssa_name (type, NULL, "powmult");
1066 div_stmt = gimple_build_assign (target, RDIV_EXPR,
1067 build_real (type, dconst1), result);
1068 gimple_set_location (div_stmt, loc);
1069 gsi_insert_before (gsi, div_stmt, GSI_SAME_STMT);
1070
1071 return target;
1072 }
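
/* For example (a sketch of the emitted GIMPLE; the temporary names are
   made up): powi_as_mults (gsi, loc, x, -5) expands via
   powi_table[5] == 3 and powi_table[3] == 2 into

     powmult_1 = x * x;                   // x^2
     powmult_2 = x * powmult_1;           // x^3
     powmult_3 = powmult_1 * powmult_2;   // x^5
     powmult_4 = 1.0 / powmult_3;

   with the final reciprocal emitted because the exponent is
   negative.  */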
1073
1074 /* ARG0 and N are the two arguments to a powi builtin in GSI with
1075 location info LOC. If the arguments are appropriate, create an
1076 equivalent sequence of statements prior to GSI using an optimal
1077 number of multiplications, and return an expression holding the
1078 result. */
1079
1080 static tree
1081 gimple_expand_builtin_powi (gimple_stmt_iterator *gsi, location_t loc,
1082 tree arg0, HOST_WIDE_INT n)
1083 {
1084 /* Avoid largest negative number. */
1085 if (n != -n
1086 && ((n >= -1 && n <= 2)
1087 || (optimize_function_for_speed_p (cfun)
1088 && powi_cost (n) <= POWI_MAX_MULTS)))
1089 return powi_as_mults (gsi, loc, arg0, n);
1090
1091 return NULL_TREE;
1092 }
1093
1094 /* Build a gimple call statement that calls FN with argument ARG.
1095 Set the lhs of the call statement to a fresh SSA name. Insert the
1096 statement prior to GSI's current position, and return the fresh
1097 SSA name. */
1098
1099 static tree
1100 build_and_insert_call (gimple_stmt_iterator *gsi, location_t loc,
1101 tree fn, tree arg)
1102 {
1103 gcall *call_stmt;
1104 tree ssa_target;
1105
1106 call_stmt = gimple_build_call (fn, 1, arg);
1107 ssa_target = make_temp_ssa_name (TREE_TYPE (arg), NULL, "powroot");
1108 gimple_set_lhs (call_stmt, ssa_target);
1109 gimple_set_location (call_stmt, loc);
1110 gsi_insert_before (gsi, call_stmt, GSI_SAME_STMT);
1111
1112 return ssa_target;
1113 }
1114
1115 /* Build a gimple binary operation with the given CODE and arguments
1116 ARG0, ARG1, assigning the result to a new SSA name for variable
1117 TARGET. Insert the statement prior to GSI's current position, and
1118 return the fresh SSA name. */
1119
1120 static tree
1121 build_and_insert_binop (gimple_stmt_iterator *gsi, location_t loc,
1122 const char *name, enum tree_code code,
1123 tree arg0, tree arg1)
1124 {
1125 tree result = make_temp_ssa_name (TREE_TYPE (arg0), NULL, name);
1126 gassign *stmt = gimple_build_assign (result, code, arg0, arg1);
1127 gimple_set_location (stmt, loc);
1128 gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
1129 return result;
1130 }
1131
1132 /* Build a gimple reference operation with the given CODE and argument
1133 ARG, assigning the result to a new SSA name of TYPE with NAME.
1134 Insert the statement prior to GSI's current position, and return
1135 the fresh SSA name. */
1136
1137 static inline tree
1138 build_and_insert_ref (gimple_stmt_iterator *gsi, location_t loc, tree type,
1139 const char *name, enum tree_code code, tree arg0)
1140 {
1141 tree result = make_temp_ssa_name (type, NULL, name);
1142 gimple *stmt = gimple_build_assign (result, build1 (code, type, arg0));
1143 gimple_set_location (stmt, loc);
1144 gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
1145 return result;
1146 }
1147
1148 /* Build a gimple assignment to cast VAL to TYPE. Insert the statement
1149 prior to GSI's current position, and return the fresh SSA name. */
1150
1151 static tree
1152 build_and_insert_cast (gimple_stmt_iterator *gsi, location_t loc,
1153 tree type, tree val)
1154 {
1155 tree result = make_ssa_name (type);
1156 gassign *stmt = gimple_build_assign (result, NOP_EXPR, val);
1157 gimple_set_location (stmt, loc);
1158 gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
1159 return result;
1160 }
1161
1162 struct pow_synth_sqrt_info
1163 {
1164 bool *factors;
1165 unsigned int deepest;
1166 unsigned int num_mults;
1167 };
1168
1169 /* Return true iff the real value C can be represented as a
1170 sum of powers of 0.5 up to N. That is:
1171 C == SUM<i from 1..N> (a[i]*(0.5**i)) where a[i] is either 0 or 1.
1172 Record in INFO the various parameters of the synthesis algorithm such
1173 as the factors a[i], the maximum 0.5 power and the number of
1174 multiplications that will be required. */
1175
1176 bool
1177 representable_as_half_series_p (REAL_VALUE_TYPE c, unsigned n,
1178 struct pow_synth_sqrt_info *info)
1179 {
1180 REAL_VALUE_TYPE factor = dconsthalf;
1181 REAL_VALUE_TYPE remainder = c;
1182
1183 info->deepest = 0;
1184 info->num_mults = 0;
1185 memset (info->factors, 0, n * sizeof (bool));
1186
1187 for (unsigned i = 0; i < n; i++)
1188 {
1189 REAL_VALUE_TYPE res;
1190
1191 /* If something inexact happened, bail out now. */
1192 if (real_arithmetic (&res, MINUS_EXPR, &remainder, &factor))
1193 return false;
1194
1195 /* We have hit zero. The number is representable as a sum
1196 of powers of 0.5. */
1197 if (real_equal (&res, &dconst0))
1198 {
1199 info->factors[i] = true;
1200 info->deepest = i + 1;
1201 return true;
1202 }
1203 else if (!REAL_VALUE_NEGATIVE (res))
1204 {
1205 remainder = res;
1206 info->factors[i] = true;
1207 info->num_mults++;
1208 }
1209 else
1210 info->factors[i] = false;
1211
1212 real_arithmetic (&factor, MULT_EXPR, &factor, &dconsthalf);
1213 }
1214 return false;
1215 }
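
/* A worked instance: for C == 0.625 and N >= 3 the loop finds
   0.625 == 0.5 + 0.125, i.e. factors {1, 0, 1}, INFO->deepest == 3 and
   INFO->num_mults == 1, so pow (x, 0.625) can be synthesized as
   sqrt (x) * sqrt (sqrt (sqrt (x))) with a single multiply on top of
   the sqrt chains.  */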
1216
1217 /* Return the tree corresponding to FN being applied
1218 to ARG N times at GSI and LOC.
1219 Look up previous results from CACHE if need be.
1220 cache[0] should contain just plain ARG, i.e. FN applied to ARG 0 times. */
1221
1222 static tree
1223 get_fn_chain (tree arg, unsigned int n, gimple_stmt_iterator *gsi,
1224 tree fn, location_t loc, tree *cache)
1225 {
1226 tree res = cache[n];
1227 if (!res)
1228 {
1229 tree prev = get_fn_chain (arg, n - 1, gsi, fn, loc, cache);
1230 res = build_and_insert_call (gsi, loc, fn, prev);
1231 cache[n] = res;
1232 }
1233
1234 return res;
1235 }
1236
1237 /* Print to STREAM the repeated application of function FNAME to ARG
1238 N times. So, for FNAME = "foo", ARG = "x", N = 2 it would print:
1239 "foo (foo (x))". */
1240
1241 static void
1242 print_nested_fn (FILE *stream, const char *fname, const char *arg,
1243 unsigned int n)
1244 {
1245 if (n == 0)
1246 fprintf (stream, "%s", arg);
1247 else
1248 {
1249 fprintf (stream, "%s (", fname);
1250 print_nested_fn (stream, fname, arg, n - 1);
1251 fprintf (stream, ")");
1252 }
1253 }
1254
1255 /* Print to STREAM the fractional sequence of sqrt chains
1256 applied to ARG, described by INFO. Used for the dump file. */
1257
1258 static void
1259 dump_fractional_sqrt_sequence (FILE *stream, const char *arg,
1260 struct pow_synth_sqrt_info *info)
1261 {
1262 for (unsigned int i = 0; i < info->deepest; i++)
1263 {
1264 bool is_set = info->factors[i];
1265 if (is_set)
1266 {
1267 print_nested_fn (stream, "sqrt", arg, i + 1);
1268 if (i != info->deepest - 1)
1269 fprintf (stream, " * ");
1270 }
1271 }
1272 }
1273
1274 /* Print to STREAM a representation of raising ARG to an integer
1275 power N. Used for the dump file. */
1276
1277 static void
1278 dump_integer_part (FILE *stream, const char *arg, HOST_WIDE_INT n)
1279 {
1280 if (n > 1)
1281 fprintf (stream, "powi (%s, " HOST_WIDE_INT_PRINT_DEC ")", arg, n);
1282 else if (n == 1)
1283 fprintf (stream, "%s", arg);
1284 }
1285
1286 /* Attempt to synthesize a POW[F] (ARG0, ARG1) call using chains of
1287 square roots. Place at GSI and LOC. Limit the maximum depth
1288 of the sqrt chains to MAX_DEPTH. Return the tree holding the
1289 result of the expanded sequence or NULL_TREE if the expansion failed.
1290
1291 This routine assumes that ARG1 is a real number with a fractional part
1292 (the integer exponent case will have been handled earlier in
1293 gimple_expand_builtin_pow).
1294
1295 For ARG1 > 0.0:
1296 * For ARG1 composed of a whole part WHOLE_PART and a fractional part
1297 FRAC_PART i.e. WHOLE_PART == floor (ARG1) and
1298 FRAC_PART == ARG1 - WHOLE_PART:
1299 Produce POWI (ARG0, WHOLE_PART) * POW (ARG0, FRAC_PART) where
1300 POW (ARG0, FRAC_PART) is expanded as a product of square root chains
1301 if it can be expressed as such, that is if FRAC_PART satisfies:
1302 FRAC_PART == <SUM from i = 1 until MAX_DEPTH> (a[i] * (0.5**i))
1303 where integer a[i] is either 0 or 1.
1304
1305 Example:
1306 POW (x, 3.625) == POWI (x, 3) * POW (x, 0.625)
1307 --> POWI (x, 3) * SQRT (x) * SQRT (SQRT (SQRT (x)))
1308
1309 For ARG1 < 0.0 there are two approaches:
1310 * (A) Expand to 1.0 / POW (ARG0, -ARG1) where POW (ARG0, -ARG1)
1311 is calculated as above.
1312
1313 Example:
1314 POW (x, -5.625) == 1.0 / POW (x, 5.625)
1315 --> 1.0 / (POWI (x, 5) * SQRT (x) * SQRT (SQRT (SQRT (x))))
1316
1317 * (B) : WHOLE_PART := - ceil (abs (ARG1))
1318 FRAC_PART := ARG1 - WHOLE_PART
1319 and expand to POW (x, FRAC_PART) / POWI (x, WHOLE_PART).
1320 Example:
1321 POW (x, -5.875) == POW (x, 0.125) / POWI (X, 6)
1322 --> SQRT (SQRT (SQRT (x))) / (POWI (x, 6))
1323
1324 For ARG1 < 0.0 we choose between (A) and (B) depending on
1325 how many multiplications we'd have to do.
1326 So, for the example in (B): POW (x, -5.875), if we were to
1327 follow algorithm (A) we would produce:
1328 1.0 / POWI (X, 5) * SQRT (X) * SQRT (SQRT (X)) * SQRT (SQRT (SQRT (X)))
1329 which contains more multiplications than approach (B).
1330
1331 Hopefully, this approach will eliminate potentially expensive POW library
1332 calls when unsafe floating point math is enabled and allow the compiler to
1333 further optimise the multiplies, square roots and divides produced by this
1334 function. */
1335
1336 static tree
1337 expand_pow_as_sqrts (gimple_stmt_iterator *gsi, location_t loc,
1338 tree arg0, tree arg1, HOST_WIDE_INT max_depth)
1339 {
1340 tree type = TREE_TYPE (arg0);
1341 machine_mode mode = TYPE_MODE (type);
1342 tree sqrtfn = mathfn_built_in (type, BUILT_IN_SQRT);
1343 bool one_over = true;
1344
1345 if (!sqrtfn)
1346 return NULL_TREE;
1347
1348 if (TREE_CODE (arg1) != REAL_CST)
1349 return NULL_TREE;
1350
1351 REAL_VALUE_TYPE exp_init = TREE_REAL_CST (arg1);
1352
1353 gcc_assert (max_depth > 0);
1354 tree *cache = XALLOCAVEC (tree, max_depth + 1);
1355
1356 struct pow_synth_sqrt_info synth_info;
1357 synth_info.factors = XALLOCAVEC (bool, max_depth + 1);
1358 synth_info.deepest = 0;
1359 synth_info.num_mults = 0;
1360
1361 bool neg_exp = REAL_VALUE_NEGATIVE (exp_init);
1362 REAL_VALUE_TYPE exp = real_value_abs (&exp_init);
1363
1364 /* The whole and fractional parts of exp. */
1365 REAL_VALUE_TYPE whole_part;
1366 REAL_VALUE_TYPE frac_part;
1367
1368 real_floor (&whole_part, mode, &exp);
1369 real_arithmetic (&frac_part, MINUS_EXPR, &exp, &whole_part);
1370
1371
1372 REAL_VALUE_TYPE ceil_whole = dconst0;
1373 REAL_VALUE_TYPE ceil_fract = dconst0;
1374
1375 if (neg_exp)
1376 {
1377 real_ceil (&ceil_whole, mode, &exp);
1378 real_arithmetic (&ceil_fract, MINUS_EXPR, &ceil_whole, &exp);
1379 }
1380
1381 if (!representable_as_half_series_p (frac_part, max_depth, &synth_info))
1382 return NULL_TREE;
1383
1384 /* Check whether it's more profitable to not use 1.0 / ... */
1385 if (neg_exp)
1386 {
1387 struct pow_synth_sqrt_info alt_synth_info;
1388 alt_synth_info.factors = XALLOCAVEC (bool, max_depth + 1);
1389 alt_synth_info.deepest = 0;
1390 alt_synth_info.num_mults = 0;
1391
1392 if (representable_as_half_series_p (ceil_fract, max_depth,
1393 &alt_synth_info)
1394 && alt_synth_info.deepest <= synth_info.deepest
1395 && alt_synth_info.num_mults < synth_info.num_mults)
1396 {
1397 whole_part = ceil_whole;
1398 frac_part = ceil_fract;
1399 synth_info.deepest = alt_synth_info.deepest;
1400 synth_info.num_mults = alt_synth_info.num_mults;
1401 memcpy (synth_info.factors, alt_synth_info.factors,
1402 (max_depth + 1) * sizeof (bool));
1403 one_over = false;
1404 }
1405 }
1406
1407 HOST_WIDE_INT n = real_to_integer (&whole_part);
1408 REAL_VALUE_TYPE cint;
1409 real_from_integer (&cint, VOIDmode, n, SIGNED);
1410
1411 if (!real_identical (&whole_part, &cint))
1412 return NULL_TREE;
1413
1414 if (powi_cost (n) + synth_info.num_mults > POWI_MAX_MULTS)
1415 return NULL_TREE;
1416
1417 memset (cache, 0, (max_depth + 1) * sizeof (tree));
1418
1419 tree integer_res = n == 0 ? build_real (type, dconst1) : arg0;
1420
1421 /* Calculate the integer part of the exponent. */
1422 if (n > 1)
1423 {
1424 integer_res = gimple_expand_builtin_powi (gsi, loc, arg0, n);
1425 if (!integer_res)
1426 return NULL_TREE;
1427 }
1428
1429 if (dump_file)
1430 {
1431 char string[64];
1432
1433 real_to_decimal (string, &exp_init, sizeof (string), 0, 1);
1434 fprintf (dump_file, "synthesizing pow (x, %s) as:\n", string);
1435
1436 if (neg_exp)
1437 {
1438 if (one_over)
1439 {
1440 fprintf (dump_file, "1.0 / (");
1441 dump_integer_part (dump_file, "x", n);
1442 if (n > 0)
1443 fprintf (dump_file, " * ");
1444 dump_fractional_sqrt_sequence (dump_file, "x", &synth_info);
1445 fprintf (dump_file, ")");
1446 }
1447 else
1448 {
1449 dump_fractional_sqrt_sequence (dump_file, "x", &synth_info);
1450 fprintf (dump_file, " / (");
1451 dump_integer_part (dump_file, "x", n);
1452 fprintf (dump_file, ")");
1453 }
1454 }
1455 else
1456 {
1457 dump_fractional_sqrt_sequence (dump_file, "x", &synth_info);
1458 if (n > 0)
1459 fprintf (dump_file, " * ");
1460 dump_integer_part (dump_file, "x", n);
1461 }
1462
1463 fprintf (dump_file, "\ndeepest sqrt chain: %d\n", synth_info.deepest);
1464 }
1465
1466
1467 tree fract_res = NULL_TREE;
1468 cache[0] = arg0;
1469
1470 /* Calculate the fractional part of the exponent. */
1471 for (unsigned i = 0; i < synth_info.deepest; i++)
1472 {
1473 if (synth_info.factors[i])
1474 {
1475 tree sqrt_chain = get_fn_chain (arg0, i + 1, gsi, sqrtfn, loc, cache);
1476
1477 if (!fract_res)
1478 fract_res = sqrt_chain;
1479
1480 else
1481 fract_res = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
1482 fract_res, sqrt_chain);
1483 }
1484 }
1485
1486 tree res = NULL_TREE;
1487
1488 if (neg_exp)
1489 {
1490 if (one_over)
1491 {
1492 if (n > 0)
1493 res = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
1494 fract_res, integer_res);
1495 else
1496 res = fract_res;
1497
1498 res = build_and_insert_binop (gsi, loc, "powrootrecip", RDIV_EXPR,
1499 build_real (type, dconst1), res);
1500 }
1501 else
1502 {
1503 res = build_and_insert_binop (gsi, loc, "powroot", RDIV_EXPR,
1504 fract_res, integer_res);
1505 }
1506 }
1507 else
1508 res = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
1509 fract_res, integer_res);
1510 return res;
1511 }
1512
1513 /* ARG0 and ARG1 are the two arguments to a pow builtin call in GSI
1514 with location info LOC. If possible, create an equivalent and
1515 less expensive sequence of statements prior to GSI, and return an
1516 expression holding the result. */
1517
1518 static tree
1519 gimple_expand_builtin_pow (gimple_stmt_iterator *gsi, location_t loc,
1520 tree arg0, tree arg1)
1521 {
1522 REAL_VALUE_TYPE c, cint, dconst1_3, dconst1_4, dconst1_6;
1523 REAL_VALUE_TYPE c2, dconst3;
1524 HOST_WIDE_INT n;
1525 tree type, sqrtfn, cbrtfn, sqrt_arg0, result, cbrt_x, powi_cbrt_x;
1526 machine_mode mode;
1527 bool speed_p = optimize_bb_for_speed_p (gsi_bb (*gsi));
1528 bool hw_sqrt_exists, c_is_int, c2_is_int;
1529
1530 dconst1_4 = dconst1;
1531 SET_REAL_EXP (&dconst1_4, REAL_EXP (&dconst1_4) - 2);
1532
1533 /* If the exponent isn't a constant, there's nothing of interest
1534 to be done. */
1535 if (TREE_CODE (arg1) != REAL_CST)
1536 return NULL_TREE;
1537
1538 /* Don't perform the operation if flag_signaling_nans is on
1539 and the operand is a signaling NaN. */
1540 if (HONOR_SNANS (TYPE_MODE (TREE_TYPE (arg1)))
1541 && ((TREE_CODE (arg0) == REAL_CST
1542 && REAL_VALUE_ISSIGNALING_NAN (TREE_REAL_CST (arg0)))
1543 || REAL_VALUE_ISSIGNALING_NAN (TREE_REAL_CST (arg1))))
1544 return NULL_TREE;
1545
1546 /* If the exponent is equivalent to an integer, expand to an optimal
1547 multiplication sequence when profitable. */
1548 c = TREE_REAL_CST (arg1);
1549 n = real_to_integer (&c);
1550 real_from_integer (&cint, VOIDmode, n, SIGNED);
1551 c_is_int = real_identical (&c, &cint);
1552
1553 if (c_is_int
1554 && ((n >= -1 && n <= 2)
1555 || (flag_unsafe_math_optimizations
1556 && speed_p
1557 && powi_cost (n) <= POWI_MAX_MULTS)))
1558 return gimple_expand_builtin_powi (gsi, loc, arg0, n);
1559
1560 /* Attempt various optimizations using sqrt and cbrt. */
1561 type = TREE_TYPE (arg0);
1562 mode = TYPE_MODE (type);
1563 sqrtfn = mathfn_built_in (type, BUILT_IN_SQRT);
1564
1565 /* Optimize pow(x,0.5) = sqrt(x). This replacement is always safe
1566 unless signed zeros must be maintained. pow(-0,0.5) = +0, while
1567 sqrt(-0) = -0. */
1568 if (sqrtfn
1569 && real_equal (&c, &dconsthalf)
1570 && !HONOR_SIGNED_ZEROS (mode))
1571 return build_and_insert_call (gsi, loc, sqrtfn, arg0);
1572
1573 hw_sqrt_exists = optab_handler (sqrt_optab, mode) != CODE_FOR_nothing;
1574
1575 /* Optimize pow(x,1./3.) = cbrt(x). This requires unsafe math
1576 optimizations since 1./3. is not exactly representable. If x
1577 is negative and finite, the correct value of pow(x,1./3.) is
1578 a NaN with the "invalid" exception raised, because the value
1579 of 1./3. actually has an even denominator. The correct value
1580 of cbrt(x) is a negative real value. */
1581 cbrtfn = mathfn_built_in (type, BUILT_IN_CBRT);
1582 dconst1_3 = real_value_truncate (mode, dconst_third ());
1583
1584 if (flag_unsafe_math_optimizations
1585 && cbrtfn
1586 && (!HONOR_NANS (mode) || tree_expr_nonnegative_p (arg0))
1587 && real_equal (&c, &dconst1_3))
1588 return build_and_insert_call (gsi, loc, cbrtfn, arg0);
1589
1590 /* Optimize pow(x,1./6.) = cbrt(sqrt(x)). Don't do this optimization
1591 if we don't have a hardware sqrt insn. */
1592 dconst1_6 = dconst1_3;
1593 SET_REAL_EXP (&dconst1_6, REAL_EXP (&dconst1_6) - 1);
1594
1595 if (flag_unsafe_math_optimizations
1596 && sqrtfn
1597 && cbrtfn
1598 && (!HONOR_NANS (mode) || tree_expr_nonnegative_p (arg0))
1599 && speed_p
1600 && hw_sqrt_exists
1601 && real_equal (&c, &dconst1_6))
1602 {
1603 /* sqrt(x) */
1604 sqrt_arg0 = build_and_insert_call (gsi, loc, sqrtfn, arg0);
1605
1606 /* cbrt(sqrt(x)) */
1607 return build_and_insert_call (gsi, loc, cbrtfn, sqrt_arg0);
1608 }
1609
1610
1611 /* Attempt to expand the POW as a product of square root chains.
1612 Expand the 0.25 case even when optimising for size. */
1613 if (flag_unsafe_math_optimizations
1614 && sqrtfn
1615 && hw_sqrt_exists
1616 && (speed_p || real_equal (&c, &dconst1_4))
1617 && !HONOR_SIGNED_ZEROS (mode))
1618 {
1619 unsigned int max_depth = speed_p
1620 ? PARAM_VALUE (PARAM_MAX_POW_SQRT_DEPTH)
1621 : 2;
1622
1623 tree expand_with_sqrts
1624 = expand_pow_as_sqrts (gsi, loc, arg0, arg1, max_depth);
1625
1626 if (expand_with_sqrts)
1627 return expand_with_sqrts;
1628 }
1629
1630 real_arithmetic (&c2, MULT_EXPR, &c, &dconst2);
1631 n = real_to_integer (&c2);
1632 real_from_integer (&cint, VOIDmode, n, SIGNED);
1633 c2_is_int = real_identical (&c2, &cint);
1634
1635 /* Optimize pow(x,c), where 3c = n for some nonzero integer n, into
1636
1637 powi(x, n/3) * powi(cbrt(x), n%3), n > 0;
1638 1.0 / (powi(x, abs(n)/3) * powi(cbrt(x), abs(n)%3)), n < 0.
1639
1640 Do not calculate the first factor when n/3 = 0. As cbrt(x) is
1641 different from pow(x, 1./3.) due to rounding and behavior with
1642 negative x, we need to constrain this transformation to unsafe
1643 math and positive x or finite math. */
1644 real_from_integer (&dconst3, VOIDmode, 3, SIGNED);
1645 real_arithmetic (&c2, MULT_EXPR, &c, &dconst3);
1646 real_round (&c2, mode, &c2);
1647 n = real_to_integer (&c2);
1648 real_from_integer (&cint, VOIDmode, n, SIGNED);
1649 real_arithmetic (&c2, RDIV_EXPR, &cint, &dconst3);
1650 real_convert (&c2, mode, &c2);
1651
1652 if (flag_unsafe_math_optimizations
1653 && cbrtfn
1654 && (!HONOR_NANS (mode) || tree_expr_nonnegative_p (arg0))
1655 && real_identical (&c2, &c)
1656 && !c2_is_int
1657 && optimize_function_for_speed_p (cfun)
1658 && powi_cost (n / 3) <= POWI_MAX_MULTS)
1659 {
1660 tree powi_x_ndiv3 = NULL_TREE;
1661
1662 /* Attempt to fold powi(arg0, abs(n/3)) into multiplies. If not
1663 possible or profitable, give up. Skip the degenerate case when
1664 abs(n) < 3, where the result is always 1. */
1665 if (absu_hwi (n) >= 3)
1666 {
1667 powi_x_ndiv3 = gimple_expand_builtin_powi (gsi, loc, arg0,
1668 abs_hwi (n / 3));
1669 if (!powi_x_ndiv3)
1670 return NULL_TREE;
1671 }
1672
1673 /* Calculate powi(cbrt(x), n%3). Don't use gimple_expand_builtin_powi
1674 as that creates an unnecessary variable. Instead, just produce
1675 either cbrt(x) or cbrt(x) * cbrt(x). */
1676 cbrt_x = build_and_insert_call (gsi, loc, cbrtfn, arg0);
1677
1678 if (absu_hwi (n) % 3 == 1)
1679 powi_cbrt_x = cbrt_x;
1680 else
1681 powi_cbrt_x = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
1682 cbrt_x, cbrt_x);
1683
1684 /* Multiply the two subexpressions, unless powi(x,abs(n)/3) = 1. */
1685 if (absu_hwi (n) < 3)
1686 result = powi_cbrt_x;
1687 else
1688 result = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
1689 powi_x_ndiv3, powi_cbrt_x);
1690
1691 /* If n is negative, reciprocate the result. */
1692 if (n < 0)
1693 result = build_and_insert_binop (gsi, loc, "powroot", RDIV_EXPR,
1694 build_real (type, dconst1), result);
1695
1696 return result;
1697 }
1698
1699 /* No optimizations succeeded. */
1700 return NULL_TREE;
1701 }
1702
1703 /* ARG is the argument to a cabs builtin call in GSI with location info
1704 LOC. Create a sequence of statements prior to GSI that calculates
1705 sqrt(R*R + I*I), where R and I are the real and imaginary components
1706 of ARG, respectively. Return an expression holding the result. */
1707
1708 static tree
1709 gimple_expand_builtin_cabs (gimple_stmt_iterator *gsi, location_t loc, tree arg)
1710 {
1711 tree real_part, imag_part, addend1, addend2, sum, result;
1712 tree type = TREE_TYPE (TREE_TYPE (arg));
1713 tree sqrtfn = mathfn_built_in (type, BUILT_IN_SQRT);
1714 machine_mode mode = TYPE_MODE (type);
1715
1716 if (!flag_unsafe_math_optimizations
1717 || !optimize_bb_for_speed_p (gimple_bb (gsi_stmt (*gsi)))
1718 || !sqrtfn
1719 || optab_handler (sqrt_optab, mode) == CODE_FOR_nothing)
1720 return NULL_TREE;
1721
1722 real_part = build_and_insert_ref (gsi, loc, type, "cabs",
1723 REALPART_EXPR, arg);
1724 addend1 = build_and_insert_binop (gsi, loc, "cabs", MULT_EXPR,
1725 real_part, real_part);
1726 imag_part = build_and_insert_ref (gsi, loc, type, "cabs",
1727 IMAGPART_EXPR, arg);
1728 addend2 = build_and_insert_binop (gsi, loc, "cabs", MULT_EXPR,
1729 imag_part, imag_part);
1730 sum = build_and_insert_binop (gsi, loc, "cabs", PLUS_EXPR, addend1, addend2);
1731 result = build_and_insert_call (gsi, loc, sqrtfn, sum);
1732
1733 return result;
1734 }
1735
1736 /* Go through all calls to sin, cos and cexpi and call execute_cse_sincos_1
1737 on the SSA_NAME argument of each of them. Also expand powi(x,n) into
1738 an optimal number of multiplies, when n is a constant. */
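/* For example (an illustrative sketch of the expansion), powi (x, 5)
   can be emitted as three multiplies instead of a libcall:

     t1 = x * x;       t1 == x^2
     t2 = t1 * t1;     t2 == x^4
     r  = t2 * x;      r  == x^5  */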
1739
1740 namespace {
1741
1742 const pass_data pass_data_cse_sincos =
1743 {
1744 GIMPLE_PASS, /* type */
1745 "sincos", /* name */
1746 OPTGROUP_NONE, /* optinfo_flags */
1747 TV_NONE, /* tv_id */
1748 PROP_ssa, /* properties_required */
1749 PROP_gimple_opt_math, /* properties_provided */
1750 0, /* properties_destroyed */
1751 0, /* todo_flags_start */
1752 TODO_update_ssa, /* todo_flags_finish */
1753 };
1754
1755 class pass_cse_sincos : public gimple_opt_pass
1756 {
1757 public:
1758 pass_cse_sincos (gcc::context *ctxt)
1759 : gimple_opt_pass (pass_data_cse_sincos, ctxt)
1760 {}
1761
1762 /* opt_pass methods: */
1763 virtual bool gate (function *)
1764 {
1765 /* We no longer require either sincos or cexp, since powi expansion
1766 piggybacks on this pass. */
1767 return optimize;
1768 }
1769
1770 virtual unsigned int execute (function *);
1771
1772 }; // class pass_cse_sincos
1773
1774 unsigned int
1775 pass_cse_sincos::execute (function *fun)
1776 {
1777 basic_block bb;
1778 bool cfg_changed = false;
1779
1780 calculate_dominance_info (CDI_DOMINATORS);
1781 memset (&sincos_stats, 0, sizeof (sincos_stats));
1782
1783 FOR_EACH_BB_FN (bb, fun)
1784 {
1785 gimple_stmt_iterator gsi;
1786 bool cleanup_eh = false;
1787
1788 for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1789 {
1790 gimple *stmt = gsi_stmt (gsi);
1791
1792 /* Only the last stmt in a bb could throw, so there is no need to
1793 call gimple_purge_dead_eh_edges if we change something in the middle
1794 of a basic block. */
1795 cleanup_eh = false;
1796
1797 if (is_gimple_call (stmt)
1798 && gimple_call_lhs (stmt))
1799 {
1800 tree arg, arg0, arg1, result;
1801 HOST_WIDE_INT n;
1802 location_t loc;
1803
1804 switch (gimple_call_combined_fn (stmt))
1805 {
1806 CASE_CFN_COS:
1807 CASE_CFN_SIN:
1808 CASE_CFN_CEXPI:
1809 /* Make sure we have either sincos or cexp. */
1810 if (!targetm.libc_has_function (function_c99_math_complex)
1811 && !targetm.libc_has_function (function_sincos))
1812 break;
1813
1814 arg = gimple_call_arg (stmt, 0);
1815 if (TREE_CODE (arg) == SSA_NAME)
1816 cfg_changed |= execute_cse_sincos_1 (arg);
1817 break;
1818
1819 CASE_CFN_POW:
1820 arg0 = gimple_call_arg (stmt, 0);
1821 arg1 = gimple_call_arg (stmt, 1);
1822
1823 loc = gimple_location (stmt);
1824 result = gimple_expand_builtin_pow (&gsi, loc, arg0, arg1);
1825
1826 if (result)
1827 {
1828 tree lhs = gimple_get_lhs (stmt);
1829 gassign *new_stmt = gimple_build_assign (lhs, result);
1830 gimple_set_location (new_stmt, loc);
1831 unlink_stmt_vdef (stmt);
1832 gsi_replace (&gsi, new_stmt, true);
1833 cleanup_eh = true;
1834 if (gimple_vdef (stmt))
1835 release_ssa_name (gimple_vdef (stmt));
1836 }
1837 break;
1838
1839 CASE_CFN_POWI:
1840 arg0 = gimple_call_arg (stmt, 0);
1841 arg1 = gimple_call_arg (stmt, 1);
1842 loc = gimple_location (stmt);
1843
1844 if (real_minus_onep (arg0))
1845 {
1846 tree t0, t1, cond, one, minus_one;
1847 gassign *stmt;
1848
1849 t0 = TREE_TYPE (arg0);
1850 t1 = TREE_TYPE (arg1);
1851 one = build_real (t0, dconst1);
1852 minus_one = build_real (t0, dconstm1);
1853
1854 cond = make_temp_ssa_name (t1, NULL, "powi_cond");
1855 stmt = gimple_build_assign (cond, BIT_AND_EXPR,
1856 arg1, build_int_cst (t1, 1));
1857 gimple_set_location (stmt, loc);
1858 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
1859
1860 result = make_temp_ssa_name (t0, NULL, "powi");
1861 stmt = gimple_build_assign (result, COND_EXPR, cond,
1862 minus_one, one);
1863 gimple_set_location (stmt, loc);
1864 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
1865 }
1866 else
1867 {
1868 if (!tree_fits_shwi_p (arg1))
1869 break;
1870
1871 n = tree_to_shwi (arg1);
1872 result = gimple_expand_builtin_powi (&gsi, loc, arg0, n);
1873 }
1874
1875 if (result)
1876 {
1877 tree lhs = gimple_get_lhs (stmt);
1878 gassign *new_stmt = gimple_build_assign (lhs, result);
1879 gimple_set_location (new_stmt, loc);
1880 unlink_stmt_vdef (stmt);
1881 gsi_replace (&gsi, new_stmt, true);
1882 cleanup_eh = true;
1883 if (gimple_vdef (stmt))
1884 release_ssa_name (gimple_vdef (stmt));
1885 }
1886 break;
1887
1888 CASE_CFN_CABS:
1889 arg0 = gimple_call_arg (stmt, 0);
1890 loc = gimple_location (stmt);
1891 result = gimple_expand_builtin_cabs (&gsi, loc, arg0);
1892
1893 if (result)
1894 {
1895 tree lhs = gimple_get_lhs (stmt);
1896 gassign *new_stmt = gimple_build_assign (lhs, result);
1897 gimple_set_location (new_stmt, loc);
1898 unlink_stmt_vdef (stmt);
1899 gsi_replace (&gsi, new_stmt, true);
1900 cleanup_eh = true;
1901 if (gimple_vdef (stmt))
1902 release_ssa_name (gimple_vdef (stmt));
1903 }
1904 break;
1905
1906 default:;
1907 }
1908 }
1909 }
1910 if (cleanup_eh)
1911 cfg_changed |= gimple_purge_dead_eh_edges (bb);
1912 }
1913
1914 statistics_counter_event (fun, "sincos statements inserted",
1915 sincos_stats.inserted);
1916
1917 return cfg_changed ? TODO_cleanup_cfg : 0;
1918 }
1919
1920 } // anon namespace
1921
1922 gimple_opt_pass *
1923 make_pass_cse_sincos (gcc::context *ctxt)
1924 {
1925 return new pass_cse_sincos (ctxt);
1926 }
1927
1928 /* A symbolic number structure is used to detect byte permutation and selection
1929 patterns of a source. To achieve that, its field N contains an artificial
1930 number consisting of BITS_PER_MARKER sized markers tracking where each
1931 byte comes from in the source:
1932
1933 0 - target byte has the value 0
1934 FF - target byte has an unknown value (e.g. due to sign extension)
1935 1..size - marker value is the byte index in the source plus one (1 for lsb).
1936
1937 To detect permutations on memory sources (arrays and structures), a symbolic
1938 number is also associated:
1939 - a base address BASE_ADDR and an OFFSET giving the address of the source;
1940 - a range which gives the difference between the highest and lowest accessed
1941 memory location to make such a symbolic number;
1942 - the address SRC of the source element of lowest address as a convenience
1943 to easily get BASE_ADDR + offset + lowest bytepos.
1944
1945 Note 1: the range is different from size as size reflects the size of the
1946 type of the current expression. For instance, for an array char a[],
1947 (short) a[0] | (short) a[3] would have a size of 2 but a range of 4 while
1948 (short) a[0] | ((short) a[0] << 1) would still have a size of 2 but this
1949 time a range of 1.
1950
1951 Note 2: for non-memory sources, range holds the same value as size.
1952
1953 Note 3: SRC points to the SSA_NAME in case of non-memory source. */
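/* An illustrative example of the marker encoding: on a little-endian
   target a plain 32-bit load has n = 0x04030201 (source byte 1, the
   lsb, ends up in the least significant target byte), whereas a full
   byte swap of that load has n = 0x01020304.  */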
1954
1955 struct symbolic_number {
1956 uint64_t n;
1957 tree type;
1958 tree base_addr;
1959 tree offset;
1960 HOST_WIDE_INT bytepos;
1961 tree src;
1962 tree alias_set;
1963 tree vuse;
1964 unsigned HOST_WIDE_INT range;
1965 };
1966
1967 #define BITS_PER_MARKER 8
1968 #define MARKER_MASK ((1 << BITS_PER_MARKER) - 1)
1969 #define MARKER_BYTE_UNKNOWN MARKER_MASK
1970 #define HEAD_MARKER(n, size) \
1971 ((n) & ((uint64_t) MARKER_MASK << (((size) - 1) * BITS_PER_MARKER)))
1972
1973 /* The number which the find_bswap_or_nop_1 result should match in
1974 order to have a nop. The number is masked according to the size of
1975 the symbolic number before using it. */
1976 #define CMPNOP (sizeof (int64_t) < 8 ? 0 : \
1977 (uint64_t)0x08070605 << 32 | 0x04030201)
1978
1979 /* The number which the find_bswap_or_nop_1 result should match in
1980 order to have a byte swap. The number is masked according to the
1981 size of the symbolic number before using it. */
1982 #define CMPXCHG (sizeof (int64_t) < 8 ? 0 : \
1983 (uint64_t)0x01020304 << 32 | 0x05060708)
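/* For instance (an illustrative consequence of the masking described
   above), for a 2-byte symbolic number the comparison values become
   0x0201 for a nop and 0x0102 for a byte swap.  */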
1984
1985 /* Perform a SHIFT or ROTATE operation by COUNT bits on symbolic
1986 number N. Return false if the requested operation is not permitted
1987 on a symbolic number. */
1988
1989 static inline bool
1990 do_shift_rotate (enum tree_code code,
1991 struct symbolic_number *n,
1992 int count)
1993 {
1994 int i, size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
1995 unsigned head_marker;
1996
1997 if (count % BITS_PER_UNIT != 0)
1998 return false;
1999 count = (count / BITS_PER_UNIT) * BITS_PER_MARKER;
2000
2001 /* Zero out the extra bits of N in order to avoid them being shifted
2002 into the significant bits. */
2003 if (size < 64 / BITS_PER_MARKER)
2004 n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1;
2005
2006 switch (code)
2007 {
2008 case LSHIFT_EXPR:
2009 n->n <<= count;
2010 break;
2011 case RSHIFT_EXPR:
2012 head_marker = HEAD_MARKER (n->n, size);
2013 n->n >>= count;
2014 /* Arithmetic shift of signed type: result is dependent on the value. */
2015 if (!TYPE_UNSIGNED (n->type) && head_marker)
2016 for (i = 0; i < count / BITS_PER_MARKER; i++)
2017 n->n |= (uint64_t) MARKER_BYTE_UNKNOWN
2018 << ((size - 1 - i) * BITS_PER_MARKER);
2019 break;
2020 case LROTATE_EXPR:
2021 n->n = (n->n << count) | (n->n >> ((size * BITS_PER_MARKER) - count));
2022 break;
2023 case RROTATE_EXPR:
2024 n->n = (n->n >> count) | (n->n << ((size * BITS_PER_MARKER) - count));
2025 break;
2026 default:
2027 return false;
2028 }
2029 /* Zero unused bits for size. */
2030 if (size < 64 / BITS_PER_MARKER)
2031 n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1;
2032 return true;
2033 }
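/* An illustrative example: with a 4-byte unsigned type, shifting the
   identity number 0x04030201 right by 8 bits yields 0x00040302,
   i.e. the low result byte now comes from source byte 2.  With a
   signed type whose head marker is set, the vacated high byte
   becomes MARKER_BYTE_UNKNOWN (0xff) instead of 0.  */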
2034
2035 /* Perform sanity checking for the symbolic number N and the gimple
2036 statement STMT. */
2037
2038 static inline bool
2039 verify_symbolic_number_p (struct symbolic_number *n, gimple *stmt)
2040 {
2041 tree lhs_type;
2042
2043 lhs_type = gimple_expr_type (stmt);
2044
2045 if (TREE_CODE (lhs_type) != INTEGER_TYPE)
2046 return false;
2047
2048 if (TYPE_PRECISION (lhs_type) != TYPE_PRECISION (n->type))
2049 return false;
2050
2051 return true;
2052 }
2053
2054 /* Initialize the symbolic number N for the bswap pass from the base element
2055 SRC manipulated by the bitwise OR expression. */
2056
2057 static bool
2058 init_symbolic_number (struct symbolic_number *n, tree src)
2059 {
2060 int size;
2061
2062 n->base_addr = n->offset = n->alias_set = n->vuse = NULL_TREE;
2063 n->src = src;
2064
2065 /* Set up the symbolic number N by setting each byte to a value between 1 and
2066 the byte size of rhs1. The highest order byte is set to that size and the
2067 lowest order byte to 1. */
2068 n->type = TREE_TYPE (src);
2069 size = TYPE_PRECISION (n->type);
2070 if (size % BITS_PER_UNIT != 0)
2071 return false;
2072 size /= BITS_PER_UNIT;
2073 if (size > 64 / BITS_PER_MARKER)
2074 return false;
2075 n->range = size;
2076 n->n = CMPNOP;
2077
2078 if (size < 64 / BITS_PER_MARKER)
2079 n->n &= ((uint64_t) 1 << (size * BITS_PER_MARKER)) - 1;
2080
2081 return true;
2082 }
2083
2084 /* Check whether STMT might be a byte swap or a nop from a memory source and
2085 return the answer. If so, REF is that memory source, and the base of the
2086 memory area accessed and the offset of the access from that base are recorded in N. */
2087
2088 bool
2089 find_bswap_or_nop_load (gimple *stmt, tree ref, struct symbolic_number *n)
2090 {
2091 /* Leaf node is an array or component ref. Memorize its base and
2092 offset from base to compare to other such leaf nodes. */
2093 HOST_WIDE_INT bitsize, bitpos;
2094 machine_mode mode;
2095 int unsignedp, reversep, volatilep;
2096 tree offset, base_addr;
2097
2098 /* Not prepared to handle PDP endian. */
2099 if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN)
2100 return false;
2101
2102 if (!gimple_assign_load_p (stmt) || gimple_has_volatile_ops (stmt))
2103 return false;
2104
2105 base_addr = get_inner_reference (ref, &bitsize, &bitpos, &offset, &mode,
2106 &unsignedp, &reversep, &volatilep, false);
2107
2108 if (TREE_CODE (base_addr) == MEM_REF)
2109 {
2110 offset_int bit_offset = 0;
2111 tree off = TREE_OPERAND (base_addr, 1);
2112
2113 if (!integer_zerop (off))
2114 {
2115 offset_int boff, coff = mem_ref_offset (base_addr);
2116 boff = wi::lshift (coff, LOG2_BITS_PER_UNIT);
2117 bit_offset += boff;
2118 }
2119
2120 base_addr = TREE_OPERAND (base_addr, 0);
2121
2122 /* Avoid returning a negative bitpos as this may wreak havoc later. */
2123 if (wi::neg_p (bit_offset))
2124 {
2125 offset_int mask = wi::mask <offset_int> (LOG2_BITS_PER_UNIT, false);
2126 offset_int tem = bit_offset.and_not (mask);
2127 /* TEM is the bitpos rounded to BITS_PER_UNIT towards -Inf.
2128 Subtract it from BIT_OFFSET and add it (scaled) to OFFSET. */
2129 bit_offset -= tem;
2130 tem = wi::arshift (tem, LOG2_BITS_PER_UNIT);
2131 if (offset)
2132 offset = size_binop (PLUS_EXPR, offset,
2133 wide_int_to_tree (sizetype, tem));
2134 else
2135 offset = wide_int_to_tree (sizetype, tem);
2136 }
2137
2138 bitpos += bit_offset.to_shwi ();
2139 }
2140
2141 if (bitpos % BITS_PER_UNIT)
2142 return false;
2143 if (bitsize % BITS_PER_UNIT)
2144 return false;
2145 if (reversep)
2146 return false;
2147
2148 if (!init_symbolic_number (n, ref))
2149 return false;
2150 n->base_addr = base_addr;
2151 n->offset = offset;
2152 n->bytepos = bitpos / BITS_PER_UNIT;
2153 n->alias_set = reference_alias_ptr_type (ref);
2154 n->vuse = gimple_vuse (stmt);
2155 return true;
2156 }
2157
2158 /* Compute the symbolic number N representing the result of a bitwise OR on 2
2159 symbolic number N1 and N2 whose source statements are respectively
2160 SOURCE_STMT1 and SOURCE_STMT2. */
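/* For example (illustrative): on a little-endian target, merging a
   16-bit load at bytepos 0 (n = 0x0201) with a 16-bit load at
   bytepos 2 of the same base that was shifted left by 16 bits in the
   OR (n = 0x02010000, adjusted below to 0x04030000) yields range 4
   and n = 0x04030201, i.e. a plain 32-bit load.  */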
2161
2162 static gimple *
2163 perform_symbolic_merge (gimple *source_stmt1, struct symbolic_number *n1,
2164 gimple *source_stmt2, struct symbolic_number *n2,
2165 struct symbolic_number *n)
2166 {
2167 int i, size;
2168 uint64_t mask;
2169 gimple *source_stmt;
2170 struct symbolic_number *n_start;
2171
2172 /* Sources are different; cancel bswap if they are not memory locations
2173 with the same base (array, structure, ...). */
2174 if (gimple_assign_rhs1 (source_stmt1) != gimple_assign_rhs1 (source_stmt2))
2175 {
2176 uint64_t inc;
2177 HOST_WIDE_INT start_sub, end_sub, end1, end2, end;
2178 struct symbolic_number *toinc_n_ptr, *n_end;
2179 basic_block bb1, bb2;
2180
2181 if (!n1->base_addr || !n2->base_addr
2182 || !operand_equal_p (n1->base_addr, n2->base_addr, 0))
2183 return NULL;
2184
2185 if (!n1->offset != !n2->offset
2186 || (n1->offset && !operand_equal_p (n1->offset, n2->offset, 0)))
2187 return NULL;
2188
2189 if (n1->bytepos < n2->bytepos)
2190 {
2191 n_start = n1;
2192 start_sub = n2->bytepos - n1->bytepos;
2193 }
2194 else
2195 {
2196 n_start = n2;
2197 start_sub = n1->bytepos - n2->bytepos;
2198 }
2199
2200 bb1 = gimple_bb (source_stmt1);
2201 bb2 = gimple_bb (source_stmt2);
2202 if (dominated_by_p (CDI_DOMINATORS, bb1, bb2))
2203 source_stmt = source_stmt1;
2204 else
2205 source_stmt = source_stmt2;
2206
2207 /* Find the highest address at which a load is performed and
2208 compute related info. */
2209 end1 = n1->bytepos + (n1->range - 1);
2210 end2 = n2->bytepos + (n2->range - 1);
2211 if (end1 < end2)
2212 {
2213 end = end2;
2214 end_sub = end2 - end1;
2215 }
2216 else
2217 {
2218 end = end1;
2219 end_sub = end1 - end2;
2220 }
2221 n_end = (end2 > end1) ? n2 : n1;
2222
2223 /* Find symbolic number whose lsb is the most significant. */
2224 if (BYTES_BIG_ENDIAN)
2225 toinc_n_ptr = (n_end == n1) ? n2 : n1;
2226 else
2227 toinc_n_ptr = (n_start == n1) ? n2 : n1;
2228
2229 n->range = end - n_start->bytepos + 1;
2230
2231 /* Check that the range of memory covered can be represented by
2232 a symbolic number. */
2233 if (n->range > 64 / BITS_PER_MARKER)
2234 return NULL;
2235
2236 /* Reinterpret the byte marks in the symbolic number holding the value of
2237 bigger weight according to the target endianness. */
2238 inc = BYTES_BIG_ENDIAN ? end_sub : start_sub;
2239 size = TYPE_PRECISION (n1->type) / BITS_PER_UNIT;
2240 for (i = 0; i < size; i++, inc <<= BITS_PER_MARKER)
2241 {
2242 unsigned marker
2243 = (toinc_n_ptr->n >> (i * BITS_PER_MARKER)) & MARKER_MASK;
2244 if (marker && marker != MARKER_BYTE_UNKNOWN)
2245 toinc_n_ptr->n += inc;
2246 }
2247 }
2248 else
2249 {
2250 n->range = n1->range;
2251 n_start = n1;
2252 source_stmt = source_stmt1;
2253 }
2254
2255 if (!n1->alias_set
2256 || alias_ptr_types_compatible_p (n1->alias_set, n2->alias_set))
2257 n->alias_set = n1->alias_set;
2258 else
2259 n->alias_set = ptr_type_node;
2260 n->vuse = n_start->vuse;
2261 n->base_addr = n_start->base_addr;
2262 n->offset = n_start->offset;
2263 n->src = n_start->src;
2264 n->bytepos = n_start->bytepos;
2265 n->type = n_start->type;
2266 size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
2267
2268 for (i = 0, mask = MARKER_MASK; i < size; i++, mask <<= BITS_PER_MARKER)
2269 {
2270 uint64_t masked1, masked2;
2271
2272 masked1 = n1->n & mask;
2273 masked2 = n2->n & mask;
2274 if (masked1 && masked2 && masked1 != masked2)
2275 return NULL;
2276 }
2277 n->n = n1->n | n2->n;
2278
2279 return source_stmt;
2280 }
2281
2282 /* find_bswap_or_nop_1 invokes itself recursively with N and tries to perform
2283 the operation given by the rhs of STMT on the result. If the operation
2284 could successfully be executed the function returns a gimple stmt whose
2285 rhs's first tree is the expression of the source operand; otherwise it
2286 returns NULL. */
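/* The kind of source idiom this walk recognizes is, for example
   (an illustrative sketch):

     uint32_t
     swap32 (uint32_t x)
     {
       return ((x & 0x000000ff) << 24)
              | ((x & 0x0000ff00) << 8)
              | ((x & 0x00ff0000) >> 8)
              | ((x & 0xff000000) >> 24);
     }  */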
2287
2288 static gimple *
2289 find_bswap_or_nop_1 (gimple *stmt, struct symbolic_number *n, int limit)
2290 {
2291 enum tree_code code;
2292 tree rhs1, rhs2 = NULL;
2293 gimple *rhs1_stmt, *rhs2_stmt, *source_stmt1;
2294 enum gimple_rhs_class rhs_class;
2295
2296 if (!limit || !is_gimple_assign (stmt))
2297 return NULL;
2298
2299 rhs1 = gimple_assign_rhs1 (stmt);
2300
2301 if (find_bswap_or_nop_load (stmt, rhs1, n))
2302 return stmt;
2303
2304 if (TREE_CODE (rhs1) != SSA_NAME)
2305 return NULL;
2306
2307 code = gimple_assign_rhs_code (stmt);
2308 rhs_class = gimple_assign_rhs_class (stmt);
2309 rhs1_stmt = SSA_NAME_DEF_STMT (rhs1);
2310
2311 if (rhs_class == GIMPLE_BINARY_RHS)
2312 rhs2 = gimple_assign_rhs2 (stmt);
2313
2314 /* Handle unary rhs and binary rhs with integer constants as second
2315 operand. */
2316
2317 if (rhs_class == GIMPLE_UNARY_RHS
2318 || (rhs_class == GIMPLE_BINARY_RHS
2319 && TREE_CODE (rhs2) == INTEGER_CST))
2320 {
2321 if (code != BIT_AND_EXPR
2322 && code != LSHIFT_EXPR
2323 && code != RSHIFT_EXPR
2324 && code != LROTATE_EXPR
2325 && code != RROTATE_EXPR
2326 && !CONVERT_EXPR_CODE_P (code))
2327 return NULL;
2328
2329 source_stmt1 = find_bswap_or_nop_1 (rhs1_stmt, n, limit - 1);
2330
2331 /* If find_bswap_or_nop_1 returned NULL, STMT is a leaf node and
2332 we have to initialize the symbolic number. */
2333 if (!source_stmt1)
2334 {
2335 if (gimple_assign_load_p (stmt)
2336 || !init_symbolic_number (n, rhs1))
2337 return NULL;
2338 source_stmt1 = stmt;
2339 }
2340
2341 switch (code)
2342 {
2343 case BIT_AND_EXPR:
2344 {
2345 int i, size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
2346 uint64_t val = int_cst_value (rhs2), mask = 0;
2347 uint64_t tmp = (1 << BITS_PER_UNIT) - 1;
2348
2349 /* Only constants masking full bytes are allowed. */
2350 for (i = 0; i < size; i++, tmp <<= BITS_PER_UNIT)
2351 if ((val & tmp) != 0 && (val & tmp) != tmp)
2352 return NULL;
2353 else if (val & tmp)
2354 mask |= (uint64_t) MARKER_MASK << (i * BITS_PER_MARKER);
2355
2356 n->n &= mask;
2357 }
2358 break;
2359 case LSHIFT_EXPR:
2360 case RSHIFT_EXPR:
2361 case LROTATE_EXPR:
2362 case RROTATE_EXPR:
2363 if (!do_shift_rotate (code, n, (int) TREE_INT_CST_LOW (rhs2)))
2364 return NULL;
2365 break;
2366 CASE_CONVERT:
2367 {
2368 int i, type_size, old_type_size;
2369 tree type;
2370
2371 type = gimple_expr_type (stmt);
2372 type_size = TYPE_PRECISION (type);
2373 if (type_size % BITS_PER_UNIT != 0)
2374 return NULL;
2375 type_size /= BITS_PER_UNIT;
2376 if (type_size > 64 / BITS_PER_MARKER)
2377 return NULL;
2378
2379 /* Sign extension: result is dependent on the value. */
2380 old_type_size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
2381 if (!TYPE_UNSIGNED (n->type) && type_size > old_type_size
2382 && HEAD_MARKER (n->n, old_type_size))
2383 for (i = 0; i < type_size - old_type_size; i++)
2384 n->n |= (uint64_t) MARKER_BYTE_UNKNOWN
2385 << ((type_size - 1 - i) * BITS_PER_MARKER);
2386
2387 if (type_size < 64 / BITS_PER_MARKER)
2388 {
2389 /* If STMT casts to a smaller type mask out the bits not
2390 belonging to the target type. */
2391 n->n &= ((uint64_t) 1 << (type_size * BITS_PER_MARKER)) - 1;
2392 }
2393 n->type = type;
2394 if (!n->base_addr)
2395 n->range = type_size;
2396 }
2397 break;
2398 default:
2399 return NULL;
2400 };
2401 return verify_symbolic_number_p (n, stmt) ? source_stmt1 : NULL;
2402 }
2403
2404 /* Handle binary rhs. */
2405
2406 if (rhs_class == GIMPLE_BINARY_RHS)
2407 {
2408 struct symbolic_number n1, n2;
2409 gimple *source_stmt, *source_stmt2;
2410
2411 if (code != BIT_IOR_EXPR)
2412 return NULL;
2413
2414 if (TREE_CODE (rhs2) != SSA_NAME)
2415 return NULL;
2416
2417 rhs2_stmt = SSA_NAME_DEF_STMT (rhs2);
2418
2419 switch (code)
2420 {
2421 case BIT_IOR_EXPR:
2422 source_stmt1 = find_bswap_or_nop_1 (rhs1_stmt, &n1, limit - 1);
2423
2424 if (!source_stmt1)
2425 return NULL;
2426
2427 source_stmt2 = find_bswap_or_nop_1 (rhs2_stmt, &n2, limit - 1);
2428
2429 if (!source_stmt2)
2430 return NULL;
2431
2432 if (TYPE_PRECISION (n1.type) != TYPE_PRECISION (n2.type))
2433 return NULL;
2434
2435 if (!n1.vuse != !n2.vuse
2436 || (n1.vuse && !operand_equal_p (n1.vuse, n2.vuse, 0)))
2437 return NULL;
2438
2439 source_stmt
2440 = perform_symbolic_merge (source_stmt1, &n1, source_stmt2, &n2, n);
2441
2442 if (!source_stmt)
2443 return NULL;
2444
2445 if (!verify_symbolic_number_p (n, stmt))
2446 return NULL;
2447
2448 break;
2449 default:
2450 return NULL;
2451 }
2452 return source_stmt;
2453 }
2454 return NULL;
2455 }
2456
2457 /* Check if STMT completes a bswap implementation or a read in a given
2458 endianness consisting of ORs, SHIFTs and ANDs and sets *BSWAP
2459 accordingly. It also sets N to represent the kind of operations
2460 performed: size of the resulting expression and whether it works on
2461 a memory source, and if so alias-set and vuse. Finally, the
2462 function returns a stmt whose rhs's first tree is the source
2463 expression. */
2464
2465 static gimple *
2466 find_bswap_or_nop (gimple *stmt, struct symbolic_number *n, bool *bswap)
2467 {
2468 /* The number which the find_bswap_or_nop_1 result should match in order
2469 to have a full byte swap. The number is shifted to the right
2470 according to the size of the symbolic number before using it. */
2471 uint64_t cmpxchg = CMPXCHG;
2472 uint64_t cmpnop = CMPNOP;
2473
2474 gimple *ins_stmt;
2475 int limit;
2476
2477 /* The last parameter determines the search depth limit. It usually
2478 correlates directly to the number n of bytes to be touched. We
2479 increase that number by log2(n) + 1 here in order to also
2480 cover signed -> unsigned conversions of the src operand as can be seen
2481 in libgcc, and for an initial shift/and operation of the src operand. */
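/* Illustrative: for a 4-byte result this gives
   limit = 4 + 1 + ceil_log2 (4) = 7.  */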
2482 limit = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (gimple_expr_type (stmt)));
2483 limit += 1 + (int) ceil_log2 ((unsigned HOST_WIDE_INT) limit);
2484 ins_stmt = find_bswap_or_nop_1 (stmt, n, limit);
2485
2486 if (!ins_stmt)
2487 return NULL;
2488
2489 /* Find real size of result (highest non-zero byte). */
2490 if (n->base_addr)
2491 {
2492 unsigned HOST_WIDE_INT rsize;
2493 uint64_t tmpn;
2494
2495 for (tmpn = n->n, rsize = 0; tmpn; tmpn >>= BITS_PER_MARKER, rsize++);
2496 if (BYTES_BIG_ENDIAN && n->range != rsize)
2497 /* This implies an offset, which is currently not handled by
2498 bswap_replace. */
2499 return NULL;
2500 n->range = rsize;
2501 }
2502
2503 /* Zero out the extra bits of N and CMP*. */
2504 if (n->range < (int) sizeof (int64_t))
2505 {
2506 uint64_t mask;
2507
2508 mask = ((uint64_t) 1 << (n->range * BITS_PER_MARKER)) - 1;
2509 cmpxchg >>= (64 / BITS_PER_MARKER - n->range) * BITS_PER_MARKER;
2510 cmpnop &= mask;
2511 }
2512
2513 /* A complete byte swap should make the symbolic number start with
2514 the largest digit in the highest order byte. An unchanged symbolic
2515 number indicates a read with the same endianness as the target architecture. */
2516 if (n->n == cmpnop)
2517 *bswap = false;
2518 else if (n->n == cmpxchg)
2519 *bswap = true;
2520 else
2521 return NULL;
2522
2523 /* Useless bit manipulation performed by code. */
2524 if (!n->base_addr && n->n == cmpnop)
2525 return NULL;
2526
2527 n->range *= BITS_PER_UNIT;
2528 return ins_stmt;
2529 }
2530
2531 namespace {
2532
2533 const pass_data pass_data_optimize_bswap =
2534 {
2535 GIMPLE_PASS, /* type */
2536 "bswap", /* name */
2537 OPTGROUP_NONE, /* optinfo_flags */
2538 TV_NONE, /* tv_id */
2539 PROP_ssa, /* properties_required */
2540 0, /* properties_provided */
2541 0, /* properties_destroyed */
2542 0, /* todo_flags_start */
2543 0, /* todo_flags_finish */
2544 };
2545
2546 class pass_optimize_bswap : public gimple_opt_pass
2547 {
2548 public:
2549 pass_optimize_bswap (gcc::context *ctxt)
2550 : gimple_opt_pass (pass_data_optimize_bswap, ctxt)
2551 {}
2552
2553 /* opt_pass methods: */
2554 virtual bool gate (function *)
2555 {
2556 return flag_expensive_optimizations && optimize;
2557 }
2558
2559 virtual unsigned int execute (function *);
2560
2561 }; // class pass_optimize_bswap
2562
2563 /* Perform the bswap optimization: replace the expression computed in the rhs
2564 of CUR_STMT by an equivalent bswap, load or load + bswap expression.
2565 Which of these alternatives replaces the rhs is given by N->base_addr (non
2566 null if a load is needed) and BSWAP. The type, VUSE and alias set of the
2567 load to perform are also given in N while the builtin bswap invocation is
2568 given in FNDECL. Finally, if a load is involved, INS_STMT refers to one of
2569 the load statements involved to construct the rhs in CUR_STMT and N->range
2570 gives the size of the rhs expression for maintaining some statistics.
2571
2572 Note that if the replacement involves a load, CUR_STMT is moved just before
2573 INS_STMT to do the load with the same VUSE, which can lead to CUR_STMT
2574 changing basic block. */
2575
2576 static bool
2577 bswap_replace (gimple *cur_stmt, gimple *ins_stmt, tree fndecl,
2578 tree bswap_type, tree load_type, struct symbolic_number *n,
2579 bool bswap)
2580 {
2581 gimple_stmt_iterator gsi;
2582 tree src, tmp, tgt;
2583 gimple *bswap_stmt;
2584
2585 gsi = gsi_for_stmt (cur_stmt);
2586 src = n->src;
2587 tgt = gimple_assign_lhs (cur_stmt);
2588
2589 /* Need to load the value from memory first. */
2590 if (n->base_addr)
2591 {
2592 gimple_stmt_iterator gsi_ins = gsi_for_stmt (ins_stmt);
2593 tree addr_expr, addr_tmp, val_expr, val_tmp;
2594 tree load_offset_ptr, aligned_load_type;
2595 gimple *addr_stmt, *load_stmt;
2596 unsigned align;
2597 HOST_WIDE_INT load_offset = 0;
2598 basic_block ins_bb, cur_bb;
2599
2600 ins_bb = gimple_bb (ins_stmt);
2601 cur_bb = gimple_bb (cur_stmt);
2602 if (!dominated_by_p (CDI_DOMINATORS, cur_bb, ins_bb))
2603 return false;
2604
2605 align = get_object_alignment (src);
2606 /* If the new access is smaller than the original one, we need
2607 to perform big endian adjustment. */
2608 if (BYTES_BIG_ENDIAN)
2609 {
2610 HOST_WIDE_INT bitsize, bitpos;
2611 machine_mode mode;
2612 int unsignedp, reversep, volatilep;
2613 tree offset;
2614
2615 get_inner_reference (src, &bitsize, &bitpos, &offset, &mode,
2616 &unsignedp, &reversep, &volatilep, false);
2617 if (n->range < (unsigned HOST_WIDE_INT) bitsize)
2618 {
2619 load_offset = (bitsize - n->range) / BITS_PER_UNIT;
2620 unsigned HOST_WIDE_INT l
2621 = (load_offset * BITS_PER_UNIT) & (align - 1);
2622 if (l)
2623 align = l & -l;
2624 }
2625 }
2626
2627 if (bswap
2628 && align < GET_MODE_ALIGNMENT (TYPE_MODE (load_type))
2629 && SLOW_UNALIGNED_ACCESS (TYPE_MODE (load_type), align))
2630 return false;
2631
2632 /* Move cur_stmt just before one of the loads of the original
2633 to ensure it has the same VUSE. See PR61517 for what could
2634 go wrong. */
2635 if (gimple_bb (cur_stmt) != gimple_bb (ins_stmt))
2636 reset_flow_sensitive_info (gimple_assign_lhs (cur_stmt));
2637 gsi_move_before (&gsi, &gsi_ins);
2638 gsi = gsi_for_stmt (cur_stmt);
2639
2640 /* Compute address to load from and cast according to the size
2641 of the load. */
2642 addr_expr = build_fold_addr_expr (unshare_expr (src));
2643 if (is_gimple_mem_ref_addr (addr_expr))
2644 addr_tmp = addr_expr;
2645 else
2646 {
2647 addr_tmp = make_temp_ssa_name (TREE_TYPE (addr_expr), NULL,
2648 "load_src");
2649 addr_stmt = gimple_build_assign (addr_tmp, addr_expr);
2650 gsi_insert_before (&gsi, addr_stmt, GSI_SAME_STMT);
2651 }
2652
2653 /* Perform the load. */
2654 aligned_load_type = load_type;
2655 if (align < TYPE_ALIGN (load_type))
2656 aligned_load_type = build_aligned_type (load_type, align);
2657 load_offset_ptr = build_int_cst (n->alias_set, load_offset);
2658 val_expr = fold_build2 (MEM_REF, aligned_load_type, addr_tmp,
2659 load_offset_ptr);
2660
2661 if (!bswap)
2662 {
2663 if (n->range == 16)
2664 nop_stats.found_16bit++;
2665 else if (n->range == 32)
2666 nop_stats.found_32bit++;
2667 else
2668 {
2669 gcc_assert (n->range == 64);
2670 nop_stats.found_64bit++;
2671 }
2672
2673 /* Convert the result of load if necessary. */
2674 if (!useless_type_conversion_p (TREE_TYPE (tgt), load_type))
2675 {
2676 val_tmp = make_temp_ssa_name (aligned_load_type, NULL,
2677 "load_dst");
2678 load_stmt = gimple_build_assign (val_tmp, val_expr);
2679 gimple_set_vuse (load_stmt, n->vuse);
2680 gsi_insert_before (&gsi, load_stmt, GSI_SAME_STMT);
2681 gimple_assign_set_rhs_with_ops (&gsi, NOP_EXPR, val_tmp);
2682 }
2683 else
2684 {
2685 gimple_assign_set_rhs_with_ops (&gsi, MEM_REF, val_expr);
2686 gimple_set_vuse (cur_stmt, n->vuse);
2687 }
2688 update_stmt (cur_stmt);
2689
2690 if (dump_file)
2691 {
2692 fprintf (dump_file,
2693 "%d bit load in target endianness found at: ",
2694 (int) n->range);
2695 print_gimple_stmt (dump_file, cur_stmt, 0, 0);
2696 }
2697 return true;
2698 }
2699 else
2700 {
2701 val_tmp = make_temp_ssa_name (aligned_load_type, NULL, "load_dst");
2702 load_stmt = gimple_build_assign (val_tmp, val_expr);
2703 gimple_set_vuse (load_stmt, n->vuse);
2704 gsi_insert_before (&gsi, load_stmt, GSI_SAME_STMT);
2705 }
2706 src = val_tmp;
2707 }
2708
2709 if (n->range == 16)
2710 bswap_stats.found_16bit++;
2711 else if (n->range == 32)
2712 bswap_stats.found_32bit++;
2713 else
2714 {
2715 gcc_assert (n->range == 64);
2716 bswap_stats.found_64bit++;
2717 }
2718
2719 tmp = src;
2720
2721 /* Convert the src expression if necessary. */
2722 if (!useless_type_conversion_p (TREE_TYPE (tmp), bswap_type))
2723 {
2724 gimple *convert_stmt;
2725
2726 tmp = make_temp_ssa_name (bswap_type, NULL, "bswapsrc");
2727 convert_stmt = gimple_build_assign (tmp, NOP_EXPR, src);
2728 gsi_insert_before (&gsi, convert_stmt, GSI_SAME_STMT);
2729 }
2730
2731 /* Canonical form for 16 bit bswap is a rotate expression. Only 16-bit values
2732 are considered, as a rotation of 2N-bit values by N bits is generally not
2733 equivalent to a bswap. Consider for instance 0x01020304 r>> 16 which
2734 gives 0x03040102 while a bswap for that value is 0x04030201. */
2735 if (bswap && n->range == 16)
2736 {
2737 tree count = build_int_cst (NULL, BITS_PER_UNIT);
2738 src = fold_build2 (LROTATE_EXPR, bswap_type, tmp, count);
2739 bswap_stmt = gimple_build_assign (NULL, src);
2740 }
2741 else
2742 bswap_stmt = gimple_build_call (fndecl, 1, tmp);
2743
2744 tmp = tgt;
2745
2746 /* Convert the result if necessary. */
2747 if (!useless_type_conversion_p (TREE_TYPE (tgt), bswap_type))
2748 {
2749 gimple *convert_stmt;
2750
2751 tmp = make_temp_ssa_name (bswap_type, NULL, "bswapdst");
2752 convert_stmt = gimple_build_assign (tgt, NOP_EXPR, tmp);
2753 gsi_insert_after (&gsi, convert_stmt, GSI_SAME_STMT);
2754 }
2755
2756 gimple_set_lhs (bswap_stmt, tmp);
2757
2758 if (dump_file)
2759 {
2760 fprintf (dump_file, "%d bit bswap implementation found at: ",
2761 (int) n->range);
2762 print_gimple_stmt (dump_file, cur_stmt, 0, 0);
2763 }
2764
2765 gsi_insert_after (&gsi, bswap_stmt, GSI_SAME_STMT);
2766 gsi_remove (&gsi, true);
2767 return true;
2768 }
2769
2770 /* Find manual byte swap implementations as well as loads in a given
2771 endianness. Byte swaps are turned into a bswap builtin invocation
2772 while endian loads are converted to a bswap builtin invocation or a
2773 simple load according to the target endianness. */
2774
2775 unsigned int
2776 pass_optimize_bswap::execute (function *fun)
2777 {
2778 basic_block bb;
2779 bool bswap32_p, bswap64_p;
2780 bool changed = false;
2781 tree bswap32_type = NULL_TREE, bswap64_type = NULL_TREE;
2782
2783 if (BITS_PER_UNIT != 8)
2784 return 0;
2785
2786 bswap32_p = (builtin_decl_explicit_p (BUILT_IN_BSWAP32)
2787 && optab_handler (bswap_optab, SImode) != CODE_FOR_nothing);
2788 bswap64_p = (builtin_decl_explicit_p (BUILT_IN_BSWAP64)
2789 && (optab_handler (bswap_optab, DImode) != CODE_FOR_nothing
2790 || (bswap32_p && word_mode == SImode)));
2791
2792 /* Determine the argument type of the builtins. The code later on
2793 assumes that the return and argument type are the same. */
2794 if (bswap32_p)
2795 {
2796 tree fndecl = builtin_decl_explicit (BUILT_IN_BSWAP32);
2797 bswap32_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
2798 }
2799
2800 if (bswap64_p)
2801 {
2802 tree fndecl = builtin_decl_explicit (BUILT_IN_BSWAP64);
2803 bswap64_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
2804 }
2805
2806 memset (&nop_stats, 0, sizeof (nop_stats));
2807 memset (&bswap_stats, 0, sizeof (bswap_stats));
2808 calculate_dominance_info (CDI_DOMINATORS);
2809
2810 FOR_EACH_BB_FN (bb, fun)
2811 {
2812 gimple_stmt_iterator gsi;
2813
2814 /* We do a reverse scan for bswap patterns to make sure we get the
2815 widest match. As bswap pattern matching doesn't handle previously
2816 inserted smaller bswap replacements as sub-patterns, the wider
2817 variant wouldn't be detected. */
2818 for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi);)
2819 {
2820 gimple *ins_stmt, *cur_stmt = gsi_stmt (gsi);
2821 tree fndecl = NULL_TREE, bswap_type = NULL_TREE, load_type;
2822 enum tree_code code;
2823 struct symbolic_number n;
2824 bool bswap;
2825
2826 /* This gsi_prev (&gsi) is not part of the for loop because cur_stmt
2827 might be moved to a different basic block by bswap_replace and gsi
2828 must not point to it if that's the case. Moving the gsi_prev
2829 here makes sure that gsi points to the statement previous to
2830 cur_stmt while still making sure that all statements are
2831 considered in this basic block. */
2832 gsi_prev (&gsi);
2833
2834 if (!is_gimple_assign (cur_stmt))
2835 continue;
2836
2837 code = gimple_assign_rhs_code (cur_stmt);
2838 switch (code)
2839 {
2840 case LROTATE_EXPR:
2841 case RROTATE_EXPR:
2842 if (!tree_fits_uhwi_p (gimple_assign_rhs2 (cur_stmt))
2843 || tree_to_uhwi (gimple_assign_rhs2 (cur_stmt))
2844 % BITS_PER_UNIT)
2845 continue;
2846 /* Fall through. */
2847 case BIT_IOR_EXPR:
2848 break;
2849 default:
2850 continue;
2851 }
2852
2853 ins_stmt = find_bswap_or_nop (cur_stmt, &n, &bswap);
2854
2855 if (!ins_stmt)
2856 continue;
2857
2858 switch (n.range)
2859 {
2860 case 16:
2861 /* Already in canonical form, nothing to do. */
2862 if (code == LROTATE_EXPR || code == RROTATE_EXPR)
2863 continue;
2864 load_type = bswap_type = uint16_type_node;
2865 break;
2866 case 32:
2867 load_type = uint32_type_node;
2868 if (bswap32_p)
2869 {
2870 fndecl = builtin_decl_explicit (BUILT_IN_BSWAP32);
2871 bswap_type = bswap32_type;
2872 }
2873 break;
2874 case 64:
2875 load_type = uint64_type_node;
2876 if (bswap64_p)
2877 {
2878 fndecl = builtin_decl_explicit (BUILT_IN_BSWAP64);
2879 bswap_type = bswap64_type;
2880 }
2881 break;
2882 default:
2883 continue;
2884 }
2885
2886 if (bswap && !fndecl && n.range != 16)
2887 continue;
2888
2889 if (bswap_replace (cur_stmt, ins_stmt, fndecl, bswap_type, load_type,
2890 &n, bswap))
2891 changed = true;
2892 }
2893 }
2894
2895 statistics_counter_event (fun, "16-bit nop implementations found",
2896 nop_stats.found_16bit);
2897 statistics_counter_event (fun, "32-bit nop implementations found",
2898 nop_stats.found_32bit);
2899 statistics_counter_event (fun, "64-bit nop implementations found",
2900 nop_stats.found_64bit);
2901 statistics_counter_event (fun, "16-bit bswap implementations found",
2902 bswap_stats.found_16bit);
2903 statistics_counter_event (fun, "32-bit bswap implementations found",
2904 bswap_stats.found_32bit);
2905 statistics_counter_event (fun, "64-bit bswap implementations found",
2906 bswap_stats.found_64bit);
2907
2908 return (changed ? TODO_update_ssa : 0);
2909 }
2910
2911 } // anon namespace
2912
2913 gimple_opt_pass *
2914 make_pass_optimize_bswap (gcc::context *ctxt)
2915 {
2916 return new pass_optimize_bswap (ctxt);
2917 }
2918
2919 /* Return true if stmt is a type conversion operation that can be stripped
2920 when used in a widening multiply operation. */
2921 static bool
2922 widening_mult_conversion_strippable_p (tree result_type, gimple *stmt)
2923 {
2924 enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
2925
2926 if (TREE_CODE (result_type) == INTEGER_TYPE)
2927 {
2928 tree op_type;
2929 tree inner_op_type;
2930
2931 if (!CONVERT_EXPR_CODE_P (rhs_code))
2932 return false;
2933
2934 op_type = TREE_TYPE (gimple_assign_lhs (stmt));
2935
2936 /* If the type of OP has the same precision as the result, then
2937 we can strip this conversion. The multiply operation will be
2938 selected to create the correct extension as a by-product. */
2939 if (TYPE_PRECISION (result_type) == TYPE_PRECISION (op_type))
2940 return true;
2941
2942 /* We can also strip a conversion if it preserves the signedness of
2943 the operation and doesn't narrow the range. */
2944 inner_op_type = TREE_TYPE (gimple_assign_rhs1 (stmt));
2945
2946 /* If the inner-most type is unsigned, then we can strip any
2947 intermediate widening operation. If it's signed, then the
2948 intermediate widening operation must also be signed. */
2949 if ((TYPE_UNSIGNED (inner_op_type)
2950 || TYPE_UNSIGNED (op_type) == TYPE_UNSIGNED (inner_op_type))
2951 && TYPE_PRECISION (op_type) > TYPE_PRECISION (inner_op_type))
2952 return true;
2953
2954 return false;
2955 }
2956
2957 return rhs_code == FIXED_CONVERT_EXPR;
2958 }
2959
2960 /* Return true if RHS is a suitable operand for a widening multiplication,
2961 assuming a target type of TYPE.
2962 There are two cases:
2963
2964 - RHS makes some value at least twice as wide. Store that value
2965 in *NEW_RHS_OUT if so, and store its type in *TYPE_OUT.
2966
2967 - RHS is an integer constant. Store that value in *NEW_RHS_OUT if so,
2968 but leave *TYPE_OUT untouched. */
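/* For instance (an illustrative sketch): with TYPE a 64-bit integer
   type and RHS defined by w_1 = (long long) i_2 for a 32-bit i_2,
   the conversion is strippable, so i_2 is stored in *NEW_RHS_OUT and
   the 32-bit type in *TYPE_OUT.  */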
2969
2970 static bool
2971 is_widening_mult_rhs_p (tree type, tree rhs, tree *type_out,
2972 tree *new_rhs_out)
2973 {
2974 gimple *stmt;
2975 tree type1, rhs1;
2976
2977 if (TREE_CODE (rhs) == SSA_NAME)
2978 {
2979 stmt = SSA_NAME_DEF_STMT (rhs);
2980 if (is_gimple_assign (stmt))
2981 {
2982 if (! widening_mult_conversion_strippable_p (type, stmt))
2983 rhs1 = rhs;
2984 else
2985 {
2986 rhs1 = gimple_assign_rhs1 (stmt);
2987
2988 if (TREE_CODE (rhs1) == INTEGER_CST)
2989 {
2990 *new_rhs_out = rhs1;
2991 *type_out = NULL;
2992 return true;
2993 }
2994 }
2995 }
2996 else
2997 rhs1 = rhs;
2998
2999 type1 = TREE_TYPE (rhs1);
3000
3001 if (TREE_CODE (type1) != TREE_CODE (type)
3002 || TYPE_PRECISION (type1) * 2 > TYPE_PRECISION (type))
3003 return false;
3004
3005 *new_rhs_out = rhs1;
3006 *type_out = type1;
3007 return true;
3008 }
3009
3010 if (TREE_CODE (rhs) == INTEGER_CST)
3011 {
3012 *new_rhs_out = rhs;
3013 *type_out = NULL;
3014 return true;
3015 }
3016
3017 return false;
3018 }
3019
3020 /* Return true if STMT performs a widening multiplication, assuming the
3021 output type is TYPE. If so, store the unwidened types of the operands
3022 in *TYPE1_OUT and *TYPE2_OUT respectively. Also fill *RHS1_OUT and
3023 *RHS2_OUT such that converting those operands to types *TYPE1_OUT
3024 and *TYPE2_OUT would give the operands of the multiplication. */
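/* For example (an illustrative sketch): given 32-bit X_1, Y_2 and

     _3 = (long long) X_1;
     _4 = (long long) Y_2;
     _5 = _3 * _4;

   this returns true for the multiplication, with *TYPE1_OUT and
   *TYPE2_OUT set to the 32-bit type and *RHS1_OUT, *RHS2_OUT set to
   X_1 and Y_2.  */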
3025
3026 static bool
3027 is_widening_mult_p (gimple *stmt,
3028 tree *type1_out, tree *rhs1_out,
3029 tree *type2_out, tree *rhs2_out)
3030 {
3031 tree type = TREE_TYPE (gimple_assign_lhs (stmt));
3032
3033 if (TREE_CODE (type) != INTEGER_TYPE
3034 && TREE_CODE (type) != FIXED_POINT_TYPE)
3035 return false;
3036
3037 if (!is_widening_mult_rhs_p (type, gimple_assign_rhs1 (stmt), type1_out,
3038 rhs1_out))
3039 return false;
3040
3041 if (!is_widening_mult_rhs_p (type, gimple_assign_rhs2 (stmt), type2_out,
3042 rhs2_out))
3043 return false;
3044
3045 if (*type1_out == NULL)
3046 {
3047 if (*type2_out == NULL || !int_fits_type_p (*rhs1_out, *type2_out))
3048 return false;
3049 *type1_out = *type2_out;
3050 }
3051
3052 if (*type2_out == NULL)
3053 {
3054 if (!int_fits_type_p (*rhs2_out, *type1_out))
3055 return false;
3056 *type2_out = *type1_out;
3057 }
3058
3059 /* Ensure that the larger of the two operands comes first. */
3060 if (TYPE_PRECISION (*type1_out) < TYPE_PRECISION (*type2_out))
3061 {
3062 std::swap (*type1_out, *type2_out);
3063 std::swap (*rhs1_out, *rhs2_out);
3064 }
3065
3066 return true;
3067 }
3068
3069 /* Process a single gimple statement STMT, which has a MULT_EXPR as
3070 its rhs, and try to convert it into a WIDEN_MULT_EXPR. The return
3071 value is true iff we converted the statement. */
3072
3073 static bool
3074 convert_mult_to_widen (gimple *stmt, gimple_stmt_iterator *gsi)
3075 {
3076 tree lhs, rhs1, rhs2, type, type1, type2;
3077 enum insn_code handler;
3078 machine_mode to_mode, from_mode, actual_mode;
3079 optab op;
3080 int actual_precision;
3081 location_t loc = gimple_location (stmt);
3082 bool from_unsigned1, from_unsigned2;
3083
3084 lhs = gimple_assign_lhs (stmt);
3085 type = TREE_TYPE (lhs);
3086 if (TREE_CODE (type) != INTEGER_TYPE)
3087 return false;
3088
3089 if (!is_widening_mult_p (stmt, &type1, &rhs1, &type2, &rhs2))
3090 return false;
3091
3092 to_mode = TYPE_MODE (type);
3093 from_mode = TYPE_MODE (type1);
3094 from_unsigned1 = TYPE_UNSIGNED (type1);
3095 from_unsigned2 = TYPE_UNSIGNED (type2);
3096
3097 if (from_unsigned1 && from_unsigned2)
3098 op = umul_widen_optab;
3099 else if (!from_unsigned1 && !from_unsigned2)
3100 op = smul_widen_optab;
3101 else
3102 op = usmul_widen_optab;
3103
3104 handler = find_widening_optab_handler_and_mode (op, to_mode, from_mode,
3105 0, &actual_mode);
3106
3107 if (handler == CODE_FOR_nothing)
3108 {
3109 if (op != smul_widen_optab)
3110 {
3111 /* We can use a signed multiply with unsigned types as long as
3112 there is a wider mode to use, or it is the smaller of the two
3113 types that is unsigned. Note that type1 >= type2, always. */
3114 if ((TYPE_UNSIGNED (type1)
3115 && TYPE_PRECISION (type1) == GET_MODE_PRECISION (from_mode))
3116 || (TYPE_UNSIGNED (type2)
3117 && TYPE_PRECISION (type2) == GET_MODE_PRECISION (from_mode)))
3118 {
3119 from_mode = GET_MODE_WIDER_MODE (from_mode);
3120 if (GET_MODE_SIZE (to_mode) <= GET_MODE_SIZE (from_mode))
3121 return false;
3122 }
3123
3124 op = smul_widen_optab;
3125 handler = find_widening_optab_handler_and_mode (op, to_mode,
3126 from_mode, 0,
3127 &actual_mode);
3128
3129 if (handler == CODE_FOR_nothing)
3130 return false;
3131
3132 from_unsigned1 = from_unsigned2 = false;
3133 }
3134 else
3135 return false;
3136 }
3137
3138 /* Ensure that the inputs to the handler are in the correct precision
3139 for the opcode. This will be the full mode size. */
3140 actual_precision = GET_MODE_PRECISION (actual_mode);
3141 if (2 * actual_precision > TYPE_PRECISION (type))
3142 return false;
3143 if (actual_precision != TYPE_PRECISION (type1)
3144 || from_unsigned1 != TYPE_UNSIGNED (type1))
3145 rhs1 = build_and_insert_cast (gsi, loc,
3146 build_nonstandard_integer_type
3147 (actual_precision, from_unsigned1), rhs1);
3148 if (actual_precision != TYPE_PRECISION (type2)
3149 || from_unsigned2 != TYPE_UNSIGNED (type2))
3150 rhs2 = build_and_insert_cast (gsi, loc,
3151 build_nonstandard_integer_type
3152 (actual_precision, from_unsigned2), rhs2);
3153
3154 /* Handle constants. */
3155 if (TREE_CODE (rhs1) == INTEGER_CST)
3156 rhs1 = fold_convert (type1, rhs1);
3157 if (TREE_CODE (rhs2) == INTEGER_CST)
3158 rhs2 = fold_convert (type2, rhs2);
3159
3160 gimple_assign_set_rhs1 (stmt, rhs1);
3161 gimple_assign_set_rhs2 (stmt, rhs2);
3162 gimple_assign_set_rhs_code (stmt, WIDEN_MULT_EXPR);
3163 update_stmt (stmt);
3164 widen_mul_stats.widen_mults_inserted++;
3165 return true;
3166 }
3167
3168 /* Process a single gimple statement STMT, which is found at the
3169 iterator GSI and has either a PLUS_EXPR or a MINUS_EXPR as its
3170 rhs (given by CODE), and try to convert it into a
3171 WIDEN_MULT_PLUS_EXPR or a WIDEN_MULT_MINUS_EXPR. The return value
3172 is true iff we converted the statement. */
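/* For example (an illustrative sketch), on a target with a widening
   multiply-accumulate pattern this rewrites

     _3 = _1 w* _2;
     _5 = _3 + _4;

   into _5 = WIDEN_MULT_PLUS_EXPR <_1, _2, _4>.  */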
3173
3174 static bool
3175 convert_plusminus_to_widen (gimple_stmt_iterator *gsi, gimple *stmt,
3176 enum tree_code code)
3177 {
3178 gimple *rhs1_stmt = NULL, *rhs2_stmt = NULL;
3179 gimple *conv1_stmt = NULL, *conv2_stmt = NULL, *conv_stmt;
3180 tree type, type1, type2, optype;
3181 tree lhs, rhs1, rhs2, mult_rhs1, mult_rhs2, add_rhs;
3182 enum tree_code rhs1_code = ERROR_MARK, rhs2_code = ERROR_MARK;
3183 optab this_optab;
3184 enum tree_code wmult_code;
3185 enum insn_code handler;
3186 machine_mode to_mode, from_mode, actual_mode;
3187 location_t loc = gimple_location (stmt);
3188 int actual_precision;
3189 bool from_unsigned1, from_unsigned2;
3190
3191 lhs = gimple_assign_lhs (stmt);
3192 type = TREE_TYPE (lhs);
3193 if (TREE_CODE (type) != INTEGER_TYPE
3194 && TREE_CODE (type) != FIXED_POINT_TYPE)
3195 return false;
3196
3197 if (code == MINUS_EXPR)
3198 wmult_code = WIDEN_MULT_MINUS_EXPR;
3199 else
3200 wmult_code = WIDEN_MULT_PLUS_EXPR;
3201
3202 rhs1 = gimple_assign_rhs1 (stmt);
3203 rhs2 = gimple_assign_rhs2 (stmt);
3204
3205 if (TREE_CODE (rhs1) == SSA_NAME)
3206 {
3207 rhs1_stmt = SSA_NAME_DEF_STMT (rhs1);
3208 if (is_gimple_assign (rhs1_stmt))
3209 rhs1_code = gimple_assign_rhs_code (rhs1_stmt);
3210 }
3211
3212 if (TREE_CODE (rhs2) == SSA_NAME)
3213 {
3214 rhs2_stmt = SSA_NAME_DEF_STMT (rhs2);
3215 if (is_gimple_assign (rhs2_stmt))
3216 rhs2_code = gimple_assign_rhs_code (rhs2_stmt);
3217 }
3218
3219 /* Allow for one conversion statement between the multiply
3220 and addition/subtraction statement. If there is more than
3221 one conversion then we assume they would invalidate this
3222 transformation. If that's not the case then they should have
3223 been folded before now. */
3224 if (CONVERT_EXPR_CODE_P (rhs1_code))
3225 {
3226 conv1_stmt = rhs1_stmt;
3227 rhs1 = gimple_assign_rhs1 (rhs1_stmt);
3228 if (TREE_CODE (rhs1) == SSA_NAME)
3229 {
3230 rhs1_stmt = SSA_NAME_DEF_STMT (rhs1);
3231 if (is_gimple_assign (rhs1_stmt))
3232 rhs1_code = gimple_assign_rhs_code (rhs1_stmt);
3233 }
3234 else
3235 return false;
3236 }
3237 if (CONVERT_EXPR_CODE_P (rhs2_code))
3238 {
3239 conv2_stmt = rhs2_stmt;
3240 rhs2 = gimple_assign_rhs1 (rhs2_stmt);
3241 if (TREE_CODE (rhs2) == SSA_NAME)
3242 {
3243 rhs2_stmt = SSA_NAME_DEF_STMT (rhs2);
3244 if (is_gimple_assign (rhs2_stmt))
3245 rhs2_code = gimple_assign_rhs_code (rhs2_stmt);
3246 }
3247 else
3248 return false;
3249 }
3250
3251 /* If code is WIDEN_MULT_EXPR then it would seem unnecessary to call
3252 is_widening_mult_p, but we still need the rhs values it returns.
3253
3254 It might also appear that it would be sufficient to use the existing
3255 operands of the widening multiply, but that would limit the choice of
3256 multiply-and-accumulate instructions.
3257
3258 If the widened-multiplication result has more than one use, it is
3259 probably wiser not to do the conversion. */
3260 if (code == PLUS_EXPR
3261 && (rhs1_code == MULT_EXPR || rhs1_code == WIDEN_MULT_EXPR))
3262 {
3263 if (!has_single_use (rhs1)
3264 || !is_widening_mult_p (rhs1_stmt, &type1, &mult_rhs1,
3265 &type2, &mult_rhs2))
3266 return false;
3267 add_rhs = rhs2;
3268 conv_stmt = conv1_stmt;
3269 }
3270 else if (rhs2_code == MULT_EXPR || rhs2_code == WIDEN_MULT_EXPR)
3271 {
3272 if (!has_single_use (rhs2)
3273 || !is_widening_mult_p (rhs2_stmt, &type1, &mult_rhs1,
3274 &type2, &mult_rhs2))
3275 return false;
3276 add_rhs = rhs1;
3277 conv_stmt = conv2_stmt;
3278 }
3279 else
3280 return false;
3281
3282 to_mode = TYPE_MODE (type);
3283 from_mode = TYPE_MODE (type1);
3284 from_unsigned1 = TYPE_UNSIGNED (type1);
3285 from_unsigned2 = TYPE_UNSIGNED (type2);
3286 optype = type1;
3287
3288 /* There's no such thing as a mixed sign madd yet, so use a wider mode. */
3289 if (from_unsigned1 != from_unsigned2)
3290 {
3291 if (!INTEGRAL_TYPE_P (type))
3292 return false;
3293 /* We can use a signed multiply with unsigned types as long as
3294 there is a wider mode to use, or it is the smaller of the two
3295 types that is unsigned. Note that type1 >= type2, always. */
3296 if ((from_unsigned1
3297 && TYPE_PRECISION (type1) == GET_MODE_PRECISION (from_mode))
3298 || (from_unsigned2
3299 && TYPE_PRECISION (type2) == GET_MODE_PRECISION (from_mode)))
3300 {
3301 from_mode = GET_MODE_WIDER_MODE (from_mode);
3302 if (GET_MODE_SIZE (from_mode) >= GET_MODE_SIZE (to_mode))
3303 return false;
3304 }
3305
3306 from_unsigned1 = from_unsigned2 = false;
3307 optype = build_nonstandard_integer_type (GET_MODE_PRECISION (from_mode),
3308 false);
3309 }
3310
3311 /* If there was a conversion between the multiply and addition
3312 then we need to make sure it fits a multiply-and-accumulate.
3313 There should be a single mode change which does not change the
3314 value. */
3315 if (conv_stmt)
3316 {
3317 /* We use the original, unmodified data types for this. */
3318 tree from_type = TREE_TYPE (gimple_assign_rhs1 (conv_stmt));
3319 tree to_type = TREE_TYPE (gimple_assign_lhs (conv_stmt));
3320 int data_size = TYPE_PRECISION (type1) + TYPE_PRECISION (type2);
3321 bool is_unsigned = TYPE_UNSIGNED (type1) && TYPE_UNSIGNED (type2);
3322
3323 if (TYPE_PRECISION (from_type) > TYPE_PRECISION (to_type))
3324 {
3325 /* Conversion is a truncate. */
3326 if (TYPE_PRECISION (to_type) < data_size)
3327 return false;
3328 }
3329 else if (TYPE_PRECISION (from_type) < TYPE_PRECISION (to_type))
3330 {
3331 /* Conversion is an extend. Check it's the right sort. */
3332 if (TYPE_UNSIGNED (from_type) != is_unsigned
3333 && !(is_unsigned && TYPE_PRECISION (from_type) > data_size))
3334 return false;
3335 }
3336 /* else convert is a no-op for our purposes. */
3337 }
3338
3339 /* Verify that the machine can perform a widening multiply
3340 accumulate in this mode/signedness combination, otherwise
3341 this transformation is likely to pessimize code. */
3342 this_optab = optab_for_tree_code (wmult_code, optype, optab_default);
3343 handler = find_widening_optab_handler_and_mode (this_optab, to_mode,
3344 from_mode, 0, &actual_mode);
3345
3346 if (handler == CODE_FOR_nothing)
3347 return false;
3348
3349 /* Ensure that the inputs to the handler are in the correct precision
3350 for the opcode. This will be the full mode size. */
3351 actual_precision = GET_MODE_PRECISION (actual_mode);
3352 if (actual_precision != TYPE_PRECISION (type1)
3353 || from_unsigned1 != TYPE_UNSIGNED (type1))
3354 mult_rhs1 = build_and_insert_cast (gsi, loc,
3355 build_nonstandard_integer_type
3356 (actual_precision, from_unsigned1),
3357 mult_rhs1);
3358 if (actual_precision != TYPE_PRECISION (type2)
3359 || from_unsigned2 != TYPE_UNSIGNED (type2))
3360 mult_rhs2 = build_and_insert_cast (gsi, loc,
3361 build_nonstandard_integer_type
3362 (actual_precision, from_unsigned2),
3363 mult_rhs2);
3364
3365 if (!useless_type_conversion_p (type, TREE_TYPE (add_rhs)))
3366 add_rhs = build_and_insert_cast (gsi, loc, type, add_rhs);
3367
3368 /* Handle constants. */
3369 if (TREE_CODE (mult_rhs1) == INTEGER_CST)
3370 mult_rhs1 = fold_convert (type1, mult_rhs1);
3371 if (TREE_CODE (mult_rhs2) == INTEGER_CST)
3372 mult_rhs2 = fold_convert (type2, mult_rhs2);
3373
3374 gimple_assign_set_rhs_with_ops (gsi, wmult_code, mult_rhs1, mult_rhs2,
3375 add_rhs);
3376 update_stmt (gsi_stmt (*gsi));
3377 widen_mul_stats.maccs_inserted++;
3378 return true;
3379 }
3380
3381 /* Combine the multiplication at MUL_STMT with operands MULOP1 and MULOP2
3382 with uses in additions and subtractions to form fused multiply-add
3383 operations. Returns true if successful and MUL_STMT should be removed. */
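/* For example (an illustrative sketch), on a target with an fma
   pattern

     _3 = _1 * _2;
     _5 = _3 + _4;

   becomes _5 = FMA <_1, _2, _4>; a NEGATE_EXPR on the multiplication
   result or a MINUS_EXPR use selects the FNMA/FMS variants instead.  */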
3384
3385 static bool
3386 convert_mult_to_fma (gimple *mul_stmt, tree op1, tree op2)
3387 {
3388 tree mul_result = gimple_get_lhs (mul_stmt);
3389 tree type = TREE_TYPE (mul_result);
3390 gimple *use_stmt, *neguse_stmt;
3391 gassign *fma_stmt;
3392 use_operand_p use_p;
3393 imm_use_iterator imm_iter;
3394
3395 if (FLOAT_TYPE_P (type)
3396 && flag_fp_contract_mode == FP_CONTRACT_OFF)
3397 return false;
3398
3399 /* We don't want to do bitfield reduction ops. */
3400 if (INTEGRAL_TYPE_P (type)
3401 && (TYPE_PRECISION (type)
3402 != GET_MODE_PRECISION (TYPE_MODE (type))))
3403 return false;
3404
3405 /* If the target doesn't support it, don't generate it. We assume that
3406 if fma isn't available then fms, fnma or fnms are not either. */
3407 if (optab_handler (fma_optab, TYPE_MODE (type)) == CODE_FOR_nothing)
3408 return false;
3409
3410 /* If the multiplication has zero uses, it is kept around probably because
3411 of -fnon-call-exceptions. Don't optimize it away in that case;
3412 it is DCE's job. */
3413 if (has_zero_uses (mul_result))
3414 return false;
3415
3416 /* Make sure that the multiplication statement becomes dead after
3417 the transformation, i.e. that all uses are transformed to FMAs.
3418 This means we assume that an FMA operation has the same cost
3419 as an addition. */
3420 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, mul_result)
3421 {
3422 enum tree_code use_code;
3423 tree result = mul_result;
3424 bool negate_p = false;
3425
3426 use_stmt = USE_STMT (use_p);
3427
3428 if (is_gimple_debug (use_stmt))
3429 continue;
3430
3431 /* For now restrict this operation to single basic blocks. In theory
3432 we would want to support sinking the multiplication in
3433 m = a*b;
3434 if ()
3435 ma = m + c;
3436 else
3437 d = m;
3438 to form a fma in the then block and sink the multiplication to the
3439 else block. */
3440 if (gimple_bb (use_stmt) != gimple_bb (mul_stmt))
3441 return false;
3442
3443 if (!is_gimple_assign (use_stmt))
3444 return false;
3445
3446 use_code = gimple_assign_rhs_code (use_stmt);
3447
3448 /* A negate on the multiplication leads to FNMA. */
      if (use_code == NEGATE_EXPR)
	{
	  ssa_op_iter iter;
	  use_operand_p usep;

	  result = gimple_assign_lhs (use_stmt);

	  /* Make sure the negate statement becomes dead with this
	     single transformation.  */
	  if (!single_imm_use (gimple_assign_lhs (use_stmt),
			       &use_p, &neguse_stmt))
	    return false;

	  /* Make sure the multiplication isn't also used on that stmt.  */
	  FOR_EACH_PHI_OR_STMT_USE (usep, neguse_stmt, iter, SSA_OP_USE)
	    if (USE_FROM_PTR (usep) == mul_result)
	      return false;

	  /* Re-validate.  */
	  use_stmt = neguse_stmt;
	  if (gimple_bb (use_stmt) != gimple_bb (mul_stmt))
	    return false;
	  if (!is_gimple_assign (use_stmt))
	    return false;

	  use_code = gimple_assign_rhs_code (use_stmt);
	  negate_p = true;
	}

      switch (use_code)
	{
	case MINUS_EXPR:
	  if (gimple_assign_rhs2 (use_stmt) == result)
	    negate_p = !negate_p;
	  break;
	case PLUS_EXPR:
	  break;
	default:
	  /* FMA can only be formed from PLUS and MINUS.  */
	  return false;
	}

      /* If the subtrahend (gimple_assign_rhs2 (use_stmt)) is computed
	 by a MULT_EXPR that we'll visit later, we might be able to
	 get a more profitable match with fnma.
	 OTOH, if we don't, a negate / fma pair likely has lower latency
	 than a mult / subtract pair.  */
      if (use_code == MINUS_EXPR && !negate_p
	  && gimple_assign_rhs1 (use_stmt) == result
	  && optab_handler (fms_optab, TYPE_MODE (type)) == CODE_FOR_nothing
	  && optab_handler (fnma_optab, TYPE_MODE (type)) != CODE_FOR_nothing)
	{
	  tree rhs2 = gimple_assign_rhs2 (use_stmt);

	  if (TREE_CODE (rhs2) == SSA_NAME)
	    {
	      gimple *stmt2 = SSA_NAME_DEF_STMT (rhs2);
	      if (has_single_use (rhs2)
		  && is_gimple_assign (stmt2)
		  && gimple_assign_rhs_code (stmt2) == MULT_EXPR)
		return false;
	    }
	}

      /* We can't handle a * b + a * b.  */
      if (gimple_assign_rhs1 (use_stmt) == gimple_assign_rhs2 (use_stmt))
	return false;

      /* While it is possible to validate whether or not the exact form
	 that we've recognized is available in the backend, the assumption
	 is that the transformation is never a loss.  For instance, suppose
	 the target only has the plain FMA pattern available.  Consider
	 a*b-c -> fma(a,b,-c): we've exchanged MUL+SUB for FMA+NEG, which
	 is still two operations.  Consider -(a*b)-c -> fma(-a,b,-c): we
	 still have 3 operations, but in the FMA form the two NEGs are
	 independent and could be run in parallel.  */
    }

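  /* Every non-debug use has now been validated as a supported addition
     or subtraction (possibly through a single negate), so the loop below
     can rewrite each of them, after which the multiplication is dead.  */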
  FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, mul_result)
    {
      gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
      enum tree_code use_code;
      tree addop, mulop1 = op1, result = mul_result;
      bool negate_p = false;

      if (is_gimple_debug (use_stmt))
	continue;

      use_code = gimple_assign_rhs_code (use_stmt);
      if (use_code == NEGATE_EXPR)
	{
	  result = gimple_assign_lhs (use_stmt);
	  single_imm_use (gimple_assign_lhs (use_stmt), &use_p, &neguse_stmt);
	  gsi_remove (&gsi, true);
	  release_defs (use_stmt);

	  use_stmt = neguse_stmt;
	  gsi = gsi_for_stmt (use_stmt);
	  use_code = gimple_assign_rhs_code (use_stmt);
	  negate_p = true;
	}

      if (gimple_assign_rhs1 (use_stmt) == result)
	{
	  addop = gimple_assign_rhs2 (use_stmt);
	  /* a * b - c -> a * b + (-c)  */
	  if (gimple_assign_rhs_code (use_stmt) == MINUS_EXPR)
	    addop = force_gimple_operand_gsi (&gsi,
					      build1 (NEGATE_EXPR,
						      type, addop),
					      true, NULL_TREE, true,
					      GSI_SAME_STMT);
	}
      else
	{
	  addop = gimple_assign_rhs1 (use_stmt);
	  /* a - b * c -> (-b) * c + a  */
	  if (gimple_assign_rhs_code (use_stmt) == MINUS_EXPR)
	    negate_p = !negate_p;
	}

      if (negate_p)
	mulop1 = force_gimple_operand_gsi (&gsi,
					   build1 (NEGATE_EXPR,
						   type, mulop1),
					   true, NULL_TREE, true,
					   GSI_SAME_STMT);

      fma_stmt = gimple_build_assign (gimple_assign_lhs (use_stmt),
				      FMA_EXPR, mulop1, op2, addop);
      gsi_replace (&gsi, fma_stmt, true);
      widen_mul_stats.fmas_inserted++;
    }

  return true;
}


/* Helper function of match_uaddsub_overflow.  Return 1 if USE_STMT
   is an unsigned overflow check ovf != 0 for STMT, -1 if USE_STMT
   is an unsigned overflow check ovf == 0, and 0 otherwise.  */
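/* The recognized forms rely on unsigned wrap-around: for r = a - b the
   subtraction wraps iff b > a, which is equivalent to r > a; for
   r = a + b the addition wraps iff r < a (equivalently, iff r < b).  */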

static int
uaddsub_overflow_check_p (gimple *stmt, gimple *use_stmt)
{
  enum tree_code ccode = ERROR_MARK;
  tree crhs1 = NULL_TREE, crhs2 = NULL_TREE;
  if (gimple_code (use_stmt) == GIMPLE_COND)
    {
      ccode = gimple_cond_code (use_stmt);
      crhs1 = gimple_cond_lhs (use_stmt);
      crhs2 = gimple_cond_rhs (use_stmt);
    }
  else if (is_gimple_assign (use_stmt))
    {
      if (gimple_assign_rhs_class (use_stmt) == GIMPLE_BINARY_RHS)
	{
	  ccode = gimple_assign_rhs_code (use_stmt);
	  crhs1 = gimple_assign_rhs1 (use_stmt);
	  crhs2 = gimple_assign_rhs2 (use_stmt);
	}
      else if (gimple_assign_rhs_code (use_stmt) == COND_EXPR)
	{
	  tree cond = gimple_assign_rhs1 (use_stmt);
	  if (COMPARISON_CLASS_P (cond))
	    {
	      ccode = TREE_CODE (cond);
	      crhs1 = TREE_OPERAND (cond, 0);
	      crhs2 = TREE_OPERAND (cond, 1);
	    }
	  else
	    return 0;
	}
      else
	return 0;
    }
  else
    return 0;

  if (TREE_CODE_CLASS (ccode) != tcc_comparison)
    return 0;

  enum tree_code code = gimple_assign_rhs_code (stmt);
  tree lhs = gimple_assign_lhs (stmt);
  tree rhs1 = gimple_assign_rhs1 (stmt);
  tree rhs2 = gimple_assign_rhs2 (stmt);

  switch (ccode)
    {
    case GT_EXPR:
    case LE_EXPR:
      /* r = a - b; r > a or r <= a
	 r = a + b; a > r or a <= r or b > r or b <= r.  */
      if ((code == MINUS_EXPR && crhs1 == lhs && crhs2 == rhs1)
	  || (code == PLUS_EXPR && (crhs1 == rhs1 || crhs1 == rhs2)
	      && crhs2 == lhs))
	return ccode == GT_EXPR ? 1 : -1;
      break;
    case LT_EXPR:
    case GE_EXPR:
      /* r = a - b; a < r or a >= r
	 r = a + b; r < a or r >= a or r < b or r >= b.  */
      if ((code == MINUS_EXPR && crhs1 == rhs1 && crhs2 == lhs)
	  || (code == PLUS_EXPR && crhs1 == lhs
	      && (crhs2 == rhs1 || crhs2 == rhs2)))
	return ccode == LT_EXPR ? 1 : -1;
      break;
    default:
      break;
    }
  return 0;
}

/* Recognize, for unsigned x,
     x = y - z;
     if (x > y)
   where x has uses other than the overflow check, and replace it with
     _7 = SUB_OVERFLOW (y, z);
     x = REALPART_EXPR <_7>;
     _8 = IMAGPART_EXPR <_7>;
     if (_8)
   and similarly for addition.  */
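/* This is essentially the form that a __builtin_sub_overflow (resp.
   __builtin_add_overflow) call expands to, and it is only done when the
   target provides a matching usubv4 (resp. uaddv4) pattern; see the
   optab check below.  */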

static bool
match_uaddsub_overflow (gimple_stmt_iterator *gsi, gimple *stmt,
			enum tree_code code)
{
  tree lhs = gimple_assign_lhs (stmt);
  tree type = TREE_TYPE (lhs);
  use_operand_p use_p;
  imm_use_iterator iter;
  bool use_seen = false;
  bool ovf_use_seen = false;
  gimple *use_stmt;

  gcc_checking_assert (code == PLUS_EXPR || code == MINUS_EXPR);
  if (!INTEGRAL_TYPE_P (type)
      || !TYPE_UNSIGNED (type)
      || has_zero_uses (lhs)
      || has_single_use (lhs)
      || optab_handler (code == PLUS_EXPR ? uaddv4_optab : usubv4_optab,
			TYPE_MODE (type)) == CODE_FOR_nothing)
    return false;

  FOR_EACH_IMM_USE_FAST (use_p, iter, lhs)
    {
      use_stmt = USE_STMT (use_p);
      if (is_gimple_debug (use_stmt))
	continue;

      if (uaddsub_overflow_check_p (stmt, use_stmt))
	ovf_use_seen = true;
      else
	use_seen = true;
      if (ovf_use_seen && use_seen)
	break;
    }

  if (!ovf_use_seen || !use_seen)
    return false;

  tree ctype = build_complex_type (type);
  tree rhs1 = gimple_assign_rhs1 (stmt);
  tree rhs2 = gimple_assign_rhs2 (stmt);
  gcall *g = gimple_build_call_internal (code == PLUS_EXPR
					 ? IFN_ADD_OVERFLOW : IFN_SUB_OVERFLOW,
					 2, rhs1, rhs2);
  tree ctmp = make_ssa_name (ctype);
  gimple_call_set_lhs (g, ctmp);
  gsi_insert_before (gsi, g, GSI_SAME_STMT);
  gassign *g2 = gimple_build_assign (lhs, REALPART_EXPR,
				     build1 (REALPART_EXPR, type, ctmp));
  gsi_replace (gsi, g2, true);
  tree ovf = make_ssa_name (type);
  g2 = gimple_build_assign (ovf, IMAGPART_EXPR,
			    build1 (IMAGPART_EXPR, type, ctmp));
  gsi_insert_after (gsi, g2, GSI_NEW_STMT);

  FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
    {
      if (is_gimple_debug (use_stmt))
	continue;

      int ovf_use = uaddsub_overflow_check_p (stmt, use_stmt);
      if (ovf_use == 0)
	continue;
      if (gimple_code (use_stmt) == GIMPLE_COND)
	{
	  gcond *cond_stmt = as_a <gcond *> (use_stmt);
	  gimple_cond_set_lhs (cond_stmt, ovf);
	  gimple_cond_set_rhs (cond_stmt, build_int_cst (type, 0));
	  gimple_cond_set_code (cond_stmt, ovf_use == 1 ? NE_EXPR : EQ_EXPR);
	}
      else
	{
	  gcc_checking_assert (is_gimple_assign (use_stmt));
	  if (gimple_assign_rhs_class (use_stmt) == GIMPLE_BINARY_RHS)
	    {
	      gimple_assign_set_rhs1 (use_stmt, ovf);
	      gimple_assign_set_rhs2 (use_stmt, build_int_cst (type, 0));
	      gimple_assign_set_rhs_code (use_stmt,
					  ovf_use == 1 ? NE_EXPR : EQ_EXPR);
	    }
	  else
	    {
	      gcc_checking_assert (gimple_assign_rhs_code (use_stmt)
				   == COND_EXPR);
	      tree cond = build2 (ovf_use == 1 ? NE_EXPR : EQ_EXPR,
				  boolean_type_node, ovf,
				  build_int_cst (type, 0));
	      gimple_assign_set_rhs1 (use_stmt, cond);
	    }
	}
      update_stmt (use_stmt);
    }
  return true;
}


/* Find integer multiplications where the operands are extended from
   smaller types, and replace the MULT_EXPR with a WIDEN_MULT_EXPR
   where appropriate.  */
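/* For example (illustrative):

     int a, b;
     long long c = (long long) a * (long long) b;

   can use a single widening-multiply instruction in place of two sign
   extensions and a full 64-bit multiplication, provided the target
   implements the corresponding widening-multiply pattern.  */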

namespace {

const pass_data pass_data_optimize_widening_mul =
{
  GIMPLE_PASS, /* type */
  "widening_mul", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_NONE, /* tv_id */
  PROP_ssa, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_update_ssa, /* todo_flags_finish */
};

class pass_optimize_widening_mul : public gimple_opt_pass
{
public:
  pass_optimize_widening_mul (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_optimize_widening_mul, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *)
    {
      return flag_expensive_optimizations && optimize;
    }

  virtual unsigned int execute (function *);

}; // class pass_optimize_widening_mul

unsigned int
pass_optimize_widening_mul::execute (function *fun)
{
  basic_block bb;
  bool cfg_changed = false;

  memset (&widen_mul_stats, 0, sizeof (widen_mul_stats));

  FOR_EACH_BB_FN (bb, fun)
    {
      gimple_stmt_iterator gsi;

      for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi);)
	{
	  gimple *stmt = gsi_stmt (gsi);
	  enum tree_code code;

	  if (is_gimple_assign (stmt))
	    {
	      code = gimple_assign_rhs_code (stmt);
	      switch (code)
		{
		case MULT_EXPR:
		  if (!convert_mult_to_widen (stmt, &gsi)
		      && convert_mult_to_fma (stmt,
					      gimple_assign_rhs1 (stmt),
					      gimple_assign_rhs2 (stmt)))
		    {
		      gsi_remove (&gsi, true);
		      release_defs (stmt);
		      continue;
		    }
		  break;

		case PLUS_EXPR:
		case MINUS_EXPR:
		  if (!convert_plusminus_to_widen (&gsi, stmt, code))
		    match_uaddsub_overflow (&gsi, stmt, code);
		  break;

		default:;
		}
	    }
	  else if (is_gimple_call (stmt)
		   && gimple_call_lhs (stmt))
	    {
	      tree fndecl = gimple_call_fndecl (stmt);
	      if (fndecl
		  && gimple_call_builtin_p (stmt, BUILT_IN_NORMAL))
		{
		  switch (DECL_FUNCTION_CODE (fndecl))
		    {
		    case BUILT_IN_POWF:
		    case BUILT_IN_POW:
		    case BUILT_IN_POWL:
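		      /* pow (x, 2.0) is just x * x, so it can feed an FMA
			 exactly like an explicit multiplication would.  */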
		      if (TREE_CODE (gimple_call_arg (stmt, 1)) == REAL_CST
			  && real_equal
			       (&TREE_REAL_CST (gimple_call_arg (stmt, 1)),
				&dconst2)
			  && convert_mult_to_fma (stmt,
						  gimple_call_arg (stmt, 0),
						  gimple_call_arg (stmt, 0)))
			{
			  unlink_stmt_vdef (stmt);
			  if (gsi_remove (&gsi, true)
			      && gimple_purge_dead_eh_edges (bb))
			    cfg_changed = true;
			  release_defs (stmt);
			  continue;
			}
		      break;

		    default:;
		    }
		}
	    }
	  gsi_next (&gsi);
	}
    }

  statistics_counter_event (fun, "widening multiplications inserted",
			    widen_mul_stats.widen_mults_inserted);
  statistics_counter_event (fun, "widening maccs inserted",
			    widen_mul_stats.maccs_inserted);
  statistics_counter_event (fun, "fused multiply-adds inserted",
			    widen_mul_stats.fmas_inserted);

  return cfg_changed ? TODO_cleanup_cfg : 0;
}

} // anon namespace

gimple_opt_pass *
make_pass_optimize_widening_mul (gcc::context *ctxt)
{
  return new pass_optimize_widening_mul (ctxt);
}