/* Operations with affine combinations of trees.
   Copyright (C) 2005-2016 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "tree-pretty-print.h"
#include "fold-const.h"
#include "tree-affine.h"
#include "gimplify.h"
#include "dumpfile.h"
#include "cfgexpand.h"

/* Extends CST as appropriate for the affine combination COMB. */
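/* For instance (an illustrative note, not from the original sources):
   with an 8-bit COMB->type, the constant 0xff sign-extends to -1, so
   offsets and coefficients always behave as signed values in the
   precision of the combination's type. */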

widest_int
wide_int_ext_for_comb (const widest_int &cst, aff_tree *comb)
{
  return wi::sext (cst, TYPE_PRECISION (comb->type));
}

/* Initializes affine combination COMB so that its value is zero in TYPE. */

static void
aff_combination_zero (aff_tree *comb, tree type)
{
  int i;
  comb->type = type;
  comb->offset = 0;
  comb->n = 0;
  for (i = 0; i < MAX_AFF_ELTS; i++)
    comb->elts[i].coef = 0;
  comb->rest = NULL_TREE;
}

/* Sets COMB to CST. */

void
aff_combination_const (aff_tree *comb, tree type, const widest_int &cst)
{
  aff_combination_zero (comb, type);
  comb->offset = wide_int_ext_for_comb (cst, comb);
}

/* Sets COMB to single element ELT. */

void
aff_combination_elt (aff_tree *comb, tree type, tree elt)
{
  aff_combination_zero (comb, type);

  comb->n = 1;
  comb->elts[0].val = elt;
  comb->elts[0].coef = 1;
}

/* Scales COMB by SCALE. */
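/* For example (illustrative only): scaling the combination
   { offset = 1; elts = { x * 2 } } by 3 yields
   { offset = 3; elts = { x * 6 } }. A coefficient that wraps to zero
   in the precision of COMB->type is dropped, and a COMB->rest term is
   either folded into the elements or multiplied in place. */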

void
aff_combination_scale (aff_tree *comb, const widest_int &scale_in)
{
  unsigned i, j;

  widest_int scale = wide_int_ext_for_comb (scale_in, comb);
  if (scale == 1)
    return;

  if (scale == 0)
    {
      aff_combination_zero (comb, comb->type);
      return;
    }

  comb->offset = wide_int_ext_for_comb (scale * comb->offset, comb);
  for (i = 0, j = 0; i < comb->n; i++)
    {
      widest_int new_coef
        = wide_int_ext_for_comb (scale * comb->elts[i].coef, comb);
      /* A coefficient may become zero due to overflow. Remove the zero
         elements. */
      if (new_coef == 0)
        continue;
      comb->elts[j].coef = new_coef;
      comb->elts[j].val = comb->elts[i].val;
      j++;
    }
  comb->n = j;

  if (comb->rest)
    {
      tree type = comb->type;
      if (POINTER_TYPE_P (type))
        type = sizetype;
      if (comb->n < MAX_AFF_ELTS)
        {
          comb->elts[comb->n].coef = scale;
          comb->elts[comb->n].val = comb->rest;
          comb->rest = NULL_TREE;
          comb->n++;
        }
      else
        comb->rest = fold_build2 (MULT_EXPR, type, comb->rest,
                                  wide_int_to_tree (type, scale));
    }
}

/* Adds ELT * SCALE to COMB. */
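/* For example (illustrative only): adding x * -2 to a combination that
   already contains x * 2 cancels the coefficients and removes x from
   the elements; when the element table is already full, ELT * SCALE is
   accumulated into COMB->rest instead. */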

void
aff_combination_add_elt (aff_tree *comb, tree elt, const widest_int &scale_in)
{
  unsigned i;
  tree type;

  widest_int scale = wide_int_ext_for_comb (scale_in, comb);
  if (scale == 0)
    return;

  for (i = 0; i < comb->n; i++)
    if (operand_equal_p (comb->elts[i].val, elt, 0))
      {
        widest_int new_coef
          = wide_int_ext_for_comb (comb->elts[i].coef + scale, comb);
        if (new_coef != 0)
          {
            comb->elts[i].coef = new_coef;
            return;
          }

        comb->n--;
        comb->elts[i] = comb->elts[comb->n];

        if (comb->rest)
          {
            gcc_assert (comb->n == MAX_AFF_ELTS - 1);
            comb->elts[comb->n].coef = 1;
            comb->elts[comb->n].val = comb->rest;
            comb->rest = NULL_TREE;
            comb->n++;
          }
        return;
      }
  if (comb->n < MAX_AFF_ELTS)
    {
      comb->elts[comb->n].coef = scale;
      comb->elts[comb->n].val = elt;
      comb->n++;
      return;
    }

  type = comb->type;
  if (POINTER_TYPE_P (type))
    type = sizetype;

  if (scale == 1)
    elt = fold_convert (type, elt);
  else
    elt = fold_build2 (MULT_EXPR, type,
                       fold_convert (type, elt),
                       wide_int_to_tree (type, scale));

  if (comb->rest)
    comb->rest = fold_build2 (PLUS_EXPR, type, comb->rest,
                              elt);
  else
    comb->rest = elt;
}

/* Adds CST to C. */

static void
aff_combination_add_cst (aff_tree *c, const widest_int &cst)
{
  c->offset = wide_int_ext_for_comb (c->offset + cst, c);
}

/* Adds COMB2 to COMB1. */

void
aff_combination_add (aff_tree *comb1, aff_tree *comb2)
{
  unsigned i;

  aff_combination_add_cst (comb1, comb2->offset);
  for (i = 0; i < comb2->n; i++)
    aff_combination_add_elt (comb1, comb2->elts[i].val, comb2->elts[i].coef);
  if (comb2->rest)
    aff_combination_add_elt (comb1, comb2->rest, 1);
}

/* Converts affine combination COMB to TYPE. */

void
aff_combination_convert (aff_tree *comb, tree type)
{
  unsigned i, j;
  tree comb_type = comb->type;

  if (TYPE_PRECISION (type) > TYPE_PRECISION (comb_type))
    {
      tree val = fold_convert (type, aff_combination_to_tree (comb));
      tree_to_aff_combination (val, type, comb);
      return;
    }

  comb->type = type;
  if (comb->rest && !POINTER_TYPE_P (type))
    comb->rest = fold_convert (type, comb->rest);

  if (TYPE_PRECISION (type) == TYPE_PRECISION (comb_type))
    return;

  comb->offset = wide_int_ext_for_comb (comb->offset, comb);
  for (i = j = 0; i < comb->n; i++)
    {
      if (comb->elts[i].coef == 0)
        continue;
      comb->elts[j].coef = comb->elts[i].coef;
      comb->elts[j].val = fold_convert (type, comb->elts[i].val);
      j++;
    }

  comb->n = j;
  if (comb->n < MAX_AFF_ELTS && comb->rest)
    {
      comb->elts[comb->n].coef = 1;
      comb->elts[comb->n].val = comb->rest;
      comb->rest = NULL_TREE;
      comb->n++;
    }
}

/* Splits EXPR into an affine combination of parts. */
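/* For example (illustrative only): the expression a + 4 * b + 7 in a
   32-bit integer type decomposes into

     { type = int; offset = 7; elts = { a * 1, b * 4 } }

   while sub-expressions that cannot be decomposed further (e.g. a
   product of two variables) become single elements with coefficient
   1. */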

void
tree_to_aff_combination (tree expr, tree type, aff_tree *comb)
{
  aff_tree tmp;
  enum tree_code code;
  tree cst, core, toffset;
  HOST_WIDE_INT bitpos, bitsize;
  machine_mode mode;
  int unsignedp, reversep, volatilep;

  STRIP_NOPS (expr);

  code = TREE_CODE (expr);
  switch (code)
    {
    case INTEGER_CST:
      aff_combination_const (comb, type, wi::to_widest (expr));
      return;

    case POINTER_PLUS_EXPR:
      tree_to_aff_combination (TREE_OPERAND (expr, 0), type, comb);
      tree_to_aff_combination (TREE_OPERAND (expr, 1), sizetype, &tmp);
      aff_combination_add (comb, &tmp);
      return;

    case PLUS_EXPR:
    case MINUS_EXPR:
      tree_to_aff_combination (TREE_OPERAND (expr, 0), type, comb);
      tree_to_aff_combination (TREE_OPERAND (expr, 1), type, &tmp);
      if (code == MINUS_EXPR)
        aff_combination_scale (&tmp, -1);
      aff_combination_add (comb, &tmp);
      return;

    case MULT_EXPR:
      cst = TREE_OPERAND (expr, 1);
      if (TREE_CODE (cst) != INTEGER_CST)
        break;
      tree_to_aff_combination (TREE_OPERAND (expr, 0), type, comb);
      aff_combination_scale (comb, wi::to_widest (cst));
      return;

    case NEGATE_EXPR:
      tree_to_aff_combination (TREE_OPERAND (expr, 0), type, comb);
      aff_combination_scale (comb, -1);
      return;

    case BIT_NOT_EXPR:
      /* ~x = -x - 1 */
      tree_to_aff_combination (TREE_OPERAND (expr, 0), type, comb);
      aff_combination_scale (comb, -1);
      aff_combination_add_cst (comb, -1);
      return;

    case ADDR_EXPR:
      /* Handle &MEM[ptr + CST] which is equivalent to POINTER_PLUS_EXPR. */
      if (TREE_CODE (TREE_OPERAND (expr, 0)) == MEM_REF)
        {
          expr = TREE_OPERAND (expr, 0);
          tree_to_aff_combination (TREE_OPERAND (expr, 0), type, comb);
          tree_to_aff_combination (TREE_OPERAND (expr, 1), sizetype, &tmp);
          aff_combination_add (comb, &tmp);
          return;
        }
      core = get_inner_reference (TREE_OPERAND (expr, 0), &bitsize, &bitpos,
                                  &toffset, &mode, &unsignedp, &reversep,
                                  &volatilep, false);
      if (bitpos % BITS_PER_UNIT != 0)
        break;
      aff_combination_const (comb, type, bitpos / BITS_PER_UNIT);
      if (TREE_CODE (core) == MEM_REF)
        {
          aff_combination_add_cst (comb, wi::to_widest (TREE_OPERAND (core, 1)));
          core = TREE_OPERAND (core, 0);
        }
      else
        core = build_fold_addr_expr (core);

      if (TREE_CODE (core) == ADDR_EXPR)
        aff_combination_add_elt (comb, core, 1);
      else
        {
          tree_to_aff_combination (core, type, &tmp);
          aff_combination_add (comb, &tmp);
        }
      if (toffset)
        {
          tree_to_aff_combination (toffset, type, &tmp);
          aff_combination_add (comb, &tmp);
        }
      return;

    case MEM_REF:
      if (TREE_CODE (TREE_OPERAND (expr, 0)) == ADDR_EXPR)
        tree_to_aff_combination (TREE_OPERAND (TREE_OPERAND (expr, 0), 0),
                                 type, comb);
      else if (integer_zerop (TREE_OPERAND (expr, 1)))
        {
          aff_combination_elt (comb, type, expr);
          return;
        }
      else
        aff_combination_elt (comb, type,
                             build2 (MEM_REF, TREE_TYPE (expr),
                                     TREE_OPERAND (expr, 0),
                                     build_int_cst
                                       (TREE_TYPE (TREE_OPERAND (expr, 1)), 0)));
      tree_to_aff_combination (TREE_OPERAND (expr, 1), sizetype, &tmp);
      aff_combination_add (comb, &tmp);
      return;

    default:
      break;
    }

  aff_combination_elt (comb, type, expr);
}

/* Creates EXPR + ELT * SCALE in TYPE. EXPR is taken from affine
   combination COMB. */
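/* For example (illustrative only): with EXPR = a, ELT = b and
   SCALE = -3 this builds the tree a - b * 3; pointer-typed operands
   are combined with POINTER_PLUS_EXPR rather than PLUS_EXPR. */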

static tree
add_elt_to_tree (tree expr, tree type, tree elt, const widest_int &scale_in,
                 aff_tree *comb ATTRIBUTE_UNUSED)
{
  enum tree_code code;
  tree type1 = type;
  if (POINTER_TYPE_P (type))
    type1 = sizetype;

  widest_int scale = wide_int_ext_for_comb (scale_in, comb);

  if (scale == -1
      && POINTER_TYPE_P (TREE_TYPE (elt)))
    {
      elt = convert_to_ptrofftype (elt);
      elt = fold_build1 (NEGATE_EXPR, TREE_TYPE (elt), elt);
      scale = 1;
    }

  if (scale == 1)
    {
      if (!expr)
        {
          if (POINTER_TYPE_P (TREE_TYPE (elt)))
            return elt;
          else
            return fold_convert (type1, elt);
        }

      if (POINTER_TYPE_P (TREE_TYPE (expr)))
        return fold_build_pointer_plus (expr, elt);
      if (POINTER_TYPE_P (TREE_TYPE (elt)))
        return fold_build_pointer_plus (elt, expr);
      return fold_build2 (PLUS_EXPR, type1,
                          expr, fold_convert (type1, elt));
    }

  if (scale == -1)
    {
      if (!expr)
        return fold_build1 (NEGATE_EXPR, type1,
                            fold_convert (type1, elt));

      if (POINTER_TYPE_P (TREE_TYPE (expr)))
        {
          elt = convert_to_ptrofftype (elt);
          elt = fold_build1 (NEGATE_EXPR, TREE_TYPE (elt), elt);
          return fold_build_pointer_plus (expr, elt);
        }
      return fold_build2 (MINUS_EXPR, type1,
                          expr, fold_convert (type1, elt));
    }

  elt = fold_convert (type1, elt);
  if (!expr)
    return fold_build2 (MULT_EXPR, type1, elt,
                        wide_int_to_tree (type1, scale));

  if (wi::neg_p (scale))
    {
      code = MINUS_EXPR;
      scale = -scale;
    }
  else
    code = PLUS_EXPR;

  elt = fold_build2 (MULT_EXPR, type1, elt,
                     wide_int_to_tree (type1, scale));
  if (POINTER_TYPE_P (TREE_TYPE (expr)))
    {
      if (code == MINUS_EXPR)
        elt = fold_build1 (NEGATE_EXPR, type1, elt);
      return fold_build_pointer_plus (expr, elt);
    }
  return fold_build2 (code, type1, expr, elt);
}

/* Makes tree from the affine combination COMB. */
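/* For example (illustrative only): the combination
   { offset = -1; elts = { a * 1, b * 4 } } is rebuilt as the tree
   a + b * 4 - 1; the negative offset is emitted as a subtraction so
   that an unsigned x yields x - 1 rather than x + 0xff..f. */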

tree
aff_combination_to_tree (aff_tree *comb)
{
  tree type = comb->type;
  tree expr = NULL_TREE;
  unsigned i;
  widest_int off, sgn;
  tree type1 = type;
  if (POINTER_TYPE_P (type))
    type1 = sizetype;

  gcc_assert (comb->n == MAX_AFF_ELTS || comb->rest == NULL_TREE);

  for (i = 0; i < comb->n; i++)
    expr = add_elt_to_tree (expr, type, comb->elts[i].val, comb->elts[i].coef,
                            comb);

  if (comb->rest)
    expr = add_elt_to_tree (expr, type, comb->rest, 1, comb);

  /* Ensure that we get x - 1, not x + (-1) or x + 0xff..f if x is
     unsigned. */
  if (wi::neg_p (comb->offset))
    {
      off = -comb->offset;
      sgn = -1;
    }
  else
    {
      off = comb->offset;
      sgn = 1;
    }
  return add_elt_to_tree (expr, type, wide_int_to_tree (type1, off), sgn,
                          comb);
}

/* Copies the tree elements of COMB to ensure that they are not shared. */

void
unshare_aff_combination (aff_tree *comb)
{
  unsigned i;

  for (i = 0; i < comb->n; i++)
    comb->elts[i].val = unshare_expr (comb->elts[i].val);
  if (comb->rest)
    comb->rest = unshare_expr (comb->rest);
}

/* Remove M-th element from COMB. */
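/* Note (not in the original sources): the freed slot is filled with
   the last element, so element order is not preserved; if COMB->rest
   is set, it is moved into the vacated position with coefficient 1. */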

void
aff_combination_remove_elt (aff_tree *comb, unsigned m)
{
  comb->n--;
  if (m <= comb->n)
    comb->elts[m] = comb->elts[comb->n];
  if (comb->rest)
    {
      comb->elts[comb->n].coef = 1;
      comb->elts[comb->n].val = comb->rest;
      comb->rest = NULL_TREE;
      comb->n++;
    }
}

/* Adds C * COEF * VAL to R. VAL may be NULL, in that case only
   C * COEF is added to R. */

static void
aff_combination_add_product (aff_tree *c, const widest_int &coef, tree val,
                             aff_tree *r)
{
  unsigned i;
  tree aval, type;

  for (i = 0; i < c->n; i++)
    {
      aval = c->elts[i].val;
      if (val)
        {
          type = TREE_TYPE (aval);
          aval = fold_build2 (MULT_EXPR, type, aval,
                              fold_convert (type, val));
        }

      aff_combination_add_elt (r, aval, coef * c->elts[i].coef);
    }

  if (c->rest)
    {
      aval = c->rest;
      if (val)
        {
          type = TREE_TYPE (aval);
          aval = fold_build2 (MULT_EXPR, type, aval,
                              fold_convert (type, val));
        }

      aff_combination_add_elt (r, aval, coef);
    }

  if (val)
    aff_combination_add_elt (r, val, coef * c->offset);
  else
    aff_combination_add_cst (r, coef * c->offset);
}

/* Multiplies C1 by C2, storing the result to R. */
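/* For example (illustrative only): multiplying the combinations for
   x + 1 and y + 2 produces
   { offset = 2; elts = { (x * y) * 1, y * 1, x * 2 } },
   i.e. the expanded product x*y + y + 2*x + 2. */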

void
aff_combination_mult (aff_tree *c1, aff_tree *c2, aff_tree *r)
{
  unsigned i;
  gcc_assert (TYPE_PRECISION (c1->type) == TYPE_PRECISION (c2->type));

  aff_combination_zero (r, c1->type);

  for (i = 0; i < c2->n; i++)
    aff_combination_add_product (c1, c2->elts[i].coef, c2->elts[i].val, r);
  if (c2->rest)
    aff_combination_add_product (c1, 1, c2->rest, r);
  aff_combination_add_product (c1, c2->offset, NULL, r);
}

/* Returns the element of COMB whose value is VAL, or NULL if no such
   element exists. If IDX is not NULL, it is set to the index of VAL in
   COMB. */

static struct aff_comb_elt *
aff_combination_find_elt (aff_tree *comb, tree val, unsigned *idx)
{
  unsigned i;

  for (i = 0; i < comb->n; i++)
    if (operand_equal_p (comb->elts[i].val, val, 0))
      {
        if (idx)
          *idx = i;

        return &comb->elts[i];
      }

  return NULL;
}

/* Element of the cache that maps ssa name NAME to its expanded form
   as an affine expression EXPANSION. */

struct name_expansion
{
  aff_tree expansion;

  /* True if the expansion for the name is just being generated. */
  unsigned in_progress : 1;
};

/* Expands SSA names in COMB recursively. CACHE is used to cache the
   results. */
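/* For example (illustrative only): if COMB contains the SSA name a_1
   with coefficient 2 and its defining statement is a_1 = b_0 + 3, the
   term 2 * a_1 is replaced by 2 * b_0 + 6. The expansion of each name
   is cached, so repeated occurrences are expanded only once. */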

void
aff_combination_expand (aff_tree *comb ATTRIBUTE_UNUSED,
                        hash_map<tree, name_expansion *> **cache)
{
  unsigned i;
  aff_tree to_add, current, curre;
  tree e, rhs;
  gimple *def;
  widest_int scale;
  struct name_expansion *exp;

  aff_combination_zero (&to_add, comb->type);
  for (i = 0; i < comb->n; i++)
    {
      tree type, name;
      enum tree_code code;

      e = comb->elts[i].val;
      type = TREE_TYPE (e);
      name = e;
      /* Look through some conversions. */
      if (CONVERT_EXPR_P (e)
          && (TYPE_PRECISION (type)
              >= TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (e, 0)))))
        name = TREE_OPERAND (e, 0);
      if (TREE_CODE (name) != SSA_NAME)
        continue;
      def = SSA_NAME_DEF_STMT (name);
      if (!is_gimple_assign (def) || gimple_assign_lhs (def) != name)
        continue;

      code = gimple_assign_rhs_code (def);
      if (code != SSA_NAME
          && !IS_EXPR_CODE_CLASS (TREE_CODE_CLASS (code))
          && (get_gimple_rhs_class (code) != GIMPLE_SINGLE_RHS
              || !is_gimple_min_invariant (gimple_assign_rhs1 (def))))
        continue;

      /* We do not know whether the reference retains its value at the
         place where the expansion is used. */
      if (TREE_CODE_CLASS (code) == tcc_reference)
        continue;

      if (!*cache)
        *cache = new hash_map<tree, name_expansion *>;
      name_expansion **slot = &(*cache)->get_or_insert (e);
      exp = *slot;

      if (!exp)
        {
          exp = XNEW (struct name_expansion);
          exp->in_progress = 1;
          *slot = exp;
          /* In principle this is a generally valid folding, but
             it is not unconditionally an optimization, so do it
             here and not in fold_unary. */
          /* Convert (T1)(X *+- CST) into (T1)X *+- (T1)CST if T1 is wider
             than the type of X and overflow for the type of X is
             undefined. */
          if (e != name
              && INTEGRAL_TYPE_P (type)
              && INTEGRAL_TYPE_P (TREE_TYPE (name))
              && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (name))
              && TYPE_PRECISION (type) > TYPE_PRECISION (TREE_TYPE (name))
              && (code == PLUS_EXPR || code == MINUS_EXPR || code == MULT_EXPR)
              && TREE_CODE (gimple_assign_rhs2 (def)) == INTEGER_CST)
            rhs = fold_build2 (code, type,
                               fold_convert (type, gimple_assign_rhs1 (def)),
                               fold_convert (type, gimple_assign_rhs2 (def)));
          else
            {
              rhs = gimple_assign_rhs_to_tree (def);
              if (e != name)
                rhs = fold_convert (type, rhs);
            }
          tree_to_aff_combination_expand (rhs, comb->type, &current, cache);
          exp->expansion = current;
          exp->in_progress = 0;
        }
      else
        {
          /* Since we follow the definitions in the SSA form, we should not
             enter a cycle unless we pass through a phi node. */
          gcc_assert (!exp->in_progress);
          current = exp->expansion;
        }

      /* Accumulate the new terms to TO_ADD, so that we do not modify
         COMB while traversing it; include the term -coef * E, to remove
         it from COMB. */
      scale = comb->elts[i].coef;
      aff_combination_zero (&curre, comb->type);
      aff_combination_add_elt (&curre, e, -scale);
      aff_combination_scale (&current, scale);
      aff_combination_add (&to_add, &current);
      aff_combination_add (&to_add, &curre);
    }
  aff_combination_add (comb, &to_add);
}

/* Similar to tree_to_aff_combination, but follows SSA name definitions
   and expands them recursively. CACHE is used to cache the expansions
   of the ssa names, to avoid exponential time complexity for cases
   like

   a1 = a0 + a0;
   a2 = a1 + a1;
   a3 = a2 + a2;
   ... */

void
tree_to_aff_combination_expand (tree expr, tree type, aff_tree *comb,
                                hash_map<tree, name_expansion *> **cache)
{
  tree_to_aff_combination (expr, type, comb);
  aff_combination_expand (comb, cache);
}

/* Frees memory occupied by struct name_expansion in *VALUE. Callback for
   hash_map::traverse. */

bool
free_name_expansion (tree const &, name_expansion **value, void *)
{
  free (*value);
  return true;
}

/* Frees memory allocated for the CACHE used by
   tree_to_aff_combination_expand. */

void
free_affine_expand_cache (hash_map<tree, name_expansion *> **cache)
{
  if (!*cache)
    return;

  (*cache)->traverse<void *, free_name_expansion> (NULL);
  delete (*cache);
  *cache = NULL;
}

/* If VAL != CST * DIV for any constant CST, returns false.
   Otherwise, if *MULT_SET is true, additionally compares CST and MULT,
   and if they are different, returns false. Finally, if neither of these
   two cases occur, true is returned, and CST is stored to MULT and MULT_SET
   is set to true. */
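/* For example (illustrative only): with VAL = 8 and DIV = 4 this
   records CST = 2; a later call with VAL = 6 and DIV = 3 then succeeds
   (6 == 2 * 3), while VAL = 9 and DIV = 3 fails because the quotient 3
   differs from the recorded multiplier. */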

static bool
wide_int_constant_multiple_p (const widest_int &val, const widest_int &div,
                              bool *mult_set, widest_int *mult)
{
  widest_int rem, cst;

  if (val == 0)
    {
      if (*mult_set && *mult != 0)
        return false;
      *mult_set = true;
      *mult = 0;
      return true;
    }

  if (div == 0)
    return false;

  if (!wi::multiple_of_p (val, div, SIGNED, &cst))
    return false;

  if (*mult_set && *mult != cst)
    return false;

  *mult_set = true;
  *mult = cst;
  return true;
}

/* Returns true if VAL = X * DIV for some constant X. If this is the case,
   X is stored to MULT. */
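/* For example (illustrative only): with VAL = { offset = 12; a * 6 }
   and DIV = { offset = 6; a * 3 }, every part of VAL is exactly twice
   the corresponding part of DIV, so this returns true and stores 2 to
   MULT. */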

bool
aff_combination_constant_multiple_p (aff_tree *val, aff_tree *div,
                                     widest_int *mult)
{
  bool mult_set = false;
  unsigned i;

  if (val->n == 0 && val->offset == 0)
    {
      *mult = 0;
      return true;
    }
  if (val->n != div->n)
    return false;

  if (val->rest || div->rest)
    return false;

  if (!wide_int_constant_multiple_p (val->offset, div->offset,
                                     &mult_set, mult))
    return false;

  for (i = 0; i < div->n; i++)
    {
      struct aff_comb_elt *elt
        = aff_combination_find_elt (val, div->elts[i].val, NULL);
      if (!elt)
        return false;
      if (!wide_int_constant_multiple_p (elt->coef, div->elts[i].coef,
                                         &mult_set, mult))
        return false;
    }

  gcc_assert (mult_set);
  return true;
}

/* Prints the affine VAL to the FILE. */

static void
print_aff (FILE *file, aff_tree *val)
{
  unsigned i;
  signop sgn = TYPE_SIGN (val->type);
  if (POINTER_TYPE_P (val->type))
    sgn = SIGNED;
  fprintf (file, "{\n  type = ");
  print_generic_expr (file, val->type, TDF_VOPS|TDF_MEMSYMS);
  fprintf (file, "\n  offset = ");
  print_dec (val->offset, file, sgn);
  if (val->n > 0)
    {
      fprintf (file, "\n  elements = {\n");
      for (i = 0; i < val->n; i++)
        {
          fprintf (file, "    [%d] = ", i);
          print_generic_expr (file, val->elts[i].val, TDF_VOPS|TDF_MEMSYMS);

          fprintf (file, " * ");
          print_dec (val->elts[i].coef, file, sgn);
          if (i != val->n - 1)
            fprintf (file, ", \n");
        }
      fprintf (file, "\n  }");
    }
  if (val->rest)
    {
      fprintf (file, "\n  rest = ");
      print_generic_expr (file, val->rest, TDF_VOPS|TDF_MEMSYMS);
    }
  fprintf (file, "\n}");
}

/* Prints the affine VAL to the standard error, used for debugging. */

DEBUG_FUNCTION void
debug_aff (aff_tree *val)
{
  print_aff (stderr, val);
  fprintf (stderr, "\n");
}

/* Computes address of the reference REF in ADDR. The size of the accessed
   location is stored to SIZE. Returns the ultimate containing object to
   which REF refers. */
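/* For example (illustrative only): for REF = a.b, where field b lies
   at byte offset 8 within a, ADDR becomes
   { offset = 8; elts = { &a * 1 } } in sizetype, and SIZE is the size
   of b in bytes, rounded up from bits. */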

tree
get_inner_reference_aff (tree ref, aff_tree *addr, widest_int *size)
{
  HOST_WIDE_INT bitsize, bitpos;
  tree toff;
  machine_mode mode;
  int uns, rev, vol;
  aff_tree tmp;
  tree base = get_inner_reference (ref, &bitsize, &bitpos, &toff, &mode,
                                   &uns, &rev, &vol, false);
  tree base_addr = build_fold_addr_expr (base);

  /* ADDR = &BASE + TOFF + BITPOS / BITS_PER_UNIT. */

  tree_to_aff_combination (base_addr, sizetype, addr);

  if (toff)
    {
      tree_to_aff_combination (toff, sizetype, &tmp);
      aff_combination_add (addr, &tmp);
    }

  aff_combination_const (&tmp, sizetype, bitpos / BITS_PER_UNIT);
  aff_combination_add (addr, &tmp);

  *size = (bitsize + BITS_PER_UNIT - 1) / BITS_PER_UNIT;

  return base;
}

/* Returns true if a region of size SIZE1 at position 0 and a region of
   size SIZE2 at position DIFF cannot overlap. */
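/* For example (illustrative only): with SIZE1 = 4, SIZE2 = 4 and a
   constant DIFF of 4, the regions are [0, 4) and [4, 8), so this
   returns true; with DIFF = 2 the regions overlap and it returns
   false, and a non-constant DIFF conservatively returns false. */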

bool
aff_comb_cannot_overlap_p (aff_tree *diff, const widest_int &size1,
                           const widest_int &size2)
{
  /* Unless the difference is a constant, we fail. */
  if (diff->n != 0)
    return false;

  if (wi::neg_p (diff->offset))
    {
      /* The second object is before the first one, we succeed if the last
         element of the second object is before the start of the first one. */
      return wi::neg_p (diff->offset + size2 - 1);
    }
  else
    {
      /* We succeed if the second object starts after the first one ends. */
      return wi::les_p (size1, diff->offset);
    }
}