/* Operations with affine combinations of trees.
   Copyright (C) 2005-2013 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tree.h"
#include "tree-pretty-print.h"
#include "pointer-set.h"
#include "tree-affine.h"
#include "gimple.h"
#include "flags.h"
#include "dumpfile.h"

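/* An affine combination (see tree-affine.h) represents the value
   OFFSET + ELTS[0].COEF * ELTS[0].VAL + ... + ELTS[N-1].COEF * ELTS[N-1].VAL
   + REST, where REST, if non-NULL, collects the terms that did not fit into
   the fixed-size ELTS array.  */
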
/* Extends CST as appropriate for the affine combination COMB.  */
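/* For instance, with a COMB type of precision 8 the constant 255 is
   sign-extended to -1, keeping offsets and coefficients in a canonical form
   that wraps the same way arithmetic in COMB->type would.  */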

double_int
double_int_ext_for_comb (double_int cst, aff_tree *comb)
{
  return cst.sext (TYPE_PRECISION (comb->type));
}

/* Initializes affine combination COMB so that its value is zero in TYPE.  */

static void
aff_combination_zero (aff_tree *comb, tree type)
{
  comb->type = type;
  comb->offset = double_int_zero;
  comb->n = 0;
  comb->rest = NULL_TREE;
}

/* Sets COMB to CST.  */

void
aff_combination_const (aff_tree *comb, tree type, double_int cst)
{
  aff_combination_zero (comb, type);
  comb->offset = double_int_ext_for_comb (cst, comb);
}

/* Sets COMB to single element ELT.  */

void
aff_combination_elt (aff_tree *comb, tree type, tree elt)
{
  aff_combination_zero (comb, type);

  comb->n = 1;
  comb->elts[0].val = elt;
  comb->elts[0].coef = double_int_one;
}

/* Scales COMB by SCALE.  */
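/* For example, scaling the combination 3 + 2*x by 4 yields 12 + 8*x.
   Coefficients that wrap around to zero in COMB->type are dropped, and a
   REST term, if present, is either promoted to a regular element with
   coefficient SCALE or folded as REST * SCALE.  */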

void
aff_combination_scale (aff_tree *comb, double_int scale)
{
  unsigned i, j;

  scale = double_int_ext_for_comb (scale, comb);
  if (scale.is_one ())
    return;

  if (scale.is_zero ())
    {
      aff_combination_zero (comb, comb->type);
      return;
    }

  comb->offset
    = double_int_ext_for_comb (scale * comb->offset, comb);
  for (i = 0, j = 0; i < comb->n; i++)
    {
      double_int new_coef;

      new_coef
        = double_int_ext_for_comb (scale * comb->elts[i].coef, comb);
      /* A coefficient may become zero due to overflow.  Remove the zero
         elements.  */
      if (new_coef.is_zero ())
        continue;
      comb->elts[j].coef = new_coef;
      comb->elts[j].val = comb->elts[i].val;
      j++;
    }
  comb->n = j;

  if (comb->rest)
    {
      tree type = comb->type;
      if (POINTER_TYPE_P (type))
        type = sizetype;
      if (comb->n < MAX_AFF_ELTS)
        {
          comb->elts[comb->n].coef = scale;
          comb->elts[comb->n].val = comb->rest;
          comb->rest = NULL_TREE;
          comb->n++;
        }
      else
        comb->rest = fold_build2 (MULT_EXPR, type, comb->rest,
                                  double_int_to_tree (type, scale));
    }
}

/* Adds ELT * SCALE to COMB.  */
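/* If COMB already contains ELT, the coefficients are merged: for example,
   adding ELT with scale -2 to a combination holding 2 * ELT cancels the
   element entirely.  Once MAX_AFF_ELTS distinct elements are in use, further
   terms are accumulated into COMB->rest instead.  */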

void
aff_combination_add_elt (aff_tree *comb, tree elt, double_int scale)
{
  unsigned i;
  tree type;

  scale = double_int_ext_for_comb (scale, comb);
  if (scale.is_zero ())
    return;

  for (i = 0; i < comb->n; i++)
    if (operand_equal_p (comb->elts[i].val, elt, 0))
      {
        double_int new_coef;

        new_coef = comb->elts[i].coef + scale;
        new_coef = double_int_ext_for_comb (new_coef, comb);
        if (!new_coef.is_zero ())
          {
            comb->elts[i].coef = new_coef;
            return;
          }

        comb->n--;
        comb->elts[i] = comb->elts[comb->n];

        if (comb->rest)
          {
            gcc_assert (comb->n == MAX_AFF_ELTS - 1);
            comb->elts[comb->n].coef = double_int_one;
            comb->elts[comb->n].val = comb->rest;
            comb->rest = NULL_TREE;
            comb->n++;
          }
        return;
      }
  if (comb->n < MAX_AFF_ELTS)
    {
      comb->elts[comb->n].coef = scale;
      comb->elts[comb->n].val = elt;
      comb->n++;
      return;
    }

  type = comb->type;
  if (POINTER_TYPE_P (type))
    type = sizetype;

  if (scale.is_one ())
    elt = fold_convert (type, elt);
  else
    elt = fold_build2 (MULT_EXPR, type,
                       fold_convert (type, elt),
                       double_int_to_tree (type, scale));

  if (comb->rest)
    comb->rest = fold_build2 (PLUS_EXPR, type, comb->rest,
                              elt);
  else
    comb->rest = elt;
}

/* Adds CST to C.  */

static void
aff_combination_add_cst (aff_tree *c, double_int cst)
{
  c->offset = double_int_ext_for_comb (c->offset + cst, c);
}

/* Adds COMB2 to COMB1.  */

void
aff_combination_add (aff_tree *comb1, aff_tree *comb2)
{
  unsigned i;

  aff_combination_add_cst (comb1, comb2->offset);
  for (i = 0; i < comb2->n; i++)
    aff_combination_add_elt (comb1, comb2->elts[i].val, comb2->elts[i].coef);
  if (comb2->rest)
    aff_combination_add_elt (comb1, comb2->rest, double_int_one);
}

/* Converts affine combination COMB to TYPE.  */

void
aff_combination_convert (aff_tree *comb, tree type)
{
  unsigned i, j;
  tree comb_type = comb->type;

  if (TYPE_PRECISION (type) > TYPE_PRECISION (comb_type))
    {
      tree val = fold_convert (type, aff_combination_to_tree (comb));
      tree_to_aff_combination (val, type, comb);
      return;
    }

  comb->type = type;
  if (comb->rest && !POINTER_TYPE_P (type))
    comb->rest = fold_convert (type, comb->rest);

  if (TYPE_PRECISION (type) == TYPE_PRECISION (comb_type))
    return;

  comb->offset = double_int_ext_for_comb (comb->offset, comb);
  for (i = j = 0; i < comb->n; i++)
    {
      double_int new_coef = double_int_ext_for_comb (comb->elts[i].coef, comb);
      if (new_coef.is_zero ())
        continue;
      comb->elts[j].coef = new_coef;
      comb->elts[j].val = fold_convert (type, comb->elts[i].val);
      j++;
    }

  comb->n = j;
  if (comb->n < MAX_AFF_ELTS && comb->rest)
    {
      comb->elts[comb->n].coef = double_int_one;
      comb->elts[comb->n].val = comb->rest;
      comb->rest = NULL_TREE;
      comb->n++;
    }
}

/* Splits EXPR into an affine combination of parts.  */
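/* For example (illustrative), the expression a + 4 * b - 8 decomposes into
   offset -8 and the elements {a, 1} and {b, 4}; subexpressions that cannot
   be decomposed further become single elements with coefficient 1.  */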

void
tree_to_aff_combination (tree expr, tree type, aff_tree *comb)
{
  aff_tree tmp;
  enum tree_code code;
  tree cst, core, toffset;
  HOST_WIDE_INT bitpos, bitsize;
  enum machine_mode mode;
  int unsignedp, volatilep;

  STRIP_NOPS (expr);

  code = TREE_CODE (expr);
  switch (code)
    {
    case INTEGER_CST:
      aff_combination_const (comb, type, tree_to_double_int (expr));
      return;

    case POINTER_PLUS_EXPR:
      tree_to_aff_combination (TREE_OPERAND (expr, 0), type, comb);
      tree_to_aff_combination (TREE_OPERAND (expr, 1), sizetype, &tmp);
      aff_combination_add (comb, &tmp);
      return;

    case PLUS_EXPR:
    case MINUS_EXPR:
      tree_to_aff_combination (TREE_OPERAND (expr, 0), type, comb);
      tree_to_aff_combination (TREE_OPERAND (expr, 1), type, &tmp);
      if (code == MINUS_EXPR)
        aff_combination_scale (&tmp, double_int_minus_one);
      aff_combination_add (comb, &tmp);
      return;

    case MULT_EXPR:
      cst = TREE_OPERAND (expr, 1);
      if (TREE_CODE (cst) != INTEGER_CST)
        break;
      tree_to_aff_combination (TREE_OPERAND (expr, 0), type, comb);
      aff_combination_scale (comb, tree_to_double_int (cst));
      return;

    case NEGATE_EXPR:
      tree_to_aff_combination (TREE_OPERAND (expr, 0), type, comb);
      aff_combination_scale (comb, double_int_minus_one);
      return;

    case BIT_NOT_EXPR:
      /* ~x = -x - 1 */
      tree_to_aff_combination (TREE_OPERAND (expr, 0), type, comb);
      aff_combination_scale (comb, double_int_minus_one);
      aff_combination_add_cst (comb, double_int_minus_one);
      return;

    case ADDR_EXPR:
      /* Handle &MEM[ptr + CST] which is equivalent to POINTER_PLUS_EXPR.  */
      if (TREE_CODE (TREE_OPERAND (expr, 0)) == MEM_REF)
        {
          expr = TREE_OPERAND (expr, 0);
          tree_to_aff_combination (TREE_OPERAND (expr, 0), type, comb);
          tree_to_aff_combination (TREE_OPERAND (expr, 1), sizetype, &tmp);
          aff_combination_add (comb, &tmp);
          return;
        }
      core = get_inner_reference (TREE_OPERAND (expr, 0), &bitsize, &bitpos,
                                  &toffset, &mode, &unsignedp, &volatilep,
                                  false);
      if (bitpos % BITS_PER_UNIT != 0)
        break;
      aff_combination_const (comb, type,
                             double_int::from_uhwi (bitpos / BITS_PER_UNIT));
      core = build_fold_addr_expr (core);
      if (TREE_CODE (core) == ADDR_EXPR)
        aff_combination_add_elt (comb, core, double_int_one);
      else
        {
          tree_to_aff_combination (core, type, &tmp);
          aff_combination_add (comb, &tmp);
        }
      if (toffset)
        {
          tree_to_aff_combination (toffset, type, &tmp);
          aff_combination_add (comb, &tmp);
        }
      return;

    case MEM_REF:
      if (TREE_CODE (TREE_OPERAND (expr, 0)) == ADDR_EXPR)
        tree_to_aff_combination (TREE_OPERAND (TREE_OPERAND (expr, 0), 0),
                                 type, comb);
      else if (integer_zerop (TREE_OPERAND (expr, 1)))
        {
          aff_combination_elt (comb, type, expr);
          return;
        }
      else
        aff_combination_elt (comb, type,
                             build2 (MEM_REF, TREE_TYPE (expr),
                                     TREE_OPERAND (expr, 0),
                                     build_int_cst
                                      (TREE_TYPE (TREE_OPERAND (expr, 1)), 0)));
      tree_to_aff_combination (TREE_OPERAND (expr, 1), sizetype, &tmp);
      aff_combination_add (comb, &tmp);
      return;

    default:
      break;
    }

  aff_combination_elt (comb, type, expr);
}

/* Creates EXPR + ELT * SCALE in TYPE.  EXPR is taken from affine
   combination COMB.  */

static tree
add_elt_to_tree (tree expr, tree type, tree elt, double_int scale,
                 aff_tree *comb)
{
  enum tree_code code;
  tree type1 = type;
  if (POINTER_TYPE_P (type))
    type1 = sizetype;

  scale = double_int_ext_for_comb (scale, comb);
  elt = fold_convert (type1, elt);

  if (scale.is_one ())
    {
      if (!expr)
        return fold_convert (type, elt);

      if (POINTER_TYPE_P (type))
        return fold_build_pointer_plus (expr, elt);
      return fold_build2 (PLUS_EXPR, type, expr, elt);
    }

  if (scale.is_minus_one ())
    {
      if (!expr)
        return fold_convert (type, fold_build1 (NEGATE_EXPR, type1, elt));

      if (POINTER_TYPE_P (type))
        {
          elt = fold_build1 (NEGATE_EXPR, type1, elt);
          return fold_build_pointer_plus (expr, elt);
        }
      return fold_build2 (MINUS_EXPR, type, expr, elt);
    }

  if (!expr)
    return fold_convert (type,
                         fold_build2 (MULT_EXPR, type1, elt,
                                      double_int_to_tree (type1, scale)));

  if (scale.is_negative ())
    {
      code = MINUS_EXPR;
      scale = -scale;
    }
  else
    code = PLUS_EXPR;

  elt = fold_build2 (MULT_EXPR, type1, elt,
                     double_int_to_tree (type1, scale));
  if (POINTER_TYPE_P (type))
    {
      if (code == MINUS_EXPR)
        elt = fold_build1 (NEGATE_EXPR, type1, elt);
      return fold_build_pointer_plus (expr, elt);
    }
  return fold_build2 (code, type, expr, elt);
}

/* Makes tree from the affine combination COMB.  */

tree
aff_combination_to_tree (aff_tree *comb)
{
  tree type = comb->type;
  tree expr = NULL_TREE;
  unsigned i;
  double_int off, sgn;
  tree type1 = type;
  if (POINTER_TYPE_P (type))
    type1 = sizetype;

  gcc_assert (comb->n == MAX_AFF_ELTS || comb->rest == NULL_TREE);

  for (i = 0; i < comb->n; i++)
    expr = add_elt_to_tree (expr, type, comb->elts[i].val, comb->elts[i].coef,
                            comb);

  if (comb->rest)
    expr = add_elt_to_tree (expr, type, comb->rest, double_int_one, comb);

  /* Ensure that we get x - 1, not x + (-1) or x + 0xff..f if x is
     unsigned.  */
  if (comb->offset.is_negative ())
    {
      off = -comb->offset;
      sgn = double_int_minus_one;
    }
  else
    {
      off = comb->offset;
      sgn = double_int_one;
    }
  return add_elt_to_tree (expr, type, double_int_to_tree (type1, off), sgn,
                          comb);
}

/* Copies the tree elements of COMB to ensure that they are not shared.  */

void
unshare_aff_combination (aff_tree *comb)
{
  unsigned i;

  for (i = 0; i < comb->n; i++)
    comb->elts[i].val = unshare_expr (comb->elts[i].val);
  if (comb->rest)
    comb->rest = unshare_expr (comb->rest);
}

/* Remove M-th element from COMB.  */

void
aff_combination_remove_elt (aff_tree *comb, unsigned m)
{
  comb->n--;
  if (m <= comb->n)
    comb->elts[m] = comb->elts[comb->n];
  if (comb->rest)
    {
      comb->elts[comb->n].coef = double_int_one;
      comb->elts[comb->n].val = comb->rest;
      comb->rest = NULL_TREE;
      comb->n++;
    }
}

/* Adds C * COEF * VAL to R.  VAL may be NULL, in that case only
   C * COEF is added to R.  */

static void
aff_combination_add_product (aff_tree *c, double_int coef, tree val,
                             aff_tree *r)
{
  unsigned i;
  tree aval, type;

  for (i = 0; i < c->n; i++)
    {
      aval = c->elts[i].val;
      if (val)
        {
          type = TREE_TYPE (aval);
          aval = fold_build2 (MULT_EXPR, type, aval,
                              fold_convert (type, val));
        }

      aff_combination_add_elt (r, aval, coef * c->elts[i].coef);
    }

  if (c->rest)
    {
      aval = c->rest;
      if (val)
        {
          type = TREE_TYPE (aval);
          aval = fold_build2 (MULT_EXPR, type, aval,
                              fold_convert (type, val));
        }

      aff_combination_add_elt (r, aval, coef);
    }

  if (val)
    aff_combination_add_elt (r, val, coef * c->offset);
  else
    aff_combination_add_cst (r, coef * c->offset);
}

/* Multiplies C1 by C2, storing the result to R.  */
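/* The product is formed by distributing C1 over C2: each element COEF * VAL
   of C2 contributes C1 * COEF * VAL, the REST of C2 contributes C1 * REST,
   and the constant offset of C2 contributes C1 * OFFSET.  */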

void
aff_combination_mult (aff_tree *c1, aff_tree *c2, aff_tree *r)
{
  unsigned i;
  gcc_assert (TYPE_PRECISION (c1->type) == TYPE_PRECISION (c2->type));

  aff_combination_zero (r, c1->type);

  for (i = 0; i < c2->n; i++)
    aff_combination_add_product (c1, c2->elts[i].coef, c2->elts[i].val, r);
  if (c2->rest)
    aff_combination_add_product (c1, double_int_one, c2->rest, r);
  aff_combination_add_product (c1, c2->offset, NULL, r);
}

/* Returns the element of COMB whose value is VAL, or NULL if no such
   element exists.  If IDX is not NULL, it is set to the index of VAL in
   COMB.  */

static struct aff_comb_elt *
aff_combination_find_elt (aff_tree *comb, tree val, unsigned *idx)
{
  unsigned i;

  for (i = 0; i < comb->n; i++)
    if (operand_equal_p (comb->elts[i].val, val, 0))
      {
        if (idx)
          *idx = i;

        return &comb->elts[i];
      }

  return NULL;
}

/* Element of the cache that maps ssa name NAME to its expanded form
   as an affine expression EXPANSION.  */

struct name_expansion
{
  aff_tree expansion;

  /* True if the expansion for the name is just being generated.  */
  unsigned in_progress : 1;
};

/* Expands SSA names in COMB recursively.  CACHE is used to cache the
   results.  */

void
aff_combination_expand (aff_tree *comb ATTRIBUTE_UNUSED,
                        struct pointer_map_t **cache ATTRIBUTE_UNUSED)
{
  unsigned i;
  aff_tree to_add, current, curre;
  tree e, rhs;
  gimple def;
  double_int scale;
  void **slot;
  struct name_expansion *exp;

  aff_combination_zero (&to_add, comb->type);
  for (i = 0; i < comb->n; i++)
    {
      tree type, name;
      enum tree_code code;

      e = comb->elts[i].val;
      type = TREE_TYPE (e);
      name = e;
      /* Look through some conversions.  */
      if (TREE_CODE (e) == NOP_EXPR
          && (TYPE_PRECISION (type)
              >= TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (e, 0)))))
        name = TREE_OPERAND (e, 0);
      if (TREE_CODE (name) != SSA_NAME)
        continue;
      def = SSA_NAME_DEF_STMT (name);
      if (!is_gimple_assign (def) || gimple_assign_lhs (def) != name)
        continue;

      code = gimple_assign_rhs_code (def);
      if (code != SSA_NAME
          && !IS_EXPR_CODE_CLASS (TREE_CODE_CLASS (code))
          && (get_gimple_rhs_class (code) != GIMPLE_SINGLE_RHS
              || !is_gimple_min_invariant (gimple_assign_rhs1 (def))))
        continue;

      /* We do not know whether the reference retains its value at the
         place where the expansion is used.  */
      if (TREE_CODE_CLASS (code) == tcc_reference)
        continue;

      if (!*cache)
        *cache = pointer_map_create ();
      slot = pointer_map_insert (*cache, e);
      exp = (struct name_expansion *) *slot;

      if (!exp)
        {
          exp = XNEW (struct name_expansion);
          exp->in_progress = 1;
          *slot = exp;
          /* In principle this is a generally valid folding, but
             it is not unconditionally an optimization, so do it
             here and not in fold_unary.  */
          /* Convert (T1)(X *+- CST) into (T1)X *+- (T1)CST if T1 is wider
             than the type of X and overflow for the type of X is
             undefined.  */
          if (e != name
              && INTEGRAL_TYPE_P (type)
              && INTEGRAL_TYPE_P (TREE_TYPE (name))
              && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (name))
              && TYPE_PRECISION (type) > TYPE_PRECISION (TREE_TYPE (name))
              && (code == PLUS_EXPR || code == MINUS_EXPR || code == MULT_EXPR)
              && TREE_CODE (gimple_assign_rhs2 (def)) == INTEGER_CST)
            rhs = fold_build2 (code, type,
                               fold_convert (type, gimple_assign_rhs1 (def)),
                               fold_convert (type, gimple_assign_rhs2 (def)));
          else
            {
              rhs = gimple_assign_rhs_to_tree (def);
              if (e != name)
                rhs = fold_convert (type, rhs);
            }
          tree_to_aff_combination_expand (rhs, comb->type, &current, cache);
          exp->expansion = current;
          exp->in_progress = 0;
        }
      else
        {
          /* Since we follow the definitions in the SSA form, we should not
             enter a cycle unless we pass through a phi node.  */
          gcc_assert (!exp->in_progress);
          current = exp->expansion;
        }

      /* Accumulate the new terms to TO_ADD, so that we do not modify
         COMB while traversing it; include the term -coef * E, to remove
         it from COMB.  */
      scale = comb->elts[i].coef;
      aff_combination_zero (&curre, comb->type);
      aff_combination_add_elt (&curre, e, -scale);
      aff_combination_scale (&current, scale);
      aff_combination_add (&to_add, &current);
      aff_combination_add (&to_add, &curre);
    }
  aff_combination_add (comb, &to_add);
}

/* Similar to tree_to_aff_combination, but follows SSA name definitions
   and expands them recursively.  CACHE is used to cache the expansions
   of the ssa names, to avoid exponential time complexity for cases
   like

   a1 = a0 + a0;
   a2 = a1 + a1;
   a3 = a2 + a2;
   ...  */
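/* The cache is allocated lazily on the first expansion; once the caller is
   done with a series of expansions, it should release it with
   free_affine_expand_cache.  */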

void
tree_to_aff_combination_expand (tree expr, tree type, aff_tree *comb,
                                struct pointer_map_t **cache)
{
  tree_to_aff_combination (expr, type, comb);
  aff_combination_expand (comb, cache);
}

/* Frees memory occupied by struct name_expansion in *VALUE.  Callback for
   pointer_map_traverse.  */

static bool
free_name_expansion (const void *key ATTRIBUTE_UNUSED, void **value,
                     void *data ATTRIBUTE_UNUSED)
{
  struct name_expansion *const exp = (struct name_expansion *) *value;

  free (exp);
  return true;
}

/* Frees memory allocated for the CACHE used by
   tree_to_aff_combination_expand.  */

void
free_affine_expand_cache (struct pointer_map_t **cache)
{
  if (!*cache)
    return;

  pointer_map_traverse (*cache, free_name_expansion, NULL);
  pointer_map_destroy (*cache);
  *cache = NULL;
}

/* If VAL != CST * DIV for any constant CST, returns false.
   Otherwise, if VAL != 0 (and hence CST != 0), and *MULT_SET is true,
   additionally compares CST and MULT, and if they are different,
   returns false.  Finally, if neither of these two cases occur,
   true is returned, and if CST != 0, CST is stored to MULT and
   MULT_SET is set to true.  */

static bool
double_int_constant_multiple_p (double_int val, double_int div,
                                bool *mult_set, double_int *mult)
{
  double_int rem, cst;

  if (val.is_zero ())
    return true;

  if (div.is_zero ())
    return false;

  cst = val.sdivmod (div, FLOOR_DIV_EXPR, &rem);
  if (!rem.is_zero ())
    return false;

  if (*mult_set && *mult != cst)
    return false;

  *mult_set = true;
  *mult = cst;
  return true;
}

/* Returns true if VAL = X * DIV for some constant X.  If this is the case,
   X is stored to MULT.  */
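/* For example (illustrative), with VAL = 6 + 4 * a and DIV = 3 + 2 * a the
   function returns true and stores 2 to MULT; if the offsets or any pair of
   matching coefficients imply different multipliers, it returns false.  */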

bool
aff_combination_constant_multiple_p (aff_tree *val, aff_tree *div,
                                     double_int *mult)
{
  bool mult_set = false;
  unsigned i;

  if (val->n == 0 && val->offset.is_zero ())
    {
      *mult = double_int_zero;
      return true;
    }
  if (val->n != div->n)
    return false;

  if (val->rest || div->rest)
    return false;

  if (!double_int_constant_multiple_p (val->offset, div->offset,
                                       &mult_set, mult))
    return false;

  for (i = 0; i < div->n; i++)
    {
      struct aff_comb_elt *elt
              = aff_combination_find_elt (val, div->elts[i].val, NULL);
      if (!elt)
        return false;
      if (!double_int_constant_multiple_p (elt->coef, div->elts[i].coef,
                                           &mult_set, mult))
        return false;
    }

  gcc_assert (mult_set);
  return true;
}

/* Prints the affine VAL to the FILE.  */

static void
print_aff (FILE *file, aff_tree *val)
{
  unsigned i;
  bool uns = TYPE_UNSIGNED (val->type);
  if (POINTER_TYPE_P (val->type))
    uns = false;
  fprintf (file, "{\n  type = ");
  print_generic_expr (file, val->type, TDF_VOPS|TDF_MEMSYMS);
  fprintf (file, "\n  offset = ");
  dump_double_int (file, val->offset, uns);
  if (val->n > 0)
    {
      fprintf (file, "\n  elements = {\n");
      for (i = 0; i < val->n; i++)
        {
          fprintf (file, "    [%d] = ", i);
          print_generic_expr (file, val->elts[i].val, TDF_VOPS|TDF_MEMSYMS);

          fprintf (file, " * ");
          dump_double_int (file, val->elts[i].coef, uns);
          if (i != val->n - 1)
            fprintf (file, ", \n");
        }
      fprintf (file, "\n  }");
    }
  if (val->rest)
    {
      fprintf (file, "\n  rest = ");
      print_generic_expr (file, val->rest, TDF_VOPS|TDF_MEMSYMS);
    }
  fprintf (file, "\n}");
}

/* Prints the affine VAL to the standard error, used for debugging.  */

DEBUG_FUNCTION void
debug_aff (aff_tree *val)
{
  print_aff (stderr, val);
  fprintf (stderr, "\n");
}

/* Returns address of the reference REF in ADDR.  The size of the accessed
   location is stored to SIZE.  */

void
get_inner_reference_aff (tree ref, aff_tree *addr, double_int *size)
{
  HOST_WIDE_INT bitsize, bitpos;
  tree toff;
  enum machine_mode mode;
  int uns, vol;
  aff_tree tmp;
  tree base = get_inner_reference (ref, &bitsize, &bitpos, &toff, &mode,
                                   &uns, &vol, false);
  tree base_addr = build_fold_addr_expr (base);

  /* ADDR = &BASE + TOFF + BITPOS / BITS_PER_UNIT.  */

  tree_to_aff_combination (base_addr, sizetype, addr);

  if (toff)
    {
      tree_to_aff_combination (toff, sizetype, &tmp);
      aff_combination_add (addr, &tmp);
    }

  aff_combination_const (&tmp, sizetype,
                         double_int::from_shwi (bitpos / BITS_PER_UNIT));
  aff_combination_add (addr, &tmp);

  *size = double_int::from_shwi ((bitsize + BITS_PER_UNIT - 1) / BITS_PER_UNIT);
}

/* Returns true if a region of size SIZE1 at position 0 and a region of
   size SIZE2 at position DIFF cannot overlap.  */
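/* For example (illustrative), with SIZE1 = 4 and SIZE2 = 2, a constant DIFF
   of 4 or of -2 means the regions [0, 4) and [DIFF, DIFF + 2) cannot
   overlap, whereas a DIFF of 2 means they might.  */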

bool
aff_comb_cannot_overlap_p (aff_tree *diff, double_int size1, double_int size2)
{
  double_int d, bound;

  /* Unless the difference is a constant, we fail.  */
  if (diff->n != 0)
    return false;

  d = diff->offset;
  if (d.is_negative ())
    {
      /* The second object is before the first one, we succeed if the last
         element of the second object is before the start of the first one.  */
      bound = d + size2 + double_int_minus_one;
      return bound.is_negative ();
    }
  else
    {
      /* We succeed if the second object starts after the first one ends.  */
      return size1.sle (d);
    }
}