/* Operations with affine combinations of trees.
   Copyright (C) 2005, 2007, 2008, 2010 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tree.h"
#include "output.h"
#include "tree-pretty-print.h"
#include "tree-dump.h"
#include "pointer-set.h"
#include "tree-affine.h"
#include "gimple.h"
#include "flags.h"

/* Extends CST as appropriate for the affine combination COMB.  */

double_int
double_int_ext_for_comb (double_int cst, aff_tree *comb)
{
  return double_int_sext (cst, TYPE_PRECISION (comb->type));
}
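
/* Editorial example (illustrative; not part of the original GCC sources):
   extension here is sign-extension from the precision of COMB->type, so
   constants behave as if computed in that type.  Assuming an aff_tree
   COMB whose type has precision 8:

       double_int c = uhwi_to_double_int (0xff);
       c = double_int_ext_for_comb (c, &comb);

   leaves C representing -1, so adding 0xff to a coefficient is the same
   as subtracting one.  */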

/* Initializes affine combination COMB so that its value is zero in TYPE.  */

static void
aff_combination_zero (aff_tree *comb, tree type)
{
  comb->type = type;
  comb->offset = double_int_zero;
  comb->n = 0;
  comb->rest = NULL_TREE;
}

/* Sets COMB to CST.  */

void
aff_combination_const (aff_tree *comb, tree type, double_int cst)
{
  aff_combination_zero (comb, type);
  comb->offset = double_int_ext_for_comb (cst, comb);
}

/* Sets COMB to single element ELT.  */

void
aff_combination_elt (aff_tree *comb, tree type, tree elt)
{
  aff_combination_zero (comb, type);

  comb->n = 1;
  comb->elts[0].val = elt;
  comb->elts[0].coef = double_int_one;
}

/* Scales COMB by SCALE.  */

void
aff_combination_scale (aff_tree *comb, double_int scale)
{
  unsigned i, j;

  scale = double_int_ext_for_comb (scale, comb);
  if (double_int_one_p (scale))
    return;

  if (double_int_zero_p (scale))
    {
      aff_combination_zero (comb, comb->type);
      return;
    }

  comb->offset
    = double_int_ext_for_comb (double_int_mul (scale, comb->offset), comb);
  for (i = 0, j = 0; i < comb->n; i++)
    {
      double_int new_coef;

      new_coef
	= double_int_ext_for_comb (double_int_mul (scale, comb->elts[i].coef),
				   comb);
      /* A coefficient may become zero due to overflow.  Remove the zero
	 elements.  */
      if (double_int_zero_p (new_coef))
	continue;
      comb->elts[j].coef = new_coef;
      comb->elts[j].val = comb->elts[i].val;
      j++;
    }
  comb->n = j;

  if (comb->rest)
    {
      tree type = comb->type;
      if (POINTER_TYPE_P (type))
	type = sizetype;
      if (comb->n < MAX_AFF_ELTS)
	{
	  comb->elts[comb->n].coef = scale;
	  comb->elts[comb->n].val = comb->rest;
	  comb->rest = NULL_TREE;
	  comb->n++;
	}
      else
	comb->rest = fold_build2 (MULT_EXPR, type, comb->rest,
				  double_int_to_tree (type, scale));
    }
}
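
/* Editorial example (illustrative; not part of the original GCC sources):
   scaling the combination  a + 4*b + 7  by 3 multiplies the offset and
   every coefficient, giving  3*a + 12*b + 21.  Coefficients that wrap to
   zero in COMB->type are dropped, and a pending REST term is either moved
   into the element array with coefficient SCALE or multiplied in place.  */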

/* Adds ELT * SCALE to COMB.  */

void
aff_combination_add_elt (aff_tree *comb, tree elt, double_int scale)
{
  unsigned i;
  tree type;

  scale = double_int_ext_for_comb (scale, comb);
  if (double_int_zero_p (scale))
    return;

  for (i = 0; i < comb->n; i++)
    if (operand_equal_p (comb->elts[i].val, elt, 0))
      {
	double_int new_coef;

	new_coef = double_int_add (comb->elts[i].coef, scale);
	new_coef = double_int_ext_for_comb (new_coef, comb);
	if (!double_int_zero_p (new_coef))
	  {
	    comb->elts[i].coef = new_coef;
	    return;
	  }

	comb->n--;
	comb->elts[i] = comb->elts[comb->n];

	if (comb->rest)
	  {
	    gcc_assert (comb->n == MAX_AFF_ELTS - 1);
	    comb->elts[comb->n].coef = double_int_one;
	    comb->elts[comb->n].val = comb->rest;
	    comb->rest = NULL_TREE;
	    comb->n++;
	  }
	return;
      }
  if (comb->n < MAX_AFF_ELTS)
    {
      comb->elts[comb->n].coef = scale;
      comb->elts[comb->n].val = elt;
      comb->n++;
      return;
    }

  type = comb->type;
  if (POINTER_TYPE_P (type))
    type = sizetype;

  if (double_int_one_p (scale))
    elt = fold_convert (type, elt);
  else
    elt = fold_build2 (MULT_EXPR, type,
		       fold_convert (type, elt),
		       double_int_to_tree (type, scale));

  if (comb->rest)
    comb->rest = fold_build2 (PLUS_EXPR, type, comb->rest,
			      elt);
  else
    comb->rest = elt;
}
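
/* Editorial example (illustrative; not part of the original GCC sources):
   adding  b * -4  to  a + 4*b + 7  cancels the existing b term exactly,
   so the element is removed instead of being kept with coefficient zero,
   leaving  a + 7.  Once MAX_AFF_ELTS distinct elements are in use, any
   further term is folded into COMB->rest as an ordinary tree.  */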

/* Adds CST to C.  */

static void
aff_combination_add_cst (aff_tree *c, double_int cst)
{
  c->offset = double_int_ext_for_comb (double_int_add (c->offset, cst), c);
}

/* Adds COMB2 to COMB1.  */

void
aff_combination_add (aff_tree *comb1, aff_tree *comb2)
{
  unsigned i;

  aff_combination_add_cst (comb1, comb2->offset);
  for (i = 0; i < comb2->n; i++)
    aff_combination_add_elt (comb1, comb2->elts[i].val, comb2->elts[i].coef);
  if (comb2->rest)
    aff_combination_add_elt (comb1, comb2->rest, double_int_one);
}

/* Converts affine combination COMB to TYPE.  */

void
aff_combination_convert (aff_tree *comb, tree type)
{
  unsigned i, j;
  tree comb_type = comb->type;

  if (TYPE_PRECISION (type) > TYPE_PRECISION (comb_type))
    {
      tree val = fold_convert (type, aff_combination_to_tree (comb));
      tree_to_aff_combination (val, type, comb);
      return;
    }

  comb->type = type;
  if (comb->rest && !POINTER_TYPE_P (type))
    comb->rest = fold_convert (type, comb->rest);

  if (TYPE_PRECISION (type) == TYPE_PRECISION (comb_type))
    return;

  comb->offset = double_int_ext_for_comb (comb->offset, comb);
  for (i = j = 0; i < comb->n; i++)
    {
      double_int new_coef = double_int_ext_for_comb (comb->elts[i].coef, comb);
      if (double_int_zero_p (new_coef))
	continue;
      comb->elts[j].coef = new_coef;
      comb->elts[j].val = fold_convert (type, comb->elts[i].val);
      j++;
    }

  comb->n = j;
  if (comb->n < MAX_AFF_ELTS && comb->rest)
    {
      comb->elts[comb->n].coef = double_int_one;
      comb->elts[comb->n].val = comb->rest;
      comb->rest = NULL_TREE;
      comb->n++;
    }
}
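
/* Editorial note (illustrative; not part of the original GCC sources):
   widening cannot be done coefficient by coefficient, because arithmetic
   that wrapped in the narrow type has no faithful image in the wider one;
   the combination is therefore rebuilt from a converted tree.  Narrowing
   is done in place: the offset and coefficients are re-extended to the
   smaller precision and terms whose coefficients become zero are
   dropped.  */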

/* Splits EXPR into an affine combination of parts.  */

void
tree_to_aff_combination (tree expr, tree type, aff_tree *comb)
{
  aff_tree tmp;
  enum tree_code code;
  tree cst, core, toffset;
  HOST_WIDE_INT bitpos, bitsize;
  enum machine_mode mode;
  int unsignedp, volatilep;

  STRIP_NOPS (expr);

  code = TREE_CODE (expr);
  switch (code)
    {
    case INTEGER_CST:
      aff_combination_const (comb, type, tree_to_double_int (expr));
      return;

    case POINTER_PLUS_EXPR:
      tree_to_aff_combination (TREE_OPERAND (expr, 0), type, comb);
      tree_to_aff_combination (TREE_OPERAND (expr, 1), sizetype, &tmp);
      aff_combination_add (comb, &tmp);
      return;

    case PLUS_EXPR:
    case MINUS_EXPR:
      tree_to_aff_combination (TREE_OPERAND (expr, 0), type, comb);
      tree_to_aff_combination (TREE_OPERAND (expr, 1), type, &tmp);
      if (code == MINUS_EXPR)
	aff_combination_scale (&tmp, double_int_minus_one);
      aff_combination_add (comb, &tmp);
      return;

    case MULT_EXPR:
      cst = TREE_OPERAND (expr, 1);
      if (TREE_CODE (cst) != INTEGER_CST)
	break;
      tree_to_aff_combination (TREE_OPERAND (expr, 0), type, comb);
      aff_combination_scale (comb, tree_to_double_int (cst));
      return;

    case NEGATE_EXPR:
      tree_to_aff_combination (TREE_OPERAND (expr, 0), type, comb);
      aff_combination_scale (comb, double_int_minus_one);
      return;

    case BIT_NOT_EXPR:
      /* ~x = -x - 1 */
      tree_to_aff_combination (TREE_OPERAND (expr, 0), type, comb);
      aff_combination_scale (comb, double_int_minus_one);
      aff_combination_add_cst (comb, double_int_minus_one);
      return;

    case ADDR_EXPR:
      /* Handle &MEM[ptr + CST] which is equivalent to POINTER_PLUS_EXPR.  */
      if (TREE_CODE (TREE_OPERAND (expr, 0)) == MEM_REF)
	{
	  expr = TREE_OPERAND (expr, 0);
	  tree_to_aff_combination (TREE_OPERAND (expr, 0), type, comb);
	  tree_to_aff_combination (TREE_OPERAND (expr, 1), sizetype, &tmp);
	  aff_combination_add (comb, &tmp);
	  return;
	}
      core = get_inner_reference (TREE_OPERAND (expr, 0), &bitsize, &bitpos,
				  &toffset, &mode, &unsignedp, &volatilep,
				  false);
      if (bitpos % BITS_PER_UNIT != 0)
	break;
      aff_combination_const (comb, type,
			     uhwi_to_double_int (bitpos / BITS_PER_UNIT));
      core = build_fold_addr_expr (core);
      if (TREE_CODE (core) == ADDR_EXPR)
	aff_combination_add_elt (comb, core, double_int_one);
      else
	{
	  tree_to_aff_combination (core, type, &tmp);
	  aff_combination_add (comb, &tmp);
	}
      if (toffset)
	{
	  tree_to_aff_combination (toffset, type, &tmp);
	  aff_combination_add (comb, &tmp);
	}
      return;

    case MEM_REF:
      if (TREE_CODE (TREE_OPERAND (expr, 0)) == ADDR_EXPR)
	tree_to_aff_combination (TREE_OPERAND (TREE_OPERAND (expr, 0), 0),
				 type, comb);
      else if (integer_zerop (TREE_OPERAND (expr, 1)))
	{
	  aff_combination_elt (comb, type, expr);
	  return;
	}
      else
	aff_combination_elt (comb, type,
			     build2 (MEM_REF, TREE_TYPE (expr),
				     TREE_OPERAND (expr, 0),
				     build_int_cst
				      (TREE_TYPE (TREE_OPERAND (expr, 1)), 0)));
      tree_to_aff_combination (TREE_OPERAND (expr, 1), sizetype, &tmp);
      aff_combination_add (comb, &tmp);
      return;

    default:
      break;
    }

  aff_combination_elt (comb, type, expr);
}
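
/* Editorial example (illustrative; not part of the original GCC sources):
   the expression  a + 4 * (b - 1)  decomposes into offset -4 with
   elements  a * 1  and  b * 4.  Whatever the switch above cannot break
   down (a call result, a volatile load, ...) becomes a single element
   with coefficient one.  */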

/* Creates EXPR + ELT * SCALE in TYPE.  EXPR is taken from affine
   combination COMB.  */

static tree
add_elt_to_tree (tree expr, tree type, tree elt, double_int scale,
		 aff_tree *comb)
{
  enum tree_code code;
  tree type1 = type;
  if (POINTER_TYPE_P (type))
    type1 = sizetype;

  scale = double_int_ext_for_comb (scale, comb);
  elt = fold_convert (type1, elt);

  if (double_int_one_p (scale))
    {
      if (!expr)
	return fold_convert (type, elt);

      if (POINTER_TYPE_P (type))
	return fold_build_pointer_plus (expr, elt);
      return fold_build2 (PLUS_EXPR, type, expr, elt);
    }

  if (double_int_minus_one_p (scale))
    {
      if (!expr)
	return fold_convert (type, fold_build1 (NEGATE_EXPR, type1, elt));

      if (POINTER_TYPE_P (type))
	{
	  elt = fold_build1 (NEGATE_EXPR, type1, elt);
	  return fold_build_pointer_plus (expr, elt);
	}
      return fold_build2 (MINUS_EXPR, type, expr, elt);
    }

  if (!expr)
    return fold_convert (type,
			 fold_build2 (MULT_EXPR, type1, elt,
				      double_int_to_tree (type1, scale)));

  if (double_int_negative_p (scale))
    {
      code = MINUS_EXPR;
      scale = double_int_neg (scale);
    }
  else
    code = PLUS_EXPR;

  elt = fold_build2 (MULT_EXPR, type1, elt,
		     double_int_to_tree (type1, scale));
  if (POINTER_TYPE_P (type))
    {
      if (code == MINUS_EXPR)
	elt = fold_build1 (NEGATE_EXPR, type1, elt);
      return fold_build_pointer_plus (expr, elt);
    }
  return fold_build2 (code, type, expr, elt);
}

/* Makes tree from the affine combination COMB.  */

tree
aff_combination_to_tree (aff_tree *comb)
{
  tree type = comb->type;
  tree expr = NULL_TREE;
  unsigned i;
  double_int off, sgn;
  tree type1 = type;
  if (POINTER_TYPE_P (type))
    type1 = sizetype;

  gcc_assert (comb->n == MAX_AFF_ELTS || comb->rest == NULL_TREE);

  for (i = 0; i < comb->n; i++)
    expr = add_elt_to_tree (expr, type, comb->elts[i].val, comb->elts[i].coef,
			    comb);

  if (comb->rest)
    expr = add_elt_to_tree (expr, type, comb->rest, double_int_one, comb);

  /* Ensure that we get x - 1, not x + (-1) or x + 0xff..f if x is
     unsigned.  */
  if (double_int_negative_p (comb->offset))
    {
      off = double_int_neg (comb->offset);
      sgn = double_int_minus_one;
    }
  else
    {
      off = comb->offset;
      sgn = double_int_one;
    }
  return add_elt_to_tree (expr, type, double_int_to_tree (type1, off), sgn,
			  comb);
}
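
/* Editorial sketch (hedged; EXPR2 is a hypothetical variable, but the
   calls are the ones defined above): a round trip through the
   representation preserves value, though not necessarily shape:

       aff_tree a;
       tree_to_aff_combination (expr, type, &a);
       expr2 = aff_combination_to_tree (&a);

   EXPR2 may be reassociated relative to EXPR, and the offset is emitted
   last as  x - C  rather than  x + (-C), per the comment above.  */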

/* Copies the tree elements of COMB to ensure that they are not shared.  */

void
unshare_aff_combination (aff_tree *comb)
{
  unsigned i;

  for (i = 0; i < comb->n; i++)
    comb->elts[i].val = unshare_expr (comb->elts[i].val);
  if (comb->rest)
    comb->rest = unshare_expr (comb->rest);
}

/* Removes the M-th element from COMB.  */

void
aff_combination_remove_elt (aff_tree *comb, unsigned m)
{
  comb->n--;
  if (m <= comb->n)
    comb->elts[m] = comb->elts[comb->n];
  if (comb->rest)
    {
      comb->elts[comb->n].coef = double_int_one;
      comb->elts[comb->n].val = comb->rest;
      comb->rest = NULL_TREE;
      comb->n++;
    }
}

/* Adds C * COEF * VAL to R.  VAL may be NULL, in which case only
   C * COEF is added to R.  */

static void
aff_combination_add_product (aff_tree *c, double_int coef, tree val,
			     aff_tree *r)
{
  unsigned i;
  tree aval, type;

  for (i = 0; i < c->n; i++)
    {
      aval = c->elts[i].val;
      if (val)
	{
	  type = TREE_TYPE (aval);
	  aval = fold_build2 (MULT_EXPR, type, aval,
			      fold_convert (type, val));
	}

      aff_combination_add_elt (r, aval,
			       double_int_mul (coef, c->elts[i].coef));
    }

  if (c->rest)
    {
      aval = c->rest;
      if (val)
	{
	  type = TREE_TYPE (aval);
	  aval = fold_build2 (MULT_EXPR, type, aval,
			      fold_convert (type, val));
	}

      aff_combination_add_elt (r, aval, coef);
    }

  if (val)
    aff_combination_add_elt (r, val,
			     double_int_mul (coef, c->offset));
  else
    aff_combination_add_cst (r, double_int_mul (coef, c->offset));
}

/* Multiplies C1 by C2, storing the result to R.  */

void
aff_combination_mult (aff_tree *c1, aff_tree *c2, aff_tree *r)
{
  unsigned i;
  gcc_assert (TYPE_PRECISION (c1->type) == TYPE_PRECISION (c2->type));

  aff_combination_zero (r, c1->type);

  for (i = 0; i < c2->n; i++)
    aff_combination_add_product (c1, c2->elts[i].coef, c2->elts[i].val, r);
  if (c2->rest)
    aff_combination_add_product (c1, double_int_one, c2->rest, r);
  aff_combination_add_product (c1, c2->offset, NULL, r);
}
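
/* Editorial example (illustrative; not part of the original GCC sources):
   multiplying  a + 2  by  b + 3  distributes term by term, producing the
   new element  a * b  with coefficient one together with  3*a,  2*b  and
   offset 6.  The product of two non-constant parts stays representable
   only because the tree  a * b  is treated as one opaque element.  */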

/* Returns the element of COMB whose value is VAL, or NULL if no such
   element exists.  If IDX is not NULL, it is set to the index of VAL in
   COMB.  */

static struct aff_comb_elt *
aff_combination_find_elt (aff_tree *comb, tree val, unsigned *idx)
{
  unsigned i;

  for (i = 0; i < comb->n; i++)
    if (operand_equal_p (comb->elts[i].val, val, 0))
      {
	if (idx)
	  *idx = i;

	return &comb->elts[i];
      }

  return NULL;
}

/* Element of the cache that maps ssa name NAME to its expanded form
   as an affine expression EXPANSION.  */

struct name_expansion
{
  aff_tree expansion;

  /* True if the expansion for the name is just being generated.  */
  unsigned in_progress : 1;
};

/* Expands SSA names in COMB recursively.  CACHE is used to cache the
   results.  */

void
aff_combination_expand (aff_tree *comb, struct pointer_map_t **cache)
{
  unsigned i;
  aff_tree to_add, current, curre;
  tree e, rhs;
  gimple def;
  double_int scale;
  void **slot;
  struct name_expansion *exp;

  aff_combination_zero (&to_add, comb->type);
  for (i = 0; i < comb->n; i++)
    {
      tree type, name;
      enum tree_code code;

      e = comb->elts[i].val;
      type = TREE_TYPE (e);
      name = e;
      /* Look through some conversions.  */
      if (TREE_CODE (e) == NOP_EXPR
	  && (TYPE_PRECISION (type)
	      >= TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (e, 0)))))
	name = TREE_OPERAND (e, 0);
      if (TREE_CODE (name) != SSA_NAME)
	continue;
      def = SSA_NAME_DEF_STMT (name);
      if (!is_gimple_assign (def) || gimple_assign_lhs (def) != name)
	continue;

      code = gimple_assign_rhs_code (def);
      if (code != SSA_NAME
	  && !IS_EXPR_CODE_CLASS (TREE_CODE_CLASS (code))
	  && (get_gimple_rhs_class (code) != GIMPLE_SINGLE_RHS
	      || !is_gimple_min_invariant (gimple_assign_rhs1 (def))))
	continue;

      /* We do not know whether the reference retains its value at the
	 place where the expansion is used.  */
      if (TREE_CODE_CLASS (code) == tcc_reference)
	continue;

      if (!*cache)
	*cache = pointer_map_create ();
      slot = pointer_map_insert (*cache, e);
      exp = (struct name_expansion *) *slot;

      if (!exp)
	{
	  exp = XNEW (struct name_expansion);
	  exp->in_progress = 1;
	  *slot = exp;
	  /* In principle this is a generally valid folding, but
	     it is not unconditionally an optimization, so do it
	     here and not in fold_unary.  */
	  /* Convert (T1)(X *+- CST) into (T1)X *+- (T1)CST if T1 is wider
	     than the type of X and overflow for the type of X is
	     undefined.  */
	  if (e != name
	      && INTEGRAL_TYPE_P (type)
	      && INTEGRAL_TYPE_P (TREE_TYPE (name))
	      && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (name))
	      && TYPE_PRECISION (type) > TYPE_PRECISION (TREE_TYPE (name))
	      && (code == PLUS_EXPR || code == MINUS_EXPR || code == MULT_EXPR)
	      && TREE_CODE (gimple_assign_rhs2 (def)) == INTEGER_CST)
	    rhs = fold_build2 (code, type,
			       fold_convert (type, gimple_assign_rhs1 (def)),
			       fold_convert (type, gimple_assign_rhs2 (def)));
	  else
	    {
	      rhs = gimple_assign_rhs_to_tree (def);
	      if (e != name)
		rhs = fold_convert (type, rhs);
	    }
	  tree_to_aff_combination_expand (rhs, comb->type, &current, cache);
	  exp->expansion = current;
	  exp->in_progress = 0;
	}
      else
	{
	  /* Since we follow the definitions in the SSA form, we should not
	     enter a cycle unless we pass through a phi node.  */
	  gcc_assert (!exp->in_progress);
	  current = exp->expansion;
	}

      /* Accumulate the new terms to TO_ADD, so that we do not modify
	 COMB while traversing it; include the term -coef * E, to remove
	 it from COMB.  */
      scale = comb->elts[i].coef;
      aff_combination_zero (&curre, comb->type);
      aff_combination_add_elt (&curre, e, double_int_neg (scale));
      aff_combination_scale (&current, scale);
      aff_combination_add (&to_add, &current);
      aff_combination_add (&to_add, &curre);
    }
  aff_combination_add (comb, &to_add);
}

/* Similar to tree_to_aff_combination, but follows SSA name definitions
   and expands them recursively.  CACHE is used to cache the expansions
   of the ssa names, to avoid exponential time complexity for cases
   like

   a1 = a0 + a0;
   a2 = a1 + a1;
   a3 = a2 + a2;
   ...  */

void
tree_to_aff_combination_expand (tree expr, tree type, aff_tree *comb,
				struct pointer_map_t **cache)
{
  tree_to_aff_combination (expr, type, comb);
  aff_combination_expand (comb, cache);
}
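
/* Editorial sketch of the expected calling convention (hedged; the
   variable names are hypothetical, the calls are the ones defined in
   this file):

       aff_tree comb;
       struct pointer_map_t *cache = NULL;

       tree_to_aff_combination_expand (expr, type, &comb, &cache);
       ... expand further expressions with the same cache so that
	   shared SSA names are only expanded once ...
       free_affine_expand_cache (&cache);

   The cache starts out NULL and is created lazily on first use.  */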

/* Frees memory occupied by struct name_expansion in *VALUE.  Callback for
   pointer_map_traverse.  */

static bool
free_name_expansion (const void *key ATTRIBUTE_UNUSED, void **value,
		     void *data ATTRIBUTE_UNUSED)
{
  struct name_expansion *const exp = (struct name_expansion *) *value;

  free (exp);
  return true;
}

/* Frees memory allocated for the CACHE used by
   tree_to_aff_combination_expand.  */

void
free_affine_expand_cache (struct pointer_map_t **cache)
{
  if (!*cache)
    return;

  pointer_map_traverse (*cache, free_name_expansion, NULL);
  pointer_map_destroy (*cache);
  *cache = NULL;
}

/* If there is no constant CST such that VAL == CST * DIV, returns false.
   Otherwise, if VAL != 0 (and hence CST != 0) and *MULT_SET is true,
   additionally compares CST with *MULT, and if they differ, returns
   false.  Otherwise returns true, and if CST != 0, stores CST to *MULT
   and sets *MULT_SET to true.  */

static bool
double_int_constant_multiple_p (double_int val, double_int div,
				bool *mult_set, double_int *mult)
{
  double_int rem, cst;

  if (double_int_zero_p (val))
    return true;

  if (double_int_zero_p (div))
    return false;

  cst = double_int_sdivmod (val, div, FLOOR_DIV_EXPR, &rem);
  if (!double_int_zero_p (rem))
    return false;

  if (*mult_set && !double_int_equal_p (*mult, cst))
    return false;

  *mult_set = true;
  *mult = cst;
  return true;
}
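
/* Editorial example (illustrative; not part of the original GCC sources):
   with VAL = 12 and DIV = 4 the division is exact and *MULT becomes 3;
   a subsequent call with VAL = 8 and DIV = 4 then returns false, because
   its quotient 2 contradicts the multiplier already recorded in *MULT.  */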

/* Returns true if VAL = X * DIV for some constant X.  If this is the case,
   X is stored to MULT.  */

bool
aff_combination_constant_multiple_p (aff_tree *val, aff_tree *div,
				     double_int *mult)
{
  bool mult_set = false;
  unsigned i;

  if (val->n == 0 && double_int_zero_p (val->offset))
    {
      *mult = double_int_zero;
      return true;
    }
  if (val->n != div->n)
    return false;

  if (val->rest || div->rest)
    return false;

  if (!double_int_constant_multiple_p (val->offset, div->offset,
				       &mult_set, mult))
    return false;

  for (i = 0; i < div->n; i++)
    {
      struct aff_comb_elt *elt
	      = aff_combination_find_elt (val, div->elts[i].val, NULL);
      if (!elt)
	return false;
      if (!double_int_constant_multiple_p (elt->coef, div->elts[i].coef,
					   &mult_set, mult))
	return false;
    }

  gcc_assert (mult_set);
  return true;
}

/* Prints the affine combination VAL to FILE.  */

void
print_aff (FILE *file, aff_tree *val)
{
  unsigned i;
  bool uns = TYPE_UNSIGNED (val->type);
  if (POINTER_TYPE_P (val->type))
    uns = false;
  fprintf (file, "{\n  type = ");
  print_generic_expr (file, val->type, TDF_VOPS|TDF_MEMSYMS);
  fprintf (file, "\n  offset = ");
  dump_double_int (file, val->offset, uns);
  if (val->n > 0)
    {
      fprintf (file, "\n  elements = {\n");
      for (i = 0; i < val->n; i++)
	{
	  fprintf (file, "    [%d] = ", i);
	  print_generic_expr (file, val->elts[i].val, TDF_VOPS|TDF_MEMSYMS);

	  fprintf (file, " * ");
	  dump_double_int (file, val->elts[i].coef, uns);
	  if (i != val->n - 1)
	    fprintf (file, ", \n");
	}
      fprintf (file, "\n  }");
    }
  if (val->rest)
    {
      fprintf (file, "\n  rest = ");
      print_generic_expr (file, val->rest, TDF_VOPS|TDF_MEMSYMS);
    }
  fprintf (file, "\n}");
}

/* Prints the affine combination VAL to standard error; used for
   debugging.  */

DEBUG_FUNCTION void
debug_aff (aff_tree *val)
{
  print_aff (stderr, val);
  fprintf (stderr, "\n");
}

/* Returns the address of the reference REF in ADDR.  The size of the
   accessed location is stored to SIZE.  */

void
get_inner_reference_aff (tree ref, aff_tree *addr, double_int *size)
{
  HOST_WIDE_INT bitsize, bitpos;
  tree toff;
  enum machine_mode mode;
  int uns, vol;
  aff_tree tmp;
  tree base = get_inner_reference (ref, &bitsize, &bitpos, &toff, &mode,
				   &uns, &vol, false);
  tree base_addr = build_fold_addr_expr (base);

  /* ADDR = &BASE + TOFF + BITPOS / BITS_PER_UNIT.  */

  tree_to_aff_combination (base_addr, sizetype, addr);

  if (toff)
    {
      tree_to_aff_combination (toff, sizetype, &tmp);
      aff_combination_add (addr, &tmp);
    }

  aff_combination_const (&tmp, sizetype,
			 shwi_to_double_int (bitpos / BITS_PER_UNIT));
  aff_combination_add (addr, &tmp);

  *size = shwi_to_double_int ((bitsize + BITS_PER_UNIT - 1) / BITS_PER_UNIT);
}
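
/* Editorial example (illustrative; not part of the original GCC sources):
   for a COMPONENT_REF like  a.f  where field F sits at byte offset 8,
   ADDR becomes the combination  &a + 8  and *SIZE is the size of F in
   bytes, rounded up from bits.  */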

/* Returns true if a region of size SIZE1 at position 0 and a region of
   size SIZE2 at position DIFF cannot overlap.  */

bool
aff_comb_cannot_overlap_p (aff_tree *diff, double_int size1, double_int size2)
{
  double_int d, bound;

  /* Unless the difference is a constant, we fail.  */
  if (diff->n != 0)
    return false;

  d = diff->offset;
  if (double_int_negative_p (d))
    {
      /* The second object is before the first one; we succeed if the last
	 byte of the second object is before the start of the first one.  */
      bound = double_int_add (d, double_int_add (size2, double_int_minus_one));
      return double_int_negative_p (bound);
    }
  else
    {
      /* We succeed if the second object starts after the first one ends.  */
      return double_int_scmp (size1, d) <= 0;
    }
}
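
/* Editorial example (illustrative; not part of the original GCC sources):
   with SIZE1 = 4, SIZE2 = 4 and a constant DIFF of 4 the regions are
   [0, 4) and [4, 8), which touch but do not overlap, so the function
   returns true.  With DIFF = -2 the second region ends inside the first,
   and false is returned.  */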