/* Operations with affine combinations of trees.
   Copyright (C) 2005-2018 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "ssa.h"
#include "tree-pretty-print.h"
#include "fold-const.h"
#include "tree-affine.h"
#include "gimplify.h"
#include "dumpfile.h"
#include "cfgexpand.h"
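
/* An aff_tree (declared in tree-affine.h) represents the value

     offset + elts[0].coef * elts[0].val + ... + elts[n-1].coef * elts[n-1].val
	    + rest

   in its TYPE, where at most MAX_AFF_ELTS elements are tracked
   explicitly and any further terms are folded into the single tree
   REST.  The worked examples in the comments below are illustrative
   sketches, not exhaustive descriptions of the interface.  */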

/* Extends CST to the precision of TYPE, as appropriate for an affine
   combination of that type.  */

static widest_int
wide_int_ext_for_comb (const widest_int &cst, tree type)
{
  return wi::sext (cst, TYPE_PRECISION (type));
}

/* Likewise for polynomial offsets.  */

static poly_widest_int
wide_int_ext_for_comb (const poly_widest_int &cst, tree type)
{
  return wi::sext (cst, TYPE_PRECISION (type));
}
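
/* For instance, in a type of precision 8, the constant 255 is mapped
   to -1 and 192 to -64: offsets and coefficients are kept
   sign-extended from the precision of the combination's type, so
   arithmetic on them wraps exactly as arithmetic in that type does.  */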

/* Initializes affine combination COMB so that its value is zero in TYPE.  */

static void
aff_combination_zero (aff_tree *comb, tree type)
{
  int i;
  comb->type = type;
  comb->offset = 0;
  comb->n = 0;
  for (i = 0; i < MAX_AFF_ELTS; i++)
    comb->elts[i].coef = 0;
  comb->rest = NULL_TREE;
}

/* Sets COMB to CST.  */

void
aff_combination_const (aff_tree *comb, tree type, const poly_widest_int &cst)
{
  aff_combination_zero (comb, type);
  comb->offset = wide_int_ext_for_comb (cst, comb->type);
}

/* Sets COMB to single element ELT.  */

void
aff_combination_elt (aff_tree *comb, tree type, tree elt)
{
  aff_combination_zero (comb, type);

  comb->n = 1;
  comb->elts[0].val = elt;
  comb->elts[0].coef = 1;
}

/* Scales COMB by SCALE.  */

void
aff_combination_scale (aff_tree *comb, const widest_int &scale_in)
{
  unsigned i, j;

  widest_int scale = wide_int_ext_for_comb (scale_in, comb->type);
  if (scale == 1)
    return;

  if (scale == 0)
    {
      aff_combination_zero (comb, comb->type);
      return;
    }

  comb->offset = wide_int_ext_for_comb (scale * comb->offset, comb->type);
  for (i = 0, j = 0; i < comb->n; i++)
    {
      widest_int new_coef
	= wide_int_ext_for_comb (scale * comb->elts[i].coef, comb->type);
      /* A coefficient may become zero due to overflow.  Remove the zero
	 elements.  */
      if (new_coef == 0)
	continue;
      comb->elts[j].coef = new_coef;
      comb->elts[j].val = comb->elts[i].val;
      j++;
    }
  comb->n = j;

  if (comb->rest)
    {
      tree type = comb->type;
      if (POINTER_TYPE_P (type))
	type = sizetype;
      if (comb->n < MAX_AFF_ELTS)
	{
	  comb->elts[comb->n].coef = scale;
	  comb->elts[comb->n].val = comb->rest;
	  comb->rest = NULL_TREE;
	  comb->n++;
	}
      else
	comb->rest = fold_build2 (MULT_EXPR, type, comb->rest,
				  wide_int_to_tree (type, scale));
    }
}
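
/* As an example, scaling the combination 1 + 2*x by 3 yields 3 + 6*x.
   The multiplications are reduced modulo the precision of the
   combination's type by wide_int_ext_for_comb, so a coefficient can
   wrap around to zero; such elements are dropped to keep the
   representation canonical.  */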

/* Adds ELT * SCALE to COMB.  */

void
aff_combination_add_elt (aff_tree *comb, tree elt, const widest_int &scale_in)
{
  unsigned i;
  tree type;

  widest_int scale = wide_int_ext_for_comb (scale_in, comb->type);
  if (scale == 0)
    return;

  for (i = 0; i < comb->n; i++)
    if (operand_equal_p (comb->elts[i].val, elt, 0))
      {
	widest_int new_coef
	  = wide_int_ext_for_comb (comb->elts[i].coef + scale, comb->type);
	if (new_coef != 0)
	  {
	    comb->elts[i].coef = new_coef;
	    return;
	  }

	comb->n--;
	comb->elts[i] = comb->elts[comb->n];

	if (comb->rest)
	  {
	    gcc_assert (comb->n == MAX_AFF_ELTS - 1);
	    comb->elts[comb->n].coef = 1;
	    comb->elts[comb->n].val = comb->rest;
	    comb->rest = NULL_TREE;
	    comb->n++;
	  }
	return;
      }
  if (comb->n < MAX_AFF_ELTS)
    {
      comb->elts[comb->n].coef = scale;
      comb->elts[comb->n].val = elt;
      comb->n++;
      return;
    }

  type = comb->type;
  if (POINTER_TYPE_P (type))
    type = sizetype;

  if (scale == 1)
    elt = fold_convert (type, elt);
  else
    elt = fold_build2 (MULT_EXPR, type,
		       fold_convert (type, elt),
		       wide_int_to_tree (type, scale));

  if (comb->rest)
    comb->rest = fold_build2 (PLUS_EXPR, type, comb->rest, elt);
  else
    comb->rest = elt;
}
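
/* For instance, adding x * 3 to the combination 5 + 2*x merges the two
   occurrences of x, giving 5 + 5*x; adding x * -5 to that result makes
   the coefficient zero and removes the element altogether.  Once all
   MAX_AFF_ELTS element slots are taken, further terms accumulate
   symbolically in COMB->rest.  */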

/* Adds CST to C.  */

static void
aff_combination_add_cst (aff_tree *c, const poly_widest_int &cst)
{
  c->offset = wide_int_ext_for_comb (c->offset + cst, c->type);
}

/* Adds COMB2 to COMB1.  */

void
aff_combination_add (aff_tree *comb1, aff_tree *comb2)
{
  unsigned i;

  aff_combination_add_cst (comb1, comb2->offset);
  for (i = 0; i < comb2->n; i++)
    aff_combination_add_elt (comb1, comb2->elts[i].val, comb2->elts[i].coef);
  if (comb2->rest)
    aff_combination_add_elt (comb1, comb2->rest, 1);
}

/* Converts affine combination COMB to TYPE.  */

void
aff_combination_convert (aff_tree *comb, tree type)
{
  unsigned i, j;
  tree comb_type = comb->type;

  if (TYPE_PRECISION (type) > TYPE_PRECISION (comb_type))
    {
      tree val = fold_convert (type, aff_combination_to_tree (comb));
      tree_to_aff_combination (val, type, comb);
      return;
    }

  comb->type = type;
  if (comb->rest && !POINTER_TYPE_P (type))
    comb->rest = fold_convert (type, comb->rest);

  if (TYPE_PRECISION (type) == TYPE_PRECISION (comb_type))
    return;

  comb->offset = wide_int_ext_for_comb (comb->offset, comb->type);
  for (i = j = 0; i < comb->n; i++)
    {
      if (comb->elts[i].coef == 0)
	continue;
      comb->elts[j].coef = comb->elts[i].coef;
      comb->elts[j].val = fold_convert (type, comb->elts[i].val);
      j++;
    }

  comb->n = j;
  if (comb->n < MAX_AFF_ELTS && comb->rest)
    {
      comb->elts[comb->n].coef = 1;
      comb->elts[comb->n].val = comb->rest;
      comb->rest = NULL_TREE;
      comb->n++;
    }
}

/* Splits EXPR into an affine combination of parts.  */

void
tree_to_aff_combination (tree expr, tree type, aff_tree *comb)
{
  aff_tree tmp;
  enum tree_code code;
  tree cst, core, toffset;
  poly_int64 bitpos, bitsize, bytepos;
  machine_mode mode;
  int unsignedp, reversep, volatilep;

  STRIP_NOPS (expr);

  code = TREE_CODE (expr);
  switch (code)
    {
    case POINTER_PLUS_EXPR:
      tree_to_aff_combination (TREE_OPERAND (expr, 0), type, comb);
      tree_to_aff_combination (TREE_OPERAND (expr, 1), sizetype, &tmp);
      aff_combination_add (comb, &tmp);
      return;

    case PLUS_EXPR:
    case MINUS_EXPR:
      tree_to_aff_combination (TREE_OPERAND (expr, 0), type, comb);
      tree_to_aff_combination (TREE_OPERAND (expr, 1), type, &tmp);
      if (code == MINUS_EXPR)
	aff_combination_scale (&tmp, -1);
      aff_combination_add (comb, &tmp);
      return;

    case MULT_EXPR:
      cst = TREE_OPERAND (expr, 1);
      if (TREE_CODE (cst) != INTEGER_CST)
	break;
      tree_to_aff_combination (TREE_OPERAND (expr, 0), type, comb);
      aff_combination_scale (comb, wi::to_widest (cst));
      return;

    case NEGATE_EXPR:
      tree_to_aff_combination (TREE_OPERAND (expr, 0), type, comb);
      aff_combination_scale (comb, -1);
      return;

    case BIT_NOT_EXPR:
      /* ~x = -x - 1.  */
      tree_to_aff_combination (TREE_OPERAND (expr, 0), type, comb);
      aff_combination_scale (comb, -1);
      aff_combination_add_cst (comb, -1);
      return;

    case ADDR_EXPR:
      /* Handle &MEM[ptr + CST] which is equivalent to POINTER_PLUS_EXPR.  */
      if (TREE_CODE (TREE_OPERAND (expr, 0)) == MEM_REF)
	{
	  expr = TREE_OPERAND (expr, 0);
	  tree_to_aff_combination (TREE_OPERAND (expr, 0), type, comb);
	  tree_to_aff_combination (TREE_OPERAND (expr, 1), sizetype, &tmp);
	  aff_combination_add (comb, &tmp);
	  return;
	}
      core = get_inner_reference (TREE_OPERAND (expr, 0), &bitsize, &bitpos,
				  &toffset, &mode, &unsignedp, &reversep,
				  &volatilep);
      if (!multiple_p (bitpos, BITS_PER_UNIT, &bytepos))
	break;
      aff_combination_const (comb, type, bytepos);
      if (TREE_CODE (core) == MEM_REF)
	{
	  tree mem_offset = TREE_OPERAND (core, 1);
	  aff_combination_add_cst (comb, wi::to_poly_widest (mem_offset));
	  core = TREE_OPERAND (core, 0);
	}
      else
	core = build_fold_addr_expr (core);

      if (TREE_CODE (core) == ADDR_EXPR)
	aff_combination_add_elt (comb, core, 1);
      else
	{
	  tree_to_aff_combination (core, type, &tmp);
	  aff_combination_add (comb, &tmp);
	}
      if (toffset)
	{
	  tree_to_aff_combination (toffset, type, &tmp);
	  aff_combination_add (comb, &tmp);
	}
      return;

    case MEM_REF:
      if (TREE_CODE (TREE_OPERAND (expr, 0)) == ADDR_EXPR)
	tree_to_aff_combination (TREE_OPERAND (TREE_OPERAND (expr, 0), 0),
				 type, comb);
      else if (integer_zerop (TREE_OPERAND (expr, 1)))
	{
	  aff_combination_elt (comb, type, expr);
	  return;
	}
      else
	aff_combination_elt (comb, type,
			     build2 (MEM_REF, TREE_TYPE (expr),
				     TREE_OPERAND (expr, 0),
				     build_int_cst
				      (TREE_TYPE (TREE_OPERAND (expr, 1)), 0)));
      tree_to_aff_combination (TREE_OPERAND (expr, 1), sizetype, &tmp);
      aff_combination_add (comb, &tmp);
      return;

    CASE_CONVERT:
      {
	tree otype = TREE_TYPE (expr);
	tree inner = TREE_OPERAND (expr, 0);
	tree itype = TREE_TYPE (inner);
	enum tree_code icode = TREE_CODE (inner);

	/* In principle this is a valid folding, but it isn't necessarily
	   an optimization, so do it here and not in fold_unary.  */
	if ((icode == PLUS_EXPR || icode == MINUS_EXPR || icode == MULT_EXPR)
	    && TREE_CODE (itype) == INTEGER_TYPE
	    && TREE_CODE (otype) == INTEGER_TYPE
	    && TYPE_PRECISION (otype) > TYPE_PRECISION (itype))
	  {
	    tree op0 = TREE_OPERAND (inner, 0), op1 = TREE_OPERAND (inner, 1);

	    /* If the inner type has undefined overflow behavior, fold the
	       conversion for the two cases below:
		 (T1)(X *+- CST) -> (T1)X *+- (T1)CST
		 (T1)(X + X)     -> (T1)X + (T1)X.  */
	    if (TYPE_OVERFLOW_UNDEFINED (itype)
		&& (TREE_CODE (op1) == INTEGER_CST
		    || (icode == PLUS_EXPR && operand_equal_p (op0, op1, 0))))
	      {
		op0 = fold_convert (otype, op0);
		op1 = fold_convert (otype, op1);
		expr = fold_build2 (icode, otype, op0, op1);
		tree_to_aff_combination (expr, type, comb);
		return;
	      }
	    wide_int minv, maxv;
	    /* If the inner type has wrapping overflow behavior, fold the
	       conversion for the case below:
		 (T1)(X - CST) -> (T1)X - (T1)CST
	       if X - CST does not overflow according to range information.
	       Also handle (T1)(X + CST) as (T1)(X - (-CST)).  */
	    if (TYPE_UNSIGNED (itype)
		&& TYPE_OVERFLOW_WRAPS (itype)
		&& TREE_CODE (op0) == SSA_NAME
		&& TREE_CODE (op1) == INTEGER_CST
		&& icode != MULT_EXPR
		&& get_range_info (op0, &minv, &maxv) == VR_RANGE)
	      {
		if (icode == PLUS_EXPR)
		  op1 = wide_int_to_tree (itype, -wi::to_wide (op1));
		if (wi::geu_p (minv, wi::to_wide (op1)))
		  {
		    op0 = fold_convert (otype, op0);
		    op1 = fold_convert (otype, op1);
		    expr = fold_build2 (MINUS_EXPR, otype, op0, op1);
		    tree_to_aff_combination (expr, type, comb);
		    return;
		  }
	      }
	  }
      }
      break;

    default:
      {
	if (poly_int_tree_p (expr))
	  {
	    aff_combination_const (comb, type, wi::to_poly_widest (expr));
	    return;
	  }
	break;
      }
    }

  aff_combination_elt (comb, type, expr);
}
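
/* For instance, the expression i * 4 + 3 is split into a combination
   with offset 3 and a single element i with coefficient 4, while an
   address like &a[i] goes through the ADDR_EXPR case above and is
   decomposed into the base address plus the affine form of the byte
   offset.  */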

/* Creates EXPR + ELT * SCALE in TYPE.  EXPR may be NULL, in which case
   just ELT * SCALE is built.  */

static tree
add_elt_to_tree (tree expr, tree type, tree elt, const widest_int &scale_in)
{
  enum tree_code code;

  widest_int scale = wide_int_ext_for_comb (scale_in, type);

  elt = fold_convert (type, elt);
  if (scale == 1)
    {
      if (!expr)
	return elt;

      return fold_build2 (PLUS_EXPR, type, expr, elt);
    }

  if (scale == -1)
    {
      if (!expr)
	return fold_build1 (NEGATE_EXPR, type, elt);

      return fold_build2 (MINUS_EXPR, type, expr, elt);
    }

  if (!expr)
    return fold_build2 (MULT_EXPR, type, elt, wide_int_to_tree (type, scale));

  if (wi::neg_p (scale))
    {
      code = MINUS_EXPR;
      scale = -scale;
    }
  else
    code = PLUS_EXPR;

  elt = fold_build2 (MULT_EXPR, type, elt, wide_int_to_tree (type, scale));
  return fold_build2 (code, type, expr, elt);
}

/* Makes tree from the affine combination COMB.  */

tree
aff_combination_to_tree (aff_tree *comb)
{
  tree type = comb->type, base = NULL_TREE, expr = NULL_TREE;
  unsigned i;
  poly_widest_int off;
  int sgn;

  gcc_assert (comb->n == MAX_AFF_ELTS || comb->rest == NULL_TREE);

  i = 0;
  if (POINTER_TYPE_P (type))
    {
      type = sizetype;
      if (comb->n > 0 && comb->elts[0].coef == 1
	  && POINTER_TYPE_P (TREE_TYPE (comb->elts[0].val)))
	{
	  base = comb->elts[0].val;
	  ++i;
	}
    }

  for (; i < comb->n; i++)
    expr = add_elt_to_tree (expr, type, comb->elts[i].val, comb->elts[i].coef);

  if (comb->rest)
    expr = add_elt_to_tree (expr, type, comb->rest, 1);

  /* Ensure that we get x - 1, not x + (-1) or x + 0xff..f if x is
     unsigned.  */
  if (known_lt (comb->offset, 0))
    {
      off = -comb->offset;
      sgn = -1;
    }
  else
    {
      off = comb->offset;
      sgn = 1;
    }
  expr = add_elt_to_tree (expr, type, wide_int_to_tree (type, off), sgn);

  if (base)
    return fold_build_pointer_plus (base, expr);
  else
    return fold_convert (comb->type, expr);
}

/* Copies the tree elements of COMB to ensure that they are not shared.  */

void
unshare_aff_combination (aff_tree *comb)
{
  unsigned i;

  for (i = 0; i < comb->n; i++)
    comb->elts[i].val = unshare_expr (comb->elts[i].val);
  if (comb->rest)
    comb->rest = unshare_expr (comb->rest);
}

/* Removes the M-th element from COMB.  */

void
aff_combination_remove_elt (aff_tree *comb, unsigned m)
{
  comb->n--;
  if (m <= comb->n)
    comb->elts[m] = comb->elts[comb->n];
  if (comb->rest)
    {
      comb->elts[comb->n].coef = 1;
      comb->elts[comb->n].val = comb->rest;
      comb->rest = NULL_TREE;
      comb->n++;
    }
}

/* Adds C * COEF * VAL to R.  VAL may be NULL, in which case only
   C * COEF is added to R.  */

static void
aff_combination_add_product (aff_tree *c, const widest_int &coef, tree val,
			     aff_tree *r)
{
  unsigned i;
  tree aval, type;

  for (i = 0; i < c->n; i++)
    {
      aval = c->elts[i].val;
      if (val)
	{
	  type = TREE_TYPE (aval);
	  aval = fold_build2 (MULT_EXPR, type, aval,
			      fold_convert (type, val));
	}

      aff_combination_add_elt (r, aval, coef * c->elts[i].coef);
    }

  if (c->rest)
    {
      aval = c->rest;
      if (val)
	{
	  type = TREE_TYPE (aval);
	  aval = fold_build2 (MULT_EXPR, type, aval,
			      fold_convert (type, val));
	}

      aff_combination_add_elt (r, aval, coef);
    }

  if (val)
    {
      if (c->offset.is_constant ())
	/* Access coeffs[0] directly, for efficiency.  */
	aff_combination_add_elt (r, val, coef * c->offset.coeffs[0]);
      else
	{
	  /* c->offset is polynomial, so multiply VAL rather than COEF
	     by it.  */
	  tree offset = wide_int_to_tree (TREE_TYPE (val), c->offset);
	  val = fold_build2 (MULT_EXPR, TREE_TYPE (val), val, offset);
	  aff_combination_add_elt (r, val, coef);
	}
    }
  else
    aff_combination_add_cst (r, coef * c->offset);
}

/* Multiplies C1 by C2, storing the result to R.  */

void
aff_combination_mult (aff_tree *c1, aff_tree *c2, aff_tree *r)
{
  unsigned i;
  gcc_assert (TYPE_PRECISION (c1->type) == TYPE_PRECISION (c2->type));

  aff_combination_zero (r, c1->type);

  for (i = 0; i < c2->n; i++)
    aff_combination_add_product (c1, c2->elts[i].coef, c2->elts[i].val, r);
  if (c2->rest)
    aff_combination_add_product (c1, 1, c2->rest, r);
  if (c2->offset.is_constant ())
    /* Access coeffs[0] directly, for efficiency.  */
    aff_combination_add_product (c1, c2->offset.coeffs[0], NULL, r);
  else
    {
      /* c2->offset is polynomial, so do the multiplication in tree form.  */
      tree offset = wide_int_to_tree (c2->type, c2->offset);
      aff_combination_add_product (c1, 1, offset, r);
    }
}
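
/* For instance, multiplying the combinations x + 1 and y + 2 yields
   2 + 2*x + 1*y + 1*(x * y): the product of two non-constant elements
   is not kept in expanded form, but becomes a single element whose
   value is the folded MULT_EXPR tree.  */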

/* Returns the element of COMB whose value is VAL, or NULL if no such
   element exists.  If IDX is not NULL, it is set to the index of VAL in
   COMB.  */

static struct aff_comb_elt *
aff_combination_find_elt (aff_tree *comb, tree val, unsigned *idx)
{
  unsigned i;

  for (i = 0; i < comb->n; i++)
    if (operand_equal_p (comb->elts[i].val, val, 0))
      {
	if (idx)
	  *idx = i;

	return &comb->elts[i];
      }

  return NULL;
}

/* Element of the cache that maps ssa name NAME to its expanded form
   as an affine expression EXPANSION.  */

struct name_expansion
{
  aff_tree expansion;

  /* True if the expansion for the name is just being generated.  */
  unsigned in_progress : 1;
};

/* Expands SSA names in COMB recursively.  CACHE is used to cache the
   results.  */

void
aff_combination_expand (aff_tree *comb ATTRIBUTE_UNUSED,
			hash_map<tree, name_expansion *> **cache)
{
  unsigned i;
  aff_tree to_add, current, curre;
  tree e, rhs;
  gimple *def;
  widest_int scale;
  struct name_expansion *exp;

  aff_combination_zero (&to_add, comb->type);
  for (i = 0; i < comb->n; i++)
    {
      tree type, name;
      enum tree_code code;

      e = comb->elts[i].val;
      type = TREE_TYPE (e);
      name = e;
      /* Look through some conversions.  */
      if (CONVERT_EXPR_P (e)
	  && (TYPE_PRECISION (type)
	      >= TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (e, 0)))))
	name = TREE_OPERAND (e, 0);
      if (TREE_CODE (name) != SSA_NAME)
	continue;
      def = SSA_NAME_DEF_STMT (name);
      if (!is_gimple_assign (def) || gimple_assign_lhs (def) != name)
	continue;

      code = gimple_assign_rhs_code (def);
      if (code != SSA_NAME
	  && !IS_EXPR_CODE_CLASS (TREE_CODE_CLASS (code))
	  && (get_gimple_rhs_class (code) != GIMPLE_SINGLE_RHS
	      || !is_gimple_min_invariant (gimple_assign_rhs1 (def))))
	continue;

      /* We do not know whether the reference retains its value at the
	 place where the expansion is used.  */
      if (TREE_CODE_CLASS (code) == tcc_reference)
	continue;

      if (!*cache)
	*cache = new hash_map<tree, name_expansion *>;
      name_expansion **slot = &(*cache)->get_or_insert (e);
      exp = *slot;

      if (!exp)
	{
	  exp = XNEW (struct name_expansion);
	  exp->in_progress = 1;
	  *slot = exp;
	  rhs = gimple_assign_rhs_to_tree (def);
	  if (e != name)
	    rhs = fold_convert (type, rhs);

	  tree_to_aff_combination_expand (rhs, comb->type, &current, cache);
	  exp->expansion = current;
	  exp->in_progress = 0;
	}
      else
	{
	  /* Since we follow the definitions in the SSA form, we should not
	     enter a cycle unless we pass through a phi node.  */
	  gcc_assert (!exp->in_progress);
	  current = exp->expansion;
	}

      /* Accumulate the new terms to TO_ADD, so that we do not modify
	 COMB while traversing it; include the term -coef * E, to remove
	 it from COMB.  */
      scale = comb->elts[i].coef;
      aff_combination_zero (&curre, comb->type);
      aff_combination_add_elt (&curre, e, -scale);
      aff_combination_scale (&current, scale);
      aff_combination_add (&to_add, &current);
      aff_combination_add (&to_add, &curre);
    }
  aff_combination_add (comb, &to_add);
}

/* Similar to tree_to_aff_combination, but follows SSA name definitions
   and expands them recursively.  CACHE is used to cache the expansions
   of the ssa names, to avoid exponential time complexity for cases
   like

   a1 = a0 + a0;
   a2 = a1 + a1;
   a3 = a2 + a2;
   ...  */

void
tree_to_aff_combination_expand (tree expr, tree type, aff_tree *comb,
				hash_map<tree, name_expansion *> **cache)
{
  tree_to_aff_combination (expr, type, comb);
  aff_combination_expand (comb, cache);
}

/* Frees memory occupied by struct name_expansion in *VALUE.  Callback for
   hash_map::traverse.  */

bool
free_name_expansion (tree const &, name_expansion **value, void *)
{
  free (*value);
  return true;
}

/* Frees memory allocated for the CACHE used by
   tree_to_aff_combination_expand.  */

void
free_affine_expand_cache (hash_map<tree, name_expansion *> **cache)
{
  if (!*cache)
    return;

  (*cache)->traverse<void *, free_name_expansion> (NULL);
  delete (*cache);
  *cache = NULL;
}
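
/* A minimal usage sketch for the expansion interface above; the names
   E1, E2, AFF1 and AFF2 are hypothetical.  Callers share one lazily
   allocated cache across any number of expansions and release it once
   at the end:

     hash_map<tree, name_expansion *> *cache = NULL;
     aff_tree aff1, aff2;
     tree_to_aff_combination_expand (e1, type, &aff1, &cache);
     tree_to_aff_combination_expand (e2, type, &aff2, &cache);
     ...
     free_affine_expand_cache (&cache);  */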

/* If VAL != CST * DIV for any constant CST, returns false.
   Otherwise, if *MULT_SET is true, additionally compares CST and MULT,
   and if they are different, returns false.  Finally, if neither of these
   two cases occur, true is returned, and CST is stored to MULT and MULT_SET
   is set to true.  */

static bool
wide_int_constant_multiple_p (const poly_widest_int &val,
			      const poly_widest_int &div,
			      bool *mult_set, poly_widest_int *mult)
{
  poly_widest_int rem, cst;

  if (known_eq (val, 0))
    {
      if (*mult_set && maybe_ne (*mult, 0))
	return false;
      *mult_set = true;
      *mult = 0;
      return true;
    }

  if (maybe_eq (div, 0))
    return false;

  if (!multiple_p (val, div, &cst))
    return false;

  if (*mult_set && maybe_ne (*mult, cst))
    return false;

  *mult_set = true;
  *mult = cst;
  return true;
}

/* Returns true if VAL = X * DIV for some constant X.  If this is the case,
   X is stored to MULT.  */

bool
aff_combination_constant_multiple_p (aff_tree *val, aff_tree *div,
				     poly_widest_int *mult)
{
  bool mult_set = false;
  unsigned i;

  if (val->n == 0 && known_eq (val->offset, 0))
    {
      *mult = 0;
      return true;
    }
  if (val->n != div->n)
    return false;

  if (val->rest || div->rest)
    return false;

  if (!wide_int_constant_multiple_p (val->offset, div->offset,
				     &mult_set, mult))
    return false;

  for (i = 0; i < div->n; i++)
    {
      struct aff_comb_elt *elt
	= aff_combination_find_elt (val, div->elts[i].val, NULL);
      if (!elt)
	return false;
      if (!wide_int_constant_multiple_p (elt->coef, div->elts[i].coef,
					 &mult_set, mult))
	return false;
    }

  gcc_assert (mult_set);
  return true;
}
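
/* For instance, with VAL = 8 + 4*x and DIV = 2 + 1*x this returns true
   and stores 4 to MULT, while with VAL = 8 + 4*x and DIV = 3 + 1*x it
   returns false, since no single constant scales both the offset and
   the coefficient.  */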

/* Prints the affine VAL to the FILE.  */

static void
print_aff (FILE *file, aff_tree *val)
{
  unsigned i;
  signop sgn = TYPE_SIGN (val->type);
  if (POINTER_TYPE_P (val->type))
    sgn = SIGNED;
  fprintf (file, "{\n  type = ");
  print_generic_expr (file, val->type, TDF_VOPS|TDF_MEMSYMS);
  fprintf (file, "\n  offset = ");
  print_dec (val->offset, file, sgn);
  if (val->n > 0)
    {
      fprintf (file, "\n  elements = {\n");
      for (i = 0; i < val->n; i++)
	{
	  fprintf (file, "    [%d] = ", i);
	  print_generic_expr (file, val->elts[i].val, TDF_VOPS|TDF_MEMSYMS);

	  fprintf (file, " * ");
	  print_dec (val->elts[i].coef, file, sgn);
	  if (i != val->n - 1)
	    fprintf (file, ", \n");
	}
      fprintf (file, "\n  }");
    }
  if (val->rest)
    {
      fprintf (file, "\n  rest = ");
      print_generic_expr (file, val->rest, TDF_VOPS|TDF_MEMSYMS);
    }
  fprintf (file, "\n}");
}

/* Prints the affine VAL to the standard error, used for debugging.  */

DEBUG_FUNCTION void
debug_aff (aff_tree *val)
{
  print_aff (stderr, val);
  fprintf (stderr, "\n");
}

/* Computes address of the reference REF in ADDR.  The size of the accessed
   location is stored to SIZE.  Returns the ultimate containing object to
   which REF refers.  */

tree
get_inner_reference_aff (tree ref, aff_tree *addr, poly_widest_int *size)
{
  poly_int64 bitsize, bitpos;
  tree toff;
  machine_mode mode;
  int uns, rev, vol;
  aff_tree tmp;
  tree base = get_inner_reference (ref, &bitsize, &bitpos, &toff, &mode,
				   &uns, &rev, &vol);
  tree base_addr = build_fold_addr_expr (base);

  /* ADDR = &BASE + TOFF + BITPOS / BITS_PER_UNIT.  */

  tree_to_aff_combination (base_addr, sizetype, addr);

  if (toff)
    {
      tree_to_aff_combination (toff, sizetype, &tmp);
      aff_combination_add (addr, &tmp);
    }

  aff_combination_const (&tmp, sizetype, bits_to_bytes_round_down (bitpos));
  aff_combination_add (addr, &tmp);

  *size = bits_to_bytes_round_up (bitsize);

  return base;
}

/* Returns true if a region of size SIZE1 at position 0 and a region of
   size SIZE2 at position DIFF cannot overlap.  */

bool
aff_comb_cannot_overlap_p (aff_tree *diff, const poly_widest_int &size1,
			   const poly_widest_int &size2)
{
  /* Unless the difference is a constant, we fail.  */
  if (diff->n != 0)
    return false;

  if (!ordered_p (diff->offset, 0))
    return false;

  if (maybe_lt (diff->offset, 0))
    {
      /* The second object is before the first one, we succeed if the last
	 element of the second object is before the start of the first one.  */
      return known_le (diff->offset + size2, 0);
    }
  else
    {
      /* We succeed if the second object starts after the first one ends.  */
      return known_le (size1, diff->offset);
    }
}