1 /* Data references and dependences detectors.
2 Copyright (C) 2003-2016 Free Software Foundation, Inc.
3 Contributed by Sebastian Pop <pop@cri.ensmp.fr>
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 /* This pass walks a given loop structure searching for array
22 references. The information about the array accesses is recorded
23 in DATA_REFERENCE structures.
24
25 The basic test for determining the dependences is:
   given two access functions chrec1 and chrec2 to the same array, and
27 x and y two vectors from the iteration domain, the same element of
28 the array is accessed twice at iterations x and y if and only if:
29 | chrec1 (x) == chrec2 (y).
30
31 The goals of this analysis are:
32
33 - to determine the independence: the relation between two
34 independent accesses is qualified with the chrec_known (this
35 information allows a loop parallelization),
36
37 - when two data references access the same data, to qualify the
38 dependence relation with classic dependence representations:
39
40 - distance vectors
41 - direction vectors
42 - loop carried level dependence
43 - polyhedron dependence
44 or with the chains of recurrences based representation,
45
46 - to define a knowledge base for storing the data dependence
47 information,
48
49 - to define an interface to access this data.
50
51
52 Definitions:
53
54 - subscript: given two array accesses a subscript is the tuple
55 composed of the access functions for a given dimension. Example:
56 Given A[f1][f2][f3] and B[g1][g2][g3], there are three subscripts:
57 (f1, g1), (f2, g2), (f3, g3).
58
59 - Diophantine equation: an equation whose coefficients and
60 solutions are integer constants, for example the equation
61 | 3*x + 2*y = 1
62 has an integer solution x = 1 and y = -1.
63
64 References:
65
66 - "Advanced Compilation for High Performance Computing" by Randy
67 Allen and Ken Kennedy.
68 http://citeseer.ist.psu.edu/goff91practical.html
69
70 - "Loop Transformations for Restructuring Compilers - The Foundations"
71 by Utpal Banerjee.
72
73
74 */
75
76 #include "config.h"
77 #include "system.h"
78 #include "coretypes.h"
79 #include "backend.h"
80 #include "rtl.h"
81 #include "tree.h"
82 #include "gimple.h"
83 #include "gimple-pretty-print.h"
84 #include "alias.h"
85 #include "fold-const.h"
86 #include "expr.h"
87 #include "gimple-iterator.h"
88 #include "tree-ssa-loop-niter.h"
89 #include "tree-ssa-loop.h"
90 #include "tree-ssa.h"
91 #include "cfgloop.h"
92 #include "tree-data-ref.h"
93 #include "tree-scalar-evolution.h"
94 #include "dumpfile.h"
95 #include "tree-affine.h"
96 #include "params.h"
97
/* Counters gathering statistics about the dependence tests performed
   in this file; a single file-scope instance is updated as the tests
   run.  */

static struct datadep_stats
{
  /* Outcomes of whole dependence-relation tests.  */
  int num_dependence_tests;
  int num_dependence_dependent;
  int num_dependence_independent;
  int num_dependence_undetermined;

  /* Outcomes of per-subscript tests.  */
  int num_subscript_tests;
  int num_subscript_undetermined;
  int num_same_subscript_function;

  /* Outcomes of the ZIV (zero index variable) subscript test.  */
  int num_ziv;
  int num_ziv_independent;
  int num_ziv_dependent;
  int num_ziv_unimplemented;

  /* Outcomes of the SIV (single index variable) subscript test.  */
  int num_siv;
  int num_siv_independent;
  int num_siv_dependent;
  int num_siv_unimplemented;

  /* Outcomes of the MIV (multiple index variables) subscript test.  */
  int num_miv;
  int num_miv_independent;
  int num_miv_dependent;
  int num_miv_unimplemented;
} dependence_stats;
124
/* Forward declaration; the subscript dependence tester is defined
   later in this file.  */
static bool subscript_dependence_tester_1 (struct data_dependence_relation *,
					   struct data_reference *,
					   struct data_reference *,
					   struct loop *);
129 /* Returns true iff A divides B. */
130
131 static inline bool
tree_fold_divides_p(const_tree a,const_tree b)132 tree_fold_divides_p (const_tree a, const_tree b)
133 {
134 gcc_assert (TREE_CODE (a) == INTEGER_CST);
135 gcc_assert (TREE_CODE (b) == INTEGER_CST);
136 return integer_zerop (int_const_binop (TRUNC_MOD_EXPR, b, a));
137 }
138
/* Returns true iff A divides B.  Guards against the two cases where
   the plain "b % a" would be undefined behavior: division by zero
   (only zero is divisible by zero) and INT_MIN % -1 (unit divisors
   divide everything).  */

static inline bool
int_divides_p (int a, int b)
{
  if (a == 1 || a == -1)
    return true;
  if (a == 0)
    return b == 0;
  return (b % a) == 0;
}
146
147
148
149 /* Dump into FILE all the data references from DATAREFS. */
150
151 static void
dump_data_references(FILE * file,vec<data_reference_p> datarefs)152 dump_data_references (FILE *file, vec<data_reference_p> datarefs)
153 {
154 unsigned int i;
155 struct data_reference *dr;
156
157 FOR_EACH_VEC_ELT (datarefs, i, dr)
158 dump_data_reference (file, dr);
159 }
160
/* Unified debug entry point: dump all the data references from REF to
   stderr.  Convenient to call from within a debugger.  */

DEBUG_FUNCTION void
debug (vec<data_reference_p> &ref)
{
  dump_data_references (stderr, ref);
}
168
169 DEBUG_FUNCTION void
debug(vec<data_reference_p> * ptr)170 debug (vec<data_reference_p> *ptr)
171 {
172 if (ptr)
173 debug (*ptr);
174 else
175 fprintf (stderr, "<nil>\n");
176 }
177
178
/* Dump into STDERR all the data references from DATAREFS.  */

DEBUG_FUNCTION void
debug_data_references (vec<data_reference_p> datarefs)
{
  dump_data_references (stderr, datarefs);
}
186
/* Print to STDERR the data_reference DR.  */

DEBUG_FUNCTION void
debug_data_reference (struct data_reference *dr)
{
  dump_data_reference (stderr, dr);
}
194
195 /* Dump function for a DATA_REFERENCE structure. */
196
197 void
dump_data_reference(FILE * outf,struct data_reference * dr)198 dump_data_reference (FILE *outf,
199 struct data_reference *dr)
200 {
201 unsigned int i;
202
203 fprintf (outf, "#(Data Ref: \n");
204 fprintf (outf, "# bb: %d \n", gimple_bb (DR_STMT (dr))->index);
205 fprintf (outf, "# stmt: ");
206 print_gimple_stmt (outf, DR_STMT (dr), 0, 0);
207 fprintf (outf, "# ref: ");
208 print_generic_stmt (outf, DR_REF (dr), 0);
209 fprintf (outf, "# base_object: ");
210 print_generic_stmt (outf, DR_BASE_OBJECT (dr), 0);
211
212 for (i = 0; i < DR_NUM_DIMENSIONS (dr); i++)
213 {
214 fprintf (outf, "# Access function %d: ", i);
215 print_generic_stmt (outf, DR_ACCESS_FN (dr, i), 0);
216 }
217 fprintf (outf, "#)\n");
218 }
219
/* Unified debug entry point: dump the data_reference REF to stderr.  */

DEBUG_FUNCTION void
debug (data_reference &ref)
{
  dump_data_reference (stderr, &ref);
}
227
228 DEBUG_FUNCTION void
debug(data_reference * ptr)229 debug (data_reference *ptr)
230 {
231 if (ptr)
232 debug (*ptr);
233 else
234 fprintf (stderr, "<nil>\n");
235 }
236
237
/* Dumps the affine function described by FN to the file OUTF, as
   "c0 + c1 * x_1 + ... + cn * x_n" where the ci are the elements of
   FN.  FN must contain at least one element (fn[0], the constant
   term, is read unconditionally).  */

DEBUG_FUNCTION void
dump_affine_function (FILE *outf, affine_fn fn)
{
  unsigned i;
  tree coef;

  print_generic_expr (outf, fn[0], TDF_SLIM);
  for (i = 1; fn.iterate (i, &coef); i++)
    {
      fprintf (outf, " + ");
      print_generic_expr (outf, coef, TDF_SLIM);
      fprintf (outf, " * x_%u", i);
    }
}
254
255 /* Dumps the conflict function CF to the file OUTF. */
256
257 DEBUG_FUNCTION void
dump_conflict_function(FILE * outf,conflict_function * cf)258 dump_conflict_function (FILE *outf, conflict_function *cf)
259 {
260 unsigned i;
261
262 if (cf->n == NO_DEPENDENCE)
263 fprintf (outf, "no dependence");
264 else if (cf->n == NOT_KNOWN)
265 fprintf (outf, "not known");
266 else
267 {
268 for (i = 0; i < cf->n; i++)
269 {
270 if (i != 0)
271 fprintf (outf, " ");
272 fprintf (outf, "[");
273 dump_affine_function (outf, cf->fns[i]);
274 fprintf (outf, "]");
275 }
276 }
277 }
278
279 /* Dump function for a SUBSCRIPT structure. */
280
281 DEBUG_FUNCTION void
dump_subscript(FILE * outf,struct subscript * subscript)282 dump_subscript (FILE *outf, struct subscript *subscript)
283 {
284 conflict_function *cf = SUB_CONFLICTS_IN_A (subscript);
285
286 fprintf (outf, "\n (subscript \n");
287 fprintf (outf, " iterations_that_access_an_element_twice_in_A: ");
288 dump_conflict_function (outf, cf);
289 if (CF_NONTRIVIAL_P (cf))
290 {
291 tree last_iteration = SUB_LAST_CONFLICT (subscript);
292 fprintf (outf, "\n last_conflict: ");
293 print_generic_expr (outf, last_iteration, 0);
294 }
295
296 cf = SUB_CONFLICTS_IN_B (subscript);
297 fprintf (outf, "\n iterations_that_access_an_element_twice_in_B: ");
298 dump_conflict_function (outf, cf);
299 if (CF_NONTRIVIAL_P (cf))
300 {
301 tree last_iteration = SUB_LAST_CONFLICT (subscript);
302 fprintf (outf, "\n last_conflict: ");
303 print_generic_expr (outf, last_iteration, 0);
304 }
305
306 fprintf (outf, "\n (Subscript distance: ");
307 print_generic_expr (outf, SUB_DISTANCE (subscript), 0);
308 fprintf (outf, " ))\n");
309 }
310
311 /* Print the classic direction vector DIRV to OUTF. */
312
313 DEBUG_FUNCTION void
print_direction_vector(FILE * outf,lambda_vector dirv,int length)314 print_direction_vector (FILE *outf,
315 lambda_vector dirv,
316 int length)
317 {
318 int eq;
319
320 for (eq = 0; eq < length; eq++)
321 {
322 enum data_dependence_direction dir = ((enum data_dependence_direction)
323 dirv[eq]);
324
325 switch (dir)
326 {
327 case dir_positive:
328 fprintf (outf, " +");
329 break;
330 case dir_negative:
331 fprintf (outf, " -");
332 break;
333 case dir_equal:
334 fprintf (outf, " =");
335 break;
336 case dir_positive_or_equal:
337 fprintf (outf, " +=");
338 break;
339 case dir_positive_or_negative:
340 fprintf (outf, " +-");
341 break;
342 case dir_negative_or_equal:
343 fprintf (outf, " -=");
344 break;
345 case dir_star:
346 fprintf (outf, " *");
347 break;
348 default:
349 fprintf (outf, "indep");
350 break;
351 }
352 }
353 fprintf (outf, "\n");
354 }
355
356 /* Print a vector of direction vectors. */
357
358 DEBUG_FUNCTION void
print_dir_vectors(FILE * outf,vec<lambda_vector> dir_vects,int length)359 print_dir_vectors (FILE *outf, vec<lambda_vector> dir_vects,
360 int length)
361 {
362 unsigned j;
363 lambda_vector v;
364
365 FOR_EACH_VEC_ELT (dir_vects, j, v)
366 print_direction_vector (outf, v, length);
367 }
368
369 /* Print out a vector VEC of length N to OUTFILE. */
370
371 DEBUG_FUNCTION void
print_lambda_vector(FILE * outfile,lambda_vector vector,int n)372 print_lambda_vector (FILE * outfile, lambda_vector vector, int n)
373 {
374 int i;
375
376 for (i = 0; i < n; i++)
377 fprintf (outfile, "%3d ", vector[i]);
378 fprintf (outfile, "\n");
379 }
380
381 /* Print a vector of distance vectors. */
382
383 DEBUG_FUNCTION void
print_dist_vectors(FILE * outf,vec<lambda_vector> dist_vects,int length)384 print_dist_vectors (FILE *outf, vec<lambda_vector> dist_vects,
385 int length)
386 {
387 unsigned j;
388 lambda_vector v;
389
390 FOR_EACH_VEC_ELT (dist_vects, j, v)
391 print_lambda_vector (outf, v, length);
392 }
393
/* Dump function for a DATA_DEPENDENCE_RELATION structure.  Prints the
   two data references, then either "don't know", "no dependence", or
   the full subscript / loop-nest / distance / direction information
   depending on the state of DDR_ARE_DEPENDENT.  */

DEBUG_FUNCTION void
dump_data_dependence_relation (FILE *outf,
			       struct data_dependence_relation *ddr)
{
  struct data_reference *dra, *drb;

  fprintf (outf, "(Data Dep: \n");

  /* Unknown dependence: print whatever references are available and
     bail out.  */
  if (!ddr || DDR_ARE_DEPENDENT (ddr) == chrec_dont_know)
    {
      if (ddr)
	{
	  dra = DDR_A (ddr);
	  drb = DDR_B (ddr);
	  if (dra)
	    dump_data_reference (outf, dra);
	  else
	    fprintf (outf, " (nil)\n");
	  if (drb)
	    dump_data_reference (outf, drb);
	  else
	    fprintf (outf, " (nil)\n");
	}
      fprintf (outf, " (don't know)\n)\n");
      return;
    }

  dra = DDR_A (ddr);
  drb = DDR_B (ddr);
  dump_data_reference (outf, dra);
  dump_data_reference (outf, drb);

  if (DDR_ARE_DEPENDENT (ddr) == chrec_known)
    fprintf (outf, " (no dependence)\n");

  /* NULL_TREE means the dependence was computed; dump the details.  */
  else if (DDR_ARE_DEPENDENT (ddr) == NULL_TREE)
    {
      unsigned int i;
      struct loop *loopi;

      /* Access functions and conflict information per subscript.  */
      for (i = 0; i < DDR_NUM_SUBSCRIPTS (ddr); i++)
	{
	  fprintf (outf, " access_fn_A: ");
	  print_generic_stmt (outf, DR_ACCESS_FN (dra, i), 0);
	  fprintf (outf, " access_fn_B: ");
	  print_generic_stmt (outf, DR_ACCESS_FN (drb, i), 0);
	  dump_subscript (outf, DDR_SUBSCRIPT (ddr, i));
	}

      fprintf (outf, " inner loop index: %d\n", DDR_INNER_LOOP (ddr));
      fprintf (outf, " loop nest: (");
      FOR_EACH_VEC_ELT (DDR_LOOP_NEST (ddr), i, loopi)
	fprintf (outf, "%d ", loopi->num);
      fprintf (outf, ")\n");

      /* Classic distance and direction vectors, one entry per loop of
	 the nest.  */
      for (i = 0; i < DDR_NUM_DIST_VECTS (ddr); i++)
	{
	  fprintf (outf, " distance_vector: ");
	  print_lambda_vector (outf, DDR_DIST_VECT (ddr, i),
			       DDR_NB_LOOPS (ddr));
	}

      for (i = 0; i < DDR_NUM_DIR_VECTS (ddr); i++)
	{
	  fprintf (outf, " direction_vector: ");
	  print_direction_vector (outf, DDR_DIR_VECT (ddr, i),
				  DDR_NB_LOOPS (ddr));
	}
    }

  fprintf (outf, ")\n");
}
468
/* Debug version: dump the dependence relation DDR to stderr.  */

DEBUG_FUNCTION void
debug_data_dependence_relation (struct data_dependence_relation *ddr)
{
  dump_data_dependence_relation (stderr, ddr);
}
476
477 /* Dump into FILE all the dependence relations from DDRS. */
478
479 DEBUG_FUNCTION void
dump_data_dependence_relations(FILE * file,vec<ddr_p> ddrs)480 dump_data_dependence_relations (FILE *file,
481 vec<ddr_p> ddrs)
482 {
483 unsigned int i;
484 struct data_dependence_relation *ddr;
485
486 FOR_EACH_VEC_ELT (ddrs, i, ddr)
487 dump_data_dependence_relation (file, ddr);
488 }
489
/* Unified debug entry point: dump the dependence relations REF to
   stderr.  */

DEBUG_FUNCTION void
debug (vec<ddr_p> &ref)
{
  dump_data_dependence_relations (stderr, ref);
}
495
496 DEBUG_FUNCTION void
debug(vec<ddr_p> * ptr)497 debug (vec<ddr_p> *ptr)
498 {
499 if (ptr)
500 debug (*ptr);
501 else
502 fprintf (stderr, "<nil>\n");
503 }
504
505
/* Dump to STDERR all the dependence relations from DDRS.  */

DEBUG_FUNCTION void
debug_data_dependence_relations (vec<ddr_p> ddrs)
{
  dump_data_dependence_relations (stderr, ddrs);
}
513
/* Dumps the distance and direction vectors in FILE.  DDRS contains
   the dependence relations; the length of each dumped vector is the
   number of loops in the considered nest (DDR_NB_LOOPS).  Only
   affine relations with a computed dependence are dumped.  */

DEBUG_FUNCTION void
dump_dist_dir_vectors (FILE *file, vec<ddr_p> ddrs)
{
  unsigned int i, j;
  struct data_dependence_relation *ddr;
  lambda_vector v;

  FOR_EACH_VEC_ELT (ddrs, i, ddr)
    if (DDR_ARE_DEPENDENT (ddr) == NULL_TREE && DDR_AFFINE_P (ddr))
      {
	FOR_EACH_VEC_ELT (DDR_DIST_VECTS (ddr), j, v)
	  {
	    fprintf (file, "DISTANCE_V (");
	    print_lambda_vector (file, v, DDR_NB_LOOPS (ddr));
	    fprintf (file, ")\n");
	  }

	FOR_EACH_VEC_ELT (DDR_DIR_VECTS (ddr), j, v)
	  {
	    fprintf (file, "DIRECTION_V (");
	    print_direction_vector (file, v, DDR_NB_LOOPS (ddr));
	    fprintf (file, ")\n");
	  }
      }

  fprintf (file, "\n\n");
}
546
547 /* Dumps the data dependence relations DDRS in FILE. */
548
549 DEBUG_FUNCTION void
dump_ddrs(FILE * file,vec<ddr_p> ddrs)550 dump_ddrs (FILE *file, vec<ddr_p> ddrs)
551 {
552 unsigned int i;
553 struct data_dependence_relation *ddr;
554
555 FOR_EACH_VEC_ELT (ddrs, i, ddr)
556 dump_data_dependence_relation (file, ddr);
557
558 fprintf (file, "\n\n");
559 }
560
/* Debug version: dump the dependence relations DDRS to stderr.  */

DEBUG_FUNCTION void
debug_ddrs (vec<ddr_p> ddrs)
{
  dump_ddrs (stderr, ddrs);
}
566
/* Helper function for split_constant_offset.  Expresses OP0 CODE OP1
   (the type of the result is TYPE) as VAR + OFF, where OFF is a nonzero
   constant of type ssizetype, and returns true.  If we cannot do this
   with OFF nonzero, OFF and VAR are set to NULL_TREE instead and false
   is returned.  Mutually recursive with split_constant_offset.  */

static bool
split_constant_offset_1 (tree type, tree op0, enum tree_code code, tree op1,
			 tree *var, tree *off)
{
  tree var0, var1;
  tree off0, off1;
  enum tree_code ocode = code;

  *var = NULL_TREE;
  *off = NULL_TREE;

  switch (code)
    {
    case INTEGER_CST:
      /* A bare constant: zero variable part, the constant itself as
	 the offset.  */
      *var = build_int_cst (type, 0);
      *off = fold_convert (ssizetype, op0);
      return true;

    case POINTER_PLUS_EXPR:
      /* The offsets are recombined with PLUS_EXPR in ssizetype.  */
      ocode = PLUS_EXPR;
      /* FALLTHROUGH */
    case PLUS_EXPR:
    case MINUS_EXPR:
      /* Split both operands, then recombine variable and constant
	 parts separately.  */
      split_constant_offset (op0, &var0, &off0);
      split_constant_offset (op1, &var1, &off1);
      *var = fold_build2 (code, type, var0, var1);
      *off = size_binop (ocode, off0, off1);
      return true;

    case MULT_EXPR:
      /* Only multiplication by a constant factor is handled; it
	 scales both the variable and the constant part.  */
      if (TREE_CODE (op1) != INTEGER_CST)
	return false;

      split_constant_offset (op0, &var0, &off0);
      *var = fold_build2 (MULT_EXPR, type, var0, op1);
      *off = size_binop (MULT_EXPR, off0, fold_convert (ssizetype, op1));
      return true;

    case ADDR_EXPR:
      {
	tree base, poffset;
	HOST_WIDE_INT pbitsize, pbitpos;
	machine_mode pmode;
	int punsignedp, preversep, pvolatilep;

	/* Decompose the addressed reference into base, bit position
	   and variable offset; punt unless the access is byte
	   aligned.  */
	op0 = TREE_OPERAND (op0, 0);
	base
	  = get_inner_reference (op0, &pbitsize, &pbitpos, &poffset, &pmode,
				 &punsignedp, &preversep, &pvolatilep, false);

	if (pbitpos % BITS_PER_UNIT != 0)
	  return false;
	base = build_fold_addr_expr (base);
	off0 = ssize_int (pbitpos / BITS_PER_UNIT);

	if (poffset)
	  {
	    /* Fold the variable part of the inner offset back into the
	       base address, keeping its constant part in OFF0.  */
	    split_constant_offset (poffset, &poffset, &off1);
	    off0 = size_binop (PLUS_EXPR, off0, off1);
	    if (POINTER_TYPE_P (TREE_TYPE (base)))
	      base = fold_build_pointer_plus (base, poffset);
	    else
	      base = fold_build2 (PLUS_EXPR, TREE_TYPE (base), base,
				  fold_convert (TREE_TYPE (base), poffset));
	  }

	var0 = fold_convert (type, base);

	/* If variable length types are involved, punt, otherwise casts
	   might be converted into ARRAY_REFs in gimplify_conversion.
	   To compute that ARRAY_REF's element size TYPE_SIZE_UNIT, which
	   possibly no longer appears in current GIMPLE, might resurface.
	   This perhaps could run
	   if (CONVERT_EXPR_P (var0))
	     {
	       gimplify_conversion (&var0);
	       // Attempt to fill in any within var0 found ARRAY_REF's
	       // element size from corresponding op embedded ARRAY_REF,
	       // if unsuccessful, just punt.
	     }  */
	while (POINTER_TYPE_P (type))
	  type = TREE_TYPE (type);
	if (int_size_in_bytes (type) < 0)
	  return false;

	*var = var0;
	*off = off0;
	return true;
      }

    case SSA_NAME:
      {
	/* Look through a single GIMPLE assignment defining OP0 and
	   split its right-hand side instead.  */
	if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (op0))
	  return false;

	gimple *def_stmt = SSA_NAME_DEF_STMT (op0);
	enum tree_code subcode;

	if (gimple_code (def_stmt) != GIMPLE_ASSIGN)
	  return false;

	var0 = gimple_assign_rhs1 (def_stmt);
	subcode = gimple_assign_rhs_code (def_stmt);
	var1 = gimple_assign_rhs2 (def_stmt);

	return split_constant_offset_1 (type, var0, subcode, var1, var, off);
      }
    CASE_CONVERT:
      {
	/* We must not introduce undefined overflow, and we must not
	   change the value.  Hence we're okay if the inner type doesn't
	   overflow to start with (pointer or signed), the outer type
	   also is an integer or pointer and the outer precision is at
	   least as large as the inner.  */
	tree itype = TREE_TYPE (op0);
	if ((POINTER_TYPE_P (itype)
	     || (INTEGRAL_TYPE_P (itype) && TYPE_OVERFLOW_UNDEFINED (itype)))
	    && TYPE_PRECISION (type) >= TYPE_PRECISION (itype)
	    && (POINTER_TYPE_P (type) || INTEGRAL_TYPE_P (type)))
	  {
	    split_constant_offset (op0, &var0, off);
	    *var = fold_convert (type, var0);
	    return true;
	  }
	return false;
      }

    default:
      return false;
    }
}
703
/* Expresses EXP as VAR + OFF, where OFF is a constant.  The type of OFF
   will be ssizetype.  When no split is possible, *VAR is EXP itself and
   *OFF is zero.  */

void
split_constant_offset (tree exp, tree *var, tree *off)
{
  tree type = TREE_TYPE (exp), otype, op0, op1, e, o;
  enum tree_code code;

  /* Default result: the whole expression with a zero offset.  */
  *var = exp;
  *off = ssize_int (0);
  STRIP_NOPS (exp);

  /* Chrecs and ternary rhs forms are not handled by
     split_constant_offset_1; keep the default.  */
  if (tree_is_chrec (exp)
      || get_gimple_rhs_class (TREE_CODE (exp)) == GIMPLE_TERNARY_RHS)
    return;

  otype = TREE_TYPE (exp);
  code = TREE_CODE (exp);
  extract_ops_from_tree (exp, &code, &op0, &op1);
  if (split_constant_offset_1 (otype, op0, code, op1, &e, &o))
    {
      /* Convert the variable part back to the type of EXP.  */
      *var = fold_convert (type, e);
      *off = o;
    }
}
730
731 /* Returns the address ADDR of an object in a canonical shape (without nop
732 casts, and with type of pointer to the object). */
733
734 static tree
canonicalize_base_object_address(tree addr)735 canonicalize_base_object_address (tree addr)
736 {
737 tree orig = addr;
738
739 STRIP_NOPS (addr);
740
741 /* The base address may be obtained by casting from integer, in that case
742 keep the cast. */
743 if (!POINTER_TYPE_P (TREE_TYPE (addr)))
744 return orig;
745
746 if (TREE_CODE (addr) != ADDR_EXPR)
747 return addr;
748
749 return build_fold_addr_expr (TREE_OPERAND (addr, 0));
750 }
751
/* Analyzes the behavior of the memory reference DR in the innermost loop or
   basic block that contains it.  Returns true if analysis succeed or false
   otherwise.  On success fills in DR_BASE_ADDRESS, DR_OFFSET, DR_INIT,
   DR_STEP and DR_ALIGNED_TO.  When NEST is non-null, the evolutions of
   base and offset must be affine; otherwise a non-affine evolution is
   treated as invariant.  */

bool
dr_analyze_innermost (struct data_reference *dr, struct loop *nest)
{
  gimple *stmt = DR_STMT (dr);
  struct loop *loop = loop_containing_stmt (stmt);
  tree ref = DR_REF (dr);
  HOST_WIDE_INT pbitsize, pbitpos;
  tree base, poffset;
  machine_mode pmode;
  int punsignedp, preversep, pvolatilep;
  affine_iv base_iv, offset_iv;
  tree init, dinit, step;
  bool in_loop = (loop && loop->num);

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "analyze_innermost: ");

  /* Decompose the reference into a base object, a bit position and a
     variable offset.  */
  base = get_inner_reference (ref, &pbitsize, &pbitpos, &poffset, &pmode,
			      &punsignedp, &preversep, &pvolatilep, false);
  gcc_assert (base != NULL_TREE);

  /* The access must start on a byte boundary.  */
  if (pbitpos % BITS_PER_UNIT != 0)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "failed: bit offset alignment.\n");
      return false;
    }

  /* Reverse storage order accesses are not supported.  */
  if (preversep)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "failed: reverse storage order.\n");
      return false;
    }

  /* Canonicalize the base to an address, folding a constant MEM_REF
     offset into POFFSET.  */
  if (TREE_CODE (base) == MEM_REF)
    {
      if (!integer_zerop (TREE_OPERAND (base, 1)))
	{
	  offset_int moff = mem_ref_offset (base);
	  tree mofft = wide_int_to_tree (sizetype, moff);
	  if (!poffset)
	    poffset = mofft;
	  else
	    poffset = size_binop (PLUS_EXPR, poffset, mofft);
	}
      base = TREE_OPERAND (base, 0);
    }
  else
    base = build_fold_addr_expr (base);

  /* Analyze the evolution of the base address within the loop.  When
     it is not a simple affine IV and NEST is null, fall back to
     treating the base as invariant.  */
  if (in_loop)
    {
      if (!simple_iv (loop, loop_containing_stmt (stmt), base, &base_iv,
		      nest ? true : false))
	{
	  if (nest)
	    {
	      if (dump_file && (dump_flags & TDF_DETAILS))
		fprintf (dump_file, "failed: evolution of base is not"
			 " affine.\n");
	      return false;
	    }
	  else
	    {
	      base_iv.base = base;
	      base_iv.step = ssize_int (0);
	      base_iv.no_overflow = true;
	    }
	}
    }
  else
    {
      base_iv.base = base;
      base_iv.step = ssize_int (0);
      base_iv.no_overflow = true;
    }

  /* Likewise for the variable offset part.  */
  if (!poffset)
    {
      offset_iv.base = ssize_int (0);
      offset_iv.step = ssize_int (0);
    }
  else
    {
      if (!in_loop)
	{
	  offset_iv.base = poffset;
	  offset_iv.step = ssize_int (0);
	}
      else if (!simple_iv (loop, loop_containing_stmt (stmt),
			   poffset, &offset_iv,
			   nest ? true : false))
	{
	  if (nest)
	    {
	      if (dump_file && (dump_flags & TDF_DETAILS))
		fprintf (dump_file, "failed: evolution of offset is not"
			 " affine.\n");
	      return false;
	    }
	  else
	    {
	      offset_iv.base = poffset;
	      offset_iv.step = ssize_int (0);
	    }
	}
    }

  /* Accumulate into INIT the constant parts of the bit position and of
     both IV bases.  */
  init = ssize_int (pbitpos / BITS_PER_UNIT);
  split_constant_offset (base_iv.base, &base_iv.base, &dinit);
  init = size_binop (PLUS_EXPR, init, dinit);
  split_constant_offset (offset_iv.base, &offset_iv.base, &dinit);
  init = size_binop (PLUS_EXPR, init, dinit);

  /* The overall step is the sum of the base and offset steps.  */
  step = size_binop (PLUS_EXPR,
		     fold_convert (ssizetype, base_iv.step),
		     fold_convert (ssizetype, offset_iv.step));

  DR_BASE_ADDRESS (dr) = canonicalize_base_object_address (base_iv.base);

  DR_OFFSET (dr) = fold_convert (ssizetype, offset_iv.base);
  DR_INIT (dr) = init;
  DR_STEP (dr) = step;

  DR_ALIGNED_TO (dr) = size_int (highest_pow2_factor (offset_iv.base));

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "success.\n");

  return true;
}
888
/* Determines the base object and the list of indices of memory reference
   DR, analyzed in LOOP and instantiated in loop nest NEST.  Fills in
   DR_BASE_OBJECT and DR_ACCESS_FNS of DR.  */

static void
dr_analyze_indices (struct data_reference *dr, loop_p nest, loop_p loop)
{
  vec<tree> access_fns = vNULL;
  tree ref, op;
  tree base, off, access_fn;
  basic_block before_loop;

  /* If analyzing a basic-block there are no indices to analyze
     and thus no access functions.  */
  if (!nest)
    {
      DR_BASE_OBJECT (dr) = DR_REF (dr);
      DR_ACCESS_FNS (dr).create (0);
      return;
    }

  ref = DR_REF (dr);
  before_loop = block_before_loop (nest);

  /* REALPART_EXPR and IMAGPART_EXPR can be handled like accesses
     into a two element array with a constant index.  The base is
     then just the immediate underlying object.  */
  if (TREE_CODE (ref) == REALPART_EXPR)
    {
      ref = TREE_OPERAND (ref, 0);
      access_fns.safe_push (integer_zero_node);
    }
  else if (TREE_CODE (ref) == IMAGPART_EXPR)
    {
      ref = TREE_OPERAND (ref, 0);
      access_fns.safe_push (integer_one_node);
    }

  /* Analyze access functions of dimensions we know to be independent.
     Peel handled components off REF from the outside in, pushing one
     access function per dimension.  */
  while (handled_component_p (ref))
    {
      if (TREE_CODE (ref) == ARRAY_REF)
	{
	  /* The array index, as a scalar evolution instantiated in the
	     nest, is the access function for this dimension.  */
	  op = TREE_OPERAND (ref, 1);
	  access_fn = analyze_scalar_evolution (loop, op);
	  access_fn = instantiate_scev (before_loop, loop, access_fn);
	  access_fns.safe_push (access_fn);
	}
      else if (TREE_CODE (ref) == COMPONENT_REF
	       && TREE_CODE (TREE_TYPE (TREE_OPERAND (ref, 0))) == RECORD_TYPE)
	{
	  /* For COMPONENT_REFs of records (but not unions!) use the
	     FIELD_DECL offset as constant access function so we can
	     disambiguate a[i].f1 and a[i].f2.  */
	  tree off = component_ref_field_offset (ref);
	  off = size_binop (PLUS_EXPR,
			    size_binop (MULT_EXPR,
					fold_convert (bitsizetype, off),
					bitsize_int (BITS_PER_UNIT)),
			    DECL_FIELD_BIT_OFFSET (TREE_OPERAND (ref, 1)));
	  access_fns.safe_push (off);
	}
      else
	/* If we have an unhandled component we could not translate
	   to an access function stop analyzing.  We have determined
	   our base object in this case.  */
	break;

      ref = TREE_OPERAND (ref, 0);
    }

  /* If the address operand of a MEM_REF base has an evolution in the
     analyzed nest, add it as an additional independent access-function.  */
  if (TREE_CODE (ref) == MEM_REF)
    {
      op = TREE_OPERAND (ref, 0);
      access_fn = analyze_scalar_evolution (loop, op);
      access_fn = instantiate_scev (before_loop, loop, access_fn);
      if (TREE_CODE (access_fn) == POLYNOMIAL_CHREC)
	{
	  tree orig_type;
	  tree memoff = TREE_OPERAND (ref, 1);
	  base = initial_condition (access_fn);
	  orig_type = TREE_TYPE (base);
	  STRIP_USELESS_TYPE_CONVERSION (base);
	  split_constant_offset (base, &base, &off);
	  STRIP_USELESS_TYPE_CONVERSION (base);
	  /* Fold the MEM_REF offset into the evolutions initial
	     value to make more bases comparable.  */
	  if (!integer_zerop (memoff))
	    {
	      off = size_binop (PLUS_EXPR, off,
				fold_convert (ssizetype, memoff));
	      memoff = build_int_cst (TREE_TYPE (memoff), 0);
	    }
	  /* Adjust the offset so it is a multiple of the access type
	     size and thus we separate bases that can possibly be used
	     to produce partial overlaps (which the access_fn machinery
	     cannot handle).  */
	  wide_int rem;
	  if (TYPE_SIZE_UNIT (TREE_TYPE (ref))
	      && TREE_CODE (TYPE_SIZE_UNIT (TREE_TYPE (ref))) == INTEGER_CST
	      && !integer_zerop (TYPE_SIZE_UNIT (TREE_TYPE (ref))))
	    rem = wi::mod_trunc (off, TYPE_SIZE_UNIT (TREE_TYPE (ref)), SIGNED);
	  else
	    /* If we can't compute the remainder simply force the initial
	       condition to zero.  */
	    rem = off;
	  off = wide_int_to_tree (ssizetype, wi::sub (off, rem));
	  memoff = wide_int_to_tree (TREE_TYPE (memoff), rem);
	  /* And finally replace the initial condition.  */
	  access_fn = chrec_replace_initial_condition
	      (access_fn, fold_convert (orig_type, off));
	  /* ??? This is still not a suitable base object for
	     dr_may_alias_p - the base object needs to be an
	     access that covers the object as whole.  With
	     an evolution in the pointer this cannot be
	     guaranteed.
	     As a band-aid, mark the access so we can special-case
	     it in dr_may_alias_p.  */
	  tree old = ref;
	  ref = fold_build2_loc (EXPR_LOCATION (ref),
				 MEM_REF, TREE_TYPE (ref),
				 base, memoff);
	  MR_DEPENDENCE_CLIQUE (ref) = MR_DEPENDENCE_CLIQUE (old);
	  MR_DEPENDENCE_BASE (ref) = MR_DEPENDENCE_BASE (old);
	  DR_UNCONSTRAINED_BASE (dr) = true;
	  access_fns.safe_push (access_fn);
	}
    }
  else if (DECL_P (ref))
    {
      /* Canonicalize DR_BASE_OBJECT to MEM_REF form.  */
      ref = build2 (MEM_REF, TREE_TYPE (ref),
		    build_fold_addr_expr (ref),
		    build_int_cst (reference_alias_ptr_type (ref), 0));
    }

  DR_BASE_OBJECT (dr) = ref;
  DR_ACCESS_FNS (dr) = access_fns;
}
1029
1030 /* Extracts the alias analysis information from the memory reference DR. */
1031
1032 static void
dr_analyze_alias(struct data_reference * dr)1033 dr_analyze_alias (struct data_reference *dr)
1034 {
1035 tree ref = DR_REF (dr);
1036 tree base = get_base_address (ref), addr;
1037
1038 if (INDIRECT_REF_P (base)
1039 || TREE_CODE (base) == MEM_REF)
1040 {
1041 addr = TREE_OPERAND (base, 0);
1042 if (TREE_CODE (addr) == SSA_NAME)
1043 DR_PTR_INFO (dr) = SSA_NAME_PTR_INFO (addr);
1044 }
1045 }
1046
/* Frees data reference DR, including its vector of access functions.  */

void
free_data_ref (data_reference_p dr)
{
  /* Release the access-function vector before the structure itself.  */
  DR_ACCESS_FNS (dr).release ();
  free (dr);
}
1055
1056 /* Analyzes memory reference MEMREF accessed in STMT. The reference
1057 is read if IS_READ is true, write otherwise. Returns the
1058 data_reference description of MEMREF. NEST is the outermost loop
1059 in which the reference should be instantiated, LOOP is the loop in
1060 which the data reference should be analyzed. */
1061
1062 struct data_reference *
create_data_ref(loop_p nest,loop_p loop,tree memref,gimple * stmt,bool is_read)1063 create_data_ref (loop_p nest, loop_p loop, tree memref, gimple *stmt,
1064 bool is_read)
1065 {
1066 struct data_reference *dr;
1067
1068 if (dump_file && (dump_flags & TDF_DETAILS))
1069 {
1070 fprintf (dump_file, "Creating dr for ");
1071 print_generic_expr (dump_file, memref, TDF_SLIM);
1072 fprintf (dump_file, "\n");
1073 }
1074
1075 dr = XCNEW (struct data_reference);
1076 DR_STMT (dr) = stmt;
1077 DR_REF (dr) = memref;
1078 DR_IS_READ (dr) = is_read;
1079
1080 dr_analyze_innermost (dr, nest);
1081 dr_analyze_indices (dr, nest, loop);
1082 dr_analyze_alias (dr);
1083
1084 if (dump_file && (dump_flags & TDF_DETAILS))
1085 {
1086 unsigned i;
1087 fprintf (dump_file, "\tbase_address: ");
1088 print_generic_expr (dump_file, DR_BASE_ADDRESS (dr), TDF_SLIM);
1089 fprintf (dump_file, "\n\toffset from base address: ");
1090 print_generic_expr (dump_file, DR_OFFSET (dr), TDF_SLIM);
1091 fprintf (dump_file, "\n\tconstant offset from base address: ");
1092 print_generic_expr (dump_file, DR_INIT (dr), TDF_SLIM);
1093 fprintf (dump_file, "\n\tstep: ");
1094 print_generic_expr (dump_file, DR_STEP (dr), TDF_SLIM);
1095 fprintf (dump_file, "\n\taligned to: ");
1096 print_generic_expr (dump_file, DR_ALIGNED_TO (dr), TDF_SLIM);
1097 fprintf (dump_file, "\n\tbase_object: ");
1098 print_generic_expr (dump_file, DR_BASE_OBJECT (dr), TDF_SLIM);
1099 fprintf (dump_file, "\n");
1100 for (i = 0; i < DR_NUM_DIMENSIONS (dr); i++)
1101 {
1102 fprintf (dump_file, "\tAccess function %d: ", i);
1103 print_generic_stmt (dump_file, DR_ACCESS_FN (dr, i), TDF_SLIM);
1104 }
1105 }
1106
1107 return dr;
1108 }
1109
1110 /* Check if OFFSET1 and OFFSET2 (DR_OFFSETs of some data-refs) are identical
1111 expressions. */
1112 static bool
dr_equal_offsets_p1(tree offset1,tree offset2)1113 dr_equal_offsets_p1 (tree offset1, tree offset2)
1114 {
1115 bool res;
1116
1117 STRIP_NOPS (offset1);
1118 STRIP_NOPS (offset2);
1119
1120 if (offset1 == offset2)
1121 return true;
1122
1123 if (TREE_CODE (offset1) != TREE_CODE (offset2)
1124 || (!BINARY_CLASS_P (offset1) && !UNARY_CLASS_P (offset1)))
1125 return false;
1126
1127 res = dr_equal_offsets_p1 (TREE_OPERAND (offset1, 0),
1128 TREE_OPERAND (offset2, 0));
1129
1130 if (!res || !BINARY_CLASS_P (offset1))
1131 return res;
1132
1133 res = dr_equal_offsets_p1 (TREE_OPERAND (offset1, 1),
1134 TREE_OPERAND (offset2, 1));
1135
1136 return res;
1137 }
1138
1139 /* Check if DRA and DRB have equal offsets. */
1140 bool
dr_equal_offsets_p(struct data_reference * dra,struct data_reference * drb)1141 dr_equal_offsets_p (struct data_reference *dra,
1142 struct data_reference *drb)
1143 {
1144 tree offset1, offset2;
1145
1146 offset1 = DR_OFFSET (dra);
1147 offset2 = DR_OFFSET (drb);
1148
1149 return dr_equal_offsets_p1 (offset1, offset2);
1150 }
1151
1152 /* Returns true if FNA == FNB. */
1153
1154 static bool
affine_function_equal_p(affine_fn fna,affine_fn fnb)1155 affine_function_equal_p (affine_fn fna, affine_fn fnb)
1156 {
1157 unsigned i, n = fna.length ();
1158
1159 if (n != fnb.length ())
1160 return false;
1161
1162 for (i = 0; i < n; i++)
1163 if (!operand_equal_p (fna[i], fnb[i], 0))
1164 return false;
1165
1166 return true;
1167 }
1168
1169 /* If all the functions in CF are the same, returns one of them,
1170 otherwise returns NULL. */
1171
1172 static affine_fn
common_affine_function(conflict_function * cf)1173 common_affine_function (conflict_function *cf)
1174 {
1175 unsigned i;
1176 affine_fn comm;
1177
1178 if (!CF_NONTRIVIAL_P (cf))
1179 return affine_fn ();
1180
1181 comm = cf->fns[0];
1182
1183 for (i = 1; i < cf->n; i++)
1184 if (!affine_function_equal_p (comm, cf->fns[i]))
1185 return affine_fn ();
1186
1187 return comm;
1188 }
1189
/* Returns the base of the affine function FN, i.e. its constant term
   (coefficient of index 0).  */

static tree
affine_function_base (affine_fn fn)
{
  return fn[0];
}
1197
1198 /* Returns true if FN is a constant. */
1199
1200 static bool
affine_function_constant_p(affine_fn fn)1201 affine_function_constant_p (affine_fn fn)
1202 {
1203 unsigned i;
1204 tree coef;
1205
1206 for (i = 1; fn.iterate (i, &coef); i++)
1207 if (!integer_zerop (coef))
1208 return false;
1209
1210 return true;
1211 }
1212
1213 /* Returns true if FN is the zero constant function. */
1214
1215 static bool
affine_function_zero_p(affine_fn fn)1216 affine_function_zero_p (affine_fn fn)
1217 {
1218 return (integer_zerop (affine_function_base (fn))
1219 && affine_function_constant_p (fn));
1220 }
1221
1222 /* Returns a signed integer type with the largest precision from TA
1223 and TB. */
1224
1225 static tree
signed_type_for_types(tree ta,tree tb)1226 signed_type_for_types (tree ta, tree tb)
1227 {
1228 if (TYPE_PRECISION (ta) > TYPE_PRECISION (tb))
1229 return signed_type_for (ta);
1230 else
1231 return signed_type_for (tb);
1232 }
1233
1234 /* Applies operation OP on affine functions FNA and FNB, and returns the
1235 result. */
1236
1237 static affine_fn
affine_fn_op(enum tree_code op,affine_fn fna,affine_fn fnb)1238 affine_fn_op (enum tree_code op, affine_fn fna, affine_fn fnb)
1239 {
1240 unsigned i, n, m;
1241 affine_fn ret;
1242 tree coef;
1243
1244 if (fnb.length () > fna.length ())
1245 {
1246 n = fna.length ();
1247 m = fnb.length ();
1248 }
1249 else
1250 {
1251 n = fnb.length ();
1252 m = fna.length ();
1253 }
1254
1255 ret.create (m);
1256 for (i = 0; i < n; i++)
1257 {
1258 tree type = signed_type_for_types (TREE_TYPE (fna[i]),
1259 TREE_TYPE (fnb[i]));
1260 ret.quick_push (fold_build2 (op, type, fna[i], fnb[i]));
1261 }
1262
1263 for (; fna.iterate (i, &coef); i++)
1264 ret.quick_push (fold_build2 (op, signed_type_for (TREE_TYPE (coef)),
1265 coef, integer_zero_node));
1266 for (; fnb.iterate (i, &coef); i++)
1267 ret.quick_push (fold_build2 (op, signed_type_for (TREE_TYPE (coef)),
1268 integer_zero_node, coef));
1269
1270 return ret;
1271 }
1272
/* Returns the sum of affine functions FNA and FNB.  The result is a
   fresh vector; the operands are not modified.  */

static affine_fn
affine_fn_plus (affine_fn fna, affine_fn fnb)
{
  return affine_fn_op (PLUS_EXPR, fna, fnb);
}
1280
/* Returns the difference of affine functions FNA and FNB.  The result
   is a fresh vector; the operands are not modified.  */

static affine_fn
affine_fn_minus (affine_fn fna, affine_fn fnb)
{
  return affine_fn_op (MINUS_EXPR, fna, fnb);
}
1288
/* Frees the storage held by affine function FN.  */

static void
affine_fn_free (affine_fn fn)
{
  fn.release ();
}
1296
1297 /* Determine for each subscript in the data dependence relation DDR
1298 the distance. */
1299
1300 static void
compute_subscript_distance(struct data_dependence_relation * ddr)1301 compute_subscript_distance (struct data_dependence_relation *ddr)
1302 {
1303 conflict_function *cf_a, *cf_b;
1304 affine_fn fn_a, fn_b, diff;
1305
1306 if (DDR_ARE_DEPENDENT (ddr) == NULL_TREE)
1307 {
1308 unsigned int i;
1309
1310 for (i = 0; i < DDR_NUM_SUBSCRIPTS (ddr); i++)
1311 {
1312 struct subscript *subscript;
1313
1314 subscript = DDR_SUBSCRIPT (ddr, i);
1315 cf_a = SUB_CONFLICTS_IN_A (subscript);
1316 cf_b = SUB_CONFLICTS_IN_B (subscript);
1317
1318 fn_a = common_affine_function (cf_a);
1319 fn_b = common_affine_function (cf_b);
1320 if (!fn_a.exists () || !fn_b.exists ())
1321 {
1322 SUB_DISTANCE (subscript) = chrec_dont_know;
1323 return;
1324 }
1325 diff = affine_fn_minus (fn_a, fn_b);
1326
1327 if (affine_function_constant_p (diff))
1328 SUB_DISTANCE (subscript) = affine_function_base (diff);
1329 else
1330 SUB_DISTANCE (subscript) = chrec_dont_know;
1331
1332 affine_fn_free (diff);
1333 }
1334 }
1335 }
1336
/* Returns a freshly allocated conflict function for "unknown":
   the analysis could not determine the conflicting iterations.  */

static conflict_function *
conflict_fn_not_known (void)
{
  conflict_function *fn = XCNEW (conflict_function);
  fn->n = NOT_KNOWN;

  return fn;
}
1347
/* Returns a freshly allocated conflict function for "independent":
   the accesses never conflict.  */

static conflict_function *
conflict_fn_no_dependence (void)
{
  conflict_function *fn = XCNEW (conflict_function);
  fn->n = NO_DEPENDENCE;

  return fn;
}
1358
/* Returns true if the address of OBJ is invariant in LOOP.  Walks the
   handled components of OBJ down to its base and checks that no
   operand that affects the address is defined inside LOOP.  */

static bool
object_address_invariant_in_loop_p (const struct loop *loop, const_tree obj)
{
  while (handled_component_p (obj))
    {
      if (TREE_CODE (obj) == ARRAY_REF)
	{
	  /* Index of the ARRAY_REF was zeroed in analyze_indices, thus we only
	     need to check the stride and the lower bound of the reference.  */
	  if (chrec_contains_symbols_defined_in_loop (TREE_OPERAND (obj, 2),
						      loop->num)
	      || chrec_contains_symbols_defined_in_loop (TREE_OPERAND (obj, 3),
							 loop->num))
	    return false;
	}
      else if (TREE_CODE (obj) == COMPONENT_REF)
	{
	  /* Operand 2 of a COMPONENT_REF is the field offset.  */
	  if (chrec_contains_symbols_defined_in_loop (TREE_OPERAND (obj, 2),
						      loop->num))
	    return false;
	}
      obj = TREE_OPERAND (obj, 0);
    }

  /* A non-dereferencing base (e.g. a declaration) has a fixed address.  */
  if (!INDIRECT_REF_P (obj)
      && TREE_CODE (obj) != MEM_REF)
    return true;

  /* For a dereference, the address is invariant iff the pointer it
     dereferences is not defined in LOOP.  */
  return !chrec_contains_symbols_defined_in_loop (TREE_OPERAND (obj, 0),
						  loop->num);
}
1392
/* Returns false if we can prove that data references A and B do not alias,
   true otherwise.  If LOOP_NEST is false no cross-iteration aliases are
   considered.  */

bool
dr_may_alias_p (const struct data_reference *a, const struct data_reference *b,
                bool loop_nest)
{
  tree addr_a = DR_BASE_OBJECT (a);
  tree addr_b = DR_BASE_OBJECT (b);

  /* If we are not processing a loop nest but scalar code we
     do not need to care about possible cross-iteration dependences
     and thus can process the full original reference.  Do so,
     similar to how loop invariant motion applies extra offset-based
     disambiguation.  */
  if (!loop_nest)
    {
      aff_tree off1, off2;
      widest_int size1, size2;
      get_inner_reference_aff (DR_REF (a), &off1, &size1);
      get_inner_reference_aff (DR_REF (b), &off2, &size2);
      /* Compute off2 - off1 and test whether the two extents can overlap.  */
      aff_combination_scale (&off1, -1);
      aff_combination_add (&off2, &off1);
      if (aff_comb_cannot_overlap_p (&off2, size1, size2))
	return false;
    }

  /* References in the same dependence clique but with different bases
     were annotated as non-aliasing (e.g. by loop versioning).  */
  if ((TREE_CODE (addr_a) == MEM_REF || TREE_CODE (addr_a) == TARGET_MEM_REF)
      && (TREE_CODE (addr_b) == MEM_REF || TREE_CODE (addr_b) == TARGET_MEM_REF)
      && MR_DEPENDENCE_CLIQUE (addr_a) == MR_DEPENDENCE_CLIQUE (addr_b)
      && MR_DEPENDENCE_BASE (addr_a) != MR_DEPENDENCE_BASE (addr_b))
    return false;

  /* If we had an evolution in a pointer-based MEM_REF BASE_OBJECT we
     do not know the size of the base-object.  So we cannot do any
     offset/overlap based analysis but have to rely on points-to
     information only.  */
  if (TREE_CODE (addr_a) == MEM_REF
      && (DR_UNCONSTRAINED_BASE (a)
	  || TREE_CODE (TREE_OPERAND (addr_a, 0)) == SSA_NAME))
    {
      /* For true dependences we can apply TBAA.  */
      if (flag_strict_aliasing
	  && DR_IS_WRITE (a) && DR_IS_READ (b)
	  && !alias_sets_conflict_p (get_alias_set (DR_REF (a)),
				     get_alias_set (DR_REF (b))))
	return false;
      if (TREE_CODE (addr_b) == MEM_REF)
	return ptr_derefs_may_alias_p (TREE_OPERAND (addr_a, 0),
				       TREE_OPERAND (addr_b, 0));
      else
	/* ADDR_B is not a dereference; compare against its address.  */
	return ptr_derefs_may_alias_p (TREE_OPERAND (addr_a, 0),
				       build_fold_addr_expr (addr_b));
    }
  else if (TREE_CODE (addr_b) == MEM_REF
	   && (DR_UNCONSTRAINED_BASE (b)
	       || TREE_CODE (TREE_OPERAND (addr_b, 0)) == SSA_NAME))
    {
      /* For true dependences we can apply TBAA.  */
      if (flag_strict_aliasing
	  && DR_IS_WRITE (a) && DR_IS_READ (b)
	  && !alias_sets_conflict_p (get_alias_set (DR_REF (a)),
				     get_alias_set (DR_REF (b))))
	return false;
      if (TREE_CODE (addr_a) == MEM_REF)
	return ptr_derefs_may_alias_p (TREE_OPERAND (addr_a, 0),
				       TREE_OPERAND (addr_b, 0));
      else
	return ptr_derefs_may_alias_p (build_fold_addr_expr (addr_a),
				       TREE_OPERAND (addr_b, 0));
    }

  /* Otherwise DR_BASE_OBJECT is an access that covers the whole object
     that is being subsetted in the loop nest.  Dispatch on the kind of
     dependence (output, anti, or general may-alias).  */
  if (DR_IS_WRITE (a) && DR_IS_WRITE (b))
    return refs_output_dependent_p (addr_a, addr_b);
  else if (DR_IS_READ (a) && DR_IS_WRITE (b))
    return refs_anti_dependent_p (addr_a, addr_b);
  return refs_may_alias_p (addr_a, addr_b);
}
1474
1475 /* Initialize a data dependence relation between data accesses A and
1476 B. NB_LOOPS is the number of loops surrounding the references: the
1477 size of the classic distance/direction vectors. */
1478
1479 struct data_dependence_relation *
initialize_data_dependence_relation(struct data_reference * a,struct data_reference * b,vec<loop_p> loop_nest)1480 initialize_data_dependence_relation (struct data_reference *a,
1481 struct data_reference *b,
1482 vec<loop_p> loop_nest)
1483 {
1484 struct data_dependence_relation *res;
1485 unsigned int i;
1486
1487 res = XNEW (struct data_dependence_relation);
1488 DDR_A (res) = a;
1489 DDR_B (res) = b;
1490 DDR_LOOP_NEST (res).create (0);
1491 DDR_REVERSED_P (res) = false;
1492 DDR_SUBSCRIPTS (res).create (0);
1493 DDR_DIR_VECTS (res).create (0);
1494 DDR_DIST_VECTS (res).create (0);
1495
1496 if (a == NULL || b == NULL)
1497 {
1498 DDR_ARE_DEPENDENT (res) = chrec_dont_know;
1499 return res;
1500 }
1501
1502 /* If the data references do not alias, then they are independent. */
1503 if (!dr_may_alias_p (a, b, loop_nest.exists ()))
1504 {
1505 DDR_ARE_DEPENDENT (res) = chrec_known;
1506 return res;
1507 }
1508
1509 /* The case where the references are exactly the same. */
1510 if (operand_equal_p (DR_REF (a), DR_REF (b), 0))
1511 {
1512 if ((loop_nest.exists ()
1513 && !object_address_invariant_in_loop_p (loop_nest[0],
1514 DR_BASE_OBJECT (a)))
1515 || DR_NUM_DIMENSIONS (a) == 0)
1516 {
1517 DDR_ARE_DEPENDENT (res) = chrec_dont_know;
1518 return res;
1519 }
1520 DDR_AFFINE_P (res) = true;
1521 DDR_ARE_DEPENDENT (res) = NULL_TREE;
1522 DDR_SUBSCRIPTS (res).create (DR_NUM_DIMENSIONS (a));
1523 DDR_LOOP_NEST (res) = loop_nest;
1524 DDR_INNER_LOOP (res) = 0;
1525 DDR_SELF_REFERENCE (res) = true;
1526 for (i = 0; i < DR_NUM_DIMENSIONS (a); i++)
1527 {
1528 struct subscript *subscript;
1529
1530 subscript = XNEW (struct subscript);
1531 SUB_CONFLICTS_IN_A (subscript) = conflict_fn_not_known ();
1532 SUB_CONFLICTS_IN_B (subscript) = conflict_fn_not_known ();
1533 SUB_LAST_CONFLICT (subscript) = chrec_dont_know;
1534 SUB_DISTANCE (subscript) = chrec_dont_know;
1535 DDR_SUBSCRIPTS (res).safe_push (subscript);
1536 }
1537 return res;
1538 }
1539
1540 /* If the references do not access the same object, we do not know
1541 whether they alias or not. */
1542 if (!operand_equal_p (DR_BASE_OBJECT (a), DR_BASE_OBJECT (b), 0))
1543 {
1544 DDR_ARE_DEPENDENT (res) = chrec_dont_know;
1545 return res;
1546 }
1547
1548 /* If the base of the object is not invariant in the loop nest, we cannot
1549 analyze it. TODO -- in fact, it would suffice to record that there may
1550 be arbitrary dependences in the loops where the base object varies. */
1551 if ((loop_nest.exists ()
1552 && !object_address_invariant_in_loop_p (loop_nest[0], DR_BASE_OBJECT (a)))
1553 || DR_NUM_DIMENSIONS (a) == 0)
1554 {
1555 DDR_ARE_DEPENDENT (res) = chrec_dont_know;
1556 return res;
1557 }
1558
1559 /* If the number of dimensions of the access to not agree we can have
1560 a pointer access to a component of the array element type and an
1561 array access while the base-objects are still the same. Punt. */
1562 if (DR_NUM_DIMENSIONS (a) != DR_NUM_DIMENSIONS (b))
1563 {
1564 DDR_ARE_DEPENDENT (res) = chrec_dont_know;
1565 return res;
1566 }
1567
1568 DDR_AFFINE_P (res) = true;
1569 DDR_ARE_DEPENDENT (res) = NULL_TREE;
1570 DDR_SUBSCRIPTS (res).create (DR_NUM_DIMENSIONS (a));
1571 DDR_LOOP_NEST (res) = loop_nest;
1572 DDR_INNER_LOOP (res) = 0;
1573 DDR_SELF_REFERENCE (res) = false;
1574
1575 for (i = 0; i < DR_NUM_DIMENSIONS (a); i++)
1576 {
1577 struct subscript *subscript;
1578
1579 subscript = XNEW (struct subscript);
1580 SUB_CONFLICTS_IN_A (subscript) = conflict_fn_not_known ();
1581 SUB_CONFLICTS_IN_B (subscript) = conflict_fn_not_known ();
1582 SUB_LAST_CONFLICT (subscript) = chrec_dont_know;
1583 SUB_DISTANCE (subscript) = chrec_dont_know;
1584 DDR_SUBSCRIPTS (res).safe_push (subscript);
1585 }
1586
1587 return res;
1588 }
1589
1590 /* Frees memory used by the conflict function F. */
1591
1592 static void
free_conflict_function(conflict_function * f)1593 free_conflict_function (conflict_function *f)
1594 {
1595 unsigned i;
1596
1597 if (CF_NONTRIVIAL_P (f))
1598 {
1599 for (i = 0; i < f->n; i++)
1600 affine_fn_free (f->fns[i]);
1601 }
1602 free (f);
1603 }
1604
1605 /* Frees memory used by SUBSCRIPTS. */
1606
1607 static void
free_subscripts(vec<subscript_p> subscripts)1608 free_subscripts (vec<subscript_p> subscripts)
1609 {
1610 unsigned i;
1611 subscript_p s;
1612
1613 FOR_EACH_VEC_ELT (subscripts, i, s)
1614 {
1615 free_conflict_function (s->conflicting_iterations_in_a);
1616 free_conflict_function (s->conflicting_iterations_in_b);
1617 free (s);
1618 }
1619 subscripts.release ();
1620 }
1621
/* Set DDR_ARE_DEPENDENT to CHREC and finalize the subscript overlap
   description.  The existing subscripts are freed and the vector is
   reset to empty.  */

static inline void
finalize_ddr_dependent (struct data_dependence_relation *ddr,
                        tree chrec)
{
  DDR_ARE_DEPENDENT (ddr) = chrec;
  /* free_subscripts releases the old vector; re-create an empty one so
     DDR_SUBSCRIPTS stays valid.  */
  free_subscripts (DDR_SUBSCRIPTS (ddr));
  DDR_SUBSCRIPTS (ddr).create (0);
}
1633
/* The dependence relation DDR cannot be represented by a distance
   vector; record this by clearing DDR_AFFINE_P.  */

static inline void
non_affine_dependence_relation (struct data_dependence_relation *ddr)
{
  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "(Dependence relation cannot be represented by distance vector.) \n");

  DDR_AFFINE_P (ddr) = false;
}
1645
1646
1647
1648 /* This section contains the classic Banerjee tests. */
1649
1650 /* Returns true iff CHREC_A and CHREC_B are not dependent on any index
1651 variables, i.e., if the ZIV (Zero Index Variable) test is true. */
1652
1653 static inline bool
ziv_subscript_p(const_tree chrec_a,const_tree chrec_b)1654 ziv_subscript_p (const_tree chrec_a, const_tree chrec_b)
1655 {
1656 return (evolution_function_is_constant_p (chrec_a)
1657 && evolution_function_is_constant_p (chrec_b));
1658 }
1659
1660 /* Returns true iff CHREC_A and CHREC_B are dependent on an index
1661 variable, i.e., if the SIV (Single Index Variable) test is true. */
1662
1663 static bool
siv_subscript_p(const_tree chrec_a,const_tree chrec_b)1664 siv_subscript_p (const_tree chrec_a, const_tree chrec_b)
1665 {
1666 if ((evolution_function_is_constant_p (chrec_a)
1667 && evolution_function_is_univariate_p (chrec_b))
1668 || (evolution_function_is_constant_p (chrec_b)
1669 && evolution_function_is_univariate_p (chrec_a)))
1670 return true;
1671
1672 if (evolution_function_is_univariate_p (chrec_a)
1673 && evolution_function_is_univariate_p (chrec_b))
1674 {
1675 switch (TREE_CODE (chrec_a))
1676 {
1677 case POLYNOMIAL_CHREC:
1678 switch (TREE_CODE (chrec_b))
1679 {
1680 case POLYNOMIAL_CHREC:
1681 if (CHREC_VARIABLE (chrec_a) != CHREC_VARIABLE (chrec_b))
1682 return false;
1683
1684 default:
1685 return true;
1686 }
1687
1688 default:
1689 return true;
1690 }
1691 }
1692
1693 return false;
1694 }
1695
/* Creates a conflict function with N dimensions.  The N affine
   functions, one per dimension, follow as variadic arguments.  */

static conflict_function *
conflict_fn (unsigned n, ...)
{
  unsigned i;
  conflict_function *ret = XCNEW (conflict_function);
  va_list ap;

  /* N must be a valid dimension count for the fixed-size fns array.  */
  gcc_assert (0 < n && n <= MAX_DIM);
  va_start (ap, n);

  ret->n = n;
  for (i = 0; i < n; i++)
    ret->fns[i] = va_arg (ap, affine_fn);
  va_end (ap);

  return ret;
}
1716
/* Returns the constant affine function with value CST: a single-entry
   vector holding only the base term.  */

static affine_fn
affine_fn_cst (tree cst)
{
  affine_fn fn;
  fn.create (1);
  fn.quick_push (cst);
  return fn;
}
1727
1728 /* Returns affine function with single variable, CST + COEF * x_DIM. */
1729
1730 static affine_fn
affine_fn_univar(tree cst,unsigned dim,tree coef)1731 affine_fn_univar (tree cst, unsigned dim, tree coef)
1732 {
1733 affine_fn fn;
1734 fn.create (dim + 1);
1735 unsigned i;
1736
1737 gcc_assert (dim > 0);
1738 fn.quick_push (cst);
1739 for (i = 1; i < dim; i++)
1740 fn.quick_push (integer_zero_node);
1741 fn.quick_push (coef);
1742 return fn;
1743 }
1744
/* Analyze a ZIV (Zero Index Variable) subscript.  *OVERLAPS_A and
   *OVERLAPS_B are initialized to the functions that describe the
   relation between the elements accessed twice by CHREC_A and
   CHREC_B.  For k >= 0, the following property is verified:

   CHREC_A (*OVERLAPS_A (k)) = CHREC_B (*OVERLAPS_B (k)).  */

static void
analyze_ziv_subscript (tree chrec_a,
                       tree chrec_b,
                       conflict_function **overlaps_a,
                       conflict_function **overlaps_b,
                       tree *last_conflicts)
{
  tree type, difference;
  dependence_stats.num_ziv++;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "(analyze_ziv_subscript \n");

  /* Fold the difference of the two accesses in a common signed type.  */
  type = signed_type_for_types (TREE_TYPE (chrec_a), TREE_TYPE (chrec_b));
  chrec_a = chrec_convert (type, chrec_a, NULL);
  chrec_b = chrec_convert (type, chrec_b, NULL);
  difference = chrec_fold_minus (type, chrec_a, chrec_b);

  switch (TREE_CODE (difference))
    {
    case INTEGER_CST:
      if (integer_zerop (difference))
	{
	  /* The difference is equal to zero: the accessed index
	     overlaps for each iteration in the loop.  */
	  *overlaps_a = conflict_fn (1, affine_fn_cst (integer_zero_node));
	  *overlaps_b = conflict_fn (1, affine_fn_cst (integer_zero_node));
	  *last_conflicts = chrec_dont_know;
	  dependence_stats.num_ziv_dependent++;
	}
      else
	{
	  /* The accesses do not overlap.  */
	  *overlaps_a = conflict_fn_no_dependence ();
	  *overlaps_b = conflict_fn_no_dependence ();
	  *last_conflicts = integer_zero_node;
	  dependence_stats.num_ziv_independent++;
	}
      break;

    default:
      /* We're not sure whether the indexes overlap.  For the moment,
	 conservatively answer "don't know".  */
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "ziv test failed: difference is non-integer.\n");

      *overlaps_a = conflict_fn_not_known ();
      *overlaps_b = conflict_fn_not_known ();
      *last_conflicts = chrec_dont_know;
      dependence_stats.num_ziv_unimplemented++;
      break;
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, ")\n");
}
1808
1809 /* Similar to max_stmt_executions_int, but returns the bound as a tree,
1810 and only if it fits to the int type. If this is not the case, or the
1811 bound on the number of iterations of LOOP could not be derived, returns
1812 chrec_dont_know. */
1813
1814 static tree
max_stmt_executions_tree(struct loop * loop)1815 max_stmt_executions_tree (struct loop *loop)
1816 {
1817 widest_int nit;
1818
1819 if (!max_stmt_executions (loop, &nit))
1820 return chrec_dont_know;
1821
1822 if (!wi::fits_to_tree_p (nit, unsigned_type_node))
1823 return chrec_dont_know;
1824
1825 return wide_int_to_tree (unsigned_type_node, nit);
1826 }
1827
1828 /* Determine whether the CHREC is always positive/negative. If the expression
1829 cannot be statically analyzed, return false, otherwise set the answer into
1830 VALUE. */
1831
1832 static bool
chrec_is_positive(tree chrec,bool * value)1833 chrec_is_positive (tree chrec, bool *value)
1834 {
1835 bool value0, value1, value2;
1836 tree end_value, nb_iter;
1837
1838 switch (TREE_CODE (chrec))
1839 {
1840 case POLYNOMIAL_CHREC:
1841 if (!chrec_is_positive (CHREC_LEFT (chrec), &value0)
1842 || !chrec_is_positive (CHREC_RIGHT (chrec), &value1))
1843 return false;
1844
1845 /* FIXME -- overflows. */
1846 if (value0 == value1)
1847 {
1848 *value = value0;
1849 return true;
1850 }
1851
1852 /* Otherwise the chrec is under the form: "{-197, +, 2}_1",
1853 and the proof consists in showing that the sign never
1854 changes during the execution of the loop, from 0 to
1855 loop->nb_iterations. */
1856 if (!evolution_function_is_affine_p (chrec))
1857 return false;
1858
1859 nb_iter = number_of_latch_executions (get_chrec_loop (chrec));
1860 if (chrec_contains_undetermined (nb_iter))
1861 return false;
1862
1863 #if 0
1864 /* TODO -- If the test is after the exit, we may decrease the number of
1865 iterations by one. */
1866 if (after_exit)
1867 nb_iter = chrec_fold_minus (type, nb_iter, build_int_cst (type, 1));
1868 #endif
1869
1870 end_value = chrec_apply (CHREC_VARIABLE (chrec), chrec, nb_iter);
1871
1872 if (!chrec_is_positive (end_value, &value2))
1873 return false;
1874
1875 *value = value0;
1876 return value0 == value1;
1877
1878 case INTEGER_CST:
1879 switch (tree_int_cst_sgn (chrec))
1880 {
1881 case -1:
1882 *value = false;
1883 break;
1884 case 1:
1885 *value = true;
1886 break;
1887 default:
1888 return false;
1889 }
1890 return true;
1891
1892 default:
1893 return false;
1894 }
1895 }
1896
1897
/* Analyze a SIV (Single Index Variable) subscript where CHREC_A is a
   constant, and CHREC_B is an affine function.  *OVERLAPS_A and
   *OVERLAPS_B are initialized to the functions that describe the
   relation between the elements accessed twice by CHREC_A and
   CHREC_B.  For k >= 0, the following property is verified:

   CHREC_A (*OVERLAPS_A (k)) = CHREC_B (*OVERLAPS_B (k)).  */

static void
analyze_siv_subscript_cst_affine (tree chrec_a,
                                  tree chrec_b,
                                  conflict_function **overlaps_a,
                                  conflict_function **overlaps_b,
                                  tree *last_conflicts)
{
  bool value0, value1, value2;
  tree type, difference, tmp;

  /* Work in a common signed type and compute the difference between
     the base of CHREC_B and the constant CHREC_A.  */
  type = signed_type_for_types (TREE_TYPE (chrec_a), TREE_TYPE (chrec_b));
  chrec_a = chrec_convert (type, chrec_a, NULL);
  chrec_b = chrec_convert (type, chrec_b, NULL);
  difference = chrec_fold_minus (type, initial_condition (chrec_b), chrec_a);

  /* Special case overlap in the first iteration.  */
  if (integer_zerop (difference))
    {
      *overlaps_a = conflict_fn (1, affine_fn_cst (integer_zero_node));
      *overlaps_b = conflict_fn (1, affine_fn_cst (integer_zero_node));
      *last_conflicts = integer_one_node;
      return;
    }

  /* VALUE0: sign of the difference; if unknown, the test gives up.  */
  if (!chrec_is_positive (initial_condition (difference), &value0))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "siv test failed: chrec is not positive.\n");

      dependence_stats.num_siv_unimplemented++;
      *overlaps_a = conflict_fn_not_known ();
      *overlaps_b = conflict_fn_not_known ();
      *last_conflicts = chrec_dont_know;
      return;
    }
  else
    {
      if (value0 == false)
	{
	  /* CHREC_B starts below CHREC_A; it can only catch up if its
	     step (VALUE1) is positive.  */
	  if (!chrec_is_positive (CHREC_RIGHT (chrec_b), &value1))
	    {
	      if (dump_file && (dump_flags & TDF_DETAILS))
		fprintf (dump_file, "siv test failed: chrec not positive.\n");

	      *overlaps_a = conflict_fn_not_known ();
	      *overlaps_b = conflict_fn_not_known ();
	      *last_conflicts = chrec_dont_know;
	      dependence_stats.num_siv_unimplemented++;
	      return;
	    }
	  else
	    {
	      if (value1 == true)
		{
		  /* Example:
		     chrec_a = 12
		     chrec_b = {10, +, 1}
		  */

		  if (tree_fold_divides_p (CHREC_RIGHT (chrec_b), difference))
		    {
		      HOST_WIDE_INT numiter;
		      struct loop *loop = get_chrec_loop (chrec_b);

		      /* The overlap happens at iteration
			 |difference| / step of CHREC_B's loop.  */
		      *overlaps_a = conflict_fn (1, affine_fn_cst (integer_zero_node));
		      tmp = fold_build2 (EXACT_DIV_EXPR, type,
					 fold_build1 (ABS_EXPR, type, difference),
					 CHREC_RIGHT (chrec_b));
		      *overlaps_b = conflict_fn (1, affine_fn_cst (tmp));
		      *last_conflicts = integer_one_node;


		      /* Perform weak-zero siv test to see if overlap is
			 outside the loop bounds.  */
		      numiter = max_stmt_executions_int (loop);

		      if (numiter >= 0
			  && compare_tree_int (tmp, numiter) > 0)
			{
			  free_conflict_function (*overlaps_a);
			  free_conflict_function (*overlaps_b);
			  *overlaps_a = conflict_fn_no_dependence ();
			  *overlaps_b = conflict_fn_no_dependence ();
			  *last_conflicts = integer_zero_node;
			  dependence_stats.num_siv_independent++;
			  return;
			}
		      dependence_stats.num_siv_dependent++;
		      return;
		    }

		  /* When the step does not divide the difference, there are
		     no overlaps.  */
		  else
		    {
		      *overlaps_a = conflict_fn_no_dependence ();
		      *overlaps_b = conflict_fn_no_dependence ();
		      *last_conflicts = integer_zero_node;
		      dependence_stats.num_siv_independent++;
		      return;
		    }
		}

	      else
		{
		  /* Example:
		     chrec_a = 12
		     chrec_b = {10, +, -1}

		     In this case, chrec_a will not overlap with chrec_b.  */
		  *overlaps_a = conflict_fn_no_dependence ();
		  *overlaps_b = conflict_fn_no_dependence ();
		  *last_conflicts = integer_zero_node;
		  dependence_stats.num_siv_independent++;
		  return;
		}
	    }
	}
      else
	{
	  /* CHREC_B starts above CHREC_A; an overlap needs a negative
	     step (VALUE2 false).  */
	  if (!chrec_is_positive (CHREC_RIGHT (chrec_b), &value2))
	    {
	      if (dump_file && (dump_flags & TDF_DETAILS))
		fprintf (dump_file, "siv test failed: chrec not positive.\n");

	      *overlaps_a = conflict_fn_not_known ();
	      *overlaps_b = conflict_fn_not_known ();
	      *last_conflicts = chrec_dont_know;
	      dependence_stats.num_siv_unimplemented++;
	      return;
	    }
	  else
	    {
	      if (value2 == false)
		{
		  /* Example:
		     chrec_a = 3
		     chrec_b = {10, +, -1}
		  */
		  if (tree_fold_divides_p (CHREC_RIGHT (chrec_b), difference))
		    {
		      HOST_WIDE_INT numiter;
		      struct loop *loop = get_chrec_loop (chrec_b);

		      *overlaps_a = conflict_fn (1, affine_fn_cst (integer_zero_node));
		      tmp = fold_build2 (EXACT_DIV_EXPR, type, difference,
					 CHREC_RIGHT (chrec_b));
		      *overlaps_b = conflict_fn (1, affine_fn_cst (tmp));
		      *last_conflicts = integer_one_node;

		      /* Perform weak-zero siv test to see if overlap is
			 outside the loop bounds.  */
		      numiter = max_stmt_executions_int (loop);

		      if (numiter >= 0
			  && compare_tree_int (tmp, numiter) > 0)
			{
			  free_conflict_function (*overlaps_a);
			  free_conflict_function (*overlaps_b);
			  *overlaps_a = conflict_fn_no_dependence ();
			  *overlaps_b = conflict_fn_no_dependence ();
			  *last_conflicts = integer_zero_node;
			  dependence_stats.num_siv_independent++;
			  return;
			}
		      dependence_stats.num_siv_dependent++;
		      return;
		    }

		  /* When the step does not divide the difference, there
		     are no overlaps.  */
		  else
		    {
		      *overlaps_a = conflict_fn_no_dependence ();
		      *overlaps_b = conflict_fn_no_dependence ();
		      *last_conflicts = integer_zero_node;
		      dependence_stats.num_siv_independent++;
		      return;
		    }
		}
	      else
		{
		  /* Example:
		     chrec_a = 3
		     chrec_b = {4, +, 1}

		     In this case, chrec_a will not overlap with chrec_b.  */
		  *overlaps_a = conflict_fn_no_dependence ();
		  *overlaps_b = conflict_fn_no_dependence ();
		  *last_conflicts = integer_zero_node;
		  dependence_stats.num_siv_independent++;
		  return;
		}
	    }
	}
    }
}
2103
/* Helper recursive function for initializing the matrix A.  Walks
   CHREC, storing MULT times each step into row INDEX and deeper rows
   of A, and returns the initial (loop-invariant) value of CHREC.  */

static tree
initialize_matrix_A (lambda_matrix A, tree chrec, unsigned index, int mult)
{
  gcc_assert (chrec);

  switch (TREE_CODE (chrec))
    {
    case POLYNOMIAL_CHREC:
      /* Record the step in this dimension and recurse on the left
	 (initial) part, which fills the next row.  */
      A[index][0] = mult * int_cst_value (CHREC_RIGHT (chrec));
      return initialize_matrix_A (A, CHREC_LEFT (chrec), index + 1, mult);

    case PLUS_EXPR:
    case MULT_EXPR:
    case MINUS_EXPR:
      {
	tree op0 = initialize_matrix_A (A, TREE_OPERAND (chrec, 0), index, mult);
	tree op1 = initialize_matrix_A (A, TREE_OPERAND (chrec, 1), index, mult);

	return chrec_fold_op (TREE_CODE (chrec), chrec_type (chrec), op0, op1);
      }

    CASE_CONVERT:
      {
	tree op = initialize_matrix_A (A, TREE_OPERAND (chrec, 0), index, mult);
	return chrec_convert (chrec_type (chrec), op, NULL);
      }

    case BIT_NOT_EXPR:
      {
	/* Handle ~X as -1 - X.  */
	tree op = initialize_matrix_A (A, TREE_OPERAND (chrec, 0), index, mult);
	return chrec_fold_op (MINUS_EXPR, chrec_type (chrec),
			      build_int_cst (TREE_TYPE (chrec), -1), op);
      }

    case INTEGER_CST:
      return chrec;

    default:
      gcc_unreachable ();
      return NULL_TREE;
    }
}
2150
2151 #define FLOOR_DIV(x,y) ((x) / (y))
2152
2153 /* Solves the special case of the Diophantine equation:
2154 | {0, +, STEP_A}_x (OVERLAPS_A) = {0, +, STEP_B}_y (OVERLAPS_B)
2155
2156 Computes the descriptions OVERLAPS_A and OVERLAPS_B. NITER is the
2157 number of iterations that loops X and Y run. The overlaps will be
2158 constructed as evolutions in dimension DIM. */
2159
2160 static void
compute_overlap_steps_for_affine_univar(int niter,int step_a,int step_b,affine_fn * overlaps_a,affine_fn * overlaps_b,tree * last_conflicts,int dim)2161 compute_overlap_steps_for_affine_univar (int niter, int step_a, int step_b,
2162 affine_fn *overlaps_a,
2163 affine_fn *overlaps_b,
2164 tree *last_conflicts, int dim)
2165 {
2166 if (((step_a > 0 && step_b > 0)
2167 || (step_a < 0 && step_b < 0)))
2168 {
2169 int step_overlaps_a, step_overlaps_b;
2170 int gcd_steps_a_b, last_conflict, tau2;
2171
2172 gcd_steps_a_b = gcd (step_a, step_b);
2173 step_overlaps_a = step_b / gcd_steps_a_b;
2174 step_overlaps_b = step_a / gcd_steps_a_b;
2175
2176 if (niter > 0)
2177 {
2178 tau2 = FLOOR_DIV (niter, step_overlaps_a);
2179 tau2 = MIN (tau2, FLOOR_DIV (niter, step_overlaps_b));
2180 last_conflict = tau2;
2181 *last_conflicts = build_int_cst (NULL_TREE, last_conflict);
2182 }
2183 else
2184 *last_conflicts = chrec_dont_know;
2185
2186 *overlaps_a = affine_fn_univar (integer_zero_node, dim,
2187 build_int_cst (NULL_TREE,
2188 step_overlaps_a));
2189 *overlaps_b = affine_fn_univar (integer_zero_node, dim,
2190 build_int_cst (NULL_TREE,
2191 step_overlaps_b));
2192 }
2193
2194 else
2195 {
2196 *overlaps_a = affine_fn_cst (integer_zero_node);
2197 *overlaps_b = affine_fn_cst (integer_zero_node);
2198 *last_conflicts = integer_zero_node;
2199 }
2200 }
2201
/* Solves the special case of a Diophantine equation where CHREC_A is
   an affine bivariate function, and CHREC_B is an affine univariate
   function.  For example,

   | {{0, +, 1}_x, +, 1335}_y = {0, +, 1336}_z

   has the following overlapping functions:

   | x (t, u, v) = {{0, +, 1336}_t, +, 1}_v
   | y (t, u, v) = {{0, +, 1336}_u, +, 1}_v
   | z (t, u, v) = {{{0, +, 1}_t, +, 1335}_u, +, 1}_v

   FORNOW: This is a specialized implementation for a case occurring in
   a common benchmark.  Implement the general algorithm.  */

static void
compute_overlap_steps_for_affine_1_2 (tree chrec_a, tree chrec_b,
				      conflict_function **overlaps_a,
				      conflict_function **overlaps_b,
				      tree *last_conflicts)
{
  bool xz_p, yz_p, xyz_p;
  int step_x, step_y, step_z;
  HOST_WIDE_INT niter_x, niter_y, niter_z, niter;
  affine_fn overlaps_a_xz, overlaps_b_xz;
  affine_fn overlaps_a_yz, overlaps_b_yz;
  affine_fn overlaps_a_xyz, overlaps_b_xyz;
  affine_fn ova1, ova2, ovb;
  tree last_conflicts_xz, last_conflicts_yz, last_conflicts_xyz;

  /* X is the inner dimension of the bivariate CHREC_A, Y its outer
     dimension, and Z the dimension of the univariate CHREC_B.  */
  step_x = int_cst_value (CHREC_RIGHT (CHREC_LEFT (chrec_a)));
  step_y = int_cst_value (CHREC_RIGHT (chrec_a));
  step_z = int_cst_value (CHREC_RIGHT (chrec_b));

  niter_x = max_stmt_executions_int (get_chrec_loop (CHREC_LEFT (chrec_a)));
  niter_y = max_stmt_executions_int (get_chrec_loop (chrec_a));
  niter_z = max_stmt_executions_int (get_chrec_loop (chrec_b));

  /* A negative count means the number of executions is unknown; give
     up in that case.  */
  if (niter_x < 0 || niter_y < 0 || niter_z < 0)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "overlap steps test failed: no iteration counts.\n");

      *overlaps_a = conflict_fn_not_known ();
      *overlaps_b = conflict_fn_not_known ();
      *last_conflicts = chrec_dont_know;
      return;
    }

  /* Solve the three pairwise univariate sub-problems: X against Z,
     Y against Z, and the combined X+Y against Z.  */
  niter = MIN (niter_x, niter_z);
  compute_overlap_steps_for_affine_univar (niter, step_x, step_z,
					   &overlaps_a_xz,
					   &overlaps_b_xz,
					   &last_conflicts_xz, 1);
  niter = MIN (niter_y, niter_z);
  compute_overlap_steps_for_affine_univar (niter, step_y, step_z,
					   &overlaps_a_yz,
					   &overlaps_b_yz,
					   &last_conflicts_yz, 2);
  niter = MIN (niter_x, niter_z);
  niter = MIN (niter_y, niter);
  compute_overlap_steps_for_affine_univar (niter, step_x + step_y, step_z,
					   &overlaps_a_xyz,
					   &overlaps_b_xyz,
					   &last_conflicts_xyz, 3);

  /* A sub-problem contributes only when it found at least one
     conflicting iteration.  */
  xz_p = !integer_zerop (last_conflicts_xz);
  yz_p = !integer_zerop (last_conflicts_yz);
  xyz_p = !integer_zerop (last_conflicts_xyz);

  if (xz_p || yz_p || xyz_p)
    {
      /* Sum up the contributing overlap functions; the freed
	 temporaries are the pre-addition values of the accumulators.  */
      ova1 = affine_fn_cst (integer_zero_node);
      ova2 = affine_fn_cst (integer_zero_node);
      ovb = affine_fn_cst (integer_zero_node);
      if (xz_p)
	{
	  affine_fn t0 = ova1;
	  affine_fn t2 = ovb;

	  ova1 = affine_fn_plus (ova1, overlaps_a_xz);
	  ovb = affine_fn_plus (ovb, overlaps_b_xz);
	  affine_fn_free (t0);
	  affine_fn_free (t2);
	  *last_conflicts = last_conflicts_xz;
	}
      if (yz_p)
	{
	  affine_fn t0 = ova2;
	  affine_fn t2 = ovb;

	  ova2 = affine_fn_plus (ova2, overlaps_a_yz);
	  ovb = affine_fn_plus (ovb, overlaps_b_yz);
	  affine_fn_free (t0);
	  affine_fn_free (t2);
	  *last_conflicts = last_conflicts_yz;
	}
      if (xyz_p)
	{
	  affine_fn t0 = ova1;
	  affine_fn t2 = ova2;
	  affine_fn t4 = ovb;

	  ova1 = affine_fn_plus (ova1, overlaps_a_xyz);
	  ova2 = affine_fn_plus (ova2, overlaps_a_xyz);
	  ovb = affine_fn_plus (ovb, overlaps_b_xyz);
	  affine_fn_free (t0);
	  affine_fn_free (t2);
	  affine_fn_free (t4);
	  *last_conflicts = last_conflicts_xyz;
	}
      /* Ownership of ova1/ova2/ovb transfers to the conflict
	 functions.  */
      *overlaps_a = conflict_fn (2, ova1, ova2);
      *overlaps_b = conflict_fn (1, ovb);
    }
  else
    {
      *overlaps_a = conflict_fn (1, affine_fn_cst (integer_zero_node));
      *overlaps_b = conflict_fn (1, affine_fn_cst (integer_zero_node));
      *last_conflicts = integer_zero_node;
    }

  /* The per-sub-problem overlap functions were copied by
     affine_fn_plus above; release them.  */
  affine_fn_free (overlaps_a_xz);
  affine_fn_free (overlaps_b_xz);
  affine_fn_free (overlaps_a_yz);
  affine_fn_free (overlaps_b_yz);
  affine_fn_free (overlaps_a_xyz);
  affine_fn_free (overlaps_b_xyz);
}
2330
2331 /* Copy the elements of vector VEC1 with length SIZE to VEC2. */
2332
2333 static void
lambda_vector_copy(lambda_vector vec1,lambda_vector vec2,int size)2334 lambda_vector_copy (lambda_vector vec1, lambda_vector vec2,
2335 int size)
2336 {
2337 memcpy (vec2, vec1, size * sizeof (*vec1));
2338 }
2339
2340 /* Copy the elements of M x N matrix MAT1 to MAT2. */
2341
2342 static void
lambda_matrix_copy(lambda_matrix mat1,lambda_matrix mat2,int m,int n)2343 lambda_matrix_copy (lambda_matrix mat1, lambda_matrix mat2,
2344 int m, int n)
2345 {
2346 int i;
2347
2348 for (i = 0; i < m; i++)
2349 lambda_vector_copy (mat1[i], mat2[i], n);
2350 }
2351
2352 /* Store the N x N identity matrix in MAT. */
2353
2354 static void
lambda_matrix_id(lambda_matrix mat,int size)2355 lambda_matrix_id (lambda_matrix mat, int size)
2356 {
2357 int i, j;
2358
2359 for (i = 0; i < size; i++)
2360 for (j = 0; j < size; j++)
2361 mat[i][j] = (i == j) ? 1 : 0;
2362 }
2363
2364 /* Return the first nonzero element of vector VEC1 between START and N.
2365 We must have START <= N. Returns N if VEC1 is the zero vector. */
2366
2367 static int
lambda_vector_first_nz(lambda_vector vec1,int n,int start)2368 lambda_vector_first_nz (lambda_vector vec1, int n, int start)
2369 {
2370 int j = start;
2371 while (j < n && vec1[j] == 0)
2372 j++;
2373 return j;
2374 }
2375
2376 /* Add a multiple of row R1 of matrix MAT with N columns to row R2:
2377 R2 = R2 + CONST1 * R1. */
2378
2379 static void
lambda_matrix_row_add(lambda_matrix mat,int n,int r1,int r2,int const1)2380 lambda_matrix_row_add (lambda_matrix mat, int n, int r1, int r2, int const1)
2381 {
2382 int i;
2383
2384 if (const1 == 0)
2385 return;
2386
2387 for (i = 0; i < n; i++)
2388 mat[r2][i] += const1 * mat[r1][i];
2389 }
2390
2391 /* Multiply vector VEC1 of length SIZE by a constant CONST1,
2392 and store the result in VEC2. */
2393
2394 static void
lambda_vector_mult_const(lambda_vector vec1,lambda_vector vec2,int size,int const1)2395 lambda_vector_mult_const (lambda_vector vec1, lambda_vector vec2,
2396 int size, int const1)
2397 {
2398 int i;
2399
2400 if (const1 == 0)
2401 lambda_vector_clear (vec2, size);
2402 else
2403 for (i = 0; i < size; i++)
2404 vec2[i] = const1 * vec1[i];
2405 }
2406
2407 /* Negate vector VEC1 with length SIZE and store it in VEC2. */
2408
2409 static void
lambda_vector_negate(lambda_vector vec1,lambda_vector vec2,int size)2410 lambda_vector_negate (lambda_vector vec1, lambda_vector vec2,
2411 int size)
2412 {
2413 lambda_vector_mult_const (vec1, vec2, size, -1);
2414 }
2415
/* Negate row R1 of matrix MAT which has N columns.  */

static void
lambda_matrix_row_negate (lambda_matrix mat, int n, int r1)
{
  /* Negation is done in place: source and destination are the same
     row.  */
  lambda_vector_negate (mat[r1], mat[r1], n);
}
2423
2424 /* Return true if two vectors are equal. */
2425
2426 static bool
lambda_vector_equal(lambda_vector vec1,lambda_vector vec2,int size)2427 lambda_vector_equal (lambda_vector vec1, lambda_vector vec2, int size)
2428 {
2429 int i;
2430 for (i = 0; i < size; i++)
2431 if (vec1[i] != vec2[i])
2432 return false;
2433 return true;
2434 }
2435
/* Given an M x N integer matrix A, this function determines an M x
   M unimodular matrix U, and an M x N echelon matrix S such that
   "U.A = S".  This decomposition is also known as "right Hermite".

   Ref: Algorithm 2.1 page 33 in "Loop Transformations for
   Restructuring Compilers" Utpal Banerjee.  */

static void
lambda_matrix_right_hermite (lambda_matrix A, int m, int n,
			     lambda_matrix S, lambda_matrix U)
{
  int i, j, i0 = 0;

  /* Start with S = A and U = identity; every row operation applied to
     S below is mirrored on U, preserving the invariant U.A = S.  */
  lambda_matrix_copy (A, S, m, n);
  lambda_matrix_id (U, m);

  for (j = 0; j < n; j++)
    {
      if (lambda_vector_first_nz (S[j], m, i0) < m)
	{
	  ++i0;
	  /* Reduce the entries below the pivot to zero bottom-up,
	     using a Euclidean-style gcd reduction on each pair of
	     adjacent rows.  */
	  for (i = m - 1; i >= i0; i--)
	    {
	      while (S[i][j] != 0)
		{
		  int sigma, factor, a, b;

		  /* FACTOR is the quotient a/b truncated towards
		     zero, carrying the sign of a*b.  */
		  a = S[i-1][j];
		  b = S[i][j];
		  sigma = (a * b < 0) ? -1: 1;
		  a = abs (a);
		  b = abs (b);
		  factor = sigma * (a / b);

		  /* Subtract FACTOR times row I from row I-1, then
		     swap, shrinking |S[i][j]| each pass like the
		     Euclidean algorithm.  */
		  lambda_matrix_row_add (S, n, i, i-1, -factor);
		  std::swap (S[i], S[i-1]);

		  /* Mirror the same operations on U.  */
		  lambda_matrix_row_add (U, m, i, i-1, -factor);
		  std::swap (U[i], U[i-1]);
		}
	    }
	}
    }
}
2480
/* Determines the overlapping elements due to accesses CHREC_A and
   CHREC_B, that are affine functions.  This function cannot handle
   symbolic evolution functions, ie. when initial conditions are
   parameters, because it uses lambda matrices of integers.

   On output, *OVERLAPS_A and *OVERLAPS_B describe the conflicting
   iterations, and *LAST_CONFLICTS the last one (or chrec_dont_know
   when it cannot be determined).  */

static void
analyze_subscript_affine_affine (tree chrec_a,
				 tree chrec_b,
				 conflict_function **overlaps_a,
				 conflict_function **overlaps_b,
				 tree *last_conflicts)
{
  unsigned nb_vars_a, nb_vars_b, dim;
  HOST_WIDE_INT init_a, init_b, gamma, gcd_alpha_beta;
  lambda_matrix A, U, S;
  struct obstack scratch_obstack;

  if (eq_evolutions_p (chrec_a, chrec_b))
    {
      /* The accessed index overlaps for each iteration in the
	 loop.  */
      *overlaps_a = conflict_fn (1, affine_fn_cst (integer_zero_node));
      *overlaps_b = conflict_fn (1, affine_fn_cst (integer_zero_node));
      *last_conflicts = chrec_dont_know;
      return;
    }
  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "(analyze_subscript_affine_affine \n");

  /* For determining the initial intersection, we have to solve a
     Diophantine equation.  This is the most time consuming part.

     For answering to the question: "Is there a dependence?" we have
     to prove that there exists a solution to the Diophantine
     equation, and that the solution is in the iteration domain,
     i.e. the solution is positive or zero, and that the solution
     happens before the upper bound loop.nb_iterations.  Otherwise
     there is no dependence.  This function outputs a description of
     the iterations that hold the intersections.  */

  nb_vars_a = nb_vars_in_chrec (chrec_a);
  nb_vars_b = nb_vars_in_chrec (chrec_b);

  /* All scratch matrices live on this obstack and are released in one
     go at end_analyze_subs_aa.  */
  gcc_obstack_init (&scratch_obstack);

  /* A is a DIM x 1 column holding the steps of CHREC_A followed by
     the negated steps of CHREC_B (see the MULT arguments below); U
     and S are the factors of its right Hermite decomposition.  */
  dim = nb_vars_a + nb_vars_b;
  U = lambda_matrix_new (dim, dim, &scratch_obstack);
  A = lambda_matrix_new (dim, 1, &scratch_obstack);
  S = lambda_matrix_new (dim, 1, &scratch_obstack);

  init_a = int_cst_value (initialize_matrix_A (A, chrec_a, 0, 1));
  init_b = int_cst_value (initialize_matrix_A (A, chrec_b, nb_vars_a, -1));
  /* GAMMA is the difference of the initial values.  */
  gamma = init_b - init_a;

  /* Don't do all the hard work of solving the Diophantine equation
     when we already know the solution: for example,
     | {3, +, 1}_1
     | {3, +, 4}_2
     | gamma = 3 - 3 = 0.
     Then the first overlap occurs during the first iterations:
     | {3, +, 1}_1 ({0, +, 4}_x) = {3, +, 4}_2 ({0, +, 1}_x)
  */
  if (gamma == 0)
    {
      if (nb_vars_a == 1 && nb_vars_b == 1)
	{
	  HOST_WIDE_INT step_a, step_b;
	  HOST_WIDE_INT niter, niter_a, niter_b;
	  affine_fn ova, ovb;

	  niter_a = max_stmt_executions_int (get_chrec_loop (chrec_a));
	  niter_b = max_stmt_executions_int (get_chrec_loop (chrec_b));
	  niter = MIN (niter_a, niter_b);
	  step_a = int_cst_value (CHREC_RIGHT (chrec_a));
	  step_b = int_cst_value (CHREC_RIGHT (chrec_b));

	  compute_overlap_steps_for_affine_univar (niter, step_a, step_b,
						   &ova, &ovb,
						   last_conflicts, 1);
	  *overlaps_a = conflict_fn (1, ova);
	  *overlaps_b = conflict_fn (1, ovb);
	}

      else if (nb_vars_a == 2 && nb_vars_b == 1)
	compute_overlap_steps_for_affine_1_2
	  (chrec_a, chrec_b, overlaps_a, overlaps_b, last_conflicts);

      else if (nb_vars_a == 1 && nb_vars_b == 2)
	compute_overlap_steps_for_affine_1_2
	  (chrec_b, chrec_a, overlaps_b, overlaps_a, last_conflicts);

      else
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file, "affine-affine test failed: too many variables.\n");
	  *overlaps_a = conflict_fn_not_known ();
	  *overlaps_b = conflict_fn_not_known ();
	  *last_conflicts = chrec_dont_know;
	}
      goto end_analyze_subs_aa;
    }

  /* U.A = S */
  lambda_matrix_right_hermite (A, dim, 1, S, U);

  /* Normalize the pivot (the gcd of the steps) to be positive,
     adjusting U to keep U.A = S.  */
  if (S[0][0] < 0)
    {
      S[0][0] *= -1;
      lambda_matrix_row_negate (U, dim, 0);
    }
  gcd_alpha_beta = S[0][0];

  /* Something went wrong: for example in {1, +, 0}_5 vs. {0, +, 0}_5,
     but that is a quite strange case.  Instead of ICEing, answer
     don't know.  */
  if (gcd_alpha_beta == 0)
    {
      *overlaps_a = conflict_fn_not_known ();
      *overlaps_b = conflict_fn_not_known ();
      *last_conflicts = chrec_dont_know;
      goto end_analyze_subs_aa;
    }

  /* The classic "gcd-test".  */
  if (!int_divides_p (gcd_alpha_beta, gamma))
    {
      /* The "gcd-test" has determined that there is no integer
	 solution, i.e. there is no dependence.  */
      *overlaps_a = conflict_fn_no_dependence ();
      *overlaps_b = conflict_fn_no_dependence ();
      *last_conflicts = integer_zero_node;
    }

  /* Both access functions are univariate.  This includes SIV and MIV cases.  */
  else if (nb_vars_a == 1 && nb_vars_b == 1)
    {
      /* Both functions should have the same evolution sign.
	 A[1][0] holds the step of CHREC_B negated (MULT was -1), so
	 -A[1][0] is the original step of CHREC_B.  */
      if (((A[0][0] > 0 && -A[1][0] > 0)
	   || (A[0][0] < 0 && -A[1][0] < 0)))
	{
	  /* The solutions are given by:
	     |
	     | [GAMMA/GCD_ALPHA_BETA  t].[u11 u12]  = [x0]
	     |                           [u21 u22]    [y0]

	     For a given integer t.  Using the following variables,

	     | i0 = u11 * gamma / gcd_alpha_beta
	     | j0 = u12 * gamma / gcd_alpha_beta
	     | i1 = u21
	     | j1 = u22

	     the solutions are:

	     | x0 = i0 + i1 * t,
	     | y0 = j0 + j1 * t.  */
	  HOST_WIDE_INT i0, j0, i1, j1;

	  i0 = U[0][0] * gamma / gcd_alpha_beta;
	  j0 = U[0][1] * gamma / gcd_alpha_beta;
	  i1 = U[1][0];
	  j1 = U[1][1];

	  if ((i1 == 0 && i0 < 0)
	      || (j1 == 0 && j0 < 0))
	    {
	      /* There is no solution.
		 FIXME: The case "i0 > nb_iterations, j0 > nb_iterations"
		 falls in here, but for the moment we don't look at the
		 upper bound of the iteration domain.  */
	      *overlaps_a = conflict_fn_no_dependence ();
	      *overlaps_b = conflict_fn_no_dependence ();
	      *last_conflicts = integer_zero_node;
	      goto end_analyze_subs_aa;
	    }

	  if (i1 > 0 && j1 > 0)
	    {
	      HOST_WIDE_INT niter_a
		= max_stmt_executions_int (get_chrec_loop (chrec_a));
	      HOST_WIDE_INT niter_b
		= max_stmt_executions_int (get_chrec_loop (chrec_b));
	      HOST_WIDE_INT niter = MIN (niter_a, niter_b);

	      /* (X0, Y0) is a solution of the Diophantine equation:
		 "chrec_a (X0) = chrec_b (Y0)".  */
	      HOST_WIDE_INT tau1 = MAX (CEIL (-i0, i1),
					CEIL (-j0, j1));
	      HOST_WIDE_INT x0 = i1 * tau1 + i0;
	      HOST_WIDE_INT y0 = j1 * tau1 + j0;

	      /* (X1, Y1) is the smallest positive solution of the eq
		 "chrec_a (X1) = chrec_b (Y1)", i.e. this is where the
		 first conflict occurs.  */
	      HOST_WIDE_INT min_multiple = MIN (x0 / i1, y0 / j1);
	      HOST_WIDE_INT x1 = x0 - i1 * min_multiple;
	      HOST_WIDE_INT y1 = y0 - j1 * min_multiple;

	      if (niter > 0)
		{
		  HOST_WIDE_INT tau2 = MIN (FLOOR_DIV (niter_a - i0, i1),
					    FLOOR_DIV (niter_b - j0, j1));
		  HOST_WIDE_INT last_conflict = tau2 - (x1 - i0)/i1;

		  /* If the overlap occurs outside of the bounds of the
		     loop, there is no dependence.  */
		  if (x1 >= niter_a || y1 >= niter_b)
		    {
		      *overlaps_a = conflict_fn_no_dependence ();
		      *overlaps_b = conflict_fn_no_dependence ();
		      *last_conflicts = integer_zero_node;
		      goto end_analyze_subs_aa;
		    }
		  else
		    *last_conflicts = build_int_cst (NULL_TREE, last_conflict);
		}
	      else
		*last_conflicts = chrec_dont_know;

	      /* The conflicting iterations form the arithmetic
		 progressions x1 + i1*k and y1 + j1*k.  */
	      *overlaps_a
		= conflict_fn (1,
			       affine_fn_univar (build_int_cst (NULL_TREE, x1),
						 1,
						 build_int_cst (NULL_TREE, i1)));
	      *overlaps_b
		= conflict_fn (1,
			       affine_fn_univar (build_int_cst (NULL_TREE, y1),
						 1,
						 build_int_cst (NULL_TREE, j1)));
	    }
	  else
	    {
	      /* FIXME: For the moment, the upper bound of the
		 iteration domain for i and j is not checked.  */
	      if (dump_file && (dump_flags & TDF_DETAILS))
		fprintf (dump_file, "affine-affine test failed: unimplemented.\n");
	      *overlaps_a = conflict_fn_not_known ();
	      *overlaps_b = conflict_fn_not_known ();
	      *last_conflicts = chrec_dont_know;
	    }
	}
      else
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file, "affine-affine test failed: unimplemented.\n");
	  *overlaps_a = conflict_fn_not_known ();
	  *overlaps_b = conflict_fn_not_known ();
	  *last_conflicts = chrec_dont_know;
	}
    }
  else
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "affine-affine test failed: unimplemented.\n");
      *overlaps_a = conflict_fn_not_known ();
      *overlaps_b = conflict_fn_not_known ();
      *last_conflicts = chrec_dont_know;
    }

end_analyze_subs_aa:
  obstack_free (&scratch_obstack, NULL);
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, " (overlaps_a = ");
      dump_conflict_function (dump_file, *overlaps_a);
      fprintf (dump_file, ")\n (overlaps_b = ");
      dump_conflict_function (dump_file, *overlaps_b);
      fprintf (dump_file, "))\n");
    }
}
2751
/* Returns true when analyze_subscript_affine_affine can be used for
   determining the dependence relation between chrec_a and chrec_b,
   that contain symbols.  This function modifies chrec_a and chrec_b
   such that the analysis result is the same, and such that they don't
   contain symbols, and then can safely be passed to the analyzer.

   Example: The analysis of the following tuples of evolutions produce
   the same results: {x+1, +, 1}_1 vs. {x+3, +, 1}_1, and {-2, +, 1}_1
   vs. {0, +, 1}_1

   {x+1, +, 1}_1 ({2, +, 1}_1) = {x+3, +, 1}_1 ({0, +, 1}_1)
   {-2, +, 1}_1 ({2, +, 1}_1) = {0, +, 1}_1 ({0, +, 1}_1)
*/

static bool
can_use_analyze_subscript_affine_affine (tree *chrec_a, tree *chrec_b)
{
  tree diff, type, left_a, left_b, right_b;

  /* Only the initial conditions may contain symbols; symbolic steps
     are not handled.  */
  if (chrec_contains_symbols (CHREC_RIGHT (*chrec_a))
      || chrec_contains_symbols (CHREC_RIGHT (*chrec_b)))
    /* FIXME: For the moment not handled.  Might be refined later.  */
    return false;

  type = chrec_type (*chrec_a);
  left_a = CHREC_LEFT (*chrec_a);
  left_b = chrec_convert (type, CHREC_LEFT (*chrec_b), NULL);
  diff = chrec_fold_minus (type, left_a, left_b);

  /* The transformation is only valid when the symbols cancel out,
     i.e. the difference of the initial conditions folds to a
     constant.  */
  if (!evolution_function_is_constant_p (diff))
    return false;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "can_use_subscript_aff_aff_for_symbolic \n");

  /* Rebase both chrecs in place: *CHREC_A starts at the constant
     difference, *CHREC_B at zero.  The relative analysis between
     the two is unchanged.  */
  *chrec_a = build_polynomial_chrec (CHREC_VARIABLE (*chrec_a),
				     diff, CHREC_RIGHT (*chrec_a));
  right_b = chrec_convert (type, CHREC_RIGHT (*chrec_b), NULL);
  *chrec_b = build_polynomial_chrec (CHREC_VARIABLE (*chrec_b),
				     build_int_cst (type, 0),
				     right_b);
  return true;
}
2795
/* Analyze a SIV (Single Index Variable) subscript.  *OVERLAPS_A and
   *OVERLAPS_B are initialized to the functions that describe the
   relation between the elements accessed twice by CHREC_A and
   CHREC_B.  For k >= 0, the following property is verified:

   CHREC_A (*OVERLAPS_A (k)) = CHREC_B (*OVERLAPS_B (k)).  */

static void
analyze_siv_subscript (tree chrec_a,
		       tree chrec_b,
		       conflict_function **overlaps_a,
		       conflict_function **overlaps_b,
		       tree *last_conflicts,
		       int loop_nest_num)
{
  dependence_stats.num_siv++;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "(analyze_siv_subscript \n");

  /* Constant vs. affine subscript.  */
  if (evolution_function_is_constant_p (chrec_a)
      && evolution_function_is_affine_in_loop (chrec_b, loop_nest_num))
    analyze_siv_subscript_cst_affine (chrec_a, chrec_b,
				      overlaps_a, overlaps_b, last_conflicts);

  /* Affine vs. constant: same test with the roles swapped.  */
  else if (evolution_function_is_affine_in_loop (chrec_a, loop_nest_num)
	   && evolution_function_is_constant_p (chrec_b))
    analyze_siv_subscript_cst_affine (chrec_b, chrec_a,
				      overlaps_b, overlaps_a, last_conflicts);

  /* Affine vs. affine.  */
  else if (evolution_function_is_affine_in_loop (chrec_a, loop_nest_num)
	   && evolution_function_is_affine_in_loop (chrec_b, loop_nest_num))
    {
      if (!chrec_contains_symbols (chrec_a)
	  && !chrec_contains_symbols (chrec_b))
	{
	  analyze_subscript_affine_affine (chrec_a, chrec_b,
					   overlaps_a, overlaps_b,
					   last_conflicts);

	  if (CF_NOT_KNOWN_P (*overlaps_a)
	      || CF_NOT_KNOWN_P (*overlaps_b))
	    dependence_stats.num_siv_unimplemented++;
	  else if (CF_NO_DEPENDENCE_P (*overlaps_a)
		   || CF_NO_DEPENDENCE_P (*overlaps_b))
	    dependence_stats.num_siv_independent++;
	  else
	    dependence_stats.num_siv_dependent++;
	}
      /* If the symbols cancel out, CHREC_A and CHREC_B are rewritten
	 in place to symbol-free equivalents and analyzed the same
	 way.  */
      else if (can_use_analyze_subscript_affine_affine (&chrec_a,
							&chrec_b))
	{
	  analyze_subscript_affine_affine (chrec_a, chrec_b,
					   overlaps_a, overlaps_b,
					   last_conflicts);

	  if (CF_NOT_KNOWN_P (*overlaps_a)
	      || CF_NOT_KNOWN_P (*overlaps_b))
	    dependence_stats.num_siv_unimplemented++;
	  else if (CF_NO_DEPENDENCE_P (*overlaps_a)
		   || CF_NO_DEPENDENCE_P (*overlaps_b))
	    dependence_stats.num_siv_independent++;
	  else
	    dependence_stats.num_siv_dependent++;
	}
      else
	goto siv_subscript_dontknow;
    }

  else
    {
      /* NB: the label below is also reached by the goto above, when
	 the symbolic affine-affine case cannot be simplified.  */
      siv_subscript_dontknow:;
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, " siv test failed: unimplemented");
      *overlaps_a = conflict_fn_not_known ();
      *overlaps_b = conflict_fn_not_known ();
      *last_conflicts = chrec_dont_know;
      dependence_stats.num_siv_unimplemented++;
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, ")\n");
}
2879
2880 /* Returns false if we can prove that the greatest common divisor of the steps
2881 of CHREC does not divide CST, false otherwise. */
2882
2883 static bool
gcd_of_steps_may_divide_p(const_tree chrec,const_tree cst)2884 gcd_of_steps_may_divide_p (const_tree chrec, const_tree cst)
2885 {
2886 HOST_WIDE_INT cd = 0, val;
2887 tree step;
2888
2889 if (!tree_fits_shwi_p (cst))
2890 return true;
2891 val = tree_to_shwi (cst);
2892
2893 while (TREE_CODE (chrec) == POLYNOMIAL_CHREC)
2894 {
2895 step = CHREC_RIGHT (chrec);
2896 if (!tree_fits_shwi_p (step))
2897 return true;
2898 cd = gcd (cd, tree_to_shwi (step));
2899 chrec = CHREC_LEFT (chrec);
2900 }
2901
2902 return val % cd == 0;
2903 }
2904
/* Analyze a MIV (Multiple Index Variable) subscript with respect to
   LOOP_NEST.  *OVERLAPS_A and *OVERLAPS_B are initialized to the
   functions that describe the relation between the elements accessed
   twice by CHREC_A and CHREC_B.  For k >= 0, the following property
   is verified:

   CHREC_A (*OVERLAPS_A (k)) = CHREC_B (*OVERLAPS_B (k)).  */

static void
analyze_miv_subscript (tree chrec_a,
		       tree chrec_b,
		       conflict_function **overlaps_a,
		       conflict_function **overlaps_b,
		       tree *last_conflicts,
		       struct loop *loop_nest)
{
  tree type, difference;

  dependence_stats.num_miv++;
  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "(analyze_miv_subscript \n");

  /* Convert both chrecs to a common signed type so their difference
     can be folded.  */
  type = signed_type_for_types (TREE_TYPE (chrec_a), TREE_TYPE (chrec_b));
  chrec_a = chrec_convert (type, chrec_a, NULL);
  chrec_b = chrec_convert (type, chrec_b, NULL);
  difference = chrec_fold_minus (type, chrec_a, chrec_b);

  if (eq_evolutions_p (chrec_a, chrec_b))
    {
      /* Access functions are the same: all the elements are accessed
	 in the same order.  */
      *overlaps_a = conflict_fn (1, affine_fn_cst (integer_zero_node));
      *overlaps_b = conflict_fn (1, affine_fn_cst (integer_zero_node));
      *last_conflicts = max_stmt_executions_tree (get_chrec_loop (chrec_a));
      dependence_stats.num_miv_dependent++;
    }

  /* Constant difference not divisible by the gcd of the steps:
     the classic gcd-test proves independence.  */
  else if (evolution_function_is_constant_p (difference)
	   /* For the moment, the following is verified:
	      evolution_function_is_affine_multivariate_p (chrec_a,
	      loop_nest->num) */
	   && !gcd_of_steps_may_divide_p (chrec_a, difference))
    {
      /* testsuite/.../ssa-chrec-33.c
	 {{21, +, 2}_1, +, -2}_2 vs. {{20, +, 2}_1, +, -2}_2

	 The difference is 1, and all the evolution steps are multiples
	 of 2, consequently there are no overlapping elements.  */
      *overlaps_a = conflict_fn_no_dependence ();
      *overlaps_b = conflict_fn_no_dependence ();
      *last_conflicts = integer_zero_node;
      dependence_stats.num_miv_independent++;
    }

  /* Both chrecs affine multivariate and symbol-free: solve the full
     affine-affine problem.  */
  else if (evolution_function_is_affine_multivariate_p (chrec_a, loop_nest->num)
	   && !chrec_contains_symbols (chrec_a)
	   && evolution_function_is_affine_multivariate_p (chrec_b, loop_nest->num)
	   && !chrec_contains_symbols (chrec_b))
    {
      /* testsuite/.../ssa-chrec-35.c
	 {0, +, 1}_2  vs.  {0, +, 1}_3
	 the overlapping elements are respectively located at iterations:
	 {0, +, 1}_x and {0, +, 1}_x,
	 in other words, we have the equality:
	 {0, +, 1}_2 ({0, +, 1}_x) = {0, +, 1}_3 ({0, +, 1}_x)

	 Other examples:
	 {{0, +, 1}_1, +, 2}_2 ({0, +, 1}_x, {0, +, 1}_y) =
	 {0, +, 1}_1 ({{0, +, 1}_x, +, 2}_y)

	 {{0, +, 2}_1, +, 3}_2 ({0, +, 1}_y, {0, +, 1}_x) =
	 {{0, +, 3}_1, +, 2}_2 ({0, +, 1}_x, {0, +, 1}_y)
      */
      analyze_subscript_affine_affine (chrec_a, chrec_b,
				       overlaps_a, overlaps_b, last_conflicts);

      if (CF_NOT_KNOWN_P (*overlaps_a)
	  || CF_NOT_KNOWN_P (*overlaps_b))
	dependence_stats.num_miv_unimplemented++;
      else if (CF_NO_DEPENDENCE_P (*overlaps_a)
	       || CF_NO_DEPENDENCE_P (*overlaps_b))
	dependence_stats.num_miv_independent++;
      else
	dependence_stats.num_miv_dependent++;
    }

  else
    {
      /* When the analysis is too difficult, answer "don't know".  */
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "analyze_miv_subscript test failed: unimplemented.\n");

      *overlaps_a = conflict_fn_not_known ();
      *overlaps_b = conflict_fn_not_known ();
      *last_conflicts = chrec_dont_know;
      dependence_stats.num_miv_unimplemented++;
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, ")\n");
}
3006
/* Determines the iterations for which CHREC_A is equal to CHREC_B
   with respect to LOOP_NEST.  OVERLAP_ITERATIONS_A and
   OVERLAP_ITERATIONS_B are initialized with two functions that
   describe the iterations that contain conflicting elements.

   Remark: For an integer k >= 0, the following equality is true:

   CHREC_A (OVERLAP_ITERATIONS_A (k)) == CHREC_B (OVERLAP_ITERATIONS_B (k)).
*/

static void
analyze_overlapping_iterations (tree chrec_a,
				tree chrec_b,
				conflict_function **overlap_iterations_a,
				conflict_function **overlap_iterations_b,
				tree *last_conflicts, struct loop *loop_nest)
{
  unsigned int lnn = loop_nest->num;

  dependence_stats.num_subscript_tests++;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "(analyze_overlapping_iterations \n");
      fprintf (dump_file, " (chrec_a = ");
      print_generic_expr (dump_file, chrec_a, 0);
      fprintf (dump_file, ")\n (chrec_b = ");
      print_generic_expr (dump_file, chrec_b, 0);
      fprintf (dump_file, ")\n");
    }

  /* Missing or undetermined evolutions: answer "don't know".  */
  if (chrec_a == NULL_TREE
      || chrec_b == NULL_TREE
      || chrec_contains_undetermined (chrec_a)
      || chrec_contains_undetermined (chrec_b))
    {
      dependence_stats.num_subscript_undetermined++;

      *overlap_iterations_a = conflict_fn_not_known ();
      *overlap_iterations_b = conflict_fn_not_known ();
    }

  /* If they are the same chrec, and are affine, they overlap
     on every iteration.  */
  else if (eq_evolutions_p (chrec_a, chrec_b)
	   && (evolution_function_is_affine_multivariate_p (chrec_a, lnn)
	       || operand_equal_p (chrec_a, chrec_b, 0)))
    {
      dependence_stats.num_same_subscript_function++;
      *overlap_iterations_a = conflict_fn (1, affine_fn_cst (integer_zero_node));
      *overlap_iterations_b = conflict_fn (1, affine_fn_cst (integer_zero_node));
      *last_conflicts = chrec_dont_know;
    }

  /* If they aren't the same, and aren't affine, we can't do anything
     yet.  */
  else if ((chrec_contains_symbols (chrec_a)
	    || chrec_contains_symbols (chrec_b))
	   && (!evolution_function_is_affine_multivariate_p (chrec_a, lnn)
	       || !evolution_function_is_affine_multivariate_p (chrec_b, lnn)))
    {
      dependence_stats.num_subscript_undetermined++;
      *overlap_iterations_a = conflict_fn_not_known ();
      *overlap_iterations_b = conflict_fn_not_known ();
    }

  /* Dispatch on the classification of the subscript pair: ZIV (zero
     index variables), SIV (single), or MIV (multiple).  */
  else if (ziv_subscript_p (chrec_a, chrec_b))
    analyze_ziv_subscript (chrec_a, chrec_b,
			   overlap_iterations_a, overlap_iterations_b,
			   last_conflicts);

  else if (siv_subscript_p (chrec_a, chrec_b))
    analyze_siv_subscript (chrec_a, chrec_b,
			   overlap_iterations_a, overlap_iterations_b,
			   last_conflicts, lnn);

  else
    analyze_miv_subscript (chrec_a, chrec_b,
			   overlap_iterations_a, overlap_iterations_b,
			   last_conflicts, loop_nest);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, " (overlap_iterations_a = ");
      dump_conflict_function (dump_file, *overlap_iterations_a);
      fprintf (dump_file, ")\n (overlap_iterations_b = ");
      dump_conflict_function (dump_file, *overlap_iterations_b);
      fprintf (dump_file, "))\n");
    }
}
3097
3098 /* Helper function for uniquely inserting distance vectors. */
3099
3100 static void
save_dist_v(struct data_dependence_relation * ddr,lambda_vector dist_v)3101 save_dist_v (struct data_dependence_relation *ddr, lambda_vector dist_v)
3102 {
3103 unsigned i;
3104 lambda_vector v;
3105
3106 FOR_EACH_VEC_ELT (DDR_DIST_VECTS (ddr), i, v)
3107 if (lambda_vector_equal (v, dist_v, DDR_NB_LOOPS (ddr)))
3108 return;
3109
3110 DDR_DIST_VECTS (ddr).safe_push (dist_v);
3111 }
3112
3113 /* Helper function for uniquely inserting direction vectors. */
3114
3115 static void
save_dir_v(struct data_dependence_relation * ddr,lambda_vector dir_v)3116 save_dir_v (struct data_dependence_relation *ddr, lambda_vector dir_v)
3117 {
3118 unsigned i;
3119 lambda_vector v;
3120
3121 FOR_EACH_VEC_ELT (DDR_DIR_VECTS (ddr), i, v)
3122 if (lambda_vector_equal (v, dir_v, DDR_NB_LOOPS (ddr)))
3123 return;
3124
3125 DDR_DIR_VECTS (ddr).safe_push (dir_v);
3126 }
3127
3128 /* Add a distance of 1 on all the loops outer than INDEX. If we
3129 haven't yet determined a distance for this outer loop, push a new
3130 distance vector composed of the previous distance, and a distance
3131 of 1 for this outer loop. Example:
3132
3133 | loop_1
3134 | loop_2
3135 | A[10]
3136 | endloop_2
3137 | endloop_1
3138
3139 Saved vectors are of the form (dist_in_1, dist_in_2). First, we
3140 save (0, 1), then we have to save (1, 0). */
3141
3142 static void
add_outer_distances(struct data_dependence_relation * ddr,lambda_vector dist_v,int index)3143 add_outer_distances (struct data_dependence_relation *ddr,
3144 lambda_vector dist_v, int index)
3145 {
3146 /* For each outer loop where init_v is not set, the accesses are
3147 in dependence of distance 1 in the loop. */
3148 while (--index >= 0)
3149 {
3150 lambda_vector save_v = lambda_vector_new (DDR_NB_LOOPS (ddr));
3151 lambda_vector_copy (dist_v, save_v, DDR_NB_LOOPS (ddr));
3152 save_v[index] = 1;
3153 save_dist_v (ddr, save_v);
3154 }
3155 }
3156
/* Return false when fail to represent the data dependence as a
   distance vector.  INIT_B is set to true when a component has been
   added to the distance vector DIST_V.  INDEX_CARRY is then set to
   the index in DIST_V that carries the dependence.

   DDR_A and DDR_B are the two data references of DDR, possibly
   swapped by the caller when looking for the lexicographically
   positive direction.  */

static bool
build_classic_dist_vector_1 (struct data_dependence_relation *ddr,
			     struct data_reference *ddr_a,
			     struct data_reference *ddr_b,
			     lambda_vector dist_v, bool *init_b,
			     int *index_carry)
{
  unsigned i;
  /* INIT_V[index] is nonzero once a distance has been recorded for the
     loop at INDEX; used for the subscript coupling test below.  */
  lambda_vector init_v = lambda_vector_new (DDR_NB_LOOPS (ddr));

  for (i = 0; i < DDR_NUM_SUBSCRIPTS (ddr); i++)
    {
      tree access_fn_a, access_fn_b;
      struct subscript *subscript = DDR_SUBSCRIPT (ddr, i);

      /* A subscript without a known constant distance cannot be
	 represented in a classic distance vector.  */
      if (chrec_contains_undetermined (SUB_DISTANCE (subscript)))
	{
	  non_affine_dependence_relation (ddr);
	  return false;
	}

      access_fn_a = DR_ACCESS_FN (ddr_a, i);
      access_fn_b = DR_ACCESS_FN (ddr_b, i);

      if (TREE_CODE (access_fn_a) == POLYNOMIAL_CHREC
	  && TREE_CODE (access_fn_b) == POLYNOMIAL_CHREC)
	{
	  int dist, index;
	  int var_a = CHREC_VARIABLE (access_fn_a);
	  int var_b = CHREC_VARIABLE (access_fn_b);

	  /* Both evolutions must be in the same loop to yield a single
	     per-loop distance.  */
	  if (var_a != var_b
	      || chrec_contains_undetermined (SUB_DISTANCE (subscript)))
	    {
	      non_affine_dependence_relation (ddr);
	      return false;
	    }

	  dist = int_cst_value (SUB_DISTANCE (subscript));
	  index = index_in_loop_nest (var_a, DDR_LOOP_NEST (ddr));
	  /* The carrying loop is the outermost one touched by any
	     subscript.  */
	  *index_carry = MIN (index, *index_carry);

	  /* This is the subscript coupling test.  If we have already
	     recorded a distance for this loop (a distance coming from
	     another subscript), it should be the same.  For example,
	     in the following code, there is no dependence:

	     | loop i = 0, N, 1
	     |   T[i+1][i] = ...
	     |   ... = T[i][i]
	     | endloop
	  */
	  if (init_v[index] != 0 && dist_v[index] != dist)
	    {
	      finalize_ddr_dependent (ddr, chrec_known);
	      return false;
	    }

	  dist_v[index] = dist;
	  init_v[index] = 1;
	  *init_b = true;
	}
      else if (!operand_equal_p (access_fn_a, access_fn_b, 0))
	{
	  /* This can be for example an affine vs. constant dependence
	     (T[i] vs. T[3]) that is not an affine dependence and is
	     not representable as a distance vector.  */
	  non_affine_dependence_relation (ddr);
	  return false;
	}
    }

  return true;
}
3236
3237 /* Return true when the DDR contains only constant access functions. */
3238
3239 static bool
constant_access_functions(const struct data_dependence_relation * ddr)3240 constant_access_functions (const struct data_dependence_relation *ddr)
3241 {
3242 unsigned i;
3243
3244 for (i = 0; i < DDR_NUM_SUBSCRIPTS (ddr); i++)
3245 if (!evolution_function_is_constant_p (DR_ACCESS_FN (DDR_A (ddr), i))
3246 || !evolution_function_is_constant_p (DR_ACCESS_FN (DDR_B (ddr), i)))
3247 return false;
3248
3249 return true;
3250 }
3251
/* Helper function for the case where DDR_A and DDR_B are the same
   multivariate access function with a constant step.  For an example
   see pr34635-1.c.

   C_2 is a two-level polynomial chrec of the form
   {{c_0, +, step_1}_x1, +, step_2}_x2.  */

static void
add_multivariate_self_dist (struct data_dependence_relation *ddr, tree c_2)
{
  int x_1, x_2;
  tree c_1 = CHREC_LEFT (c_2);
  tree c_0 = CHREC_LEFT (c_1);
  lambda_vector dist_v;
  int v1, v2, cd;

  /* Polynomials with more than 2 variables are not handled yet.  When
     the evolution steps are parameters, it is not possible to
     represent the dependence using classical distance vectors.  */
  if (TREE_CODE (c_0) != INTEGER_CST
      || TREE_CODE (CHREC_RIGHT (c_1)) != INTEGER_CST
      || TREE_CODE (CHREC_RIGHT (c_2)) != INTEGER_CST)
    {
      DDR_AFFINE_P (ddr) = false;
      return;
    }

  /* Positions of the two loops in the analyzed loop nest.  */
  x_2 = index_in_loop_nest (CHREC_VARIABLE (c_2), DDR_LOOP_NEST (ddr));
  x_1 = index_in_loop_nest (CHREC_VARIABLE (c_1), DDR_LOOP_NEST (ddr));

  /* For "{{0, +, 2}_1, +, 3}_2" the distance vector is (3, -2).  */
  dist_v = lambda_vector_new (DDR_NB_LOOPS (ddr));
  v1 = int_cst_value (CHREC_RIGHT (c_1));
  v2 = int_cst_value (CHREC_RIGHT (c_2));
  /* Reduce the distance pair by the GCD of the steps.  */
  cd = gcd (v1, v2);
  v1 /= cd;
  v2 /= cd;

  /* Normalize the sign so the recorded vector is lexicographically
     positive.  */
  if (v2 < 0)
    {
      v2 = -v2;
      v1 = -v1;
    }

  dist_v[x_1] = v2;
  dist_v[x_2] = -v1;
  save_dist_v (ddr, dist_v);

  add_outer_distances (ddr, dist_v, x_1);
}
3299
/* Helper function for the case where DDR_A and DDR_B are the same
   access functions.  Records the self distance vectors of DDR.  */

static void
add_other_self_distances (struct data_dependence_relation *ddr)
{
  lambda_vector dist_v;
  unsigned i;
  int index_carry = DDR_NB_LOOPS (ddr);

  for (i = 0; i < DDR_NUM_SUBSCRIPTS (ddr); i++)
    {
      tree access_fun = DR_ACCESS_FN (DDR_A (ddr), i);

      if (TREE_CODE (access_fun) == POLYNOMIAL_CHREC)
	{
	  if (!evolution_function_is_univariate_p (access_fun))
	    {
	      /* Multivariate evolutions are only handled for
		 single-subscript references.  */
	      if (DDR_NUM_SUBSCRIPTS (ddr) != 1)
		{
		  DDR_ARE_DEPENDENT (ddr) = chrec_dont_know;
		  return;
		}

	      access_fun = DR_ACCESS_FN (DDR_A (ddr), 0);

	      if (TREE_CODE (CHREC_LEFT (access_fun)) == POLYNOMIAL_CHREC)
		add_multivariate_self_dist (ddr, access_fun);
	      else
		/* The evolution step is not constant: it varies in
		   the outer loop, so this cannot be represented by a
		   distance vector.  For example in pr34635.c the
		   evolution is {0, +, {0, +, 4}_1}_2.  */
		DDR_AFFINE_P (ddr) = false;

	      return;
	    }

	  /* Track the outermost loop in which any subscript evolves.  */
	  index_carry = MIN (index_carry,
			     index_in_loop_nest (CHREC_VARIABLE (access_fun),
						 DDR_LOOP_NEST (ddr)));
	}
    }

  /* The zero vector plus a distance of 1 in each loop outer than the
     carrying one.  */
  dist_v = lambda_vector_new (DDR_NB_LOOPS (ddr));
  add_outer_distances (ddr, dist_v, index_carry);
}
3347
3348 static void
insert_innermost_unit_dist_vector(struct data_dependence_relation * ddr)3349 insert_innermost_unit_dist_vector (struct data_dependence_relation *ddr)
3350 {
3351 lambda_vector dist_v = lambda_vector_new (DDR_NB_LOOPS (ddr));
3352
3353 dist_v[DDR_INNER_LOOP (ddr)] = 1;
3354 save_dist_v (ddr, dist_v);
3355 }
3356
3357 /* Adds a unit distance vector to DDR when there is a 0 overlap. This
3358 is the case for example when access functions are the same and
3359 equal to a constant, as in:
3360
3361 | loop_1
3362 | A[3] = ...
3363 | ... = A[3]
3364 | endloop_1
3365
3366 in which case the distance vectors are (0) and (1). */
3367
3368 static void
add_distance_for_zero_overlaps(struct data_dependence_relation * ddr)3369 add_distance_for_zero_overlaps (struct data_dependence_relation *ddr)
3370 {
3371 unsigned i, j;
3372
3373 for (i = 0; i < DDR_NUM_SUBSCRIPTS (ddr); i++)
3374 {
3375 subscript_p sub = DDR_SUBSCRIPT (ddr, i);
3376 conflict_function *ca = SUB_CONFLICTS_IN_A (sub);
3377 conflict_function *cb = SUB_CONFLICTS_IN_B (sub);
3378
3379 for (j = 0; j < ca->n; j++)
3380 if (affine_function_zero_p (ca->fns[j]))
3381 {
3382 insert_innermost_unit_dist_vector (ddr);
3383 return;
3384 }
3385
3386 for (j = 0; j < cb->n; j++)
3387 if (affine_function_zero_p (cb->fns[j]))
3388 {
3389 insert_innermost_unit_dist_vector (ddr);
3390 return;
3391 }
3392 }
3393 }
3394
/* Compute the classic per loop distance vector.  DDR is the data
   dependence relation to build a vector from.  Return false when fail
   to represent the data dependence as a distance vector.  */

static bool
build_classic_dist_vector (struct data_dependence_relation *ddr,
			   struct loop *loop_nest)
{
  bool init_b = false;
  int index_carry = DDR_NB_LOOPS (ddr);
  lambda_vector dist_v;

  /* Nothing to do when the relation has already been decided.  */
  if (DDR_ARE_DEPENDENT (ddr) != NULL_TREE)
    return false;

  /* Self-dependence case: both references use the same access
     functions.  */
  if (same_access_functions (ddr))
    {
      /* Save the 0 vector.  */
      dist_v = lambda_vector_new (DDR_NB_LOOPS (ddr));
      save_dist_v (ddr, dist_v);

      if (constant_access_functions (ddr))
	add_distance_for_zero_overlaps (ddr);

      if (DDR_NB_LOOPS (ddr) > 1)
	add_other_self_distances (ddr);

      return true;
    }

  dist_v = lambda_vector_new (DDR_NB_LOOPS (ddr));
  if (!build_classic_dist_vector_1 (ddr, DDR_A (ddr), DDR_B (ddr),
				    dist_v, &init_b, &index_carry))
    return false;

  /* Save the distance vector if we initialized one.  */
  if (init_b)
    {
      /* Verify a basic constraint: classic distance vectors should
	 always be lexicographically positive.

	 Data references are collected in the order of execution of
	 the program, thus for the following loop

	 | for (i = 1; i < 100; i++)
	 |   for (j = 1; j < 100; j++)
	 |     {
	 |       t = T[j+1][i-1];  // A
	 |       T[j][i] = t + 2;  // B
	 |     }

	 references are collected following the direction of the wind:
	 A then B.  The data dependence tests are performed also
	 following this order, such that we're looking at the distance
	 separating the elements accessed by A from the elements later
	 accessed by B.  But in this example, the distance returned by
	 test_dep (A, B) is lexicographically negative (-1, 1), that
	 means that the access A occurs later than B with respect to
	 the outer loop, ie. we're actually looking upwind.  In this
	 case we solve test_dep (B, A) looking downwind to the
	 lexicographically positive solution, that returns the
	 distance vector (1, -1).  */
      if (!lambda_vector_lexico_pos (dist_v, DDR_NB_LOOPS (ddr)))
	{
	  lambda_vector save_v = lambda_vector_new (DDR_NB_LOOPS (ddr));
	  /* Re-run the subscript analysis with the references
	     swapped, then rebuild the distance vector downwind.  */
	  if (!subscript_dependence_tester_1 (ddr, DDR_B (ddr), DDR_A (ddr),
					      loop_nest))
	    return false;
	  compute_subscript_distance (ddr);
	  if (!build_classic_dist_vector_1 (ddr, DDR_B (ddr), DDR_A (ddr),
					    save_v, &init_b, &index_carry))
	    return false;
	  save_dist_v (ddr, save_v);
	  DDR_REVERSED_P (ddr) = true;

	  /* In this case there is a dependence forward for all the
	     outer loops:

	     | for (k = 1; k < 100; k++)
	     |  for (i = 1; i < 100; i++)
	     |   for (j = 1; j < 100; j++)
	     |     {
	     |       t = T[j+1][i-1];  // A
	     |       T[j][i] = t + 2;  // B
	     |     }

	     the vectors are:
	     (0,  1, -1)
	     (1,  1, -1)
	     (1, -1,  1)
	  */
	  if (DDR_NB_LOOPS (ddr) > 1)
	    {
	      add_outer_distances (ddr, save_v, index_carry);
	      add_outer_distances (ddr, dist_v, index_carry);
	    }
	}
      else
	{
	  lambda_vector save_v = lambda_vector_new (DDR_NB_LOOPS (ddr));
	  lambda_vector_copy (dist_v, save_v, DDR_NB_LOOPS (ddr));

	  if (DDR_NB_LOOPS (ddr) > 1)
	    {
	      /* Also record the opposite-direction vector obtained by
		 testing (B, A), plus the outer-loop distances for
		 both.  */
	      lambda_vector opposite_v = lambda_vector_new (DDR_NB_LOOPS (ddr));

	      if (!subscript_dependence_tester_1 (ddr, DDR_B (ddr),
						  DDR_A (ddr), loop_nest))
		return false;
	      compute_subscript_distance (ddr);
	      if (!build_classic_dist_vector_1 (ddr, DDR_B (ddr), DDR_A (ddr),
						opposite_v, &init_b,
						&index_carry))
		return false;

	      save_dist_v (ddr, save_v);
	      add_outer_distances (ddr, dist_v, index_carry);
	      add_outer_distances (ddr, opposite_v, index_carry);
	    }
	  else
	    save_dist_v (ddr, save_v);
	}
    }
  else
    {
      /* There is a distance of 1 on all the outer loops: Example:
	 there is a dependence of distance 1 on loop_1 for the array A.

	 | loop_1
	 |   A[5] = ...
	 | endloop
      */
      add_outer_distances (ddr, dist_v,
			   lambda_vector_first_nz (dist_v,
						   DDR_NB_LOOPS (ddr), 0));
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      unsigned i;

      fprintf (dump_file, "(build_classic_dist_vector\n");
      for (i = 0; i < DDR_NUM_DIST_VECTS (ddr); i++)
	{
	  fprintf (dump_file, "  dist_vector = (");
	  print_lambda_vector (dump_file, DDR_DIST_VECT (ddr, i),
			       DDR_NB_LOOPS (ddr));
	  fprintf (dump_file, "  )\n");
	}
      fprintf (dump_file, ")\n");
    }

  return true;
}
3549
3550 /* Return the direction for a given distance.
3551 FIXME: Computing dir this way is suboptimal, since dir can catch
3552 cases that dist is unable to represent. */
3553
3554 static inline enum data_dependence_direction
dir_from_dist(int dist)3555 dir_from_dist (int dist)
3556 {
3557 if (dist > 0)
3558 return dir_positive;
3559 else if (dist < 0)
3560 return dir_negative;
3561 else
3562 return dir_equal;
3563 }
3564
3565 /* Compute the classic per loop direction vector. DDR is the data
3566 dependence relation to build a vector from. */
3567
3568 static void
build_classic_dir_vector(struct data_dependence_relation * ddr)3569 build_classic_dir_vector (struct data_dependence_relation *ddr)
3570 {
3571 unsigned i, j;
3572 lambda_vector dist_v;
3573
3574 FOR_EACH_VEC_ELT (DDR_DIST_VECTS (ddr), i, dist_v)
3575 {
3576 lambda_vector dir_v = lambda_vector_new (DDR_NB_LOOPS (ddr));
3577
3578 for (j = 0; j < DDR_NB_LOOPS (ddr); j++)
3579 dir_v[j] = dir_from_dist (dist_v[j]);
3580
3581 save_dir_v (ddr, dir_v);
3582 }
3583 }
3584
/* Helper function.  Returns true when there is a dependence between
   data references DRA and DRB.  On independence or an undetermined
   result, finalizes DDR with chrec_known resp. chrec_dont_know and
   returns false.  */

static bool
subscript_dependence_tester_1 (struct data_dependence_relation *ddr,
			       struct data_reference *dra,
			       struct data_reference *drb,
			       struct loop *loop_nest)
{
  unsigned int i;
  tree last_conflicts;
  struct subscript *subscript;
  /* NULL_TREE while no subscript has been proven independent or
     undetermined.  */
  tree res = NULL_TREE;

  for (i = 0; DDR_SUBSCRIPTS (ddr).iterate (i, &subscript); i++)
    {
      conflict_function *overlaps_a, *overlaps_b;

      analyze_overlapping_iterations (DR_ACCESS_FN (dra, i),
				      DR_ACCESS_FN (drb, i),
				      &overlaps_a, &overlaps_b,
				      &last_conflicts, loop_nest);

      /* Release conflict functions from a previous run on this DDR
	 before overwriting them.  */
      if (SUB_CONFLICTS_IN_A (subscript))
	free_conflict_function (SUB_CONFLICTS_IN_A (subscript));
      if (SUB_CONFLICTS_IN_B (subscript))
	free_conflict_function (SUB_CONFLICTS_IN_B (subscript));

      SUB_CONFLICTS_IN_A (subscript) = overlaps_a;
      SUB_CONFLICTS_IN_B (subscript) = overlaps_b;
      SUB_LAST_CONFLICT (subscript) = last_conflicts;

      /* If there is any undetermined conflict function we have to
	 give a conservative answer in case we cannot prove that
	 no dependence exists when analyzing another subscript.  */
      if (CF_NOT_KNOWN_P (overlaps_a)
	  || CF_NOT_KNOWN_P (overlaps_b))
	{
	  res = chrec_dont_know;
	  continue;
	}

      /* When there is a subscript with no dependence we can stop.  */
      else if (CF_NO_DEPENDENCE_P (overlaps_a)
	       || CF_NO_DEPENDENCE_P (overlaps_b))
	{
	  res = chrec_known;
	  break;
	}
    }

  /* All subscripts conflict: the references are dependent.  */
  if (res == NULL_TREE)
    return true;

  if (res == chrec_known)
    dependence_stats.num_dependence_independent++;
  else
    dependence_stats.num_dependence_undetermined++;
  finalize_ddr_dependent (ddr, res);
  return false;
}
3646
3647 /* Computes the conflicting iterations in LOOP_NEST, and initialize DDR. */
3648
3649 static void
subscript_dependence_tester(struct data_dependence_relation * ddr,struct loop * loop_nest)3650 subscript_dependence_tester (struct data_dependence_relation *ddr,
3651 struct loop *loop_nest)
3652 {
3653 if (subscript_dependence_tester_1 (ddr, DDR_A (ddr), DDR_B (ddr), loop_nest))
3654 dependence_stats.num_dependence_dependent++;
3655
3656 compute_subscript_distance (ddr);
3657 if (build_classic_dist_vector (ddr, loop_nest))
3658 build_classic_dir_vector (ddr);
3659 }
3660
3661 /* Returns true when all the access functions of A are affine or
3662 constant with respect to LOOP_NEST. */
3663
3664 static bool
access_functions_are_affine_or_constant_p(const struct data_reference * a,const struct loop * loop_nest)3665 access_functions_are_affine_or_constant_p (const struct data_reference *a,
3666 const struct loop *loop_nest)
3667 {
3668 unsigned int i;
3669 vec<tree> fns = DR_ACCESS_FNS (a);
3670 tree t;
3671
3672 FOR_EACH_VEC_ELT (fns, i, t)
3673 if (!evolution_function_is_invariant_p (t, loop_nest->num)
3674 && !evolution_function_is_affine_multivariate_p (t, loop_nest->num))
3675 return false;
3676
3677 return true;
3678 }
3679
/* This computes the affine dependence relation between A and B with
   respect to LOOP_NEST.  CHREC_KNOWN is used for representing the
   independence between two accesses, while CHREC_DONT_KNOW is used
   for representing the unknown relation.

   Note that it is possible to stop the computation of the dependence
   relation the first time we detect a CHREC_KNOWN element for a given
   subscript.  */

void
compute_affine_dependence (struct data_dependence_relation *ddr,
			   struct loop *loop_nest)
{
  struct data_reference *dra = DDR_A (ddr);
  struct data_reference *drb = DDR_B (ddr);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "(compute_affine_dependence\n");
      fprintf (dump_file, "  stmt_a: ");
      print_gimple_stmt (dump_file, DR_STMT (dra), 0, TDF_SLIM);
      fprintf (dump_file, "  stmt_b: ");
      print_gimple_stmt (dump_file, DR_STMT (drb), 0, TDF_SLIM);
    }

  /* Analyze only when the dependence relation is not yet known.  */
  if (DDR_ARE_DEPENDENT (ddr) == NULL_TREE)
    {
      dependence_stats.num_dependence_tests++;

      /* The subscript tests only apply when every access function is
	 affine or constant in the nest.  */
      if (access_functions_are_affine_or_constant_p (dra, loop_nest)
	  && access_functions_are_affine_or_constant_p (drb, loop_nest))
	subscript_dependence_tester (ddr, loop_nest);

      /* As a last case, if the dependence cannot be determined, or if
	 the dependence is considered too difficult to determine, answer
	 "don't know".  */
      else
	{
	  dependence_stats.num_dependence_undetermined++;

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "Data ref a:\n");
	      dump_data_reference (dump_file, dra);
	      fprintf (dump_file, "Data ref b:\n");
	      dump_data_reference (dump_file, drb);
	      fprintf (dump_file, "affine dependence test not usable: access function not affine or constant.\n");
	    }
	  finalize_ddr_dependent (ddr, chrec_dont_know);
	}
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      if (DDR_ARE_DEPENDENT (ddr) == chrec_known)
	fprintf (dump_file, ") -> no dependence\n");
      else if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know)
	fprintf (dump_file, ") -> dependence analysis failed\n");
      else
	fprintf (dump_file, ")\n");
    }
}
3743
3744 /* Compute in DEPENDENCE_RELATIONS the data dependence graph for all
3745 the data references in DATAREFS, in the LOOP_NEST. When
3746 COMPUTE_SELF_AND_RR is FALSE, don't compute read-read and self
3747 relations. Return true when successful, i.e. data references number
3748 is small enough to be handled. */
3749
3750 bool
compute_all_dependences(vec<data_reference_p> datarefs,vec<ddr_p> * dependence_relations,vec<loop_p> loop_nest,bool compute_self_and_rr)3751 compute_all_dependences (vec<data_reference_p> datarefs,
3752 vec<ddr_p> *dependence_relations,
3753 vec<loop_p> loop_nest,
3754 bool compute_self_and_rr)
3755 {
3756 struct data_dependence_relation *ddr;
3757 struct data_reference *a, *b;
3758 unsigned int i, j;
3759
3760 if ((int) datarefs.length ()
3761 > PARAM_VALUE (PARAM_LOOP_MAX_DATAREFS_FOR_DATADEPS))
3762 {
3763 struct data_dependence_relation *ddr;
3764
3765 /* Insert a single relation into dependence_relations:
3766 chrec_dont_know. */
3767 ddr = initialize_data_dependence_relation (NULL, NULL, loop_nest);
3768 dependence_relations->safe_push (ddr);
3769 return false;
3770 }
3771
3772 FOR_EACH_VEC_ELT (datarefs, i, a)
3773 for (j = i + 1; datarefs.iterate (j, &b); j++)
3774 if (DR_IS_WRITE (a) || DR_IS_WRITE (b) || compute_self_and_rr)
3775 {
3776 ddr = initialize_data_dependence_relation (a, b, loop_nest);
3777 dependence_relations->safe_push (ddr);
3778 if (loop_nest.exists ())
3779 compute_affine_dependence (ddr, loop_nest[0]);
3780 }
3781
3782 if (compute_self_and_rr)
3783 FOR_EACH_VEC_ELT (datarefs, i, a)
3784 {
3785 ddr = initialize_data_dependence_relation (a, a, loop_nest);
3786 dependence_relations->safe_push (ddr);
3787 if (loop_nest.exists ())
3788 compute_affine_dependence (ddr, loop_nest[0]);
3789 }
3790
3791 return true;
3792 }
3793
/* Describes a location of a memory reference found in a GIMPLE
   statement; collected by get_references_in_stmt.  */

struct data_ref_loc
{
  /* The memory reference.  */
  tree ref;

  /* True if the memory reference is read.  */
  bool is_read;
};
3804
3805
/* Stores the locations of memory references in STMT to REFERENCES.  Returns
   true if STMT clobbers memory, false otherwise.  */

static bool
get_references_in_stmt (gimple *stmt, vec<data_ref_loc, va_heap> *references)
{
  bool clobbers_memory = false;
  data_ref_loc ref;
  tree op0, op1;
  enum gimple_code stmt_code = gimple_code (stmt);

  /* ASM_EXPR and CALL_EXPR may embed arbitrary side effects.
     As we cannot model data-references to not spelled out
     accesses give up if they may occur.  */
  if (stmt_code == GIMPLE_CALL
      && !(gimple_call_flags (stmt) & ECF_CONST))
    {
      /* Allow IFN_GOMP_SIMD_LANE in their own loops.  */
      if (gimple_call_internal_p (stmt))
	switch (gimple_call_internal_fn (stmt))
	  {
	  case IFN_GOMP_SIMD_LANE:
	    {
	      struct loop *loop = gimple_bb (stmt)->loop_father;
	      tree uid = gimple_call_arg (stmt, 0);
	      gcc_assert (TREE_CODE (uid) == SSA_NAME);
	      /* Only harmless inside the loop whose simduid it
		 names.  */
	      if (loop == NULL
		  || loop->simduid != SSA_NAME_VAR (uid))
		clobbers_memory = true;
	      break;
	    }
	  case IFN_MASK_LOAD:
	  case IFN_MASK_STORE:
	    /* Modeled as data references below.  */
	    break;
	  default:
	    clobbers_memory = true;
	    break;
	  }
      else
	clobbers_memory = true;
    }
  else if (stmt_code == GIMPLE_ASM
	   && (gimple_asm_volatile_p (as_a <gasm *> (stmt))
	       || gimple_vuse (stmt)))
    clobbers_memory = true;

  /* Statements without a virtual use do not access memory.  */
  if (!gimple_vuse (stmt))
    return clobbers_memory;

  if (stmt_code == GIMPLE_ASSIGN)
    {
      tree base;
      op0 = gimple_assign_lhs (stmt);
      op1 = gimple_assign_rhs1 (stmt);

      /* Record the RHS when it is a memory access (a decl, or a
	 reference whose base is not an SSA name).  */
      if (DECL_P (op1)
	  || (REFERENCE_CLASS_P (op1)
	      && (base = get_base_address (op1))
	      && TREE_CODE (base) != SSA_NAME))
	{
	  ref.ref = op1;
	  ref.is_read = true;
	  references->safe_push (ref);
	}
    }
  else if (stmt_code == GIMPLE_CALL)
    {
      unsigned i, n;
      tree ptr, type;
      unsigned int align;

      ref.is_read = false;
      if (gimple_call_internal_p (stmt))
	switch (gimple_call_internal_fn (stmt))
	  {
	  case IFN_MASK_LOAD:
	    if (gimple_call_lhs (stmt) == NULL_TREE)
	      break;
	    ref.is_read = true;
	    /* Fall through: the MEM_REF is built the same way for
	       masked loads and stores.  */
	  case IFN_MASK_STORE:
	    /* Reconstruct a MEM_REF for the accessed location from
	       the call arguments: arg 0 is the address, arg 1 the
	       alignment, arg 3 (for stores) the stored value.  */
	    ptr = build_int_cst (TREE_TYPE (gimple_call_arg (stmt, 1)), 0);
	    align = tree_to_shwi (gimple_call_arg (stmt, 1));
	    if (ref.is_read)
	      type = TREE_TYPE (gimple_call_lhs (stmt));
	    else
	      type = TREE_TYPE (gimple_call_arg (stmt, 3));
	    if (TYPE_ALIGN (type) != align)
	      type = build_aligned_type (type, align);
	    ref.ref = fold_build2 (MEM_REF, type, gimple_call_arg (stmt, 0),
				   ptr);
	    references->safe_push (ref);
	    return false;
	  default:
	    break;
	  }

      /* Record every argument that is a memory access as a read.  */
      op0 = gimple_call_lhs (stmt);
      n = gimple_call_num_args (stmt);
      for (i = 0; i < n; i++)
	{
	  op1 = gimple_call_arg (stmt, i);

	  if (DECL_P (op1)
	      || (REFERENCE_CLASS_P (op1) && get_base_address (op1)))
	    {
	      ref.ref = op1;
	      ref.is_read = true;
	      references->safe_push (ref);
	    }
	}
    }
  else
    return clobbers_memory;

  /* The LHS, when it is a memory access, is a write.  */
  if (op0
      && (DECL_P (op0)
	  || (REFERENCE_CLASS_P (op0) && get_base_address (op0))))
    {
      ref.ref = op0;
      ref.is_read = false;
      references->safe_push (ref);
    }
  return clobbers_memory;
}
3930
3931
3932 /* Returns true if the loop-nest has any data reference. */
3933
3934 bool
loop_nest_has_data_refs(loop_p loop)3935 loop_nest_has_data_refs (loop_p loop)
3936 {
3937 basic_block *bbs = get_loop_body (loop);
3938 vec<data_ref_loc> references;
3939 references.create (3);
3940
3941 for (unsigned i = 0; i < loop->num_nodes; i++)
3942 {
3943 basic_block bb = bbs[i];
3944 gimple_stmt_iterator bsi;
3945
3946 for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
3947 {
3948 gimple *stmt = gsi_stmt (bsi);
3949 get_references_in_stmt (stmt, &references);
3950 if (references.length ())
3951 {
3952 free (bbs);
3953 references.release ();
3954 return true;
3955 }
3956 }
3957 }
3958 free (bbs);
3959 references.release ();
3960
3961 if (loop->inner)
3962 {
3963 loop = loop->inner;
3964 while (loop)
3965 {
3966 if (loop_nest_has_data_refs (loop))
3967 return true;
3968 loop = loop->next;
3969 }
3970 }
3971 return false;
3972 }
3973
3974 /* Stores the data references in STMT to DATAREFS. If there is an unanalyzable
3975 reference, returns false, otherwise returns true. NEST is the outermost
3976 loop of the loop nest in which the references should be analyzed. */
3977
3978 bool
find_data_references_in_stmt(struct loop * nest,gimple * stmt,vec<data_reference_p> * datarefs)3979 find_data_references_in_stmt (struct loop *nest, gimple *stmt,
3980 vec<data_reference_p> *datarefs)
3981 {
3982 unsigned i;
3983 auto_vec<data_ref_loc, 2> references;
3984 data_ref_loc *ref;
3985 bool ret = true;
3986 data_reference_p dr;
3987
3988 if (get_references_in_stmt (stmt, &references))
3989 return false;
3990
3991 FOR_EACH_VEC_ELT (references, i, ref)
3992 {
3993 dr = create_data_ref (nest, loop_containing_stmt (stmt),
3994 ref->ref, stmt, ref->is_read);
3995 gcc_assert (dr != NULL);
3996 datarefs->safe_push (dr);
3997 }
3998 references.release ();
3999 return ret;
4000 }
4001
4002 /* Stores the data references in STMT to DATAREFS. If there is an
4003 unanalyzable reference, returns false, otherwise returns true.
4004 NEST is the outermost loop of the loop nest in which the references
4005 should be instantiated, LOOP is the loop in which the references
4006 should be analyzed. */
4007
4008 bool
graphite_find_data_references_in_stmt(loop_p nest,loop_p loop,gimple * stmt,vec<data_reference_p> * datarefs)4009 graphite_find_data_references_in_stmt (loop_p nest, loop_p loop, gimple *stmt,
4010 vec<data_reference_p> *datarefs)
4011 {
4012 unsigned i;
4013 auto_vec<data_ref_loc, 2> references;
4014 data_ref_loc *ref;
4015 bool ret = true;
4016 data_reference_p dr;
4017
4018 if (get_references_in_stmt (stmt, &references))
4019 return false;
4020
4021 FOR_EACH_VEC_ELT (references, i, ref)
4022 {
4023 dr = create_data_ref (nest, loop, ref->ref, stmt, ref->is_read);
4024 gcc_assert (dr != NULL);
4025 datarefs->safe_push (dr);
4026 }
4027
4028 references.release ();
4029 return ret;
4030 }
4031
4032 /* Search the data references in LOOP, and record the information into
4033 DATAREFS. Returns chrec_dont_know when failing to analyze a
4034 difficult case, returns NULL_TREE otherwise. */
4035
4036 tree
find_data_references_in_bb(struct loop * loop,basic_block bb,vec<data_reference_p> * datarefs)4037 find_data_references_in_bb (struct loop *loop, basic_block bb,
4038 vec<data_reference_p> *datarefs)
4039 {
4040 gimple_stmt_iterator bsi;
4041
4042 for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
4043 {
4044 gimple *stmt = gsi_stmt (bsi);
4045
4046 if (!find_data_references_in_stmt (loop, stmt, datarefs))
4047 {
4048 struct data_reference *res;
4049 res = XCNEW (struct data_reference);
4050 datarefs->safe_push (res);
4051
4052 return chrec_dont_know;
4053 }
4054 }
4055
4056 return NULL_TREE;
4057 }
4058
4059 /* Search the data references in LOOP, and record the information into
4060 DATAREFS. Returns chrec_dont_know when failing to analyze a
4061 difficult case, returns NULL_TREE otherwise.
4062
4063 TODO: This function should be made smarter so that it can handle address
4064 arithmetic as if they were array accesses, etc. */
4065
4066 tree
find_data_references_in_loop(struct loop * loop,vec<data_reference_p> * datarefs)4067 find_data_references_in_loop (struct loop *loop,
4068 vec<data_reference_p> *datarefs)
4069 {
4070 basic_block bb, *bbs;
4071 unsigned int i;
4072
4073 bbs = get_loop_body_in_dom_order (loop);
4074
4075 for (i = 0; i < loop->num_nodes; i++)
4076 {
4077 bb = bbs[i];
4078
4079 if (find_data_references_in_bb (loop, bb, datarefs) == chrec_dont_know)
4080 {
4081 free (bbs);
4082 return chrec_dont_know;
4083 }
4084 }
4085 free (bbs);
4086
4087 return NULL_TREE;
4088 }
4089
4090 /* Recursive helper function. */
4091
4092 static bool
find_loop_nest_1(struct loop * loop,vec<loop_p> * loop_nest)4093 find_loop_nest_1 (struct loop *loop, vec<loop_p> *loop_nest)
4094 {
4095 /* Inner loops of the nest should not contain siblings. Example:
4096 when there are two consecutive loops,
4097
4098 | loop_0
4099 | loop_1
4100 | A[{0, +, 1}_1]
4101 | endloop_1
4102 | loop_2
4103 | A[{0, +, 1}_2]
4104 | endloop_2
4105 | endloop_0
4106
4107 the dependence relation cannot be captured by the distance
4108 abstraction. */
4109 if (loop->next)
4110 return false;
4111
4112 loop_nest->safe_push (loop);
4113 if (loop->inner)
4114 return find_loop_nest_1 (loop->inner, loop_nest);
4115 return true;
4116 }
4117
4118 /* Return false when the LOOP is not well nested. Otherwise return
4119 true and insert in LOOP_NEST the loops of the nest. LOOP_NEST will
4120 contain the loops from the outermost to the innermost, as they will
4121 appear in the classic distance vector. */
4122
4123 bool
find_loop_nest(struct loop * loop,vec<loop_p> * loop_nest)4124 find_loop_nest (struct loop *loop, vec<loop_p> *loop_nest)
4125 {
4126 loop_nest->safe_push (loop);
4127 if (loop->inner)
4128 return find_loop_nest_1 (loop->inner, loop_nest);
4129 return true;
4130 }
4131
4132 /* Returns true when the data dependences have been computed, false otherwise.
4133 Given a loop nest LOOP, the following vectors are returned:
4134 DATAREFS is initialized to all the array elements contained in this loop,
4135 DEPENDENCE_RELATIONS contains the relations between the data references.
4136 Compute read-read and self relations if
4137 COMPUTE_SELF_AND_READ_READ_DEPENDENCES is TRUE. */
4138
bool
compute_data_dependences_for_loop (struct loop *loop,
				   bool compute_self_and_read_read_dependences,
				   vec<loop_p> *loop_nest,
				   vec<data_reference_p> *datarefs,
				   vec<ddr_p> *dependence_relations)
{
  bool res = true;

  /* Reset the file-global dependence tester counters; they are
     incremented by the subscript/dependence tests run below and only
     reported when -fdump-*-stats is in effect.  */
  memset (&dependence_stats, 0, sizeof (dependence_stats));

  /* If the loop nest is not well formed, or one of the data references
     is not computable, give up without spending time to compute other
     dependences.  Note: earlier steps may have partially filled
     LOOP_NEST and DATAREFS before a later step fails.  */
  if (!loop
      || !find_loop_nest (loop, loop_nest)
      || find_data_references_in_loop (loop, datarefs) == chrec_dont_know
      || !compute_all_dependences (*datarefs, dependence_relations, *loop_nest,
				   compute_self_and_read_read_dependences))
    res = false;

  /* Dump the tester statistics regardless of success, so failed runs
     still report how far the analysis got.  */
  if (dump_file && (dump_flags & TDF_STATS))
    {
      fprintf (dump_file, "Dependence tester statistics:\n");

      fprintf (dump_file, "Number of dependence tests: %d\n",
	       dependence_stats.num_dependence_tests);
      fprintf (dump_file, "Number of dependence tests classified dependent: %d\n",
	       dependence_stats.num_dependence_dependent);
      fprintf (dump_file, "Number of dependence tests classified independent: %d\n",
	       dependence_stats.num_dependence_independent);
      fprintf (dump_file, "Number of undetermined dependence tests: %d\n",
	       dependence_stats.num_dependence_undetermined);

      fprintf (dump_file, "Number of subscript tests: %d\n",
	       dependence_stats.num_subscript_tests);
      fprintf (dump_file, "Number of undetermined subscript tests: %d\n",
	       dependence_stats.num_subscript_undetermined);
      fprintf (dump_file, "Number of same subscript function: %d\n",
	       dependence_stats.num_same_subscript_function);

      /* ZIV: zero index variable tests.  */
      fprintf (dump_file, "Number of ziv tests: %d\n",
	       dependence_stats.num_ziv);
      fprintf (dump_file, "Number of ziv tests returning dependent: %d\n",
	       dependence_stats.num_ziv_dependent);
      fprintf (dump_file, "Number of ziv tests returning independent: %d\n",
	       dependence_stats.num_ziv_independent);
      fprintf (dump_file, "Number of ziv tests unimplemented: %d\n",
	       dependence_stats.num_ziv_unimplemented);

      /* SIV: single index variable tests.  */
      fprintf (dump_file, "Number of siv tests: %d\n",
	       dependence_stats.num_siv);
      fprintf (dump_file, "Number of siv tests returning dependent: %d\n",
	       dependence_stats.num_siv_dependent);
      fprintf (dump_file, "Number of siv tests returning independent: %d\n",
	       dependence_stats.num_siv_independent);
      fprintf (dump_file, "Number of siv tests unimplemented: %d\n",
	       dependence_stats.num_siv_unimplemented);

      /* MIV: multiple index variable tests.  */
      fprintf (dump_file, "Number of miv tests: %d\n",
	       dependence_stats.num_miv);
      fprintf (dump_file, "Number of miv tests returning dependent: %d\n",
	       dependence_stats.num_miv_dependent);
      fprintf (dump_file, "Number of miv tests returning independent: %d\n",
	       dependence_stats.num_miv_independent);
      fprintf (dump_file, "Number of miv tests unimplemented: %d\n",
	       dependence_stats.num_miv_unimplemented);
    }

  return res;
}
4210
4211 /* Free the memory used by a data dependence relation DDR. */
4212
4213 void
free_dependence_relation(struct data_dependence_relation * ddr)4214 free_dependence_relation (struct data_dependence_relation *ddr)
4215 {
4216 if (ddr == NULL)
4217 return;
4218
4219 if (DDR_SUBSCRIPTS (ddr).exists ())
4220 free_subscripts (DDR_SUBSCRIPTS (ddr));
4221 DDR_DIST_VECTS (ddr).release ();
4222 DDR_DIR_VECTS (ddr).release ();
4223
4224 free (ddr);
4225 }
4226
4227 /* Free the memory used by the data dependence relations from
4228 DEPENDENCE_RELATIONS. */
4229
4230 void
free_dependence_relations(vec<ddr_p> dependence_relations)4231 free_dependence_relations (vec<ddr_p> dependence_relations)
4232 {
4233 unsigned int i;
4234 struct data_dependence_relation *ddr;
4235
4236 FOR_EACH_VEC_ELT (dependence_relations, i, ddr)
4237 if (ddr)
4238 free_dependence_relation (ddr);
4239
4240 dependence_relations.release ();
4241 }
4242
4243 /* Free the memory used by the data references from DATAREFS. */
4244
4245 void
free_data_refs(vec<data_reference_p> datarefs)4246 free_data_refs (vec<data_reference_p> datarefs)
4247 {
4248 unsigned int i;
4249 struct data_reference *dr;
4250
4251 FOR_EACH_VEC_ELT (datarefs, i, dr)
4252 free_data_ref (dr);
4253 datarefs.release ();
4254 }
4255