middle-end/98773 - always sign extend CHREC_RIGHT
gcc/tree-data-ref.c
/* Data references and dependences detectors.
   Copyright (C) 2003-2021 Free Software Foundation, Inc.
   Contributed by Sebastian Pop <pop@cri.ensmp.fr>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* This pass walks a given loop structure searching for array
   references.  The information about the array accesses is recorded
   in DATA_REFERENCE structures.

   The basic test for determining the dependences is:
   given two access functions chrec1 and chrec2 to the same array, and
   x and y two vectors from the iteration domain, the same element of
   the array is accessed twice at iterations x and y if and only if:
   |   chrec1 (x) == chrec2 (y).

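   For example, in the loop

   | for (i = 0; i < N; i++)
   |   A[i + 1] = A[i];

   the write has access function {1, +, 1} and the read has {0, +, 1},
   so chrec1 (x) == chrec2 (y) exactly when x + 1 == y: the element
   written at iteration x is read again at iteration x + 1, a
   dependence at distance 1.
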
   The goals of this analysis are:

   - to determine independence: the relation between two
     independent accesses is qualified with chrec_known (this
     information allows loop parallelization),

   - when two data references access the same data, to qualify the
     dependence relation with classic dependence representations:

       - distance vectors
       - direction vectors
       - loop carried level dependence
       - polyhedron dependence
     or with the chains of recurrences based representation,

   - to define a knowledge base for storing the data dependence
     information,

   - to define an interface to access this data.


   Definitions:

   - subscript: given two array accesses, a subscript is the tuple
     composed of the access functions for a given dimension.  Example:
     Given A[f1][f2][f3] and B[g1][g2][g3], there are three subscripts:
     (f1, g1), (f2, g2), (f3, g3).

   - Diophantine equation: an equation whose coefficients and
     solutions are integer constants, for example the equation
     |   3*x + 2*y = 1
     has an integer solution x = 1 and y = -1.

   References:

   - "Advanced Compilation for High Performance Computing" by Randy
     Allen and Ken Kennedy.
     http://citeseer.ist.psu.edu/goff91practical.html

   - "Loop Transformations for Restructuring Compilers - The Foundations"
     by Utpal Banerjee.

*/

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "gimple-pretty-print.h"
#include "alias.h"
#include "fold-const.h"
#include "expr.h"
#include "gimple-iterator.h"
#include "tree-ssa-loop-niter.h"
#include "tree-ssa-loop.h"
#include "tree-ssa.h"
#include "cfgloop.h"
#include "tree-data-ref.h"
#include "tree-scalar-evolution.h"
#include "dumpfile.h"
#include "tree-affine.h"
#include "builtins.h"
#include "tree-eh.h"
#include "ssa.h"
#include "internal-fn.h"
#include "range-op.h"
#include "vr-values.h"

static struct datadep_stats
{
  int num_dependence_tests;
  int num_dependence_dependent;
  int num_dependence_independent;
  int num_dependence_undetermined;

  int num_subscript_tests;
  int num_subscript_undetermined;
  int num_same_subscript_function;

  int num_ziv;
  int num_ziv_independent;
  int num_ziv_dependent;
  int num_ziv_unimplemented;

  int num_siv;
  int num_siv_independent;
  int num_siv_dependent;
  int num_siv_unimplemented;

  int num_miv;
  int num_miv_independent;
  int num_miv_dependent;
  int num_miv_unimplemented;
} dependence_stats;

static bool subscript_dependence_tester_1 (struct data_dependence_relation *,
					   unsigned int, unsigned int,
					   class loop *);
/* Returns true iff A divides B.  */

static inline bool
tree_fold_divides_p (const_tree a, const_tree b)
{
  gcc_assert (TREE_CODE (a) == INTEGER_CST);
  gcc_assert (TREE_CODE (b) == INTEGER_CST);
  return integer_zerop (int_const_binop (TRUNC_MOD_EXPR, b, a));
}

/* Returns true iff A divides B.  */

static inline bool
int_divides_p (lambda_int a, lambda_int b)
{
  return ((b % a) == 0);
}

/* Return true if reference REF contains a union access.  */

static bool
ref_contains_union_access_p (tree ref)
{
  while (handled_component_p (ref))
    {
      ref = TREE_OPERAND (ref, 0);
      if (TREE_CODE (TREE_TYPE (ref)) == UNION_TYPE
	  || TREE_CODE (TREE_TYPE (ref)) == QUAL_UNION_TYPE)
	return true;
    }
  return false;
}

\f

/* Dump into FILE all the data references from DATAREFS.  */

static void
dump_data_references (FILE *file, vec<data_reference_p> datarefs)
{
  unsigned int i;
  struct data_reference *dr;

  FOR_EACH_VEC_ELT (datarefs, i, dr)
    dump_data_reference (file, dr);
}

/* Unified dump of all the data references in REF to STDERR.  */

DEBUG_FUNCTION void
debug (vec<data_reference_p> &ref)
{
  dump_data_references (stderr, ref);
}

DEBUG_FUNCTION void
debug (vec<data_reference_p> *ptr)
{
  if (ptr)
    debug (*ptr);
  else
    fprintf (stderr, "<nil>\n");
}


/* Dump into STDERR all the data references from DATAREFS.  */

DEBUG_FUNCTION void
debug_data_references (vec<data_reference_p> datarefs)
{
  dump_data_references (stderr, datarefs);
}

/* Print to STDERR the data_reference DR.  */

DEBUG_FUNCTION void
debug_data_reference (struct data_reference *dr)
{
  dump_data_reference (stderr, dr);
}

/* Dump function for a DATA_REFERENCE structure.  */

void
dump_data_reference (FILE *outf,
		     struct data_reference *dr)
{
  unsigned int i;

  fprintf (outf, "#(Data Ref: \n");
  fprintf (outf, "# bb: %d \n", gimple_bb (DR_STMT (dr))->index);
  fprintf (outf, "# stmt: ");
  print_gimple_stmt (outf, DR_STMT (dr), 0);
  fprintf (outf, "# ref: ");
  print_generic_stmt (outf, DR_REF (dr));
  fprintf (outf, "# base_object: ");
  print_generic_stmt (outf, DR_BASE_OBJECT (dr));

  for (i = 0; i < DR_NUM_DIMENSIONS (dr); i++)
    {
      fprintf (outf, "# Access function %d: ", i);
      print_generic_stmt (outf, DR_ACCESS_FN (dr, i));
    }
  fprintf (outf, "#)\n");
}

/* Unified dump function for a DATA_REFERENCE structure.  */

DEBUG_FUNCTION void
debug (data_reference &ref)
{
  dump_data_reference (stderr, &ref);
}

DEBUG_FUNCTION void
debug (data_reference *ptr)
{
  if (ptr)
    debug (*ptr);
  else
    fprintf (stderr, "<nil>\n");
}


/* Dumps the affine function described by FN to the file OUTF.  */

DEBUG_FUNCTION void
dump_affine_function (FILE *outf, affine_fn fn)
{
  unsigned i;
  tree coef;

  print_generic_expr (outf, fn[0], TDF_SLIM);
  for (i = 1; fn.iterate (i, &coef); i++)
    {
      fprintf (outf, " + ");
      print_generic_expr (outf, coef, TDF_SLIM);
      fprintf (outf, " * x_%u", i);
    }
}

/* Dumps the conflict function CF to the file OUTF.  */

DEBUG_FUNCTION void
dump_conflict_function (FILE *outf, conflict_function *cf)
{
  unsigned i;

  if (cf->n == NO_DEPENDENCE)
    fprintf (outf, "no dependence");
  else if (cf->n == NOT_KNOWN)
    fprintf (outf, "not known");
  else
    {
      for (i = 0; i < cf->n; i++)
	{
	  if (i != 0)
	    fprintf (outf, " ");
	  fprintf (outf, "[");
	  dump_affine_function (outf, cf->fns[i]);
	  fprintf (outf, "]");
	}
    }
}

/* Dump function for a SUBSCRIPT structure.  */

DEBUG_FUNCTION void
dump_subscript (FILE *outf, struct subscript *subscript)
{
  conflict_function *cf = SUB_CONFLICTS_IN_A (subscript);

  fprintf (outf, "\n (subscript \n");
  fprintf (outf, " iterations_that_access_an_element_twice_in_A: ");
  dump_conflict_function (outf, cf);
  if (CF_NONTRIVIAL_P (cf))
    {
      tree last_iteration = SUB_LAST_CONFLICT (subscript);
      fprintf (outf, "\n last_conflict: ");
      print_generic_expr (outf, last_iteration);
    }

  cf = SUB_CONFLICTS_IN_B (subscript);
  fprintf (outf, "\n iterations_that_access_an_element_twice_in_B: ");
  dump_conflict_function (outf, cf);
  if (CF_NONTRIVIAL_P (cf))
    {
      tree last_iteration = SUB_LAST_CONFLICT (subscript);
      fprintf (outf, "\n last_conflict: ");
      print_generic_expr (outf, last_iteration);
    }

  fprintf (outf, "\n (Subscript distance: ");
  print_generic_expr (outf, SUB_DISTANCE (subscript));
  fprintf (outf, " ))\n");
}

/* Print the classic direction vector DIRV to OUTF.  */

DEBUG_FUNCTION void
print_direction_vector (FILE *outf,
			lambda_vector dirv,
			int length)
{
  int eq;

  for (eq = 0; eq < length; eq++)
    {
      enum data_dependence_direction dir = ((enum data_dependence_direction)
					    dirv[eq]);

      switch (dir)
	{
	case dir_positive:
	  fprintf (outf, " +");
	  break;
	case dir_negative:
	  fprintf (outf, " -");
	  break;
	case dir_equal:
	  fprintf (outf, " =");
	  break;
	case dir_positive_or_equal:
	  fprintf (outf, " +=");
	  break;
	case dir_positive_or_negative:
	  fprintf (outf, " +-");
	  break;
	case dir_negative_or_equal:
	  fprintf (outf, " -=");
	  break;
	case dir_star:
	  fprintf (outf, " *");
	  break;
	default:
	  fprintf (outf, "indep");
	  break;
	}
    }
  fprintf (outf, "\n");
}

/* Print a vector of direction vectors.  */

DEBUG_FUNCTION void
print_dir_vectors (FILE *outf, vec<lambda_vector> dir_vects,
		   int length)
{
  unsigned j;
  lambda_vector v;

  FOR_EACH_VEC_ELT (dir_vects, j, v)
    print_direction_vector (outf, v, length);
}

/* Print out a vector VEC of length N to OUTFILE.  */

DEBUG_FUNCTION void
print_lambda_vector (FILE * outfile, lambda_vector vector, int n)
{
  int i;

  for (i = 0; i < n; i++)
    fprintf (outfile, "%3d ", (int)vector[i]);
  fprintf (outfile, "\n");
}

/* Print a vector of distance vectors.  */

DEBUG_FUNCTION void
print_dist_vectors (FILE *outf, vec<lambda_vector> dist_vects,
		    int length)
{
  unsigned j;
  lambda_vector v;

  FOR_EACH_VEC_ELT (dist_vects, j, v)
    print_lambda_vector (outf, v, length);
}

/* Dump function for a DATA_DEPENDENCE_RELATION structure.  */

DEBUG_FUNCTION void
dump_data_dependence_relation (FILE *outf,
			       struct data_dependence_relation *ddr)
{
  struct data_reference *dra, *drb;

  fprintf (outf, "(Data Dep: \n");

  if (!ddr || DDR_ARE_DEPENDENT (ddr) == chrec_dont_know)
    {
      if (ddr)
	{
	  dra = DDR_A (ddr);
	  drb = DDR_B (ddr);
	  if (dra)
	    dump_data_reference (outf, dra);
	  else
	    fprintf (outf, " (nil)\n");
	  if (drb)
	    dump_data_reference (outf, drb);
	  else
	    fprintf (outf, " (nil)\n");
	}
      fprintf (outf, " (don't know)\n)\n");
      return;
    }

  dra = DDR_A (ddr);
  drb = DDR_B (ddr);
  dump_data_reference (outf, dra);
  dump_data_reference (outf, drb);

  if (DDR_ARE_DEPENDENT (ddr) == chrec_known)
    fprintf (outf, " (no dependence)\n");

  else if (DDR_ARE_DEPENDENT (ddr) == NULL_TREE)
    {
      unsigned int i;
      class loop *loopi;

      subscript *sub;
      FOR_EACH_VEC_ELT (DDR_SUBSCRIPTS (ddr), i, sub)
	{
	  fprintf (outf, " access_fn_A: ");
	  print_generic_stmt (outf, SUB_ACCESS_FN (sub, 0));
	  fprintf (outf, " access_fn_B: ");
	  print_generic_stmt (outf, SUB_ACCESS_FN (sub, 1));
	  dump_subscript (outf, sub);
	}

      fprintf (outf, " loop nest: (");
      FOR_EACH_VEC_ELT (DDR_LOOP_NEST (ddr), i, loopi)
	fprintf (outf, "%d ", loopi->num);
      fprintf (outf, ")\n");

      for (i = 0; i < DDR_NUM_DIST_VECTS (ddr); i++)
	{
	  fprintf (outf, " distance_vector: ");
	  print_lambda_vector (outf, DDR_DIST_VECT (ddr, i),
			       DDR_NB_LOOPS (ddr));
	}

      for (i = 0; i < DDR_NUM_DIR_VECTS (ddr); i++)
	{
	  fprintf (outf, " direction_vector: ");
	  print_direction_vector (outf, DDR_DIR_VECT (ddr, i),
				  DDR_NB_LOOPS (ddr));
	}
    }

  fprintf (outf, ")\n");
}

/* Debug version.  */

DEBUG_FUNCTION void
debug_data_dependence_relation (struct data_dependence_relation *ddr)
{
  dump_data_dependence_relation (stderr, ddr);
}

/* Dump into FILE all the dependence relations from DDRS.  */

DEBUG_FUNCTION void
dump_data_dependence_relations (FILE *file,
				vec<ddr_p> ddrs)
{
  unsigned int i;
  struct data_dependence_relation *ddr;

  FOR_EACH_VEC_ELT (ddrs, i, ddr)
    dump_data_dependence_relation (file, ddr);
}

DEBUG_FUNCTION void
debug (vec<ddr_p> &ref)
{
  dump_data_dependence_relations (stderr, ref);
}

DEBUG_FUNCTION void
debug (vec<ddr_p> *ptr)
{
  if (ptr)
    debug (*ptr);
  else
    fprintf (stderr, "<nil>\n");
}


/* Dump to STDERR all the dependence relations from DDRS.  */

DEBUG_FUNCTION void
debug_data_dependence_relations (vec<ddr_p> ddrs)
{
  dump_data_dependence_relations (stderr, ddrs);
}

/* Dumps the distance and direction vectors in FILE.  DDRS contains
   the dependence relations; each vector has as many elements as there
   are loops in the considered nest (DDR_NB_LOOPS).  */

DEBUG_FUNCTION void
dump_dist_dir_vectors (FILE *file, vec<ddr_p> ddrs)
{
  unsigned int i, j;
  struct data_dependence_relation *ddr;
  lambda_vector v;

  FOR_EACH_VEC_ELT (ddrs, i, ddr)
    if (DDR_ARE_DEPENDENT (ddr) == NULL_TREE && DDR_AFFINE_P (ddr))
      {
	FOR_EACH_VEC_ELT (DDR_DIST_VECTS (ddr), j, v)
	  {
	    fprintf (file, "DISTANCE_V (");
	    print_lambda_vector (file, v, DDR_NB_LOOPS (ddr));
	    fprintf (file, ")\n");
	  }

	FOR_EACH_VEC_ELT (DDR_DIR_VECTS (ddr), j, v)
	  {
	    fprintf (file, "DIRECTION_V (");
	    print_direction_vector (file, v, DDR_NB_LOOPS (ddr));
	    fprintf (file, ")\n");
	  }
      }

  fprintf (file, "\n\n");
}

/* Dumps the data dependence relations DDRS in FILE.  */

DEBUG_FUNCTION void
dump_ddrs (FILE *file, vec<ddr_p> ddrs)
{
  unsigned int i;
  struct data_dependence_relation *ddr;

  FOR_EACH_VEC_ELT (ddrs, i, ddr)
    dump_data_dependence_relation (file, ddr);

  fprintf (file, "\n\n");
}

DEBUG_FUNCTION void
debug_ddrs (vec<ddr_p> ddrs)
{
  dump_ddrs (stderr, ddrs);
}

/* If RESULT_RANGE is nonnull, set *RESULT_RANGE to the range of
   OP0 CODE OP1, where:

   - OP0 CODE OP1 has integral type TYPE
   - the range of OP0 is given by OP0_RANGE and
   - the range of OP1 is given by OP1_RANGE.

   Independently of RESULT_RANGE, try to compute:

     DELTA = ((sizetype) OP0 CODE (sizetype) OP1)
	     - (sizetype) (OP0 CODE OP1)

   as a constant and subtract DELTA from the ssizetype constant in *OFF.
   Return true on success, or false if DELTA is not known at compile time.

   Truncation and sign changes are known to distribute over CODE, i.e.

     (itype) (A CODE B) == (itype) A CODE (itype) B

   for any integral type ITYPE whose precision is no greater than the
   precision of A and B.  */
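
/* For example, with TYPE = unsigned char, OP0 = 200, OP1 = 100 and
   CODE = PLUS_EXPR, the sizetype computation gives 300 while the 8-bit
   computation wraps to 44, so DELTA is 300 - 44 = 256: exactly one
   overflow of 2^8, matching arshift (LB, PRECISION) == 1 below.  */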

static bool
compute_distributive_range (tree type, value_range &op0_range,
			    tree_code code, value_range &op1_range,
			    tree *off, value_range *result_range)
{
  gcc_assert (INTEGRAL_TYPE_P (type) && !TYPE_OVERFLOW_TRAPS (type));
  if (result_range)
    {
      range_operator *op = range_op_handler (code, type);
      op->fold_range (*result_range, type, op0_range, op1_range);
    }

  /* The distributive property guarantees that if TYPE is no narrower
     than SIZETYPE,

       (sizetype) (OP0 CODE OP1) == (sizetype) OP0 CODE (sizetype) OP1

     and so we can treat DELTA as zero.  */
  if (TYPE_PRECISION (type) >= TYPE_PRECISION (sizetype))
    return true;

  /* If overflow is undefined, we can assume that:

       X == (ssizetype) OP0 CODE (ssizetype) OP1

     is within the range of TYPE, i.e.:

       X == (ssizetype) (TYPE) X

     Distributing the (TYPE) truncation over X gives:

       X == (ssizetype) (OP0 CODE OP1)

     Casting both sides to sizetype and distributing the sizetype cast
     over X gives:

       (sizetype) OP0 CODE (sizetype) OP1 == (sizetype) (OP0 CODE OP1)

     and so we can treat DELTA as zero.  */
  if (TYPE_OVERFLOW_UNDEFINED (type))
    return true;

  /* Compute the range of:

       (ssizetype) OP0 CODE (ssizetype) OP1

     The distributive property guarantees that this has the same bitpattern as:

       (sizetype) OP0 CODE (sizetype) OP1

     but its range is more conducive to analysis.  */
  range_cast (op0_range, ssizetype);
  range_cast (op1_range, ssizetype);
  value_range wide_range;
  range_operator *op = range_op_handler (code, ssizetype);
  bool saved_flag_wrapv = flag_wrapv;
  flag_wrapv = 1;
  op->fold_range (wide_range, ssizetype, op0_range, op1_range);
  flag_wrapv = saved_flag_wrapv;
  if (wide_range.num_pairs () != 1 || !range_int_cst_p (&wide_range))
    return false;

  wide_int lb = wide_range.lower_bound ();
  wide_int ub = wide_range.upper_bound ();

  /* Calculate the number of times that each end of the range overflows or
     underflows TYPE.  We can only calculate DELTA if the numbers match.  */
  unsigned int precision = TYPE_PRECISION (type);
  if (!TYPE_UNSIGNED (type))
    {
      wide_int type_min = wi::mask (precision - 1, true, lb.get_precision ());
      lb -= type_min;
      ub -= type_min;
    }
  wide_int upper_bits = wi::mask (precision, true, lb.get_precision ());
  lb &= upper_bits;
  ub &= upper_bits;
  if (lb != ub)
    return false;

  /* OP0 CODE OP1 overflows exactly arshift (LB, PRECISION) times, with
     negative values indicating underflow.  The low PRECISION bits of LB
     are clear, so DELTA is therefore LB (== UB).  */
  *off = wide_int_to_tree (ssizetype, wi::to_wide (*off) - lb);
  return true;
}

/* Return true if (sizetype) OP == (sizetype) (TO_TYPE) OP,
   given that OP has type FROM_TYPE and range RANGE.  Both TO_TYPE and
   FROM_TYPE are integral types.  */
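
/* For example, a widening conversion from unsigned short to int never
   changes the value, so (sizetype) OP == (sizetype) (int) OP for any
   unsigned short OP; a narrowing conversion only qualifies when RANGE
   already fits in TO_TYPE.  */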

static bool
nop_conversion_for_offset_p (tree to_type, tree from_type, value_range &range)
{
  gcc_assert (INTEGRAL_TYPE_P (to_type)
	      && INTEGRAL_TYPE_P (from_type)
	      && !TYPE_OVERFLOW_TRAPS (to_type)
	      && !TYPE_OVERFLOW_TRAPS (from_type));

  /* Converting to something no narrower than sizetype and then to sizetype
     is equivalent to converting directly to sizetype.  */
  if (TYPE_PRECISION (to_type) >= TYPE_PRECISION (sizetype))
    return true;

  /* Check whether TO_TYPE can represent all values that FROM_TYPE can.  */
  if (TYPE_PRECISION (from_type) < TYPE_PRECISION (to_type)
      && (TYPE_UNSIGNED (from_type) || !TYPE_UNSIGNED (to_type)))
    return true;

  /* For narrowing conversions, we could in principle test whether
     the bits in FROM_TYPE but not in TO_TYPE have a fixed value
     and apply a constant adjustment.

     For other conversions (which involve a sign change) we could
     check that the signs are always equal, and apply a constant
     adjustment if the signs are negative.

     However, both cases should be rare.  */
  return range_fits_type_p (&range, TYPE_PRECISION (to_type),
			    TYPE_SIGN (to_type));
}

static void
split_constant_offset (tree type, tree *var, tree *off,
		       value_range *result_range,
		       hash_map<tree, std::pair<tree, tree> > &cache,
		       unsigned *limit);

/* Helper function for split_constant_offset.  If TYPE is a pointer type,
   try to express OP0 CODE OP1 as:

     POINTER_PLUS <*VAR, (sizetype) *OFF>

   where:

   - *VAR has type TYPE
   - *OFF is a constant of type ssizetype.

   If TYPE is an integral type, try to express (sizetype) (OP0 CODE OP1) as:

     *VAR + (sizetype) *OFF

   where:

   - *VAR has type sizetype
   - *OFF is a constant of type ssizetype.

   In both cases, OP0 CODE OP1 has type TYPE.

   Return true on success.  A false return value indicates that we can't
   do better than set *OFF to zero.

   When returning true, set RESULT_RANGE to the range of OP0 CODE OP1,
   if RESULT_RANGE is nonnull and if we can do better than assume VR_VARYING.

   CACHE caches {*VAR, *OFF} pairs for SSA names that we've previously
   visited.  LIMIT counts down the number of SSA names that we are
   allowed to process before giving up.  */

static bool
split_constant_offset_1 (tree type, tree op0, enum tree_code code, tree op1,
			 tree *var, tree *off, value_range *result_range,
			 hash_map<tree, std::pair<tree, tree> > &cache,
			 unsigned *limit)
{
  tree var0, var1;
  tree off0, off1;
  value_range op0_range, op1_range;

  *var = NULL_TREE;
  *off = NULL_TREE;

  switch (code)
    {
    case INTEGER_CST:
      *var = size_int (0);
      *off = fold_convert (ssizetype, op0);
      if (result_range)
	result_range->set (op0, op0);
      return true;

    case POINTER_PLUS_EXPR:
      split_constant_offset (op0, &var0, &off0, nullptr, cache, limit);
      split_constant_offset (op1, &var1, &off1, nullptr, cache, limit);
      *var = fold_build2 (POINTER_PLUS_EXPR, type, var0, var1);
      *off = size_binop (PLUS_EXPR, off0, off1);
      return true;

    case PLUS_EXPR:
    case MINUS_EXPR:
      split_constant_offset (op0, &var0, &off0, &op0_range, cache, limit);
      split_constant_offset (op1, &var1, &off1, &op1_range, cache, limit);
      *off = size_binop (code, off0, off1);
      if (!compute_distributive_range (type, op0_range, code, op1_range,
				       off, result_range))
	return false;
      *var = fold_build2 (code, sizetype, var0, var1);
      return true;

    case MULT_EXPR:
      if (TREE_CODE (op1) != INTEGER_CST)
	return false;

      split_constant_offset (op0, &var0, &off0, &op0_range, cache, limit);
      op1_range.set (op1, op1);
      *off = size_binop (MULT_EXPR, off0, fold_convert (ssizetype, op1));
      if (!compute_distributive_range (type, op0_range, code, op1_range,
				       off, result_range))
	return false;
      *var = fold_build2 (MULT_EXPR, sizetype, var0,
			  fold_convert (sizetype, op1));
      return true;

    case ADDR_EXPR:
      {
	tree base, poffset;
	poly_int64 pbitsize, pbitpos, pbytepos;
	machine_mode pmode;
	int punsignedp, preversep, pvolatilep;

	op0 = TREE_OPERAND (op0, 0);
	base
	  = get_inner_reference (op0, &pbitsize, &pbitpos, &poffset, &pmode,
				 &punsignedp, &preversep, &pvolatilep);

	if (!multiple_p (pbitpos, BITS_PER_UNIT, &pbytepos))
	  return false;
	base = build_fold_addr_expr (base);
	off0 = ssize_int (pbytepos);

	if (poffset)
	  {
	    split_constant_offset (poffset, &poffset, &off1, nullptr,
				   cache, limit);
	    off0 = size_binop (PLUS_EXPR, off0, off1);
	    base = fold_build_pointer_plus (base, poffset);
	  }

	var0 = fold_convert (type, base);

	/* If variable-length types are involved, punt: otherwise casts
	   might be converted into ARRAY_REFs in gimplify_conversion.
	   Computing such an ARRAY_REF's element size would require a
	   TYPE_SIZE_UNIT that possibly no longer appears in the current
	   GIMPLE to resurface.  This perhaps could run
	   if (CONVERT_EXPR_P (var0))
	     {
	       gimplify_conversion (&var0);
	       // Attempt to fill in the element size of any ARRAY_REF
	       // found within var0 from the corresponding ARRAY_REF
	       // embedded in op; if unsuccessful, just punt.
	     }  */
	while (POINTER_TYPE_P (type))
	  type = TREE_TYPE (type);
	if (int_size_in_bytes (type) < 0)
	  return false;

	*var = var0;
	*off = off0;
	return true;
      }

    case SSA_NAME:
      {
	if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (op0))
	  return false;

	gimple *def_stmt = SSA_NAME_DEF_STMT (op0);
	enum tree_code subcode;

	if (gimple_code (def_stmt) != GIMPLE_ASSIGN)
	  return false;

	subcode = gimple_assign_rhs_code (def_stmt);

	/* We are using a cache to avoid un-CSEing large amounts of code.  */
	bool use_cache = false;
	if (!has_single_use (op0)
	    && (subcode == POINTER_PLUS_EXPR
		|| subcode == PLUS_EXPR
		|| subcode == MINUS_EXPR
		|| subcode == MULT_EXPR
		|| subcode == ADDR_EXPR
		|| CONVERT_EXPR_CODE_P (subcode)))
	  {
	    use_cache = true;
	    bool existed;
	    std::pair<tree, tree> &e = cache.get_or_insert (op0, &existed);
	    if (existed)
	      {
		if (integer_zerop (e.second))
		  return false;
		*var = e.first;
		*off = e.second;
		/* The caller sets the range in this case.  */
		return true;
	      }
	    e = std::make_pair (op0, ssize_int (0));
	  }

	if (*limit == 0)
	  return false;
	--*limit;

	var0 = gimple_assign_rhs1 (def_stmt);
	var1 = gimple_assign_rhs2 (def_stmt);

	bool res = split_constant_offset_1 (type, var0, subcode, var1,
					    var, off, nullptr, cache, limit);
	if (res && use_cache)
	  *cache.get (op0) = std::make_pair (*var, *off);
	/* The caller sets the range in this case.  */
	return res;
      }
    CASE_CONVERT:
      {
	/* We can only handle the following conversions:

	   - Conversions from one pointer type to another pointer type.

	   - Conversions from one non-trapping integral type to another
	     non-trapping integral type.  In this case, the recursive
	     call makes sure that:

	       (sizetype) OP0

	     can be expressed as a sizetype operation involving VAR and OFF,
	     and all we need to do is check whether:

	       (sizetype) OP0 == (sizetype) (TYPE) OP0

	   - Conversions from a non-trapping sizetype-sized integral type to
	     a like-sized pointer type.  In this case, the recursive call
	     makes sure that:

	       (sizetype) OP0 == *VAR + (sizetype) *OFF

	     and we can convert that to:

	       POINTER_PLUS <(TYPE) *VAR, (sizetype) *OFF>

	   - Conversions from a sizetype-sized pointer type to a like-sized
	     non-trapping integral type.  In this case, the recursive call
	     makes sure that:

	       OP0 == POINTER_PLUS <*VAR, (sizetype) *OFF>

	     where the POINTER_PLUS and *VAR have the same precision as
	     TYPE (and the same precision as sizetype).  Then:

	       (sizetype) (TYPE) OP0 == (sizetype) *VAR + (sizetype) *OFF.  */
	tree itype = TREE_TYPE (op0);
	if ((POINTER_TYPE_P (itype)
	     || (INTEGRAL_TYPE_P (itype) && !TYPE_OVERFLOW_TRAPS (itype)))
	    && (POINTER_TYPE_P (type)
		|| (INTEGRAL_TYPE_P (type) && !TYPE_OVERFLOW_TRAPS (type)))
	    && (POINTER_TYPE_P (type) == POINTER_TYPE_P (itype)
		|| (TYPE_PRECISION (type) == TYPE_PRECISION (sizetype)
		    && TYPE_PRECISION (itype) == TYPE_PRECISION (sizetype))))
	  {
	    if (POINTER_TYPE_P (type))
	      {
		split_constant_offset (op0, var, off, nullptr, cache, limit);
		*var = fold_convert (type, *var);
	      }
	    else if (POINTER_TYPE_P (itype))
	      {
		split_constant_offset (op0, var, off, nullptr, cache, limit);
		*var = fold_convert (sizetype, *var);
	      }
	    else
	      {
		split_constant_offset (op0, var, off, &op0_range,
				       cache, limit);
		if (!nop_conversion_for_offset_p (type, itype, op0_range))
		  return false;
		if (result_range)
		  {
		    *result_range = op0_range;
		    range_cast (*result_range, type);
		  }
	      }
	    return true;
	  }
	return false;
      }

    default:
      return false;
    }
}

/* If EXP has pointer type, try to express it as:

     POINTER_PLUS <*VAR, (sizetype) *OFF>

   where:

   - *VAR has the same type as EXP
   - *OFF is a constant of type ssizetype.

   If EXP has an integral type, try to express (sizetype) EXP as:

     *VAR + (sizetype) *OFF

   where:

   - *VAR has type sizetype
   - *OFF is a constant of type ssizetype.

   If EXP_RANGE is nonnull, set it to the range of EXP.

   CACHE caches {*VAR, *OFF} pairs for SSA names that we've previously
   visited.  LIMIT counts down the number of SSA names that we are
   allowed to process before giving up.  */

static void
split_constant_offset (tree exp, tree *var, tree *off, value_range *exp_range,
		       hash_map<tree, std::pair<tree, tree> > &cache,
		       unsigned *limit)
{
  tree type = TREE_TYPE (exp), op0, op1;
  enum tree_code code;

  code = TREE_CODE (exp);
  if (exp_range)
    {
      *exp_range = type;
      if (code == SSA_NAME)
	{
	  wide_int var_min, var_max;
	  value_range_kind vr_kind = get_range_info (exp, &var_min, &var_max);
	  wide_int var_nonzero = get_nonzero_bits (exp);
	  vr_kind = intersect_range_with_nonzero_bits (vr_kind,
						       &var_min, &var_max,
						       var_nonzero,
						       TYPE_SIGN (type));
	  if (vr_kind == VR_RANGE)
	    *exp_range = value_range (type, var_min, var_max);
	}
    }

  if (!tree_is_chrec (exp)
      && get_gimple_rhs_class (TREE_CODE (exp)) != GIMPLE_TERNARY_RHS)
    {
      extract_ops_from_tree (exp, &code, &op0, &op1);
      if (split_constant_offset_1 (type, op0, code, op1, var, off,
				   exp_range, cache, limit))
	return;
    }

  *var = exp;
  if (INTEGRAL_TYPE_P (type))
    *var = fold_convert (sizetype, *var);
  *off = ssize_int (0);
  if (exp_range && code != SSA_NAME)
    {
      wide_int var_min, var_max;
      if (determine_value_range (exp, &var_min, &var_max) == VR_RANGE)
	*exp_range = value_range (type, var_min, var_max);
    }
}

/* Expresses EXP as VAR + OFF, where OFF is a constant.  VAR has the same
   type as EXP while OFF has type ssizetype.  */
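
/* For example (with q_1 and i_2 standing for arbitrary SSA names), a
   pointer EXP = q_1 + 16B splits into *VAR = q_1 and *OFF = 16, and an
   integral EXP = i_2 * 4 + 8 splits into *VAR = (sizetype) i_2 * 4 and
   *OFF = 8, provided the value ranges let the operations distribute
   over the cast to sizetype (see compute_distributive_range).  */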

void
split_constant_offset (tree exp, tree *var, tree *off)
{
  unsigned limit = param_ssa_name_def_chain_limit;
  static hash_map<tree, std::pair<tree, tree> > *cache;
  if (!cache)
    cache = new hash_map<tree, std::pair<tree, tree> > (37);
  split_constant_offset (exp, var, off, nullptr, *cache, &limit);
  *var = fold_convert (TREE_TYPE (exp), *var);
  cache->empty ();
}

/* Returns the address ADDR of an object in a canonical shape (without nop
   casts, and with type of pointer to the object).  */

static tree
canonicalize_base_object_address (tree addr)
{
  tree orig = addr;

  STRIP_NOPS (addr);

  /* The base address may be obtained by casting from an integer; in that
     case keep the cast.  */
  if (!POINTER_TYPE_P (TREE_TYPE (addr)))
    return orig;

  if (TREE_CODE (addr) != ADDR_EXPR)
    return addr;

  return build_fold_addr_expr (TREE_OPERAND (addr, 0));
}

/* Analyze the behavior of memory reference REF within STMT.
   There are two modes:

   - BB analysis.  In this case we simply split the address into base,
     init and offset components, without reference to any containing loop.
     The resulting base and offset are general expressions and they can
     vary arbitrarily from one iteration of the containing loop to the next.
     The step is always zero.

   - loop analysis.  In this case we analyze the reference both wrt LOOP
     and on the basis that the reference occurs (is "used") in LOOP;
     see the comment above analyze_scalar_evolution_in_loop for more
     information about this distinction.  The base, init, offset and
     step fields are all invariant in LOOP.

   Perform BB analysis if LOOP is null, or if LOOP is the function's
   dummy outermost loop.  In other cases perform loop analysis.

   Return true if the analysis succeeded and store the results in DRB if so.
   BB analysis can only fail for bitfield or reversed-storage accesses.  */
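
/* For example, for the access a[i + 3] in a loop over i (assuming i
   starts at 0 and steps by 1) with 4-byte elements, loop analysis
   yields base_address = &a, offset = 0, init = 12 and step = 4.  */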

opt_result
dr_analyze_innermost (innermost_loop_behavior *drb, tree ref,
		      class loop *loop, const gimple *stmt)
{
  poly_int64 pbitsize, pbitpos;
  tree base, poffset;
  machine_mode pmode;
  int punsignedp, preversep, pvolatilep;
  affine_iv base_iv, offset_iv;
  tree init, dinit, step;
  bool in_loop = (loop && loop->num);

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "analyze_innermost: ");

  base = get_inner_reference (ref, &pbitsize, &pbitpos, &poffset, &pmode,
			      &punsignedp, &preversep, &pvolatilep);
  gcc_assert (base != NULL_TREE);

  poly_int64 pbytepos;
  if (!multiple_p (pbitpos, BITS_PER_UNIT, &pbytepos))
    return opt_result::failure_at (stmt,
				   "failed: bit offset alignment.\n");

  if (preversep)
    return opt_result::failure_at (stmt,
				   "failed: reverse storage order.\n");

  /* Calculate the alignment and misalignment for the inner reference.  */
  unsigned HOST_WIDE_INT bit_base_misalignment;
  unsigned int bit_base_alignment;
  get_object_alignment_1 (base, &bit_base_alignment, &bit_base_misalignment);

  /* There are no bitfield references remaining in BASE, so the values
     we got back must be whole bytes.  */
  gcc_assert (bit_base_alignment % BITS_PER_UNIT == 0
	      && bit_base_misalignment % BITS_PER_UNIT == 0);
  unsigned int base_alignment = bit_base_alignment / BITS_PER_UNIT;
  poly_int64 base_misalignment = bit_base_misalignment / BITS_PER_UNIT;

  if (TREE_CODE (base) == MEM_REF)
    {
      if (!integer_zerop (TREE_OPERAND (base, 1)))
	{
	  /* Subtract MOFF from the base and add it to POFFSET instead.
	     Adjust the misalignment to reflect the amount we subtracted.  */
	  poly_offset_int moff = mem_ref_offset (base);
	  base_misalignment -= moff.force_shwi ();
	  tree mofft = wide_int_to_tree (sizetype, moff);
	  if (!poffset)
	    poffset = mofft;
	  else
	    poffset = size_binop (PLUS_EXPR, poffset, mofft);
	}
      base = TREE_OPERAND (base, 0);
    }
  else
    base = build_fold_addr_expr (base);

  if (in_loop)
    {
      if (!simple_iv (loop, loop, base, &base_iv, true))
	return opt_result::failure_at
	  (stmt, "failed: evolution of base is not affine.\n");
    }
  else
    {
      base_iv.base = base;
      base_iv.step = ssize_int (0);
      base_iv.no_overflow = true;
    }

  if (!poffset)
    {
      offset_iv.base = ssize_int (0);
      offset_iv.step = ssize_int (0);
    }
  else
    {
      if (!in_loop)
	{
	  offset_iv.base = poffset;
	  offset_iv.step = ssize_int (0);
	}
      else if (!simple_iv (loop, loop, poffset, &offset_iv, true))
	return opt_result::failure_at
	  (stmt, "failed: evolution of offset is not affine.\n");
    }

  init = ssize_int (pbytepos);

  /* Subtract any constant component from the base and add it to INIT instead.
     Adjust the misalignment to reflect the amount we subtracted.  */
  split_constant_offset (base_iv.base, &base_iv.base, &dinit);
  init = size_binop (PLUS_EXPR, init, dinit);
  base_misalignment -= TREE_INT_CST_LOW (dinit);

  split_constant_offset (offset_iv.base, &offset_iv.base, &dinit);
  init = size_binop (PLUS_EXPR, init, dinit);

  step = size_binop (PLUS_EXPR,
		     fold_convert (ssizetype, base_iv.step),
		     fold_convert (ssizetype, offset_iv.step));

  base = canonicalize_base_object_address (base_iv.base);

  /* See if get_pointer_alignment can guarantee a higher alignment than
     the one we calculated above.  */
  unsigned HOST_WIDE_INT alt_misalignment;
  unsigned int alt_alignment;
  get_pointer_alignment_1 (base, &alt_alignment, &alt_misalignment);

  /* As above, these values must be whole bytes.  */
  gcc_assert (alt_alignment % BITS_PER_UNIT == 0
	      && alt_misalignment % BITS_PER_UNIT == 0);
  alt_alignment /= BITS_PER_UNIT;
  alt_misalignment /= BITS_PER_UNIT;

  if (base_alignment < alt_alignment)
    {
      base_alignment = alt_alignment;
      base_misalignment = alt_misalignment;
    }

  drb->base_address = base;
  drb->offset = fold_convert (ssizetype, offset_iv.base);
  drb->init = init;
  drb->step = step;
  if (known_misalignment (base_misalignment, base_alignment,
			  &drb->base_misalignment))
    drb->base_alignment = base_alignment;
  else
    {
      drb->base_alignment = known_alignment (base_misalignment);
      drb->base_misalignment = 0;
    }
  drb->offset_alignment = highest_pow2_factor (offset_iv.base);
  drb->step_alignment = highest_pow2_factor (step);

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "success.\n");

  return opt_result::success ();
}

/* Return true if OP is a valid component reference for a DR access
   function.  This accepts a subset of what handled_component_p accepts.  */

static bool
access_fn_component_p (tree op)
{
  switch (TREE_CODE (op))
    {
    case REALPART_EXPR:
    case IMAGPART_EXPR:
    case ARRAY_REF:
      return true;

    case COMPONENT_REF:
      return TREE_CODE (TREE_TYPE (TREE_OPERAND (op, 0))) == RECORD_TYPE;

    default:
      return false;
    }
}

/* Returns whether a component reference accepted by
   access_fn_component_p can have BASE as its base.  */

static bool
base_supports_access_fn_components_p (tree base)
{
  switch (TREE_CODE (TREE_TYPE (base)))
    {
    case COMPLEX_TYPE:
    case ARRAY_TYPE:
    case RECORD_TYPE:
      return true;
    default:
      return false;
    }
}

/* Determines the base object and the list of indices of memory reference
   DR, analyzed in LOOP and instantiated before NEST.  */

static void
dr_analyze_indices (struct data_reference *dr, edge nest, loop_p loop)
{
  vec<tree> access_fns = vNULL;
  tree ref, op;
  tree base, off, access_fn;

  /* If analyzing a basic-block there are no indices to analyze
     and thus no access functions.  */
  if (!nest)
    {
      DR_BASE_OBJECT (dr) = DR_REF (dr);
      DR_ACCESS_FNS (dr).create (0);
      return;
    }

  ref = DR_REF (dr);

  /* REALPART_EXPR and IMAGPART_EXPR can be handled like accesses
     into a two element array with a constant index.  The base is
     then just the immediate underlying object.  */
  if (TREE_CODE (ref) == REALPART_EXPR)
    {
      ref = TREE_OPERAND (ref, 0);
      access_fns.safe_push (integer_zero_node);
    }
  else if (TREE_CODE (ref) == IMAGPART_EXPR)
    {
      ref = TREE_OPERAND (ref, 0);
      access_fns.safe_push (integer_one_node);
    }

  /* Analyze access functions of dimensions we know to be independent.
     The list of component references handled here should be kept in
     sync with access_fn_component_p.  */
  while (handled_component_p (ref))
    {
      if (TREE_CODE (ref) == ARRAY_REF)
	{
	  op = TREE_OPERAND (ref, 1);
	  access_fn = analyze_scalar_evolution (loop, op);
	  access_fn = instantiate_scev (nest, loop, access_fn);
	  access_fns.safe_push (access_fn);
	}
      else if (TREE_CODE (ref) == COMPONENT_REF
	       && TREE_CODE (TREE_TYPE (TREE_OPERAND (ref, 0))) == RECORD_TYPE)
	{
	  /* For COMPONENT_REFs of records (but not unions!) use the
	     FIELD_DECL offset as constant access function so we can
	     disambiguate a[i].f1 and a[i].f2.  */
	  tree off = component_ref_field_offset (ref);
	  off = size_binop (PLUS_EXPR,
			    size_binop (MULT_EXPR,
					fold_convert (bitsizetype, off),
					bitsize_int (BITS_PER_UNIT)),
			    DECL_FIELD_BIT_OFFSET (TREE_OPERAND (ref, 1)));
	  access_fns.safe_push (off);
	}
      else
	/* If we have an unhandled component we could not translate
	   to an access function stop analyzing.  We have determined
	   our base object in this case.  */
	break;

      ref = TREE_OPERAND (ref, 0);
    }

  /* If the address operand of a MEM_REF base has an evolution in the
     analyzed nest, add it as an additional independent access-function.  */
  if (TREE_CODE (ref) == MEM_REF)
    {
      op = TREE_OPERAND (ref, 0);
      access_fn = analyze_scalar_evolution (loop, op);
      access_fn = instantiate_scev (nest, loop, access_fn);
      if (TREE_CODE (access_fn) == POLYNOMIAL_CHREC)
	{
	  tree orig_type;
	  tree memoff = TREE_OPERAND (ref, 1);
	  base = initial_condition (access_fn);
	  orig_type = TREE_TYPE (base);
	  STRIP_USELESS_TYPE_CONVERSION (base);
	  split_constant_offset (base, &base, &off);
	  STRIP_USELESS_TYPE_CONVERSION (base);
	  /* Fold the MEM_REF offset into the evolution's initial
	     value to make more bases comparable.  */
	  if (!integer_zerop (memoff))
	    {
	      off = size_binop (PLUS_EXPR, off,
				fold_convert (ssizetype, memoff));
	      memoff = build_int_cst (TREE_TYPE (memoff), 0);
	    }
	  /* Adjust the offset so it is a multiple of the access type
	     size and thus we separate bases that can possibly be used
	     to produce partial overlaps (which the access_fn machinery
	     cannot handle).  */
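	  /* For example, with a 4-byte access type and OFF == 7, the
	     remainder 3 is moved to MEMOFF and OFF becomes 4.  */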
	  wide_int rem;
	  if (TYPE_SIZE_UNIT (TREE_TYPE (ref))
	      && TREE_CODE (TYPE_SIZE_UNIT (TREE_TYPE (ref))) == INTEGER_CST
	      && !integer_zerop (TYPE_SIZE_UNIT (TREE_TYPE (ref))))
	    rem = wi::mod_trunc
	      (wi::to_wide (off),
	       wi::to_wide (TYPE_SIZE_UNIT (TREE_TYPE (ref))),
	       SIGNED);
	  else
	    /* If we can't compute the remainder simply force the initial
	       condition to zero.  */
	    rem = wi::to_wide (off);
	  off = wide_int_to_tree (ssizetype, wi::to_wide (off) - rem);
	  memoff = wide_int_to_tree (TREE_TYPE (memoff), rem);
	  /* And finally replace the initial condition.  */
	  access_fn = chrec_replace_initial_condition
	    (access_fn, fold_convert (orig_type, off));
	  /* ??? This is still not a suitable base object for
	     dr_may_alias_p - the base object needs to be an
	     access that covers the object as a whole.  With
	     an evolution in the pointer this cannot be
	     guaranteed.
	     As a band-aid, mark the access so we can special-case
	     it in dr_may_alias_p.  */
	  tree old = ref;
	  ref = fold_build2_loc (EXPR_LOCATION (ref),
				 MEM_REF, TREE_TYPE (ref),
				 base, memoff);
	  MR_DEPENDENCE_CLIQUE (ref) = MR_DEPENDENCE_CLIQUE (old);
	  MR_DEPENDENCE_BASE (ref) = MR_DEPENDENCE_BASE (old);
	  DR_UNCONSTRAINED_BASE (dr) = true;
	  access_fns.safe_push (access_fn);
	}
    }
  else if (DECL_P (ref))
    {
      /* Canonicalize DR_BASE_OBJECT to MEM_REF form.  */
      ref = build2 (MEM_REF, TREE_TYPE (ref),
		    build_fold_addr_expr (ref),
		    build_int_cst (reference_alias_ptr_type (ref), 0));
    }

  DR_BASE_OBJECT (dr) = ref;
  DR_ACCESS_FNS (dr) = access_fns;
}

/* Extracts the alias analysis information from the memory reference DR.  */

static void
dr_analyze_alias (struct data_reference *dr)
{
  tree ref = DR_REF (dr);
  tree base = get_base_address (ref), addr;

  if (INDIRECT_REF_P (base)
      || TREE_CODE (base) == MEM_REF)
    {
      addr = TREE_OPERAND (base, 0);
      if (TREE_CODE (addr) == SSA_NAME)
	DR_PTR_INFO (dr) = SSA_NAME_PTR_INFO (addr);
    }
}

/* Frees data reference DR.  */

void
free_data_ref (data_reference_p dr)
{
  DR_ACCESS_FNS (dr).release ();
  free (dr);
}

/* Analyze memory reference MEMREF, which is accessed in STMT.
   The reference is a read if IS_READ is true, otherwise it is a write.
   IS_CONDITIONAL_IN_STMT indicates that the reference is conditional
   within STMT, i.e. that it might not occur even if STMT is executed
   and runs to completion.

   Return the data_reference description of MEMREF.  NEST is the outermost
   loop in which the reference should be instantiated, LOOP is the loop
   in which the data reference should be analyzed.  */

struct data_reference *
create_data_ref (edge nest, loop_p loop, tree memref, gimple *stmt,
		 bool is_read, bool is_conditional_in_stmt)
{
  struct data_reference *dr;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Creating dr for ");
      print_generic_expr (dump_file, memref, TDF_SLIM);
      fprintf (dump_file, "\n");
    }

  dr = XCNEW (struct data_reference);
  DR_STMT (dr) = stmt;
  DR_REF (dr) = memref;
  DR_IS_READ (dr) = is_read;
  DR_IS_CONDITIONAL_IN_STMT (dr) = is_conditional_in_stmt;

  dr_analyze_innermost (&DR_INNERMOST (dr), memref,
			nest != NULL ? loop : NULL, stmt);
  dr_analyze_indices (dr, nest, loop);
  dr_analyze_alias (dr);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      unsigned i;
      fprintf (dump_file, "\tbase_address: ");
      print_generic_expr (dump_file, DR_BASE_ADDRESS (dr), TDF_SLIM);
      fprintf (dump_file, "\n\toffset from base address: ");
      print_generic_expr (dump_file, DR_OFFSET (dr), TDF_SLIM);
      fprintf (dump_file, "\n\tconstant offset from base address: ");
      print_generic_expr (dump_file, DR_INIT (dr), TDF_SLIM);
      fprintf (dump_file, "\n\tstep: ");
      print_generic_expr (dump_file, DR_STEP (dr), TDF_SLIM);
      fprintf (dump_file, "\n\tbase alignment: %d", DR_BASE_ALIGNMENT (dr));
      fprintf (dump_file, "\n\tbase misalignment: %d",
	       DR_BASE_MISALIGNMENT (dr));
      fprintf (dump_file, "\n\toffset alignment: %d",
	       DR_OFFSET_ALIGNMENT (dr));
      fprintf (dump_file, "\n\tstep alignment: %d", DR_STEP_ALIGNMENT (dr));
      fprintf (dump_file, "\n\tbase_object: ");
      print_generic_expr (dump_file, DR_BASE_OBJECT (dr), TDF_SLIM);
      fprintf (dump_file, "\n");
      for (i = 0; i < DR_NUM_DIMENSIONS (dr); i++)
	{
	  fprintf (dump_file, "\tAccess function %d: ", i);
	  print_generic_stmt (dump_file, DR_ACCESS_FN (dr, i), TDF_SLIM);
	}
    }

  return dr;
}

/* A helper function to compute the order between two tree expressions
   T1 and T2.  This is used by comparator functions that sort objects
   based on the order of tree expressions.  Returns -1, 0, or 1.  */

int
data_ref_compare_tree (tree t1, tree t2)
{
  int i, cmp;
  enum tree_code code;
  char tclass;

  if (t1 == t2)
    return 0;
  if (t1 == NULL)
    return -1;
  if (t2 == NULL)
    return 1;

  STRIP_USELESS_TYPE_CONVERSION (t1);
  STRIP_USELESS_TYPE_CONVERSION (t2);
  if (t1 == t2)
    return 0;

  if (TREE_CODE (t1) != TREE_CODE (t2)
      && ! (CONVERT_EXPR_P (t1) && CONVERT_EXPR_P (t2)))
    return TREE_CODE (t1) < TREE_CODE (t2) ? -1 : 1;

  code = TREE_CODE (t1);
  switch (code)
    {
    case INTEGER_CST:
      return tree_int_cst_compare (t1, t2);

    case STRING_CST:
      if (TREE_STRING_LENGTH (t1) != TREE_STRING_LENGTH (t2))
	return TREE_STRING_LENGTH (t1) < TREE_STRING_LENGTH (t2) ? -1 : 1;
      return memcmp (TREE_STRING_POINTER (t1), TREE_STRING_POINTER (t2),
		     TREE_STRING_LENGTH (t1));

    case SSA_NAME:
      if (SSA_NAME_VERSION (t1) != SSA_NAME_VERSION (t2))
	return SSA_NAME_VERSION (t1) < SSA_NAME_VERSION (t2) ? -1 : 1;
      break;

    default:
      if (POLY_INT_CST_P (t1))
	return compare_sizes_for_sort (wi::to_poly_widest (t1),
				       wi::to_poly_widest (t2));

      tclass = TREE_CODE_CLASS (code);

      /* For decls, compare their UIDs.  */
      if (tclass == tcc_declaration)
	{
	  if (DECL_UID (t1) != DECL_UID (t2))
	    return DECL_UID (t1) < DECL_UID (t2) ? -1 : 1;
	  break;
	}
      /* For expressions, compare their operands recursively.  */
      else if (IS_EXPR_CODE_CLASS (tclass))
	{
	  for (i = TREE_OPERAND_LENGTH (t1) - 1; i >= 0; --i)
	    {
	      cmp = data_ref_compare_tree (TREE_OPERAND (t1, i),
					   TREE_OPERAND (t2, i));
	      if (cmp != 0)
		return cmp;
	    }
	}
      else
	gcc_unreachable ();
    }

  return 0;
}

/* Return TRUE if it's possible to resolve data dependence DDR by a
   runtime alias check.  */

opt_result
runtime_alias_check_p (ddr_p ddr, class loop *loop, bool speed_p)
{
  if (dump_enabled_p ())
    dump_printf (MSG_NOTE,
		 "consider run-time aliasing test between %T and %T\n",
		 DR_REF (DDR_A (ddr)), DR_REF (DDR_B (ddr)));

  if (!speed_p)
    return opt_result::failure_at (DR_STMT (DDR_A (ddr)),
				   "runtime alias check not supported when"
				   " optimizing for size.\n");

  /* FORNOW: We don't support versioning with outer-loop in either
     vectorization or loop distribution.  */
  if (loop != NULL && loop->inner != NULL)
    return opt_result::failure_at (DR_STMT (DDR_A (ddr)),
				   "runtime alias check not supported for"
				   " outer loop.\n");

  return opt_result::success ();
}

/* Operator == between two dr_with_seg_len objects.

   This equality operator is used to make sure two data refs
   are the same, so that we will consider combining the alias
   checks of the two pairs of data-dependent data refs.  */

static bool
operator == (const dr_with_seg_len& d1,
	     const dr_with_seg_len& d2)
{
  return (operand_equal_p (DR_BASE_ADDRESS (d1.dr),
			   DR_BASE_ADDRESS (d2.dr), 0)
	  && data_ref_compare_tree (DR_OFFSET (d1.dr), DR_OFFSET (d2.dr)) == 0
	  && data_ref_compare_tree (DR_INIT (d1.dr), DR_INIT (d2.dr)) == 0
	  && data_ref_compare_tree (d1.seg_len, d2.seg_len) == 0
	  && known_eq (d1.access_size, d2.access_size)
	  && d1.align == d2.align);
}

/* Comparison function for sorting objects of dr_with_seg_len_pair_t
   so that we can combine aliasing checks in one scan.  */

static int
comp_dr_with_seg_len_pair (const void *pa_, const void *pb_)
{
  const dr_with_seg_len_pair_t* pa = (const dr_with_seg_len_pair_t *) pa_;
  const dr_with_seg_len_pair_t* pb = (const dr_with_seg_len_pair_t *) pb_;
  const dr_with_seg_len &a1 = pa->first, &a2 = pa->second;
  const dr_with_seg_len &b1 = pb->first, &b2 = pb->second;

  /* For DR pairs (a, b) and (c, d), we only consider merging the alias
     checks if a and c have the same base address and step, and b and d
     have the same base address and step.  Therefore, if either a&c or
     b&d don't have the same address and step, we don't care about the
     order of those two pairs after sorting.  */
  int comp_res;

  if ((comp_res = data_ref_compare_tree (DR_BASE_ADDRESS (a1.dr),
					 DR_BASE_ADDRESS (b1.dr))) != 0)
    return comp_res;
  if ((comp_res = data_ref_compare_tree (DR_BASE_ADDRESS (a2.dr),
					 DR_BASE_ADDRESS (b2.dr))) != 0)
    return comp_res;
  if ((comp_res = data_ref_compare_tree (DR_STEP (a1.dr),
					 DR_STEP (b1.dr))) != 0)
    return comp_res;
  if ((comp_res = data_ref_compare_tree (DR_STEP (a2.dr),
					 DR_STEP (b2.dr))) != 0)
    return comp_res;
  if ((comp_res = data_ref_compare_tree (DR_OFFSET (a1.dr),
					 DR_OFFSET (b1.dr))) != 0)
    return comp_res;
  if ((comp_res = data_ref_compare_tree (DR_INIT (a1.dr),
					 DR_INIT (b1.dr))) != 0)
    return comp_res;
  if ((comp_res = data_ref_compare_tree (DR_OFFSET (a2.dr),
					 DR_OFFSET (b2.dr))) != 0)
    return comp_res;
  if ((comp_res = data_ref_compare_tree (DR_INIT (a2.dr),
					 DR_INIT (b2.dr))) != 0)
    return comp_res;

  return 0;
}

/* Dump information about ALIAS_PAIR, indenting each line by INDENT.  */

static void
dump_alias_pair (dr_with_seg_len_pair_t *alias_pair, const char *indent)
{
  dump_printf (MSG_NOTE, "%sreference: %T vs. %T\n", indent,
	       DR_REF (alias_pair->first.dr),
	       DR_REF (alias_pair->second.dr));

  dump_printf (MSG_NOTE, "%ssegment length: %T", indent,
	       alias_pair->first.seg_len);
  if (!operand_equal_p (alias_pair->first.seg_len,
			alias_pair->second.seg_len, 0))
    dump_printf (MSG_NOTE, " vs. %T", alias_pair->second.seg_len);

  dump_printf (MSG_NOTE, "\n%saccess size: ", indent);
  dump_dec (MSG_NOTE, alias_pair->first.access_size);
  if (maybe_ne (alias_pair->first.access_size, alias_pair->second.access_size))
    {
      dump_printf (MSG_NOTE, " vs. ");
      dump_dec (MSG_NOTE, alias_pair->second.access_size);
    }

  dump_printf (MSG_NOTE, "\n%salignment: %d", indent,
	       alias_pair->first.align);
  if (alias_pair->first.align != alias_pair->second.align)
    dump_printf (MSG_NOTE, " vs. %d", alias_pair->second.align);

  dump_printf (MSG_NOTE, "\n%sflags: ", indent);
  if (alias_pair->flags & DR_ALIAS_RAW)
    dump_printf (MSG_NOTE, " RAW");
  if (alias_pair->flags & DR_ALIAS_WAR)
    dump_printf (MSG_NOTE, " WAR");
  if (alias_pair->flags & DR_ALIAS_WAW)
    dump_printf (MSG_NOTE, " WAW");
  if (alias_pair->flags & DR_ALIAS_ARBITRARY)
    dump_printf (MSG_NOTE, " ARBITRARY");
  if (alias_pair->flags & DR_ALIAS_SWAPPED)
    dump_printf (MSG_NOTE, " SWAPPED");
  if (alias_pair->flags & DR_ALIAS_UNSWAPPED)
    dump_printf (MSG_NOTE, " UNSWAPPED");
  if (alias_pair->flags & DR_ALIAS_MIXED_STEPS)
    dump_printf (MSG_NOTE, " MIXED_STEPS");
  if (alias_pair->flags == 0)
    dump_printf (MSG_NOTE, " <none>");
  dump_printf (MSG_NOTE, "\n");
}

/* Merge alias checks recorded in ALIAS_PAIRS and remove redundant ones.
   FACTOR is the number of iterations for which each data reference is
   accessed.

   Basically, for each pair of dependent data refs store_ptr_0 & load_ptr_0,
   we create an expression:

   ((store_ptr_0 + store_segment_length_0) <= load_ptr_0)
   || (load_ptr_0 + load_segment_length_0) <= store_ptr_0))

   for aliasing checks.  However, in some cases we can decrease the number
   of checks by combining two checks into one.  For example, suppose we have
   another pair of data refs store_ptr_0 & load_ptr_1, and if the following
   condition is satisfied:

   load_ptr_0 < load_ptr_1  &&
   load_ptr_1 - load_ptr_0 - load_segment_length_0 < store_segment_length_0

   (this condition means that, in each iteration of the vectorized loop,
   the memory accessed by store_ptr_0 cannot lie between the memory of
   load_ptr_0 and load_ptr_1),

   then we can use only the following expression to finish the aliasing
   checks between store_ptr_0 & load_ptr_0 and store_ptr_0 & load_ptr_1:

   ((store_ptr_0 + store_segment_length_0) <= load_ptr_0)
   || (load_ptr_1 + load_segment_length_1 <= store_ptr_0))

   Note that we only consider the case in which load_ptr_0 and load_ptr_1
   have the same base address.  */
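
/* For example, if load_ptr_1 == load_ptr_0 + 8 with
   load_segment_length_0 == 16 and store_segment_length_0 == 16, the
   condition 8 - 16 < 16 holds, so the two checks against store_ptr_0
   collapse into the single combined check shown above.  */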

void
prune_runtime_alias_test_list (vec<dr_with_seg_len_pair_t> *alias_pairs,
			       poly_uint64)
{
  if (alias_pairs->is_empty ())
    return;

  /* Canonicalize each pair so that the base components are ordered wrt
     data_ref_compare_tree.  This allows the loop below to merge more
     cases.  */
  unsigned int i;
  dr_with_seg_len_pair_t *alias_pair;
  FOR_EACH_VEC_ELT (*alias_pairs, i, alias_pair)
    {
      data_reference_p dr_a = alias_pair->first.dr;
      data_reference_p dr_b = alias_pair->second.dr;
      int comp_res = data_ref_compare_tree (DR_BASE_ADDRESS (dr_a),
					    DR_BASE_ADDRESS (dr_b));
      if (comp_res == 0)
	comp_res = data_ref_compare_tree (DR_OFFSET (dr_a), DR_OFFSET (dr_b));
      if (comp_res == 0)
	comp_res = data_ref_compare_tree (DR_INIT (dr_a), DR_INIT (dr_b));
      if (comp_res > 0)
	{
	  std::swap (alias_pair->first, alias_pair->second);
	  alias_pair->flags |= DR_ALIAS_SWAPPED;
	}
      else
	alias_pair->flags |= DR_ALIAS_UNSWAPPED;
    }

  /* Sort the collected data ref pairs so that we can scan them once to
     combine all possible aliasing checks.  */
  alias_pairs->qsort (comp_dr_with_seg_len_pair);

  /* Scan the sorted dr pairs and check if we can combine alias checks
     of two neighboring dr pairs.  */
  unsigned int last = 0;
  for (i = 1; i < alias_pairs->length (); ++i)
    {
      /* Deal with two ddrs (dr_a1, dr_b1) and (dr_a2, dr_b2).  */
      dr_with_seg_len_pair_t *alias_pair1 = &(*alias_pairs)[last];
      dr_with_seg_len_pair_t *alias_pair2 = &(*alias_pairs)[i];

      dr_with_seg_len *dr_a1 = &alias_pair1->first;
      dr_with_seg_len *dr_b1 = &alias_pair1->second;
      dr_with_seg_len *dr_a2 = &alias_pair2->first;
      dr_with_seg_len *dr_b2 = &alias_pair2->second;

      /* Remove duplicate data ref pairs.  */
      if (*dr_a1 == *dr_a2 && *dr_b1 == *dr_b2)
	{
	  if (dump_enabled_p ())
	    dump_printf (MSG_NOTE, "found equal ranges %T, %T and %T, %T\n",
			 DR_REF (dr_a1->dr), DR_REF (dr_b1->dr),
			 DR_REF (dr_a2->dr), DR_REF (dr_b2->dr));
	  alias_pair1->flags |= alias_pair2->flags;
	  continue;
	}

      /* Assume that we won't be able to merge the pairs, then correct
	 if we do.  */
      last += 1;
      if (last != i)
	(*alias_pairs)[last] = (*alias_pairs)[i];

      if (*dr_a1 == *dr_a2 || *dr_b1 == *dr_b2)
	{
1858 /* We consider the case that DR_B1 and DR_B2 are the same memrefs,
1859 and DR_A1 and DR_A2 are two consecutive memrefs. */
1860 if (*dr_a1 == *dr_a2)
1861 {
1862 std::swap (dr_a1, dr_b1);
1863 std::swap (dr_a2, dr_b2);
1864 }
1865
1866 poly_int64 init_a1, init_a2;
1867 /* Only consider cases in which the distance between the initial
1868 DR_A1 and the initial DR_A2 is known at compile time. */
1869 if (!operand_equal_p (DR_BASE_ADDRESS (dr_a1->dr),
1870 DR_BASE_ADDRESS (dr_a2->dr), 0)
1871 || !operand_equal_p (DR_OFFSET (dr_a1->dr),
1872 DR_OFFSET (dr_a2->dr), 0)
1873 || !poly_int_tree_p (DR_INIT (dr_a1->dr), &init_a1)
1874 || !poly_int_tree_p (DR_INIT (dr_a2->dr), &init_a2))
1875 continue;
1876
1877 /* Don't combine if we can't tell which one comes first. */
1878 if (!ordered_p (init_a1, init_a2))
1879 continue;
1880
1881 /* Work out what the segment length would be if we did combine
1882 DR_A1 and DR_A2:
1883
1884 - If DR_A1 and DR_A2 have equal lengths, that length is
1885 also the combined length.
1886
1887 - If DR_A1 and DR_A2 both have negative "lengths", the combined
1888 length is the lower bound on those lengths.
1889
1890 - If DR_A1 and DR_A2 both have positive lengths, the combined
1891 length is the upper bound on those lengths.
1892
1893 Other cases are unlikely to give a useful combination.
1894
1895 The lengths both have sizetype, so the sign is taken from
1896 the step instead. */
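/* For example (with assumed values): if DR_A1 and DR_A2 both have
   positive steps and segment lengths 32 and 48, the combined length
   is upper_bound (32, 48) = 48; if both have negative "lengths" -32
   and -48, it is lower_bound (-32, -48) = -48.  */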
1897 poly_uint64 new_seg_len = 0;
1898 bool new_seg_len_p = !operand_equal_p (dr_a1->seg_len,
1899 dr_a2->seg_len, 0);
1900 if (new_seg_len_p)
1901 {
1902 poly_uint64 seg_len_a1, seg_len_a2;
1903 if (!poly_int_tree_p (dr_a1->seg_len, &seg_len_a1)
1904 || !poly_int_tree_p (dr_a2->seg_len, &seg_len_a2))
1905 continue;
1906
1907 tree indicator_a = dr_direction_indicator (dr_a1->dr);
1908 if (TREE_CODE (indicator_a) != INTEGER_CST)
1909 continue;
1910
1911 tree indicator_b = dr_direction_indicator (dr_a2->dr);
1912 if (TREE_CODE (indicator_b) != INTEGER_CST)
1913 continue;
1914
1915 int sign_a = tree_int_cst_sgn (indicator_a);
1916 int sign_b = tree_int_cst_sgn (indicator_b);
1917
1918 if (sign_a <= 0 && sign_b <= 0)
1919 new_seg_len = lower_bound (seg_len_a1, seg_len_a2);
1920 else if (sign_a >= 0 && sign_b >= 0)
1921 new_seg_len = upper_bound (seg_len_a1, seg_len_a2);
1922 else
1923 continue;
1924 }
1925 /* At this point we're committed to merging the refs. */
1926
1927 /* Make sure dr_a1 starts left of dr_a2. */
1928 if (maybe_gt (init_a1, init_a2))
1929 {
1930 std::swap (*dr_a1, *dr_a2);
1931 std::swap (init_a1, init_a2);
1932 }
1933
1934 /* The DR_Bs are equal, so only the DR_As can introduce
1935 mixed steps. */
1936 if (!operand_equal_p (DR_STEP (dr_a1->dr), DR_STEP (dr_a2->dr), 0))
1937 alias_pair1->flags |= DR_ALIAS_MIXED_STEPS;
1938
1939 if (new_seg_len_p)
1940 {
1941 dr_a1->seg_len = build_int_cst (TREE_TYPE (dr_a1->seg_len),
1942 new_seg_len);
1943 dr_a1->align = MIN (dr_a1->align, known_alignment (new_seg_len));
1944 }
1945
1946 /* This is always nonnegative due to the swap above. */
1947 poly_uint64 diff = init_a2 - init_a1;
1948
1949 /* The new check will start at DR_A1. Make sure that its access
1950 size encompasses the initial DR_A2. */
1951 if (maybe_lt (dr_a1->access_size, diff + dr_a2->access_size))
1952 {
1953 dr_a1->access_size = upper_bound (dr_a1->access_size,
1954 diff + dr_a2->access_size);
1955 unsigned int new_align = known_alignment (dr_a1->access_size);
1956 dr_a1->align = MIN (dr_a1->align, new_align);
1957 }
1958 if (dump_enabled_p ())
1959 dump_printf (MSG_NOTE, "merging ranges for %T, %T and %T, %T\n",
1960 DR_REF (dr_a1->dr), DR_REF (dr_b1->dr),
1961 DR_REF (dr_a2->dr), DR_REF (dr_b2->dr));
1962 alias_pair1->flags |= alias_pair2->flags;
1963 last -= 1;
1964 }
1965 }
1966 alias_pairs->truncate (last + 1);
1967
1968 /* Try to restore the original dr_with_seg_len order within each
1969 dr_with_seg_len_pair_t. If we ended up combining swapped and
1970 unswapped pairs into the same check, we have to invalidate any
1971 RAW, WAR and WAW information for it. */
1972 if (dump_enabled_p ())
1973 dump_printf (MSG_NOTE, "merged alias checks:\n");
1974 FOR_EACH_VEC_ELT (*alias_pairs, i, alias_pair)
1975 {
1976 unsigned int swap_mask = (DR_ALIAS_SWAPPED | DR_ALIAS_UNSWAPPED);
1977 unsigned int swapped = (alias_pair->flags & swap_mask);
1978 if (swapped == DR_ALIAS_SWAPPED)
1979 std::swap (alias_pair->first, alias_pair->second);
1980 else if (swapped != DR_ALIAS_UNSWAPPED)
1981 alias_pair->flags |= DR_ALIAS_ARBITRARY;
1982 alias_pair->flags &= ~swap_mask;
1983 if (dump_enabled_p ())
1984 dump_alias_pair (alias_pair, " ");
1985 }
1986 }
1987
1988 /* A subroutine of create_intersect_range_checks, with a subset of the
1989 same arguments. Try to use IFN_CHECK_RAW_PTRS and IFN_CHECK_WAR_PTRS
1990 to optimize cases in which the references form a simple RAW, WAR or
1991 WAW dependence. */
1992
1993 static bool
1994 create_ifn_alias_checks (tree *cond_expr,
1995 const dr_with_seg_len_pair_t &alias_pair)
1996 {
1997 const dr_with_seg_len& dr_a = alias_pair.first;
1998 const dr_with_seg_len& dr_b = alias_pair.second;
1999
2000 /* Check for cases in which:
2001
2002 (a) we have a known RAW, WAR or WAW dependence;
2003 (b) the accesses are well-ordered in both the original and new code
2004 (see the comment above the DR_ALIAS_* flags for details); and
2005 (c) the DR_STEPs describe all access pairs covered by ALIAS_PAIR. */
2006 if (alias_pair.flags & ~(DR_ALIAS_RAW | DR_ALIAS_WAR | DR_ALIAS_WAW))
2007 return false;
2008
2009 /* Make sure that both DRs access the same pattern of bytes,
2010 with a constant length and step. */
2011 poly_uint64 seg_len;
2012 if (!operand_equal_p (dr_a.seg_len, dr_b.seg_len, 0)
2013 || !poly_int_tree_p (dr_a.seg_len, &seg_len)
2014 || maybe_ne (dr_a.access_size, dr_b.access_size)
2015 || !operand_equal_p (DR_STEP (dr_a.dr), DR_STEP (dr_b.dr), 0)
2016 || !tree_fits_uhwi_p (DR_STEP (dr_a.dr)))
2017 return false;
2018
2019 unsigned HOST_WIDE_INT bytes = tree_to_uhwi (DR_STEP (dr_a.dr));
2020 tree addr_a = DR_BASE_ADDRESS (dr_a.dr);
2021 tree addr_b = DR_BASE_ADDRESS (dr_b.dr);
2022
2023 /* See whether the target supports what we want to do. WAW checks are
2024 equivalent to WAR checks here. */
2025 internal_fn ifn = (alias_pair.flags & DR_ALIAS_RAW
2026 ? IFN_CHECK_RAW_PTRS
2027 : IFN_CHECK_WAR_PTRS);
2028 unsigned int align = MIN (dr_a.align, dr_b.align);
2029 poly_uint64 full_length = seg_len + bytes;
2030 if (!internal_check_ptrs_fn_supported_p (ifn, TREE_TYPE (addr_a),
2031 full_length, align))
2032 {
2033 full_length = seg_len + dr_a.access_size;
2034 if (!internal_check_ptrs_fn_supported_p (ifn, TREE_TYPE (addr_a),
2035 full_length, align))
2036 return false;
2037 }
2038
2039 /* Commit to using this form of test. */
2040 addr_a = fold_build_pointer_plus (addr_a, DR_OFFSET (dr_a.dr));
2041 addr_a = fold_build_pointer_plus (addr_a, DR_INIT (dr_a.dr));
2042
2043 addr_b = fold_build_pointer_plus (addr_b, DR_OFFSET (dr_b.dr));
2044 addr_b = fold_build_pointer_plus (addr_b, DR_INIT (dr_b.dr));
2045
2046 *cond_expr = build_call_expr_internal_loc (UNKNOWN_LOCATION,
2047 ifn, boolean_type_node,
2048 4, addr_a, addr_b,
2049 size_int (full_length),
2050 size_int (align));
2051
2052 if (dump_enabled_p ())
2053 {
2054 if (ifn == IFN_CHECK_RAW_PTRS)
2055 dump_printf (MSG_NOTE, "using an IFN_CHECK_RAW_PTRS test\n");
2056 else
2057 dump_printf (MSG_NOTE, "using an IFN_CHECK_WAR_PTRS test\n");
2058 }
2059 return true;
2060 }
2061
2062 /* Try to generate a runtime condition that is true if ALIAS_PAIR is
2063 free of aliases, using a condition based on index values instead
2064 of a condition based on addresses. Return true on success,
2065 storing the condition in *COND_EXPR.
2066
2067 This can only be done if the two data references in ALIAS_PAIR access
2068 the same array object and the index is the only difference. For example,
2069 if the two data references are DR_A and DR_B:
2070
2071 DR_A DR_B
2072 data-ref arr[i] arr[j]
2073 base_object arr arr
2074 index {i_0, +, 1}_loop {j_0, +, 1}_loop
2075
2076 The addresses and their index are like:
2077
2078 |<- ADDR_A ->| |<- ADDR_B ->|
2079 ------------------------------------------------------->
2080 | | | | | | | | | |
2081 ------------------------------------------------------->
2082 i_0 ... i_0+4 j_0 ... j_0+4
2083
2084 We can create an expression based on the index rather than the address:
2085
2086 (unsigned) (i_0 - j_0 + 3) <= 6
2087
2088 i.e. the indices are less than 4 apart.
2089
2090 Note that the evolution step of the index needs to be considered in the comparison. */
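
/* A numeric check of the formula above (with assumed values): for
   i_0 = 10 and j_0 = 13, (unsigned) (10 - 13 + 3) = 0 <= 6, so the
   segments may overlap (the indices are 3 apart); for j_0 = 14,
   (unsigned) (10 - 14 + 3) wraps to a huge value > 6, so they cannot
   overlap (the indices are 4 apart).  */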
2091
2092 static bool
2093 create_intersect_range_checks_index (class loop *loop, tree *cond_expr,
2094 const dr_with_seg_len_pair_t &alias_pair)
2095 {
2096 const dr_with_seg_len &dr_a = alias_pair.first;
2097 const dr_with_seg_len &dr_b = alias_pair.second;
2098 if ((alias_pair.flags & DR_ALIAS_MIXED_STEPS)
2099 || integer_zerop (DR_STEP (dr_a.dr))
2100 || integer_zerop (DR_STEP (dr_b.dr))
2101 || DR_NUM_DIMENSIONS (dr_a.dr) != DR_NUM_DIMENSIONS (dr_b.dr))
2102 return false;
2103
2104 poly_uint64 seg_len1, seg_len2;
2105 if (!poly_int_tree_p (dr_a.seg_len, &seg_len1)
2106 || !poly_int_tree_p (dr_b.seg_len, &seg_len2))
2107 return false;
2108
2109 if (!tree_fits_shwi_p (DR_STEP (dr_a.dr)))
2110 return false;
2111
2112 if (!operand_equal_p (DR_BASE_OBJECT (dr_a.dr), DR_BASE_OBJECT (dr_b.dr), 0))
2113 return false;
2114
2115 if (!operand_equal_p (DR_STEP (dr_a.dr), DR_STEP (dr_b.dr), 0))
2116 return false;
2117
2118 gcc_assert (TREE_CODE (DR_STEP (dr_a.dr)) == INTEGER_CST);
2119
2120 bool neg_step = tree_int_cst_compare (DR_STEP (dr_a.dr), size_zero_node) < 0;
2121 unsigned HOST_WIDE_INT abs_step = tree_to_shwi (DR_STEP (dr_a.dr));
2122 if (neg_step)
2123 {
2124 abs_step = -abs_step;
2125 seg_len1 = (-wi::to_poly_wide (dr_a.seg_len)).force_uhwi ();
2126 seg_len2 = (-wi::to_poly_wide (dr_b.seg_len)).force_uhwi ();
2127 }
2128
2129 /* Infer the number of iterations with which the memory segment is accessed
2130 by DR. In other words, aliasing is checked if the memory segment
2131 accessed by DR_A in some iterations intersects with the memory segment
2132 accessed by DR_B in the same number of iterations.
2133 Note that the segment length is a linear function of the number of
2134 iterations, with DR_STEP as the coefficient. */
2135 poly_uint64 niter_len1, niter_len2;
2136 if (!can_div_trunc_p (seg_len1 + abs_step - 1, abs_step, &niter_len1)
2137 || !can_div_trunc_p (seg_len2 + abs_step - 1, abs_step, &niter_len2))
2138 return false;
2139
2140 /* Divide each access size by the byte step, rounding up. */
2141 poly_uint64 niter_access1, niter_access2;
2142 if (!can_div_trunc_p (dr_a.access_size + abs_step - 1,
2143 abs_step, &niter_access1)
2144 || !can_div_trunc_p (dr_b.access_size + abs_step - 1,
2145 abs_step, &niter_access2))
2146 return false;
2147
2148 bool waw_or_war_p = (alias_pair.flags & ~(DR_ALIAS_WAR | DR_ALIAS_WAW)) == 0;
2149
2150 unsigned int i;
2151 for (i = 0; i < DR_NUM_DIMENSIONS (dr_a.dr); i++)
2152 {
2153 tree access1 = DR_ACCESS_FN (dr_a.dr, i);
2154 tree access2 = DR_ACCESS_FN (dr_b.dr, i);
2155 /* The two indices must be the same if they are not SCEVs, or are not
2156 SCEVs with respect to the current loop being vectorized. */
2157 if (TREE_CODE (access1) != POLYNOMIAL_CHREC
2158 || TREE_CODE (access2) != POLYNOMIAL_CHREC
2159 || CHREC_VARIABLE (access1) != (unsigned)loop->num
2160 || CHREC_VARIABLE (access2) != (unsigned)loop->num)
2161 {
2162 if (operand_equal_p (access1, access2, 0))
2163 continue;
2164
2165 return false;
2166 }
2167 /* The two indices must have the same step. */
2168 if (!operand_equal_p (CHREC_RIGHT (access1), CHREC_RIGHT (access2), 0))
2169 return false;
2170
2171 tree idx_step = CHREC_RIGHT (access1);
2172 /* Index must have const step, otherwise DR_STEP won't be constant. */
2173 gcc_assert (TREE_CODE (idx_step) == INTEGER_CST);
2174 /* Index must evaluate in the same direction as DR. */
2175 gcc_assert (!neg_step || tree_int_cst_sign_bit (idx_step) == 1);
2176
2177 tree min1 = CHREC_LEFT (access1);
2178 tree min2 = CHREC_LEFT (access2);
2179 if (!types_compatible_p (TREE_TYPE (min1), TREE_TYPE (min2)))
2180 return false;
2181
2182 /* Ideally, aliasing could be checked against the loop's control IV, but
2183 we would need to prove a linear mapping between the control IV and the
2184 reference index. Although that should be true, we check against the
2185 (array) index of the data reference. Like the segment length, the
2186 index length is a linear function of the number of iterations, with
2187 idx_step as the coefficient, i.e., niter_len * idx_step. */
2188 offset_int abs_idx_step = offset_int::from (wi::to_wide (idx_step),
2189 SIGNED);
2190 if (neg_step)
2191 abs_idx_step = -abs_idx_step;
2192 poly_offset_int idx_len1 = abs_idx_step * niter_len1;
2193 poly_offset_int idx_len2 = abs_idx_step * niter_len2;
2194 poly_offset_int idx_access1 = abs_idx_step * niter_access1;
2195 poly_offset_int idx_access2 = abs_idx_step * niter_access2;
2196
2197 gcc_assert (known_ge (idx_len1, 0)
2198 && known_ge (idx_len2, 0)
2199 && known_ge (idx_access1, 0)
2200 && known_ge (idx_access2, 0));
2201
2202 /* Each access has the following pattern, with lengths measured
2203 in units of INDEX:
2204
2205 <-- idx_len -->
2206 <--- A: -ve step --->
2207 +-----+-------+-----+-------+-----+
2208 | n-1 | ..... | 0 | ..... | n-1 |
2209 +-----+-------+-----+-------+-----+
2210 <--- B: +ve step --->
2211 <-- idx_len -->
2212 |
2213 min
2214
2215 where "n" is the number of scalar iterations covered by the segment
2216 and where each access spans idx_access units.
2217
2218 A is the range of index values accessed when the step is negative,
2219 B is the range when the step is positive.
2220
2221 When checking for general overlap, we need to test whether
2222 the range:
2223
2224 [min1 + low_offset1, min1 + high_offset1 + idx_access1 - 1]
2225
2226 overlaps:
2227
2228 [min2 + low_offset2, min2 + high_offset2 + idx_access2 - 1]
2229
2230 where:
2231
2232 low_offsetN = +ve step ? 0 : -idx_lenN;
2233 high_offsetN = +ve step ? idx_lenN : 0;
2234
2235 This is equivalent to testing whether:
2236
2237 min1 + low_offset1 <= min2 + high_offset2 + idx_access2 - 1
2238 && min2 + low_offset2 <= min1 + high_offset1 + idx_access1 - 1
2239
2240 Converting this into a single test, there is an overlap if:
2241
2242 0 <= min2 - min1 + bias <= limit
2243
2244 where bias = high_offset2 + idx_access2 - 1 - low_offset1
2245 limit = (high_offset1 - low_offset1 + idx_access1 - 1)
2246 + (high_offset2 - low_offset2 + idx_access2 - 1)
2247 i.e. limit = idx_len1 + idx_access1 - 1 + idx_len2 + idx_access2 - 1
2248
2249 Combining the tests requires limit to be computable in an unsigned
2250 form of the index type; if it isn't, we fall back to the usual
2251 pointer-based checks.
2252
2253 We can do better if DR_B is a write and if DR_A and DR_B are
2254 well-ordered in both the original and the new code (see the
2255 comment above the DR_ALIAS_* flags for details). In this case
2256 we know that for each i in [0, n-1], the write performed by
2257 access i of DR_B occurs after access numbers j<=i of DR_A in
2258 both the original and the new code. Any write or anti
2259 dependencies wrt those DR_A accesses are therefore maintained.
2260
2261 We just need to make sure that each individual write in DR_B does not
2262 overlap any higher-indexed access in DR_A; such DR_A accesses happen
2263 after the DR_B access in the original code but happen before it in
2264 the new code.
2265
2266 We know the steps for both accesses are equal, so by induction, we
2267 just need to test whether the first write of DR_B overlaps a later
2268 access of DR_A. In other words, we need to move min1 along by
2269 one iteration:
2270
2271 min1' = min1 + idx_step
2272
2273 and use the ranges:
2274
2275 [min1' + low_offset1', min1' + high_offset1' + idx_access1 - 1]
2276
2277 and:
2278
2279 [min2, min2 + idx_access2 - 1]
2280
2281 where:
2282
2283 low_offset1' = +ve step ? 0 : -(idx_len1 - |idx_step|)
2284 high_offset1' = +ve step ? idx_len1 - |idx_step| : 0. */
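/* A worked instance of the formulas above (with assumed values):
   for idx_step = 1, idx_len1 = idx_len2 = 3 and idx_access1 =
   idx_access2 = 1, a general overlap test has low_offset1 = 0,
   high_offset2 = 3, bias = 3 + 1 - 1 - 0 = 3 and limit
   = 3 + 1 - 1 + 3 + 1 - 1 = 6, so there is an overlap iff
   0 <= min2 - min1 + 3 <= 6 -- the "(unsigned) (... + 3) <= 6"
   test from the function comment.  */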
2285 if (waw_or_war_p)
2286 idx_len1 -= abs_idx_step;
2287
2288 poly_offset_int limit = idx_len1 + idx_access1 - 1 + idx_access2 - 1;
2289 if (!waw_or_war_p)
2290 limit += idx_len2;
2291
2292 tree utype = unsigned_type_for (TREE_TYPE (min1));
2293 if (!wi::fits_to_tree_p (limit, utype))
2294 return false;
2295
2296 poly_offset_int low_offset1 = neg_step ? -idx_len1 : 0;
2297 poly_offset_int high_offset2 = neg_step || waw_or_war_p ? 0 : idx_len2;
2298 poly_offset_int bias = high_offset2 + idx_access2 - 1 - low_offset1;
2299 /* Equivalent to adding IDX_STEP to MIN1. */
2300 if (waw_or_war_p)
2301 bias -= wi::to_offset (idx_step);
2302
2303 tree subject = fold_build2 (MINUS_EXPR, utype,
2304 fold_convert (utype, min2),
2305 fold_convert (utype, min1));
2306 subject = fold_build2 (PLUS_EXPR, utype, subject,
2307 wide_int_to_tree (utype, bias));
2308 tree part_cond_expr = fold_build2 (GT_EXPR, boolean_type_node, subject,
2309 wide_int_to_tree (utype, limit));
2310 if (*cond_expr)
2311 *cond_expr = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
2312 *cond_expr, part_cond_expr);
2313 else
2314 *cond_expr = part_cond_expr;
2315 }
2316 if (dump_enabled_p ())
2317 {
2318 if (waw_or_war_p)
2319 dump_printf (MSG_NOTE, "using an index-based WAR/WAW test\n");
2320 else
2321 dump_printf (MSG_NOTE, "using an index-based overlap test\n");
2322 }
2323 return true;
2324 }
2325
2326 /* A subroutine of create_intersect_range_checks, with a subset of the
2327 same arguments. Try to optimize cases in which the second access
2328 is a write and in which some overlap is valid. */
2329
2330 static bool
2331 create_waw_or_war_checks (tree *cond_expr,
2332 const dr_with_seg_len_pair_t &alias_pair)
2333 {
2334 const dr_with_seg_len& dr_a = alias_pair.first;
2335 const dr_with_seg_len& dr_b = alias_pair.second;
2336
2337 /* Check for cases in which:
2338
2339 (a) DR_B is always a write;
2340 (b) the accesses are well-ordered in both the original and new code
2341 (see the comment above the DR_ALIAS_* flags for details); and
2342 (c) the DR_STEPs describe all access pairs covered by ALIAS_PAIR. */
2343 if (alias_pair.flags & ~(DR_ALIAS_WAR | DR_ALIAS_WAW))
2344 return false;
2345
2346 /* Check for equal (but possibly variable) steps. */
2347 tree step = DR_STEP (dr_a.dr);
2348 if (!operand_equal_p (step, DR_STEP (dr_b.dr)))
2349 return false;
2350
2351 /* Make sure that we can operate on sizetype without loss of precision. */
2352 tree addr_type = TREE_TYPE (DR_BASE_ADDRESS (dr_a.dr));
2353 if (TYPE_PRECISION (addr_type) != TYPE_PRECISION (sizetype))
2354 return false;
2355
2356 /* All addresses involved are known to have a common alignment ALIGN.
2357 We can therefore subtract ALIGN from an exclusive endpoint to get
2358 an inclusive endpoint. In the best (and common) case, ALIGN is the
2359 same as the access sizes of both DRs, and so subtracting ALIGN
2360 cancels out the addition of an access size. */
2361 unsigned int align = MIN (dr_a.align, dr_b.align);
2362 poly_uint64 last_chunk_a = dr_a.access_size - align;
2363 poly_uint64 last_chunk_b = dr_b.access_size - align;
2364
2365 /* Get a boolean expression that is true when the step is negative. */
2366 tree indicator = dr_direction_indicator (dr_a.dr);
2367 tree neg_step = fold_build2 (LT_EXPR, boolean_type_node,
2368 fold_convert (ssizetype, indicator),
2369 ssize_int (0));
2370
2371 /* Get lengths in sizetype. */
2372 tree seg_len_a
2373 = fold_convert (sizetype, rewrite_to_non_trapping_overflow (dr_a.seg_len));
2374 step = fold_convert (sizetype, rewrite_to_non_trapping_overflow (step));
2375
2376 /* Each access has the following pattern:
2377
2378 <- |seg_len| ->
2379 <--- A: -ve step --->
2380 +-----+-------+-----+-------+-----+
2381 | n-1 | ..... | 0 | ..... | n-1 |
2382 +-----+-------+-----+-------+-----+
2383 <--- B: +ve step --->
2384 <- |seg_len| ->
2385 |
2386 base address
2387
2388 where "n" is the number of scalar iterations covered by the segment.
2389
2390 A is the range of bytes accessed when the step is negative,
2391 B is the range when the step is positive.
2392
2393 We know that DR_B is a write. We also know (from checking that
2394 DR_A and DR_B are well-ordered) that for each i in [0, n-1],
2395 the write performed by access i of DR_B occurs after access numbers
2396 j<=i of DR_A in both the original and the new code. Any write or
2397 anti dependencies wrt those DR_A accesses are therefore maintained.
2398
2399 We just need to make sure that each individual write in DR_B does not
2400 overlap any higher-indexed access in DR_A; such DR_A accesses happen
2401 after the DR_B access in the original code but happen before it in
2402 the new code.
2403
2404 We know the steps for both accesses are equal, so by induction, we
2405 just need to test whether the first write of DR_B overlaps a later
2406 access of DR_A. In other words, we need to move addr_a along by
2407 one iteration:
2408
2409 addr_a' = addr_a + step
2410
2411 and check whether:
2412
2413 [addr_b, addr_b + last_chunk_b]
2414
2415 overlaps:
2416
2417 [addr_a' + low_offset_a, addr_a' + high_offset_a + last_chunk_a]
2418
2419 where [low_offset_a, high_offset_a] spans accesses [1, n-1]. I.e.:
2420
2421 low_offset_a = +ve step ? 0 : seg_len_a - step
2422 high_offset_a = +ve step ? seg_len_a - step : 0
2423
2424 This is equivalent to testing whether:
2425
2426 addr_a' + low_offset_a <= addr_b + last_chunk_b
2427 && addr_b <= addr_a' + high_offset_a + last_chunk_a
2428
2429 Converting this into a single test, there is an overlap if:
2430
2431 0 <= addr_b + last_chunk_b - addr_a' - low_offset_a <= limit
2432
2433 where limit = high_offset_a - low_offset_a + last_chunk_a + last_chunk_b
2434
2435 If DR_A is performed, limit + |step| - last_chunk_b is known to be
2436 less than the size of the object underlying DR_A. We also know
2437 that last_chunk_b <= |step|; this is checked elsewhere if it isn't
2438 guaranteed at compile time. There can therefore be no overflow if
2439 "limit" is calculated in an unsigned type with pointer precision. */
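/* A worked instance (with assumed values): for step = 4,
   seg_len_a = 16 and access sizes equal to ALIGN = 4, we get
   last_chunk_a = last_chunk_b = 0, low_offset_a = 0, high_offset_a
   = 16 - 4 = 12, bias = 0 and limit = 12, so there is an overlap
   iff 0 <= addr_b - (addr_a + 4) <= 12, i.e. iff the first write of
   DR_B lands in [addr_a + 4, addr_a + 16], on one of the later
   DR_A accesses.  */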
2440 tree addr_a = fold_build_pointer_plus (DR_BASE_ADDRESS (dr_a.dr),
2441 DR_OFFSET (dr_a.dr));
2442 addr_a = fold_build_pointer_plus (addr_a, DR_INIT (dr_a.dr));
2443
2444 tree addr_b = fold_build_pointer_plus (DR_BASE_ADDRESS (dr_b.dr),
2445 DR_OFFSET (dr_b.dr));
2446 addr_b = fold_build_pointer_plus (addr_b, DR_INIT (dr_b.dr));
2447
2448 /* Advance ADDR_A by one iteration and adjust the length to compensate. */
2449 addr_a = fold_build_pointer_plus (addr_a, step);
2450 tree seg_len_a_minus_step = fold_build2 (MINUS_EXPR, sizetype,
2451 seg_len_a, step);
2452 if (!CONSTANT_CLASS_P (seg_len_a_minus_step))
2453 seg_len_a_minus_step = build1 (SAVE_EXPR, sizetype, seg_len_a_minus_step);
2454
2455 tree low_offset_a = fold_build3 (COND_EXPR, sizetype, neg_step,
2456 seg_len_a_minus_step, size_zero_node);
2457 if (!CONSTANT_CLASS_P (low_offset_a))
2458 low_offset_a = build1 (SAVE_EXPR, sizetype, low_offset_a);
2459
2460 /* We could use COND_EXPR <neg_step, size_zero_node, seg_len_a_minus_step>,
2461 but it's usually more efficient to reuse the LOW_OFFSET_A result. */
2462 tree high_offset_a = fold_build2 (MINUS_EXPR, sizetype, seg_len_a_minus_step,
2463 low_offset_a);
2464
2465 /* The amount added to addr_b - addr_a'. */
2466 tree bias = fold_build2 (MINUS_EXPR, sizetype,
2467 size_int (last_chunk_b), low_offset_a);
2468
2469 tree limit = fold_build2 (MINUS_EXPR, sizetype, high_offset_a, low_offset_a);
2470 limit = fold_build2 (PLUS_EXPR, sizetype, limit,
2471 size_int (last_chunk_a + last_chunk_b));
2472
2473 tree subject = fold_build2 (POINTER_DIFF_EXPR, ssizetype, addr_b, addr_a);
2474 subject = fold_build2 (PLUS_EXPR, sizetype,
2475 fold_convert (sizetype, subject), bias);
2476
2477 *cond_expr = fold_build2 (GT_EXPR, boolean_type_node, subject, limit);
2478 if (dump_enabled_p ())
2479 dump_printf (MSG_NOTE, "using an address-based WAR/WAW test\n");
2480 return true;
2481 }
2482
2483 /* If ALIGN is nonzero, set up *SEG_MIN_OUT and *SEG_MAX_OUT so that for
2484 every address ADDR accessed by D:
2485
2486 *SEG_MIN_OUT <= ADDR (== ADDR & -ALIGN) <= *SEG_MAX_OUT
2487
2488 In this case, every element accessed by D is aligned to at least
2489 ALIGN bytes.
2490
2491 If ALIGN is zero then instead set *SEG_MIN_OUT and *SEG_MAX_OUT so that:
2492
2493 *SEG_MIN_OUT <= ADDR < *SEG_MAX_OUT. */
2494
2495 static void
2496 get_segment_min_max (const dr_with_seg_len &d, tree *seg_min_out,
2497 tree *seg_max_out, HOST_WIDE_INT align)
2498 {
2499 /* Each access has the following pattern:
2500
2501 <- |seg_len| ->
2502 <--- A: -ve step --->
2503 +-----+-------+-----+-------+-----+
2504 | n-1 | ..... | 0 | ..... | n-1 |
2505 +-----+-------+-----+-------+-----+
2506 <--- B: +ve step --->
2507 <- |seg_len| ->
2508 |
2509 base address
2510
2511 where "n" is the number of scalar iterations covered by the segment.
2512 (This should be VF for a particular pair if we know that both steps
2513 are the same, otherwise it will be the full number of scalar loop
2514 iterations.)
2515
2516 A is the range of bytes accessed when the step is negative,
2517 B is the range when the step is positive.
2518
2519 If the access size is "access_size" bytes, the lowest addressed byte is:
2520
2521 base + (step < 0 ? seg_len : 0) [LB]
2522
2523 and the highest addressed byte is always below:
2524
2525 base + (step < 0 ? 0 : seg_len) + access_size [UB]
2526
2527 Thus:
2528
2529 LB <= ADDR < UB
2530
2531 If ALIGN is nonzero, all three values are aligned to at least ALIGN
2532 bytes, so:
2533
2534 LB <= ADDR <= UB - ALIGN
2535
2536 where "- ALIGN" folds naturally with the "+ access_size" and often
2537 cancels it out.
2538
2539 We don't try to simplify LB and UB beyond this (e.g. by using
2540 MIN and MAX based on whether seg_len rather than the stride is
2541 negative) because it is possible for the absolute size of the
2542 segment to overflow the range of a ssize_t.
2543
2544 Keeping the pointer_plus outside of the cond_expr should allow
2545 the cond_exprs to be shared with other alias checks. */
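/* A worked instance (with assumed values): for a positive step with
   base B, seg_len = 64, access_size = 4 and ALIGN = 4, LB = B and
   UB = B + 64 + 4, so the inclusive maximum is B + 64 + 4 - 4
   = B + 64: the "- ALIGN" cancels the "+ access_size".  For a
   negative step, LB = B + seg_len (below B) and the inclusive
   maximum is B + 4 - 4 = B.  */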
2546 tree indicator = dr_direction_indicator (d.dr);
2547 tree neg_step = fold_build2 (LT_EXPR, boolean_type_node,
2548 fold_convert (ssizetype, indicator),
2549 ssize_int (0));
2550 tree addr_base = fold_build_pointer_plus (DR_BASE_ADDRESS (d.dr),
2551 DR_OFFSET (d.dr));
2552 addr_base = fold_build_pointer_plus (addr_base, DR_INIT (d.dr));
2553 tree seg_len
2554 = fold_convert (sizetype, rewrite_to_non_trapping_overflow (d.seg_len));
2555
2556 tree min_reach = fold_build3 (COND_EXPR, sizetype, neg_step,
2557 seg_len, size_zero_node);
2558 tree max_reach = fold_build3 (COND_EXPR, sizetype, neg_step,
2559 size_zero_node, seg_len);
2560 max_reach = fold_build2 (PLUS_EXPR, sizetype, max_reach,
2561 size_int (d.access_size - align));
2562
2563 *seg_min_out = fold_build_pointer_plus (addr_base, min_reach);
2564 *seg_max_out = fold_build_pointer_plus (addr_base, max_reach);
2565 }
2566
2567 /* Generate a runtime condition that is true if ALIAS_PAIR is free of aliases,
2568 storing the condition in *COND_EXPR. The fallback is to generate
2569 a test that the two accesses do not overlap:
2570
2571 end_a <= start_b || end_b <= start_a. */
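
/* For instance (with assumed values): byte segments [a, a + 64) and
   [b, b + 64) are free of aliases iff a + 64 <= b || b + 64 <= a.  */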
2572
2573 static void
2574 create_intersect_range_checks (class loop *loop, tree *cond_expr,
2575 const dr_with_seg_len_pair_t &alias_pair)
2576 {
2577 const dr_with_seg_len& dr_a = alias_pair.first;
2578 const dr_with_seg_len& dr_b = alias_pair.second;
2579 *cond_expr = NULL_TREE;
2580 if (create_intersect_range_checks_index (loop, cond_expr, alias_pair))
2581 return;
2582
2583 if (create_ifn_alias_checks (cond_expr, alias_pair))
2584 return;
2585
2586 if (create_waw_or_war_checks (cond_expr, alias_pair))
2587 return;
2588
2589 unsigned HOST_WIDE_INT min_align;
2590 tree_code cmp_code;
2591 /* We don't have to check DR_ALIAS_MIXED_STEPS here, since both versions
2592 are equivalent. This is just an optimization heuristic. */
2593 if (TREE_CODE (DR_STEP (dr_a.dr)) == INTEGER_CST
2594 && TREE_CODE (DR_STEP (dr_b.dr)) == INTEGER_CST)
2595 {
2596 /* In this case adding access_size to seg_len is likely to give
2597 a simple X * step, where X is either the number of scalar
2598 iterations or the vectorization factor. We're better off
2599 keeping that, rather than subtracting an alignment from it.
2600
2601 In this case the maximum values are exclusive and so there is
2602 no alias if the maximum of one segment equals the minimum
2603 of another. */
2604 min_align = 0;
2605 cmp_code = LE_EXPR;
2606 }
2607 else
2608 {
2609 /* Calculate the minimum alignment shared by all four pointers,
2610 then arrange for this alignment to be subtracted from the
2611 exclusive maximum values to get inclusive maximum values.
2612 This "- min_align" is cumulative with a "+ access_size"
2613 in the calculation of the maximum values. In the best
2614 (and common) case, the two cancel each other out, leaving
2615 us with an inclusive bound based only on seg_len. In the
2616 worst case we're simply adding a smaller number than before.
2617
2618 Because the maximum values are inclusive, there is an alias
2619 if the maximum value of one segment is equal to the minimum
2620 value of the other. */
2621 min_align = MIN (dr_a.align, dr_b.align);
2622 cmp_code = LT_EXPR;
2623 }
2624
2625 tree seg_a_min, seg_a_max, seg_b_min, seg_b_max;
2626 get_segment_min_max (dr_a, &seg_a_min, &seg_a_max, min_align);
2627 get_segment_min_max (dr_b, &seg_b_min, &seg_b_max, min_align);
2628
2629 *cond_expr
2630 = fold_build2 (TRUTH_OR_EXPR, boolean_type_node,
2631 fold_build2 (cmp_code, boolean_type_node, seg_a_max, seg_b_min),
2632 fold_build2 (cmp_code, boolean_type_node, seg_b_max, seg_a_min));
2633 if (dump_enabled_p ())
2634 dump_printf (MSG_NOTE, "using an address-based overlap test\n");
2635 }
2636
2637 /* Create a conditional expression that represents the run-time checks for
2638 overlapping of address ranges represented by a list of data references
2639 pairs passed in ALIAS_PAIRS. Data references are in LOOP. The returned
2640 COND_EXPR is the conditional expression to be used in the if statement
2641 that controls which version of the loop gets executed at runtime. */
2642
2643 void
2644 create_runtime_alias_checks (class loop *loop,
2645 vec<dr_with_seg_len_pair_t> *alias_pairs,
2646 tree * cond_expr)
2647 {
2648 tree part_cond_expr;
2649
2650 fold_defer_overflow_warnings ();
2651 dr_with_seg_len_pair_t *alias_pair;
2652 unsigned int i;
2653 FOR_EACH_VEC_ELT (*alias_pairs, i, alias_pair)
2654 {
2655 gcc_assert (alias_pair->flags);
2656 if (dump_enabled_p ())
2657 dump_printf (MSG_NOTE,
2658 "create runtime check for data references %T and %T\n",
2659 DR_REF (alias_pair->first.dr),
2660 DR_REF (alias_pair->second.dr));
2661
2662 /* Create a condition expression for each pair of data references. */
2663 create_intersect_range_checks (loop, &part_cond_expr, *alias_pair);
2664 if (*cond_expr)
2665 *cond_expr = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
2666 *cond_expr, part_cond_expr);
2667 else
2668 *cond_expr = part_cond_expr;
2669 }
2670 fold_undefer_and_ignore_overflow_warnings ();
2671 }
2672
2673 /* Check if OFFSET1 and OFFSET2 (DR_OFFSETs of some data-refs) are identical
2674 expressions. */
2675 static bool
2676 dr_equal_offsets_p1 (tree offset1, tree offset2)
2677 {
2678 bool res;
2679
2680 STRIP_NOPS (offset1);
2681 STRIP_NOPS (offset2);
2682
2683 if (offset1 == offset2)
2684 return true;
2685
2686 if (TREE_CODE (offset1) != TREE_CODE (offset2)
2687 || (!BINARY_CLASS_P (offset1) && !UNARY_CLASS_P (offset1)))
2688 return false;
2689
2690 res = dr_equal_offsets_p1 (TREE_OPERAND (offset1, 0),
2691 TREE_OPERAND (offset2, 0));
2692
2693 if (!res || !BINARY_CLASS_P (offset1))
2694 return res;
2695
2696 res = dr_equal_offsets_p1 (TREE_OPERAND (offset1, 1),
2697 TREE_OPERAND (offset2, 1));
2698
2699 return res;
2700 }
2701
2702 /* Check if DRA and DRB have equal offsets. */
2703 bool
2704 dr_equal_offsets_p (struct data_reference *dra,
2705 struct data_reference *drb)
2706 {
2707 tree offset1, offset2;
2708
2709 offset1 = DR_OFFSET (dra);
2710 offset2 = DR_OFFSET (drb);
2711
2712 return dr_equal_offsets_p1 (offset1, offset2);
2713 }
2714
2715 /* Returns true if FNA == FNB. */
2716
2717 static bool
2718 affine_function_equal_p (affine_fn fna, affine_fn fnb)
2719 {
2720 unsigned i, n = fna.length ();
2721
2722 if (n != fnb.length ())
2723 return false;
2724
2725 for (i = 0; i < n; i++)
2726 if (!operand_equal_p (fna[i], fnb[i], 0))
2727 return false;
2728
2729 return true;
2730 }
2731
2732 /* If all the functions in CF are the same, returns one of them,
2733 otherwise returns a null affine function. */
2734
2735 static affine_fn
2736 common_affine_function (conflict_function *cf)
2737 {
2738 unsigned i;
2739 affine_fn comm;
2740
2741 if (!CF_NONTRIVIAL_P (cf))
2742 return affine_fn ();
2743
2744 comm = cf->fns[0];
2745
2746 for (i = 1; i < cf->n; i++)
2747 if (!affine_function_equal_p (comm, cf->fns[i]))
2748 return affine_fn ();
2749
2750 return comm;
2751 }
2752
2753 /* Returns the base of the affine function FN. */
2754
2755 static tree
2756 affine_function_base (affine_fn fn)
2757 {
2758 return fn[0];
2759 }
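
/* A note on the representation, as inferred from the accessors in
   this file: an affine_fn is a vector [c0, c1, ..., cn] denoting
   c0 + c1 * x_1 + ... + cn * x_n.  For example, affine_fn_cst (cst)
   builds [cst], and affine_fn_univar (cst, 2, coef), defined later
   in this file, builds [cst, 0, coef], i.e. cst + coef * x_2.  */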
2760
2761 /* Returns true if FN is a constant. */
2762
2763 static bool
2764 affine_function_constant_p (affine_fn fn)
2765 {
2766 unsigned i;
2767 tree coef;
2768
2769 for (i = 1; fn.iterate (i, &coef); i++)
2770 if (!integer_zerop (coef))
2771 return false;
2772
2773 return true;
2774 }
2775
2776 /* Returns true if FN is the zero constant function. */
2777
2778 static bool
2779 affine_function_zero_p (affine_fn fn)
2780 {
2781 return (integer_zerop (affine_function_base (fn))
2782 && affine_function_constant_p (fn));
2783 }
2784
2785 /* Returns a signed integer type with the largest precision from TA
2786 and TB. */
2787
2788 static tree
2789 signed_type_for_types (tree ta, tree tb)
2790 {
2791 if (TYPE_PRECISION (ta) > TYPE_PRECISION (tb))
2792 return signed_type_for (ta);
2793 else
2794 return signed_type_for (tb);
2795 }
2796
2797 /* Applies operation OP on affine functions FNA and FNB, and returns the
2798 result. */
2799
2800 static affine_fn
2801 affine_fn_op (enum tree_code op, affine_fn fna, affine_fn fnb)
2802 {
2803 unsigned i, n, m;
2804 affine_fn ret;
2805 tree coef;
2806
2807 if (fnb.length () > fna.length ())
2808 {
2809 n = fna.length ();
2810 m = fnb.length ();
2811 }
2812 else
2813 {
2814 n = fnb.length ();
2815 m = fna.length ();
2816 }
2817
2818 ret.create (m);
2819 for (i = 0; i < n; i++)
2820 {
2821 tree type = signed_type_for_types (TREE_TYPE (fna[i]),
2822 TREE_TYPE (fnb[i]));
2823 ret.quick_push (fold_build2 (op, type, fna[i], fnb[i]));
2824 }
2825
2826 for (; fna.iterate (i, &coef); i++)
2827 ret.quick_push (fold_build2 (op, signed_type_for (TREE_TYPE (coef)),
2828 coef, integer_zero_node));
2829 for (; fnb.iterate (i, &coef); i++)
2830 ret.quick_push (fold_build2 (op, signed_type_for (TREE_TYPE (coef)),
2831 integer_zero_node, coef));
2832
2833 return ret;
2834 }
2835
2836 /* Returns the sum of affine functions FNA and FNB. */
2837
2838 static affine_fn
2839 affine_fn_plus (affine_fn fna, affine_fn fnb)
2840 {
2841 return affine_fn_op (PLUS_EXPR, fna, fnb);
2842 }
2843
2844 /* Returns the difference of affine functions FNA and FNB. */
2845
2846 static affine_fn
2847 affine_fn_minus (affine_fn fna, affine_fn fnb)
2848 {
2849 return affine_fn_op (MINUS_EXPR, fna, fnb);
2850 }
2851
2852 /* Frees affine function FN. */
2853
2854 static void
2855 affine_fn_free (affine_fn fn)
2856 {
2857 fn.release ();
2858 }
2859
2860 /* For each subscript in the data dependence relation DDR, determine
2861 the distance. */
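
/* For example (illustrative): for the accesses a[i] and a[i + 4] in
   the same loop, iteration x of one conflicts with iteration x - 4
   of the other, so the two conflict functions differ by the constant
   4 and SUB_DISTANCE is set to that constant; when the difference is
   not constant, chrec_dont_know is recorded.  */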
2862
2863 static void
2864 compute_subscript_distance (struct data_dependence_relation *ddr)
2865 {
2866 conflict_function *cf_a, *cf_b;
2867 affine_fn fn_a, fn_b, diff;
2868
2869 if (DDR_ARE_DEPENDENT (ddr) == NULL_TREE)
2870 {
2871 unsigned int i;
2872
2873 for (i = 0; i < DDR_NUM_SUBSCRIPTS (ddr); i++)
2874 {
2875 struct subscript *subscript;
2876
2877 subscript = DDR_SUBSCRIPT (ddr, i);
2878 cf_a = SUB_CONFLICTS_IN_A (subscript);
2879 cf_b = SUB_CONFLICTS_IN_B (subscript);
2880
2881 fn_a = common_affine_function (cf_a);
2882 fn_b = common_affine_function (cf_b);
2883 if (!fn_a.exists () || !fn_b.exists ())
2884 {
2885 SUB_DISTANCE (subscript) = chrec_dont_know;
2886 return;
2887 }
2888 diff = affine_fn_minus (fn_a, fn_b);
2889
2890 if (affine_function_constant_p (diff))
2891 SUB_DISTANCE (subscript) = affine_function_base (diff);
2892 else
2893 SUB_DISTANCE (subscript) = chrec_dont_know;
2894
2895 affine_fn_free (diff);
2896 }
2897 }
2898 }
2899
2900 /* Returns the conflict function for "unknown". */
2901
2902 static conflict_function *
2903 conflict_fn_not_known (void)
2904 {
2905 conflict_function *fn = XCNEW (conflict_function);
2906 fn->n = NOT_KNOWN;
2907
2908 return fn;
2909 }
2910
2911 /* Returns the conflict function for "independent". */
2912
2913 static conflict_function *
2914 conflict_fn_no_dependence (void)
2915 {
2916 conflict_function *fn = XCNEW (conflict_function);
2917 fn->n = NO_DEPENDENCE;
2918
2919 return fn;
2920 }
2921
2922 /* Returns true if the address of OBJ is invariant in LOOP. */
2923
2924 static bool
2925 object_address_invariant_in_loop_p (const class loop *loop, const_tree obj)
2926 {
2927 while (handled_component_p (obj))
2928 {
2929 if (TREE_CODE (obj) == ARRAY_REF)
2930 {
2931 for (int i = 1; i < 4; ++i)
2932 if (chrec_contains_symbols_defined_in_loop (TREE_OPERAND (obj, i),
2933 loop->num))
2934 return false;
2935 }
2936 else if (TREE_CODE (obj) == COMPONENT_REF)
2937 {
2938 if (chrec_contains_symbols_defined_in_loop (TREE_OPERAND (obj, 2),
2939 loop->num))
2940 return false;
2941 }
2942 obj = TREE_OPERAND (obj, 0);
2943 }
2944
2945 if (!INDIRECT_REF_P (obj)
2946 && TREE_CODE (obj) != MEM_REF)
2947 return true;
2948
2949 return !chrec_contains_symbols_defined_in_loop (TREE_OPERAND (obj, 0),
2950 loop->num);
2951 }
2952
2953 /* Returns false if we can prove that data references A and B do not alias,
2954 true otherwise. If LOOP_NEST is NULL, no cross-iteration aliases are
2955 considered. */
2956
2957 bool
2958 dr_may_alias_p (const struct data_reference *a, const struct data_reference *b,
2959 class loop *loop_nest)
2960 {
2961 tree addr_a = DR_BASE_OBJECT (a);
2962 tree addr_b = DR_BASE_OBJECT (b);
2963
2964 /* If we are not processing a loop nest but scalar code, we
2965 do not need to care about possible cross-iteration dependences
2966 and thus can process the full original reference. Do so,
2967 similar to how loop invariant motion applies extra offset-based
2968 disambiguation. */
2969 if (!loop_nest)
2970 {
2971 aff_tree off1, off2;
2972 poly_widest_int size1, size2;
2973 get_inner_reference_aff (DR_REF (a), &off1, &size1);
2974 get_inner_reference_aff (DR_REF (b), &off2, &size2);
2975 aff_combination_scale (&off1, -1);
2976 aff_combination_add (&off2, &off1);
2977 if (aff_comb_cannot_overlap_p (&off2, size1, size2))
2978 return false;
2979 }
2980
2981 if ((TREE_CODE (addr_a) == MEM_REF || TREE_CODE (addr_a) == TARGET_MEM_REF)
2982 && (TREE_CODE (addr_b) == MEM_REF || TREE_CODE (addr_b) == TARGET_MEM_REF)
2983 /* For cross-iteration dependences the cliques must be valid for the
2984 whole loop, not just individual iterations. */
2985 && (!loop_nest
2986 || MR_DEPENDENCE_CLIQUE (addr_a) == 1
2987 || MR_DEPENDENCE_CLIQUE (addr_a) == loop_nest->owned_clique)
2988 && MR_DEPENDENCE_CLIQUE (addr_a) == MR_DEPENDENCE_CLIQUE (addr_b)
2989 && MR_DEPENDENCE_BASE (addr_a) != MR_DEPENDENCE_BASE (addr_b))
2990 return false;
2991
2992 /* If we had an evolution in a pointer-based MEM_REF BASE_OBJECT we
2993 do not know the size of the base-object. So we cannot do any
2994 offset/overlap based analysis but have to rely on points-to
2995 information only. */
2996 if (TREE_CODE (addr_a) == MEM_REF
2997 && (DR_UNCONSTRAINED_BASE (a)
2998 || TREE_CODE (TREE_OPERAND (addr_a, 0)) == SSA_NAME))
2999 {
3000 /* For true dependences we can apply TBAA. */
3001 if (flag_strict_aliasing
3002 && DR_IS_WRITE (a) && DR_IS_READ (b)
3003 && !alias_sets_conflict_p (get_alias_set (DR_REF (a)),
3004 get_alias_set (DR_REF (b))))
3005 return false;
3006 if (TREE_CODE (addr_b) == MEM_REF)
3007 return ptr_derefs_may_alias_p (TREE_OPERAND (addr_a, 0),
3008 TREE_OPERAND (addr_b, 0));
3009 else
3010 return ptr_derefs_may_alias_p (TREE_OPERAND (addr_a, 0),
3011 build_fold_addr_expr (addr_b));
3012 }
3013 else if (TREE_CODE (addr_b) == MEM_REF
3014 && (DR_UNCONSTRAINED_BASE (b)
3015 || TREE_CODE (TREE_OPERAND (addr_b, 0)) == SSA_NAME))
3016 {
3017 /* For true dependences we can apply TBAA. */
3018 if (flag_strict_aliasing
3019 && DR_IS_WRITE (a) && DR_IS_READ (b)
3020 && !alias_sets_conflict_p (get_alias_set (DR_REF (a)),
3021 get_alias_set (DR_REF (b))))
3022 return false;
3023 if (TREE_CODE (addr_a) == MEM_REF)
3024 return ptr_derefs_may_alias_p (TREE_OPERAND (addr_a, 0),
3025 TREE_OPERAND (addr_b, 0));
3026 else
3027 return ptr_derefs_may_alias_p (build_fold_addr_expr (addr_a),
3028 TREE_OPERAND (addr_b, 0));
3029 }
3030
3031 /* Otherwise DR_BASE_OBJECT is an access that covers the whole object
3032 that is being subsetted in the loop nest. */
3033 if (DR_IS_WRITE (a) && DR_IS_WRITE (b))
3034 return refs_output_dependent_p (addr_a, addr_b);
3035 else if (DR_IS_READ (a) && DR_IS_WRITE (b))
3036 return refs_anti_dependent_p (addr_a, addr_b);
3037 return refs_may_alias_p (addr_a, addr_b);
3038 }
3039
3040 /* REF_A and REF_B both satisfy access_fn_component_p. Return true
3041 if it is meaningful to compare their associated access functions
3042 when checking for dependencies. */
3043
3044 static bool
3045 access_fn_components_comparable_p (tree ref_a, tree ref_b)
3046 {
3047 /* Allow pairs of component refs from the following sets:
3048
3049 { REALPART_EXPR, IMAGPART_EXPR }
3050 { COMPONENT_REF }
3051 { ARRAY_REF }. */
3052 tree_code code_a = TREE_CODE (ref_a);
3053 tree_code code_b = TREE_CODE (ref_b);
3054 if (code_a == IMAGPART_EXPR)
3055 code_a = REALPART_EXPR;
3056 if (code_b == IMAGPART_EXPR)
3057 code_b = REALPART_EXPR;
3058 if (code_a != code_b)
3059 return false;
3060
3061 if (TREE_CODE (ref_a) == COMPONENT_REF)
3062 /* ??? We cannot simply use the type of operand #0 of the refs here as
3063 the Fortran compiler smuggles type punning into COMPONENT_REFs.
3064 Use the DECL_CONTEXT of the FIELD_DECLs instead. */
3065 return (DECL_CONTEXT (TREE_OPERAND (ref_a, 1))
3066 == DECL_CONTEXT (TREE_OPERAND (ref_b, 1)));
3067
3068 return types_compatible_p (TREE_TYPE (TREE_OPERAND (ref_a, 0)),
3069 TREE_TYPE (TREE_OPERAND (ref_b, 0)));
3070 }
3071
3072 /* Initialize a data dependence relation between data accesses A and
3073 B. LOOP_NEST is the vector of loops surrounding the references; its
3074 length gives the size of the classic distance/direction vectors. */
3075
3076 struct data_dependence_relation *
3077 initialize_data_dependence_relation (struct data_reference *a,
3078 struct data_reference *b,
3079 vec<loop_p> loop_nest)
3080 {
3081 struct data_dependence_relation *res;
3082 unsigned int i;
3083
3084 res = XCNEW (struct data_dependence_relation);
3085 DDR_A (res) = a;
3086 DDR_B (res) = b;
3087 DDR_LOOP_NEST (res).create (0);
3088 DDR_SUBSCRIPTS (res).create (0);
3089 DDR_DIR_VECTS (res).create (0);
3090 DDR_DIST_VECTS (res).create (0);
3091
3092 if (a == NULL || b == NULL)
3093 {
3094 DDR_ARE_DEPENDENT (res) = chrec_dont_know;
3095 return res;
3096 }
3097
3098 /* If the data references do not alias, then they are independent. */
3099 if (!dr_may_alias_p (a, b, loop_nest.exists () ? loop_nest[0] : NULL))
3100 {
3101 DDR_ARE_DEPENDENT (res) = chrec_known;
3102 return res;
3103 }
3104
3105 unsigned int num_dimensions_a = DR_NUM_DIMENSIONS (a);
3106 unsigned int num_dimensions_b = DR_NUM_DIMENSIONS (b);
3107 if (num_dimensions_a == 0 || num_dimensions_b == 0)
3108 {
3109 DDR_ARE_DEPENDENT (res) = chrec_dont_know;
3110 return res;
3111 }
3112
3113 /* For unconstrained bases, the root (highest-indexed) subscript
3114 describes a variation in the base of the original DR_REF rather
3115 than a component access. We have no type that accurately describes
3116 the new DR_BASE_OBJECT (whose TREE_TYPE describes the type *after*
3117 applying this subscript) so limit the search to the last real
3118 component access.
3119
3120 E.g. for:
3121
3122 void
3123 f (int a[][8], int b[][8])
3124 {
3125 for (int i = 0; i < 8; ++i)
3126 a[i * 2][0] = b[i][0];
3127 }
3128
3129 the a and b accesses have a single ARRAY_REF component reference [0]
3130 but have two subscripts. */
3131 if (DR_UNCONSTRAINED_BASE (a))
3132 num_dimensions_a -= 1;
3133 if (DR_UNCONSTRAINED_BASE (b))
3134 num_dimensions_b -= 1;
3135
3136 /* These structures describe sequences of component references in
3137 DR_REF (A) and DR_REF (B). Each component reference is tied to a
3138 specific access function. */
3139 struct {
3140 /* The sequence starts at DR_ACCESS_FN (A, START_A) of A and
3141 DR_ACCESS_FN (B, START_B) of B (inclusive) and extends to higher
3142 indices. In C notation, these are the indices of the rightmost
3143 component references; e.g. for a sequence .b.c.d, the start
3144 index is for .d. */
3145 unsigned int start_a;
3146 unsigned int start_b;
3147
3148 /* The sequence contains LENGTH consecutive access functions from
3149 each DR. */
3150 unsigned int length;
3151
3152 /* The enclosing objects for the A and B sequences respectively,
3153 i.e. the objects to which DR_ACCESS_FN (A, START_A + LENGTH - 1)
3154 and DR_ACCESS_FN (B, START_B + LENGTH - 1) are applied. */
3155 tree object_a;
3156 tree object_b;
3157 } full_seq = {}, struct_seq = {};
3158
3159 /* Before each iteration of the loop:
3160
3161 - REF_A is what you get after applying DR_ACCESS_FN (A, INDEX_A) and
3162 - REF_B is what you get after applying DR_ACCESS_FN (B, INDEX_B). */
3163 unsigned int index_a = 0;
3164 unsigned int index_b = 0;
3165 tree ref_a = DR_REF (a);
3166 tree ref_b = DR_REF (b);
3167
3168 /* Now walk the component references from the final DR_REFs back up to
3169 the enclosing base objects. Each component reference corresponds
3170 to one access function in the DR, with access function 0 being for
3171 the final DR_REF and the highest-indexed access function being the
3172 one that is applied to the base of the DR.
3173
3174 Look for a sequence of component references whose access functions
3175 are comparable (see access_fn_components_comparable_p). If more
3176 than one such sequence exists, pick the one nearest the base
3177 (which is the leftmost sequence in C notation). Store this sequence
3178 in FULL_SEQ.
3179
3180 For example, if we have:
3181
3182 struct foo { struct bar s; ... } (*a)[10], (*b)[10];
3183
3184 A: a[0][i].s.c.d
3185 B: __real b[0][i].s.e[i].f
3186
3187 (where d is the same type as the real component of f) then the access
3188 functions would be:
3189
3190 0 1 2 3
3191 A: .d .c .s [i]
3192
3193 0 1 2 3 4 5
3194 B: __real .f [i] .e .s [i]
3195
3196 The A0/B2 column isn't comparable, since .d is a COMPONENT_REF
3197 and [i] is an ARRAY_REF. However, the A1/B3 column contains two
3198 COMPONENT_REF accesses for struct bar, so is comparable. Likewise
3199 the A2/B4 column contains two COMPONENT_REF accesses for struct foo,
3200 so is comparable. The A3/B5 column contains two ARRAY_REFs that
3201 index foo[10] arrays, so is again comparable. The sequence is
3202 therefore:
3203
3204 A: [1, 3] (i.e. [i].s.c)
3205 B: [3, 5] (i.e. [i].s.e)
3206
3207 Also look for sequences of component references whose access
3208 functions are comparable and whose enclosing objects have the same
3209 RECORD_TYPE. Store this sequence in STRUCT_SEQ. In the above
3210 example, STRUCT_SEQ would be:
3211
3212 A: [1, 2] (i.e. s.c)
3213 B: [3, 4] (i.e. s.e) */
3214 while (index_a < num_dimensions_a && index_b < num_dimensions_b)
3215 {
3216 /* REF_A and REF_B must be one of the component access types
3217 allowed by dr_analyze_indices. */
3218 gcc_checking_assert (access_fn_component_p (ref_a));
3219 gcc_checking_assert (access_fn_component_p (ref_b));
3220
3221 /* Get the immediately-enclosing objects for REF_A and REF_B,
3222 i.e. the references *before* applying DR_ACCESS_FN (A, INDEX_A)
3223 and DR_ACCESS_FN (B, INDEX_B). */
3224 tree object_a = TREE_OPERAND (ref_a, 0);
3225 tree object_b = TREE_OPERAND (ref_b, 0);
3226
3227 tree type_a = TREE_TYPE (object_a);
3228 tree type_b = TREE_TYPE (object_b);
3229 if (access_fn_components_comparable_p (ref_a, ref_b))
3230 {
3231 /* This pair of component accesses is comparable for dependence
3232 analysis, so we can include DR_ACCESS_FN (A, INDEX_A) and
3233 DR_ACCESS_FN (B, INDEX_B) in the sequence. */
3234 if (full_seq.start_a + full_seq.length != index_a
3235 || full_seq.start_b + full_seq.length != index_b)
3236 {
3237 /* The accesses don't extend the current sequence,
3238 so start a new one here. */
3239 full_seq.start_a = index_a;
3240 full_seq.start_b = index_b;
3241 full_seq.length = 0;
3242 }
3243
3244 /* Add this pair of references to the sequence. */
3245 full_seq.length += 1;
3246 full_seq.object_a = object_a;
3247 full_seq.object_b = object_b;
3248
3249 /* If the enclosing objects are structures (and thus have the
3250 same RECORD_TYPE), record the new sequence in STRUCT_SEQ. */
3251 if (TREE_CODE (type_a) == RECORD_TYPE)
3252 struct_seq = full_seq;
3253
3254 /* Move to the next containing reference for both A and B. */
3255 ref_a = object_a;
3256 ref_b = object_b;
3257 index_a += 1;
3258 index_b += 1;
3259 continue;
3260 }
3261
3262 /* The accesses are not comparable here, so advance the reference with the smaller enclosing type (or both if equal), trying to approach equal type sizes. */
3263 if (!COMPLETE_TYPE_P (type_a)
3264 || !COMPLETE_TYPE_P (type_b)
3265 || !tree_fits_uhwi_p (TYPE_SIZE_UNIT (type_a))
3266 || !tree_fits_uhwi_p (TYPE_SIZE_UNIT (type_b)))
3267 break;
3268
3269 unsigned HOST_WIDE_INT size_a = tree_to_uhwi (TYPE_SIZE_UNIT (type_a));
3270 unsigned HOST_WIDE_INT size_b = tree_to_uhwi (TYPE_SIZE_UNIT (type_b));
3271 if (size_a <= size_b)
3272 {
3273 index_a += 1;
3274 ref_a = object_a;
3275 }
3276 if (size_b <= size_a)
3277 {
3278 index_b += 1;
3279 ref_b = object_b;
3280 }
3281 }
3282
3283 /* See whether FULL_SEQ ends at the base and whether the two bases
3284 are equal. We do not care about TBAA or alignment info so we can
3285 use OEP_ADDRESS_OF to avoid false negatives. */
3286 tree base_a = DR_BASE_OBJECT (a);
3287 tree base_b = DR_BASE_OBJECT (b);
3288 bool same_base_p = (full_seq.start_a + full_seq.length == num_dimensions_a
3289 && full_seq.start_b + full_seq.length == num_dimensions_b
3290 && DR_UNCONSTRAINED_BASE (a) == DR_UNCONSTRAINED_BASE (b)
3291 && operand_equal_p (base_a, base_b, OEP_ADDRESS_OF)
3292 && (types_compatible_p (TREE_TYPE (base_a),
3293 TREE_TYPE (base_b))
3294 || (!base_supports_access_fn_components_p (base_a)
3295 && !base_supports_access_fn_components_p (base_b)
3296 && operand_equal_p
3297 (TYPE_SIZE (TREE_TYPE (base_a)),
3298 TYPE_SIZE (TREE_TYPE (base_b)), 0)))
3299 && (!loop_nest.exists ()
3300 || (object_address_invariant_in_loop_p
3301 (loop_nest[0], base_a))));
3302
3303 /* If the bases are the same, we can include the base variation too.
3304 E.g. the b accesses in:
3305
3306 for (int i = 0; i < n; ++i)
3307 b[i + 4][0] = b[i][0];
3308
3309 have a definite dependence distance of 4, while for:
3310
3311 for (int i = 0; i < n; ++i)
3312 a[i + 4][0] = b[i][0];
3313
3314 the dependence distance depends on the gap between a and b.
3315
3316 If the bases are different then we can only rely on the sequence
3317 rooted at a structure access, since arrays are allowed to overlap
3318 arbitrarily and change shape arbitrarily. E.g. we treat this as
3319 valid code:
3320
3321 int a[256];
3322 ...
3323 ((int (*)[4][3]) &a[1])[i][0] += ((int (*)[4][3]) &a[2])[i][0];
3324
3325 where two lvalues with the same int[4][3] type overlap, and where
3326 both lvalues are distinct from the object's declared type. */
3327 if (same_base_p)
3328 {
3329 if (DR_UNCONSTRAINED_BASE (a))
3330 full_seq.length += 1;
3331 }
3332 else
3333 full_seq = struct_seq;
3334
3335 /* Punt if we didn't find a suitable sequence. */
3336 if (full_seq.length == 0)
3337 {
3338 DDR_ARE_DEPENDENT (res) = chrec_dont_know;
3339 return res;
3340 }
3341
3342 if (!same_base_p)
3343 {
3344 /* Partial overlap is possible for different bases when strict aliasing
3345 is not in effect. It's also possible if either base involves a union
3346 access; e.g. for:
3347
3348 struct s1 { int a[2]; };
3349 struct s2 { struct s1 b; int c; };
3350 struct s3 { int d; struct s1 e; };
3351 union u { struct s2 f; struct s3 g; } *p, *q;
3352
3353 the s1 at "p->f.b" (base "p->f") partially overlaps the s1 at
3354 "p->g.e" (base "p->g") and might partially overlap the s1 at
3355 "q->g.e" (base "q->g"). */
3356 if (!flag_strict_aliasing
3357 || ref_contains_union_access_p (full_seq.object_a)
3358 || ref_contains_union_access_p (full_seq.object_b))
3359 {
3360 DDR_ARE_DEPENDENT (res) = chrec_dont_know;
3361 return res;
3362 }
3363
3364 DDR_COULD_BE_INDEPENDENT_P (res) = true;
3365 if (!loop_nest.exists ()
3366 || (object_address_invariant_in_loop_p (loop_nest[0],
3367 full_seq.object_a)
3368 && object_address_invariant_in_loop_p (loop_nest[0],
3369 full_seq.object_b)))
3370 {
3371 DDR_OBJECT_A (res) = full_seq.object_a;
3372 DDR_OBJECT_B (res) = full_seq.object_b;
3373 }
3374 }
3375
3376 DDR_AFFINE_P (res) = true;
3377 DDR_ARE_DEPENDENT (res) = NULL_TREE;
3378 DDR_SUBSCRIPTS (res).create (full_seq.length);
3379 DDR_LOOP_NEST (res) = loop_nest;
3380 DDR_SELF_REFERENCE (res) = false;
3381
3382 for (i = 0; i < full_seq.length; ++i)
3383 {
3384 struct subscript *subscript;
3385
3386 subscript = XNEW (struct subscript);
3387 SUB_ACCESS_FN (subscript, 0) = DR_ACCESS_FN (a, full_seq.start_a + i);
3388 SUB_ACCESS_FN (subscript, 1) = DR_ACCESS_FN (b, full_seq.start_b + i);
3389 SUB_CONFLICTS_IN_A (subscript) = conflict_fn_not_known ();
3390 SUB_CONFLICTS_IN_B (subscript) = conflict_fn_not_known ();
3391 SUB_LAST_CONFLICT (subscript) = chrec_dont_know;
3392 SUB_DISTANCE (subscript) = chrec_dont_know;
3393 DDR_SUBSCRIPTS (res).safe_push (subscript);
3394 }
3395
3396 return res;
3397 }
3398
3399 /* Frees memory used by the conflict function F. */
3400
3401 static void
3402 free_conflict_function (conflict_function *f)
3403 {
3404 unsigned i;
3405
3406 if (CF_NONTRIVIAL_P (f))
3407 {
3408 for (i = 0; i < f->n; i++)
3409 affine_fn_free (f->fns[i]);
3410 }
3411 free (f);
3412 }
3413
3414 /* Frees memory used by SUBSCRIPTS. */
3415
3416 static void
3417 free_subscripts (vec<subscript_p> subscripts)
3418 {
3419 unsigned i;
3420 subscript_p s;
3421
3422 FOR_EACH_VEC_ELT (subscripts, i, s)
3423 {
3424 free_conflict_function (s->conflicting_iterations_in_a);
3425 free_conflict_function (s->conflicting_iterations_in_b);
3426 free (s);
3427 }
3428 subscripts.release ();
3429 }
3430
3431 /* Set DDR_ARE_DEPENDENT to CHREC and finalize the subscript overlap
3432 description. */
3433
3434 static inline void
3435 finalize_ddr_dependent (struct data_dependence_relation *ddr,
3436 tree chrec)
3437 {
3438 DDR_ARE_DEPENDENT (ddr) = chrec;
3439 free_subscripts (DDR_SUBSCRIPTS (ddr));
3440 DDR_SUBSCRIPTS (ddr).create (0);
3441 }
3442
3443 /* The dependence relation DDR cannot be represented by a distance
3444 vector. */
3445
3446 static inline void
3447 non_affine_dependence_relation (struct data_dependence_relation *ddr)
3448 {
3449 if (dump_file && (dump_flags & TDF_DETAILS))
3450 fprintf (dump_file, "(Dependence relation cannot be represented by distance vector.) \n");
3451
3452 DDR_AFFINE_P (ddr) = false;
3453 }
3454
3455 \f
3456
3457 /* This section contains the classic Banerjee tests. */
3458
3459 /* Returns true iff CHREC_A and CHREC_B are not dependent on any index
3460 variables, i.e., if the ZIV (Zero Index Variable) test is true. */
3461
3462 static inline bool
3463 ziv_subscript_p (const_tree chrec_a, const_tree chrec_b)
3464 {
3465 return (evolution_function_is_constant_p (chrec_a)
3466 && evolution_function_is_constant_p (chrec_b));
3467 }
3468
3469 /* Returns true iff CHREC_A and CHREC_B are dependent on an index
3470 variable, i.e., if the SIV (Single Index Variable) test is true. */
3471
3472 static bool
3473 siv_subscript_p (const_tree chrec_a, const_tree chrec_b)
3474 {
3475 if ((evolution_function_is_constant_p (chrec_a)
3476 && evolution_function_is_univariate_p (chrec_b))
3477 || (evolution_function_is_constant_p (chrec_b)
3478 && evolution_function_is_univariate_p (chrec_a)))
3479 return true;
3480
3481 if (evolution_function_is_univariate_p (chrec_a)
3482 && evolution_function_is_univariate_p (chrec_b))
3483 {
3484 switch (TREE_CODE (chrec_a))
3485 {
3486 case POLYNOMIAL_CHREC:
3487 switch (TREE_CODE (chrec_b))
3488 {
3489 case POLYNOMIAL_CHREC:
3490 if (CHREC_VARIABLE (chrec_a) != CHREC_VARIABLE (chrec_b))
3491 return false;
3492 /* FALLTHRU */
3493
3494 default:
3495 return true;
3496 }
3497
3498 default:
3499 return true;
3500 }
3501 }
3502
3503 return false;
3504 }
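
/* For example, A[i] vs. A[5] gives the subscript ({0, +, 1}_1, 5)
   which is SIV, and so is ({0, +, 1}_1, {3, +, 2}_1) since both
   chrecs are univariate in the same loop.  ({0, +, 1}_1, {0, +, 1}_2)
   is rejected here because the chrecs evolve in different loops; that
   case is handled by the MIV analysis instead.  */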
3505
3506 /* Creates a conflict function with N dimensions. The affine functions
3507 in each dimension follow. */
3508
3509 static conflict_function *
3510 conflict_fn (unsigned n, ...)
3511 {
3512 unsigned i;
3513 conflict_function *ret = XCNEW (conflict_function);
3514 va_list ap;
3515
3516 gcc_assert (n > 0 && n <= MAX_DIM);
3517 va_start (ap, n);
3518
3519 ret->n = n;
3520 for (i = 0; i < n; i++)
3521 ret->fns[i] = va_arg (ap, affine_fn);
3522 va_end (ap);
3523
3524 return ret;
3525 }
3526
3527 /* Returns constant affine function with value CST. */
3528
3529 static affine_fn
3530 affine_fn_cst (tree cst)
3531 {
3532 affine_fn fn;
3533 fn.create (1);
3534 fn.quick_push (cst);
3535 return fn;
3536 }
3537
3538 /* Returns affine function with single variable, CST + COEF * x_DIM. */
3539
3540 static affine_fn
3541 affine_fn_univar (tree cst, unsigned dim, tree coef)
3542 {
3543 affine_fn fn;
3544 fn.create (dim + 1);
3545 unsigned i;
3546
3547 gcc_assert (dim > 0);
3548 fn.quick_push (cst);
3549 for (i = 1; i < dim; i++)
3550 fn.quick_push (integer_zero_node);
3551 fn.quick_push (coef);
3552 return fn;
3553 }
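
/* For example, with CST = 3, DIM = 2 and COEF = 5 this builds the
   coefficient vector (3, 0, 5), representing the function 3 + 5 * x_2:
   slot 0 holds the constant term and slot I the coefficient of the
   variable of dimension I.  */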
3554
3555 /* Analyze a ZIV (Zero Index Variable) subscript. *OVERLAPS_A and
3556 *OVERLAPS_B are initialized to the functions that describe the
3557 relation between the elements accessed twice by CHREC_A and
3558 CHREC_B. For k >= 0, the following property is verified:
3559
3560 CHREC_A (*OVERLAPS_A (k)) = CHREC_B (*OVERLAPS_B (k)). */
3561
3562 static void
3563 analyze_ziv_subscript (tree chrec_a,
3564 tree chrec_b,
3565 conflict_function **overlaps_a,
3566 conflict_function **overlaps_b,
3567 tree *last_conflicts)
3568 {
3569 tree type, difference;
3570 dependence_stats.num_ziv++;
3571
3572 if (dump_file && (dump_flags & TDF_DETAILS))
3573 fprintf (dump_file, "(analyze_ziv_subscript \n");
3574
3575 type = signed_type_for_types (TREE_TYPE (chrec_a), TREE_TYPE (chrec_b));
3576 chrec_a = chrec_convert (type, chrec_a, NULL);
3577 chrec_b = chrec_convert (type, chrec_b, NULL);
3578 difference = chrec_fold_minus (type, chrec_a, chrec_b);
3579
3580 switch (TREE_CODE (difference))
3581 {
3582 case INTEGER_CST:
3583 if (integer_zerop (difference))
3584 {
3585 /* The difference is equal to zero: the accessed index
3586 overlaps for each iteration in the loop. */
3587 *overlaps_a = conflict_fn (1, affine_fn_cst (integer_zero_node));
3588 *overlaps_b = conflict_fn (1, affine_fn_cst (integer_zero_node));
3589 *last_conflicts = chrec_dont_know;
3590 dependence_stats.num_ziv_dependent++;
3591 }
3592 else
3593 {
3594 /* The accesses do not overlap. */
3595 *overlaps_a = conflict_fn_no_dependence ();
3596 *overlaps_b = conflict_fn_no_dependence ();
3597 *last_conflicts = integer_zero_node;
3598 dependence_stats.num_ziv_independent++;
3599 }
3600 break;
3601
3602 default:
3603 /* We're not sure whether the indexes overlap. For the moment,
3604 conservatively answer "don't know". */
3605 if (dump_file && (dump_flags & TDF_DETAILS))
3606 fprintf (dump_file, "ziv test failed: difference is non-integer.\n");
3607
3608 *overlaps_a = conflict_fn_not_known ();
3609 *overlaps_b = conflict_fn_not_known ();
3610 *last_conflicts = chrec_dont_know;
3611 dependence_stats.num_ziv_unimplemented++;
3612 break;
3613 }
3614
3615 if (dump_file && (dump_flags & TDF_DETAILS))
3616 fprintf (dump_file, ")\n");
3617 }
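
/* For example, for A[5] vs. A[5] the difference folds to 0 and the
   accesses overlap at every iteration, while for A[5] vs. A[7] the
   difference folds to the nonzero constant -2 and the accesses are
   independent.  */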
3618
3619 /* Similar to max_stmt_executions_int, but returns the bound as a tree,
3620    and only if it fits the unsigned int type.  If this is not the case, or the
3621 bound on the number of iterations of LOOP could not be derived, returns
3622 chrec_dont_know. */
3623
3624 static tree
3625 max_stmt_executions_tree (class loop *loop)
3626 {
3627 widest_int nit;
3628
3629 if (!max_stmt_executions (loop, &nit))
3630 return chrec_dont_know;
3631
3632 if (!wi::fits_to_tree_p (nit, unsigned_type_node))
3633 return chrec_dont_know;
3634
3635 return wide_int_to_tree (unsigned_type_node, nit);
3636 }
3637
3638 /* Determine whether the CHREC is always positive/negative. If the expression
3639 cannot be statically analyzed, return false, otherwise set the answer into
3640 VALUE. */
3641
3642 static bool
3643 chrec_is_positive (tree chrec, bool *value)
3644 {
3645 bool value0, value1, value2;
3646 tree end_value, nb_iter;
3647
3648 switch (TREE_CODE (chrec))
3649 {
3650 case POLYNOMIAL_CHREC:
3651 if (!chrec_is_positive (CHREC_LEFT (chrec), &value0)
3652 || !chrec_is_positive (CHREC_RIGHT (chrec), &value1))
3653 return false;
3654
3655 /* FIXME -- overflows. */
3656 if (value0 == value1)
3657 {
3658 *value = value0;
3659 return true;
3660 }
3661
3662       /* Otherwise the chrec is of the form: "{-197, +, 2}_1",
3663 and the proof consists in showing that the sign never
3664 changes during the execution of the loop, from 0 to
3665 loop->nb_iterations. */
3666 if (!evolution_function_is_affine_p (chrec))
3667 return false;
3668
3669 nb_iter = number_of_latch_executions (get_chrec_loop (chrec));
3670 if (chrec_contains_undetermined (nb_iter))
3671 return false;
3672
3673 #if 0
3674 /* TODO -- If the test is after the exit, we may decrease the number of
3675 iterations by one. */
3676 if (after_exit)
3677 nb_iter = chrec_fold_minus (type, nb_iter, build_int_cst (type, 1));
3678 #endif
3679
3680 end_value = chrec_apply (CHREC_VARIABLE (chrec), chrec, nb_iter);
3681
3682 if (!chrec_is_positive (end_value, &value2))
3683 return false;
3684
3685 *value = value0;
3686       return value0 == value2;
3687
3688 case INTEGER_CST:
3689 switch (tree_int_cst_sgn (chrec))
3690 {
3691 case -1:
3692 *value = false;
3693 break;
3694 case 1:
3695 *value = true;
3696 break;
3697 default:
3698 return false;
3699 }
3700 return true;
3701
3702 default:
3703 return false;
3704 }
3705 }
3706
3707
3708 /* Analyze a SIV (Single Index Variable) subscript where CHREC_A is a
3709 constant, and CHREC_B is an affine function. *OVERLAPS_A and
3710 *OVERLAPS_B are initialized to the functions that describe the
3711 relation between the elements accessed twice by CHREC_A and
3712 CHREC_B. For k >= 0, the following property is verified:
3713
3714 CHREC_A (*OVERLAPS_A (k)) = CHREC_B (*OVERLAPS_B (k)). */
3715
3716 static void
3717 analyze_siv_subscript_cst_affine (tree chrec_a,
3718 tree chrec_b,
3719 conflict_function **overlaps_a,
3720 conflict_function **overlaps_b,
3721 tree *last_conflicts)
3722 {
3723 bool value0, value1, value2;
3724 tree type, difference, tmp;
3725
3726 type = signed_type_for_types (TREE_TYPE (chrec_a), TREE_TYPE (chrec_b));
3727 chrec_a = chrec_convert (type, chrec_a, NULL);
3728 chrec_b = chrec_convert (type, chrec_b, NULL);
3729 difference = chrec_fold_minus (type, initial_condition (chrec_b), chrec_a);
3730
3731 /* Special case overlap in the first iteration. */
3732 if (integer_zerop (difference))
3733 {
3734 *overlaps_a = conflict_fn (1, affine_fn_cst (integer_zero_node));
3735 *overlaps_b = conflict_fn (1, affine_fn_cst (integer_zero_node));
3736 *last_conflicts = integer_one_node;
3737 return;
3738 }
3739
3740 if (!chrec_is_positive (initial_condition (difference), &value0))
3741 {
3742 if (dump_file && (dump_flags & TDF_DETAILS))
3743 fprintf (dump_file, "siv test failed: chrec is not positive.\n");
3744
3745 dependence_stats.num_siv_unimplemented++;
3746 *overlaps_a = conflict_fn_not_known ();
3747 *overlaps_b = conflict_fn_not_known ();
3748 *last_conflicts = chrec_dont_know;
3749 return;
3750 }
3751 else
3752 {
3753 if (value0 == false)
3754 {
3755 if (TREE_CODE (chrec_b) != POLYNOMIAL_CHREC
3756 || !chrec_is_positive (CHREC_RIGHT (chrec_b), &value1))
3757 {
3758 if (dump_file && (dump_flags & TDF_DETAILS))
3759 fprintf (dump_file, "siv test failed: chrec not positive.\n");
3760
3761 *overlaps_a = conflict_fn_not_known ();
3762 *overlaps_b = conflict_fn_not_known ();
3763 *last_conflicts = chrec_dont_know;
3764 dependence_stats.num_siv_unimplemented++;
3765 return;
3766 }
3767 else
3768 {
3769 if (value1 == true)
3770 {
3771 /* Example:
3772 chrec_a = 12
3773 chrec_b = {10, +, 1}
3774 */
3775
3776 if (tree_fold_divides_p (CHREC_RIGHT (chrec_b), difference))
3777 {
3778 HOST_WIDE_INT numiter;
3779 class loop *loop = get_chrec_loop (chrec_b);
3780
3781 *overlaps_a = conflict_fn (1, affine_fn_cst (integer_zero_node));
3782 tmp = fold_build2 (EXACT_DIV_EXPR, type,
3783 fold_build1 (ABS_EXPR, type, difference),
3784 CHREC_RIGHT (chrec_b));
3785 *overlaps_b = conflict_fn (1, affine_fn_cst (tmp));
3786 *last_conflicts = integer_one_node;
3787
3788
3789 /* Perform weak-zero siv test to see if overlap is
3790 outside the loop bounds. */
3791 numiter = max_stmt_executions_int (loop);
3792
3793 if (numiter >= 0
3794 && compare_tree_int (tmp, numiter) > 0)
3795 {
3796 free_conflict_function (*overlaps_a);
3797 free_conflict_function (*overlaps_b);
3798 *overlaps_a = conflict_fn_no_dependence ();
3799 *overlaps_b = conflict_fn_no_dependence ();
3800 *last_conflicts = integer_zero_node;
3801 dependence_stats.num_siv_independent++;
3802 return;
3803 }
3804 dependence_stats.num_siv_dependent++;
3805 return;
3806 }
3807
3808 /* When the step does not divide the difference, there are
3809 no overlaps. */
3810 else
3811 {
3812 *overlaps_a = conflict_fn_no_dependence ();
3813 *overlaps_b = conflict_fn_no_dependence ();
3814 *last_conflicts = integer_zero_node;
3815 dependence_stats.num_siv_independent++;
3816 return;
3817 }
3818 }
3819
3820 else
3821 {
3822 /* Example:
3823 chrec_a = 12
3824 chrec_b = {10, +, -1}
3825
3826 In this case, chrec_a will not overlap with chrec_b. */
3827 *overlaps_a = conflict_fn_no_dependence ();
3828 *overlaps_b = conflict_fn_no_dependence ();
3829 *last_conflicts = integer_zero_node;
3830 dependence_stats.num_siv_independent++;
3831 return;
3832 }
3833 }
3834 }
3835 else
3836 {
3837 if (TREE_CODE (chrec_b) != POLYNOMIAL_CHREC
3838 || !chrec_is_positive (CHREC_RIGHT (chrec_b), &value2))
3839 {
3840 if (dump_file && (dump_flags & TDF_DETAILS))
3841 fprintf (dump_file, "siv test failed: chrec not positive.\n");
3842
3843 *overlaps_a = conflict_fn_not_known ();
3844 *overlaps_b = conflict_fn_not_known ();
3845 *last_conflicts = chrec_dont_know;
3846 dependence_stats.num_siv_unimplemented++;
3847 return;
3848 }
3849 else
3850 {
3851 if (value2 == false)
3852 {
3853 /* Example:
3854 chrec_a = 3
3855 chrec_b = {10, +, -1}
3856 */
3857 if (tree_fold_divides_p (CHREC_RIGHT (chrec_b), difference))
3858 {
3859 HOST_WIDE_INT numiter;
3860 class loop *loop = get_chrec_loop (chrec_b);
3861
3862 *overlaps_a = conflict_fn (1, affine_fn_cst (integer_zero_node));
3863 tmp = fold_build2 (EXACT_DIV_EXPR, type, difference,
3864 CHREC_RIGHT (chrec_b));
3865 *overlaps_b = conflict_fn (1, affine_fn_cst (tmp));
3866 *last_conflicts = integer_one_node;
3867
3868 /* Perform weak-zero siv test to see if overlap is
3869 outside the loop bounds. */
3870 numiter = max_stmt_executions_int (loop);
3871
3872 if (numiter >= 0
3873 && compare_tree_int (tmp, numiter) > 0)
3874 {
3875 free_conflict_function (*overlaps_a);
3876 free_conflict_function (*overlaps_b);
3877 *overlaps_a = conflict_fn_no_dependence ();
3878 *overlaps_b = conflict_fn_no_dependence ();
3879 *last_conflicts = integer_zero_node;
3880 dependence_stats.num_siv_independent++;
3881 return;
3882 }
3883 dependence_stats.num_siv_dependent++;
3884 return;
3885 }
3886
3887 /* When the step does not divide the difference, there
3888 are no overlaps. */
3889 else
3890 {
3891 *overlaps_a = conflict_fn_no_dependence ();
3892 *overlaps_b = conflict_fn_no_dependence ();
3893 *last_conflicts = integer_zero_node;
3894 dependence_stats.num_siv_independent++;
3895 return;
3896 }
3897 }
3898 else
3899 {
3900 /* Example:
3901 chrec_a = 3
3902 chrec_b = {4, +, 1}
3903
3904 In this case, chrec_a will not overlap with chrec_b. */
3905 *overlaps_a = conflict_fn_no_dependence ();
3906 *overlaps_b = conflict_fn_no_dependence ();
3907 *last_conflicts = integer_zero_node;
3908 dependence_stats.num_siv_independent++;
3909 return;
3910 }
3911 }
3912 }
3913 }
3914 }
3915
3916 /* Helper recursive function for initializing the matrix A. Returns
3917 the initial value of CHREC. */
3918
3919 static tree
3920 initialize_matrix_A (lambda_matrix A, tree chrec, unsigned index, int mult)
3921 {
3922 gcc_assert (chrec);
3923
3924 switch (TREE_CODE (chrec))
3925 {
3926 case POLYNOMIAL_CHREC:
3927 HOST_WIDE_INT chrec_right;
3928 if (!cst_and_fits_in_hwi (CHREC_RIGHT (chrec)))
3929 return chrec_dont_know;
3930 chrec_right = int_cst_value (CHREC_RIGHT (chrec));
3931 /* We want to be able to negate without overflow. */
3932 if (chrec_right == HOST_WIDE_INT_MIN)
3933 return chrec_dont_know;
3934 A[index][0] = mult * chrec_right;
3935 return initialize_matrix_A (A, CHREC_LEFT (chrec), index + 1, mult);
3936
3937 case PLUS_EXPR:
3938 case MULT_EXPR:
3939 case MINUS_EXPR:
3940 {
3941 tree op0 = initialize_matrix_A (A, TREE_OPERAND (chrec, 0), index, mult);
3942 tree op1 = initialize_matrix_A (A, TREE_OPERAND (chrec, 1), index, mult);
3943
3944 return chrec_fold_op (TREE_CODE (chrec), chrec_type (chrec), op0, op1);
3945 }
3946
3947 CASE_CONVERT:
3948 {
3949 tree op = initialize_matrix_A (A, TREE_OPERAND (chrec, 0), index, mult);
3950 return chrec_convert (chrec_type (chrec), op, NULL);
3951 }
3952
3953 case BIT_NOT_EXPR:
3954 {
3955 /* Handle ~X as -1 - X. */
3956 tree op = initialize_matrix_A (A, TREE_OPERAND (chrec, 0), index, mult);
3957 return chrec_fold_op (MINUS_EXPR, chrec_type (chrec),
3958 build_int_cst (TREE_TYPE (chrec), -1), op);
3959 }
3960
3961 case INTEGER_CST:
3962 return chrec;
3963
3964 default:
3965 gcc_unreachable ();
3966 return NULL_TREE;
3967 }
3968 }
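
/* For example, for CHREC = {{3, +, 4}_1, +, 5}_2 and MULT = 1, the
   outer step 5 is stored in A[INDEX][0], the recursion on CHREC_LEFT
   stores the inner step 4 in A[INDEX + 1][0], and the returned
   initial value is 3.  */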
3969
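/* Note that C division truncates towards zero, so this matches a true
   floor division only when the quotient is nonnegative or the
   division is exact; the uses below only divide nonnegative iteration
   counts.  */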
3970 #define FLOOR_DIV(x,y) ((x) / (y))
3971
3972 /* Solves the special case of the Diophantine equation:
3973 | {0, +, STEP_A}_x (OVERLAPS_A) = {0, +, STEP_B}_y (OVERLAPS_B)
3974
3975 Computes the descriptions OVERLAPS_A and OVERLAPS_B. NITER is the
3976 number of iterations that loops X and Y run. The overlaps will be
3977 constructed as evolutions in dimension DIM. */
3978
3979 static void
3980 compute_overlap_steps_for_affine_univar (HOST_WIDE_INT niter,
3981 HOST_WIDE_INT step_a,
3982 HOST_WIDE_INT step_b,
3983 affine_fn *overlaps_a,
3984 affine_fn *overlaps_b,
3985 tree *last_conflicts, int dim)
3986 {
3987 if (((step_a > 0 && step_b > 0)
3988 || (step_a < 0 && step_b < 0)))
3989 {
3990 HOST_WIDE_INT step_overlaps_a, step_overlaps_b;
3991 HOST_WIDE_INT gcd_steps_a_b, last_conflict, tau2;
3992
3993 gcd_steps_a_b = gcd (step_a, step_b);
3994 step_overlaps_a = step_b / gcd_steps_a_b;
3995 step_overlaps_b = step_a / gcd_steps_a_b;
3996
3997 if (niter > 0)
3998 {
3999 tau2 = FLOOR_DIV (niter, step_overlaps_a);
4000 tau2 = MIN (tau2, FLOOR_DIV (niter, step_overlaps_b));
4001 last_conflict = tau2;
4002 *last_conflicts = build_int_cst (NULL_TREE, last_conflict);
4003 }
4004 else
4005 *last_conflicts = chrec_dont_know;
4006
4007 *overlaps_a = affine_fn_univar (integer_zero_node, dim,
4008 build_int_cst (NULL_TREE,
4009 step_overlaps_a));
4010 *overlaps_b = affine_fn_univar (integer_zero_node, dim,
4011 build_int_cst (NULL_TREE,
4012 step_overlaps_b));
4013 }
4014
4015 else
4016 {
4017 *overlaps_a = affine_fn_cst (integer_zero_node);
4018 *overlaps_b = affine_fn_cst (integer_zero_node);
4019 *last_conflicts = integer_zero_node;
4020 }
4021 }
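
/* For example, with STEP_A = 6 and STEP_B = 9, the equation
   6 * x = 9 * y has the solutions x = 3 * t, y = 2 * t for t >= 0:
   gcd (6, 9) = 3, so *OVERLAPS_A gets the step 9 / 3 = 3 and
   *OVERLAPS_B the step 6 / 3 = 2.  With NITER = 12 the last conflict
   happens at t = MIN (12 / 3, 12 / 2) = 4.  */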
4022
4023 /* Solves the special case of a Diophantine equation where CHREC_A is
4024 an affine bivariate function, and CHREC_B is an affine univariate
4025 function. For example,
4026
4027 | {{0, +, 1}_x, +, 1335}_y = {0, +, 1336}_z
4028
4029 has the following overlapping functions:
4030
4031 | x (t, u, v) = {{0, +, 1336}_t, +, 1}_v
4032 | y (t, u, v) = {{0, +, 1336}_u, +, 1}_v
4033 | z (t, u, v) = {{{0, +, 1}_t, +, 1335}_u, +, 1}_v
4034
4035 FORNOW: This is a specialized implementation for a case occurring in
4036 a common benchmark. Implement the general algorithm. */
4037
4038 static void
4039 compute_overlap_steps_for_affine_1_2 (tree chrec_a, tree chrec_b,
4040 conflict_function **overlaps_a,
4041 conflict_function **overlaps_b,
4042 tree *last_conflicts)
4043 {
4044 bool xz_p, yz_p, xyz_p;
4045 HOST_WIDE_INT step_x, step_y, step_z;
4046 HOST_WIDE_INT niter_x, niter_y, niter_z, niter;
4047 affine_fn overlaps_a_xz, overlaps_b_xz;
4048 affine_fn overlaps_a_yz, overlaps_b_yz;
4049 affine_fn overlaps_a_xyz, overlaps_b_xyz;
4050 affine_fn ova1, ova2, ovb;
4051 tree last_conflicts_xz, last_conflicts_yz, last_conflicts_xyz;
4052
4053 step_x = int_cst_value (CHREC_RIGHT (CHREC_LEFT (chrec_a)));
4054 step_y = int_cst_value (CHREC_RIGHT (chrec_a));
4055 step_z = int_cst_value (CHREC_RIGHT (chrec_b));
4056
4057 niter_x = max_stmt_executions_int (get_chrec_loop (CHREC_LEFT (chrec_a)));
4058 niter_y = max_stmt_executions_int (get_chrec_loop (chrec_a));
4059 niter_z = max_stmt_executions_int (get_chrec_loop (chrec_b));
4060
4061 if (niter_x < 0 || niter_y < 0 || niter_z < 0)
4062 {
4063 if (dump_file && (dump_flags & TDF_DETAILS))
4064 fprintf (dump_file, "overlap steps test failed: no iteration counts.\n");
4065
4066 *overlaps_a = conflict_fn_not_known ();
4067 *overlaps_b = conflict_fn_not_known ();
4068 *last_conflicts = chrec_dont_know;
4069 return;
4070 }
4071
4072 niter = MIN (niter_x, niter_z);
4073 compute_overlap_steps_for_affine_univar (niter, step_x, step_z,
4074 &overlaps_a_xz,
4075 &overlaps_b_xz,
4076 &last_conflicts_xz, 1);
4077 niter = MIN (niter_y, niter_z);
4078 compute_overlap_steps_for_affine_univar (niter, step_y, step_z,
4079 &overlaps_a_yz,
4080 &overlaps_b_yz,
4081 &last_conflicts_yz, 2);
4082 niter = MIN (niter_x, niter_z);
4083 niter = MIN (niter_y, niter);
4084 compute_overlap_steps_for_affine_univar (niter, step_x + step_y, step_z,
4085 &overlaps_a_xyz,
4086 &overlaps_b_xyz,
4087 &last_conflicts_xyz, 3);
4088
4089 xz_p = !integer_zerop (last_conflicts_xz);
4090 yz_p = !integer_zerop (last_conflicts_yz);
4091 xyz_p = !integer_zerop (last_conflicts_xyz);
4092
4093 if (xz_p || yz_p || xyz_p)
4094 {
4095 ova1 = affine_fn_cst (integer_zero_node);
4096 ova2 = affine_fn_cst (integer_zero_node);
4097 ovb = affine_fn_cst (integer_zero_node);
4098 if (xz_p)
4099 {
4100 affine_fn t0 = ova1;
4101 affine_fn t2 = ovb;
4102
4103 ova1 = affine_fn_plus (ova1, overlaps_a_xz);
4104 ovb = affine_fn_plus (ovb, overlaps_b_xz);
4105 affine_fn_free (t0);
4106 affine_fn_free (t2);
4107 *last_conflicts = last_conflicts_xz;
4108 }
4109 if (yz_p)
4110 {
4111 affine_fn t0 = ova2;
4112 affine_fn t2 = ovb;
4113
4114 ova2 = affine_fn_plus (ova2, overlaps_a_yz);
4115 ovb = affine_fn_plus (ovb, overlaps_b_yz);
4116 affine_fn_free (t0);
4117 affine_fn_free (t2);
4118 *last_conflicts = last_conflicts_yz;
4119 }
4120 if (xyz_p)
4121 {
4122 affine_fn t0 = ova1;
4123 affine_fn t2 = ova2;
4124 affine_fn t4 = ovb;
4125
4126 ova1 = affine_fn_plus (ova1, overlaps_a_xyz);
4127 ova2 = affine_fn_plus (ova2, overlaps_a_xyz);
4128 ovb = affine_fn_plus (ovb, overlaps_b_xyz);
4129 affine_fn_free (t0);
4130 affine_fn_free (t2);
4131 affine_fn_free (t4);
4132 *last_conflicts = last_conflicts_xyz;
4133 }
4134 *overlaps_a = conflict_fn (2, ova1, ova2);
4135 *overlaps_b = conflict_fn (1, ovb);
4136 }
4137 else
4138 {
4139 *overlaps_a = conflict_fn (1, affine_fn_cst (integer_zero_node));
4140 *overlaps_b = conflict_fn (1, affine_fn_cst (integer_zero_node));
4141 *last_conflicts = integer_zero_node;
4142 }
4143
4144 affine_fn_free (overlaps_a_xz);
4145 affine_fn_free (overlaps_b_xz);
4146 affine_fn_free (overlaps_a_yz);
4147 affine_fn_free (overlaps_b_yz);
4148 affine_fn_free (overlaps_a_xyz);
4149 affine_fn_free (overlaps_b_xyz);
4150 }
4151
4152 /* Copy the elements of vector VEC1 with length SIZE to VEC2. */
4153
4154 static void
4155 lambda_vector_copy (lambda_vector vec1, lambda_vector vec2,
4156 int size)
4157 {
4158 memcpy (vec2, vec1, size * sizeof (*vec1));
4159 }
4160
4161 /* Copy the elements of M x N matrix MAT1 to MAT2. */
4162
4163 static void
4164 lambda_matrix_copy (lambda_matrix mat1, lambda_matrix mat2,
4165 int m, int n)
4166 {
4167 int i;
4168
4169 for (i = 0; i < m; i++)
4170 lambda_vector_copy (mat1[i], mat2[i], n);
4171 }
4172
4173 /* Store the N x N identity matrix in MAT. */
4174
4175 static void
4176 lambda_matrix_id (lambda_matrix mat, int size)
4177 {
4178 int i, j;
4179
4180 for (i = 0; i < size; i++)
4181 for (j = 0; j < size; j++)
4182 mat[i][j] = (i == j) ? 1 : 0;
4183 }
4184
4185 /* Return the index of the first nonzero element of vector VEC1 between
4186 START and N. We must have START <= N.
4187 Returns N if VEC1 is the zero vector. */
4188
4189 static int
4190 lambda_vector_first_nz (lambda_vector vec1, int n, int start)
4191 {
4192 int j = start;
4193 while (j < n && vec1[j] == 0)
4194 j++;
4195 return j;
4196 }
4197
4198 /* Add a multiple of row R1 of matrix MAT with N columns to row R2:
4199 R2 = R2 + CONST1 * R1. */
4200
4201 static bool
4202 lambda_matrix_row_add (lambda_matrix mat, int n, int r1, int r2,
4203 lambda_int const1)
4204 {
4205 int i;
4206
4207 if (const1 == 0)
4208 return true;
4209
4210 for (i = 0; i < n; i++)
4211 {
4212 bool ovf;
4213 lambda_int tem = mul_hwi (mat[r1][i], const1, &ovf);
4214 if (ovf)
4215 return false;
4216 lambda_int tem2 = add_hwi (mat[r2][i], tem, &ovf);
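      /* Also reject HOST_WIDE_INT_MIN: it has no representable
	 negation, and callers may later negate the row.  */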
4217 if (ovf || tem2 == HOST_WIDE_INT_MIN)
4218 return false;
4219 mat[r2][i] = tem2;
4220 }
4221
4222 return true;
4223 }
4224
4225 /* Multiply vector VEC1 of length SIZE by a constant CONST1,
4226 and store the result in VEC2. */
4227
4228 static void
4229 lambda_vector_mult_const (lambda_vector vec1, lambda_vector vec2,
4230 int size, lambda_int const1)
4231 {
4232 int i;
4233
4234 if (const1 == 0)
4235 lambda_vector_clear (vec2, size);
4236 else
4237 for (i = 0; i < size; i++)
4238 vec2[i] = const1 * vec1[i];
4239 }
4240
4241 /* Negate vector VEC1 with length SIZE and store it in VEC2. */
4242
4243 static void
4244 lambda_vector_negate (lambda_vector vec1, lambda_vector vec2,
4245 int size)
4246 {
4247 lambda_vector_mult_const (vec1, vec2, size, -1);
4248 }
4249
4250 /* Negate row R1 of matrix MAT which has N columns. */
4251
4252 static void
4253 lambda_matrix_row_negate (lambda_matrix mat, int n, int r1)
4254 {
4255 lambda_vector_negate (mat[r1], mat[r1], n);
4256 }
4257
4258 /* Return true if two vectors are equal. */
4259
4260 static bool
4261 lambda_vector_equal (lambda_vector vec1, lambda_vector vec2, int size)
4262 {
4263 int i;
4264 for (i = 0; i < size; i++)
4265 if (vec1[i] != vec2[i])
4266 return false;
4267 return true;
4268 }
4269
4270 /* Given an M x N integer matrix A, this function determines an M x
4271 M unimodular matrix U, and an M x N echelon matrix S such that
4272 "U.A = S". This decomposition is also known as "right Hermite".
4273
4274 Ref: Algorithm 2.1 page 33 in "Loop Transformations for
4275 Restructuring Compilers" Utpal Banerjee. */
4276
4277 static bool
4278 lambda_matrix_right_hermite (lambda_matrix A, int m, int n,
4279 lambda_matrix S, lambda_matrix U)
4280 {
4281 int i, j, i0 = 0;
4282
4283 lambda_matrix_copy (A, S, m, n);
4284 lambda_matrix_id (U, m);
4285
4286 for (j = 0; j < n; j++)
4287 {
4288 if (lambda_vector_first_nz (S[j], m, i0) < m)
4289 {
4290 ++i0;
4291 for (i = m - 1; i >= i0; i--)
4292 {
4293 while (S[i][j] != 0)
4294 {
4295 lambda_int factor, a, b;
4296
4297 a = S[i-1][j];
4298 b = S[i][j];
4299 gcc_assert (a != HOST_WIDE_INT_MIN);
4300 factor = a / b;
4301
4302 if (!lambda_matrix_row_add (S, n, i, i-1, -factor))
4303 return false;
4304 std::swap (S[i], S[i-1]);
4305
4306 if (!lambda_matrix_row_add (U, m, i, i-1, -factor))
4307 return false;
4308 std::swap (U[i], U[i-1]);
4309 }
4310 }
4311 }
4312 }
4313
4314 return true;
4315 }
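
/* For example, for the 2 x 1 matrix A = ((10), (6)) the elimination
   above yields S = ((2), (0)) and U = ((-1, 2), (3, -5)): U is
   unimodular (det U = -1) and U.A = S, with S[0][0] = gcd (10, 6).  */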
4316
4317 /* Determines the overlapping elements due to accesses CHREC_A and
4318 CHREC_B, that are affine functions. This function cannot handle
4319    symbolic evolution functions, i.e. when initial conditions are
4320 parameters, because it uses lambda matrices of integers. */
4321
4322 static void
4323 analyze_subscript_affine_affine (tree chrec_a,
4324 tree chrec_b,
4325 conflict_function **overlaps_a,
4326 conflict_function **overlaps_b,
4327 tree *last_conflicts)
4328 {
4329 unsigned nb_vars_a, nb_vars_b, dim;
4330 lambda_int gamma, gcd_alpha_beta;
4331 lambda_matrix A, U, S;
4332 struct obstack scratch_obstack;
4333
4334 if (eq_evolutions_p (chrec_a, chrec_b))
4335 {
4336 /* The accessed index overlaps for each iteration in the
4337 loop. */
4338 *overlaps_a = conflict_fn (1, affine_fn_cst (integer_zero_node));
4339 *overlaps_b = conflict_fn (1, affine_fn_cst (integer_zero_node));
4340 *last_conflicts = chrec_dont_know;
4341 return;
4342 }
4343 if (dump_file && (dump_flags & TDF_DETAILS))
4344 fprintf (dump_file, "(analyze_subscript_affine_affine \n");
4345
4346 /* For determining the initial intersection, we have to solve a
4347 Diophantine equation. This is the most time consuming part.
4348
4349      To answer the question: "Is there a dependence?" we have
4350 to prove that there exists a solution to the Diophantine
4351 equation, and that the solution is in the iteration domain,
4352 i.e. the solution is positive or zero, and that the solution
4353 happens before the upper bound loop.nb_iterations. Otherwise
4354 there is no dependence. This function outputs a description of
4355 the iterations that hold the intersections. */
4356
4357 nb_vars_a = nb_vars_in_chrec (chrec_a);
4358 nb_vars_b = nb_vars_in_chrec (chrec_b);
4359
4360 gcc_obstack_init (&scratch_obstack);
4361
4362 dim = nb_vars_a + nb_vars_b;
4363 U = lambda_matrix_new (dim, dim, &scratch_obstack);
4364 A = lambda_matrix_new (dim, 1, &scratch_obstack);
4365 S = lambda_matrix_new (dim, 1, &scratch_obstack);
4366
4367 tree init_a = initialize_matrix_A (A, chrec_a, 0, 1);
4368 tree init_b = initialize_matrix_A (A, chrec_b, nb_vars_a, -1);
4369 if (init_a == chrec_dont_know
4370 || init_b == chrec_dont_know)
4371 {
4372 if (dump_file && (dump_flags & TDF_DETAILS))
4373 fprintf (dump_file, "affine-affine test failed: "
4374 "representation issue.\n");
4375 *overlaps_a = conflict_fn_not_known ();
4376 *overlaps_b = conflict_fn_not_known ();
4377 *last_conflicts = chrec_dont_know;
4378 goto end_analyze_subs_aa;
4379 }
4380 gamma = int_cst_value (init_b) - int_cst_value (init_a);
4381
4382 /* Don't do all the hard work of solving the Diophantine equation
4383 when we already know the solution: for example,
4384 | {3, +, 1}_1
4385 | {3, +, 4}_2
4386 | gamma = 3 - 3 = 0.
4387 Then the first overlap occurs during the first iterations:
4388 | {3, +, 1}_1 ({0, +, 4}_x) = {3, +, 4}_2 ({0, +, 1}_x)
4389 */
4390 if (gamma == 0)
4391 {
4392 if (nb_vars_a == 1 && nb_vars_b == 1)
4393 {
4394 HOST_WIDE_INT step_a, step_b;
4395 HOST_WIDE_INT niter, niter_a, niter_b;
4396 affine_fn ova, ovb;
4397
4398 niter_a = max_stmt_executions_int (get_chrec_loop (chrec_a));
4399 niter_b = max_stmt_executions_int (get_chrec_loop (chrec_b));
4400 niter = MIN (niter_a, niter_b);
4401 step_a = int_cst_value (CHREC_RIGHT (chrec_a));
4402 step_b = int_cst_value (CHREC_RIGHT (chrec_b));
4403
4404 compute_overlap_steps_for_affine_univar (niter, step_a, step_b,
4405 &ova, &ovb,
4406 last_conflicts, 1);
4407 *overlaps_a = conflict_fn (1, ova);
4408 *overlaps_b = conflict_fn (1, ovb);
4409 }
4410
4411 else if (nb_vars_a == 2 && nb_vars_b == 1)
4412 compute_overlap_steps_for_affine_1_2
4413 (chrec_a, chrec_b, overlaps_a, overlaps_b, last_conflicts);
4414
4415 else if (nb_vars_a == 1 && nb_vars_b == 2)
4416 compute_overlap_steps_for_affine_1_2
4417 (chrec_b, chrec_a, overlaps_b, overlaps_a, last_conflicts);
4418
4419 else
4420 {
4421 if (dump_file && (dump_flags & TDF_DETAILS))
4422 fprintf (dump_file, "affine-affine test failed: too many variables.\n");
4423 *overlaps_a = conflict_fn_not_known ();
4424 *overlaps_b = conflict_fn_not_known ();
4425 *last_conflicts = chrec_dont_know;
4426 }
4427 goto end_analyze_subs_aa;
4428 }
4429
4430 /* U.A = S */
4431 if (!lambda_matrix_right_hermite (A, dim, 1, S, U))
4432 {
4433 *overlaps_a = conflict_fn_not_known ();
4434 *overlaps_b = conflict_fn_not_known ();
4435 *last_conflicts = chrec_dont_know;
4436 goto end_analyze_subs_aa;
4437 }
4438
4439 if (S[0][0] < 0)
4440 {
4441 S[0][0] *= -1;
4442 lambda_matrix_row_negate (U, dim, 0);
4443 }
4444 gcd_alpha_beta = S[0][0];
4445
4446 /* Something went wrong: for example in {1, +, 0}_5 vs. {0, +, 0}_5,
4447      but that is quite a strange case.  Instead of ICEing, answer
4448 don't know. */
4449 if (gcd_alpha_beta == 0)
4450 {
4451 *overlaps_a = conflict_fn_not_known ();
4452 *overlaps_b = conflict_fn_not_known ();
4453 *last_conflicts = chrec_dont_know;
4454 goto end_analyze_subs_aa;
4455 }
4456
4457 /* The classic "gcd-test". */
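  /* For example, for {0, +, 4}_1 vs. {1, +, 6}_1 we have gamma = 1
     and gcd_alpha_beta = gcd (4, 6) = 2: since 2 does not divide 1,
     the equation 4 * x = 1 + 6 * y has no integer solution and the
     accesses never overlap.  */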
4458 if (!int_divides_p (gcd_alpha_beta, gamma))
4459 {
4460 /* The "gcd-test" has determined that there is no integer
4461 solution, i.e. there is no dependence. */
4462 *overlaps_a = conflict_fn_no_dependence ();
4463 *overlaps_b = conflict_fn_no_dependence ();
4464 *last_conflicts = integer_zero_node;
4465 }
4466
4467 /* Both access functions are univariate. This includes SIV and MIV cases. */
4468 else if (nb_vars_a == 1 && nb_vars_b == 1)
4469 {
4470 /* Both functions should have the same evolution sign. */
4471 if (((A[0][0] > 0 && -A[1][0] > 0)
4472 || (A[0][0] < 0 && -A[1][0] < 0)))
4473 {
4474 /* The solutions are given by:
4475 |
4476 | [GAMMA/GCD_ALPHA_BETA t].[u11 u12] = [x0]
4477 | [u21 u22] [y0]
4478
4479 For a given integer t. Using the following variables,
4480
4481 | i0 = u11 * gamma / gcd_alpha_beta
4482 | j0 = u12 * gamma / gcd_alpha_beta
4483 | i1 = u21
4484 | j1 = u22
4485
4486 the solutions are:
4487
4488 | x0 = i0 + i1 * t,
4489 | y0 = j0 + j1 * t. */
4490 HOST_WIDE_INT i0, j0, i1, j1;
4491
4492 i0 = U[0][0] * gamma / gcd_alpha_beta;
4493 j0 = U[0][1] * gamma / gcd_alpha_beta;
4494 i1 = U[1][0];
4495 j1 = U[1][1];
4496
4497 if ((i1 == 0 && i0 < 0)
4498 || (j1 == 0 && j0 < 0))
4499 {
4500 /* There is no solution.
4501 FIXME: The case "i0 > nb_iterations, j0 > nb_iterations"
4502 falls in here, but for the moment we don't look at the
4503 upper bound of the iteration domain. */
4504 *overlaps_a = conflict_fn_no_dependence ();
4505 *overlaps_b = conflict_fn_no_dependence ();
4506 *last_conflicts = integer_zero_node;
4507 goto end_analyze_subs_aa;
4508 }
4509
4510 if (i1 > 0 && j1 > 0)
4511 {
4512 HOST_WIDE_INT niter_a
4513 = max_stmt_executions_int (get_chrec_loop (chrec_a));
4514 HOST_WIDE_INT niter_b
4515 = max_stmt_executions_int (get_chrec_loop (chrec_b));
4516 HOST_WIDE_INT niter = MIN (niter_a, niter_b);
4517
4518 /* (X0, Y0) is a solution of the Diophantine equation:
4519 "chrec_a (X0) = chrec_b (Y0)". */
4520 HOST_WIDE_INT tau1 = MAX (CEIL (-i0, i1),
4521 CEIL (-j0, j1));
4522 HOST_WIDE_INT x0 = i1 * tau1 + i0;
4523 HOST_WIDE_INT y0 = j1 * tau1 + j0;
4524
4525 	      /* (X1, Y1) is the smallest positive solution of the equation
4526 "chrec_a (X1) = chrec_b (Y1)", i.e. this is where the
4527 first conflict occurs. */
4528 HOST_WIDE_INT min_multiple = MIN (x0 / i1, y0 / j1);
4529 HOST_WIDE_INT x1 = x0 - i1 * min_multiple;
4530 HOST_WIDE_INT y1 = y0 - j1 * min_multiple;
4531
4532 if (niter > 0)
4533 {
4534 /* If the overlap occurs outside of the bounds of the
4535 loop, there is no dependence. */
4536 if (x1 >= niter_a || y1 >= niter_b)
4537 {
4538 *overlaps_a = conflict_fn_no_dependence ();
4539 *overlaps_b = conflict_fn_no_dependence ();
4540 *last_conflicts = integer_zero_node;
4541 goto end_analyze_subs_aa;
4542 }
4543
4544 /* max stmt executions can get quite large, avoid
4545 overflows by using wide ints here. */
4546 widest_int tau2
4547 = wi::smin (wi::sdiv_floor (wi::sub (niter_a, i0), i1),
4548 wi::sdiv_floor (wi::sub (niter_b, j0), j1));
4549 widest_int last_conflict = wi::sub (tau2, (x1 - i0)/i1);
4550 if (wi::min_precision (last_conflict, SIGNED)
4551 <= TYPE_PRECISION (integer_type_node))
4552 *last_conflicts
4553 = build_int_cst (integer_type_node,
4554 last_conflict.to_shwi ());
4555 else
4556 *last_conflicts = chrec_dont_know;
4557 }
4558 else
4559 *last_conflicts = chrec_dont_know;
4560
4561 *overlaps_a
4562 = conflict_fn (1,
4563 affine_fn_univar (build_int_cst (NULL_TREE, x1),
4564 1,
4565 build_int_cst (NULL_TREE, i1)));
4566 *overlaps_b
4567 = conflict_fn (1,
4568 affine_fn_univar (build_int_cst (NULL_TREE, y1),
4569 1,
4570 build_int_cst (NULL_TREE, j1)));
4571 }
4572 else
4573 {
4574 /* FIXME: For the moment, the upper bound of the
4575 iteration domain for i and j is not checked. */
4576 if (dump_file && (dump_flags & TDF_DETAILS))
4577 fprintf (dump_file, "affine-affine test failed: unimplemented.\n");
4578 *overlaps_a = conflict_fn_not_known ();
4579 *overlaps_b = conflict_fn_not_known ();
4580 *last_conflicts = chrec_dont_know;
4581 }
4582 }
4583 else
4584 {
4585 if (dump_file && (dump_flags & TDF_DETAILS))
4586 fprintf (dump_file, "affine-affine test failed: unimplemented.\n");
4587 *overlaps_a = conflict_fn_not_known ();
4588 *overlaps_b = conflict_fn_not_known ();
4589 *last_conflicts = chrec_dont_know;
4590 }
4591 }
4592 else
4593 {
4594 if (dump_file && (dump_flags & TDF_DETAILS))
4595 fprintf (dump_file, "affine-affine test failed: unimplemented.\n");
4596 *overlaps_a = conflict_fn_not_known ();
4597 *overlaps_b = conflict_fn_not_known ();
4598 *last_conflicts = chrec_dont_know;
4599 }
4600
4601 end_analyze_subs_aa:
4602 obstack_free (&scratch_obstack, NULL);
4603 if (dump_file && (dump_flags & TDF_DETAILS))
4604 {
4605 fprintf (dump_file, " (overlaps_a = ");
4606 dump_conflict_function (dump_file, *overlaps_a);
4607 fprintf (dump_file, ")\n (overlaps_b = ");
4608 dump_conflict_function (dump_file, *overlaps_b);
4609 fprintf (dump_file, "))\n");
4610 }
4611 }
4612
4613 /* Returns true when analyze_subscript_affine_affine can be used for
4614 determining the dependence relation between chrec_a and chrec_b,
4615 that contain symbols. This function modifies chrec_a and chrec_b
4616 such that the analysis result is the same, and such that they don't
4617 contain symbols, and then can safely be passed to the analyzer.
4618
4619    Example: The analysis of the following tuples of evolutions produces
4620 the same results: {x+1, +, 1}_1 vs. {x+3, +, 1}_1, and {-2, +, 1}_1
4621 vs. {0, +, 1}_1
4622
4623 {x+1, +, 1}_1 ({2, +, 1}_1) = {x+3, +, 1}_1 ({0, +, 1}_1)
4624 {-2, +, 1}_1 ({2, +, 1}_1) = {0, +, 1}_1 ({0, +, 1}_1)
4625 */
4626
4627 static bool
4628 can_use_analyze_subscript_affine_affine (tree *chrec_a, tree *chrec_b)
4629 {
4630 tree diff, type, left_a, left_b, right_b;
4631
4632 if (chrec_contains_symbols (CHREC_RIGHT (*chrec_a))
4633 || chrec_contains_symbols (CHREC_RIGHT (*chrec_b)))
4634 /* FIXME: For the moment not handled. Might be refined later. */
4635 return false;
4636
4637 type = chrec_type (*chrec_a);
4638 left_a = CHREC_LEFT (*chrec_a);
4639 left_b = chrec_convert (type, CHREC_LEFT (*chrec_b), NULL);
4640 diff = chrec_fold_minus (type, left_a, left_b);
4641
4642 if (!evolution_function_is_constant_p (diff))
4643 return false;
4644
4645 if (dump_file && (dump_flags & TDF_DETAILS))
4646 fprintf (dump_file, "can_use_subscript_aff_aff_for_symbolic \n");
4647
4648 *chrec_a = build_polynomial_chrec (CHREC_VARIABLE (*chrec_a),
4649 diff, CHREC_RIGHT (*chrec_a));
4650 right_b = chrec_convert (type, CHREC_RIGHT (*chrec_b), NULL);
4651 *chrec_b = build_polynomial_chrec (CHREC_VARIABLE (*chrec_b),
4652 build_int_cst (type, 0),
4653 right_b);
4654 return true;
4655 }
4656
4657 /* Analyze a SIV (Single Index Variable) subscript. *OVERLAPS_A and
4658 *OVERLAPS_B are initialized to the functions that describe the
4659 relation between the elements accessed twice by CHREC_A and
4660 CHREC_B. For k >= 0, the following property is verified:
4661
4662 CHREC_A (*OVERLAPS_A (k)) = CHREC_B (*OVERLAPS_B (k)). */
4663
4664 static void
4665 analyze_siv_subscript (tree chrec_a,
4666 tree chrec_b,
4667 conflict_function **overlaps_a,
4668 conflict_function **overlaps_b,
4669 tree *last_conflicts,
4670 int loop_nest_num)
4671 {
4672 dependence_stats.num_siv++;
4673
4674 if (dump_file && (dump_flags & TDF_DETAILS))
4675 fprintf (dump_file, "(analyze_siv_subscript \n");
4676
4677 if (evolution_function_is_constant_p (chrec_a)
4678 && evolution_function_is_affine_in_loop (chrec_b, loop_nest_num))
4679 analyze_siv_subscript_cst_affine (chrec_a, chrec_b,
4680 overlaps_a, overlaps_b, last_conflicts);
4681
4682 else if (evolution_function_is_affine_in_loop (chrec_a, loop_nest_num)
4683 && evolution_function_is_constant_p (chrec_b))
4684 analyze_siv_subscript_cst_affine (chrec_b, chrec_a,
4685 overlaps_b, overlaps_a, last_conflicts);
4686
4687 else if (evolution_function_is_affine_in_loop (chrec_a, loop_nest_num)
4688 && evolution_function_is_affine_in_loop (chrec_b, loop_nest_num))
4689 {
4690 if (!chrec_contains_symbols (chrec_a)
4691 && !chrec_contains_symbols (chrec_b))
4692 {
4693 analyze_subscript_affine_affine (chrec_a, chrec_b,
4694 overlaps_a, overlaps_b,
4695 last_conflicts);
4696
4697 if (CF_NOT_KNOWN_P (*overlaps_a)
4698 || CF_NOT_KNOWN_P (*overlaps_b))
4699 dependence_stats.num_siv_unimplemented++;
4700 else if (CF_NO_DEPENDENCE_P (*overlaps_a)
4701 || CF_NO_DEPENDENCE_P (*overlaps_b))
4702 dependence_stats.num_siv_independent++;
4703 else
4704 dependence_stats.num_siv_dependent++;
4705 }
4706 else if (can_use_analyze_subscript_affine_affine (&chrec_a,
4707 &chrec_b))
4708 {
4709 analyze_subscript_affine_affine (chrec_a, chrec_b,
4710 overlaps_a, overlaps_b,
4711 last_conflicts);
4712
4713 if (CF_NOT_KNOWN_P (*overlaps_a)
4714 || CF_NOT_KNOWN_P (*overlaps_b))
4715 dependence_stats.num_siv_unimplemented++;
4716 else if (CF_NO_DEPENDENCE_P (*overlaps_a)
4717 || CF_NO_DEPENDENCE_P (*overlaps_b))
4718 dependence_stats.num_siv_independent++;
4719 else
4720 dependence_stats.num_siv_dependent++;
4721 }
4722 else
4723 goto siv_subscript_dontknow;
4724 }
4725
4726 else
4727 {
4728 siv_subscript_dontknow:;
4729 if (dump_file && (dump_flags & TDF_DETAILS))
4730 fprintf (dump_file, " siv test failed: unimplemented");
4731 *overlaps_a = conflict_fn_not_known ();
4732 *overlaps_b = conflict_fn_not_known ();
4733 *last_conflicts = chrec_dont_know;
4734 dependence_stats.num_siv_unimplemented++;
4735 }
4736
4737 if (dump_file && (dump_flags & TDF_DETAILS))
4738 fprintf (dump_file, ")\n");
4739 }
4740
4741 /* Returns false if we can prove that the greatest common divisor of the steps
4742    of CHREC does not divide CST, true otherwise.  */
4743
4744 static bool
4745 gcd_of_steps_may_divide_p (const_tree chrec, const_tree cst)
4746 {
4747 HOST_WIDE_INT cd = 0, val;
4748 tree step;
4749
4750 if (!tree_fits_shwi_p (cst))
4751 return true;
4752 val = tree_to_shwi (cst);
4753
4754 while (TREE_CODE (chrec) == POLYNOMIAL_CHREC)
4755 {
4756 step = CHREC_RIGHT (chrec);
4757 if (!tree_fits_shwi_p (step))
4758 return true;
4759 cd = gcd (cd, tree_to_shwi (step));
4760 chrec = CHREC_LEFT (chrec);
4761 }
4762
4763 return val % cd == 0;
4764 }
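
/* For example, for CHREC = {{21, +, 2}_1, +, -2}_2 and CST = 1 the
   gcd of the steps is gcd (2, -2) = 2, and since 1 % 2 != 0 the
   function returns false: no combination of the steps can make up a
   difference of 1.  */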
4765
4766 /* Analyze a MIV (Multiple Index Variable) subscript with respect to
4767 LOOP_NEST. *OVERLAPS_A and *OVERLAPS_B are initialized to the
4768 functions that describe the relation between the elements accessed
4769 twice by CHREC_A and CHREC_B. For k >= 0, the following property
4770 is verified:
4771
4772 CHREC_A (*OVERLAPS_A (k)) = CHREC_B (*OVERLAPS_B (k)). */
4773
4774 static void
4775 analyze_miv_subscript (tree chrec_a,
4776 tree chrec_b,
4777 conflict_function **overlaps_a,
4778 conflict_function **overlaps_b,
4779 tree *last_conflicts,
4780 class loop *loop_nest)
4781 {
4782 tree type, difference;
4783
4784 dependence_stats.num_miv++;
4785 if (dump_file && (dump_flags & TDF_DETAILS))
4786 fprintf (dump_file, "(analyze_miv_subscript \n");
4787
4788 type = signed_type_for_types (TREE_TYPE (chrec_a), TREE_TYPE (chrec_b));
4789 chrec_a = chrec_convert (type, chrec_a, NULL);
4790 chrec_b = chrec_convert (type, chrec_b, NULL);
4791 difference = chrec_fold_minus (type, chrec_a, chrec_b);
4792
4793 if (eq_evolutions_p (chrec_a, chrec_b))
4794 {
4795 /* Access functions are the same: all the elements are accessed
4796 in the same order. */
4797 *overlaps_a = conflict_fn (1, affine_fn_cst (integer_zero_node));
4798 *overlaps_b = conflict_fn (1, affine_fn_cst (integer_zero_node));
4799 *last_conflicts = max_stmt_executions_tree (get_chrec_loop (chrec_a));
4800 dependence_stats.num_miv_dependent++;
4801 }
4802
4803 else if (evolution_function_is_constant_p (difference)
4804 && evolution_function_is_affine_multivariate_p (chrec_a,
4805 loop_nest->num)
4806 && !gcd_of_steps_may_divide_p (chrec_a, difference))
4807 {
4808 /* testsuite/.../ssa-chrec-33.c
4809 {{21, +, 2}_1, +, -2}_2 vs. {{20, +, 2}_1, +, -2}_2
4810
4811 The difference is 1, and all the evolution steps are multiples
4812 of 2, consequently there are no overlapping elements. */
4813 *overlaps_a = conflict_fn_no_dependence ();
4814 *overlaps_b = conflict_fn_no_dependence ();
4815 *last_conflicts = integer_zero_node;
4816 dependence_stats.num_miv_independent++;
4817 }
4818
4819 else if (evolution_function_is_affine_in_loop (chrec_a, loop_nest->num)
4820 && !chrec_contains_symbols (chrec_a, loop_nest)
4821 && evolution_function_is_affine_in_loop (chrec_b, loop_nest->num)
4822 && !chrec_contains_symbols (chrec_b, loop_nest))
4823 {
4824 /* testsuite/.../ssa-chrec-35.c
4825 {0, +, 1}_2 vs. {0, +, 1}_3
4826 the overlapping elements are respectively located at iterations:
4827 {0, +, 1}_x and {0, +, 1}_x,
4828 in other words, we have the equality:
4829 {0, +, 1}_2 ({0, +, 1}_x) = {0, +, 1}_3 ({0, +, 1}_x)
4830
4831 Other examples:
4832 {{0, +, 1}_1, +, 2}_2 ({0, +, 1}_x, {0, +, 1}_y) =
4833 {0, +, 1}_1 ({{0, +, 1}_x, +, 2}_y)
4834
4835 {{0, +, 2}_1, +, 3}_2 ({0, +, 1}_y, {0, +, 1}_x) =
4836 {{0, +, 3}_1, +, 2}_2 ({0, +, 1}_x, {0, +, 1}_y)
4837 */
4838 analyze_subscript_affine_affine (chrec_a, chrec_b,
4839 overlaps_a, overlaps_b, last_conflicts);
4840
4841 if (CF_NOT_KNOWN_P (*overlaps_a)
4842 || CF_NOT_KNOWN_P (*overlaps_b))
4843 dependence_stats.num_miv_unimplemented++;
4844 else if (CF_NO_DEPENDENCE_P (*overlaps_a)
4845 || CF_NO_DEPENDENCE_P (*overlaps_b))
4846 dependence_stats.num_miv_independent++;
4847 else
4848 dependence_stats.num_miv_dependent++;
4849 }
4850
4851 else
4852 {
4853 /* When the analysis is too difficult, answer "don't know". */
4854 if (dump_file && (dump_flags & TDF_DETAILS))
4855 fprintf (dump_file, "analyze_miv_subscript test failed: unimplemented.\n");
4856
4857 *overlaps_a = conflict_fn_not_known ();
4858 *overlaps_b = conflict_fn_not_known ();
4859 *last_conflicts = chrec_dont_know;
4860 dependence_stats.num_miv_unimplemented++;
4861 }
4862
4863 if (dump_file && (dump_flags & TDF_DETAILS))
4864 fprintf (dump_file, ")\n");
4865 }
4866
4867 /* Determines the iterations for which CHREC_A is equal to CHREC_B
4868 with respect to LOOP_NEST. OVERLAP_ITERATIONS_A and
4869 OVERLAP_ITERATIONS_B are initialized with two functions that
4870 describe the iterations that contain conflicting elements.
4871
4872 Remark: For an integer k >= 0, the following equality is true:
4873
4874 CHREC_A (OVERLAP_ITERATIONS_A (k)) == CHREC_B (OVERLAP_ITERATIONS_B (k)).
4875 */
4876
4877 static void
4878 analyze_overlapping_iterations (tree chrec_a,
4879 tree chrec_b,
4880 conflict_function **overlap_iterations_a,
4881 conflict_function **overlap_iterations_b,
4882 tree *last_conflicts, class loop *loop_nest)
4883 {
4884 unsigned int lnn = loop_nest->num;
4885
4886 dependence_stats.num_subscript_tests++;
4887
4888 if (dump_file && (dump_flags & TDF_DETAILS))
4889 {
4890 fprintf (dump_file, "(analyze_overlapping_iterations \n");
4891 fprintf (dump_file, " (chrec_a = ");
4892 print_generic_expr (dump_file, chrec_a);
4893 fprintf (dump_file, ")\n (chrec_b = ");
4894 print_generic_expr (dump_file, chrec_b);
4895 fprintf (dump_file, ")\n");
4896 }
4897
4898 if (chrec_a == NULL_TREE
4899 || chrec_b == NULL_TREE
4900 || chrec_contains_undetermined (chrec_a)
4901 || chrec_contains_undetermined (chrec_b))
4902 {
4903 dependence_stats.num_subscript_undetermined++;
4904
4905 *overlap_iterations_a = conflict_fn_not_known ();
4906 *overlap_iterations_b = conflict_fn_not_known ();
4907 }
4908
4909 /* If they are the same chrec, and are affine, they overlap
4910 on every iteration. */
4911 else if (eq_evolutions_p (chrec_a, chrec_b)
4912 && (evolution_function_is_affine_multivariate_p (chrec_a, lnn)
4913 || operand_equal_p (chrec_a, chrec_b, 0)))
4914 {
4915 dependence_stats.num_same_subscript_function++;
4916 *overlap_iterations_a = conflict_fn (1, affine_fn_cst (integer_zero_node));
4917 *overlap_iterations_b = conflict_fn (1, affine_fn_cst (integer_zero_node));
4918 *last_conflicts = chrec_dont_know;
4919 }
4920
4921 /* If they aren't the same, and aren't affine, we can't do anything
4922 yet. */
4923 else if ((chrec_contains_symbols (chrec_a)
4924 || chrec_contains_symbols (chrec_b))
4925 && (!evolution_function_is_affine_multivariate_p (chrec_a, lnn)
4926 || !evolution_function_is_affine_multivariate_p (chrec_b, lnn)))
4927 {
4928 dependence_stats.num_subscript_undetermined++;
4929 *overlap_iterations_a = conflict_fn_not_known ();
4930 *overlap_iterations_b = conflict_fn_not_known ();
4931 }
4932
4933 else if (ziv_subscript_p (chrec_a, chrec_b))
4934 analyze_ziv_subscript (chrec_a, chrec_b,
4935 overlap_iterations_a, overlap_iterations_b,
4936 last_conflicts);
4937
4938 else if (siv_subscript_p (chrec_a, chrec_b))
4939 analyze_siv_subscript (chrec_a, chrec_b,
4940 overlap_iterations_a, overlap_iterations_b,
4941 last_conflicts, lnn);
4942
4943 else
4944 analyze_miv_subscript (chrec_a, chrec_b,
4945 overlap_iterations_a, overlap_iterations_b,
4946 last_conflicts, loop_nest);
4947
4948 if (dump_file && (dump_flags & TDF_DETAILS))
4949 {
4950 fprintf (dump_file, " (overlap_iterations_a = ");
4951 dump_conflict_function (dump_file, *overlap_iterations_a);
4952 fprintf (dump_file, ")\n (overlap_iterations_b = ");
4953 dump_conflict_function (dump_file, *overlap_iterations_b);
4954 fprintf (dump_file, "))\n");
4955 }
4956 }
4957
4958 /* Helper function for uniquely inserting distance vectors. */
4959
4960 static void
4961 save_dist_v (struct data_dependence_relation *ddr, lambda_vector dist_v)
4962 {
4963 unsigned i;
4964 lambda_vector v;
4965
4966 FOR_EACH_VEC_ELT (DDR_DIST_VECTS (ddr), i, v)
4967 if (lambda_vector_equal (v, dist_v, DDR_NB_LOOPS (ddr)))
4968 return;
4969
4970 DDR_DIST_VECTS (ddr).safe_push (dist_v);
4971 }
4972
4973 /* Helper function for uniquely inserting direction vectors. */
4974
4975 static void
4976 save_dir_v (struct data_dependence_relation *ddr, lambda_vector dir_v)
4977 {
4978 unsigned i;
4979 lambda_vector v;
4980
4981 FOR_EACH_VEC_ELT (DDR_DIR_VECTS (ddr), i, v)
4982 if (lambda_vector_equal (v, dir_v, DDR_NB_LOOPS (ddr)))
4983 return;
4984
4985 DDR_DIR_VECTS (ddr).safe_push (dir_v);
4986 }
4987
4988 /* Add a distance of 1 for all the loops outer than INDEX.  If we
4989 haven't yet determined a distance for this outer loop, push a new
4990 distance vector composed of the previous distance, and a distance
4991 of 1 for this outer loop. Example:
4992
4993 | loop_1
4994 | loop_2
4995 | A[10]
4996 | endloop_2
4997 | endloop_1
4998
4999 Saved vectors are of the form (dist_in_1, dist_in_2). First, we
5000 save (0, 1), then we have to save (1, 0). */
5001
5002 static void
5003 add_outer_distances (struct data_dependence_relation *ddr,
5004 lambda_vector dist_v, int index)
5005 {
5006 /* For each outer loop where init_v is not set, the accesses are
5007 in dependence of distance 1 in the loop. */
5008 while (--index >= 0)
5009 {
5010 lambda_vector save_v = lambda_vector_new (DDR_NB_LOOPS (ddr));
5011 lambda_vector_copy (dist_v, save_v, DDR_NB_LOOPS (ddr));
5012 save_v[index] = 1;
5013 save_dist_v (ddr, save_v);
5014 }
5015 }
5016
5017 /* Return false when we fail to represent the data dependence as a
5018 distance vector. A_INDEX is the index of the first reference
5019 (0 for DDR_A, 1 for DDR_B) and B_INDEX is the index of the
5020 second reference. INIT_B is set to true when a component has been
5021 added to the distance vector DIST_V. INDEX_CARRY is then set to
5022 the index in DIST_V that carries the dependence. */
5023
5024 static bool
5025 build_classic_dist_vector_1 (struct data_dependence_relation *ddr,
5026 unsigned int a_index, unsigned int b_index,
5027 lambda_vector dist_v, bool *init_b,
5028 int *index_carry)
5029 {
5030 unsigned i;
5031 lambda_vector init_v = lambda_vector_new (DDR_NB_LOOPS (ddr));
5032 class loop *loop = DDR_LOOP_NEST (ddr)[0];
5033
5034 for (i = 0; i < DDR_NUM_SUBSCRIPTS (ddr); i++)
5035 {
5036 tree access_fn_a, access_fn_b;
5037 struct subscript *subscript = DDR_SUBSCRIPT (ddr, i);
5038
5039 if (chrec_contains_undetermined (SUB_DISTANCE (subscript)))
5040 {
5041 non_affine_dependence_relation (ddr);
5042 return false;
5043 }
5044
5045 access_fn_a = SUB_ACCESS_FN (subscript, a_index);
5046 access_fn_b = SUB_ACCESS_FN (subscript, b_index);
5047
5048 if (TREE_CODE (access_fn_a) == POLYNOMIAL_CHREC
5049 && TREE_CODE (access_fn_b) == POLYNOMIAL_CHREC)
5050 {
5051 HOST_WIDE_INT dist;
5052 int index;
5053 int var_a = CHREC_VARIABLE (access_fn_a);
5054 int var_b = CHREC_VARIABLE (access_fn_b);
5055
5056 if (var_a != var_b
5057 || chrec_contains_undetermined (SUB_DISTANCE (subscript)))
5058 {
5059 non_affine_dependence_relation (ddr);
5060 return false;
5061 }
5062
5063 /* When data references are collected in a loop while data
5064 	     dependences are analyzed in a loop nest nested in that loop, we
5065 	     can have more access functions than loops.  Skip the access
5066 	     functions of loops not in the loop nest.
5067
5068 See PR89725 for more information. */
5069 if (flow_loop_nested_p (get_loop (cfun, var_a), loop))
5070 continue;
5071
5072 dist = int_cst_value (SUB_DISTANCE (subscript));
5073 index = index_in_loop_nest (var_a, DDR_LOOP_NEST (ddr));
5074 *index_carry = MIN (index, *index_carry);
5075
5076 /* This is the subscript coupling test. If we have already
5077 recorded a distance for this loop (a distance coming from
5078 another subscript), it should be the same. For example,
5079 in the following code, there is no dependence:
5080
5081 | loop i = 0, N, 1
5082 | T[i+1][i] = ...
5083 | ... = T[i][i]
5084 | endloop
5085 */
5086 if (init_v[index] != 0 && dist_v[index] != dist)
5087 {
5088 finalize_ddr_dependent (ddr, chrec_known);
5089 return false;
5090 }
5091
5092 dist_v[index] = dist;
5093 init_v[index] = 1;
5094 *init_b = true;
5095 }
5096 else if (!operand_equal_p (access_fn_a, access_fn_b, 0))
5097 {
5098 /* This can be for example an affine vs. constant dependence
5099 (T[i] vs. T[3]) that is not an affine dependence and is
5100 not representable as a distance vector. */
5101 non_affine_dependence_relation (ddr);
5102 return false;
5103 }
5104 }
5105
5106 return true;
5107 }
5108
5109 /* Return true when the DDR contains only access functions that are
5110    invariant with respect to loop number LNUM.  */
5111
5112 static bool
5113 invariant_access_functions (const struct data_dependence_relation *ddr,
5114 int lnum)
5115 {
5116 unsigned i;
5117 subscript *sub;
5118
5119 FOR_EACH_VEC_ELT (DDR_SUBSCRIPTS (ddr), i, sub)
5120 if (!evolution_function_is_invariant_p (SUB_ACCESS_FN (sub, 0), lnum)
5121 || !evolution_function_is_invariant_p (SUB_ACCESS_FN (sub, 1), lnum))
5122 return false;
5123
5124 return true;
5125 }
5126
5127 /* Helper function for the case where DDR_A and DDR_B are the same
5128 multivariate access function with a constant step. For an example
5129 see pr34635-1.c. */
5130
5131 static void
5132 add_multivariate_self_dist (struct data_dependence_relation *ddr, tree c_2)
5133 {
5134 int x_1, x_2;
5135 tree c_1 = CHREC_LEFT (c_2);
5136 tree c_0 = CHREC_LEFT (c_1);
5137 lambda_vector dist_v;
5138 HOST_WIDE_INT v1, v2, cd;
5139
5140 /* Polynomials with more than 2 variables are not handled yet. When
5141 the evolution steps are parameters, it is not possible to
5142 represent the dependence using classical distance vectors. */
5143 if (TREE_CODE (c_0) != INTEGER_CST
5144 || TREE_CODE (CHREC_RIGHT (c_1)) != INTEGER_CST
5145 || TREE_CODE (CHREC_RIGHT (c_2)) != INTEGER_CST)
5146 {
5147 DDR_AFFINE_P (ddr) = false;
5148 return;
5149 }
5150
5151 x_2 = index_in_loop_nest (CHREC_VARIABLE (c_2), DDR_LOOP_NEST (ddr));
5152 x_1 = index_in_loop_nest (CHREC_VARIABLE (c_1), DDR_LOOP_NEST (ddr));
5153
5154 /* For "{{0, +, 2}_1, +, 3}_2" the distance vector is (3, -2). */
5155 dist_v = lambda_vector_new (DDR_NB_LOOPS (ddr));
5156 v1 = int_cst_value (CHREC_RIGHT (c_1));
5157 v2 = int_cst_value (CHREC_RIGHT (c_2));
5158 cd = gcd (v1, v2);
5159 v1 /= cd;
5160 v2 /= cd;
5161
5162 if (v2 < 0)
5163 {
5164 v2 = -v2;
5165 v1 = -v1;
5166 }
5167
5168 dist_v[x_1] = v2;
5169 dist_v[x_2] = -v1;
5170 save_dist_v (ddr, dist_v);
5171
5172 add_outer_distances (ddr, dist_v, x_1);
5173 }
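
/* For the example above, V1 = 2, V2 = 3 and CD = 1, so DIST_V gets
   (3, -2): the element accessed at iteration (i_1, i_2) is accessed
   again at (i_1 + 3, i_2 - 2), since 2 * 3 + 3 * (-2) = 0.  */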
5174
5175 /* Helper function for the case where DDR_A and DDR_B are the same
5176 access functions. */
5177
5178 static void
5179 add_other_self_distances (struct data_dependence_relation *ddr)
5180 {
5181 lambda_vector dist_v;
5182 unsigned i;
5183 int index_carry = DDR_NB_LOOPS (ddr);
5184 subscript *sub;
5185 class loop *loop = DDR_LOOP_NEST (ddr)[0];
5186
5187 FOR_EACH_VEC_ELT (DDR_SUBSCRIPTS (ddr), i, sub)
5188 {
5189 tree access_fun = SUB_ACCESS_FN (sub, 0);
5190
5191 if (TREE_CODE (access_fun) == POLYNOMIAL_CHREC)
5192 {
5193 if (!evolution_function_is_univariate_p (access_fun, loop->num))
5194 {
5195 if (DDR_NUM_SUBSCRIPTS (ddr) != 1)
5196 {
5197 DDR_ARE_DEPENDENT (ddr) = chrec_dont_know;
5198 return;
5199 }
5200
5201 access_fun = SUB_ACCESS_FN (DDR_SUBSCRIPT (ddr, 0), 0);
5202
5203 if (TREE_CODE (CHREC_LEFT (access_fun)) == POLYNOMIAL_CHREC)
5204 add_multivariate_self_dist (ddr, access_fun);
5205 else
5206 /* The evolution step is not constant: it varies in
5207 the outer loop, so this cannot be represented by a
5208 distance vector. For example in pr34635.c the
5209 evolution is {0, +, {0, +, 4}_1}_2. */
5210 DDR_AFFINE_P (ddr) = false;
5211
5212 return;
5213 }
5214
5215 /* When data references are collected in a loop while data
5216 	     dependences are analyzed in a loop nest nested in that loop, we
5217 	     can have more access functions than loops.  Skip the access
5218 	     functions of loops not in the loop nest.
5219
5220 See PR89725 for more information. */
5221 if (flow_loop_nested_p (get_loop (cfun, CHREC_VARIABLE (access_fun)),
5222 loop))
5223 continue;
5224
5225 index_carry = MIN (index_carry,
5226 index_in_loop_nest (CHREC_VARIABLE (access_fun),
5227 DDR_LOOP_NEST (ddr)));
5228 }
5229 }
5230
5231 dist_v = lambda_vector_new (DDR_NB_LOOPS (ddr));
5232 add_outer_distances (ddr, dist_v, index_carry);
5233 }
5234
5235 static void
5236 insert_innermost_unit_dist_vector (struct data_dependence_relation *ddr)
5237 {
5238 lambda_vector dist_v = lambda_vector_new (DDR_NB_LOOPS (ddr));
5239
5240 dist_v[0] = 1;
5241 save_dist_v (ddr, dist_v);
5242 }
5243
5244 /* Adds a unit distance vector to DDR when there is a 0 overlap. This
5245 is the case for example when access functions are the same and
5246 equal to a constant, as in:
5247
5248 | loop_1
5249 | A[3] = ...
5250 | ... = A[3]
5251 | endloop_1
5252
5253 in which case the distance vectors are (0) and (1). */
5254
5255 static void
5256 add_distance_for_zero_overlaps (struct data_dependence_relation *ddr)
5257 {
5258 unsigned i, j;
5259
5260 for (i = 0; i < DDR_NUM_SUBSCRIPTS (ddr); i++)
5261 {
5262 subscript_p sub = DDR_SUBSCRIPT (ddr, i);
5263 conflict_function *ca = SUB_CONFLICTS_IN_A (sub);
5264 conflict_function *cb = SUB_CONFLICTS_IN_B (sub);
5265
5266 for (j = 0; j < ca->n; j++)
5267 if (affine_function_zero_p (ca->fns[j]))
5268 {
5269 insert_innermost_unit_dist_vector (ddr);
5270 return;
5271 }
5272
5273 for (j = 0; j < cb->n; j++)
5274 if (affine_function_zero_p (cb->fns[j]))
5275 {
5276 insert_innermost_unit_dist_vector (ddr);
5277 return;
5278 }
5279 }
5280 }
5281
5282 /* Return true when the DDR contains two data references that have the
5283 same access functions. */
5284
5285 static inline bool
5286 same_access_functions (const struct data_dependence_relation *ddr)
5287 {
5288 unsigned i;
5289 subscript *sub;
5290
5291 FOR_EACH_VEC_ELT (DDR_SUBSCRIPTS (ddr), i, sub)
5292 if (!eq_evolutions_p (SUB_ACCESS_FN (sub, 0),
5293 SUB_ACCESS_FN (sub, 1)))
5294 return false;
5295
5296 return true;
5297 }
5298
5299 /* Compute the classic per-loop distance vector. DDR is the data
5300 dependence relation to build a vector from. Return false when the
5301 data dependence cannot be represented as a distance vector. */
5302
5303 static bool
5304 build_classic_dist_vector (struct data_dependence_relation *ddr,
5305 class loop *loop_nest)
5306 {
5307 bool init_b = false;
5308 int index_carry = DDR_NB_LOOPS (ddr);
5309 lambda_vector dist_v;
5310
5311 if (DDR_ARE_DEPENDENT (ddr) != NULL_TREE)
5312 return false;
5313
5314 if (same_access_functions (ddr))
5315 {
5316 /* Save the 0 vector. */
5317 dist_v = lambda_vector_new (DDR_NB_LOOPS (ddr));
5318 save_dist_v (ddr, dist_v);
5319
5320 if (invariant_access_functions (ddr, loop_nest->num))
5321 add_distance_for_zero_overlaps (ddr);
5322
5323 if (DDR_NB_LOOPS (ddr) > 1)
5324 add_other_self_distances (ddr);
5325
5326 return true;
5327 }
5328
5329 dist_v = lambda_vector_new (DDR_NB_LOOPS (ddr));
5330 if (!build_classic_dist_vector_1 (ddr, 0, 1, dist_v, &init_b, &index_carry))
5331 return false;
5332
5333 /* Save the distance vector if we initialized one. */
5334 if (init_b)
5335 {
5336 /* Verify a basic constraint: classic distance vectors should
5337 always be lexicographically positive.
5338
5339 Data references are collected in the order of execution of
5340 the program, thus for the following loop
5341
5342 | for (i = 1; i < 100; i++)
5343 | for (j = 1; j < 100; j++)
5344 | {
5345 | t = T[j+1][i-1]; // A
5346 | T[j][i] = t + 2; // B
5347 | }
5348
5349 references are collected following the direction of the wind:
5350 A then B. The data dependence tests are performed in the
5351 same order, so we are looking at the distance separating
5352 the elements accessed by A from the elements later accessed
5353 by B. But in this example, the distance returned by
5354 test_dep (A, B) is lexicographically negative (-1, 1), which
5355 means that access A occurs later than B with respect to the
5356 outer loop, i.e. we are actually looking upwind. In this
5357 case we solve test_dep (B, A) looking downwind for the
5358 lexicographically positive solution, which returns the
5359 distance vector (1, -1). */
5360 if (!lambda_vector_lexico_pos (dist_v, DDR_NB_LOOPS (ddr)))
5361 {
5362 lambda_vector save_v = lambda_vector_new (DDR_NB_LOOPS (ddr));
5363 if (!subscript_dependence_tester_1 (ddr, 1, 0, loop_nest))
5364 return false;
5365 compute_subscript_distance (ddr);
5366 if (!build_classic_dist_vector_1 (ddr, 1, 0, save_v, &init_b,
5367 &index_carry))
5368 return false;
5369 save_dist_v (ddr, save_v);
5370 DDR_REVERSED_P (ddr) = true;
5371
5372 /* In this case there is a dependence forward for all the
5373 outer loops:
5374
5375 | for (k = 1; k < 100; k++)
5376 | for (i = 1; i < 100; i++)
5377 | for (j = 1; j < 100; j++)
5378 | {
5379 | t = T[j+1][i-1]; // A
5380 | T[j][i] = t + 2; // B
5381 | }
5382
5383 the vectors are:
5384 (0, 1, -1)
5385 (1, 1, -1)
5386 (1, -1, 1)
5387 */
5388 if (DDR_NB_LOOPS (ddr) > 1)
5389 {
5390 add_outer_distances (ddr, save_v, index_carry);
5391 add_outer_distances (ddr, dist_v, index_carry);
5392 }
5393 }
5394 else
5395 {
5396 lambda_vector save_v = lambda_vector_new (DDR_NB_LOOPS (ddr));
5397 lambda_vector_copy (dist_v, save_v, DDR_NB_LOOPS (ddr));
5398
5399 if (DDR_NB_LOOPS (ddr) > 1)
5400 {
5401 lambda_vector opposite_v = lambda_vector_new (DDR_NB_LOOPS (ddr));
5402
5403 if (!subscript_dependence_tester_1 (ddr, 1, 0, loop_nest))
5404 return false;
5405 compute_subscript_distance (ddr);
5406 if (!build_classic_dist_vector_1 (ddr, 1, 0, opposite_v, &init_b,
5407 &index_carry))
5408 return false;
5409
5410 save_dist_v (ddr, save_v);
5411 add_outer_distances (ddr, dist_v, index_carry);
5412 add_outer_distances (ddr, opposite_v, index_carry);
5413 }
5414 else
5415 save_dist_v (ddr, save_v);
5416 }
5417 }
5418 else
5419 {
5420 /* There is a distance of 1 on all the outer loops. For example,
5421 there is a dependence of distance 1 on loop_1 for the array A:
5422
5423 | loop_1
5424 | A[5] = ...
5425 | endloop
5426 */
5427 add_outer_distances (ddr, dist_v,
5428 lambda_vector_first_nz (dist_v,
5429 DDR_NB_LOOPS (ddr), 0));
5430 }
5431
5432 if (dump_file && (dump_flags & TDF_DETAILS))
5433 {
5434 unsigned i;
5435
5436 fprintf (dump_file, "(build_classic_dist_vector\n");
5437 for (i = 0; i < DDR_NUM_DIST_VECTS (ddr); i++)
5438 {
5439 fprintf (dump_file, " dist_vector = (");
5440 print_lambda_vector (dump_file, DDR_DIST_VECT (ddr, i),
5441 DDR_NB_LOOPS (ddr));
5442 fprintf (dump_file, " )\n");
5443 }
5444 fprintf (dump_file, ")\n");
5445 }
5446
5447 return true;
5448 }
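
/* Illustration, restating the example from the comment above: in the
   two-deep nest

   | for (i = 1; i < 100; i++)
   |   for (j = 1; j < 100; j++)
   |     {
   |       t = T[j+1][i-1]; // A
   |       T[j][i] = t + 2; // B
   |     }

   test_dep (A, B) first yields the lexicographically negative distance
   (-1, 1), so build_classic_dist_vector re-solves test_dep (B, A), sets
   DDR_REVERSED_P, and saves the lexicographically positive vector
   (1, -1) instead. */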
5449
5450 /* Return the direction for a given distance.
5451 FIXME: Computing dir this way is suboptimal, since dir can catch
5452 cases that dist is unable to represent. */
5453
5454 static inline enum data_dependence_direction
5455 dir_from_dist (int dist)
5456 {
5457 if (dist > 0)
5458 return dir_positive;
5459 else if (dist < 0)
5460 return dir_negative;
5461 else
5462 return dir_equal;
5463 }
5464
5465 /* Compute the classic per-loop direction vector. DDR is the data
5466 dependence relation to build a vector from. */
5467
5468 static void
5469 build_classic_dir_vector (struct data_dependence_relation *ddr)
5470 {
5471 unsigned i, j;
5472 lambda_vector dist_v;
5473
5474 FOR_EACH_VEC_ELT (DDR_DIST_VECTS (ddr), i, dist_v)
5475 {
5476 lambda_vector dir_v = lambda_vector_new (DDR_NB_LOOPS (ddr));
5477
5478 for (j = 0; j < DDR_NB_LOOPS (ddr); j++)
5479 dir_v[j] = dir_from_dist (dist_v[j]);
5480
5481 save_dir_v (ddr, dir_v);
5482 }
5483 }
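
/* For instance, the distance vector (1, -1) is mapped through
   dir_from_dist to the direction vector (dir_positive, dir_negative),
   and the zero distance vector (0, 0) to (dir_equal, dir_equal). */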
5484
5485 /* Helper function. Returns true when there is a dependence between the
5486 data references. A_INDEX is the index of the first reference (0 for
5487 DDR_A, 1 for DDR_B) and B_INDEX is the index of the second reference. */
5488
5489 static bool
5490 subscript_dependence_tester_1 (struct data_dependence_relation *ddr,
5491 unsigned int a_index, unsigned int b_index,
5492 class loop *loop_nest)
5493 {
5494 unsigned int i;
5495 tree last_conflicts;
5496 struct subscript *subscript;
5497 tree res = NULL_TREE;
5498
5499 for (i = 0; DDR_SUBSCRIPTS (ddr).iterate (i, &subscript); i++)
5500 {
5501 conflict_function *overlaps_a, *overlaps_b;
5502
5503 analyze_overlapping_iterations (SUB_ACCESS_FN (subscript, a_index),
5504 SUB_ACCESS_FN (subscript, b_index),
5505 &overlaps_a, &overlaps_b,
5506 &last_conflicts, loop_nest);
5507
5508 if (SUB_CONFLICTS_IN_A (subscript))
5509 free_conflict_function (SUB_CONFLICTS_IN_A (subscript));
5510 if (SUB_CONFLICTS_IN_B (subscript))
5511 free_conflict_function (SUB_CONFLICTS_IN_B (subscript));
5512
5513 SUB_CONFLICTS_IN_A (subscript) = overlaps_a;
5514 SUB_CONFLICTS_IN_B (subscript) = overlaps_b;
5515 SUB_LAST_CONFLICT (subscript) = last_conflicts;
5516
5517 /* If there is any undetermined conflict function we have to
5518 give a conservative answer, unless we can prove that no
5519 dependence exists when analyzing another subscript. */
5520 if (CF_NOT_KNOWN_P (overlaps_a)
5521 || CF_NOT_KNOWN_P (overlaps_b))
5522 {
5523 res = chrec_dont_know;
5524 continue;
5525 }
5526
5527 /* When there is a subscript with no dependence we can stop. */
5528 else if (CF_NO_DEPENDENCE_P (overlaps_a)
5529 || CF_NO_DEPENDENCE_P (overlaps_b))
5530 {
5531 res = chrec_known;
5532 break;
5533 }
5534 }
5535
5536 if (res == NULL_TREE)
5537 return true;
5538
5539 if (res == chrec_known)
5540 dependence_stats.num_dependence_independent++;
5541 else
5542 dependence_stats.num_dependence_undetermined++;
5543 finalize_ddr_dependent (ddr, res);
5544 return false;
5545 }
5546
5547 /* Computes the conflicting iterations in LOOP_NEST, and initializes DDR. */
5548
5549 static void
5550 subscript_dependence_tester (struct data_dependence_relation *ddr,
5551 class loop *loop_nest)
5552 {
5553 if (subscript_dependence_tester_1 (ddr, 0, 1, loop_nest))
5554 dependence_stats.num_dependence_dependent++;
5555
5556 compute_subscript_distance (ddr);
5557 if (build_classic_dist_vector (ddr, loop_nest))
5558 build_classic_dir_vector (ddr);
5559 }
5560
5561 /* Returns true when all the access functions of A are affine or
5562 constant with respect to LOOP_NEST. */
5563
5564 static bool
5565 access_functions_are_affine_or_constant_p (const struct data_reference *a,
5566 const class loop *loop_nest)
5567 {
5568 unsigned int i;
5569 vec<tree> fns = DR_ACCESS_FNS (a);
5570 tree t;
5571
5572 FOR_EACH_VEC_ELT (fns, i, t)
5573 if (!evolution_function_is_invariant_p (t, loop_nest->num)
5574 && !evolution_function_is_affine_multivariate_p (t, loop_nest->num))
5575 return false;
5576
5577 return true;
5578 }
5579
5580 /* This computes the affine dependence relation between A and B with
5581 respect to LOOP_NEST. CHREC_KNOWN is used for representing the
5582 independence between two accesses, while CHREC_DONT_KNOW is used
5583 for representing the unknown relation.
5584
5585 Note that it is possible to stop the computation of the dependence
5586 relation the first time we detect a CHREC_KNOWN element for a given
5587 subscript. */
5588
5589 void
5590 compute_affine_dependence (struct data_dependence_relation *ddr,
5591 class loop *loop_nest)
5592 {
5593 struct data_reference *dra = DDR_A (ddr);
5594 struct data_reference *drb = DDR_B (ddr);
5595
5596 if (dump_file && (dump_flags & TDF_DETAILS))
5597 {
5598 fprintf (dump_file, "(compute_affine_dependence\n");
5599 fprintf (dump_file, " stmt_a: ");
5600 print_gimple_stmt (dump_file, DR_STMT (dra), 0, TDF_SLIM);
5601 fprintf (dump_file, " stmt_b: ");
5602 print_gimple_stmt (dump_file, DR_STMT (drb), 0, TDF_SLIM);
5603 }
5604
5605 /* Analyze only when the dependence relation is not yet known. */
5606 if (DDR_ARE_DEPENDENT (ddr) == NULL_TREE)
5607 {
5608 dependence_stats.num_dependence_tests++;
5609
5610 if (access_functions_are_affine_or_constant_p (dra, loop_nest)
5611 && access_functions_are_affine_or_constant_p (drb, loop_nest))
5612 subscript_dependence_tester (ddr, loop_nest);
5613
5614 /* As a last case, if the dependence cannot be determined, or if
5615 the dependence is considered too difficult to determine, answer
5616 "don't know". */
5617 else
5618 {
5619 dependence_stats.num_dependence_undetermined++;
5620
5621 if (dump_file && (dump_flags & TDF_DETAILS))
5622 {
5623 fprintf (dump_file, "Data ref a:\n");
5624 dump_data_reference (dump_file, dra);
5625 fprintf (dump_file, "Data ref b:\n");
5626 dump_data_reference (dump_file, drb);
5627 fprintf (dump_file, "affine dependence test not usable: access function not affine or constant.\n");
5628 }
5629 finalize_ddr_dependent (ddr, chrec_dont_know);
5630 }
5631 }
5632
5633 if (dump_file && (dump_flags & TDF_DETAILS))
5634 {
5635 if (DDR_ARE_DEPENDENT (ddr) == chrec_known)
5636 fprintf (dump_file, ") -> no dependence\n");
5637 else if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know)
5638 fprintf (dump_file, ") -> dependence analysis failed\n");
5639 else
5640 fprintf (dump_file, ")\n");
5641 }
5642 }
5643
5644 /* Compute in DEPENDENCE_RELATIONS the data dependence graph for all
5645 the data references in DATAREFS, in the LOOP_NEST. When
5646 COMPUTE_SELF_AND_RR is FALSE, don't compute read-read and self
5647 relations. Return true when successful, i.e. when the number of
5648 data references is small enough to be handled. */
5649
5650 bool
5651 compute_all_dependences (vec<data_reference_p> datarefs,
5652 vec<ddr_p> *dependence_relations,
5653 vec<loop_p> loop_nest,
5654 bool compute_self_and_rr)
5655 {
5656 struct data_dependence_relation *ddr;
5657 struct data_reference *a, *b;
5658 unsigned int i, j;
5659
5660 if ((int) datarefs.length ()
5661 > param_loop_max_datarefs_for_datadeps)
5662 {
5663 struct data_dependence_relation *ddr;
5664
5665 /* Insert a single relation into dependence_relations:
5666 chrec_dont_know. */
5667 ddr = initialize_data_dependence_relation (NULL, NULL, loop_nest);
5668 dependence_relations->safe_push (ddr);
5669 return false;
5670 }
5671
5672 FOR_EACH_VEC_ELT (datarefs, i, a)
5673 for (j = i + 1; datarefs.iterate (j, &b); j++)
5674 if (DR_IS_WRITE (a) || DR_IS_WRITE (b) || compute_self_and_rr)
5675 {
5676 ddr = initialize_data_dependence_relation (a, b, loop_nest);
5677 dependence_relations->safe_push (ddr);
5678 if (loop_nest.exists ())
5679 compute_affine_dependence (ddr, loop_nest[0]);
5680 }
5681
5682 if (compute_self_and_rr)
5683 FOR_EACH_VEC_ELT (datarefs, i, a)
5684 {
5685 ddr = initialize_data_dependence_relation (a, a, loop_nest);
5686 dependence_relations->safe_push (ddr);
5687 if (loop_nest.exists ())
5688 compute_affine_dependence (ddr, loop_nest[0]);
5689 }
5690
5691 return true;
5692 }
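
/* A minimal caller sketch (hypothetical; the variable names are
   illustrative and not taken from any particular GCC pass):

   | vec<loop_p> nest = vNULL;
   | vec<data_reference_p> datarefs = vNULL;
   | vec<ddr_p> ddrs = vNULL;
   | if (find_loop_nest (loop, &nest)
   |     && find_data_references_in_loop (loop, &datarefs) != chrec_dont_know)
   |   compute_all_dependences (datarefs, &ddrs, nest, false);
   | ... inspect the relations in ddrs ...
   | free_dependence_relations (ddrs);
   | free_data_refs (datarefs);
   | nest.release ();
*/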
5693
5694 /* Describes a location of a memory reference. */
5695
5696 struct data_ref_loc
5697 {
5698 /* The memory reference. */
5699 tree ref;
5700
5701 /* True if the memory reference is read. */
5702 bool is_read;
5703
5704 /* True if the data reference is conditional within the containing
5705 statement, i.e. if it might not occur even when the statement
5706 is executed and runs to completion. */
5707 bool is_conditional_in_stmt;
5708 };
5709
5710
5711 /* Stores the locations of memory references in STMT to REFERENCES. Returns
5712 true if STMT clobbers memory, false otherwise. */
5713
5714 static bool
5715 get_references_in_stmt (gimple *stmt, vec<data_ref_loc, va_heap> *references)
5716 {
5717 bool clobbers_memory = false;
5718 data_ref_loc ref;
5719 tree op0, op1;
5720 enum gimple_code stmt_code = gimple_code (stmt);
5721
5722 /* ASM_EXPR and CALL_EXPR may embed arbitrary side effects.
5723 As we cannot model data references to accesses that are not
5724 spelled out explicitly, give up if any may occur. */
5725 if (stmt_code == GIMPLE_CALL
5726 && !(gimple_call_flags (stmt) & ECF_CONST))
5727 {
5728 /* Allow IFN_GOMP_SIMD_LANE calls in their own loops. */
5729 if (gimple_call_internal_p (stmt))
5730 switch (gimple_call_internal_fn (stmt))
5731 {
5732 case IFN_GOMP_SIMD_LANE:
5733 {
5734 class loop *loop = gimple_bb (stmt)->loop_father;
5735 tree uid = gimple_call_arg (stmt, 0);
5736 gcc_assert (TREE_CODE (uid) == SSA_NAME);
5737 if (loop == NULL
5738 || loop->simduid != SSA_NAME_VAR (uid))
5739 clobbers_memory = true;
5740 break;
5741 }
5742 case IFN_MASK_LOAD:
5743 case IFN_MASK_STORE:
5744 break;
5745 default:
5746 clobbers_memory = true;
5747 break;
5748 }
5749 else
5750 clobbers_memory = true;
5751 }
5752 else if (stmt_code == GIMPLE_ASM
5753 && (gimple_asm_volatile_p (as_a <gasm *> (stmt))
5754 || gimple_vuse (stmt)))
5755 clobbers_memory = true;
5756
5757 if (!gimple_vuse (stmt))
5758 return clobbers_memory;
5759
5760 if (stmt_code == GIMPLE_ASSIGN)
5761 {
5762 tree base;
5763 op0 = gimple_assign_lhs (stmt);
5764 op1 = gimple_assign_rhs1 (stmt);
5765
5766 if (DECL_P (op1)
5767 || (REFERENCE_CLASS_P (op1)
5768 && (base = get_base_address (op1))
5769 && TREE_CODE (base) != SSA_NAME
5770 && !is_gimple_min_invariant (base)))
5771 {
5772 ref.ref = op1;
5773 ref.is_read = true;
5774 ref.is_conditional_in_stmt = false;
5775 references->safe_push (ref);
5776 }
5777 }
5778 else if (stmt_code == GIMPLE_CALL)
5779 {
5780 unsigned i, n;
5781 tree ptr, type;
5782 unsigned int align;
5783
5784 ref.is_read = false;
5785 if (gimple_call_internal_p (stmt))
5786 switch (gimple_call_internal_fn (stmt))
5787 {
5788 case IFN_MASK_LOAD:
5789 if (gimple_call_lhs (stmt) == NULL_TREE)
5790 break;
5791 ref.is_read = true;
5792 /* FALLTHRU */
5793 case IFN_MASK_STORE:
5794 ptr = build_int_cst (TREE_TYPE (gimple_call_arg (stmt, 1)), 0);
5795 align = tree_to_shwi (gimple_call_arg (stmt, 1));
5796 if (ref.is_read)
5797 type = TREE_TYPE (gimple_call_lhs (stmt));
5798 else
5799 type = TREE_TYPE (gimple_call_arg (stmt, 3));
5800 if (TYPE_ALIGN (type) != align)
5801 type = build_aligned_type (type, align);
5802 ref.is_conditional_in_stmt = true;
5803 ref.ref = fold_build2 (MEM_REF, type, gimple_call_arg (stmt, 0),
5804 ptr);
5805 references->safe_push (ref);
5806 return false;
5807 default:
5808 break;
5809 }
5810
5811 op0 = gimple_call_lhs (stmt);
5812 n = gimple_call_num_args (stmt);
5813 for (i = 0; i < n; i++)
5814 {
5815 op1 = gimple_call_arg (stmt, i);
5816
5817 if (DECL_P (op1)
5818 || (REFERENCE_CLASS_P (op1) && get_base_address (op1)))
5819 {
5820 ref.ref = op1;
5821 ref.is_read = true;
5822 ref.is_conditional_in_stmt = false;
5823 references->safe_push (ref);
5824 }
5825 }
5826 }
5827 else
5828 return clobbers_memory;
5829
5830 if (op0
5831 && (DECL_P (op0)
5832 || (REFERENCE_CLASS_P (op0) && get_base_address (op0))))
5833 {
5834 ref.ref = op0;
5835 ref.is_read = false;
5836 ref.is_conditional_in_stmt = false;
5837 references->safe_push (ref);
5838 }
5839 return clobbers_memory;
5840 }
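
/* A sketch of the expected behaviour: for a GIMPLE assignment

   | a[i_1] = b[i_1];

   this records b[i_1] as a read reference and a[i_1] as a write
   reference, both with is_conditional_in_stmt false, and returns false
   since a plain assignment does not clobber memory. */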
5841
5842
5843 /* Returns true if the loop-nest has any data reference. */
5844
5845 bool
5846 loop_nest_has_data_refs (loop_p loop)
5847 {
5848 basic_block *bbs = get_loop_body (loop);
5849 auto_vec<data_ref_loc, 3> references;
5850
5851 for (unsigned i = 0; i < loop->num_nodes; i++)
5852 {
5853 basic_block bb = bbs[i];
5854 gimple_stmt_iterator bsi;
5855
5856 for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
5857 {
5858 gimple *stmt = gsi_stmt (bsi);
5859 get_references_in_stmt (stmt, &references);
5860 if (references.length ())
5861 {
5862 free (bbs);
5863 return true;
5864 }
5865 }
5866 }
5867 free (bbs);
5868 return false;
5869 }
5870
5871 /* Stores the data references in STMT to DATAREFS. Returns failure if
5872 there is an unanalyzable reference, success otherwise. NEST is the
5873 outermost loop of the loop nest in which the references are analyzed. */
5874
5875 opt_result
5876 find_data_references_in_stmt (class loop *nest, gimple *stmt,
5877 vec<data_reference_p> *datarefs)
5878 {
5879 unsigned i;
5880 auto_vec<data_ref_loc, 2> references;
5881 data_ref_loc *ref;
5882 data_reference_p dr;
5883
5884 if (get_references_in_stmt (stmt, &references))
5885 return opt_result::failure_at (stmt, "statement clobbers memory: %G",
5886 stmt);
5887
5888 FOR_EACH_VEC_ELT (references, i, ref)
5889 {
5890 dr = create_data_ref (nest ? loop_preheader_edge (nest) : NULL,
5891 loop_containing_stmt (stmt), ref->ref,
5892 stmt, ref->is_read, ref->is_conditional_in_stmt);
5893 gcc_assert (dr != NULL);
5894 datarefs->safe_push (dr);
5895 }
5896
5897 return opt_result::success ();
5898 }
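
/* A minimal caller sketch (hypothetical): the opt_result converts to
   bool, so a failure can simply be propagated together with its
   message:

   | opt_result res = find_data_references_in_stmt (nest, stmt, &datarefs);
   | if (!res)
   |   return res;
*/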
5899
5900 /* Stores the data references in STMT to DATAREFS. If there is an
5901 unanalyzable reference, returns false, otherwise returns true.
5902 NEST is the preheader edge of the outermost loop of the nest in
5903 which the references should be instantiated, LOOP is the loop in
5904 which the references should be analyzed. */
5905
5906 bool
5907 graphite_find_data_references_in_stmt (edge nest, loop_p loop, gimple *stmt,
5908 vec<data_reference_p> *datarefs)
5909 {
5910 unsigned i;
5911 auto_vec<data_ref_loc, 2> references;
5912 data_ref_loc *ref;
5913 bool ret = true;
5914 data_reference_p dr;
5915
5916 if (get_references_in_stmt (stmt, &references))
5917 return false;
5918
5919 FOR_EACH_VEC_ELT (references, i, ref)
5920 {
5921 dr = create_data_ref (nest, loop, ref->ref, stmt, ref->is_read,
5922 ref->is_conditional_in_stmt);
5923 gcc_assert (dr != NULL);
5924 datarefs->safe_push (dr);
5925 }
5926
5927 return ret;
5928 }
5929
5930 /* Search the data references in basic block BB, and record the
5931 information into DATAREFS. Returns chrec_dont_know when failing to
5932 analyze a difficult case, returns NULL_TREE otherwise. */
5933
5934 tree
5935 find_data_references_in_bb (class loop *loop, basic_block bb,
5936 vec<data_reference_p> *datarefs)
5937 {
5938 gimple_stmt_iterator bsi;
5939
5940 for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
5941 {
5942 gimple *stmt = gsi_stmt (bsi);
5943
5944 if (!find_data_references_in_stmt (loop, stmt, datarefs))
5945 {
5946 struct data_reference *res;
5947 res = XCNEW (struct data_reference);
5948 datarefs->safe_push (res);
5949
5950 return chrec_dont_know;
5951 }
5952 }
5953
5954 return NULL_TREE;
5955 }
5956
5957 /* Search the data references in LOOP, and record the information into
5958 DATAREFS. Returns chrec_dont_know when failing to analyze a
5959 difficult case, returns NULL_TREE otherwise.
5960
5961 TODO: This function should be made smarter so that it can handle
5962 address arithmetic as if it were an array access, etc. */
5963
5964 tree
5965 find_data_references_in_loop (class loop *loop,
5966 vec<data_reference_p> *datarefs)
5967 {
5968 basic_block bb, *bbs;
5969 unsigned int i;
5970
5971 bbs = get_loop_body_in_dom_order (loop);
5972
5973 for (i = 0; i < loop->num_nodes; i++)
5974 {
5975 bb = bbs[i];
5976
5977 if (find_data_references_in_bb (loop, bb, datarefs) == chrec_dont_know)
5978 {
5979 free (bbs);
5980 return chrec_dont_know;
5981 }
5982 }
5983 free (bbs);
5984
5985 return NULL_TREE;
5986 }
5987
5988 /* Return the alignment in bytes that DRB is guaranteed to have at all
5989 times. */
5990
5991 unsigned int
5992 dr_alignment (innermost_loop_behavior *drb)
5993 {
5994 /* Get the alignment of BASE_ADDRESS + INIT. */
5995 unsigned int alignment = drb->base_alignment;
5996 unsigned int misalignment = (drb->base_misalignment
5997 + TREE_INT_CST_LOW (drb->init));
5998 if (misalignment != 0)
5999 alignment = MIN (alignment, misalignment & -misalignment);
6000
6001 /* Cap it to the alignment of OFFSET. */
6002 if (!integer_zerop (drb->offset))
6003 alignment = MIN (alignment, drb->offset_alignment);
6004
6005 /* Cap it to the alignment of STEP. */
6006 if (!integer_zerop (drb->step))
6007 alignment = MIN (alignment, drb->step_alignment);
6008
6009 return alignment;
6010 }
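
/* Worked example: with base_alignment 16, base_misalignment 4 and
   init 8, the combined misalignment is 12, and 12 & -12 == 4, so the
   guaranteed alignment drops to 4 bytes. With a combined misalignment
   of 0, the base alignment of 16 is kept, subject to the OFFSET and
   STEP caps applied above. */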
6011
6012 /* If BASE is a pointer-typed SSA name, try to find the object that it
6013 is based on. Return this object X on success and store the alignment
6014 in bytes of BASE - &X in *ALIGNMENT_OUT. */
6015
6016 static tree
6017 get_base_for_alignment_1 (tree base, unsigned int *alignment_out)
6018 {
6019 if (TREE_CODE (base) != SSA_NAME || !POINTER_TYPE_P (TREE_TYPE (base)))
6020 return NULL_TREE;
6021
6022 gimple *def = SSA_NAME_DEF_STMT (base);
6023 base = analyze_scalar_evolution (loop_containing_stmt (def), base);
6024
6025 /* Peel chrecs and record the minimum alignment preserved by
6026 all steps. */
6027 unsigned int alignment = MAX_OFILE_ALIGNMENT / BITS_PER_UNIT;
6028 while (TREE_CODE (base) == POLYNOMIAL_CHREC)
6029 {
6030 unsigned int step_alignment = highest_pow2_factor (CHREC_RIGHT (base));
6031 alignment = MIN (alignment, step_alignment);
6032 base = CHREC_LEFT (base);
6033 }
6034
6035 /* Punt if the expression is too complicated to handle. */
6036 if (tree_contains_chrecs (base, NULL) || !POINTER_TYPE_P (TREE_TYPE (base)))
6037 return NULL_TREE;
6038
6039 /* The only useful cases are those for which a dereference folds to something
6040 other than an INDIRECT_REF. */
6041 tree ref_type = TREE_TYPE (TREE_TYPE (base));
6042 tree ref = fold_indirect_ref_1 (UNKNOWN_LOCATION, ref_type, base);
6043 if (!ref)
6044 return NULL_TREE;
6045
6046 /* Analyze the base to which the steps we peeled were applied. */
6047 poly_int64 bitsize, bitpos, bytepos;
6048 machine_mode mode;
6049 int unsignedp, reversep, volatilep;
6050 tree offset;
6051 base = get_inner_reference (ref, &bitsize, &bitpos, &offset, &mode,
6052 &unsignedp, &reversep, &volatilep);
6053 if (!base || !multiple_p (bitpos, BITS_PER_UNIT, &bytepos))
6054 return NULL_TREE;
6055
6056 /* Restrict the alignment to that guaranteed by the offsets. */
6057 unsigned int bytepos_alignment = known_alignment (bytepos);
6058 if (bytepos_alignment != 0)
6059 alignment = MIN (alignment, bytepos_alignment);
6060 if (offset)
6061 {
6062 unsigned int offset_alignment = highest_pow2_factor (offset);
6063 alignment = MIN (alignment, offset_alignment);
6064 }
6065
6066 *alignment_out = alignment;
6067 return base;
6068 }
6069
6070 /* Return the object whose alignment would need to be changed in order
6071 to increase the alignment of ADDR. Store the maximum achievable
6072 alignment in *MAX_ALIGNMENT. */
6073
6074 tree
6075 get_base_for_alignment (tree addr, unsigned int *max_alignment)
6076 {
6077 tree base = get_base_for_alignment_1 (addr, max_alignment);
6078 if (base)
6079 return base;
6080
6081 if (TREE_CODE (addr) == ADDR_EXPR)
6082 addr = TREE_OPERAND (addr, 0);
6083 *max_alignment = MAX_OFILE_ALIGNMENT / BITS_PER_UNIT;
6084 return addr;
6085 }
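
/* For example, if ADDR is &a for some global array a, the SSA-name
   analysis above does not apply, so the function strips the ADDR_EXPR
   and returns the decl a itself, with *MAX_ALIGNMENT set to the most
   that can ever be requested, MAX_OFILE_ALIGNMENT / BITS_PER_UNIT. */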
6086
6087 /* Recursive helper function. */
6088
6089 static bool
6090 find_loop_nest_1 (class loop *loop, vec<loop_p> *loop_nest)
6091 {
6092 /* Inner loops of the nest should not contain siblings. Example:
6093 when there are two consecutive loops,
6094
6095 | loop_0
6096 | loop_1
6097 | A[{0, +, 1}_1]
6098 | endloop_1
6099 | loop_2
6100 | A[{0, +, 1}_2]
6101 | endloop_2
6102 | endloop_0
6103
6104 the dependence relation cannot be captured by the distance
6105 abstraction. */
6106 if (loop->next)
6107 return false;
6108
6109 loop_nest->safe_push (loop);
6110 if (loop->inner)
6111 return find_loop_nest_1 (loop->inner, loop_nest);
6112 return true;
6113 }
6114
6115 /* Return false when the LOOP is not well nested. Otherwise return
6116 true and insert in LOOP_NEST the loops of the nest. LOOP_NEST will
6117 contain the loops from the outermost to the innermost, as they will
6118 appear in the classic distance vector. */
6119
6120 bool
6121 find_loop_nest (class loop *loop, vec<loop_p> *loop_nest)
6122 {
6123 loop_nest->safe_push (loop);
6124 if (loop->inner)
6125 return find_loop_nest_1 (loop->inner, loop_nest);
6126 return true;
6127 }
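
/* For the sibling example above, find_loop_nest (loop_0, &nest)
   returns false because loop_1 has the sibling loop_2. For a perfect
   nest

   | loop_0
   |   loop_1
   |     A[{0, +, 1}_1]
   |   endloop_1
   | endloop_0

   it returns true and fills NEST with (loop_0, loop_1), outermost
   first. */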
6128
6129 /* Returns true when the data dependences have been computed, false otherwise.
6130 Given a loop nest LOOP, the following vectors are returned:
6131 DATAREFS is initialized to all the data references contained in this loop,
6132 DEPENDENCE_RELATIONS contains the relations between the data references.
6133 Compute read-read and self relations if
6134 COMPUTE_SELF_AND_READ_READ_DEPENDENCES is TRUE. */
6135
6136 bool
6137 compute_data_dependences_for_loop (class loop *loop,
6138 bool compute_self_and_read_read_dependences,
6139 vec<loop_p> *loop_nest,
6140 vec<data_reference_p> *datarefs,
6141 vec<ddr_p> *dependence_relations)
6142 {
6143 bool res = true;
6144
6145 memset (&dependence_stats, 0, sizeof (dependence_stats));
6146
6147 /* If the loop nest is not well formed, or one of the data references
6148 is not computable, give up without spending time to compute other
6149 dependences. */
6150 if (!loop
6151 || !find_loop_nest (loop, loop_nest)
6152 || find_data_references_in_loop (loop, datarefs) == chrec_dont_know
6153 || !compute_all_dependences (*datarefs, dependence_relations, *loop_nest,
6154 compute_self_and_read_read_dependences))
6155 res = false;
6156
6157 if (dump_file && (dump_flags & TDF_STATS))
6158 {
6159 fprintf (dump_file, "Dependence tester statistics:\n");
6160
6161 fprintf (dump_file, "Number of dependence tests: %d\n",
6162 dependence_stats.num_dependence_tests);
6163 fprintf (dump_file, "Number of dependence tests classified dependent: %d\n",
6164 dependence_stats.num_dependence_dependent);
6165 fprintf (dump_file, "Number of dependence tests classified independent: %d\n",
6166 dependence_stats.num_dependence_independent);
6167 fprintf (dump_file, "Number of undetermined dependence tests: %d\n",
6168 dependence_stats.num_dependence_undetermined);
6169
6170 fprintf (dump_file, "Number of subscript tests: %d\n",
6171 dependence_stats.num_subscript_tests);
6172 fprintf (dump_file, "Number of undetermined subscript tests: %d\n",
6173 dependence_stats.num_subscript_undetermined);
6174 fprintf (dump_file, "Number of same subscript function: %d\n",
6175 dependence_stats.num_same_subscript_function);
6176
6177 fprintf (dump_file, "Number of ziv tests: %d\n",
6178 dependence_stats.num_ziv);
6179 fprintf (dump_file, "Number of ziv tests returning dependent: %d\n",
6180 dependence_stats.num_ziv_dependent);
6181 fprintf (dump_file, "Number of ziv tests returning independent: %d\n",
6182 dependence_stats.num_ziv_independent);
6183 fprintf (dump_file, "Number of ziv tests unimplemented: %d\n",
6184 dependence_stats.num_ziv_unimplemented);
6185
6186 fprintf (dump_file, "Number of siv tests: %d\n",
6187 dependence_stats.num_siv);
6188 fprintf (dump_file, "Number of siv tests returning dependent: %d\n",
6189 dependence_stats.num_siv_dependent);
6190 fprintf (dump_file, "Number of siv tests returning independent: %d\n",
6191 dependence_stats.num_siv_independent);
6192 fprintf (dump_file, "Number of siv tests unimplemented: %d\n",
6193 dependence_stats.num_siv_unimplemented);
6194
6195 fprintf (dump_file, "Number of miv tests: %d\n",
6196 dependence_stats.num_miv);
6197 fprintf (dump_file, "Number of miv tests returning dependent: %d\n",
6198 dependence_stats.num_miv_dependent);
6199 fprintf (dump_file, "Number of miv tests returning independent: %d\n",
6200 dependence_stats.num_miv_independent);
6201 fprintf (dump_file, "Number of miv tests unimplemented: %d\n",
6202 dependence_stats.num_miv_unimplemented);
6203 }
6204
6205 return res;
6206 }
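
/* A typical lifecycle sketch (hypothetical caller), using the wrapper
   above instead of the individual find/compute steps:

   | vec<loop_p> nest = vNULL;
   | vec<data_reference_p> datarefs = vNULL;
   | vec<ddr_p> ddrs = vNULL;
   | if (compute_data_dependences_for_loop (loop, false, &nest,
   |                                        &datarefs, &ddrs))
   |   ... query DDR_DIST_VECTS / DDR_DIR_VECTS of each relation ...
   | free_dependence_relations (ddrs);
   | free_data_refs (datarefs);
   | nest.release ();
*/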
6207
6208 /* Free the memory used by a data dependence relation DDR. */
6209
6210 void
6211 free_dependence_relation (struct data_dependence_relation *ddr)
6212 {
6213 if (ddr == NULL)
6214 return;
6215
6216 if (DDR_SUBSCRIPTS (ddr).exists ())
6217 free_subscripts (DDR_SUBSCRIPTS (ddr));
6218 DDR_DIST_VECTS (ddr).release ();
6219 DDR_DIR_VECTS (ddr).release ();
6220
6221 free (ddr);
6222 }
6223
6224 /* Free the memory used by the data dependence relations from
6225 DEPENDENCE_RELATIONS. */
6226
6227 void
6228 free_dependence_relations (vec<ddr_p> dependence_relations)
6229 {
6230 unsigned int i;
6231 struct data_dependence_relation *ddr;
6232
6233 FOR_EACH_VEC_ELT (dependence_relations, i, ddr)
6234 if (ddr)
6235 free_dependence_relation (ddr);
6236
6237 dependence_relations.release ();
6238 }
6239
6240 /* Free the memory used by the data references from DATAREFS. */
6241
6242 void
6243 free_data_refs (vec<data_reference_p> datarefs)
6244 {
6245 unsigned int i;
6246 struct data_reference *dr;
6247
6248 FOR_EACH_VEC_ELT (datarefs, i, dr)
6249 free_data_ref (dr);
6250 datarefs.release ();
6251 }
6252
6253 /* Common routine implementing both dr_direction_indicator and
6254 dr_zero_step_indicator. Return USEFUL_MIN if the indicator is known
6255 to be >= USEFUL_MIN and -1 if the indicator is known to be negative.
6256 Return the step as the indicator otherwise. */
6257
6258 static tree
6259 dr_step_indicator (struct data_reference *dr, int useful_min)
6260 {
6261 tree step = DR_STEP (dr);
6262 if (!step)
6263 return NULL_TREE;
6264 STRIP_NOPS (step);
6265 /* Look for cases where the step is scaled by a positive constant
6266 integer, which will often be the access size. If the multiplication
6267 doesn't change the sign (due to overflow effects) then we can
6268 test the unscaled value instead. */
6269 if (TREE_CODE (step) == MULT_EXPR
6270 && TREE_CODE (TREE_OPERAND (step, 1)) == INTEGER_CST
6271 && tree_int_cst_sgn (TREE_OPERAND (step, 1)) > 0)
6272 {
6273 tree factor = TREE_OPERAND (step, 1);
6274 step = TREE_OPERAND (step, 0);
6275
6276 /* Strip widening and truncating conversions as well as nops. */
6277 if (CONVERT_EXPR_P (step)
6278 && INTEGRAL_TYPE_P (TREE_TYPE (TREE_OPERAND (step, 0))))
6279 step = TREE_OPERAND (step, 0);
6280 tree type = TREE_TYPE (step);
6281
6282 /* Get the range of step values that would not cause overflow. */
6283 widest_int minv = (wi::to_widest (TYPE_MIN_VALUE (ssizetype))
6284 / wi::to_widest (factor));
6285 widest_int maxv = (wi::to_widest (TYPE_MAX_VALUE (ssizetype))
6286 / wi::to_widest (factor));
6287
6288 /* Get the range of values that the unconverted step actually has. */
6289 wide_int step_min, step_max;
6290 if (TREE_CODE (step) != SSA_NAME
6291 || get_range_info (step, &step_min, &step_max) != VR_RANGE)
6292 {
6293 step_min = wi::to_wide (TYPE_MIN_VALUE (type));
6294 step_max = wi::to_wide (TYPE_MAX_VALUE (type));
6295 }
6296
6297 /* Check whether the unconverted step has an acceptable range. */
6298 signop sgn = TYPE_SIGN (type);
6299 if (wi::les_p (minv, widest_int::from (step_min, sgn))
6300 && wi::ges_p (maxv, widest_int::from (step_max, sgn)))
6301 {
6302 if (wi::ge_p (step_min, useful_min, sgn))
6303 return ssize_int (useful_min);
6304 else if (wi::lt_p (step_max, 0, sgn))
6305 return ssize_int (-1);
6306 else
6307 return fold_convert (ssizetype, step);
6308 }
6309 }
6310 return DR_STEP (dr);
6311 }
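
/* Example: for DR_STEP (dr) == n * 4 where range information proves
   n >= 0, the factor 4 is stripped and the range check succeeds, so
   dr_step_indicator returns ssize_int (USEFUL_MIN); with USEFUL_MIN
   == 0, dr_known_forward_stride_p (below) then folds the comparison
   0 < 0 to false and reports a known forward stride. If the sign of
   n is unknown, the indicator stays symbolic, the comparison does not
   fold to a constant, and the answer is conservatively false. */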
6312
6313 /* Return a value that is negative iff DR has a negative step. */
6314
6315 tree
6316 dr_direction_indicator (struct data_reference *dr)
6317 {
6318 return dr_step_indicator (dr, 0);
6319 }
6320
6321 /* Return a value that is zero iff DR has a zero step. */
6322
6323 tree
6324 dr_zero_step_indicator (struct data_reference *dr)
6325 {
6326 return dr_step_indicator (dr, 1);
6327 }
6328
6329 /* Return true if DR is known to have a nonnegative (but possibly zero)
6330 step. */
6331
6332 bool
6333 dr_known_forward_stride_p (struct data_reference *dr)
6334 {
6335 tree indicator = dr_direction_indicator (dr);
6336 tree neg_step_val = fold_binary (LT_EXPR, boolean_type_node,
6337 fold_convert (ssizetype, indicator),
6338 ssize_int (0));
6339 return neg_step_val && integer_zerop (neg_step_val);
6340 }