tree-optimization/98758 - fix integer arithmetic in data-ref analysis
gcc/tree-data-ref.c
/* Data references and dependences detectors.
   Copyright (C) 2003-2021 Free Software Foundation, Inc.
   Contributed by Sebastian Pop <pop@cri.ensmp.fr>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* This pass walks a given loop structure searching for array
   references.  The information about the array accesses is recorded
   in DATA_REFERENCE structures.

   The basic test for determining the dependences is:
   given two access functions chrec1 and chrec2 to the same array, and
   x and y two vectors from the iteration domain, the same element of
   the array is accessed twice at iterations x and y if and only if:
   |   chrec1 (x) == chrec2 (y).
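
   For example, for the accesses A[i] and A[i + 1] in a loop whose
   induction variable i starts at 0 and steps by 1, the access
   functions are chrec1 = {0, +, 1} and chrec2 = {1, +, 1}, so the
   equation above becomes x == y + 1: iteration x of the first access
   touches the element accessed at iteration y = x - 1 by the second,
   giving a dependence distance of 1.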

   The goals of this analysis are:

   - to determine the independence: the relation between two
     independent accesses is qualified with the chrec_known (this
     information allows a loop parallelization),

   - when two data references access the same data, to qualify the
     dependence relation with classic dependence representations:

       - distance vectors
       - direction vectors
       - loop carried level dependence
       - polyhedron dependence
     or with the chains of recurrences based representation,

   - to define a knowledge base for storing the data dependence
     information,

   - to define an interface to access this data.


   Definitions:

   - subscript: given two array accesses, a subscript is the tuple
     composed of the access functions for a given dimension.  Example:
     Given A[f1][f2][f3] and B[g1][g2][g3], there are three subscripts:
     (f1, g1), (f2, g2), (f3, g3).

   - Diophantine equation: an equation whose coefficients and
     solutions are integer constants, for example the equation
     |   3*x + 2*y = 1
     has an integer solution x = 1 and y = -1.
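
     More generally, a*x + b*y = c has integer solutions if and only
     if gcd (a, b) divides c; this is the basis of the classic GCD
     dependence test.  For instance
     |   2*x + 2*y = 1
     has no integer solutions, because gcd (2, 2) = 2 does not divide 1.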

   References:

   - "Advanced Compilation for High Performance Computing" by Randy
     Allen and Ken Kennedy.
     http://citeseer.ist.psu.edu/goff91practical.html

   - "Loop Transformations for Restructuring Compilers - The Foundations"
     by Utpal Banerjee.


*/

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "gimple-pretty-print.h"
#include "alias.h"
#include "fold-const.h"
#include "expr.h"
#include "gimple-iterator.h"
#include "tree-ssa-loop-niter.h"
#include "tree-ssa-loop.h"
#include "tree-ssa.h"
#include "cfgloop.h"
#include "tree-data-ref.h"
#include "tree-scalar-evolution.h"
#include "dumpfile.h"
#include "tree-affine.h"
#include "builtins.h"
#include "tree-eh.h"
#include "ssa.h"
#include "internal-fn.h"
#include "range-op.h"
#include "vr-values.h"

static struct datadep_stats
{
  int num_dependence_tests;
  int num_dependence_dependent;
  int num_dependence_independent;
  int num_dependence_undetermined;

  int num_subscript_tests;
  int num_subscript_undetermined;
  int num_same_subscript_function;

  int num_ziv;
  int num_ziv_independent;
  int num_ziv_dependent;
  int num_ziv_unimplemented;

  int num_siv;
  int num_siv_independent;
  int num_siv_dependent;
  int num_siv_unimplemented;

  int num_miv;
  int num_miv_independent;
  int num_miv_dependent;
  int num_miv_unimplemented;
} dependence_stats;

static bool subscript_dependence_tester_1 (struct data_dependence_relation *,
					   unsigned int, unsigned int,
					   class loop *);
/* Returns true iff A divides B.  */

static inline bool
tree_fold_divides_p (const_tree a, const_tree b)
{
  gcc_assert (TREE_CODE (a) == INTEGER_CST);
  gcc_assert (TREE_CODE (b) == INTEGER_CST);
  return integer_zerop (int_const_binop (TRUNC_MOD_EXPR, b, a));
}

/* Returns true iff A divides B.  */

static inline bool
int_divides_p (lambda_int a, lambda_int b)
{
  return ((b % a) == 0);
}

/* Return true if reference REF contains a union access.  */

static bool
ref_contains_union_access_p (tree ref)
{
  while (handled_component_p (ref))
    {
      ref = TREE_OPERAND (ref, 0);
      if (TREE_CODE (TREE_TYPE (ref)) == UNION_TYPE
	  || TREE_CODE (TREE_TYPE (ref)) == QUAL_UNION_TYPE)
	return true;
    }
  return false;
}

\f

/* Dump into FILE all the data references from DATAREFS.  */

static void
dump_data_references (FILE *file, vec<data_reference_p> datarefs)
{
  unsigned int i;
  struct data_reference *dr;

  FOR_EACH_VEC_ELT (datarefs, i, dr)
    dump_data_reference (file, dr);
}

/* Unified dump of all the data references in REF to STDERR.  */

DEBUG_FUNCTION void
debug (vec<data_reference_p> &ref)
{
  dump_data_references (stderr, ref);
}

DEBUG_FUNCTION void
debug (vec<data_reference_p> *ptr)
{
  if (ptr)
    debug (*ptr);
  else
    fprintf (stderr, "<nil>\n");
}


/* Dump into STDERR all the data references from DATAREFS.  */

DEBUG_FUNCTION void
debug_data_references (vec<data_reference_p> datarefs)
{
  dump_data_references (stderr, datarefs);
}

/* Print to STDERR the data_reference DR.  */

DEBUG_FUNCTION void
debug_data_reference (struct data_reference *dr)
{
  dump_data_reference (stderr, dr);
}

/* Dump function for a DATA_REFERENCE structure.  */

void
dump_data_reference (FILE *outf,
		     struct data_reference *dr)
{
  unsigned int i;

  fprintf (outf, "#(Data Ref: \n");
  fprintf (outf, "#  bb: %d \n", gimple_bb (DR_STMT (dr))->index);
  fprintf (outf, "#  stmt: ");
  print_gimple_stmt (outf, DR_STMT (dr), 0);
  fprintf (outf, "#  ref: ");
  print_generic_stmt (outf, DR_REF (dr));
  fprintf (outf, "#  base_object: ");
  print_generic_stmt (outf, DR_BASE_OBJECT (dr));

  for (i = 0; i < DR_NUM_DIMENSIONS (dr); i++)
    {
      fprintf (outf, "#  Access function %d: ", i);
      print_generic_stmt (outf, DR_ACCESS_FN (dr, i));
    }
  fprintf (outf, "#)\n");
}

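/* For example, for a read of a[i_1] in basic block 3, the output of
   dump_data_reference looks roughly like:
   |   #(Data Ref:
   |   #  bb: 3
   |   #  stmt: _2 = a[i_1];
   |   #  ref: a[i_1]
   |   #  base_object: a
   |   #  Access function 0: {0, +, 1}_1
   |   #)  */
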
/* Unified dump function for a DATA_REFERENCE structure.  */

DEBUG_FUNCTION void
debug (data_reference &ref)
{
  dump_data_reference (stderr, &ref);
}

DEBUG_FUNCTION void
debug (data_reference *ptr)
{
  if (ptr)
    debug (*ptr);
  else
    fprintf (stderr, "<nil>\n");
}


/* Dumps the affine function described by FN to the file OUTF.  */

DEBUG_FUNCTION void
dump_affine_function (FILE *outf, affine_fn fn)
{
  unsigned i;
  tree coef;

  print_generic_expr (outf, fn[0], TDF_SLIM);
  for (i = 1; fn.iterate (i, &coef); i++)
    {
      fprintf (outf, " + ");
      print_generic_expr (outf, coef, TDF_SLIM);
      fprintf (outf, " * x_%u", i);
    }
}

/* Dumps the conflict function CF to the file OUTF.  */

DEBUG_FUNCTION void
dump_conflict_function (FILE *outf, conflict_function *cf)
{
  unsigned i;

  if (cf->n == NO_DEPENDENCE)
    fprintf (outf, "no dependence");
  else if (cf->n == NOT_KNOWN)
    fprintf (outf, "not known");
  else
    {
      for (i = 0; i < cf->n; i++)
	{
	  if (i != 0)
	    fprintf (outf, " ");
	  fprintf (outf, "[");
	  dump_affine_function (outf, cf->fns[i]);
	  fprintf (outf, "]");
	}
    }
}

/* Dump function for a SUBSCRIPT structure.  */

DEBUG_FUNCTION void
dump_subscript (FILE *outf, struct subscript *subscript)
{
  conflict_function *cf = SUB_CONFLICTS_IN_A (subscript);

  fprintf (outf, "\n (subscript \n");
  fprintf (outf, "  iterations_that_access_an_element_twice_in_A: ");
  dump_conflict_function (outf, cf);
  if (CF_NONTRIVIAL_P (cf))
    {
      tree last_iteration = SUB_LAST_CONFLICT (subscript);
      fprintf (outf, "\n  last_conflict: ");
      print_generic_expr (outf, last_iteration);
    }

  cf = SUB_CONFLICTS_IN_B (subscript);
  fprintf (outf, "\n  iterations_that_access_an_element_twice_in_B: ");
  dump_conflict_function (outf, cf);
  if (CF_NONTRIVIAL_P (cf))
    {
      tree last_iteration = SUB_LAST_CONFLICT (subscript);
      fprintf (outf, "\n  last_conflict: ");
      print_generic_expr (outf, last_iteration);
    }

  fprintf (outf, "\n  (Subscript distance: ");
  print_generic_expr (outf, SUB_DISTANCE (subscript));
  fprintf (outf, " ))\n");
}

/* Print the classic direction vector DIRV to OUTF.  */

DEBUG_FUNCTION void
print_direction_vector (FILE *outf,
			lambda_vector dirv,
			int length)
{
  int eq;

  for (eq = 0; eq < length; eq++)
    {
      enum data_dependence_direction dir = ((enum data_dependence_direction)
					    dirv[eq]);

      switch (dir)
	{
	case dir_positive:
	  fprintf (outf, " +");
	  break;
	case dir_negative:
	  fprintf (outf, " -");
	  break;
	case dir_equal:
	  fprintf (outf, " =");
	  break;
	case dir_positive_or_equal:
	  fprintf (outf, " +=");
	  break;
	case dir_positive_or_negative:
	  fprintf (outf, " +-");
	  break;
	case dir_negative_or_equal:
	  fprintf (outf, " -=");
	  break;
	case dir_star:
	  fprintf (outf, " *");
	  break;
	default:
	  fprintf (outf, "indep");
	  break;
	}
    }
  fprintf (outf, "\n");
}

/* Print a vector of direction vectors.  */

DEBUG_FUNCTION void
print_dir_vectors (FILE *outf, vec<lambda_vector> dir_vects,
		   int length)
{
  unsigned j;
  lambda_vector v;

  FOR_EACH_VEC_ELT (dir_vects, j, v)
    print_direction_vector (outf, v, length);
}

/* Print out the lambda vector VECTOR of length N to OUTFILE.  */

DEBUG_FUNCTION void
print_lambda_vector (FILE * outfile, lambda_vector vector, int n)
{
  int i;

  for (i = 0; i < n; i++)
    fprintf (outfile, "%3d ", (int)vector[i]);
  fprintf (outfile, "\n");
}

/* Print a vector of distance vectors.  */

DEBUG_FUNCTION void
print_dist_vectors (FILE *outf, vec<lambda_vector> dist_vects,
		    int length)
{
  unsigned j;
  lambda_vector v;

  FOR_EACH_VEC_ELT (dist_vects, j, v)
    print_lambda_vector (outf, v, length);
}

/* Dump function for a DATA_DEPENDENCE_RELATION structure.  */

DEBUG_FUNCTION void
dump_data_dependence_relation (FILE *outf,
			       struct data_dependence_relation *ddr)
{
  struct data_reference *dra, *drb;

  fprintf (outf, "(Data Dep: \n");

  if (!ddr || DDR_ARE_DEPENDENT (ddr) == chrec_dont_know)
    {
      if (ddr)
	{
	  dra = DDR_A (ddr);
	  drb = DDR_B (ddr);
	  if (dra)
	    dump_data_reference (outf, dra);
	  else
	    fprintf (outf, " (nil)\n");
	  if (drb)
	    dump_data_reference (outf, drb);
	  else
	    fprintf (outf, " (nil)\n");
	}
      fprintf (outf, " (don't know)\n)\n");
      return;
    }

  dra = DDR_A (ddr);
  drb = DDR_B (ddr);
  dump_data_reference (outf, dra);
  dump_data_reference (outf, drb);

  if (DDR_ARE_DEPENDENT (ddr) == chrec_known)
    fprintf (outf, " (no dependence)\n");

  else if (DDR_ARE_DEPENDENT (ddr) == NULL_TREE)
    {
      unsigned int i;
      class loop *loopi;

      subscript *sub;
      FOR_EACH_VEC_ELT (DDR_SUBSCRIPTS (ddr), i, sub)
	{
	  fprintf (outf, "  access_fn_A: ");
	  print_generic_stmt (outf, SUB_ACCESS_FN (sub, 0));
	  fprintf (outf, "  access_fn_B: ");
	  print_generic_stmt (outf, SUB_ACCESS_FN (sub, 1));
	  dump_subscript (outf, sub);
	}

      fprintf (outf, "  loop nest: (");
      FOR_EACH_VEC_ELT (DDR_LOOP_NEST (ddr), i, loopi)
	fprintf (outf, "%d ", loopi->num);
      fprintf (outf, ")\n");

      for (i = 0; i < DDR_NUM_DIST_VECTS (ddr); i++)
	{
	  fprintf (outf, "  distance_vector: ");
	  print_lambda_vector (outf, DDR_DIST_VECT (ddr, i),
			       DDR_NB_LOOPS (ddr));
	}

      for (i = 0; i < DDR_NUM_DIR_VECTS (ddr); i++)
	{
	  fprintf (outf, "  direction_vector: ");
	  print_direction_vector (outf, DDR_DIR_VECT (ddr, i),
				  DDR_NB_LOOPS (ddr));
	}
    }

  fprintf (outf, ")\n");
}

/* Debug version.  */

DEBUG_FUNCTION void
debug_data_dependence_relation (struct data_dependence_relation *ddr)
{
  dump_data_dependence_relation (stderr, ddr);
}

/* Dump into FILE all the dependence relations from DDRS.  */

DEBUG_FUNCTION void
dump_data_dependence_relations (FILE *file,
				vec<ddr_p> ddrs)
{
  unsigned int i;
  struct data_dependence_relation *ddr;

  FOR_EACH_VEC_ELT (ddrs, i, ddr)
    dump_data_dependence_relation (file, ddr);
}

DEBUG_FUNCTION void
debug (vec<ddr_p> &ref)
{
  dump_data_dependence_relations (stderr, ref);
}

DEBUG_FUNCTION void
debug (vec<ddr_p> *ptr)
{
  if (ptr)
    debug (*ptr);
  else
    fprintf (stderr, "<nil>\n");
}


/* Dump to STDERR all the dependence relations from DDRS.  */

DEBUG_FUNCTION void
debug_data_dependence_relations (vec<ddr_p> ddrs)
{
  dump_data_dependence_relations (stderr, ddrs);
}

/* Dumps the distance and direction vectors in FILE.  DDRS contains
   the dependence relations; each vector has one element per loop in
   the considered nest.  */

DEBUG_FUNCTION void
dump_dist_dir_vectors (FILE *file, vec<ddr_p> ddrs)
{
  unsigned int i, j;
  struct data_dependence_relation *ddr;
  lambda_vector v;

  FOR_EACH_VEC_ELT (ddrs, i, ddr)
    if (DDR_ARE_DEPENDENT (ddr) == NULL_TREE && DDR_AFFINE_P (ddr))
      {
	FOR_EACH_VEC_ELT (DDR_DIST_VECTS (ddr), j, v)
	  {
	    fprintf (file, "DISTANCE_V (");
	    print_lambda_vector (file, v, DDR_NB_LOOPS (ddr));
	    fprintf (file, ")\n");
	  }

	FOR_EACH_VEC_ELT (DDR_DIR_VECTS (ddr), j, v)
	  {
	    fprintf (file, "DIRECTION_V (");
	    print_direction_vector (file, v, DDR_NB_LOOPS (ddr));
	    fprintf (file, ")\n");
	  }
      }

  fprintf (file, "\n\n");
}

/* Dumps the data dependence relations DDRS in FILE.  */

DEBUG_FUNCTION void
dump_ddrs (FILE *file, vec<ddr_p> ddrs)
{
  unsigned int i;
  struct data_dependence_relation *ddr;

  FOR_EACH_VEC_ELT (ddrs, i, ddr)
    dump_data_dependence_relation (file, ddr);

  fprintf (file, "\n\n");
}

DEBUG_FUNCTION void
debug_ddrs (vec<ddr_p> ddrs)
{
  dump_ddrs (stderr, ddrs);
}

/* If RESULT_RANGE is nonnull, set *RESULT_RANGE to the range of
   OP0 CODE OP1, where:

   - OP0 CODE OP1 has integral type TYPE
   - the range of OP0 is given by OP0_RANGE and
   - the range of OP1 is given by OP1_RANGE.

   Independently of RESULT_RANGE, try to compute:

     DELTA = ((sizetype) OP0 CODE (sizetype) OP1)
	     - (sizetype) (OP0 CODE OP1)

   as a constant and subtract DELTA from the ssizetype constant in *OFF.
   Return true on success, or false if DELTA is not known at compile time.

   Truncation and sign changes are known to distribute over CODE, i.e.

     (itype) (A CODE B) == (itype) A CODE (itype) B

   for any integral type ITYPE whose precision is no greater than the
   precision of A and B.  */

static bool
compute_distributive_range (tree type, value_range &op0_range,
			    tree_code code, value_range &op1_range,
			    tree *off, value_range *result_range)
{
  gcc_assert (INTEGRAL_TYPE_P (type) && !TYPE_OVERFLOW_TRAPS (type));
  if (result_range)
    {
      range_operator *op = range_op_handler (code, type);
      op->fold_range (*result_range, type, op0_range, op1_range);
    }

  /* The distributive property guarantees that if TYPE is no narrower
     than SIZETYPE,

       (sizetype) (OP0 CODE OP1) == (sizetype) OP0 CODE (sizetype) OP1

     and so we can treat DELTA as zero.  */
  if (TYPE_PRECISION (type) >= TYPE_PRECISION (sizetype))
    return true;

  /* If overflow is undefined, we can assume that:

       X == (ssizetype) OP0 CODE (ssizetype) OP1

     is within the range of TYPE, i.e.:

       X == (ssizetype) (TYPE) X

     Distributing the (TYPE) truncation over X gives:

       X == (ssizetype) (OP0 CODE OP1)

     Casting both sides to sizetype and distributing the sizetype cast
     over X gives:

       (sizetype) OP0 CODE (sizetype) OP1 == (sizetype) (OP0 CODE OP1)

     and so we can treat DELTA as zero.  */
  if (TYPE_OVERFLOW_UNDEFINED (type))
    return true;

  /* Compute the range of:

       (ssizetype) OP0 CODE (ssizetype) OP1

     The distributive property guarantees that this has the same bitpattern as:

       (sizetype) OP0 CODE (sizetype) OP1

     but its range is more conducive to analysis.  */
  range_cast (op0_range, ssizetype);
  range_cast (op1_range, ssizetype);
  value_range wide_range;
  range_operator *op = range_op_handler (code, ssizetype);
  bool saved_flag_wrapv = flag_wrapv;
  flag_wrapv = 1;
  op->fold_range (wide_range, ssizetype, op0_range, op1_range);
  flag_wrapv = saved_flag_wrapv;
  if (wide_range.num_pairs () != 1 || !range_int_cst_p (&wide_range))
    return false;

  wide_int lb = wide_range.lower_bound ();
  wide_int ub = wide_range.upper_bound ();

  /* Calculate the number of times that each end of the range overflows or
     underflows TYPE.  We can only calculate DELTA if the numbers match.  */
  unsigned int precision = TYPE_PRECISION (type);
  if (!TYPE_UNSIGNED (type))
    {
      wide_int type_min = wi::mask (precision - 1, true, lb.get_precision ());
      lb -= type_min;
      ub -= type_min;
    }
  wide_int upper_bits = wi::mask (precision, true, lb.get_precision ());
  lb &= upper_bits;
  ub &= upper_bits;
  if (lb != ub)
    return false;

  /* OP0 CODE OP1 overflows exactly arshift (LB, PRECISION) times, with
     negative values indicating underflow.  The low PRECISION bits of LB
     are clear, so DELTA is therefore LB (== UB).  */
  *off = wide_int_to_tree (ssizetype, wi::to_wide (*off) - lb);
  return true;
}

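/* As a worked example of the DELTA computation above, assume TYPE is
   an 8-bit unsigned type, OP0 has the singleton range [200, 200],
   OP1 has range [100, 100] and CODE is PLUS_EXPR.  Then
   (sizetype) OP0 + (sizetype) OP1 == 300 while
   (sizetype) (OP0 + OP1) == 44, since the 8-bit addition wraps once.
   Both ssizetype bounds are 300; clearing their low 8 bits gives
   LB == UB == 256, so DELTA == 256 is subtracted from *OFF.  */
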
/* Return true if (sizetype) OP == (sizetype) (TO_TYPE) OP,
   given that OP has type FROM_TYPE and range RANGE.  Both TO_TYPE and
   FROM_TYPE are integral types.  */

static bool
nop_conversion_for_offset_p (tree to_type, tree from_type, value_range &range)
{
  gcc_assert (INTEGRAL_TYPE_P (to_type)
	      && INTEGRAL_TYPE_P (from_type)
	      && !TYPE_OVERFLOW_TRAPS (to_type)
	      && !TYPE_OVERFLOW_TRAPS (from_type));

  /* Converting to something no narrower than sizetype and then to sizetype
     is equivalent to converting directly to sizetype.  */
  if (TYPE_PRECISION (to_type) >= TYPE_PRECISION (sizetype))
    return true;

  /* Check whether TO_TYPE can represent all values that FROM_TYPE can.  */
  if (TYPE_PRECISION (from_type) < TYPE_PRECISION (to_type)
      && (TYPE_UNSIGNED (from_type) || !TYPE_UNSIGNED (to_type)))
    return true;

  /* For narrowing conversions, we could in principle test whether
     the bits in FROM_TYPE but not in TO_TYPE have a fixed value
     and apply a constant adjustment.

     For other conversions (which involve a sign change) we could
     check that the signs are always equal, and apply a constant
     adjustment if the signs are negative.

     However, both cases should be rare.  */
  return range_fits_type_p (&range, TYPE_PRECISION (to_type),
			    TYPE_SIGN (to_type));
}

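/* For instance, with TO_TYPE an 8-bit unsigned type and FROM_TYPE a
   32-bit int whose RANGE is known to be [0, 100], the narrowing
   conversion above is a nop for offset purposes: range_fits_type_p
   confirms that no value in RANGE loses bits under truncation.  */
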
static void
split_constant_offset (tree type, tree *var, tree *off,
		       value_range *result_range,
		       hash_map<tree, std::pair<tree, tree> > &cache,
		       unsigned *limit);

/* Helper function for split_constant_offset.  If TYPE is a pointer type,
   try to express OP0 CODE OP1 as:

     POINTER_PLUS <*VAR, (sizetype) *OFF>

   where:

   - *VAR has type TYPE
   - *OFF is a constant of type ssizetype.

   If TYPE is an integral type, try to express (sizetype) (OP0 CODE OP1) as:

     *VAR + (sizetype) *OFF

   where:

   - *VAR has type sizetype
   - *OFF is a constant of type ssizetype.

   In both cases, OP0 CODE OP1 has type TYPE.

   Return true on success.  A false return value indicates that we can't
   do better than set *OFF to zero.

   When returning true, set RESULT_RANGE to the range of OP0 CODE OP1,
   if RESULT_RANGE is nonnull and if we can do better than assume VR_VARYING.

   CACHE caches {*VAR, *OFF} pairs for SSA names that we've previously
   visited.  LIMIT counts down the number of SSA names that we are
   allowed to process before giving up.  */

static bool
split_constant_offset_1 (tree type, tree op0, enum tree_code code, tree op1,
			 tree *var, tree *off, value_range *result_range,
			 hash_map<tree, std::pair<tree, tree> > &cache,
			 unsigned *limit)
{
  tree var0, var1;
  tree off0, off1;
  value_range op0_range, op1_range;

  *var = NULL_TREE;
  *off = NULL_TREE;

  switch (code)
    {
    case INTEGER_CST:
      *var = size_int (0);
      *off = fold_convert (ssizetype, op0);
      if (result_range)
	result_range->set (op0, op0);
      return true;

    case POINTER_PLUS_EXPR:
      split_constant_offset (op0, &var0, &off0, nullptr, cache, limit);
      split_constant_offset (op1, &var1, &off1, nullptr, cache, limit);
      *var = fold_build2 (POINTER_PLUS_EXPR, type, var0, var1);
      *off = size_binop (PLUS_EXPR, off0, off1);
      return true;

    case PLUS_EXPR:
    case MINUS_EXPR:
      split_constant_offset (op0, &var0, &off0, &op0_range, cache, limit);
      split_constant_offset (op1, &var1, &off1, &op1_range, cache, limit);
      *off = size_binop (code, off0, off1);
      if (!compute_distributive_range (type, op0_range, code, op1_range,
				       off, result_range))
	return false;
      *var = fold_build2 (code, sizetype, var0, var1);
      return true;

    case MULT_EXPR:
      if (TREE_CODE (op1) != INTEGER_CST)
	return false;

      split_constant_offset (op0, &var0, &off0, &op0_range, cache, limit);
      op1_range.set (op1, op1);
      *off = size_binop (MULT_EXPR, off0, fold_convert (ssizetype, op1));
      if (!compute_distributive_range (type, op0_range, code, op1_range,
				       off, result_range))
	return false;
      *var = fold_build2 (MULT_EXPR, sizetype, var0,
			  fold_convert (sizetype, op1));
      return true;

    case ADDR_EXPR:
      {
	tree base, poffset;
	poly_int64 pbitsize, pbitpos, pbytepos;
	machine_mode pmode;
	int punsignedp, preversep, pvolatilep;

	op0 = TREE_OPERAND (op0, 0);
	base
	  = get_inner_reference (op0, &pbitsize, &pbitpos, &poffset, &pmode,
				 &punsignedp, &preversep, &pvolatilep);

	if (!multiple_p (pbitpos, BITS_PER_UNIT, &pbytepos))
	  return false;
	base = build_fold_addr_expr (base);
	off0 = ssize_int (pbytepos);

	if (poffset)
	  {
	    split_constant_offset (poffset, &poffset, &off1, nullptr,
				   cache, limit);
	    off0 = size_binop (PLUS_EXPR, off0, off1);
	    base = fold_build_pointer_plus (base, poffset);
	  }

	var0 = fold_convert (type, base);

	/* If variable-length types are involved, punt: otherwise casts
	   might be converted into ARRAY_REFs in gimplify_conversion.
	   Computing that ARRAY_REF's element size requires TYPE_SIZE_UNIT,
	   which possibly no longer appears in the current GIMPLE.
	   This perhaps could run
	   if (CONVERT_EXPR_P (var0))
	     {
	       gimplify_conversion (&var0);
	       // Attempt to fill in the element size of any ARRAY_REF
	       // found within var0 from the corresponding ARRAY_REF
	       // embedded in the operand; if unsuccessful, just punt.
	     }  */
	while (POINTER_TYPE_P (type))
	  type = TREE_TYPE (type);
	if (int_size_in_bytes (type) < 0)
	  return false;

	*var = var0;
	*off = off0;
	return true;
      }

    case SSA_NAME:
      {
	if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (op0))
	  return false;

	gimple *def_stmt = SSA_NAME_DEF_STMT (op0);
	enum tree_code subcode;

	if (gimple_code (def_stmt) != GIMPLE_ASSIGN)
	  return false;

	subcode = gimple_assign_rhs_code (def_stmt);

	/* We are using a cache to avoid un-CSEing large amounts of code.  */
	bool use_cache = false;
	if (!has_single_use (op0)
	    && (subcode == POINTER_PLUS_EXPR
		|| subcode == PLUS_EXPR
		|| subcode == MINUS_EXPR
		|| subcode == MULT_EXPR
		|| subcode == ADDR_EXPR
		|| CONVERT_EXPR_CODE_P (subcode)))
	  {
	    use_cache = true;
	    bool existed;
	    std::pair<tree, tree> &e = cache.get_or_insert (op0, &existed);
	    if (existed)
	      {
		if (integer_zerop (e.second))
		  return false;
		*var = e.first;
		*off = e.second;
		/* The caller sets the range in this case.  */
		return true;
	      }
	    e = std::make_pair (op0, ssize_int (0));
	  }

	if (*limit == 0)
	  return false;
	--*limit;

	var0 = gimple_assign_rhs1 (def_stmt);
	var1 = gimple_assign_rhs2 (def_stmt);

	bool res = split_constant_offset_1 (type, var0, subcode, var1,
					    var, off, nullptr, cache, limit);
	if (res && use_cache)
	  *cache.get (op0) = std::make_pair (*var, *off);
	/* The caller sets the range in this case.  */
	return res;
      }
    CASE_CONVERT:
      {
	/* We can only handle the following conversions:

	   - Conversions from one pointer type to another pointer type.

	   - Conversions from one non-trapping integral type to another
	     non-trapping integral type.  In this case, the recursive
	     call makes sure that:

	       (sizetype) OP0

	     can be expressed as a sizetype operation involving VAR and OFF,
	     and all we need to do is check whether:

	       (sizetype) OP0 == (sizetype) (TYPE) OP0

	   - Conversions from a non-trapping sizetype-size integral type to
	     a like-sized pointer type.  In this case, the recursive call
	     makes sure that:

	       (sizetype) OP0 == *VAR + (sizetype) *OFF

	     and we can convert that to:

	       POINTER_PLUS <(TYPE) *VAR, (sizetype) *OFF>

	   - Conversions from a sizetype-sized pointer type to a like-sized
	     non-trapping integral type.  In this case, the recursive call
	     makes sure that:

	       OP0 == POINTER_PLUS <*VAR, (sizetype) *OFF>

	     where the POINTER_PLUS and *VAR have the same precision as
	     TYPE (and the same precision as sizetype).  Then:

	       (sizetype) (TYPE) OP0 == (sizetype) *VAR + (sizetype) *OFF.  */
	tree itype = TREE_TYPE (op0);
	if ((POINTER_TYPE_P (itype)
	     || (INTEGRAL_TYPE_P (itype) && !TYPE_OVERFLOW_TRAPS (itype)))
	    && (POINTER_TYPE_P (type)
		|| (INTEGRAL_TYPE_P (type) && !TYPE_OVERFLOW_TRAPS (type)))
	    && (POINTER_TYPE_P (type) == POINTER_TYPE_P (itype)
		|| (TYPE_PRECISION (type) == TYPE_PRECISION (sizetype)
		    && TYPE_PRECISION (itype) == TYPE_PRECISION (sizetype))))
	  {
	    if (POINTER_TYPE_P (type))
	      {
		split_constant_offset (op0, var, off, nullptr, cache, limit);
		*var = fold_convert (type, *var);
	      }
	    else if (POINTER_TYPE_P (itype))
	      {
		split_constant_offset (op0, var, off, nullptr, cache, limit);
		*var = fold_convert (sizetype, *var);
	      }
	    else
	      {
		split_constant_offset (op0, var, off, &op0_range,
				       cache, limit);
		if (!nop_conversion_for_offset_p (type, itype, op0_range))
		  return false;
		if (result_range)
		  {
		    *result_range = op0_range;
		    range_cast (*result_range, type);
		  }
	      }
	    return true;
	  }
	return false;
      }

    default:
      return false;
    }
}

/* If EXP has pointer type, try to express it as:

     POINTER_PLUS <*VAR, (sizetype) *OFF>

   where:

   - *VAR has the same type as EXP
   - *OFF is a constant of type ssizetype.

   If EXP has an integral type, try to express (sizetype) EXP as:

     *VAR + (sizetype) *OFF

   where:

   - *VAR has type sizetype
   - *OFF is a constant of type ssizetype.

   If EXP_RANGE is nonnull, set it to the range of EXP.

   CACHE caches {*VAR, *OFF} pairs for SSA names that we've previously
   visited.  LIMIT counts down the number of SSA names that we are
   allowed to process before giving up.  */

static void
split_constant_offset (tree exp, tree *var, tree *off, value_range *exp_range,
		       hash_map<tree, std::pair<tree, tree> > &cache,
		       unsigned *limit)
{
  tree type = TREE_TYPE (exp), op0, op1;
  enum tree_code code;

  code = TREE_CODE (exp);
  if (exp_range)
    {
      *exp_range = type;
      if (code == SSA_NAME)
	{
	  wide_int var_min, var_max;
	  value_range_kind vr_kind = get_range_info (exp, &var_min, &var_max);
	  wide_int var_nonzero = get_nonzero_bits (exp);
	  vr_kind = intersect_range_with_nonzero_bits (vr_kind,
						       &var_min, &var_max,
						       var_nonzero,
						       TYPE_SIGN (type));
	  if (vr_kind == VR_RANGE)
	    *exp_range = value_range (type, var_min, var_max);
	}
    }

  if (!tree_is_chrec (exp)
      && get_gimple_rhs_class (TREE_CODE (exp)) != GIMPLE_TERNARY_RHS)
    {
      extract_ops_from_tree (exp, &code, &op0, &op1);
      if (split_constant_offset_1 (type, op0, code, op1, var, off,
				   exp_range, cache, limit))
	return;
    }

  *var = exp;
  if (INTEGRAL_TYPE_P (type))
    *var = fold_convert (sizetype, *var);
  *off = ssize_int (0);
  if (exp_range && code != SSA_NAME)
    {
      wide_int var_min, var_max;
      if (determine_value_range (exp, &var_min, &var_max) == VR_RANGE)
	*exp_range = value_range (type, var_min, var_max);
    }
}

/* Expresses EXP as VAR + OFF, where OFF is a constant.  VAR has the same
   type as EXP while OFF has type ssizetype.  */

void
split_constant_offset (tree exp, tree *var, tree *off)
{
  unsigned limit = param_ssa_name_def_chain_limit;
  static hash_map<tree, std::pair<tree, tree> > *cache;
  if (!cache)
    cache = new hash_map<tree, std::pair<tree, tree> > (37);
  split_constant_offset (exp, var, off, nullptr, *cache, &limit);
  *var = fold_convert (TREE_TYPE (exp), *var);
  cache->empty ();
}

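/* For example, if EXP is the int expression i_1 + 5, the public
   split_constant_offset above sets *VAR to i_1 (converted back to
   int) and *OFF to the ssizetype constant 5; for a pointer expression
   such as &a[4] with 4-byte array elements, it sets *VAR to &a and
   *OFF to 16.  */
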
/* Returns the address ADDR of an object in a canonical shape (without nop
   casts, and with type of pointer to the object).  */

static tree
canonicalize_base_object_address (tree addr)
{
  tree orig = addr;

  STRIP_NOPS (addr);

  /* The base address may be obtained by casting from integer, in that case
     keep the cast.  */
  if (!POINTER_TYPE_P (TREE_TYPE (addr)))
    return orig;

  if (TREE_CODE (addr) != ADDR_EXPR)
    return addr;

  return build_fold_addr_expr (TREE_OPERAND (addr, 0));
}

/* Analyze the behavior of memory reference REF within STMT.
   There are two modes:

   - BB analysis.  In this case we simply split the address into base,
     init and offset components, without reference to any containing loop.
     The resulting base and offset are general expressions and they can
     vary arbitrarily from one iteration of the containing loop to the next.
     The step is always zero.

   - loop analysis.  In this case we analyze the reference both wrt LOOP
     and on the basis that the reference occurs (is "used") in LOOP;
     see the comment above analyze_scalar_evolution_in_loop for more
     information about this distinction.  The base, init, offset and
     step fields are all invariant in LOOP.

   Perform BB analysis if LOOP is null, or if LOOP is the function's
   dummy outermost loop.  In other cases perform loop analysis.

   Return true if the analysis succeeded and store the results in DRB if so.
   BB analysis can only fail for bitfield or reversed-storage accesses.  */

opt_result
dr_analyze_innermost (innermost_loop_behavior *drb, tree ref,
		      class loop *loop, const gimple *stmt)
{
  poly_int64 pbitsize, pbitpos;
  tree base, poffset;
  machine_mode pmode;
  int punsignedp, preversep, pvolatilep;
  affine_iv base_iv, offset_iv;
  tree init, dinit, step;
  bool in_loop = (loop && loop->num);

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "analyze_innermost: ");

  base = get_inner_reference (ref, &pbitsize, &pbitpos, &poffset, &pmode,
			      &punsignedp, &preversep, &pvolatilep);
  gcc_assert (base != NULL_TREE);

  poly_int64 pbytepos;
  if (!multiple_p (pbitpos, BITS_PER_UNIT, &pbytepos))
    return opt_result::failure_at (stmt,
				   "failed: bit offset alignment.\n");

  if (preversep)
    return opt_result::failure_at (stmt,
				   "failed: reverse storage order.\n");

  /* Calculate the alignment and misalignment for the inner reference.  */
  unsigned HOST_WIDE_INT bit_base_misalignment;
  unsigned int bit_base_alignment;
  get_object_alignment_1 (base, &bit_base_alignment, &bit_base_misalignment);

  /* There are no bitfield references remaining in BASE, so the values
     we got back must be whole bytes.  */
  gcc_assert (bit_base_alignment % BITS_PER_UNIT == 0
	      && bit_base_misalignment % BITS_PER_UNIT == 0);
  unsigned int base_alignment = bit_base_alignment / BITS_PER_UNIT;
  poly_int64 base_misalignment = bit_base_misalignment / BITS_PER_UNIT;

  if (TREE_CODE (base) == MEM_REF)
    {
      if (!integer_zerop (TREE_OPERAND (base, 1)))
	{
	  /* Subtract MOFF from the base and add it to POFFSET instead.
	     Adjust the misalignment to reflect the amount we subtracted.  */
	  poly_offset_int moff = mem_ref_offset (base);
	  base_misalignment -= moff.force_shwi ();
	  tree mofft = wide_int_to_tree (sizetype, moff);
	  if (!poffset)
	    poffset = mofft;
	  else
	    poffset = size_binop (PLUS_EXPR, poffset, mofft);
	}
      base = TREE_OPERAND (base, 0);
    }
  else
    base = build_fold_addr_expr (base);

  if (in_loop)
    {
      if (!simple_iv (loop, loop, base, &base_iv, true))
	return opt_result::failure_at
	  (stmt, "failed: evolution of base is not affine.\n");
    }
  else
    {
      base_iv.base = base;
      base_iv.step = ssize_int (0);
      base_iv.no_overflow = true;
    }

  if (!poffset)
    {
      offset_iv.base = ssize_int (0);
      offset_iv.step = ssize_int (0);
    }
  else
    {
      if (!in_loop)
	{
	  offset_iv.base = poffset;
	  offset_iv.step = ssize_int (0);
	}
      else if (!simple_iv (loop, loop, poffset, &offset_iv, true))
	return opt_result::failure_at
	  (stmt, "failed: evolution of offset is not affine.\n");
    }

  init = ssize_int (pbytepos);

  /* Subtract any constant component from the base and add it to INIT instead.
     Adjust the misalignment to reflect the amount we subtracted.  */
  split_constant_offset (base_iv.base, &base_iv.base, &dinit);
  init = size_binop (PLUS_EXPR, init, dinit);
  base_misalignment -= TREE_INT_CST_LOW (dinit);

  split_constant_offset (offset_iv.base, &offset_iv.base, &dinit);
  init = size_binop (PLUS_EXPR, init, dinit);

  step = size_binop (PLUS_EXPR,
		     fold_convert (ssizetype, base_iv.step),
		     fold_convert (ssizetype, offset_iv.step));

  base = canonicalize_base_object_address (base_iv.base);

  /* See if get_pointer_alignment can guarantee a higher alignment than
     the one we calculated above.  */
  unsigned HOST_WIDE_INT alt_misalignment;
  unsigned int alt_alignment;
  get_pointer_alignment_1 (base, &alt_alignment, &alt_misalignment);

  /* As above, these values must be whole bytes.  */
  gcc_assert (alt_alignment % BITS_PER_UNIT == 0
	      && alt_misalignment % BITS_PER_UNIT == 0);
  alt_alignment /= BITS_PER_UNIT;
  alt_misalignment /= BITS_PER_UNIT;

  if (base_alignment < alt_alignment)
    {
      base_alignment = alt_alignment;
      base_misalignment = alt_misalignment;
    }

  drb->base_address = base;
  drb->offset = fold_convert (ssizetype, offset_iv.base);
  drb->init = init;
  drb->step = step;
  if (known_misalignment (base_misalignment, base_alignment,
			  &drb->base_misalignment))
    drb->base_alignment = base_alignment;
  else
    {
      drb->base_alignment = known_alignment (base_misalignment);
      drb->base_misalignment = 0;
    }
  drb->offset_alignment = highest_pow2_factor (offset_iv.base);
  drb->step_alignment = highest_pow2_factor (step);

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "success.\n");

  return opt_result::success ();
}

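/* As an illustration: for the reference a[i] in a loop whose induction
   variable i runs from 0 upwards in steps of 1 over 4-byte elements,
   loop analysis above yields base_address == &a, offset == 0,
   init == 0 and step == 4; BB analysis of the same reference instead
   leaves the variable index in the offset and sets the step to 0.  */
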
/* Return true if OP is a valid component reference for a DR access
   function.  This accepts a subset of what handled_component_p accepts.  */

static bool
access_fn_component_p (tree op)
{
  switch (TREE_CODE (op))
    {
    case REALPART_EXPR:
    case IMAGPART_EXPR:
    case ARRAY_REF:
      return true;

    case COMPONENT_REF:
      return TREE_CODE (TREE_TYPE (TREE_OPERAND (op, 0))) == RECORD_TYPE;

    default:
      return false;
    }
}

/* Return whether BASE can serve as the base of a component reference
   that satisfies access_fn_component_p.  */

static bool
base_supports_access_fn_components_p (tree base)
{
  switch (TREE_CODE (TREE_TYPE (base)))
    {
    case COMPLEX_TYPE:
    case ARRAY_TYPE:
    case RECORD_TYPE:
      return true;
    default:
      return false;
    }
}

/* Determines the base object and the list of indices of memory reference
   DR, analyzed in LOOP and instantiated before NEST.  */

static void
dr_analyze_indices (struct data_reference *dr, edge nest, loop_p loop)
{
  vec<tree> access_fns = vNULL;
  tree ref, op;
  tree base, off, access_fn;

  /* If analyzing a basic-block there are no indices to analyze
     and thus no access functions.  */
  if (!nest)
    {
      DR_BASE_OBJECT (dr) = DR_REF (dr);
      DR_ACCESS_FNS (dr).create (0);
      return;
    }

  ref = DR_REF (dr);

  /* REALPART_EXPR and IMAGPART_EXPR can be handled like accesses
     into a two element array with a constant index.  The base is
     then just the immediate underlying object.  */
  if (TREE_CODE (ref) == REALPART_EXPR)
    {
      ref = TREE_OPERAND (ref, 0);
      access_fns.safe_push (integer_zero_node);
    }
  else if (TREE_CODE (ref) == IMAGPART_EXPR)
    {
      ref = TREE_OPERAND (ref, 0);
      access_fns.safe_push (integer_one_node);
    }

  /* Analyze access functions of dimensions we know to be independent.
     The list of component references handled here should be kept in
     sync with access_fn_component_p.  */
  while (handled_component_p (ref))
    {
      if (TREE_CODE (ref) == ARRAY_REF)
	{
	  op = TREE_OPERAND (ref, 1);
	  access_fn = analyze_scalar_evolution (loop, op);
	  access_fn = instantiate_scev (nest, loop, access_fn);
	  access_fns.safe_push (access_fn);
	}
      else if (TREE_CODE (ref) == COMPONENT_REF
	       && TREE_CODE (TREE_TYPE (TREE_OPERAND (ref, 0))) == RECORD_TYPE)
	{
	  /* For COMPONENT_REFs of records (but not unions!) use the
	     FIELD_DECL offset as constant access function so we can
	     disambiguate a[i].f1 and a[i].f2.  */
	  tree off = component_ref_field_offset (ref);
	  off = size_binop (PLUS_EXPR,
			    size_binop (MULT_EXPR,
					fold_convert (bitsizetype, off),
					bitsize_int (BITS_PER_UNIT)),
			    DECL_FIELD_BIT_OFFSET (TREE_OPERAND (ref, 1)));
	  access_fns.safe_push (off);
	}
      else
	/* If we have an unhandled component we could not translate
	   to an access function stop analyzing.  We have determined
	   our base object in this case.  */
	break;

      ref = TREE_OPERAND (ref, 0);
    }

  /* If the address operand of a MEM_REF base has an evolution in the
     analyzed nest, add it as an additional independent access-function.  */
  if (TREE_CODE (ref) == MEM_REF)
    {
      op = TREE_OPERAND (ref, 0);
      access_fn = analyze_scalar_evolution (loop, op);
      access_fn = instantiate_scev (nest, loop, access_fn);
      if (TREE_CODE (access_fn) == POLYNOMIAL_CHREC)
	{
	  tree orig_type;
	  tree memoff = TREE_OPERAND (ref, 1);
	  base = initial_condition (access_fn);
	  orig_type = TREE_TYPE (base);
	  STRIP_USELESS_TYPE_CONVERSION (base);
	  split_constant_offset (base, &base, &off);
	  STRIP_USELESS_TYPE_CONVERSION (base);
	  /* Fold the MEM_REF offset into the evolution's initial
	     value to make more bases comparable.  */
	  if (!integer_zerop (memoff))
	    {
	      off = size_binop (PLUS_EXPR, off,
				fold_convert (ssizetype, memoff));
	      memoff = build_int_cst (TREE_TYPE (memoff), 0);
	    }
	  /* Adjust the offset so it is a multiple of the access type
	     size and thus we separate bases that can possibly be used
	     to produce partial overlaps (which the access_fn machinery
	     cannot handle).  */
	  wide_int rem;
	  if (TYPE_SIZE_UNIT (TREE_TYPE (ref))
	      && TREE_CODE (TYPE_SIZE_UNIT (TREE_TYPE (ref))) == INTEGER_CST
	      && !integer_zerop (TYPE_SIZE_UNIT (TREE_TYPE (ref))))
	    rem = wi::mod_trunc
	      (wi::to_wide (off),
	       wi::to_wide (TYPE_SIZE_UNIT (TREE_TYPE (ref))),
	       SIGNED);
	  else
	    /* If we can't compute the remainder simply force the initial
	       condition to zero.  */
	    rem = wi::to_wide (off);
	  off = wide_int_to_tree (ssizetype, wi::to_wide (off) - rem);
	  memoff = wide_int_to_tree (TREE_TYPE (memoff), rem);
	  /* And finally replace the initial condition.  */
	  access_fn = chrec_replace_initial_condition
	    (access_fn, fold_convert (orig_type, off));
	  /* ???  This is still not a suitable base object for
	     dr_may_alias_p - the base object needs to be an
	     access that covers the object as a whole.  With
	     an evolution in the pointer this cannot be
	     guaranteed.
	     As a band-aid, mark the access so we can special-case
	     it in dr_may_alias_p.  */
	  tree old = ref;
	  ref = fold_build2_loc (EXPR_LOCATION (ref),
				 MEM_REF, TREE_TYPE (ref),
				 base, memoff);
	  MR_DEPENDENCE_CLIQUE (ref) = MR_DEPENDENCE_CLIQUE (old);
	  MR_DEPENDENCE_BASE (ref) = MR_DEPENDENCE_BASE (old);
	  DR_UNCONSTRAINED_BASE (dr) = true;
	  access_fns.safe_push (access_fn);
	}
    }
  else if (DECL_P (ref))
    {
      /* Canonicalize DR_BASE_OBJECT to MEM_REF form.  */
      ref = build2 (MEM_REF, TREE_TYPE (ref),
		    build_fold_addr_expr (ref),
		    build_int_cst (reference_alias_ptr_type (ref), 0));
    }

  DR_BASE_OBJECT (dr) = ref;
  DR_ACCESS_FNS (dr) = access_fns;
}

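/* For example, for the reference a[i].f analyzed in its loop nest,
   the walk above first pushes the constant bit offset of field f
   (so that a[i].f1 and a[i].f2 can be disambiguated) and then the
   scalar evolution {0, +, 1}_1 of the index i; the base object is
   the declaration a, canonicalized to MEM_REF form.  */
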
/* Extracts the alias analysis information from the memory reference DR.  */

static void
dr_analyze_alias (struct data_reference *dr)
{
  tree ref = DR_REF (dr);
  tree base = get_base_address (ref), addr;

  if (INDIRECT_REF_P (base)
      || TREE_CODE (base) == MEM_REF)
    {
      addr = TREE_OPERAND (base, 0);
      if (TREE_CODE (addr) == SSA_NAME)
	DR_PTR_INFO (dr) = SSA_NAME_PTR_INFO (addr);
    }
}

/* Frees data reference DR.  */

void
free_data_ref (data_reference_p dr)
{
  DR_ACCESS_FNS (dr).release ();
  free (dr);
}

/* Analyze memory reference MEMREF, which is accessed in STMT.
   The reference is a read if IS_READ is true, otherwise it is a write.
   IS_CONDITIONAL_IN_STMT indicates that the reference is conditional
   within STMT, i.e. that it might not occur even if STMT is executed
   and runs to completion.

   Return the data_reference description of MEMREF.  NEST is the outermost
   loop in which the reference should be instantiated, LOOP is the loop
   in which the data reference should be analyzed.  */

struct data_reference *
create_data_ref (edge nest, loop_p loop, tree memref, gimple *stmt,
		 bool is_read, bool is_conditional_in_stmt)
{
  struct data_reference *dr;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Creating dr for ");
      print_generic_expr (dump_file, memref, TDF_SLIM);
      fprintf (dump_file, "\n");
    }

  dr = XCNEW (struct data_reference);
  DR_STMT (dr) = stmt;
  DR_REF (dr) = memref;
  DR_IS_READ (dr) = is_read;
  DR_IS_CONDITIONAL_IN_STMT (dr) = is_conditional_in_stmt;

  dr_analyze_innermost (&DR_INNERMOST (dr), memref,
			nest != NULL ? loop : NULL, stmt);
  dr_analyze_indices (dr, nest, loop);
  dr_analyze_alias (dr);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      unsigned i;
      fprintf (dump_file, "\tbase_address: ");
      print_generic_expr (dump_file, DR_BASE_ADDRESS (dr), TDF_SLIM);
      fprintf (dump_file, "\n\toffset from base address: ");
      print_generic_expr (dump_file, DR_OFFSET (dr), TDF_SLIM);
      fprintf (dump_file, "\n\tconstant offset from base address: ");
      print_generic_expr (dump_file, DR_INIT (dr), TDF_SLIM);
      fprintf (dump_file, "\n\tstep: ");
      print_generic_expr (dump_file, DR_STEP (dr), TDF_SLIM);
      fprintf (dump_file, "\n\tbase alignment: %d", DR_BASE_ALIGNMENT (dr));
      fprintf (dump_file, "\n\tbase misalignment: %d",
	       DR_BASE_MISALIGNMENT (dr));
      fprintf (dump_file, "\n\toffset alignment: %d",
	       DR_OFFSET_ALIGNMENT (dr));
      fprintf (dump_file, "\n\tstep alignment: %d", DR_STEP_ALIGNMENT (dr));
      fprintf (dump_file, "\n\tbase_object: ");
      print_generic_expr (dump_file, DR_BASE_OBJECT (dr), TDF_SLIM);
      fprintf (dump_file, "\n");
      for (i = 0; i < DR_NUM_DIMENSIONS (dr); i++)
	{
	  fprintf (dump_file, "\tAccess function %d: ", i);
	  print_generic_stmt (dump_file, DR_ACCESS_FN (dr, i), TDF_SLIM);
	}
    }

  return dr;
}

/* A helper function that computes the order between two tree expressions
   T1 and T2.  It is used in comparator functions sorting objects based
   on the order of tree expressions.  The function returns -1, 0, or 1.  */

int
data_ref_compare_tree (tree t1, tree t2)
{
  int i, cmp;
  enum tree_code code;
  char tclass;

  if (t1 == t2)
    return 0;
  if (t1 == NULL)
    return -1;
  if (t2 == NULL)
    return 1;

  STRIP_USELESS_TYPE_CONVERSION (t1);
  STRIP_USELESS_TYPE_CONVERSION (t2);
  if (t1 == t2)
    return 0;

  if (TREE_CODE (t1) != TREE_CODE (t2)
      && ! (CONVERT_EXPR_P (t1) && CONVERT_EXPR_P (t2)))
    return TREE_CODE (t1) < TREE_CODE (t2) ? -1 : 1;

  code = TREE_CODE (t1);
  switch (code)
    {
    case INTEGER_CST:
      return tree_int_cst_compare (t1, t2);

    case STRING_CST:
      if (TREE_STRING_LENGTH (t1) != TREE_STRING_LENGTH (t2))
	return TREE_STRING_LENGTH (t1) < TREE_STRING_LENGTH (t2) ? -1 : 1;
      return memcmp (TREE_STRING_POINTER (t1), TREE_STRING_POINTER (t2),
		     TREE_STRING_LENGTH (t1));

    case SSA_NAME:
      if (SSA_NAME_VERSION (t1) != SSA_NAME_VERSION (t2))
	return SSA_NAME_VERSION (t1) < SSA_NAME_VERSION (t2) ? -1 : 1;
      break;

    default:
      if (POLY_INT_CST_P (t1))
	return compare_sizes_for_sort (wi::to_poly_widest (t1),
				       wi::to_poly_widest (t2));

      tclass = TREE_CODE_CLASS (code);

      /* For decls, compare their UIDs.  */
      if (tclass == tcc_declaration)
	{
	  if (DECL_UID (t1) != DECL_UID (t2))
	    return DECL_UID (t1) < DECL_UID (t2) ? -1 : 1;
	  break;
	}
      /* For expressions, compare their operands recursively.  */
      else if (IS_EXPR_CODE_CLASS (tclass))
	{
	  for (i = TREE_OPERAND_LENGTH (t1) - 1; i >= 0; --i)
	    {
	      cmp = data_ref_compare_tree (TREE_OPERAND (t1, i),
					   TREE_OPERAND (t2, i));
	      if (cmp != 0)
		return cmp;
	    }
	}
      else
	gcc_unreachable ();
    }

  return 0;
}

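/* For instance, integer constants are ordered by value and SSA names
   by version, so for SSA names _3 and _7 the function returns -1 for
   (_3, _7), 1 for (_7, _3) and 0 for identical operands, giving qsort
   callers a deterministic total order on the trees appearing in data
   references.  */
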
/* Return TRUE if it's possible to resolve data dependence DDR by a
   runtime alias check.  */

opt_result
runtime_alias_check_p (ddr_p ddr, class loop *loop, bool speed_p)
{
  if (dump_enabled_p ())
    dump_printf (MSG_NOTE,
		 "consider run-time aliasing test between %T and %T\n",
		 DR_REF (DDR_A (ddr)), DR_REF (DDR_B (ddr)));

  if (!speed_p)
    return opt_result::failure_at (DR_STMT (DDR_A (ddr)),
				   "runtime alias check not supported when"
				   " optimizing for size.\n");

  /* FORNOW: We don't support versioning with outer-loop in either
     vectorization or loop distribution.  */
  if (loop != NULL && loop->inner != NULL)
    return opt_result::failure_at (DR_STMT (DDR_A (ddr)),
				   "runtime alias check not supported for"
				   " outer loop.\n");

  return opt_result::success ();
}

/* Operator == between two dr_with_seg_len objects.

   This equality operator is used to make sure two data refs
   are the same, so that we will consider combining the aliasing
   checks of those two pairs of data dependent data refs.  */

static bool
operator == (const dr_with_seg_len& d1,
	     const dr_with_seg_len& d2)
{
  return (operand_equal_p (DR_BASE_ADDRESS (d1.dr),
			   DR_BASE_ADDRESS (d2.dr), 0)
	  && data_ref_compare_tree (DR_OFFSET (d1.dr), DR_OFFSET (d2.dr)) == 0
	  && data_ref_compare_tree (DR_INIT (d1.dr), DR_INIT (d2.dr)) == 0
	  && data_ref_compare_tree (d1.seg_len, d2.seg_len) == 0
	  && known_eq (d1.access_size, d2.access_size)
	  && d1.align == d2.align);
}

/* Comparison function for sorting objects of dr_with_seg_len_pair_t
   so that we can combine aliasing checks in one scan.  */

static int
comp_dr_with_seg_len_pair (const void *pa_, const void *pb_)
{
  const dr_with_seg_len_pair_t* pa = (const dr_with_seg_len_pair_t *) pa_;
  const dr_with_seg_len_pair_t* pb = (const dr_with_seg_len_pair_t *) pb_;
  const dr_with_seg_len &a1 = pa->first, &a2 = pa->second;
  const dr_with_seg_len &b1 = pb->first, &b2 = pb->second;

  /* For DR pairs (a, b) and (c, d), we only consider merging the alias
     checks if a and c have the same base address and step, and b and d
     have the same base address and step.  Therefore, if either a&c or
     b&d don't have the same address and step, we don't care about the
     order of those two pairs after sorting.  */
  int comp_res;

  if ((comp_res = data_ref_compare_tree (DR_BASE_ADDRESS (a1.dr),
					 DR_BASE_ADDRESS (b1.dr))) != 0)
    return comp_res;
  if ((comp_res = data_ref_compare_tree (DR_BASE_ADDRESS (a2.dr),
					 DR_BASE_ADDRESS (b2.dr))) != 0)
    return comp_res;
  if ((comp_res = data_ref_compare_tree (DR_STEP (a1.dr),
					 DR_STEP (b1.dr))) != 0)
    return comp_res;
  if ((comp_res = data_ref_compare_tree (DR_STEP (a2.dr),
					 DR_STEP (b2.dr))) != 0)
    return comp_res;
  if ((comp_res = data_ref_compare_tree (DR_OFFSET (a1.dr),
					 DR_OFFSET (b1.dr))) != 0)
    return comp_res;
  if ((comp_res = data_ref_compare_tree (DR_INIT (a1.dr),
					 DR_INIT (b1.dr))) != 0)
    return comp_res;
  if ((comp_res = data_ref_compare_tree (DR_OFFSET (a2.dr),
					 DR_OFFSET (b2.dr))) != 0)
    return comp_res;
  if ((comp_res = data_ref_compare_tree (DR_INIT (a2.dr),
					 DR_INIT (b2.dr))) != 0)
    return comp_res;

  return 0;
}

/* Dump information about ALIAS_PAIR, indenting each line by INDENT.  */

static void
dump_alias_pair (dr_with_seg_len_pair_t *alias_pair, const char *indent)
{
  dump_printf (MSG_NOTE, "%sreference: %T vs. %T\n", indent,
	       DR_REF (alias_pair->first.dr),
	       DR_REF (alias_pair->second.dr));

  dump_printf (MSG_NOTE, "%ssegment length: %T", indent,
	       alias_pair->first.seg_len);
  if (!operand_equal_p (alias_pair->first.seg_len,
			alias_pair->second.seg_len, 0))
    dump_printf (MSG_NOTE, " vs. %T", alias_pair->second.seg_len);

  dump_printf (MSG_NOTE, "\n%saccess size: ", indent);
  dump_dec (MSG_NOTE, alias_pair->first.access_size);
  if (maybe_ne (alias_pair->first.access_size, alias_pair->second.access_size))
    {
      dump_printf (MSG_NOTE, " vs. ");
      dump_dec (MSG_NOTE, alias_pair->second.access_size);
    }

  dump_printf (MSG_NOTE, "\n%salignment: %d", indent,
	       alias_pair->first.align);
  if (alias_pair->first.align != alias_pair->second.align)
    dump_printf (MSG_NOTE, " vs. %d", alias_pair->second.align);

  dump_printf (MSG_NOTE, "\n%sflags: ", indent);
  if (alias_pair->flags & DR_ALIAS_RAW)
    dump_printf (MSG_NOTE, " RAW");
  if (alias_pair->flags & DR_ALIAS_WAR)
    dump_printf (MSG_NOTE, " WAR");
  if (alias_pair->flags & DR_ALIAS_WAW)
    dump_printf (MSG_NOTE, " WAW");
  if (alias_pair->flags & DR_ALIAS_ARBITRARY)
    dump_printf (MSG_NOTE, " ARBITRARY");
  if (alias_pair->flags & DR_ALIAS_SWAPPED)
    dump_printf (MSG_NOTE, " SWAPPED");
  if (alias_pair->flags & DR_ALIAS_UNSWAPPED)
    dump_printf (MSG_NOTE, " UNSWAPPED");
  if (alias_pair->flags & DR_ALIAS_MIXED_STEPS)
    dump_printf (MSG_NOTE, " MIXED_STEPS");
  if (alias_pair->flags == 0)
    dump_printf (MSG_NOTE, " <none>");
  dump_printf (MSG_NOTE, "\n");
}

/* Merge alias checks recorded in ALIAS_PAIRS and remove redundant ones.
   FACTOR is the number of iterations in which each data reference is
   accessed.

   Basically, for each pair of dependent data refs store_ptr_0 & load_ptr_0,
   we create an expression:

   ((store_ptr_0 + store_segment_length_0) <= load_ptr_0)
   || ((load_ptr_0 + load_segment_length_0) <= store_ptr_0)

   for aliasing checks.  However, in some cases we can decrease the number
   of checks by combining two checks into one.  For example, suppose we have
   another pair of data refs store_ptr_0 & load_ptr_1, and if the following
   condition is satisfied:

   load_ptr_0 < load_ptr_1  &&
   load_ptr_1 - load_ptr_0 - load_segment_length_0 < store_segment_length_0

   (this condition means that, in each iteration of the vectorized loop,
   the memory accessed through store_ptr_0 cannot be between the memory of
   load_ptr_0 and load_ptr_1.)

   we then can use only the following expression to finish the aliasing
   checks between store_ptr_0 & load_ptr_0 and store_ptr_0 & load_ptr_1:

   ((store_ptr_0 + store_segment_length_0) <= load_ptr_0)
   || ((load_ptr_1 + load_segment_length_1) <= store_ptr_0)

   Note that we only consider the case that load_ptr_0 and load_ptr_1
   have the same base address.  */
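
/* As a concrete instance of the merging condition above: if
   load_ptr_1 == load_ptr_0 + 32, load_segment_length_0 == 16 and
   store_segment_length_0 == 64, then 32 - 16 < 64 holds, so the two
   checks against store_ptr_0 collapse into the single test

   ((store_ptr_0 + store_segment_length_0) <= load_ptr_0)
   || ((load_ptr_1 + load_segment_length_1) <= store_ptr_0)

   covering both loads at once.  */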

void
prune_runtime_alias_test_list (vec<dr_with_seg_len_pair_t> *alias_pairs,
			       poly_uint64)
{
  if (alias_pairs->is_empty ())
    return;

  /* Canonicalize each pair so that the base components are ordered wrt
     data_ref_compare_tree.  This allows the loop below to merge more
     cases.  */
  unsigned int i;
  dr_with_seg_len_pair_t *alias_pair;
  FOR_EACH_VEC_ELT (*alias_pairs, i, alias_pair)
    {
      data_reference_p dr_a = alias_pair->first.dr;
      data_reference_p dr_b = alias_pair->second.dr;
      int comp_res = data_ref_compare_tree (DR_BASE_ADDRESS (dr_a),
					    DR_BASE_ADDRESS (dr_b));
      if (comp_res == 0)
	comp_res = data_ref_compare_tree (DR_OFFSET (dr_a), DR_OFFSET (dr_b));
      if (comp_res == 0)
	comp_res = data_ref_compare_tree (DR_INIT (dr_a), DR_INIT (dr_b));
      if (comp_res > 0)
	{
	  std::swap (alias_pair->first, alias_pair->second);
	  alias_pair->flags |= DR_ALIAS_SWAPPED;
	}
      else
	alias_pair->flags |= DR_ALIAS_UNSWAPPED;
    }

  /* Sort the collected data ref pairs so that we can scan them once to
     combine all possible aliasing checks.  */
  alias_pairs->qsort (comp_dr_with_seg_len_pair);

  /* Scan the sorted dr pairs and check if we can combine alias checks
     of two neighboring dr pairs.  */
  unsigned int last = 0;
  for (i = 1; i < alias_pairs->length (); ++i)
    {
      /* Deal with two ddrs (dr_a1, dr_b1) and (dr_a2, dr_b2).  */
      dr_with_seg_len_pair_t *alias_pair1 = &(*alias_pairs)[last];
      dr_with_seg_len_pair_t *alias_pair2 = &(*alias_pairs)[i];

      dr_with_seg_len *dr_a1 = &alias_pair1->first;
      dr_with_seg_len *dr_b1 = &alias_pair1->second;
      dr_with_seg_len *dr_a2 = &alias_pair2->first;
      dr_with_seg_len *dr_b2 = &alias_pair2->second;

      /* Remove duplicate data ref pairs.  */
      if (*dr_a1 == *dr_a2 && *dr_b1 == *dr_b2)
	{
	  if (dump_enabled_p ())
	    dump_printf (MSG_NOTE, "found equal ranges %T, %T and %T, %T\n",
			 DR_REF (dr_a1->dr), DR_REF (dr_b1->dr),
			 DR_REF (dr_a2->dr), DR_REF (dr_b2->dr));
	  alias_pair1->flags |= alias_pair2->flags;
	  continue;
	}

      /* Assume that we won't be able to merge the pairs, then correct
	 if we do.  */
      last += 1;
      if (last != i)
	(*alias_pairs)[last] = (*alias_pairs)[i];

      if (*dr_a1 == *dr_a2 || *dr_b1 == *dr_b2)
	{
1858 /* We consider the case in which DR_B1 and DR_B2 are the same memref,
1859 and DR_A1 and DR_A2 are two consecutive memrefs. */
1860 if (*dr_a1 == *dr_a2)
1861 {
1862 std::swap (dr_a1, dr_b1);
1863 std::swap (dr_a2, dr_b2);
1864 }
1865
1866 poly_int64 init_a1, init_a2;
1867 /* Only consider cases in which the distance between the initial
1868 DR_A1 and the initial DR_A2 is known at compile time. */
1869 if (!operand_equal_p (DR_BASE_ADDRESS (dr_a1->dr),
1870 DR_BASE_ADDRESS (dr_a2->dr), 0)
1871 || !operand_equal_p (DR_OFFSET (dr_a1->dr),
1872 DR_OFFSET (dr_a2->dr), 0)
1873 || !poly_int_tree_p (DR_INIT (dr_a1->dr), &init_a1)
1874 || !poly_int_tree_p (DR_INIT (dr_a2->dr), &init_a2))
1875 continue;
1876
1877 /* Don't combine if we can't tell which one comes first. */
1878 if (!ordered_p (init_a1, init_a2))
1879 continue;
1880
1881 /* Work out what the segment length would be if we did combine
1882 DR_A1 and DR_A2:
1883
1884 - If DR_A1 and DR_A2 have equal lengths, that length is
1885 also the combined length.
1886
1887 - If DR_A1 and DR_A2 both have negative "lengths", the combined
1888 length is the lower bound on those lengths.
1889
1890 - If DR_A1 and DR_A2 both have positive lengths, the combined
1891 length is the upper bound on those lengths.
1892
1893 Other cases are unlikely to give a useful combination.
1894
1895 The lengths both have sizetype, so the sign is taken from
1896 the step instead. */
1897 poly_uint64 new_seg_len = 0;
1898 bool new_seg_len_p = !operand_equal_p (dr_a1->seg_len,
1899 dr_a2->seg_len, 0);
1900 if (new_seg_len_p)
1901 {
1902 poly_uint64 seg_len_a1, seg_len_a2;
1903 if (!poly_int_tree_p (dr_a1->seg_len, &seg_len_a1)
1904 || !poly_int_tree_p (dr_a2->seg_len, &seg_len_a2))
1905 continue;
1906
1907 tree indicator_a = dr_direction_indicator (dr_a1->dr);
1908 if (TREE_CODE (indicator_a) != INTEGER_CST)
1909 continue;
1910
1911 tree indicator_b = dr_direction_indicator (dr_a2->dr);
1912 if (TREE_CODE (indicator_b) != INTEGER_CST)
1913 continue;
1914
1915 int sign_a = tree_int_cst_sgn (indicator_a);
1916 int sign_b = tree_int_cst_sgn (indicator_b);
1917
1918 if (sign_a <= 0 && sign_b <= 0)
1919 new_seg_len = lower_bound (seg_len_a1, seg_len_a2);
1920 else if (sign_a >= 0 && sign_b >= 0)
1921 new_seg_len = upper_bound (seg_len_a1, seg_len_a2);
1922 else
1923 continue;
1924 }
1925 /* At this point we're committed to merging the refs. */
1926
1927 /* Make sure dr_a1 starts left of dr_a2. */
1928 if (maybe_gt (init_a1, init_a2))
1929 {
1930 std::swap (*dr_a1, *dr_a2);
1931 std::swap (init_a1, init_a2);
1932 }
1933
1934 /* The DR_Bs are equal, so only the DR_As can introduce
1935 mixed steps. */
1936 if (!operand_equal_p (DR_STEP (dr_a1->dr), DR_STEP (dr_a2->dr), 0))
1937 alias_pair1->flags |= DR_ALIAS_MIXED_STEPS;
1938
1939 if (new_seg_len_p)
1940 {
1941 dr_a1->seg_len = build_int_cst (TREE_TYPE (dr_a1->seg_len),
1942 new_seg_len);
1943 dr_a1->align = MIN (dr_a1->align, known_alignment (new_seg_len));
1944 }
1945
1946 /* This is always positive due to the swap above. */
1947 poly_uint64 diff = init_a2 - init_a1;
1948
1949 /* The new check will start at DR_A1. Make sure that its access
1950 size encompasses the initial DR_A2. */
1951 if (maybe_lt (dr_a1->access_size, diff + dr_a2->access_size))
1952 {
1953 dr_a1->access_size = upper_bound (dr_a1->access_size,
1954 diff + dr_a2->access_size);
1955 unsigned int new_align = known_alignment (dr_a1->access_size);
1956 dr_a1->align = MIN (dr_a1->align, new_align);
1957 }
1958 if (dump_enabled_p ())
1959 dump_printf (MSG_NOTE, "merging ranges for %T, %T and %T, %T\n",
1960 DR_REF (dr_a1->dr), DR_REF (dr_b1->dr),
1961 DR_REF (dr_a2->dr), DR_REF (dr_b2->dr));
1962 alias_pair1->flags |= alias_pair2->flags;
1963 last -= 1;
1964 }
1965 }
1966 alias_pairs->truncate (last + 1);
1967
1968 /* Try to restore the original dr_with_seg_len order within each
1969 dr_with_seg_len_pair_t. If we ended up combining swapped and
1970 unswapped pairs into the same check, we have to invalidate any
1971 RAW, WAR and WAW information for it. */
1972 if (dump_enabled_p ())
1973 dump_printf (MSG_NOTE, "merged alias checks:\n");
1974 FOR_EACH_VEC_ELT (*alias_pairs, i, alias_pair)
1975 {
1976 unsigned int swap_mask = (DR_ALIAS_SWAPPED | DR_ALIAS_UNSWAPPED);
1977 unsigned int swapped = (alias_pair->flags & swap_mask);
1978 if (swapped == DR_ALIAS_SWAPPED)
1979 std::swap (alias_pair->first, alias_pair->second);
1980 else if (swapped != DR_ALIAS_UNSWAPPED)
1981 alias_pair->flags |= DR_ALIAS_ARBITRARY;
1982 alias_pair->flags &= ~swap_mask;
1983 if (dump_enabled_p ())
1984 dump_alias_pair (alias_pair, " ");
1985 }
1986 }
1987
1988 /* A subroutine of create_intersect_range_checks, with a subset of the
1989 same arguments. Try to use IFN_CHECK_RAW_PTRS and IFN_CHECK_WAR_PTRS
1990 to optimize cases in which the references form a simple RAW, WAR or
1991 WAW dependence. */
1992
1993 static bool
1994 create_ifn_alias_checks (tree *cond_expr,
1995 const dr_with_seg_len_pair_t &alias_pair)
1996 {
1997 const dr_with_seg_len& dr_a = alias_pair.first;
1998 const dr_with_seg_len& dr_b = alias_pair.second;
1999
2000 /* Check for cases in which:
2001
2002 (a) we have a known RAW, WAR or WAW dependence;
2003 (b) the accesses are well-ordered in both the original and new code
2004 (see the comment above the DR_ALIAS_* flags for details); and
2005 (c) the DR_STEPs describe all access pairs covered by ALIAS_PAIR. */
2006 if (alias_pair.flags & ~(DR_ALIAS_RAW | DR_ALIAS_WAR | DR_ALIAS_WAW))
2007 return false;
2008
2009 /* Make sure that both DRs access the same pattern of bytes,
2010 with a constant length and step. */
2011 poly_uint64 seg_len;
2012 if (!operand_equal_p (dr_a.seg_len, dr_b.seg_len, 0)
2013 || !poly_int_tree_p (dr_a.seg_len, &seg_len)
2014 || maybe_ne (dr_a.access_size, dr_b.access_size)
2015 || !operand_equal_p (DR_STEP (dr_a.dr), DR_STEP (dr_b.dr), 0)
2016 || !tree_fits_uhwi_p (DR_STEP (dr_a.dr)))
2017 return false;
2018
2019 unsigned HOST_WIDE_INT bytes = tree_to_uhwi (DR_STEP (dr_a.dr));
2020 tree addr_a = DR_BASE_ADDRESS (dr_a.dr);
2021 tree addr_b = DR_BASE_ADDRESS (dr_b.dr);
2022
2023 /* See whether the target supports what we want to do. WAW checks are
2024 equivalent to WAR checks here. */
2025 internal_fn ifn = (alias_pair.flags & DR_ALIAS_RAW
2026 ? IFN_CHECK_RAW_PTRS
2027 : IFN_CHECK_WAR_PTRS);
2028 unsigned int align = MIN (dr_a.align, dr_b.align);
2029 poly_uint64 full_length = seg_len + bytes;
2030 if (!internal_check_ptrs_fn_supported_p (ifn, TREE_TYPE (addr_a),
2031 full_length, align))
2032 {
2033 full_length = seg_len + dr_a.access_size;
2034 if (!internal_check_ptrs_fn_supported_p (ifn, TREE_TYPE (addr_a),
2035 full_length, align))
2036 return false;
2037 }
2038
2039 /* Commit to using this form of test. */
2040 addr_a = fold_build_pointer_plus (addr_a, DR_OFFSET (dr_a.dr));
2041 addr_a = fold_build_pointer_plus (addr_a, DR_INIT (dr_a.dr));
2042
2043 addr_b = fold_build_pointer_plus (addr_b, DR_OFFSET (dr_b.dr));
2044 addr_b = fold_build_pointer_plus (addr_b, DR_INIT (dr_b.dr));
2045
2046 *cond_expr = build_call_expr_internal_loc (UNKNOWN_LOCATION,
2047 ifn, boolean_type_node,
2048 4, addr_a, addr_b,
2049 size_int (full_length),
2050 size_int (align));
2051
2052 if (dump_enabled_p ())
2053 {
2054 if (ifn == IFN_CHECK_RAW_PTRS)
2055 dump_printf (MSG_NOTE, "using an IFN_CHECK_RAW_PTRS test\n");
2056 else
2057 dump_printf (MSG_NOTE, "using an IFN_CHECK_WAR_PTRS test\n");
2058 }
2059 return true;
2060 }
2061
2062 /* Try to generate a runtime condition that is true if ALIAS_PAIR is
2063 free of aliases, using a condition based on index values instead
2064 of a condition based on addresses. Return true on success,
2065 storing the condition in *COND_EXPR.
2066
2067 This can only be done if the two data references in ALIAS_PAIR access
2068 the same array object and the index is the only difference. For example,
2069 if the two data references are DR_A and DR_B:
2070
2071 DR_A DR_B
2072 data-ref arr[i] arr[j]
2073 base_object arr arr
2074 index {i_0, +, 1}_loop {j_0, +, 1}_loop
2075
2076 The addresses and their index are like:
2077
2078 |<- ADDR_A ->| |<- ADDR_B ->|
2079 ------------------------------------------------------->
2080 | | | | | | | | | |
2081 ------------------------------------------------------->
2082 i_0 ... i_0+4 j_0 ... j_0+4
2083
2084 We can create an expression based on the indices rather than the addresses:
2085
2086 (unsigned) (i_0 - j_0 + 3) <= 6
2087
2088 i.e. the indices are less than 4 apart.
2089
2090 Note that the evolution step of the index must be considered in the comparison. */
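
/* For example, with i_0 == 10 and j_0 == 8 the test above gives
   (unsigned) (10 - 8 + 3) == 5 <= 6, so an overlap is possible,
   whereas i_0 == 10 and j_0 == 3 gives (unsigned) (10 - 3 + 3) == 10 > 6,
   proving independence (a worked instance of the example above).  */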
2091
2092 static bool
2093 create_intersect_range_checks_index (class loop *loop, tree *cond_expr,
2094 const dr_with_seg_len_pair_t &alias_pair)
2095 {
2096 const dr_with_seg_len &dr_a = alias_pair.first;
2097 const dr_with_seg_len &dr_b = alias_pair.second;
2098 if ((alias_pair.flags & DR_ALIAS_MIXED_STEPS)
2099 || integer_zerop (DR_STEP (dr_a.dr))
2100 || integer_zerop (DR_STEP (dr_b.dr))
2101 || DR_NUM_DIMENSIONS (dr_a.dr) != DR_NUM_DIMENSIONS (dr_b.dr))
2102 return false;
2103
2104 poly_uint64 seg_len1, seg_len2;
2105 if (!poly_int_tree_p (dr_a.seg_len, &seg_len1)
2106 || !poly_int_tree_p (dr_b.seg_len, &seg_len2))
2107 return false;
2108
2109 if (!tree_fits_shwi_p (DR_STEP (dr_a.dr)))
2110 return false;
2111
2112 if (!operand_equal_p (DR_BASE_OBJECT (dr_a.dr), DR_BASE_OBJECT (dr_b.dr), 0))
2113 return false;
2114
2115 if (!operand_equal_p (DR_STEP (dr_a.dr), DR_STEP (dr_b.dr), 0))
2116 return false;
2117
2118 gcc_assert (TREE_CODE (DR_STEP (dr_a.dr)) == INTEGER_CST);
2119
2120 bool neg_step = tree_int_cst_compare (DR_STEP (dr_a.dr), size_zero_node) < 0;
2121 unsigned HOST_WIDE_INT abs_step = tree_to_shwi (DR_STEP (dr_a.dr));
2122 if (neg_step)
2123 {
2124 abs_step = -abs_step;
2125 seg_len1 = (-wi::to_poly_wide (dr_a.seg_len)).force_uhwi ();
2126 seg_len2 = (-wi::to_poly_wide (dr_b.seg_len)).force_uhwi ();
2127 }
2128
2129 /* Infer the number of iterations with which the memory segment is accessed
2130 by each DR. In other words, aliasing is checked if the memory segment
2131 accessed by DR_A in some number of iterations intersects with the memory
2132 segment accessed by DR_B in the same number of iterations.
2133 Note that the segment length is a linear function of the number of
2134 iterations, with DR_STEP as the coefficient. */
2135 poly_uint64 niter_len1, niter_len2;
2136 if (!can_div_trunc_p (seg_len1 + abs_step - 1, abs_step, &niter_len1)
2137 || !can_div_trunc_p (seg_len2 + abs_step - 1, abs_step, &niter_len2))
2138 return false;
2139
2140 /* Divide each access size by the byte step, rounding up. */
2141 poly_uint64 niter_access1, niter_access2;
2142 if (!can_div_trunc_p (dr_a.access_size + abs_step - 1,
2143 abs_step, &niter_access1)
2144 || !can_div_trunc_p (dr_b.access_size + abs_step - 1,
2145 abs_step, &niter_access2))
2146 return false;
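
/* For example, a 16-byte segment walked with abs_step == 4 gives
   niter_len == (16 + 4 - 1) / 4 == 4 iterations, and a 4-byte access
   gives niter_access == (4 + 4 - 1) / 4 == 1 (a worked instance of
   the divisions above).  */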
2147
2148 bool waw_or_war_p = (alias_pair.flags & ~(DR_ALIAS_WAR | DR_ALIAS_WAW)) == 0;
2149
2150 unsigned int i;
2151 for (i = 0; i < DR_NUM_DIMENSIONS (dr_a.dr); i++)
2152 {
2153 tree access1 = DR_ACCESS_FN (dr_a.dr, i);
2154 tree access2 = DR_ACCESS_FN (dr_b.dr, i);
2155 /* The two indices must be the same if they are not SCEVs, or are not
2156 SCEVs wrt the current loop being vectorized. */
2157 if (TREE_CODE (access1) != POLYNOMIAL_CHREC
2158 || TREE_CODE (access2) != POLYNOMIAL_CHREC
2159 || CHREC_VARIABLE (access1) != (unsigned)loop->num
2160 || CHREC_VARIABLE (access2) != (unsigned)loop->num)
2161 {
2162 if (operand_equal_p (access1, access2, 0))
2163 continue;
2164
2165 return false;
2166 }
2167 /* The two indices must have the same step. */
2168 if (!operand_equal_p (CHREC_RIGHT (access1), CHREC_RIGHT (access2), 0))
2169 return false;
2170
2171 tree idx_step = CHREC_RIGHT (access1);
2172 /* Index must have const step, otherwise DR_STEP won't be constant. */
2173 gcc_assert (TREE_CODE (idx_step) == INTEGER_CST);
2174 /* Index must evaluate in the same direction as DR. */
2175 gcc_assert (!neg_step || tree_int_cst_sign_bit (idx_step) == 1);
2176
2177 tree min1 = CHREC_LEFT (access1);
2178 tree min2 = CHREC_LEFT (access2);
2179 if (!types_compatible_p (TREE_TYPE (min1), TREE_TYPE (min2)))
2180 return false;
2181
2182 /* Ideally, aliasing could be checked against the loop's control IV, but
2183 we would need to prove a linear mapping between the control IV and the
2184 reference index. Although that should be true, we instead check
2185 against the (array) index of the data reference. Like the segment
2186 length, the index length is a linear function of the number of
2187 iterations, with idx_step as the coefficient, i.e., niter_len * idx_step. */
2188 offset_int abs_idx_step = offset_int::from (wi::to_wide (idx_step),
2189 SIGNED);
2190 if (neg_step)
2191 abs_idx_step = -abs_idx_step;
2192 poly_offset_int idx_len1 = abs_idx_step * niter_len1;
2193 poly_offset_int idx_len2 = abs_idx_step * niter_len2;
2194 poly_offset_int idx_access1 = abs_idx_step * niter_access1;
2195 poly_offset_int idx_access2 = abs_idx_step * niter_access2;
2196
2197 gcc_assert (known_ge (idx_len1, 0)
2198 && known_ge (idx_len2, 0)
2199 && known_ge (idx_access1, 0)
2200 && known_ge (idx_access2, 0));
2201
2202 /* Each access has the following pattern, with lengths measured
2203 in units of INDEX:
2204
2205 <-- idx_len -->
2206 <--- A: -ve step --->
2207 +-----+-------+-----+-------+-----+
2208 | n-1 | ..... | 0 | ..... | n-1 |
2209 +-----+-------+-----+-------+-----+
2210 <--- B: +ve step --->
2211 <-- idx_len -->
2212 |
2213 min
2214
2215 where "n" is the number of scalar iterations covered by the segment
2216 and where each access spans idx_access units.
2217
2218 A is the range of bytes accessed when the step is negative,
2219 B is the range when the step is positive.
2220
2221 When checking for general overlap, we need to test whether
2222 the range:
2223
2224 [min1 + low_offset1, min2 + high_offset1 + idx_access1 - 1]
2225
2226 overlaps:
2227
2228 [min2 + low_offset2, min2 + high_offset2 + idx_access2 - 1]
2229
2230 where:
2231
2232 low_offsetN = +ve step ? 0 : -idx_lenN;
2233 high_offsetN = +ve step ? idx_lenN : 0;
2234
2235 This is equivalent to testing whether:
2236
2237 min1 + low_offset1 <= min2 + high_offset2 + idx_access2 - 1
2238 && min2 + low_offset2 <= min1 + high_offset1 + idx_access1 - 1
2239
2240 Converting this into a single test, there is an overlap if:
2241
2242 0 <= min2 - min1 + bias <= limit
2243
2244 where bias = high_offset2 + idx_access2 - 1 - low_offset1
2245 limit = (high_offset1 - low_offset1 + idx_access1 - 1)
2246 + (high_offset2 - low_offset2 + idx_access2 - 1)
2247 i.e. limit = idx_len1 + idx_access1 - 1 + idx_len2 + idx_access2 - 1
2248
2249 Combining the tests requires limit to be computable in an unsigned
2250 form of the index type; if it isn't, we fall back to the usual
2251 pointer-based checks.
2252
2253 We can do better if DR_B is a write and if DR_A and DR_B are
2254 well-ordered in both the original and the new code (see the
2255 comment above the DR_ALIAS_* flags for details). In this case
2256 we know that for each i in [0, n-1], the write performed by
2257 access i of DR_B occurs after access numbers j<=i of DR_A in
2258 both the original and the new code. Any write or anti
2259 dependencies wrt those DR_A accesses are therefore maintained.
2260
2261 We just need to make sure that each individual write in DR_B does not
2262 overlap any higher-indexed access in DR_A; such DR_A accesses happen
2263 after the DR_B access in the original code but happen before it in
2264 the new code.
2265
2266 We know the steps for both accesses are equal, so by induction, we
2267 just need to test whether the first write of DR_B overlaps a later
2268 access of DR_A. In other words, we need to move min1 along by
2269 one iteration:
2270
2271 min1' = min1 + idx_step
2272
2273 and use the ranges:
2274
2275 [min1' + low_offset1', min1' + high_offset1' + idx_access1 - 1]
2276
2277 and:
2278
2279 [min2, min2 + idx_access2 - 1]
2280
2281 where:
2282
2283 low_offset1' = +ve step ? 0 : -(idx_len1 - |idx_step|)
2284 high_offset1' = +ve step ? idx_len1 - |idx_step| : 0. */
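
/* A scalar sketch of the test built below: the two-sided range test

     0 <= min2 - min1 + bias && min2 - min1 + bias <= limit

   is folded into the single unsigned comparison

     (unsigned) (min2 - min1 + bias) <= limit

   because a negative signed value wraps to a large unsigned one.  */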
2285 if (waw_or_war_p)
2286 idx_len1 -= abs_idx_step;
2287
2288 poly_offset_int limit = idx_len1 + idx_access1 - 1 + idx_access2 - 1;
2289 if (!waw_or_war_p)
2290 limit += idx_len2;
2291
2292 tree utype = unsigned_type_for (TREE_TYPE (min1));
2293 if (!wi::fits_to_tree_p (limit, utype))
2294 return false;
2295
2296 poly_offset_int low_offset1 = neg_step ? -idx_len1 : 0;
2297 poly_offset_int high_offset2 = neg_step || waw_or_war_p ? 0 : idx_len2;
2298 poly_offset_int bias = high_offset2 + idx_access2 - 1 - low_offset1;
2299 /* Equivalent to adding IDX_STEP to MIN1. */
2300 if (waw_or_war_p)
2301 bias -= wi::to_offset (idx_step);
2302
2303 tree subject = fold_build2 (MINUS_EXPR, utype,
2304 fold_convert (utype, min2),
2305 fold_convert (utype, min1));
2306 subject = fold_build2 (PLUS_EXPR, utype, subject,
2307 wide_int_to_tree (utype, bias));
2308 tree part_cond_expr = fold_build2 (GT_EXPR, boolean_type_node, subject,
2309 wide_int_to_tree (utype, limit));
2310 if (*cond_expr)
2311 *cond_expr = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
2312 *cond_expr, part_cond_expr);
2313 else
2314 *cond_expr = part_cond_expr;
2315 }
2316 if (dump_enabled_p ())
2317 {
2318 if (waw_or_war_p)
2319 dump_printf (MSG_NOTE, "using an index-based WAR/WAW test\n");
2320 else
2321 dump_printf (MSG_NOTE, "using an index-based overlap test\n");
2322 }
2323 return true;
2324 }
2325
2326 /* A subroutine of create_intersect_range_checks, with a subset of the
2327 same arguments. Try to optimize cases in which the second access
2328 is a write and in which some overlap is valid. */
2329
2330 static bool
2331 create_waw_or_war_checks (tree *cond_expr,
2332 const dr_with_seg_len_pair_t &alias_pair)
2333 {
2334 const dr_with_seg_len& dr_a = alias_pair.first;
2335 const dr_with_seg_len& dr_b = alias_pair.second;
2336
2337 /* Check for cases in which:
2338
2339 (a) DR_B is always a write;
2340 (b) the accesses are well-ordered in both the original and new code
2341 (see the comment above the DR_ALIAS_* flags for details); and
2342 (c) the DR_STEPs describe all access pairs covered by ALIAS_PAIR. */
2343 if (alias_pair.flags & ~(DR_ALIAS_WAR | DR_ALIAS_WAW))
2344 return false;
2345
2346 /* Check for equal (but possibly variable) steps. */
2347 tree step = DR_STEP (dr_a.dr);
2348 if (!operand_equal_p (step, DR_STEP (dr_b.dr)))
2349 return false;
2350
2351 /* Make sure that we can operate on sizetype without loss of precision. */
2352 tree addr_type = TREE_TYPE (DR_BASE_ADDRESS (dr_a.dr));
2353 if (TYPE_PRECISION (addr_type) != TYPE_PRECISION (sizetype))
2354 return false;
2355
2356 /* All addresses involved are known to have a common alignment ALIGN.
2357 We can therefore subtract ALIGN from an exclusive endpoint to get
2358 an inclusive endpoint. In the best (and common) case, ALIGN is the
2359 same as the access sizes of both DRs, and so subtracting ALIGN
2360 cancels out the addition of an access size. */
2361 unsigned int align = MIN (dr_a.align, dr_b.align);
2362 poly_uint64 last_chunk_a = dr_a.access_size - align;
2363 poly_uint64 last_chunk_b = dr_b.access_size - align;
2364
2365 /* Get a boolean expression that is true when the step is negative. */
2366 tree indicator = dr_direction_indicator (dr_a.dr);
2367 tree neg_step = fold_build2 (LT_EXPR, boolean_type_node,
2368 fold_convert (ssizetype, indicator),
2369 ssize_int (0));
2370
2371 /* Get lengths in sizetype. */
2372 tree seg_len_a
2373 = fold_convert (sizetype, rewrite_to_non_trapping_overflow (dr_a.seg_len));
2374 step = fold_convert (sizetype, rewrite_to_non_trapping_overflow (step));
2375
2376 /* Each access has the following pattern:
2377
2378 <- |seg_len| ->
2379 <--- A: -ve step --->
2380 +-----+-------+-----+-------+-----+
2381 | n-1 | ..... | 0 | ..... | n-1 |
2382 +-----+-------+-----+-------+-----+
2383 <--- B: +ve step --->
2384 <- |seg_len| ->
2385 |
2386 base address
2387
2388 where "n" is the number of scalar iterations covered by the segment.
2389
2390 A is the range of bytes accessed when the step is negative,
2391 B is the range when the step is positive.
2392
2393 We know that DR_B is a write. We also know (from checking that
2394 DR_A and DR_B are well-ordered) that for each i in [0, n-1],
2395 the write performed by access i of DR_B occurs after access numbers
2396 j<=i of DR_A in both the original and the new code. Any write or
2397 anti dependencies wrt those DR_A accesses are therefore maintained.
2398
2399 We just need to make sure that each individual write in DR_B does not
2400 overlap any higher-indexed access in DR_A; such DR_A accesses happen
2401 after the DR_B access in the original code but happen before it in
2402 the new code.
2403
2404 We know the steps for both accesses are equal, so by induction, we
2405 just need to test whether the first write of DR_B overlaps a later
2406 access of DR_A. In other words, we need to move addr_a along by
2407 one iteration:
2408
2409 addr_a' = addr_a + step
2410
2411 and check whether:
2412
2413 [addr_b, addr_b + last_chunk_b]
2414
2415 overlaps:
2416
2417 [addr_a' + low_offset_a, addr_a' + high_offset_a + last_chunk_a]
2418
2419 where [low_offset_a, high_offset_a] spans accesses [1, n-1]. I.e.:
2420
2421 low_offset_a = +ve step ? 0 : seg_len_a - step
2422 high_offset_a = +ve step ? seg_len_a - step : 0
2423
2424 This is equivalent to testing whether:
2425
2426 addr_a' + low_offset_a <= addr_b + last_chunk_b
2427 && addr_b <= addr_a' + high_offset_a + last_chunk_a
2428
2429 Converting this into a single test, there is an overlap if:
2430
2431 0 <= addr_b + last_chunk_b - addr_a' - low_offset_a <= limit
2432
2433 where limit = high_offset_a - low_offset_a + last_chunk_a + last_chunk_b
2434
2435 If DR_A is performed, limit + |step| - last_chunk_b is known to be
2436 less than the size of the object underlying DR_A. We also know
2437 that last_chunk_b <= |step|; this is checked elsewhere if it isn't
2438 guaranteed at compile time. There can therefore be no overflow if
2439 "limit" is calculated in an unsigned type with pointer precision. */
2440 tree addr_a = fold_build_pointer_plus (DR_BASE_ADDRESS (dr_a.dr),
2441 DR_OFFSET (dr_a.dr));
2442 addr_a = fold_build_pointer_plus (addr_a, DR_INIT (dr_a.dr));
2443
2444 tree addr_b = fold_build_pointer_plus (DR_BASE_ADDRESS (dr_b.dr),
2445 DR_OFFSET (dr_b.dr));
2446 addr_b = fold_build_pointer_plus (addr_b, DR_INIT (dr_b.dr));
2447
2448 /* Advance ADDR_A by one iteration and adjust the length to compensate. */
2449 addr_a = fold_build_pointer_plus (addr_a, step);
2450 tree seg_len_a_minus_step = fold_build2 (MINUS_EXPR, sizetype,
2451 seg_len_a, step);
2452 if (!CONSTANT_CLASS_P (seg_len_a_minus_step))
2453 seg_len_a_minus_step = build1 (SAVE_EXPR, sizetype, seg_len_a_minus_step);
2454
2455 tree low_offset_a = fold_build3 (COND_EXPR, sizetype, neg_step,
2456 seg_len_a_minus_step, size_zero_node);
2457 if (!CONSTANT_CLASS_P (low_offset_a))
2458 low_offset_a = build1 (SAVE_EXPR, sizetype, low_offset_a);
2459
2460 /* We could use COND_EXPR <neg_step, size_zero_node, seg_len_a_minus_step>,
2461 but it's usually more efficient to reuse the LOW_OFFSET_A result. */
2462 tree high_offset_a = fold_build2 (MINUS_EXPR, sizetype, seg_len_a_minus_step,
2463 low_offset_a);
2464
2465 /* The amount added to addr_b - addr_a'. */
2466 tree bias = fold_build2 (MINUS_EXPR, sizetype,
2467 size_int (last_chunk_b), low_offset_a);
2468
2469 tree limit = fold_build2 (MINUS_EXPR, sizetype, high_offset_a, low_offset_a);
2470 limit = fold_build2 (PLUS_EXPR, sizetype, limit,
2471 size_int (last_chunk_a + last_chunk_b));
2472
2473 tree subject = fold_build2 (POINTER_DIFF_EXPR, ssizetype, addr_b, addr_a);
2474 subject = fold_build2 (PLUS_EXPR, sizetype,
2475 fold_convert (sizetype, subject), bias);
2476
2477 *cond_expr = fold_build2 (GT_EXPR, boolean_type_node, subject, limit);
2478 if (dump_enabled_p ())
2479 dump_printf (MSG_NOTE, "using an address-based WAR/WAW test\n");
2480 return true;
2481 }
2482
2483 /* If ALIGN is nonzero, set up *SEG_MIN_OUT and *SEG_MAX_OUT so that for
2484 every address ADDR accessed by D:
2485
2486 *SEG_MIN_OUT <= ADDR (== ADDR & -ALIGN) <= *SEG_MAX_OUT
2487
2488 In this case, every element accessed by D is aligned to at least
2489 ALIGN bytes.
2490
2491 If ALIGN is zero then instead set *SEG_MIN_OUT and *SEG_MAX_OUT so that:
2492
2493 *SEG_MIN_OUT <= ADDR < *SEG_MAX_OUT. */
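
/* For example, with access_size == 4 and ALIGN == 4, the "+ access_size"
   and "- ALIGN" terms cancel and the inclusive maximum for a positive
   step is simply base + seg_len (a worked instance of the cancellation
   described above).  */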
2494
2495 static void
2496 get_segment_min_max (const dr_with_seg_len &d, tree *seg_min_out,
2497 tree *seg_max_out, HOST_WIDE_INT align)
2498 {
2499 /* Each access has the following pattern:
2500
2501 <- |seg_len| ->
2502 <--- A: -ve step --->
2503 +-----+-------+-----+-------+-----+
2504 | n-1 | ..... | 0 | ..... | n-1 |
2505 +-----+-------+-----+-------+-----+
2506 <--- B: +ve step --->
2507 <- |seg_len| ->
2508 |
2509 base address
2510
2511 where "n" is the number of scalar iterations covered by the segment.
2512 (This should be VF for a particular pair if we know that both steps
2513 are the same, otherwise it will be the full number of scalar loop
2514 iterations.)
2515
2516 A is the range of bytes accessed when the step is negative,
2517 B is the range when the step is positive.
2518
2519 If the access size is "access_size" bytes, the lowest addressed byte is:
2520
2521 base + (step < 0 ? seg_len : 0) [LB]
2522
2523 and the highest addressed byte is always below:
2524
2525 base + (step < 0 ? 0 : seg_len) + access_size [UB]
2526
2527 Thus:
2528
2529 LB <= ADDR < UB
2530
2531 If ALIGN is nonzero, all three values are aligned to at least ALIGN
2532 bytes, so:
2533
2534 LB <= ADDR <= UB - ALIGN
2535
2536 where "- ALIGN" folds naturally with the "+ access_size" and often
2537 cancels it out.
2538
2539 We don't try to simplify LB and UB beyond this (e.g. by using
2540 MIN and MAX based on whether seg_len rather than the stride is
2541 negative) because it is possible for the absolute size of the
2542 segment to overflow the range of a ssize_t.
2543
2544 Keeping the pointer_plus outside of the cond_expr should allow
2545 the cond_exprs to be shared with other alias checks. */
2546 tree indicator = dr_direction_indicator (d.dr);
2547 tree neg_step = fold_build2 (LT_EXPR, boolean_type_node,
2548 fold_convert (ssizetype, indicator),
2549 ssize_int (0));
2550 tree addr_base = fold_build_pointer_plus (DR_BASE_ADDRESS (d.dr),
2551 DR_OFFSET (d.dr));
2552 addr_base = fold_build_pointer_plus (addr_base, DR_INIT (d.dr));
2553 tree seg_len
2554 = fold_convert (sizetype, rewrite_to_non_trapping_overflow (d.seg_len));
2555
2556 tree min_reach = fold_build3 (COND_EXPR, sizetype, neg_step,
2557 seg_len, size_zero_node);
2558 tree max_reach = fold_build3 (COND_EXPR, sizetype, neg_step,
2559 size_zero_node, seg_len);
2560 max_reach = fold_build2 (PLUS_EXPR, sizetype, max_reach,
2561 size_int (d.access_size - align));
2562
2563 *seg_min_out = fold_build_pointer_plus (addr_base, min_reach);
2564 *seg_max_out = fold_build_pointer_plus (addr_base, max_reach);
2565 }
2566
2567 /* Generate a runtime condition that is true if ALIAS_PAIR is free of aliases,
2568 storing the condition in *COND_EXPR. The fallback is to generate
2569 a test that the two accesses do not overlap:
2570
2571 end_a <= start_b || end_b <= start_a. */
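
/* For example, the byte ranges [0, 16) and [16, 32) satisfy
   end_a <= start_b (16 <= 16), so they are disjoint; [0, 17) and
   [16, 32) satisfy neither disjunct and must be treated as
   overlapping (an illustrative instance of the test above).  */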
2572
2573 static void
2574 create_intersect_range_checks (class loop *loop, tree *cond_expr,
2575 const dr_with_seg_len_pair_t &alias_pair)
2576 {
2577 const dr_with_seg_len& dr_a = alias_pair.first;
2578 const dr_with_seg_len& dr_b = alias_pair.second;
2579 *cond_expr = NULL_TREE;
2580 if (create_intersect_range_checks_index (loop, cond_expr, alias_pair))
2581 return;
2582
2583 if (create_ifn_alias_checks (cond_expr, alias_pair))
2584 return;
2585
2586 if (create_waw_or_war_checks (cond_expr, alias_pair))
2587 return;
2588
2589 unsigned HOST_WIDE_INT min_align;
2590 tree_code cmp_code;
2591 /* We don't have to check DR_ALIAS_MIXED_STEPS here, since both versions
2592 are equivalent. This is just an optimization heuristic. */
2593 if (TREE_CODE (DR_STEP (dr_a.dr)) == INTEGER_CST
2594 && TREE_CODE (DR_STEP (dr_b.dr)) == INTEGER_CST)
2595 {
2596 /* In this case adding access_size to seg_len is likely to give
2597 a simple X * step, where X is either the number of scalar
2598 iterations or the vectorization factor. We're better off
2599 keeping that, rather than subtracting an alignment from it.
2600
2601 In this case the maximum values are exclusive and so there is
2602 no alias if the maximum of one segment equals the minimum
2603 of another. */
2604 min_align = 0;
2605 cmp_code = LE_EXPR;
2606 }
2607 else
2608 {
2609 /* Calculate the minimum alignment shared by all four pointers,
2610 then arrange for this alignment to be subtracted from the
2611 exclusive maximum values to get inclusive maximum values.
2612 This "- min_align" is cumulative with a "+ access_size"
2613 in the calculation of the maximum values. In the best
2614 (and common) case, the two cancel each other out, leaving
2615 us with an inclusive bound based only on seg_len. In the
2616 worst case we're simply adding a smaller number than before.
2617
2618 Because the maximum values are inclusive, there is an alias
2619 if the maximum value of one segment is equal to the minimum
2620 value of the other. */
2621 min_align = MIN (dr_a.align, dr_b.align);
2622 cmp_code = LT_EXPR;
2623 }
2624
2625 tree seg_a_min, seg_a_max, seg_b_min, seg_b_max;
2626 get_segment_min_max (dr_a, &seg_a_min, &seg_a_max, min_align);
2627 get_segment_min_max (dr_b, &seg_b_min, &seg_b_max, min_align);
2628
2629 *cond_expr
2630 = fold_build2 (TRUTH_OR_EXPR, boolean_type_node,
2631 fold_build2 (cmp_code, boolean_type_node, seg_a_max, seg_b_min),
2632 fold_build2 (cmp_code, boolean_type_node, seg_b_max, seg_a_min));
2633 if (dump_enabled_p ())
2634 dump_printf (MSG_NOTE, "using an address-based overlap test\n");
2635 }
2636
2637 /* Create a conditional expression that represents the run-time checks for
2638 overlapping of address ranges represented by a list of data reference
2639 pairs passed in ALIAS_PAIRS. The data references are in LOOP. The returned
2640 COND_EXPR is the conditional expression to be used in the if statement
2641 that controls which version of the loop gets executed at runtime. */
2642
2643 void
2644 create_runtime_alias_checks (class loop *loop,
2645 vec<dr_with_seg_len_pair_t> *alias_pairs,
2646 tree * cond_expr)
2647 {
2648 tree part_cond_expr;
2649
2650 fold_defer_overflow_warnings ();
2651 dr_with_seg_len_pair_t *alias_pair;
2652 unsigned int i;
2653 FOR_EACH_VEC_ELT (*alias_pairs, i, alias_pair)
2654 {
2655 gcc_assert (alias_pair->flags);
2656 if (dump_enabled_p ())
2657 dump_printf (MSG_NOTE,
2658 "create runtime check for data references %T and %T\n",
2659 DR_REF (alias_pair->first.dr),
2660 DR_REF (alias_pair->second.dr));
2661
2662 /* Create a condition expression for each pair of data references. */
2663 create_intersect_range_checks (loop, &part_cond_expr, *alias_pair);
2664 if (*cond_expr)
2665 *cond_expr = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
2666 *cond_expr, part_cond_expr);
2667 else
2668 *cond_expr = part_cond_expr;
2669 }
2670 fold_undefer_and_ignore_overflow_warnings ();
2671 }
2672
2673 /* Check if OFFSET1 and OFFSET2 (DR_OFFSETs of some data-refs) are identical
2674 expressions. */
2675 static bool
2676 dr_equal_offsets_p1 (tree offset1, tree offset2)
2677 {
2678 bool res;
2679
2680 STRIP_NOPS (offset1);
2681 STRIP_NOPS (offset2);
2682
2683 if (offset1 == offset2)
2684 return true;
2685
2686 if (TREE_CODE (offset1) != TREE_CODE (offset2)
2687 || (!BINARY_CLASS_P (offset1) && !UNARY_CLASS_P (offset1)))
2688 return false;
2689
2690 res = dr_equal_offsets_p1 (TREE_OPERAND (offset1, 0),
2691 TREE_OPERAND (offset2, 0));
2692
2693 if (!res || !BINARY_CLASS_P (offset1))
2694 return res;
2695
2696 res = dr_equal_offsets_p1 (TREE_OPERAND (offset1, 1),
2697 TREE_OPERAND (offset2, 1));
2698
2699 return res;
2700 }
2701
2702 /* Check if DRA and DRB have equal offsets. */
2703 bool
2704 dr_equal_offsets_p (struct data_reference *dra,
2705 struct data_reference *drb)
2706 {
2707 tree offset1, offset2;
2708
2709 offset1 = DR_OFFSET (dra);
2710 offset2 = DR_OFFSET (drb);
2711
2712 return dr_equal_offsets_p1 (offset1, offset2);
2713 }
2714
2715 /* Returns true if FNA == FNB. */
2716
2717 static bool
2718 affine_function_equal_p (affine_fn fna, affine_fn fnb)
2719 {
2720 unsigned i, n = fna.length ();
2721
2722 if (n != fnb.length ())
2723 return false;
2724
2725 for (i = 0; i < n; i++)
2726 if (!operand_equal_p (fna[i], fnb[i], 0))
2727 return false;
2728
2729 return true;
2730 }
2731
2732 /* If all the functions in CF are the same, returns one of them,
2733 otherwise returns NULL. */
2734
2735 static affine_fn
2736 common_affine_function (conflict_function *cf)
2737 {
2738 unsigned i;
2739 affine_fn comm;
2740
2741 if (!CF_NONTRIVIAL_P (cf))
2742 return affine_fn ();
2743
2744 comm = cf->fns[0];
2745
2746 for (i = 1; i < cf->n; i++)
2747 if (!affine_function_equal_p (comm, cf->fns[i]))
2748 return affine_fn ();
2749
2750 return comm;
2751 }
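
/* An affine_fn is a vector of tree coefficients: entry 0 is the
   constant term and entry I (for I >= 1) is the coefficient of x_I,
   so {c, a1, a2} represents c + a1 * x_1 + a2 * x_2 (see
   affine_fn_cst and affine_fn_univar below).  */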
2752
2753 /* Returns the base of the affine function FN. */
2754
2755 static tree
2756 affine_function_base (affine_fn fn)
2757 {
2758 return fn[0];
2759 }
2760
2761 /* Returns true if FN is a constant. */
2762
2763 static bool
2764 affine_function_constant_p (affine_fn fn)
2765 {
2766 unsigned i;
2767 tree coef;
2768
2769 for (i = 1; fn.iterate (i, &coef); i++)
2770 if (!integer_zerop (coef))
2771 return false;
2772
2773 return true;
2774 }
2775
2776 /* Returns true if FN is the zero constant function. */
2777
2778 static bool
2779 affine_function_zero_p (affine_fn fn)
2780 {
2781 return (integer_zerop (affine_function_base (fn))
2782 && affine_function_constant_p (fn));
2783 }
2784
2785 /* Returns a signed integer type with the largest precision from TA
2786 and TB. */
2787
2788 static tree
2789 signed_type_for_types (tree ta, tree tb)
2790 {
2791 if (TYPE_PRECISION (ta) > TYPE_PRECISION (tb))
2792 return signed_type_for (ta);
2793 else
2794 return signed_type_for (tb);
2795 }
2796
2797 /* Applies operation OP on affine functions FNA and FNB, and returns the
2798 result. */
2799
2800 static affine_fn
2801 affine_fn_op (enum tree_code op, affine_fn fna, affine_fn fnb)
2802 {
2803 unsigned i, n, m;
2804 affine_fn ret;
2805 tree coef;
2806
2807 if (fnb.length () > fna.length ())
2808 {
2809 n = fna.length ();
2810 m = fnb.length ();
2811 }
2812 else
2813 {
2814 n = fnb.length ();
2815 m = fna.length ();
2816 }
2817
2818 ret.create (m);
2819 for (i = 0; i < n; i++)
2820 {
2821 tree type = signed_type_for_types (TREE_TYPE (fna[i]),
2822 TREE_TYPE (fnb[i]));
2823 ret.quick_push (fold_build2 (op, type, fna[i], fnb[i]));
2824 }
2825
2826 for (; fna.iterate (i, &coef); i++)
2827 ret.quick_push (fold_build2 (op, signed_type_for (TREE_TYPE (coef)),
2828 coef, integer_zero_node));
2829 for (; fnb.iterate (i, &coef); i++)
2830 ret.quick_push (fold_build2 (op, signed_type_for (TREE_TYPE (coef)),
2831 integer_zero_node, coef));
2832
2833 return ret;
2834 }
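
/* For example, applying PLUS_EXPR to {1, 2} and {3, 4, 5} yields
   {4, 6, 5}: the common entries are combined pairwise and the excess
   entries of the longer function are combined with zero.  */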
2835
2836 /* Returns the sum of affine functions FNA and FNB. */
2837
2838 static affine_fn
2839 affine_fn_plus (affine_fn fna, affine_fn fnb)
2840 {
2841 return affine_fn_op (PLUS_EXPR, fna, fnb);
2842 }
2843
2844 /* Returns the difference of affine functions FNA and FNB. */
2845
2846 static affine_fn
2847 affine_fn_minus (affine_fn fna, affine_fn fnb)
2848 {
2849 return affine_fn_op (MINUS_EXPR, fna, fnb);
2850 }
2851
2852 /* Frees affine function FN. */
2853
2854 static void
2855 affine_fn_free (affine_fn fn)
2856 {
2857 fn.release ();
2858 }
2859
2860 /* Determine the distance for each subscript in the data dependence
2861 relation DDR. */
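
/* For example, if FN_A is 2 + x_1 and FN_B is 0 + x_1, their
   difference is the constant 2 and the subscript distance is 2; a
   non-constant difference is recorded as chrec_dont_know (an
   illustrative instance of the code below).  */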
2862
2863 static void
2864 compute_subscript_distance (struct data_dependence_relation *ddr)
2865 {
2866 conflict_function *cf_a, *cf_b;
2867 affine_fn fn_a, fn_b, diff;
2868
2869 if (DDR_ARE_DEPENDENT (ddr) == NULL_TREE)
2870 {
2871 unsigned int i;
2872
2873 for (i = 0; i < DDR_NUM_SUBSCRIPTS (ddr); i++)
2874 {
2875 struct subscript *subscript;
2876
2877 subscript = DDR_SUBSCRIPT (ddr, i);
2878 cf_a = SUB_CONFLICTS_IN_A (subscript);
2879 cf_b = SUB_CONFLICTS_IN_B (subscript);
2880
2881 fn_a = common_affine_function (cf_a);
2882 fn_b = common_affine_function (cf_b);
2883 if (!fn_a.exists () || !fn_b.exists ())
2884 {
2885 SUB_DISTANCE (subscript) = chrec_dont_know;
2886 return;
2887 }
2888 diff = affine_fn_minus (fn_a, fn_b);
2889
2890 if (affine_function_constant_p (diff))
2891 SUB_DISTANCE (subscript) = affine_function_base (diff);
2892 else
2893 SUB_DISTANCE (subscript) = chrec_dont_know;
2894
2895 affine_fn_free (diff);
2896 }
2897 }
2898 }
2899
2900 /* Returns the conflict function for "unknown". */
2901
2902 static conflict_function *
2903 conflict_fn_not_known (void)
2904 {
2905 conflict_function *fn = XCNEW (conflict_function);
2906 fn->n = NOT_KNOWN;
2907
2908 return fn;
2909 }
2910
2911 /* Returns the conflict function for "independent". */
2912
2913 static conflict_function *
2914 conflict_fn_no_dependence (void)
2915 {
2916 conflict_function *fn = XCNEW (conflict_function);
2917 fn->n = NO_DEPENDENCE;
2918
2919 return fn;
2920 }
2921
2922 /* Returns true if the address of OBJ is invariant in LOOP. */
2923
2924 static bool
2925 object_address_invariant_in_loop_p (const class loop *loop, const_tree obj)
2926 {
2927 while (handled_component_p (obj))
2928 {
2929 if (TREE_CODE (obj) == ARRAY_REF)
2930 {
2931 for (int i = 1; i < 4; ++i)
2932 if (chrec_contains_symbols_defined_in_loop (TREE_OPERAND (obj, i),
2933 loop->num))
2934 return false;
2935 }
2936 else if (TREE_CODE (obj) == COMPONENT_REF)
2937 {
2938 if (chrec_contains_symbols_defined_in_loop (TREE_OPERAND (obj, 2),
2939 loop->num))
2940 return false;
2941 }
2942 obj = TREE_OPERAND (obj, 0);
2943 }
2944
2945 if (!INDIRECT_REF_P (obj)
2946 && TREE_CODE (obj) != MEM_REF)
2947 return true;
2948
2949 return !chrec_contains_symbols_defined_in_loop (TREE_OPERAND (obj, 0),
2950 loop->num);
2951 }
2952
2953 /* Returns false if we can prove that data references A and B do not alias,
2954 true otherwise. If LOOP_NEST is NULL no cross-iteration aliases are
2955 considered. */
2956
2957 bool
2958 dr_may_alias_p (const struct data_reference *a, const struct data_reference *b,
2959 class loop *loop_nest)
2960 {
2961 tree addr_a = DR_BASE_OBJECT (a);
2962 tree addr_b = DR_BASE_OBJECT (b);
2963
2964 /* If we are not processing a loop nest but scalar code we
2965 do not need to care about possible cross-iteration dependences
2966 and thus can process the full original reference. Do so,
2967 similar to how loop invariant motion applies extra offset-based
2968 disambiguation. */
2969 if (!loop_nest)
2970 {
2971 aff_tree off1, off2;
2972 poly_widest_int size1, size2;
2973 get_inner_reference_aff (DR_REF (a), &off1, &size1);
2974 get_inner_reference_aff (DR_REF (b), &off2, &size2);
2975 aff_combination_scale (&off1, -1);
2976 aff_combination_add (&off2, &off1);
2977 if (aff_comb_cannot_overlap_p (&off2, size1, size2))
2978 return false;
2979 }
2980
2981 if ((TREE_CODE (addr_a) == MEM_REF || TREE_CODE (addr_a) == TARGET_MEM_REF)
2982 && (TREE_CODE (addr_b) == MEM_REF || TREE_CODE (addr_b) == TARGET_MEM_REF)
2983 /* For cross-iteration dependences the cliques must be valid for the
2984 whole loop, not just individual iterations. */
2985 && (!loop_nest
2986 || MR_DEPENDENCE_CLIQUE (addr_a) == 1
2987 || MR_DEPENDENCE_CLIQUE (addr_a) == loop_nest->owned_clique)
2988 && MR_DEPENDENCE_CLIQUE (addr_a) == MR_DEPENDENCE_CLIQUE (addr_b)
2989 && MR_DEPENDENCE_BASE (addr_a) != MR_DEPENDENCE_BASE (addr_b))
2990 return false;
2991
2992 /* If we had an evolution in a pointer-based MEM_REF BASE_OBJECT we
2993 do not know the size of the base-object. So we cannot do any
2994 offset/overlap based analysis but have to rely on points-to
2995 information only. */
2996 if (TREE_CODE (addr_a) == MEM_REF
2997 && (DR_UNCONSTRAINED_BASE (a)
2998 || TREE_CODE (TREE_OPERAND (addr_a, 0)) == SSA_NAME))
2999 {
3000 /* For true dependences we can apply TBAA. */
3001 if (flag_strict_aliasing
3002 && DR_IS_WRITE (a) && DR_IS_READ (b)
3003 && !alias_sets_conflict_p (get_alias_set (DR_REF (a)),
3004 get_alias_set (DR_REF (b))))
3005 return false;
3006 if (TREE_CODE (addr_b) == MEM_REF)
3007 return ptr_derefs_may_alias_p (TREE_OPERAND (addr_a, 0),
3008 TREE_OPERAND (addr_b, 0));
3009 else
3010 return ptr_derefs_may_alias_p (TREE_OPERAND (addr_a, 0),
3011 build_fold_addr_expr (addr_b));
3012 }
3013 else if (TREE_CODE (addr_b) == MEM_REF
3014 && (DR_UNCONSTRAINED_BASE (b)
3015 || TREE_CODE (TREE_OPERAND (addr_b, 0)) == SSA_NAME))
3016 {
3017 /* For true dependences we can apply TBAA. */
3018 if (flag_strict_aliasing
3019 && DR_IS_WRITE (a) && DR_IS_READ (b)
3020 && !alias_sets_conflict_p (get_alias_set (DR_REF (a)),
3021 get_alias_set (DR_REF (b))))
3022 return false;
3023 if (TREE_CODE (addr_a) == MEM_REF)
3024 return ptr_derefs_may_alias_p (TREE_OPERAND (addr_a, 0),
3025 TREE_OPERAND (addr_b, 0));
3026 else
3027 return ptr_derefs_may_alias_p (build_fold_addr_expr (addr_a),
3028 TREE_OPERAND (addr_b, 0));
3029 }
3030
3031 /* Otherwise DR_BASE_OBJECT is an access that covers the whole object
3032 that is being subsetted in the loop nest. */
3033 if (DR_IS_WRITE (a) && DR_IS_WRITE (b))
3034 return refs_output_dependent_p (addr_a, addr_b);
3035 else if (DR_IS_READ (a) && DR_IS_WRITE (b))
3036 return refs_anti_dependent_p (addr_a, addr_b);
3037 return refs_may_alias_p (addr_a, addr_b);
3038 }
3039
3040 /* REF_A and REF_B both satisfy access_fn_component_p. Return true
3041 if it is meaningful to compare their associated access functions
3042 when checking for dependencies. */
3043
3044 static bool
3045 access_fn_components_comparable_p (tree ref_a, tree ref_b)
3046 {
3047 /* Allow pairs of component refs from the following sets:
3048
3049 { REALPART_EXPR, IMAGPART_EXPR }
3050 { COMPONENT_REF }
3051 { ARRAY_REF }. */
3052 tree_code code_a = TREE_CODE (ref_a);
3053 tree_code code_b = TREE_CODE (ref_b);
3054 if (code_a == IMAGPART_EXPR)
3055 code_a = REALPART_EXPR;
3056 if (code_b == IMAGPART_EXPR)
3057 code_b = REALPART_EXPR;
3058 if (code_a != code_b)
3059 return false;
3060
3061 if (TREE_CODE (ref_a) == COMPONENT_REF)
3062 /* ??? We cannot simply use the type of operand #0 of the refs here as
3063 the Fortran compiler smuggles type punning into COMPONENT_REFs.
3064 Use the DECL_CONTEXT of the FIELD_DECLs instead. */
3065 return (DECL_CONTEXT (TREE_OPERAND (ref_a, 1))
3066 == DECL_CONTEXT (TREE_OPERAND (ref_b, 1)));
3067
3068 return types_compatible_p (TREE_TYPE (TREE_OPERAND (ref_a, 0)),
3069 TREE_TYPE (TREE_OPERAND (ref_b, 0)));
3070 }
3071
3072 /* Initialize a data dependence relation between data accesses A and
3073 B. NB_LOOPS is the number of loops surrounding the references: the
3074 size of the classic distance/direction vectors. */
3075
3076 struct data_dependence_relation *
3077 initialize_data_dependence_relation (struct data_reference *a,
3078 struct data_reference *b,
3079 vec<loop_p> loop_nest)
3080 {
3081 struct data_dependence_relation *res;
3082 unsigned int i;
3083
3084 res = XCNEW (struct data_dependence_relation);
3085 DDR_A (res) = a;
3086 DDR_B (res) = b;
3087 DDR_LOOP_NEST (res).create (0);
3088 DDR_SUBSCRIPTS (res).create (0);
3089 DDR_DIR_VECTS (res).create (0);
3090 DDR_DIST_VECTS (res).create (0);
3091
3092 if (a == NULL || b == NULL)
3093 {
3094 DDR_ARE_DEPENDENT (res) = chrec_dont_know;
3095 return res;
3096 }
3097
3098 /* If the data references do not alias, then they are independent. */
3099 if (!dr_may_alias_p (a, b, loop_nest.exists () ? loop_nest[0] : NULL))
3100 {
3101 DDR_ARE_DEPENDENT (res) = chrec_known;
3102 return res;
3103 }
3104
3105 unsigned int num_dimensions_a = DR_NUM_DIMENSIONS (a);
3106 unsigned int num_dimensions_b = DR_NUM_DIMENSIONS (b);
3107 if (num_dimensions_a == 0 || num_dimensions_b == 0)
3108 {
3109 DDR_ARE_DEPENDENT (res) = chrec_dont_know;
3110 return res;
3111 }
3112
3113 /* For unconstrained bases, the root (highest-indexed) subscript
3114 describes a variation in the base of the original DR_REF rather
3115 than a component access. We have no type that accurately describes
3116 the new DR_BASE_OBJECT (whose TREE_TYPE describes the type *after*
3117 applying this subscript) so limit the search to the last real
3118 component access.
3119
3120 E.g. for:
3121
3122 void
3123 f (int a[][8], int b[][8])
3124 {
3125 for (int i = 0; i < 8; ++i)
3126 a[i * 2][0] = b[i][0];
3127 }
3128
3129 the a and b accesses have a single ARRAY_REF component reference [0]
3130 but have two subscripts. */
3131 if (DR_UNCONSTRAINED_BASE (a))
3132 num_dimensions_a -= 1;
3133 if (DR_UNCONSTRAINED_BASE (b))
3134 num_dimensions_b -= 1;
3135
3136 /* These structures describe sequences of component references in
3137 DR_REF (A) and DR_REF (B). Each component reference is tied to a
3138 specific access function. */
3139 struct {
3140 /* The sequence starts at DR_ACCESS_FN (A, START_A) of A and
3141 DR_ACCESS_FN (B, START_B) of B (inclusive) and extends to higher
3142 indices. In C notation, these are the indices of the rightmost
3143 component references; e.g. for a sequence .b.c.d, the start
3144 index is for .d. */
3145 unsigned int start_a;
3146 unsigned int start_b;
3147
3148 /* The sequence contains LENGTH consecutive access functions from
3149 each DR. */
3150 unsigned int length;
3151
3152 /* The enclosing objects for the A and B sequences respectively,
3153 i.e. the objects to which DR_ACCESS_FN (A, START_A + LENGTH - 1)
3154 and DR_ACCESS_FN (B, START_B + LENGTH - 1) are applied. */
3155 tree object_a;
3156 tree object_b;
3157 } full_seq = {}, struct_seq = {};
3158
3159 /* Before each iteration of the loop:
3160
3161 - REF_A is what you get after applying DR_ACCESS_FN (A, INDEX_A) and
3162 - REF_B is what you get after applying DR_ACCESS_FN (B, INDEX_B). */
3163 unsigned int index_a = 0;
3164 unsigned int index_b = 0;
3165 tree ref_a = DR_REF (a);
3166 tree ref_b = DR_REF (b);
3167
3168 /* Now walk the component references from the final DR_REFs back up to
3169 the enclosing base objects. Each component reference corresponds
3170 to one access function in the DR, with access function 0 being for
3171 the final DR_REF and the highest-indexed access function being the
3172 one that is applied to the base of the DR.
3173
3174 Look for a sequence of component references whose access functions
3175 are comparable (see access_fn_components_comparable_p). If more
3176 than one such sequence exists, pick the one nearest the base
3177 (which is the leftmost sequence in C notation). Store this sequence
3178 in FULL_SEQ.
3179
3180 For example, if we have:
3181
3182 struct foo { struct bar s; ... } (*a)[10], (*b)[10];
3183
3184 A: a[0][i].s.c.d
3185 B: __real b[0][i].s.e[i].f
3186
3187 (where d is the same type as the real component of f) then the access
3188 functions would be:
3189
3190 0 1 2 3
3191 A: .d .c .s [i]
3192
3193 0 1 2 3 4 5
3194 B: __real .f [i] .e .s [i]
3195
3196 The A0/B2 column isn't comparable, since .d is a COMPONENT_REF
3197 and [i] is an ARRAY_REF. However, the A1/B3 column contains two
3198 COMPONENT_REF accesses for struct bar, so is comparable. Likewise
3199 the A2/B4 column contains two COMPONENT_REF accesses for struct foo,
3200 so is comparable. The A3/B5 column contains two ARRAY_REFs that
3201 index foo[10] arrays, so is again comparable. The sequence is
3202 therefore:
3203
3204 A: [1, 3] (i.e. [i].s.c)
3205 B: [3, 5] (i.e. [i].s.e)
3206
3207 Also look for sequences of component references whose access
3208 functions are comparable and whose enclosing objects have the same
3209 RECORD_TYPE. Store this sequence in STRUCT_SEQ. In the above
3210 example, STRUCT_SEQ would be:
3211
3212 A: [1, 2] (i.e. s.c)
3213 B: [3, 4] (i.e. s.e) */
3214 while (index_a < num_dimensions_a && index_b < num_dimensions_b)
3215 {
3216 /* REF_A and REF_B must be one of the component access types
3217 allowed by dr_analyze_indices. */
3218 gcc_checking_assert (access_fn_component_p (ref_a));
3219 gcc_checking_assert (access_fn_component_p (ref_b));
3220
3221 /* Get the immediately-enclosing objects for REF_A and REF_B,
3222 i.e. the references *before* applying DR_ACCESS_FN (A, INDEX_A)
3223 and DR_ACCESS_FN (B, INDEX_B). */
3224 tree object_a = TREE_OPERAND (ref_a, 0);
3225 tree object_b = TREE_OPERAND (ref_b, 0);
3226
3227 tree type_a = TREE_TYPE (object_a);
3228 tree type_b = TREE_TYPE (object_b);
3229 if (access_fn_components_comparable_p (ref_a, ref_b))
3230 {
3231 /* This pair of component accesses is comparable for dependence
3232 analysis, so we can include DR_ACCESS_FN (A, INDEX_A) and
3233 DR_ACCESS_FN (B, INDEX_B) in the sequence. */
3234 if (full_seq.start_a + full_seq.length != index_a
3235 || full_seq.start_b + full_seq.length != index_b)
3236 {
3237 /* The accesses don't extend the current sequence,
3238 so start a new one here. */
3239 full_seq.start_a = index_a;
3240 full_seq.start_b = index_b;
3241 full_seq.length = 0;
3242 }
3243
3244 /* Add this pair of references to the sequence. */
3245 full_seq.length += 1;
3246 full_seq.object_a = object_a;
3247 full_seq.object_b = object_b;
3248
3249 /* If the enclosing objects are structures (and thus have the
3250 same RECORD_TYPE), record the new sequence in STRUCT_SEQ. */
3251 if (TREE_CODE (type_a) == RECORD_TYPE)
3252 struct_seq = full_seq;
3253
3254 /* Move to the next containing reference for both A and B. */
3255 ref_a = object_a;
3256 ref_b = object_b;
3257 index_a += 1;
3258 index_b += 1;
3259 continue;
3260 }
3261
3262 /* Try to approach equal type sizes. */
3263 if (!COMPLETE_TYPE_P (type_a)
3264 || !COMPLETE_TYPE_P (type_b)
3265 || !tree_fits_uhwi_p (TYPE_SIZE_UNIT (type_a))
3266 || !tree_fits_uhwi_p (TYPE_SIZE_UNIT (type_b)))
3267 break;
3268
3269 unsigned HOST_WIDE_INT size_a = tree_to_uhwi (TYPE_SIZE_UNIT (type_a));
3270 unsigned HOST_WIDE_INT size_b = tree_to_uhwi (TYPE_SIZE_UNIT (type_b));
3271 if (size_a <= size_b)
3272 {
3273 index_a += 1;
3274 ref_a = object_a;
3275 }
3276 if (size_b <= size_a)
3277 {
3278 index_b += 1;
3279 ref_b = object_b;
3280 }
3281 }
3282
3283 /* See whether FULL_SEQ ends at the base and whether the two bases
3284 are equal. We do not care about TBAA or alignment info so we can
3285 use OEP_ADDRESS_OF to avoid false negatives. */
3286 tree base_a = DR_BASE_OBJECT (a);
3287 tree base_b = DR_BASE_OBJECT (b);
3288 bool same_base_p = (full_seq.start_a + full_seq.length == num_dimensions_a
3289 && full_seq.start_b + full_seq.length == num_dimensions_b
3290 && DR_UNCONSTRAINED_BASE (a) == DR_UNCONSTRAINED_BASE (b)
3291 && operand_equal_p (base_a, base_b, OEP_ADDRESS_OF)
3292 && (types_compatible_p (TREE_TYPE (base_a),
3293 TREE_TYPE (base_b))
3294 || (!base_supports_access_fn_components_p (base_a)
3295 && !base_supports_access_fn_components_p (base_b)
3296 && operand_equal_p
3297 (TYPE_SIZE (TREE_TYPE (base_a)),
3298 TYPE_SIZE (TREE_TYPE (base_b)), 0)))
3299 && (!loop_nest.exists ()
3300 || (object_address_invariant_in_loop_p
3301 (loop_nest[0], base_a))));
3302
3303 /* If the bases are the same, we can include the base variation too.
3304 E.g. the b accesses in:
3305
3306 for (int i = 0; i < n; ++i)
3307 b[i + 4][0] = b[i][0];
3308
3309 have a definite dependence distance of 4, while for:
3310
3311 for (int i = 0; i < n; ++i)
3312 a[i + 4][0] = b[i][0];
3313
3314 the dependence distance depends on the gap between a and b.
3315
3316 If the bases are different then we can only rely on the sequence
3317 rooted at a structure access, since arrays are allowed to overlap
3318 arbitrarily and change shape arbitrarily. E.g. we treat this as
3319 valid code:
3320
3321 int a[256];
3322 ...
3323 ((int (*)[4][3]) &a[1])[i][0] += ((int (*)[4][3]) &a[2])[i][0];
3324
3325 where two lvalues with the same int[4][3] type overlap, and where
3326 both lvalues are distinct from the object's declared type. */
3327 if (same_base_p)
3328 {
3329 if (DR_UNCONSTRAINED_BASE (a))
3330 full_seq.length += 1;
3331 }
3332 else
3333 full_seq = struct_seq;
3334
3335 /* Punt if we didn't find a suitable sequence. */
3336 if (full_seq.length == 0)
3337 {
3338 DDR_ARE_DEPENDENT (res) = chrec_dont_know;
3339 return res;
3340 }
3341
3342 if (!same_base_p)
3343 {
3344 /* Partial overlap is possible for different bases when strict aliasing
3345 is not in effect. It's also possible if either base involves a union
3346 access; e.g. for:
3347
3348 struct s1 { int a[2]; };
3349 struct s2 { struct s1 b; int c; };
3350 struct s3 { int d; struct s1 e; };
3351 union u { struct s2 f; struct s3 g; } *p, *q;
3352
3353 the s1 at "p->f.b" (base "p->f") partially overlaps the s1 at
3354 "p->g.e" (base "p->g") and might partially overlap the s1 at
3355 "q->g.e" (base "q->g"). */
3356 if (!flag_strict_aliasing
3357 || ref_contains_union_access_p (full_seq.object_a)
3358 || ref_contains_union_access_p (full_seq.object_b))
3359 {
3360 DDR_ARE_DEPENDENT (res) = chrec_dont_know;
3361 return res;
3362 }
3363
3364 DDR_COULD_BE_INDEPENDENT_P (res) = true;
3365 if (!loop_nest.exists ()
3366 || (object_address_invariant_in_loop_p (loop_nest[0],
3367 full_seq.object_a)
3368 && object_address_invariant_in_loop_p (loop_nest[0],
3369 full_seq.object_b)))
3370 {
3371 DDR_OBJECT_A (res) = full_seq.object_a;
3372 DDR_OBJECT_B (res) = full_seq.object_b;
3373 }
3374 }
3375
3376 DDR_AFFINE_P (res) = true;
3377 DDR_ARE_DEPENDENT (res) = NULL_TREE;
3378 DDR_SUBSCRIPTS (res).create (full_seq.length);
3379 DDR_LOOP_NEST (res) = loop_nest;
3380 DDR_SELF_REFERENCE (res) = false;
3381
3382 for (i = 0; i < full_seq.length; ++i)
3383 {
3384 struct subscript *subscript;
3385
3386 subscript = XNEW (struct subscript);
3387 SUB_ACCESS_FN (subscript, 0) = DR_ACCESS_FN (a, full_seq.start_a + i);
3388 SUB_ACCESS_FN (subscript, 1) = DR_ACCESS_FN (b, full_seq.start_b + i);
3389 SUB_CONFLICTS_IN_A (subscript) = conflict_fn_not_known ();
3390 SUB_CONFLICTS_IN_B (subscript) = conflict_fn_not_known ();
3391 SUB_LAST_CONFLICT (subscript) = chrec_dont_know;
3392 SUB_DISTANCE (subscript) = chrec_dont_know;
3393 DDR_SUBSCRIPTS (res).safe_push (subscript);
3394 }
3395
3396 return res;
3397 }
3398
3399 /* Frees memory used by the conflict function F. */
3400
3401 static void
3402 free_conflict_function (conflict_function *f)
3403 {
3404 unsigned i;
3405
3406 if (CF_NONTRIVIAL_P (f))
3407 {
3408 for (i = 0; i < f->n; i++)
3409 affine_fn_free (f->fns[i]);
3410 }
3411 free (f);
3412 }
3413
3414 /* Frees memory used by SUBSCRIPTS. */
3415
3416 static void
3417 free_subscripts (vec<subscript_p> subscripts)
3418 {
3419 unsigned i;
3420 subscript_p s;
3421
3422 FOR_EACH_VEC_ELT (subscripts, i, s)
3423 {
3424 free_conflict_function (s->conflicting_iterations_in_a);
3425 free_conflict_function (s->conflicting_iterations_in_b);
3426 free (s);
3427 }
3428 subscripts.release ();
3429 }
3430
3431 /* Set DDR_ARE_DEPENDENT to CHREC and finalize the subscript overlap
3432 description. */
3433
3434 static inline void
3435 finalize_ddr_dependent (struct data_dependence_relation *ddr,
3436 tree chrec)
3437 {
3438 DDR_ARE_DEPENDENT (ddr) = chrec;
3439 free_subscripts (DDR_SUBSCRIPTS (ddr));
3440 DDR_SUBSCRIPTS (ddr).create (0);
3441 }
3442
3443 /* The dependence relation DDR cannot be represented by a distance
3444 vector. */
3445
3446 static inline void
3447 non_affine_dependence_relation (struct data_dependence_relation *ddr)
3448 {
3449 if (dump_file && (dump_flags & TDF_DETAILS))
3450 fprintf (dump_file, "(Dependence relation cannot be represented by distance vector.) \n");
3451
3452 DDR_AFFINE_P (ddr) = false;
3453 }
3454
3455 \f
3456
3457 /* This section contains the classic Banerjee tests. */
3458
3459 /* Returns true iff CHREC_A and CHREC_B are not dependent on any index
3460 variables, i.e., if the ZIV (Zero Index Variable) test is true. */
3461
3462 static inline bool
3463 ziv_subscript_p (const_tree chrec_a, const_tree chrec_b)
3464 {
3465 return (evolution_function_is_constant_p (chrec_a)
3466 && evolution_function_is_constant_p (chrec_b));
3467 }
3468
3469 /* Returns true iff CHREC_A and CHREC_B are dependent on a single index
3470 variable, i.e., if the SIV (Single Index Variable) test is true. */
3471
3472 static bool
3473 siv_subscript_p (const_tree chrec_a, const_tree chrec_b)
3474 {
3475 if ((evolution_function_is_constant_p (chrec_a)
3476 && evolution_function_is_univariate_p (chrec_b))
3477 || (evolution_function_is_constant_p (chrec_b)
3478 && evolution_function_is_univariate_p (chrec_a)))
3479 return true;
3480
3481 if (evolution_function_is_univariate_p (chrec_a)
3482 && evolution_function_is_univariate_p (chrec_b))
3483 {
3484 switch (TREE_CODE (chrec_a))
3485 {
3486 case POLYNOMIAL_CHREC:
3487 switch (TREE_CODE (chrec_b))
3488 {
3489 case POLYNOMIAL_CHREC:
3490 if (CHREC_VARIABLE (chrec_a) != CHREC_VARIABLE (chrec_b))
3491 return false;
3492 /* FALLTHRU */
3493
3494 default:
3495 return true;
3496 }
3497
3498 default:
3499 return true;
3500 }
3501 }
3502
3503 return false;
3504 }
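
/* For instance, chrec_a = 3 vs. chrec_b = 5 is a ZIV subscript;
   chrec_a = 3 vs. chrec_b = {0, +, 1}_1 is SIV; and for
   chrec_a = {0, +, 1}_1 vs. chrec_b = {0, +, 1}_2 the two univariate
   chrecs evolve in different loops, so siv_subscript_p returns false
   and the subscript is handled by the MIV code below.  */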
3505
3506 /* Creates a conflict function with N dimensions. The affine functions
3507 in each dimension follow. */
3508
3509 static conflict_function *
3510 conflict_fn (unsigned n, ...)
3511 {
3512 unsigned i;
3513 conflict_function *ret = XCNEW (conflict_function);
3514 va_list ap;
3515
3516 gcc_assert (n > 0 && n <= MAX_DIM);
3517 va_start (ap, n);
3518
3519 ret->n = n;
3520 for (i = 0; i < n; i++)
3521 ret->fns[i] = va_arg (ap, affine_fn);
3522 va_end (ap);
3523
3524 return ret;
3525 }
3526
3527 /* Returns constant affine function with value CST. */
3528
3529 static affine_fn
3530 affine_fn_cst (tree cst)
3531 {
3532 affine_fn fn;
3533 fn.create (1);
3534 fn.quick_push (cst);
3535 return fn;
3536 }
3537
3538 /* Returns affine function with single variable, CST + COEF * x_DIM. */
3539
3540 static affine_fn
3541 affine_fn_univar (tree cst, unsigned dim, tree coef)
3542 {
3543 affine_fn fn;
3544 fn.create (dim + 1);
3545 unsigned i;
3546
3547 gcc_assert (dim > 0);
3548 fn.quick_push (cst);
3549 for (i = 1; i < dim; i++)
3550 fn.quick_push (integer_zero_node);
3551 fn.quick_push (coef);
3552 return fn;
3553 }
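
/* An affine_fn is thus a dense vector of coefficients: entry 0 is the
   constant term and entry I, for I >= 1, is the coefficient of the
   iteration variable of dimension I.  For instance, with CST = 3,
   DIM = 2 and COEF = 5, affine_fn_univar builds the vector [3, 0, 5],
   i.e. the function 3 + 5 * x_2.  */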
3554
3555 /* Analyze a ZIV (Zero Index Variable) subscript. *OVERLAPS_A and
3556 *OVERLAPS_B are initialized to the functions that describe the
3557 relation between the elements accessed twice by CHREC_A and
3558 CHREC_B. For k >= 0, the following property holds:
3559
3560 CHREC_A (*OVERLAPS_A (k)) = CHREC_B (*OVERLAPS_B (k)). */
3561
3562 static void
3563 analyze_ziv_subscript (tree chrec_a,
3564 tree chrec_b,
3565 conflict_function **overlaps_a,
3566 conflict_function **overlaps_b,
3567 tree *last_conflicts)
3568 {
3569 tree type, difference;
3570 dependence_stats.num_ziv++;
3571
3572 if (dump_file && (dump_flags & TDF_DETAILS))
3573 fprintf (dump_file, "(analyze_ziv_subscript \n");
3574
3575 type = signed_type_for_types (TREE_TYPE (chrec_a), TREE_TYPE (chrec_b));
3576 chrec_a = chrec_convert (type, chrec_a, NULL);
3577 chrec_b = chrec_convert (type, chrec_b, NULL);
3578 difference = chrec_fold_minus (type, chrec_a, chrec_b);
3579
3580 switch (TREE_CODE (difference))
3581 {
3582 case INTEGER_CST:
3583 if (integer_zerop (difference))
3584 {
3585 /* The difference is equal to zero: the accessed index
3586 overlaps for each iteration in the loop. */
3587 *overlaps_a = conflict_fn (1, affine_fn_cst (integer_zero_node));
3588 *overlaps_b = conflict_fn (1, affine_fn_cst (integer_zero_node));
3589 *last_conflicts = chrec_dont_know;
3590 dependence_stats.num_ziv_dependent++;
3591 }
3592 else
3593 {
3594 /* The accesses do not overlap. */
3595 *overlaps_a = conflict_fn_no_dependence ();
3596 *overlaps_b = conflict_fn_no_dependence ();
3597 *last_conflicts = integer_zero_node;
3598 dependence_stats.num_ziv_independent++;
3599 }
3600 break;
3601
3602 default:
3603 /* We're not sure whether the indexes overlap. For the moment,
3604 conservatively answer "don't know". */
3605 if (dump_file && (dump_flags & TDF_DETAILS))
3606 fprintf (dump_file, "ziv test failed: difference is non-integer.\n");
3607
3608 *overlaps_a = conflict_fn_not_known ();
3609 *overlaps_b = conflict_fn_not_known ();
3610 *last_conflicts = chrec_dont_know;
3611 dependence_stats.num_ziv_unimplemented++;
3612 break;
3613 }
3614
3615 if (dump_file && (dump_flags & TDF_DETAILS))
3616 fprintf (dump_file, ")\n");
3617 }
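
/* For instance, for chrec_a = 5 and chrec_b = 7 the folded difference
   is the INTEGER_CST -2, so the two accesses can never reference the
   same element and the subscript is independent; for chrec_a =
   chrec_b = 5 the difference is 0 and they conflict on every
   iteration.  A symbolic difference such as "n - m" falls into the
   default case and conservatively answers "don't know".  */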
3618
3619 /* Similar to max_stmt_executions_int, but returns the bound as a tree,
3620 and only if it fits the unsigned int type. If this is not the case, or the
3621 bound on the number of iterations of LOOP could not be derived, returns
3622 chrec_dont_know. */
3623
3624 static tree
3625 max_stmt_executions_tree (class loop *loop)
3626 {
3627 widest_int nit;
3628
3629 if (!max_stmt_executions (loop, &nit))
3630 return chrec_dont_know;
3631
3632 if (!wi::fits_to_tree_p (nit, unsigned_type_node))
3633 return chrec_dont_know;
3634
3635 return wide_int_to_tree (unsigned_type_node, nit);
3636 }
3637
3638 /* Determine whether CHREC is always positive/negative. If the expression
3639 cannot be statically analyzed, return false; otherwise set the answer
3640 in *VALUE. */
3641
3642 static bool
3643 chrec_is_positive (tree chrec, bool *value)
3644 {
3645 bool value0, value1, value2;
3646 tree end_value, nb_iter;
3647
3648 switch (TREE_CODE (chrec))
3649 {
3650 case POLYNOMIAL_CHREC:
3651 if (!chrec_is_positive (CHREC_LEFT (chrec), &value0)
3652 || !chrec_is_positive (CHREC_RIGHT (chrec), &value1))
3653 return false;
3654
3655 /* FIXME -- overflows. */
3656 if (value0 == value1)
3657 {
3658 *value = value0;
3659 return true;
3660 }
3661
3662 /* Otherwise the chrec has the form "{-197, +, 2}_1",
3663 and the proof consists of showing that the sign never
3664 changes during the execution of the loop, from 0 to
3665 loop->nb_iterations. */
3666 if (!evolution_function_is_affine_p (chrec))
3667 return false;
3668
3669 nb_iter = number_of_latch_executions (get_chrec_loop (chrec));
3670 if (chrec_contains_undetermined (nb_iter))
3671 return false;
3672
3673 #if 0
3674 /* TODO -- If the test is after the exit, we may decrease the number of
3675 iterations by one. */
3676 if (after_exit)
3677 nb_iter = chrec_fold_minus (type, nb_iter, build_int_cst (type, 1));
3678 #endif
3679
3680 end_value = chrec_apply (CHREC_VARIABLE (chrec), chrec, nb_iter);
3681
3682 if (!chrec_is_positive (end_value, &value2))
3683 return false;
3684
3685 *value = value0;
3686 return value0 == value2;
3687
3688 case INTEGER_CST:
3689 switch (tree_int_cst_sgn (chrec))
3690 {
3691 case -1:
3692 *value = false;
3693 break;
3694 case 1:
3695 *value = true;
3696 break;
3697 default:
3698 return false;
3699 }
3700 return true;
3701
3702 default:
3703 return false;
3704 }
3705 }
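
/* For instance, for {-197, +, 2}_1 in a loop with 50 latch
   executions, the end value is -197 + 2 * 50 = -97: the initial and
   end values are both negative, so the sign is constant and *VALUE is
   set to false.  With 100 latch executions the end value 3 is
   positive, the sign changes inside the loop, and the function
   returns false.  */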
3706
3707
3708 /* Analyze a SIV (Single Index Variable) subscript where CHREC_A is a
3709 constant, and CHREC_B is an affine function. *OVERLAPS_A and
3710 *OVERLAPS_B are initialized to the functions that describe the
3711 relation between the elements accessed twice by CHREC_A and
3712 CHREC_B. For k >= 0, the following property holds:
3713
3714 CHREC_A (*OVERLAPS_A (k)) = CHREC_B (*OVERLAPS_B (k)). */
3715
3716 static void
3717 analyze_siv_subscript_cst_affine (tree chrec_a,
3718 tree chrec_b,
3719 conflict_function **overlaps_a,
3720 conflict_function **overlaps_b,
3721 tree *last_conflicts)
3722 {
3723 bool value0, value1, value2;
3724 tree type, difference, tmp;
3725
3726 type = signed_type_for_types (TREE_TYPE (chrec_a), TREE_TYPE (chrec_b));
3727 chrec_a = chrec_convert (type, chrec_a, NULL);
3728 chrec_b = chrec_convert (type, chrec_b, NULL);
3729 difference = chrec_fold_minus (type, initial_condition (chrec_b), chrec_a);
3730
3731 /* Special case overlap in the first iteration. */
3732 if (integer_zerop (difference))
3733 {
3734 *overlaps_a = conflict_fn (1, affine_fn_cst (integer_zero_node));
3735 *overlaps_b = conflict_fn (1, affine_fn_cst (integer_zero_node));
3736 *last_conflicts = integer_one_node;
3737 return;
3738 }
3739
3740 if (!chrec_is_positive (initial_condition (difference), &value0))
3741 {
3742 if (dump_file && (dump_flags & TDF_DETAILS))
3743 fprintf (dump_file, "siv test failed: chrec is not positive.\n");
3744
3745 dependence_stats.num_siv_unimplemented++;
3746 *overlaps_a = conflict_fn_not_known ();
3747 *overlaps_b = conflict_fn_not_known ();
3748 *last_conflicts = chrec_dont_know;
3749 return;
3750 }
3751 else
3752 {
3753 if (value0 == false)
3754 {
3755 if (TREE_CODE (chrec_b) != POLYNOMIAL_CHREC
3756 || !chrec_is_positive (CHREC_RIGHT (chrec_b), &value1))
3757 {
3758 if (dump_file && (dump_flags & TDF_DETAILS))
3759 fprintf (dump_file, "siv test failed: chrec not positive.\n");
3760
3761 *overlaps_a = conflict_fn_not_known ();
3762 *overlaps_b = conflict_fn_not_known ();
3763 *last_conflicts = chrec_dont_know;
3764 dependence_stats.num_siv_unimplemented++;
3765 return;
3766 }
3767 else
3768 {
3769 if (value1 == true)
3770 {
3771 /* Example:
3772 chrec_a = 12
3773 chrec_b = {10, +, 1}
3774 */
3775
3776 if (tree_fold_divides_p (CHREC_RIGHT (chrec_b), difference))
3777 {
3778 HOST_WIDE_INT numiter;
3779 class loop *loop = get_chrec_loop (chrec_b);
3780
3781 *overlaps_a = conflict_fn (1, affine_fn_cst (integer_zero_node));
3782 tmp = fold_build2 (EXACT_DIV_EXPR, type,
3783 fold_build1 (ABS_EXPR, type, difference),
3784 CHREC_RIGHT (chrec_b));
3785 *overlaps_b = conflict_fn (1, affine_fn_cst (tmp));
3786 *last_conflicts = integer_one_node;
3787
3788
3789 /* Perform weak-zero siv test to see if overlap is
3790 outside the loop bounds. */
3791 numiter = max_stmt_executions_int (loop);
3792
3793 if (numiter >= 0
3794 && compare_tree_int (tmp, numiter) > 0)
3795 {
3796 free_conflict_function (*overlaps_a);
3797 free_conflict_function (*overlaps_b);
3798 *overlaps_a = conflict_fn_no_dependence ();
3799 *overlaps_b = conflict_fn_no_dependence ();
3800 *last_conflicts = integer_zero_node;
3801 dependence_stats.num_siv_independent++;
3802 return;
3803 }
3804 dependence_stats.num_siv_dependent++;
3805 return;
3806 }
3807
3808 /* When the step does not divide the difference, there are
3809 no overlaps. */
3810 else
3811 {
3812 *overlaps_a = conflict_fn_no_dependence ();
3813 *overlaps_b = conflict_fn_no_dependence ();
3814 *last_conflicts = integer_zero_node;
3815 dependence_stats.num_siv_independent++;
3816 return;
3817 }
3818 }
3819
3820 else
3821 {
3822 /* Example:
3823 chrec_a = 12
3824 chrec_b = {10, +, -1}
3825
3826 In this case, chrec_a will not overlap with chrec_b. */
3827 *overlaps_a = conflict_fn_no_dependence ();
3828 *overlaps_b = conflict_fn_no_dependence ();
3829 *last_conflicts = integer_zero_node;
3830 dependence_stats.num_siv_independent++;
3831 return;
3832 }
3833 }
3834 }
3835 else
3836 {
3837 if (TREE_CODE (chrec_b) != POLYNOMIAL_CHREC
3838 || !chrec_is_positive (CHREC_RIGHT (chrec_b), &value2))
3839 {
3840 if (dump_file && (dump_flags & TDF_DETAILS))
3841 fprintf (dump_file, "siv test failed: chrec not positive.\n");
3842
3843 *overlaps_a = conflict_fn_not_known ();
3844 *overlaps_b = conflict_fn_not_known ();
3845 *last_conflicts = chrec_dont_know;
3846 dependence_stats.num_siv_unimplemented++;
3847 return;
3848 }
3849 else
3850 {
3851 if (value2 == false)
3852 {
3853 /* Example:
3854 chrec_a = 3
3855 chrec_b = {10, +, -1}
3856 */
3857 if (tree_fold_divides_p (CHREC_RIGHT (chrec_b), difference))
3858 {
3859 HOST_WIDE_INT numiter;
3860 class loop *loop = get_chrec_loop (chrec_b);
3861
3862 *overlaps_a = conflict_fn (1, affine_fn_cst (integer_zero_node));
3863 tmp = fold_build2 (EXACT_DIV_EXPR, type, difference,
3864 CHREC_RIGHT (chrec_b));
3865 *overlaps_b = conflict_fn (1, affine_fn_cst (tmp));
3866 *last_conflicts = integer_one_node;
3867
3868 /* Perform weak-zero siv test to see if overlap is
3869 outside the loop bounds. */
3870 numiter = max_stmt_executions_int (loop);
3871
3872 if (numiter >= 0
3873 && compare_tree_int (tmp, numiter) > 0)
3874 {
3875 free_conflict_function (*overlaps_a);
3876 free_conflict_function (*overlaps_b);
3877 *overlaps_a = conflict_fn_no_dependence ();
3878 *overlaps_b = conflict_fn_no_dependence ();
3879 *last_conflicts = integer_zero_node;
3880 dependence_stats.num_siv_independent++;
3881 return;
3882 }
3883 dependence_stats.num_siv_dependent++;
3884 return;
3885 }
3886
3887 /* When the step does not divide the difference, there
3888 are no overlaps. */
3889 else
3890 {
3891 *overlaps_a = conflict_fn_no_dependence ();
3892 *overlaps_b = conflict_fn_no_dependence ();
3893 *last_conflicts = integer_zero_node;
3894 dependence_stats.num_siv_independent++;
3895 return;
3896 }
3897 }
3898 else
3899 {
3900 /* Example:
3901 chrec_a = 3
3902 chrec_b = {4, +, 1}
3903
3904 In this case, chrec_a will not overlap with chrec_b. */
3905 *overlaps_a = conflict_fn_no_dependence ();
3906 *overlaps_b = conflict_fn_no_dependence ();
3907 *last_conflicts = integer_zero_node;
3908 dependence_stats.num_siv_independent++;
3909 return;
3910 }
3911 }
3912 }
3913 }
3914 }
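
/* Putting the first example above in numbers: for chrec_a = 12 and
   chrec_b = {10, +, 1}_1, difference = 10 - 12 = -2, the step 1
   divides |-2|, so the only conflict is at iteration |-2| / 1 = 2 of
   loop_1, where chrec_b (2) = 12 = chrec_a.  If loop_1 executes at
   most 1 iteration, the weak-zero test discards this conflict and the
   accesses are independent.  */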
3915
3916 /* Helper recursive function for initializing the matrix A. Returns
3917 the initial value of CHREC. */
3918
3919 static tree
3920 initialize_matrix_A (lambda_matrix A, tree chrec, unsigned index, int mult)
3921 {
3922 gcc_assert (chrec);
3923
3924 switch (TREE_CODE (chrec))
3925 {
3926 case POLYNOMIAL_CHREC:
3927 if (!cst_and_fits_in_hwi (CHREC_RIGHT (chrec)))
3928 return chrec_dont_know;
3929 A[index][0] = mult * int_cst_value (CHREC_RIGHT (chrec));
3930 return initialize_matrix_A (A, CHREC_LEFT (chrec), index + 1, mult);
3931
3932 case PLUS_EXPR:
3933 case MULT_EXPR:
3934 case MINUS_EXPR:
3935 {
3936 tree op0 = initialize_matrix_A (A, TREE_OPERAND (chrec, 0), index, mult);
3937 tree op1 = initialize_matrix_A (A, TREE_OPERAND (chrec, 1), index, mult);
3938
3939 return chrec_fold_op (TREE_CODE (chrec), chrec_type (chrec), op0, op1);
3940 }
3941
3942 CASE_CONVERT:
3943 {
3944 tree op = initialize_matrix_A (A, TREE_OPERAND (chrec, 0), index, mult);
3945 return chrec_convert (chrec_type (chrec), op, NULL);
3946 }
3947
3948 case BIT_NOT_EXPR:
3949 {
3950 /* Handle ~X as -1 - X. */
3951 tree op = initialize_matrix_A (A, TREE_OPERAND (chrec, 0), index, mult);
3952 return chrec_fold_op (MINUS_EXPR, chrec_type (chrec),
3953 build_int_cst (TREE_TYPE (chrec), -1), op);
3954 }
3955
3956 case INTEGER_CST:
3957 return chrec;
3958
3959 default:
3960 gcc_unreachable ();
3961 return NULL_TREE;
3962 }
3963 }
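
/* For instance, for CHREC = {{3, +, 4}_1, +, 5}_2 and MULT = 1 this
   stores 5 (the step in loop_2) in A[index][0] and 4 (the step in
   loop_1) in A[index + 1][0], and returns the initial value 3.  */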
3964
3965 #define FLOOR_DIV(x,y) ((x) / (y))
3966
3967 /* Solves the special case of the Diophantine equation:
3968 | {0, +, STEP_A}_x (OVERLAPS_A) = {0, +, STEP_B}_y (OVERLAPS_B)
3969
3970 Computes the descriptions OVERLAPS_A and OVERLAPS_B. NITER is the
3971 number of iterations that loops X and Y run. The overlaps will be
3972 constructed as evolutions in dimension DIM. */
3973
3974 static void
3975 compute_overlap_steps_for_affine_univar (HOST_WIDE_INT niter,
3976 HOST_WIDE_INT step_a,
3977 HOST_WIDE_INT step_b,
3978 affine_fn *overlaps_a,
3979 affine_fn *overlaps_b,
3980 tree *last_conflicts, int dim)
3981 {
3982 if (((step_a > 0 && step_b > 0)
3983 || (step_a < 0 && step_b < 0)))
3984 {
3985 HOST_WIDE_INT step_overlaps_a, step_overlaps_b;
3986 HOST_WIDE_INT gcd_steps_a_b, last_conflict, tau2;
3987
3988 gcd_steps_a_b = gcd (step_a, step_b);
3989 step_overlaps_a = step_b / gcd_steps_a_b;
3990 step_overlaps_b = step_a / gcd_steps_a_b;
3991
3992 if (niter > 0)
3993 {
3994 tau2 = FLOOR_DIV (niter, step_overlaps_a);
3995 tau2 = MIN (tau2, FLOOR_DIV (niter, step_overlaps_b));
3996 last_conflict = tau2;
3997 *last_conflicts = build_int_cst (NULL_TREE, last_conflict);
3998 }
3999 else
4000 *last_conflicts = chrec_dont_know;
4001
4002 *overlaps_a = affine_fn_univar (integer_zero_node, dim,
4003 build_int_cst (NULL_TREE,
4004 step_overlaps_a));
4005 *overlaps_b = affine_fn_univar (integer_zero_node, dim,
4006 build_int_cst (NULL_TREE,
4007 step_overlaps_b));
4008 }
4009
4010 else
4011 {
4012 *overlaps_a = affine_fn_cst (integer_zero_node);
4013 *overlaps_b = affine_fn_cst (integer_zero_node);
4014 *last_conflicts = integer_zero_node;
4015 }
4016 }
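
/* For instance, for {0, +, 2}_x (OVERLAPS_A) = {0, +, 6}_y (OVERLAPS_B)
   the gcd of the steps is 2, so *OVERLAPS_A = {0, +, 3} and
   *OVERLAPS_B = {0, +, 1}: the conflicts are 2 * (3 * t) = 6 * t for
   t = 0, 1, ...  With NITER = 12, the last conflict is at
   t = MIN (12 / 3, 12 / 1) = 4.  */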
4017
4018 /* Solves the special case of a Diophantine equation where CHREC_A is
4019 an affine bivariate function, and CHREC_B is an affine univariate
4020 function. For example,
4021
4022 | {{0, +, 1}_x, +, 1335}_y = {0, +, 1336}_z
4023
4024 has the following overlapping functions:
4025
4026 | x (t, u, v) = {{0, +, 1336}_t, +, 1}_v
4027 | y (t, u, v) = {{0, +, 1336}_u, +, 1}_v
4028 | z (t, u, v) = {{{0, +, 1}_t, +, 1335}_u, +, 1}_v
4029
4030 FORNOW: This is a specialized implementation for a case occurring in
4031 a common benchmark. Implement the general algorithm. */
4032
4033 static void
4034 compute_overlap_steps_for_affine_1_2 (tree chrec_a, tree chrec_b,
4035 conflict_function **overlaps_a,
4036 conflict_function **overlaps_b,
4037 tree *last_conflicts)
4038 {
4039 bool xz_p, yz_p, xyz_p;
4040 HOST_WIDE_INT step_x, step_y, step_z;
4041 HOST_WIDE_INT niter_x, niter_y, niter_z, niter;
4042 affine_fn overlaps_a_xz, overlaps_b_xz;
4043 affine_fn overlaps_a_yz, overlaps_b_yz;
4044 affine_fn overlaps_a_xyz, overlaps_b_xyz;
4045 affine_fn ova1, ova2, ovb;
4046 tree last_conflicts_xz, last_conflicts_yz, last_conflicts_xyz;
4047
4048 step_x = int_cst_value (CHREC_RIGHT (CHREC_LEFT (chrec_a)));
4049 step_y = int_cst_value (CHREC_RIGHT (chrec_a));
4050 step_z = int_cst_value (CHREC_RIGHT (chrec_b));
4051
4052 niter_x = max_stmt_executions_int (get_chrec_loop (CHREC_LEFT (chrec_a)));
4053 niter_y = max_stmt_executions_int (get_chrec_loop (chrec_a));
4054 niter_z = max_stmt_executions_int (get_chrec_loop (chrec_b));
4055
4056 if (niter_x < 0 || niter_y < 0 || niter_z < 0)
4057 {
4058 if (dump_file && (dump_flags & TDF_DETAILS))
4059 fprintf (dump_file, "overlap steps test failed: no iteration counts.\n");
4060
4061 *overlaps_a = conflict_fn_not_known ();
4062 *overlaps_b = conflict_fn_not_known ();
4063 *last_conflicts = chrec_dont_know;
4064 return;
4065 }
4066
4067 niter = MIN (niter_x, niter_z);
4068 compute_overlap_steps_for_affine_univar (niter, step_x, step_z,
4069 &overlaps_a_xz,
4070 &overlaps_b_xz,
4071 &last_conflicts_xz, 1);
4072 niter = MIN (niter_y, niter_z);
4073 compute_overlap_steps_for_affine_univar (niter, step_y, step_z,
4074 &overlaps_a_yz,
4075 &overlaps_b_yz,
4076 &last_conflicts_yz, 2);
4077 niter = MIN (niter_x, niter_z);
4078 niter = MIN (niter_y, niter);
4079 compute_overlap_steps_for_affine_univar (niter, step_x + step_y, step_z,
4080 &overlaps_a_xyz,
4081 &overlaps_b_xyz,
4082 &last_conflicts_xyz, 3);
4083
4084 xz_p = !integer_zerop (last_conflicts_xz);
4085 yz_p = !integer_zerop (last_conflicts_yz);
4086 xyz_p = !integer_zerop (last_conflicts_xyz);
4087
4088 if (xz_p || yz_p || xyz_p)
4089 {
4090 ova1 = affine_fn_cst (integer_zero_node);
4091 ova2 = affine_fn_cst (integer_zero_node);
4092 ovb = affine_fn_cst (integer_zero_node);
4093 if (xz_p)
4094 {
4095 affine_fn t0 = ova1;
4096 affine_fn t2 = ovb;
4097
4098 ova1 = affine_fn_plus (ova1, overlaps_a_xz);
4099 ovb = affine_fn_plus (ovb, overlaps_b_xz);
4100 affine_fn_free (t0);
4101 affine_fn_free (t2);
4102 *last_conflicts = last_conflicts_xz;
4103 }
4104 if (yz_p)
4105 {
4106 affine_fn t0 = ova2;
4107 affine_fn t2 = ovb;
4108
4109 ova2 = affine_fn_plus (ova2, overlaps_a_yz);
4110 ovb = affine_fn_plus (ovb, overlaps_b_yz);
4111 affine_fn_free (t0);
4112 affine_fn_free (t2);
4113 *last_conflicts = last_conflicts_yz;
4114 }
4115 if (xyz_p)
4116 {
4117 affine_fn t0 = ova1;
4118 affine_fn t2 = ova2;
4119 affine_fn t4 = ovb;
4120
4121 ova1 = affine_fn_plus (ova1, overlaps_a_xyz);
4122 ova2 = affine_fn_plus (ova2, overlaps_a_xyz);
4123 ovb = affine_fn_plus (ovb, overlaps_b_xyz);
4124 affine_fn_free (t0);
4125 affine_fn_free (t2);
4126 affine_fn_free (t4);
4127 *last_conflicts = last_conflicts_xyz;
4128 }
4129 *overlaps_a = conflict_fn (2, ova1, ova2);
4130 *overlaps_b = conflict_fn (1, ovb);
4131 }
4132 else
4133 {
4134 *overlaps_a = conflict_fn (1, affine_fn_cst (integer_zero_node));
4135 *overlaps_b = conflict_fn (1, affine_fn_cst (integer_zero_node));
4136 *last_conflicts = integer_zero_node;
4137 }
4138
4139 affine_fn_free (overlaps_a_xz);
4140 affine_fn_free (overlaps_b_xz);
4141 affine_fn_free (overlaps_a_yz);
4142 affine_fn_free (overlaps_b_yz);
4143 affine_fn_free (overlaps_a_xyz);
4144 affine_fn_free (overlaps_b_xyz);
4145 }
4146
4147 /* Copy the elements of vector VEC1 with length SIZE to VEC2. */
4148
4149 static void
4150 lambda_vector_copy (lambda_vector vec1, lambda_vector vec2,
4151 int size)
4152 {
4153 memcpy (vec2, vec1, size * sizeof (*vec1));
4154 }
4155
4156 /* Copy the elements of M x N matrix MAT1 to MAT2. */
4157
4158 static void
4159 lambda_matrix_copy (lambda_matrix mat1, lambda_matrix mat2,
4160 int m, int n)
4161 {
4162 int i;
4163
4164 for (i = 0; i < m; i++)
4165 lambda_vector_copy (mat1[i], mat2[i], n);
4166 }
4167
4168 /* Store the N x N identity matrix in MAT. */
4169
4170 static void
4171 lambda_matrix_id (lambda_matrix mat, int size)
4172 {
4173 int i, j;
4174
4175 for (i = 0; i < size; i++)
4176 for (j = 0; j < size; j++)
4177 mat[i][j] = (i == j) ? 1 : 0;
4178 }
4179
4180 /* Return the index of the first nonzero element of vector VEC1 between
4181 START and N. We must have START <= N.
4182 Returns N if VEC1 is the zero vector. */
4183
4184 static int
4185 lambda_vector_first_nz (lambda_vector vec1, int n, int start)
4186 {
4187 int j = start;
4188 while (j < n && vec1[j] == 0)
4189 j++;
4190 return j;
4191 }
4192
4193 /* Add a multiple of row R1 of matrix MAT with N columns to row R2:
4194 R2 = R2 + CONST1 * R1. */
4195
4196 static void
4197 lambda_matrix_row_add (lambda_matrix mat, int n, int r1, int r2,
4198 lambda_int const1)
4199 {
4200 int i;
4201
4202 if (const1 == 0)
4203 return;
4204
4205 for (i = 0; i < n; i++)
4206 mat[r2][i] += const1 * mat[r1][i];
4207 }
4208
4209 /* Multiply vector VEC1 of length SIZE by a constant CONST1,
4210 and store the result in VEC2. */
4211
4212 static void
4213 lambda_vector_mult_const (lambda_vector vec1, lambda_vector vec2,
4214 int size, lambda_int const1)
4215 {
4216 int i;
4217
4218 if (const1 == 0)
4219 lambda_vector_clear (vec2, size);
4220 else
4221 for (i = 0; i < size; i++)
4222 vec2[i] = const1 * vec1[i];
4223 }
4224
4225 /* Negate vector VEC1 with length SIZE and store it in VEC2. */
4226
4227 static void
4228 lambda_vector_negate (lambda_vector vec1, lambda_vector vec2,
4229 int size)
4230 {
4231 lambda_vector_mult_const (vec1, vec2, size, -1);
4232 }
4233
4234 /* Negate row R1 of matrix MAT which has N columns. */
4235
4236 static void
4237 lambda_matrix_row_negate (lambda_matrix mat, int n, int r1)
4238 {
4239 lambda_vector_negate (mat[r1], mat[r1], n);
4240 }
4241
4242 /* Return true if two vectors are equal. */
4243
4244 static bool
4245 lambda_vector_equal (lambda_vector vec1, lambda_vector vec2, int size)
4246 {
4247 int i;
4248 for (i = 0; i < size; i++)
4249 if (vec1[i] != vec2[i])
4250 return false;
4251 return true;
4252 }
4253
4254 /* Given an M x N integer matrix A, this function determines an M x
4255 M unimodular matrix U, and an M x N echelon matrix S such that
4256 "U.A = S". This decomposition is also known as "right Hermite".
4257
4258 Ref: Algorithm 2.1 page 33 in "Loop Transformations for
4259 Restructuring Compilers" Utpal Banerjee. */
4260
4261 static void
4262 lambda_matrix_right_hermite (lambda_matrix A, int m, int n,
4263 lambda_matrix S, lambda_matrix U)
4264 {
4265 int i, j, i0 = 0;
4266
4267 lambda_matrix_copy (A, S, m, n);
4268 lambda_matrix_id (U, m);
4269
4270 for (j = 0; j < n; j++)
4271 {
4272 if (lambda_vector_first_nz (S[j], m, i0) < m)
4273 {
4274 ++i0;
4275 for (i = m - 1; i >= i0; i--)
4276 {
4277 while (S[i][j] != 0)
4278 {
4279 lambda_int sigma, factor, a, b;
4280
4281 a = S[i-1][j];
4282 b = S[i][j];
4283 sigma = ((a < 0) ^ (b < 0)) ? -1: 1;
4284 unsigned HOST_WIDE_INT abs_a = absu_hwi (a);
4285 unsigned HOST_WIDE_INT abs_b = absu_hwi (b);
4286 factor = sigma * (lambda_int)(abs_a / abs_b);
4287
4288 lambda_matrix_row_add (S, n, i, i-1, -factor);
4289 std::swap (S[i], S[i-1]);
4290
4291 lambda_matrix_row_add (U, m, i, i-1, -factor);
4292 std::swap (U[i], U[i-1]);
4293 }
4294 }
4295 }
4296 }
4297 }
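
/* For instance, for the 2 x 1 matrix A = [[4], [6]] the elimination
   produces S = [[2], [0]] and the unimodular U = [[-1, 1], [3, -2]]
   (det U = -1), so that U.A = S exposes gcd (4, 6) = 2 in S[0][0].
   This is exactly how analyze_subscript_affine_affine below recovers
   the gcd of the two evolution steps.  */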
4298
4299 /* Determines the overlapping elements due to accesses CHREC_A and
4300 CHREC_B, which are affine functions. This function cannot handle
4301 symbolic evolution functions, i.e. when initial conditions are
4302 parameters, because it uses lambda matrices of integers. */
4303
4304 static void
4305 analyze_subscript_affine_affine (tree chrec_a,
4306 tree chrec_b,
4307 conflict_function **overlaps_a,
4308 conflict_function **overlaps_b,
4309 tree *last_conflicts)
4310 {
4311 unsigned nb_vars_a, nb_vars_b, dim;
4312 lambda_int gamma, gcd_alpha_beta;
4313 lambda_matrix A, U, S;
4314 struct obstack scratch_obstack;
4315
4316 if (eq_evolutions_p (chrec_a, chrec_b))
4317 {
4318 /* The accessed index overlaps for each iteration in the
4319 loop. */
4320 *overlaps_a = conflict_fn (1, affine_fn_cst (integer_zero_node));
4321 *overlaps_b = conflict_fn (1, affine_fn_cst (integer_zero_node));
4322 *last_conflicts = chrec_dont_know;
4323 return;
4324 }
4325 if (dump_file && (dump_flags & TDF_DETAILS))
4326 fprintf (dump_file, "(analyze_subscript_affine_affine \n");
4327
4328 /* For determining the initial intersection, we have to solve a
4329 Diophantine equation. This is the most time-consuming part.
4330
4331 To answer the question "Is there a dependence?" we have
4332 to prove that there exists a solution to the Diophantine
4333 equation, and that the solution is in the iteration domain,
4334 i.e. the solution is positive or zero, and that the solution
4335 happens before the upper bound loop.nb_iterations. Otherwise
4336 there is no dependence. This function outputs a description of
4337 the iterations that hold the intersections. */
4338
4339 nb_vars_a = nb_vars_in_chrec (chrec_a);
4340 nb_vars_b = nb_vars_in_chrec (chrec_b);
4341
4342 gcc_obstack_init (&scratch_obstack);
4343
4344 dim = nb_vars_a + nb_vars_b;
4345 U = lambda_matrix_new (dim, dim, &scratch_obstack);
4346 A = lambda_matrix_new (dim, 1, &scratch_obstack);
4347 S = lambda_matrix_new (dim, 1, &scratch_obstack);
4348
4349 tree init_a = initialize_matrix_A (A, chrec_a, 0, 1);
4350 tree init_b = initialize_matrix_A (A, chrec_b, nb_vars_a, -1);
4351 if (init_a == chrec_dont_know
4352 || init_b == chrec_dont_know)
4353 {
4354 if (dump_file && (dump_flags & TDF_DETAILS))
4355 fprintf (dump_file, "affine-affine test failed: "
4356 "representation issue.\n");
4357 *overlaps_a = conflict_fn_not_known ();
4358 *overlaps_b = conflict_fn_not_known ();
4359 *last_conflicts = chrec_dont_know;
4360 goto end_analyze_subs_aa;
4361 }
4362 gamma = int_cst_value (init_b) - int_cst_value (init_a);
4363
4364 /* Don't do all the hard work of solving the Diophantine equation
4365 when we already know the solution: for example,
4366 | {3, +, 1}_1
4367 | {3, +, 4}_2
4368 | gamma = 3 - 3 = 0.
4369 Then the first overlap occurs during the first iterations:
4370 | {3, +, 1}_1 ({0, +, 4}_x) = {3, +, 4}_2 ({0, +, 1}_x)
4371 */
4372 if (gamma == 0)
4373 {
4374 if (nb_vars_a == 1 && nb_vars_b == 1)
4375 {
4376 HOST_WIDE_INT step_a, step_b;
4377 HOST_WIDE_INT niter, niter_a, niter_b;
4378 affine_fn ova, ovb;
4379
4380 niter_a = max_stmt_executions_int (get_chrec_loop (chrec_a));
4381 niter_b = max_stmt_executions_int (get_chrec_loop (chrec_b));
4382 niter = MIN (niter_a, niter_b);
4383 step_a = int_cst_value (CHREC_RIGHT (chrec_a));
4384 step_b = int_cst_value (CHREC_RIGHT (chrec_b));
4385
4386 compute_overlap_steps_for_affine_univar (niter, step_a, step_b,
4387 &ova, &ovb,
4388 last_conflicts, 1);
4389 *overlaps_a = conflict_fn (1, ova);
4390 *overlaps_b = conflict_fn (1, ovb);
4391 }
4392
4393 else if (nb_vars_a == 2 && nb_vars_b == 1)
4394 compute_overlap_steps_for_affine_1_2
4395 (chrec_a, chrec_b, overlaps_a, overlaps_b, last_conflicts);
4396
4397 else if (nb_vars_a == 1 && nb_vars_b == 2)
4398 compute_overlap_steps_for_affine_1_2
4399 (chrec_b, chrec_a, overlaps_b, overlaps_a, last_conflicts);
4400
4401 else
4402 {
4403 if (dump_file && (dump_flags & TDF_DETAILS))
4404 fprintf (dump_file, "affine-affine test failed: too many variables.\n");
4405 *overlaps_a = conflict_fn_not_known ();
4406 *overlaps_b = conflict_fn_not_known ();
4407 *last_conflicts = chrec_dont_know;
4408 }
4409 goto end_analyze_subs_aa;
4410 }
4411
4412 /* U.A = S */
4413 lambda_matrix_right_hermite (A, dim, 1, S, U);
4414
4415 if (S[0][0] < 0)
4416 {
4417 S[0][0] *= -1;
4418 lambda_matrix_row_negate (U, dim, 0);
4419 }
4420 gcd_alpha_beta = S[0][0];
4421
4422 /* Something went wrong: for example in {1, +, 0}_5 vs. {0, +, 0}_5,
4423 but that is quite a strange case. Instead of ICEing, answer
4424 don't know. */
4425 if (gcd_alpha_beta == 0)
4426 {
4427 *overlaps_a = conflict_fn_not_known ();
4428 *overlaps_b = conflict_fn_not_known ();
4429 *last_conflicts = chrec_dont_know;
4430 goto end_analyze_subs_aa;
4431 }
4432
4433 /* The classic "gcd-test". */
4434 if (!int_divides_p (gcd_alpha_beta, gamma))
4435 {
4436 /* The "gcd-test" has determined that there is no integer
4437 solution, i.e. there is no dependence. */
4438 *overlaps_a = conflict_fn_no_dependence ();
4439 *overlaps_b = conflict_fn_no_dependence ();
4440 *last_conflicts = integer_zero_node;
4441 }
4442
4443 /* Both access functions are univariate. This includes SIV and MIV cases. */
4444 else if (nb_vars_a == 1 && nb_vars_b == 1)
4445 {
4446 /* Both functions should have the same evolution sign. */
4447 if (((A[0][0] > 0 && -A[1][0] > 0)
4448 || (A[0][0] < 0 && -A[1][0] < 0)))
4449 {
4450 /* The solutions are given by:
4451 |
4452 | [GAMMA/GCD_ALPHA_BETA t].[u11 u12] = [x0]
4453 | [u21 u22] [y0]
4454
4455 For a given integer t. Using the following variables,
4456
4457 | i0 = u11 * gamma / gcd_alpha_beta
4458 | j0 = u12 * gamma / gcd_alpha_beta
4459 | i1 = u21
4460 | j1 = u22
4461
4462 the solutions are:
4463
4464 | x0 = i0 + i1 * t,
4465 | y0 = j0 + j1 * t. */
4466 HOST_WIDE_INT i0, j0, i1, j1;
4467
4468 i0 = U[0][0] * gamma / gcd_alpha_beta;
4469 j0 = U[0][1] * gamma / gcd_alpha_beta;
4470 i1 = U[1][0];
4471 j1 = U[1][1];
4472
4473 if ((i1 == 0 && i0 < 0)
4474 || (j1 == 0 && j0 < 0))
4475 {
4476 /* There is no solution.
4477 FIXME: The case "i0 > nb_iterations, j0 > nb_iterations"
4478 falls in here, but for the moment we don't look at the
4479 upper bound of the iteration domain. */
4480 *overlaps_a = conflict_fn_no_dependence ();
4481 *overlaps_b = conflict_fn_no_dependence ();
4482 *last_conflicts = integer_zero_node;
4483 goto end_analyze_subs_aa;
4484 }
4485
4486 if (i1 > 0 && j1 > 0)
4487 {
4488 HOST_WIDE_INT niter_a
4489 = max_stmt_executions_int (get_chrec_loop (chrec_a));
4490 HOST_WIDE_INT niter_b
4491 = max_stmt_executions_int (get_chrec_loop (chrec_b));
4492 HOST_WIDE_INT niter = MIN (niter_a, niter_b);
4493
4494 /* (X0, Y0) is a solution of the Diophantine equation:
4495 "chrec_a (X0) = chrec_b (Y0)". */
4496 HOST_WIDE_INT tau1 = MAX (CEIL (-i0, i1),
4497 CEIL (-j0, j1));
4498 HOST_WIDE_INT x0 = i1 * tau1 + i0;
4499 HOST_WIDE_INT y0 = j1 * tau1 + j0;
4500
4501 /* (X1, Y1) is the smallest positive solution of the eq
4502 "chrec_a (X1) = chrec_b (Y1)", i.e. this is where the
4503 first conflict occurs. */
4504 HOST_WIDE_INT min_multiple = MIN (x0 / i1, y0 / j1);
4505 HOST_WIDE_INT x1 = x0 - i1 * min_multiple;
4506 HOST_WIDE_INT y1 = y0 - j1 * min_multiple;
4507
4508 if (niter > 0)
4509 {
4510 /* If the overlap occurs outside of the bounds of the
4511 loop, there is no dependence. */
4512 if (x1 >= niter_a || y1 >= niter_b)
4513 {
4514 *overlaps_a = conflict_fn_no_dependence ();
4515 *overlaps_b = conflict_fn_no_dependence ();
4516 *last_conflicts = integer_zero_node;
4517 goto end_analyze_subs_aa;
4518 }
4519
4520 /* max stmt executions can get quite large, avoid
4521 overflows by using wide ints here. */
4522 widest_int tau2
4523 = wi::smin (wi::sdiv_floor (wi::sub (niter_a, i0), i1),
4524 wi::sdiv_floor (wi::sub (niter_b, j0), j1));
4525 widest_int last_conflict = wi::sub (tau2, (x1 - i0)/i1);
4526 if (wi::min_precision (last_conflict, SIGNED)
4527 <= TYPE_PRECISION (integer_type_node))
4528 *last_conflicts
4529 = build_int_cst (integer_type_node,
4530 last_conflict.to_shwi ());
4531 else
4532 *last_conflicts = chrec_dont_know;
4533 }
4534 else
4535 *last_conflicts = chrec_dont_know;
4536
4537 *overlaps_a
4538 = conflict_fn (1,
4539 affine_fn_univar (build_int_cst (NULL_TREE, x1),
4540 1,
4541 build_int_cst (NULL_TREE, i1)));
4542 *overlaps_b
4543 = conflict_fn (1,
4544 affine_fn_univar (build_int_cst (NULL_TREE, y1),
4545 1,
4546 build_int_cst (NULL_TREE, j1)));
4547 }
4548 else
4549 {
4550 /* FIXME: For the moment, the upper bound of the
4551 iteration domain for i and j is not checked. */
4552 if (dump_file && (dump_flags & TDF_DETAILS))
4553 fprintf (dump_file, "affine-affine test failed: unimplemented.\n");
4554 *overlaps_a = conflict_fn_not_known ();
4555 *overlaps_b = conflict_fn_not_known ();
4556 *last_conflicts = chrec_dont_know;
4557 }
4558 }
4559 else
4560 {
4561 if (dump_file && (dump_flags & TDF_DETAILS))
4562 fprintf (dump_file, "affine-affine test failed: unimplemented.\n");
4563 *overlaps_a = conflict_fn_not_known ();
4564 *overlaps_b = conflict_fn_not_known ();
4565 *last_conflicts = chrec_dont_know;
4566 }
4567 }
4568 else
4569 {
4570 if (dump_file && (dump_flags & TDF_DETAILS))
4571 fprintf (dump_file, "affine-affine test failed: unimplemented.\n");
4572 *overlaps_a = conflict_fn_not_known ();
4573 *overlaps_b = conflict_fn_not_known ();
4574 *last_conflicts = chrec_dont_know;
4575 }
4576
4577 end_analyze_subs_aa:
4578 obstack_free (&scratch_obstack, NULL);
4579 if (dump_file && (dump_flags & TDF_DETAILS))
4580 {
4581 fprintf (dump_file, " (overlaps_a = ");
4582 dump_conflict_function (dump_file, *overlaps_a);
4583 fprintf (dump_file, ")\n (overlaps_b = ");
4584 dump_conflict_function (dump_file, *overlaps_b);
4585 fprintf (dump_file, "))\n");
4586 }
4587 }
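
/* To make the gcd test concrete: for {0, +, 2}_1 vs. {1, +, 2}_1,
   gamma = 1 - 0 = 1 and gcd_alpha_beta = gcd (2, 2) = 2; since 2 does
   not divide 1, the equation 2*x = 1 + 2*y has no integer solution
   and the accesses are independent.  For {0, +, 4}_1 vs. {2, +, 2}_1,
   gamma = 2 is divisible by gcd (4, 2) = 2, and the first conflict
   4*1 = 2 + 2*1 occurs at x = 1, y = 1.  */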
4588
4589 /* Returns true when analyze_subscript_affine_affine can be used for
4590 determining the dependence relation between chrec_a and chrec_b,
4591 that contain symbols. This function modifies chrec_a and chrec_b
4592 such that the analysis result is the same, and such that they don't
4593 contain symbols, and then can safely be passed to the analyzer.
4594
4595 Example: The analysis of the following tuples of evolutions produces
4596 the same results: {x+1, +, 1}_1 vs. {x+3, +, 1}_1, and {-2, +, 1}_1
4597 vs. {0, +, 1}_1
4598
4599 {x+1, +, 1}_1 ({2, +, 1}_1) = {x+3, +, 1}_1 ({0, +, 1}_1)
4600 {-2, +, 1}_1 ({2, +, 1}_1) = {0, +, 1}_1 ({0, +, 1}_1)
4601 */
4602
4603 static bool
4604 can_use_analyze_subscript_affine_affine (tree *chrec_a, tree *chrec_b)
4605 {
4606 tree diff, type, left_a, left_b, right_b;
4607
4608 if (chrec_contains_symbols (CHREC_RIGHT (*chrec_a))
4609 || chrec_contains_symbols (CHREC_RIGHT (*chrec_b)))
4610 /* FIXME: For the moment not handled. Might be refined later. */
4611 return false;
4612
4613 type = chrec_type (*chrec_a);
4614 left_a = CHREC_LEFT (*chrec_a);
4615 left_b = chrec_convert (type, CHREC_LEFT (*chrec_b), NULL);
4616 diff = chrec_fold_minus (type, left_a, left_b);
4617
4618 if (!evolution_function_is_constant_p (diff))
4619 return false;
4620
4621 if (dump_file && (dump_flags & TDF_DETAILS))
4622 fprintf (dump_file, "can_use_subscript_aff_aff_for_symbolic \n");
4623
4624 *chrec_a = build_polynomial_chrec (CHREC_VARIABLE (*chrec_a),
4625 diff, CHREC_RIGHT (*chrec_a));
4626 right_b = chrec_convert (type, CHREC_RIGHT (*chrec_b), NULL);
4627 *chrec_b = build_polynomial_chrec (CHREC_VARIABLE (*chrec_b),
4628 build_int_cst (type, 0),
4629 right_b);
4630 return true;
4631 }
4632
4633 /* Analyze a SIV (Single Index Variable) subscript. *OVERLAPS_A and
4634 *OVERLAPS_B are initialized to the functions that describe the
4635 relation between the elements accessed twice by CHREC_A and
4636 CHREC_B. For k >= 0, the following property holds:
4637
4638 CHREC_A (*OVERLAPS_A (k)) = CHREC_B (*OVERLAPS_B (k)). */
4639
4640 static void
4641 analyze_siv_subscript (tree chrec_a,
4642 tree chrec_b,
4643 conflict_function **overlaps_a,
4644 conflict_function **overlaps_b,
4645 tree *last_conflicts,
4646 int loop_nest_num)
4647 {
4648 dependence_stats.num_siv++;
4649
4650 if (dump_file && (dump_flags & TDF_DETAILS))
4651 fprintf (dump_file, "(analyze_siv_subscript \n");
4652
4653 if (evolution_function_is_constant_p (chrec_a)
4654 && evolution_function_is_affine_in_loop (chrec_b, loop_nest_num))
4655 analyze_siv_subscript_cst_affine (chrec_a, chrec_b,
4656 overlaps_a, overlaps_b, last_conflicts);
4657
4658 else if (evolution_function_is_affine_in_loop (chrec_a, loop_nest_num)
4659 && evolution_function_is_constant_p (chrec_b))
4660 analyze_siv_subscript_cst_affine (chrec_b, chrec_a,
4661 overlaps_b, overlaps_a, last_conflicts);
4662
4663 else if (evolution_function_is_affine_in_loop (chrec_a, loop_nest_num)
4664 && evolution_function_is_affine_in_loop (chrec_b, loop_nest_num))
4665 {
4666 if (!chrec_contains_symbols (chrec_a)
4667 && !chrec_contains_symbols (chrec_b))
4668 {
4669 analyze_subscript_affine_affine (chrec_a, chrec_b,
4670 overlaps_a, overlaps_b,
4671 last_conflicts);
4672
4673 if (CF_NOT_KNOWN_P (*overlaps_a)
4674 || CF_NOT_KNOWN_P (*overlaps_b))
4675 dependence_stats.num_siv_unimplemented++;
4676 else if (CF_NO_DEPENDENCE_P (*overlaps_a)
4677 || CF_NO_DEPENDENCE_P (*overlaps_b))
4678 dependence_stats.num_siv_independent++;
4679 else
4680 dependence_stats.num_siv_dependent++;
4681 }
4682 else if (can_use_analyze_subscript_affine_affine (&chrec_a,
4683 &chrec_b))
4684 {
4685 analyze_subscript_affine_affine (chrec_a, chrec_b,
4686 overlaps_a, overlaps_b,
4687 last_conflicts);
4688
4689 if (CF_NOT_KNOWN_P (*overlaps_a)
4690 || CF_NOT_KNOWN_P (*overlaps_b))
4691 dependence_stats.num_siv_unimplemented++;
4692 else if (CF_NO_DEPENDENCE_P (*overlaps_a)
4693 || CF_NO_DEPENDENCE_P (*overlaps_b))
4694 dependence_stats.num_siv_independent++;
4695 else
4696 dependence_stats.num_siv_dependent++;
4697 }
4698 else
4699 goto siv_subscript_dontknow;
4700 }
4701
4702 else
4703 {
4704 siv_subscript_dontknow:;
4705 if (dump_file && (dump_flags & TDF_DETAILS))
4706 fprintf (dump_file, "siv test failed: unimplemented.\n");
4707 *overlaps_a = conflict_fn_not_known ();
4708 *overlaps_b = conflict_fn_not_known ();
4709 *last_conflicts = chrec_dont_know;
4710 dependence_stats.num_siv_unimplemented++;
4711 }
4712
4713 if (dump_file && (dump_flags & TDF_DETAILS))
4714 fprintf (dump_file, ")\n");
4715 }
4716
4717 /* Returns false if we can prove that the greatest common divisor of the steps
4718 of CHREC does not divide CST, true otherwise. */
4719
4720 static bool
4721 gcd_of_steps_may_divide_p (const_tree chrec, const_tree cst)
4722 {
4723 HOST_WIDE_INT cd = 0, val;
4724 tree step;
4725
4726 if (!tree_fits_shwi_p (cst))
4727 return true;
4728 val = tree_to_shwi (cst);
4729
4730 while (TREE_CODE (chrec) == POLYNOMIAL_CHREC)
4731 {
4732 step = CHREC_RIGHT (chrec);
4733 if (!tree_fits_shwi_p (step))
4734 return true;
4735 cd = gcd (cd, tree_to_shwi (step));
4736 chrec = CHREC_LEFT (chrec);
4737 }
4738
4739 return val % cd == 0;
4740 }
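
/* For instance, for CHREC = {{21, +, 2}_1, +, -2}_2 and CST = 1 the
   gcd of the steps is gcd (2, -2) = 2, and 1 % 2 != 0, so the
   function returns false: no combination of the steps can make up a
   difference of 1.  This is how analyze_miv_subscript below proves
   independence for the ssa-chrec-33.c example.  */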
4741
4742 /* Analyze a MIV (Multiple Index Variable) subscript with respect to
4743 LOOP_NEST. *OVERLAPS_A and *OVERLAPS_B are initialized to the
4744 functions that describe the relation between the elements accessed
4745 twice by CHREC_A and CHREC_B. For k >= 0, the following property
4746 holds:
4747
4748 CHREC_A (*OVERLAPS_A (k)) = CHREC_B (*OVERLAPS_B (k)). */
4749
4750 static void
4751 analyze_miv_subscript (tree chrec_a,
4752 tree chrec_b,
4753 conflict_function **overlaps_a,
4754 conflict_function **overlaps_b,
4755 tree *last_conflicts,
4756 class loop *loop_nest)
4757 {
4758 tree type, difference;
4759
4760 dependence_stats.num_miv++;
4761 if (dump_file && (dump_flags & TDF_DETAILS))
4762 fprintf (dump_file, "(analyze_miv_subscript \n");
4763
4764 type = signed_type_for_types (TREE_TYPE (chrec_a), TREE_TYPE (chrec_b));
4765 chrec_a = chrec_convert (type, chrec_a, NULL);
4766 chrec_b = chrec_convert (type, chrec_b, NULL);
4767 difference = chrec_fold_minus (type, chrec_a, chrec_b);
4768
4769 if (eq_evolutions_p (chrec_a, chrec_b))
4770 {
4771 /* Access functions are the same: all the elements are accessed
4772 in the same order. */
4773 *overlaps_a = conflict_fn (1, affine_fn_cst (integer_zero_node));
4774 *overlaps_b = conflict_fn (1, affine_fn_cst (integer_zero_node));
4775 *last_conflicts = max_stmt_executions_tree (get_chrec_loop (chrec_a));
4776 dependence_stats.num_miv_dependent++;
4777 }
4778
4779 else if (evolution_function_is_constant_p (difference)
4780 && evolution_function_is_affine_multivariate_p (chrec_a,
4781 loop_nest->num)
4782 && !gcd_of_steps_may_divide_p (chrec_a, difference))
4783 {
4784 /* testsuite/.../ssa-chrec-33.c
4785 {{21, +, 2}_1, +, -2}_2 vs. {{20, +, 2}_1, +, -2}_2
4786
4787 The difference is 1, and all the evolution steps are multiples
4788 of 2, consequently there are no overlapping elements. */
4789 *overlaps_a = conflict_fn_no_dependence ();
4790 *overlaps_b = conflict_fn_no_dependence ();
4791 *last_conflicts = integer_zero_node;
4792 dependence_stats.num_miv_independent++;
4793 }
4794
4795 else if (evolution_function_is_affine_in_loop (chrec_a, loop_nest->num)
4796 && !chrec_contains_symbols (chrec_a, loop_nest)
4797 && evolution_function_is_affine_in_loop (chrec_b, loop_nest->num)
4798 && !chrec_contains_symbols (chrec_b, loop_nest))
4799 {
4800 /* testsuite/.../ssa-chrec-35.c
4801 {0, +, 1}_2 vs. {0, +, 1}_3
4802 the overlapping elements are respectively located at iterations:
4803 {0, +, 1}_x and {0, +, 1}_x,
4804 in other words, we have the equality:
4805 {0, +, 1}_2 ({0, +, 1}_x) = {0, +, 1}_3 ({0, +, 1}_x)
4806
4807 Other examples:
4808 {{0, +, 1}_1, +, 2}_2 ({0, +, 1}_x, {0, +, 1}_y) =
4809 {0, +, 1}_1 ({{0, +, 1}_x, +, 2}_y)
4810
4811 {{0, +, 2}_1, +, 3}_2 ({0, +, 1}_y, {0, +, 1}_x) =
4812 {{0, +, 3}_1, +, 2}_2 ({0, +, 1}_x, {0, +, 1}_y)
4813 */
4814 analyze_subscript_affine_affine (chrec_a, chrec_b,
4815 overlaps_a, overlaps_b, last_conflicts);
4816
4817 if (CF_NOT_KNOWN_P (*overlaps_a)
4818 || CF_NOT_KNOWN_P (*overlaps_b))
4819 dependence_stats.num_miv_unimplemented++;
4820 else if (CF_NO_DEPENDENCE_P (*overlaps_a)
4821 || CF_NO_DEPENDENCE_P (*overlaps_b))
4822 dependence_stats.num_miv_independent++;
4823 else
4824 dependence_stats.num_miv_dependent++;
4825 }
4826
4827 else
4828 {
4829 /* When the analysis is too difficult, answer "don't know". */
4830 if (dump_file && (dump_flags & TDF_DETAILS))
4831 fprintf (dump_file, "analyze_miv_subscript test failed: unimplemented.\n");
4832
4833 *overlaps_a = conflict_fn_not_known ();
4834 *overlaps_b = conflict_fn_not_known ();
4835 *last_conflicts = chrec_dont_know;
4836 dependence_stats.num_miv_unimplemented++;
4837 }
4838
4839 if (dump_file && (dump_flags & TDF_DETAILS))
4840 fprintf (dump_file, ")\n");
4841 }
4842
4843 /* Determines the iterations for which CHREC_A is equal to CHREC_B
4844 with respect to LOOP_NEST. OVERLAP_ITERATIONS_A and
4845 OVERLAP_ITERATIONS_B are initialized with two functions that
4846 describe the iterations that contain conflicting elements.
4847
4848 Remark: For an integer k >= 0, the following equality is true:
4849
4850 CHREC_A (OVERLAP_ITERATIONS_A (k)) == CHREC_B (OVERLAP_ITERATIONS_B (k)).
4851 */
4852
4853 static void
4854 analyze_overlapping_iterations (tree chrec_a,
4855 tree chrec_b,
4856 conflict_function **overlap_iterations_a,
4857 conflict_function **overlap_iterations_b,
4858 tree *last_conflicts, class loop *loop_nest)
4859 {
4860 unsigned int lnn = loop_nest->num;
4861
4862 dependence_stats.num_subscript_tests++;
4863
4864 if (dump_file && (dump_flags & TDF_DETAILS))
4865 {
4866 fprintf (dump_file, "(analyze_overlapping_iterations \n");
4867 fprintf (dump_file, " (chrec_a = ");
4868 print_generic_expr (dump_file, chrec_a);
4869 fprintf (dump_file, ")\n (chrec_b = ");
4870 print_generic_expr (dump_file, chrec_b);
4871 fprintf (dump_file, ")\n");
4872 }
4873
4874 if (chrec_a == NULL_TREE
4875 || chrec_b == NULL_TREE
4876 || chrec_contains_undetermined (chrec_a)
4877 || chrec_contains_undetermined (chrec_b))
4878 {
4879 dependence_stats.num_subscript_undetermined++;
4880
4881 *overlap_iterations_a = conflict_fn_not_known ();
4882 *overlap_iterations_b = conflict_fn_not_known ();
4883 }
4884
4885 /* If they are the same chrec, and are affine, they overlap
4886 on every iteration. */
4887 else if (eq_evolutions_p (chrec_a, chrec_b)
4888 && (evolution_function_is_affine_multivariate_p (chrec_a, lnn)
4889 || operand_equal_p (chrec_a, chrec_b, 0)))
4890 {
4891 dependence_stats.num_same_subscript_function++;
4892 *overlap_iterations_a = conflict_fn (1, affine_fn_cst (integer_zero_node));
4893 *overlap_iterations_b = conflict_fn (1, affine_fn_cst (integer_zero_node));
4894 *last_conflicts = chrec_dont_know;
4895 }
4896
4897 /* If they aren't the same, and aren't affine, we can't do anything
4898 yet. */
4899 else if ((chrec_contains_symbols (chrec_a)
4900 || chrec_contains_symbols (chrec_b))
4901 && (!evolution_function_is_affine_multivariate_p (chrec_a, lnn)
4902 || !evolution_function_is_affine_multivariate_p (chrec_b, lnn)))
4903 {
4904 dependence_stats.num_subscript_undetermined++;
4905 *overlap_iterations_a = conflict_fn_not_known ();
4906 *overlap_iterations_b = conflict_fn_not_known ();
4907 }
4908
4909 else if (ziv_subscript_p (chrec_a, chrec_b))
4910 analyze_ziv_subscript (chrec_a, chrec_b,
4911 overlap_iterations_a, overlap_iterations_b,
4912 last_conflicts);
4913
4914 else if (siv_subscript_p (chrec_a, chrec_b))
4915 analyze_siv_subscript (chrec_a, chrec_b,
4916 overlap_iterations_a, overlap_iterations_b,
4917 last_conflicts, lnn);
4918
4919 else
4920 analyze_miv_subscript (chrec_a, chrec_b,
4921 overlap_iterations_a, overlap_iterations_b,
4922 last_conflicts, loop_nest);
4923
4924 if (dump_file && (dump_flags & TDF_DETAILS))
4925 {
4926 fprintf (dump_file, " (overlap_iterations_a = ");
4927 dump_conflict_function (dump_file, *overlap_iterations_a);
4928 fprintf (dump_file, ")\n (overlap_iterations_b = ");
4929 dump_conflict_function (dump_file, *overlap_iterations_b);
4930 fprintf (dump_file, "))\n");
4931 }
4932 }
4933
4934 /* Helper function for uniquely inserting distance vectors. */
4935
4936 static void
4937 save_dist_v (struct data_dependence_relation *ddr, lambda_vector dist_v)
4938 {
4939 unsigned i;
4940 lambda_vector v;
4941
4942 FOR_EACH_VEC_ELT (DDR_DIST_VECTS (ddr), i, v)
4943 if (lambda_vector_equal (v, dist_v, DDR_NB_LOOPS (ddr)))
4944 return;
4945
4946 DDR_DIST_VECTS (ddr).safe_push (dist_v);
4947 }
4948
4949 /* Helper function for uniquely inserting direction vectors. */
4950
4951 static void
4952 save_dir_v (struct data_dependence_relation *ddr, lambda_vector dir_v)
4953 {
4954 unsigned i;
4955 lambda_vector v;
4956
4957 FOR_EACH_VEC_ELT (DDR_DIR_VECTS (ddr), i, v)
4958 if (lambda_vector_equal (v, dir_v, DDR_NB_LOOPS (ddr)))
4959 return;
4960
4961 DDR_DIR_VECTS (ddr).safe_push (dir_v);
4962 }
4963
4964 /* Add a distance of 1 on all the loops enclosing the loop at INDEX. If we
4965 haven't yet determined a distance for this outer loop, push a new
4966 distance vector composed of the previous distance, and a distance
4967 of 1 for this outer loop. Example:
4968
4969 | loop_1
4970 | loop_2
4971 | A[10]
4972 | endloop_2
4973 | endloop_1
4974
4975 Saved vectors are of the form (dist_in_1, dist_in_2). First, we
4976 save (0, 1), then we have to save (1, 0). */
4977
4978 static void
4979 add_outer_distances (struct data_dependence_relation *ddr,
4980 lambda_vector dist_v, int index)
4981 {
4982 /* For each outer loop where init_v is not set, the accesses have
4983 a dependence of distance 1 in that loop. */
4984 while (--index >= 0)
4985 {
4986 lambda_vector save_v = lambda_vector_new (DDR_NB_LOOPS (ddr));
4987 lambda_vector_copy (dist_v, save_v, DDR_NB_LOOPS (ddr));
4988 save_v[index] = 1;
4989 save_dist_v (ddr, save_v);
4990 }
4991 }
4992
4993 /* Return false when we fail to represent the data dependence as a
4994 distance vector. A_INDEX is the index of the first reference
4995 (0 for DDR_A, 1 for DDR_B) and B_INDEX is the index of the
4996 second reference. INIT_B is set to true when a component has been
4997 added to the distance vector DIST_V. INDEX_CARRY is then set to
4998 the index in DIST_V that carries the dependence. */
4999
5000 static bool
5001 build_classic_dist_vector_1 (struct data_dependence_relation *ddr,
5002 unsigned int a_index, unsigned int b_index,
5003 lambda_vector dist_v, bool *init_b,
5004 int *index_carry)
5005 {
5006 unsigned i;
5007 lambda_vector init_v = lambda_vector_new (DDR_NB_LOOPS (ddr));
5008 class loop *loop = DDR_LOOP_NEST (ddr)[0];
5009
5010 for (i = 0; i < DDR_NUM_SUBSCRIPTS (ddr); i++)
5011 {
5012 tree access_fn_a, access_fn_b;
5013 struct subscript *subscript = DDR_SUBSCRIPT (ddr, i);
5014
5015 if (chrec_contains_undetermined (SUB_DISTANCE (subscript)))
5016 {
5017 non_affine_dependence_relation (ddr);
5018 return false;
5019 }
5020
5021 access_fn_a = SUB_ACCESS_FN (subscript, a_index);
5022 access_fn_b = SUB_ACCESS_FN (subscript, b_index);
5023
5024 if (TREE_CODE (access_fn_a) == POLYNOMIAL_CHREC
5025 && TREE_CODE (access_fn_b) == POLYNOMIAL_CHREC)
5026 {
5027 HOST_WIDE_INT dist;
5028 int index;
5029 int var_a = CHREC_VARIABLE (access_fn_a);
5030 int var_b = CHREC_VARIABLE (access_fn_b);
5031
5032 if (var_a != var_b
5033 || chrec_contains_undetermined (SUB_DISTANCE (subscript)))
5034 {
5035 non_affine_dependence_relation (ddr);
5036 return false;
5037 }
5038
5039 /* When data references are collected in a loop while data
5040 dependences are analyzed in a loop nest nested inside that loop, we
5041 may have more access functions than loops in the nest.
5042 Skip access functions of loops not in the loop nest.
5043
5044 See PR89725 for more information. */
5045 if (flow_loop_nested_p (get_loop (cfun, var_a), loop))
5046 continue;
5047
5048 dist = int_cst_value (SUB_DISTANCE (subscript));
5049 index = index_in_loop_nest (var_a, DDR_LOOP_NEST (ddr));
5050 *index_carry = MIN (index, *index_carry);
5051
5052 /* This is the subscript coupling test. If we have already
5053 recorded a distance for this loop (a distance coming from
5054 another subscript), it should be the same. For example,
5055 in the following code, there is no dependence:
5056
5057 | loop i = 0, N, 1
5058 | T[i+1][i] = ...
5059 | ... = T[i][i]
5060 | endloop
5061 */
5062 if (init_v[index] != 0 && dist_v[index] != dist)
5063 {
5064 finalize_ddr_dependent (ddr, chrec_known);
5065 return false;
5066 }
5067
5068 dist_v[index] = dist;
5069 init_v[index] = 1;
5070 *init_b = true;
5071 }
5072 else if (!operand_equal_p (access_fn_a, access_fn_b, 0))
5073 {
5074 /* This can be for example an affine vs. constant dependence
5075 (T[i] vs. T[3]) that is not an affine dependence and is
5076 not representable as a distance vector. */
5077 non_affine_dependence_relation (ddr);
5078 return false;
5079 }
5080 }
5081
5082 return true;
5083 }
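
/* In the example above, the first subscript (T[i+1] vs. T[i]) requires
   a distance of 1 in loop i while the second (T[i] vs. T[i]) requires
   a distance of 0; the two requirements are incompatible, so the same
   element can never be accessed twice and the relation is finalized
   to chrec_known.  */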
5084
5085 /* Return true when the DDR contains only access functions that are
5086 invariant with respect to loop number LNUM. */
5087
5088 static bool
5089 invariant_access_functions (const struct data_dependence_relation *ddr,
5090 int lnum)
5091 {
5092 unsigned i;
5093 subscript *sub;
5094
5095 FOR_EACH_VEC_ELT (DDR_SUBSCRIPTS (ddr), i, sub)
5096 if (!evolution_function_is_invariant_p (SUB_ACCESS_FN (sub, 0), lnum)
5097 || !evolution_function_is_invariant_p (SUB_ACCESS_FN (sub, 1), lnum))
5098 return false;
5099
5100 return true;
5101 }
5102
5103 /* Helper function for the case where DDR_A and DDR_B are the same
5104 multivariate access function with a constant step. For an example
5105 see pr34635-1.c. */
5106
5107 static void
5108 add_multivariate_self_dist (struct data_dependence_relation *ddr, tree c_2)
5109 {
5110 int x_1, x_2;
5111 tree c_1 = CHREC_LEFT (c_2);
5112 tree c_0 = CHREC_LEFT (c_1);
5113 lambda_vector dist_v;
5114 HOST_WIDE_INT v1, v2, cd;
5115
5116 /* Polynomials with more than 2 variables are not handled yet. When
5117 the evolution steps are parameters, it is not possible to
5118 represent the dependence using classical distance vectors. */
5119 if (TREE_CODE (c_0) != INTEGER_CST
5120 || TREE_CODE (CHREC_RIGHT (c_1)) != INTEGER_CST
5121 || TREE_CODE (CHREC_RIGHT (c_2)) != INTEGER_CST)
5122 {
5123 DDR_AFFINE_P (ddr) = false;
5124 return;
5125 }
5126
5127 x_2 = index_in_loop_nest (CHREC_VARIABLE (c_2), DDR_LOOP_NEST (ddr));
5128 x_1 = index_in_loop_nest (CHREC_VARIABLE (c_1), DDR_LOOP_NEST (ddr));
5129
5130 /* For "{{0, +, 2}_1, +, 3}_2" the distance vector is (3, -2). */
5131 dist_v = lambda_vector_new (DDR_NB_LOOPS (ddr));
5132 v1 = int_cst_value (CHREC_RIGHT (c_1));
5133 v2 = int_cst_value (CHREC_RIGHT (c_2));
5134 cd = gcd (v1, v2);
5135 v1 /= cd;
5136 v2 /= cd;
5137
5138 if (v2 < 0)
5139 {
5140 v2 = -v2;
5141 v1 = -v1;
5142 }
5143
5144 dist_v[x_1] = v2;
5145 dist_v[x_2] = -v1;
5146 save_dist_v (ddr, dist_v);
5147
5148 add_outer_distances (ddr, dist_v, x_1);
5149 }
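
/* Why (v2, -v1): with steps s_1 = cd * v1 in loop x_1 and
   s_2 = cd * v2 in loop x_2, the access function returns to the same
   value after v2 extra iterations of x_1 and v1 fewer iterations of
   x_2, since s_1 * v2 - s_2 * v1 = 0, and (v2, -v1) is the smallest
   such integral pair.  For "{{0, +, 2}_1, +, 3}_2": v1 = 2, v2 = 3,
   cd = 1, giving the distance vector (3, -2) quoted above.  */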
5150
5151 /* Helper function for the case where DDR_A and DDR_B are the same
5152 access functions. */
5153
5154 static void
5155 add_other_self_distances (struct data_dependence_relation *ddr)
5156 {
5157 lambda_vector dist_v;
5158 unsigned i;
5159 int index_carry = DDR_NB_LOOPS (ddr);
5160 subscript *sub;
5161 class loop *loop = DDR_LOOP_NEST (ddr)[0];
5162
5163 FOR_EACH_VEC_ELT (DDR_SUBSCRIPTS (ddr), i, sub)
5164 {
5165 tree access_fun = SUB_ACCESS_FN (sub, 0);
5166
5167 if (TREE_CODE (access_fun) == POLYNOMIAL_CHREC)
5168 {
5169 if (!evolution_function_is_univariate_p (access_fun, loop->num))
5170 {
5171 if (DDR_NUM_SUBSCRIPTS (ddr) != 1)
5172 {
5173 DDR_ARE_DEPENDENT (ddr) = chrec_dont_know;
5174 return;
5175 }
5176
5177 access_fun = SUB_ACCESS_FN (DDR_SUBSCRIPT (ddr, 0), 0);
5178
5179 if (TREE_CODE (CHREC_LEFT (access_fun)) == POLYNOMIAL_CHREC)
5180 add_multivariate_self_dist (ddr, access_fun);
5181 else
5182 /* The evolution step is not constant: it varies in
5183 the outer loop, so this cannot be represented by a
5184 distance vector. For example in pr34635.c the
5185 evolution is {0, +, {0, +, 4}_1}_2. */
5186 DDR_AFFINE_P (ddr) = false;
5187
5188 return;
5189 }
5190
5191 /* When data references are collected in a loop while data
5192 dependences are analyzed in a loop nest nested inside that loop, we
5193 may have more access functions than loops in the nest.
5194 Skip access functions of loops not in the loop nest.
5195
5196 See PR89725 for more information. */
5197 if (flow_loop_nested_p (get_loop (cfun, CHREC_VARIABLE (access_fun)),
5198 loop))
5199 continue;
5200
5201 index_carry = MIN (index_carry,
5202 index_in_loop_nest (CHREC_VARIABLE (access_fun),
5203 DDR_LOOP_NEST (ddr)));
5204 }
5205 }
5206
5207 dist_v = lambda_vector_new (DDR_NB_LOOPS (ddr));
5208 add_outer_distances (ddr, dist_v, index_carry);
5209 }
5210
5211 static void
5212 insert_innermost_unit_dist_vector (struct data_dependence_relation *ddr)
5213 {
5214 lambda_vector dist_v = lambda_vector_new (DDR_NB_LOOPS (ddr));
5215
5216 dist_v[0] = 1;
5217 save_dist_v (ddr, dist_v);
5218 }
5219
5220 /* Adds a unit distance vector to DDR when there is a 0 overlap. This
5221 is the case for example when access functions are the same and
5222 equal to a constant, as in:
5223
5224 | loop_1
5225 | A[3] = ...
5226 | ... = A[3]
5227 | endloop_1
5228
5229 in which case the distance vectors are (0) and (1). */
5230
5231 static void
5232 add_distance_for_zero_overlaps (struct data_dependence_relation *ddr)
5233 {
5234 unsigned i, j;
5235
5236 for (i = 0; i < DDR_NUM_SUBSCRIPTS (ddr); i++)
5237 {
5238 subscript_p sub = DDR_SUBSCRIPT (ddr, i);
5239 conflict_function *ca = SUB_CONFLICTS_IN_A (sub);
5240 conflict_function *cb = SUB_CONFLICTS_IN_B (sub);
5241
5242 for (j = 0; j < ca->n; j++)
5243 if (affine_function_zero_p (ca->fns[j]))
5244 {
5245 insert_innermost_unit_dist_vector (ddr);
5246 return;
5247 }
5248
5249 for (j = 0; j < cb->n; j++)
5250 if (affine_function_zero_p (cb->fns[j]))
5251 {
5252 insert_innermost_unit_dist_vector (ddr);
5253 return;
5254 }
5255 }
5256 }
5257
5258 /* Return true when the DDR contains two data references that have the
5259 same access functions. */
5260
5261 static inline bool
5262 same_access_functions (const struct data_dependence_relation *ddr)
5263 {
5264 unsigned i;
5265 subscript *sub;
5266
5267 FOR_EACH_VEC_ELT (DDR_SUBSCRIPTS (ddr), i, sub)
5268 if (!eq_evolutions_p (SUB_ACCESS_FN (sub, 0),
5269 SUB_ACCESS_FN (sub, 1)))
5270 return false;
5271
5272 return true;
5273 }
5274
5275 /* Compute the classic per loop distance vector. DDR is the data
5276 dependence relation to build a vector from. Return false when we fail
5277 to represent the data dependence as a distance vector. */
5278
5279 static bool
5280 build_classic_dist_vector (struct data_dependence_relation *ddr,
5281 class loop *loop_nest)
5282 {
5283 bool init_b = false;
5284 int index_carry = DDR_NB_LOOPS (ddr);
5285 lambda_vector dist_v;
5286
5287 if (DDR_ARE_DEPENDENT (ddr) != NULL_TREE)
5288 return false;
5289
5290 if (same_access_functions (ddr))
5291 {
5292 /* Save the 0 vector. */
5293 dist_v = lambda_vector_new (DDR_NB_LOOPS (ddr));
5294 save_dist_v (ddr, dist_v);
5295
5296 if (invariant_access_functions (ddr, loop_nest->num))
5297 add_distance_for_zero_overlaps (ddr);
5298
5299 if (DDR_NB_LOOPS (ddr) > 1)
5300 add_other_self_distances (ddr);
5301
5302 return true;
5303 }
5304
5305 dist_v = lambda_vector_new (DDR_NB_LOOPS (ddr));
5306 if (!build_classic_dist_vector_1 (ddr, 0, 1, dist_v, &init_b, &index_carry))
5307 return false;
5308
5309 /* Save the distance vector if we initialized one. */
5310 if (init_b)
5311 {
5312 /* Verify a basic constraint: classic distance vectors should
5313 always be lexicographically positive.
5314
5315 Data references are collected in the order of execution of
5316 the program, thus for the following loop
5317
5318 | for (i = 1; i < 100; i++)
5319 | for (j = 1; j < 100; j++)
5320 | {
5321 | t = T[j+1][i-1]; // A
5322 | T[j][i] = t + 2; // B
5323 | }
5324
5325 references are collected in execution order (downwind):
5326 A then B.  The data dependence tests are also performed in
5327 this order, so we are looking at the distance separating the
5328 elements accessed by A from the elements later accessed by B.
5329 But in this example, the distance returned by
5330 test_dep (A, B) is lexicographically negative (-1, 1), which
5331 means that access A occurs later than B with respect to
5332 the outer loop, i.e. we are actually looking upwind.  In this
5333 case we solve test_dep (B, A) looking downwind for the
5334 lexicographically positive solution, which returns the
5335 distance vector (1, -1).  */
5336 if (!lambda_vector_lexico_pos (dist_v, DDR_NB_LOOPS (ddr)))
5337 {
5338 lambda_vector save_v = lambda_vector_new (DDR_NB_LOOPS (ddr));
5339 if (!subscript_dependence_tester_1 (ddr, 1, 0, loop_nest))
5340 return false;
5341 compute_subscript_distance (ddr);
5342 if (!build_classic_dist_vector_1 (ddr, 1, 0, save_v, &init_b,
5343 &index_carry))
5344 return false;
5345 save_dist_v (ddr, save_v);
5346 DDR_REVERSED_P (ddr) = true;
5347
5348 /* In this case there is a dependence forward for all the
5349 outer loops:
5350
5351 | for (k = 1; k < 100; k++)
5352 | for (i = 1; i < 100; i++)
5353 | for (j = 1; j < 100; j++)
5354 | {
5355 | t = T[j+1][i-1]; // A
5356 | T[j][i] = t + 2; // B
5357 | }
5358
5359 the vectors are:
5360 (0, 1, -1)
5361 (1, 1, -1)
5362 (1, -1, 1)
5363 */
5364 if (DDR_NB_LOOPS (ddr) > 1)
5365 {
5366 add_outer_distances (ddr, save_v, index_carry);
5367 add_outer_distances (ddr, dist_v, index_carry);
5368 }
5369 }
5370 else
5371 {
5372 lambda_vector save_v = lambda_vector_new (DDR_NB_LOOPS (ddr));
5373 lambda_vector_copy (dist_v, save_v, DDR_NB_LOOPS (ddr));
5374
5375 if (DDR_NB_LOOPS (ddr) > 1)
5376 {
5377 lambda_vector opposite_v = lambda_vector_new (DDR_NB_LOOPS (ddr));
5378
5379 if (!subscript_dependence_tester_1 (ddr, 1, 0, loop_nest))
5380 return false;
5381 compute_subscript_distance (ddr);
5382 if (!build_classic_dist_vector_1 (ddr, 1, 0, opposite_v, &init_b,
5383 &index_carry))
5384 return false;
5385
5386 save_dist_v (ddr, save_v);
5387 add_outer_distances (ddr, dist_v, index_carry);
5388 add_outer_distances (ddr, opposite_v, index_carry);
5389 }
5390 else
5391 save_dist_v (ddr, save_v);
5392 }
5393 }
5394 else
5395 {
5396 /* There is a distance of 1 on all the outer loops.  For example,
5397 there is a dependence of distance 1 on loop_1 for the array A in:
5398
5399 | loop_1
5400 | A[5] = ...
5401 | endloop_1
5402 */
5403 add_outer_distances (ddr, dist_v,
5404 lambda_vector_first_nz (dist_v,
5405 DDR_NB_LOOPS (ddr), 0));
5406 }
5407
5408 if (dump_file && (dump_flags & TDF_DETAILS))
5409 {
5410 unsigned i;
5411
5412 fprintf (dump_file, "(build_classic_dist_vector\n");
5413 for (i = 0; i < DDR_NUM_DIST_VECTS (ddr); i++)
5414 {
5415 fprintf (dump_file, " dist_vector = (");
5416 print_lambda_vector (dump_file, DDR_DIST_VECT (ddr, i),
5417 DDR_NB_LOOPS (ddr));
5418 fprintf (dump_file, " )\n");
5419 }
5420 fprintf (dump_file, ")\n");
5421 }
5422
5423 return true;
5424 }
5425
5426 /* Return the direction for a given distance.
5427 FIXME: Computing dir this way is suboptimal, since dir can catch
5428 cases that dist is unable to represent. */
5429
5430 static inline enum data_dependence_direction
5431 dir_from_dist (int dist)
5432 {
5433 if (dist > 0)
5434 return dir_positive;
5435 else if (dist < 0)
5436 return dir_negative;
5437 else
5438 return dir_equal;
5439 }
5440
5441 /* Compute the classic per loop direction vector. DDR is the data
5442 dependence relation to build a vector from. */
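/* For instance (illustrative values): the distance vector (1, -1)
   yields the direction vector (dir_positive, dir_negative), and the
   zero vector (0, 0) yields (dir_equal, dir_equal).  */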
5443
5444 static void
5445 build_classic_dir_vector (struct data_dependence_relation *ddr)
5446 {
5447 unsigned i, j;
5448 lambda_vector dist_v;
5449
5450 FOR_EACH_VEC_ELT (DDR_DIST_VECTS (ddr), i, dist_v)
5451 {
5452 lambda_vector dir_v = lambda_vector_new (DDR_NB_LOOPS (ddr));
5453
5454 for (j = 0; j < DDR_NB_LOOPS (ddr); j++)
5455 dir_v[j] = dir_from_dist (dist_v[j]);
5456
5457 save_dir_v (ddr, dir_v);
5458 }
5459 }
5460
5461 /* Helper function. Returns true when there is a dependence between the
5462 data references. A_INDEX is the index of the first reference (0 for
5463 DDR_A, 1 for DDR_B) and B_INDEX is the index of the second reference. */
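/* Two illustrative cases (made-up access functions): for
   A[{0, +, 1}_1] versus A[{1, +, 1}_1] the conflict functions
   overlap whenever x == y + 1, so no subscript proves independence
   and the function returns true; for A[{0, +, 2}_1] versus
   A[{1, +, 2}_1] the equation 2x == 2y + 1 has no integer solution,
   res becomes chrec_known and false is returned.  */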
5464
5465 static bool
5466 subscript_dependence_tester_1 (struct data_dependence_relation *ddr,
5467 unsigned int a_index, unsigned int b_index,
5468 class loop *loop_nest)
5469 {
5470 unsigned int i;
5471 tree last_conflicts;
5472 struct subscript *subscript;
5473 tree res = NULL_TREE;
5474
5475 for (i = 0; DDR_SUBSCRIPTS (ddr).iterate (i, &subscript); i++)
5476 {
5477 conflict_function *overlaps_a, *overlaps_b;
5478
5479 analyze_overlapping_iterations (SUB_ACCESS_FN (subscript, a_index),
5480 SUB_ACCESS_FN (subscript, b_index),
5481 &overlaps_a, &overlaps_b,
5482 &last_conflicts, loop_nest);
5483
5484 if (SUB_CONFLICTS_IN_A (subscript))
5485 free_conflict_function (SUB_CONFLICTS_IN_A (subscript));
5486 if (SUB_CONFLICTS_IN_B (subscript))
5487 free_conflict_function (SUB_CONFLICTS_IN_B (subscript));
5488
5489 SUB_CONFLICTS_IN_A (subscript) = overlaps_a;
5490 SUB_CONFLICTS_IN_B (subscript) = overlaps_b;
5491 SUB_LAST_CONFLICT (subscript) = last_conflicts;
5492
5493 /* If there is any undetermined conflict function we have to
5494 give a conservative answer in case we cannot prove that
5495 no dependence exists when analyzing another subscript. */
5496 if (CF_NOT_KNOWN_P (overlaps_a)
5497 || CF_NOT_KNOWN_P (overlaps_b))
5498 {
5499 res = chrec_dont_know;
5500 continue;
5501 }
5502
5503 /* When there is a subscript with no dependence we can stop. */
5504 else if (CF_NO_DEPENDENCE_P (overlaps_a)
5505 || CF_NO_DEPENDENCE_P (overlaps_b))
5506 {
5507 res = chrec_known;
5508 break;
5509 }
5510 }
5511
5512 if (res == NULL_TREE)
5513 return true;
5514
5515 if (res == chrec_known)
5516 dependence_stats.num_dependence_independent++;
5517 else
5518 dependence_stats.num_dependence_undetermined++;
5519 finalize_ddr_dependent (ddr, res);
5520 return false;
5521 }
5522
5523 /* Computes the conflicting iterations in LOOP_NEST, and initializes DDR. */
5524
5525 static void
5526 subscript_dependence_tester (struct data_dependence_relation *ddr,
5527 class loop *loop_nest)
5528 {
5529 if (subscript_dependence_tester_1 (ddr, 0, 1, loop_nest))
5530 dependence_stats.num_dependence_dependent++;
5531
5532 compute_subscript_distance (ddr);
5533 if (build_classic_dist_vector (ddr, loop_nest))
5534 build_classic_dir_vector (ddr);
5535 }
5536
5537 /* Returns true when all the access functions of A are affine or
5538 constant with respect to LOOP_NEST. */
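/* For instance (illustrative access functions): the evolution
   {0, +, 4}_1 is affine in the nest and 42 is invariant, so both are
   accepted, whereas an access function that is neither, e.g. one
   whose step cannot be analyzed, causes a false return.  */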
5539
5540 static bool
5541 access_functions_are_affine_or_constant_p (const struct data_reference *a,
5542 const class loop *loop_nest)
5543 {
5544 unsigned int i;
5545 vec<tree> fns = DR_ACCESS_FNS (a);
5546 tree t;
5547
5548 FOR_EACH_VEC_ELT (fns, i, t)
5549 if (!evolution_function_is_invariant_p (t, loop_nest->num)
5550 && !evolution_function_is_affine_multivariate_p (t, loop_nest->num))
5551 return false;
5552
5553 return true;
5554 }
5555
5556 /* This computes the affine dependence relation between A and B with
5557 respect to LOOP_NEST. CHREC_KNOWN is used for representing the
5558 independence between two accesses, while CHREC_DONT_KNOW is used
5559 for representing the unknown relation.
5560
5561 Note that it is possible to stop the computation of the dependence
5562 relation the first time we detect a CHREC_KNOWN element for a given
5563 subscript. */
5564
5565 void
5566 compute_affine_dependence (struct data_dependence_relation *ddr,
5567 class loop *loop_nest)
5568 {
5569 struct data_reference *dra = DDR_A (ddr);
5570 struct data_reference *drb = DDR_B (ddr);
5571
5572 if (dump_file && (dump_flags & TDF_DETAILS))
5573 {
5574 fprintf (dump_file, "(compute_affine_dependence\n");
5575 fprintf (dump_file, " stmt_a: ");
5576 print_gimple_stmt (dump_file, DR_STMT (dra), 0, TDF_SLIM);
5577 fprintf (dump_file, " stmt_b: ");
5578 print_gimple_stmt (dump_file, DR_STMT (drb), 0, TDF_SLIM);
5579 }
5580
5581 /* Analyze only when the dependence relation is not yet known. */
5582 if (DDR_ARE_DEPENDENT (ddr) == NULL_TREE)
5583 {
5584 dependence_stats.num_dependence_tests++;
5585
5586 if (access_functions_are_affine_or_constant_p (dra, loop_nest)
5587 && access_functions_are_affine_or_constant_p (drb, loop_nest))
5588 subscript_dependence_tester (ddr, loop_nest);
5589
5590 /* As a last case, if the dependence cannot be determined, or if
5591 the dependence is considered too difficult to determine, answer
5592 "don't know". */
5593 else
5594 {
5595 dependence_stats.num_dependence_undetermined++;
5596
5597 if (dump_file && (dump_flags & TDF_DETAILS))
5598 {
5599 fprintf (dump_file, "Data ref a:\n");
5600 dump_data_reference (dump_file, dra);
5601 fprintf (dump_file, "Data ref b:\n");
5602 dump_data_reference (dump_file, drb);
5603 fprintf (dump_file, "affine dependence test not usable: access function not affine or constant.\n");
5604 }
5605 finalize_ddr_dependent (ddr, chrec_dont_know);
5606 }
5607 }
5608
5609 if (dump_file && (dump_flags & TDF_DETAILS))
5610 {
5611 if (DDR_ARE_DEPENDENT (ddr) == chrec_known)
5612 fprintf (dump_file, ") -> no dependence\n");
5613 else if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know)
5614 fprintf (dump_file, ") -> dependence analysis failed\n");
5615 else
5616 fprintf (dump_file, ")\n");
5617 }
5618 }
5619
5620 /* Compute in DEPENDENCE_RELATIONS the data dependence graph for all
5621 the data references in DATAREFS, in the LOOP_NEST. When
5622 COMPUTE_SELF_AND_RR is FALSE, don't compute read-read and self
5623 relations.  Return true when successful, i.e. when the number of
5624 data references is small enough to be handled.  */
5625
5626 bool
5627 compute_all_dependences (vec<data_reference_p> datarefs,
5628 vec<ddr_p> *dependence_relations,
5629 vec<loop_p> loop_nest,
5630 bool compute_self_and_rr)
5631 {
5632 struct data_dependence_relation *ddr;
5633 struct data_reference *a, *b;
5634 unsigned int i, j;
5635
5636 if ((int) datarefs.length ()
5637 > param_loop_max_datarefs_for_datadeps)
5638 {
5639 struct data_dependence_relation *ddr;
5640
5641 /* Insert a single relation into dependence_relations:
5642 chrec_dont_know. */
5643 ddr = initialize_data_dependence_relation (NULL, NULL, loop_nest);
5644 dependence_relations->safe_push (ddr);
5645 return false;
5646 }
5647
5648 FOR_EACH_VEC_ELT (datarefs, i, a)
5649 for (j = i + 1; datarefs.iterate (j, &b); j++)
5650 if (DR_IS_WRITE (a) || DR_IS_WRITE (b) || compute_self_and_rr)
5651 {
5652 ddr = initialize_data_dependence_relation (a, b, loop_nest);
5653 dependence_relations->safe_push (ddr);
5654 if (loop_nest.exists ())
5655 compute_affine_dependence (ddr, loop_nest[0]);
5656 }
5657
5658 if (compute_self_and_rr)
5659 FOR_EACH_VEC_ELT (datarefs, i, a)
5660 {
5661 ddr = initialize_data_dependence_relation (a, a, loop_nest);
5662 dependence_relations->safe_push (ddr);
5663 if (loop_nest.exists ())
5664 compute_affine_dependence (ddr, loop_nest[0]);
5665 }
5666
5667 return true;
5668 }
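/* A minimal caller sketch (illustrative only: it simply mirrors what
   compute_data_dependences_for_loop does further below, and the
   variable names are made up):

   | vec<loop_p> nest = vNULL;
   | vec<data_reference_p> drs = vNULL;
   | vec<ddr_p> ddrs = vNULL;
   | if (find_loop_nest (loop, &nest)
   |     && find_data_references_in_loop (loop, &drs) != chrec_dont_know)
   |   compute_all_dependences (drs, &ddrs, nest, false);
   | ... inspect DDR_ARE_DEPENDENT and DDR_DIST_VECTS here ...
   | free_dependence_relations (ddrs);
   | free_data_refs (drs);
   | nest.release ();  */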
5669
5670 /* Describes a location of a memory reference. */
5671
5672 struct data_ref_loc
5673 {
5674 /* The memory reference. */
5675 tree ref;
5676
5677 /* True if the memory reference is read. */
5678 bool is_read;
5679
5680 /* True if the data reference is conditional within the containing
5681 statement, i.e. if it might not occur even when the statement
5682 is executed and runs to completion. */
5683 bool is_conditional_in_stmt;
5684 };
5685
5686
5687 /* Stores the locations of memory references in STMT to REFERENCES. Returns
5688 true if STMT clobbers memory, false otherwise. */
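/* For example (an illustrative GIMPLE statement, not from a
   testcase): for the memory-to-memory assignment

   | a[i_1] = b[i_1];

   this function records a read location for b[i_1] and a write
   location for a[i_1], neither of them conditional within the
   statement; for an IFN_MASK_STORE call the recorded location is
   conditional, since masked-off lanes do not access memory.  */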
5689
5690 static bool
5691 get_references_in_stmt (gimple *stmt, vec<data_ref_loc, va_heap> *references)
5692 {
5693 bool clobbers_memory = false;
5694 data_ref_loc ref;
5695 tree op0, op1;
5696 enum gimple_code stmt_code = gimple_code (stmt);
5697
5698 /* ASM_EXPR and CALL_EXPR may embed arbitrary side effects.
5699 As we cannot model data references for accesses that are not
5700 spelled out, give up if such accesses may occur.  */
5701 if (stmt_code == GIMPLE_CALL
5702 && !(gimple_call_flags (stmt) & ECF_CONST))
5703 {
5704 /* Allow IFN_GOMP_SIMD_LANE calls in their own loops.  */
5705 if (gimple_call_internal_p (stmt))
5706 switch (gimple_call_internal_fn (stmt))
5707 {
5708 case IFN_GOMP_SIMD_LANE:
5709 {
5710 class loop *loop = gimple_bb (stmt)->loop_father;
5711 tree uid = gimple_call_arg (stmt, 0);
5712 gcc_assert (TREE_CODE (uid) == SSA_NAME);
5713 if (loop == NULL
5714 || loop->simduid != SSA_NAME_VAR (uid))
5715 clobbers_memory = true;
5716 break;
5717 }
5718 case IFN_MASK_LOAD:
5719 case IFN_MASK_STORE:
5720 break;
5721 default:
5722 clobbers_memory = true;
5723 break;
5724 }
5725 else
5726 clobbers_memory = true;
5727 }
5728 else if (stmt_code == GIMPLE_ASM
5729 && (gimple_asm_volatile_p (as_a <gasm *> (stmt))
5730 || gimple_vuse (stmt)))
5731 clobbers_memory = true;
5732
5733 if (!gimple_vuse (stmt))
5734 return clobbers_memory;
5735
5736 if (stmt_code == GIMPLE_ASSIGN)
5737 {
5738 tree base;
5739 op0 = gimple_assign_lhs (stmt);
5740 op1 = gimple_assign_rhs1 (stmt);
5741
5742 if (DECL_P (op1)
5743 || (REFERENCE_CLASS_P (op1)
5744 && (base = get_base_address (op1))
5745 && TREE_CODE (base) != SSA_NAME
5746 && !is_gimple_min_invariant (base)))
5747 {
5748 ref.ref = op1;
5749 ref.is_read = true;
5750 ref.is_conditional_in_stmt = false;
5751 references->safe_push (ref);
5752 }
5753 }
5754 else if (stmt_code == GIMPLE_CALL)
5755 {
5756 unsigned i, n;
5757 tree ptr, type;
5758 unsigned int align;
5759
5760 ref.is_read = false;
5761 if (gimple_call_internal_p (stmt))
5762 switch (gimple_call_internal_fn (stmt))
5763 {
5764 case IFN_MASK_LOAD:
5765 if (gimple_call_lhs (stmt) == NULL_TREE)
5766 break;
5767 ref.is_read = true;
5768 /* FALLTHRU */
5769 case IFN_MASK_STORE:
5770 ptr = build_int_cst (TREE_TYPE (gimple_call_arg (stmt, 1)), 0);
5771 align = tree_to_shwi (gimple_call_arg (stmt, 1));
5772 if (ref.is_read)
5773 type = TREE_TYPE (gimple_call_lhs (stmt));
5774 else
5775 type = TREE_TYPE (gimple_call_arg (stmt, 3));
5776 if (TYPE_ALIGN (type) != align)
5777 type = build_aligned_type (type, align);
5778 ref.is_conditional_in_stmt = true;
5779 ref.ref = fold_build2 (MEM_REF, type, gimple_call_arg (stmt, 0),
5780 ptr);
5781 references->safe_push (ref);
5782 return false;
5783 default:
5784 break;
5785 }
5786
5787 op0 = gimple_call_lhs (stmt);
5788 n = gimple_call_num_args (stmt);
5789 for (i = 0; i < n; i++)
5790 {
5791 op1 = gimple_call_arg (stmt, i);
5792
5793 if (DECL_P (op1)
5794 || (REFERENCE_CLASS_P (op1) && get_base_address (op1)))
5795 {
5796 ref.ref = op1;
5797 ref.is_read = true;
5798 ref.is_conditional_in_stmt = false;
5799 references->safe_push (ref);
5800 }
5801 }
5802 }
5803 else
5804 return clobbers_memory;
5805
5806 if (op0
5807 && (DECL_P (op0)
5808 || (REFERENCE_CLASS_P (op0) && get_base_address (op0))))
5809 {
5810 ref.ref = op0;
5811 ref.is_read = false;
5812 ref.is_conditional_in_stmt = false;
5813 references->safe_push (ref);
5814 }
5815 return clobbers_memory;
5816 }
5817
5818
5819 /* Returns true if the loop-nest has any data reference. */
5820
5821 bool
5822 loop_nest_has_data_refs (loop_p loop)
5823 {
5824 basic_block *bbs = get_loop_body (loop);
5825 auto_vec<data_ref_loc, 3> references;
5826
5827 for (unsigned i = 0; i < loop->num_nodes; i++)
5828 {
5829 basic_block bb = bbs[i];
5830 gimple_stmt_iterator bsi;
5831
5832 for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
5833 {
5834 gimple *stmt = gsi_stmt (bsi);
5835 get_references_in_stmt (stmt, &references);
5836 if (references.length ())
5837 {
5838 free (bbs);
5839 return true;
5840 }
5841 }
5842 }
5843 free (bbs);
5844 return false;
5845 }
5846
5847 /* Stores the data references in STMT to DATAREFS.  If there is an unanalyzable
5848 reference, returns an opt_result failure, otherwise success.  NEST is the
5849 outermost loop of the loop nest in which the references should be analyzed.  */
5850
5851 opt_result
5852 find_data_references_in_stmt (class loop *nest, gimple *stmt,
5853 vec<data_reference_p> *datarefs)
5854 {
5855 unsigned i;
5856 auto_vec<data_ref_loc, 2> references;
5857 data_ref_loc *ref;
5858 data_reference_p dr;
5859
5860 if (get_references_in_stmt (stmt, &references))
5861 return opt_result::failure_at (stmt, "statement clobbers memory: %G",
5862 stmt);
5863
5864 FOR_EACH_VEC_ELT (references, i, ref)
5865 {
5866 dr = create_data_ref (nest ? loop_preheader_edge (nest) : NULL,
5867 loop_containing_stmt (stmt), ref->ref,
5868 stmt, ref->is_read, ref->is_conditional_in_stmt);
5869 gcc_assert (dr != NULL);
5870 datarefs->safe_push (dr);
5871 }
5872
5873 return opt_result::success ();
5874 }
5875
5876 /* Stores the data references in STMT to DATAREFS. If there is an
5877 unanalyzable reference, returns false, otherwise returns true.
5878 NEST is the outermost loop of the loop nest in which the references
5879 should be instantiated, LOOP is the loop in which the references
5880 should be analyzed. */
5881
5882 bool
5883 graphite_find_data_references_in_stmt (edge nest, loop_p loop, gimple *stmt,
5884 vec<data_reference_p> *datarefs)
5885 {
5886 unsigned i;
5887 auto_vec<data_ref_loc, 2> references;
5888 data_ref_loc *ref;
5889 bool ret = true;
5890 data_reference_p dr;
5891
5892 if (get_references_in_stmt (stmt, &references))
5893 return false;
5894
5895 FOR_EACH_VEC_ELT (references, i, ref)
5896 {
5897 dr = create_data_ref (nest, loop, ref->ref, stmt, ref->is_read,
5898 ref->is_conditional_in_stmt);
5899 gcc_assert (dr != NULL);
5900 datarefs->safe_push (dr);
5901 }
5902
5903 return ret;
5904 }
5905
5906 /* Search the data references in basic block BB, and record the information into
5907 DATAREFS. Returns chrec_dont_know when failing to analyze a
5908 difficult case, returns NULL_TREE otherwise. */
5909
5910 tree
5911 find_data_references_in_bb (class loop *loop, basic_block bb,
5912 vec<data_reference_p> *datarefs)
5913 {
5914 gimple_stmt_iterator bsi;
5915
5916 for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
5917 {
5918 gimple *stmt = gsi_stmt (bsi);
5919
5920 if (!find_data_references_in_stmt (loop, stmt, datarefs))
5921 {
5922 struct data_reference *res;
5923 res = XCNEW (struct data_reference);
5924 datarefs->safe_push (res);
5925
5926 return chrec_dont_know;
5927 }
5928 }
5929
5930 return NULL_TREE;
5931 }
5932
5933 /* Search the data references in LOOP, and record the information into
5934 DATAREFS. Returns chrec_dont_know when failing to analyze a
5935 difficult case, returns NULL_TREE otherwise.
5936
5937 TODO: This function should be made smarter so that it can handle address
5938 arithmetic as if it were an array access, etc.  */
5939
5940 tree
5941 find_data_references_in_loop (class loop *loop,
5942 vec<data_reference_p> *datarefs)
5943 {
5944 basic_block bb, *bbs;
5945 unsigned int i;
5946
5947 bbs = get_loop_body_in_dom_order (loop);
5948
5949 for (i = 0; i < loop->num_nodes; i++)
5950 {
5951 bb = bbs[i];
5952
5953 if (find_data_references_in_bb (loop, bb, datarefs) == chrec_dont_know)
5954 {
5955 free (bbs);
5956 return chrec_dont_know;
5957 }
5958 }
5959 free (bbs);
5960
5961 return NULL_TREE;
5962 }
5963
5964 /* Return the alignment in bytes that DRB is guaranteed to have at all
5965 times. */
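/* A worked example with made-up numbers: if BASE_ALIGNMENT is 16 and
   BASE_MISALIGNMENT + INIT is 20, then 20 & -20 == 4 caps the result
   at 4 bytes; a nonzero OFFSET whose alignment is only 2 bytes would
   cap it further at 2.  */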
5966
5967 unsigned int
5968 dr_alignment (innermost_loop_behavior *drb)
5969 {
5970 /* Get the alignment of BASE_ADDRESS + INIT. */
5971 unsigned int alignment = drb->base_alignment;
5972 unsigned int misalignment = (drb->base_misalignment
5973 + TREE_INT_CST_LOW (drb->init));
5974 if (misalignment != 0)
5975 alignment = MIN (alignment, misalignment & -misalignment);
5976
5977 /* Cap it to the alignment of OFFSET. */
5978 if (!integer_zerop (drb->offset))
5979 alignment = MIN (alignment, drb->offset_alignment);
5980
5981 /* Cap it to the alignment of STEP. */
5982 if (!integer_zerop (drb->step))
5983 alignment = MIN (alignment, drb->step_alignment);
5984
5985 return alignment;
5986 }
5987
5988 /* If BASE is a pointer-typed SSA name, try to find the object that it
5989 is based on. Return this object X on success and store the alignment
5990 in bytes of BASE - &X in *ALIGNMENT_OUT. */
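/* A sketch of the chrec peeling done below, with hypothetical values:
   if the scalar evolution of BASE is {&x + 4, +, 8}_1, the step 8
   limits the alignment preserved across iterations to 8 bytes, and
   analyzing the remaining base &x + 4 limits it further to 4 bytes,
   so X is x and *ALIGNMENT_OUT becomes 4.  */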
5991
5992 static tree
5993 get_base_for_alignment_1 (tree base, unsigned int *alignment_out)
5994 {
5995 if (TREE_CODE (base) != SSA_NAME || !POINTER_TYPE_P (TREE_TYPE (base)))
5996 return NULL_TREE;
5997
5998 gimple *def = SSA_NAME_DEF_STMT (base);
5999 base = analyze_scalar_evolution (loop_containing_stmt (def), base);
6000
6001 /* Peel chrecs and record the minimum alignment preserved by
6002 all steps. */
6003 unsigned int alignment = MAX_OFILE_ALIGNMENT / BITS_PER_UNIT;
6004 while (TREE_CODE (base) == POLYNOMIAL_CHREC)
6005 {
6006 unsigned int step_alignment = highest_pow2_factor (CHREC_RIGHT (base));
6007 alignment = MIN (alignment, step_alignment);
6008 base = CHREC_LEFT (base);
6009 }
6010
6011 /* Punt if the expression is too complicated to handle. */
6012 if (tree_contains_chrecs (base, NULL) || !POINTER_TYPE_P (TREE_TYPE (base)))
6013 return NULL_TREE;
6014
6015 /* The only useful cases are those for which a dereference folds to something
6016 other than an INDIRECT_REF. */
6017 tree ref_type = TREE_TYPE (TREE_TYPE (base));
6018 tree ref = fold_indirect_ref_1 (UNKNOWN_LOCATION, ref_type, base);
6019 if (!ref)
6020 return NULL_TREE;
6021
6022 /* Analyze the base to which the steps we peeled were applied. */
6023 poly_int64 bitsize, bitpos, bytepos;
6024 machine_mode mode;
6025 int unsignedp, reversep, volatilep;
6026 tree offset;
6027 base = get_inner_reference (ref, &bitsize, &bitpos, &offset, &mode,
6028 &unsignedp, &reversep, &volatilep);
6029 if (!base || !multiple_p (bitpos, BITS_PER_UNIT, &bytepos))
6030 return NULL_TREE;
6031
6032 /* Restrict the alignment to that guaranteed by the offsets. */
6033 unsigned int bytepos_alignment = known_alignment (bytepos);
6034 if (bytepos_alignment != 0)
6035 alignment = MIN (alignment, bytepos_alignment);
6036 if (offset)
6037 {
6038 unsigned int offset_alignment = highest_pow2_factor (offset);
6039 alignment = MIN (alignment, offset_alignment);
6040 }
6041
6042 *alignment_out = alignment;
6043 return base;
6044 }
6045
6046 /* Return the object whose alignment would need to be changed in order
6047 to increase the alignment of ADDR. Store the maximum achievable
6048 alignment in *MAX_ALIGNMENT. */
6049
6050 tree
6051 get_base_for_alignment (tree addr, unsigned int *max_alignment)
6052 {
6053 tree base = get_base_for_alignment_1 (addr, max_alignment);
6054 if (base)
6055 return base;
6056
6057 if (TREE_CODE (addr) == ADDR_EXPR)
6058 addr = TREE_OPERAND (addr, 0);
6059 *max_alignment = MAX_OFILE_ALIGNMENT / BITS_PER_UNIT;
6060 return addr;
6061 }
6062
6063 /* Recursive helper function. */
6064
6065 static bool
6066 find_loop_nest_1 (class loop *loop, vec<loop_p> *loop_nest)
6067 {
6068 /* Inner loops of the nest should not contain siblings. Example:
6069 when there are two consecutive loops,
6070
6071 | loop_0
6072 | loop_1
6073 | A[{0, +, 1}_1]
6074 | endloop_1
6075 | loop_2
6076 | A[{0, +, 1}_2]
6077 | endloop_2
6078 | endloop_0
6079
6080 the dependence relation cannot be captured by the distance
6081 abstraction. */
6082 if (loop->next)
6083 return false;
6084
6085 loop_nest->safe_push (loop);
6086 if (loop->inner)
6087 return find_loop_nest_1 (loop->inner, loop_nest);
6088 return true;
6089 }
6090
6091 /* Return false when the LOOP is not well nested. Otherwise return
6092 true and insert in LOOP_NEST the loops of the nest. LOOP_NEST will
6093 contain the loops from the outermost to the innermost, as they will
6094 appear in the classic distance vector. */
6095
6096 bool
6097 find_loop_nest (class loop *loop, vec<loop_p> *loop_nest)
6098 {
6099 loop_nest->safe_push (loop);
6100 if (loop->inner)
6101 return find_loop_nest_1 (loop->inner, loop_nest);
6102 return true;
6103 }
6104
6105 /* Returns true when the data dependences have been computed, false otherwise.
6106 Given a loop nest LOOP, the following vectors are returned:
6107 DATAREFS is initialized to all the array elements contained in this loop,
6108 DEPENDENCE_RELATIONS contains the relations between the data references.
6109 Compute read-read and self relations if
6110 COMPUTE_SELF_AND_READ_READ_DEPENDENCES is TRUE. */
6111
6112 bool
6113 compute_data_dependences_for_loop (class loop *loop,
6114 bool compute_self_and_read_read_dependences,
6115 vec<loop_p> *loop_nest,
6116 vec<data_reference_p> *datarefs,
6117 vec<ddr_p> *dependence_relations)
6118 {
6119 bool res = true;
6120
6121 memset (&dependence_stats, 0, sizeof (dependence_stats));
6122
6123 /* If the loop nest is not well formed, or one of the data references
6124 is not computable, give up without spending time to compute other
6125 dependences. */
6126 if (!loop
6127 || !find_loop_nest (loop, loop_nest)
6128 || find_data_references_in_loop (loop, datarefs) == chrec_dont_know
6129 || !compute_all_dependences (*datarefs, dependence_relations, *loop_nest,
6130 compute_self_and_read_read_dependences))
6131 res = false;
6132
6133 if (dump_file && (dump_flags & TDF_STATS))
6134 {
6135 fprintf (dump_file, "Dependence tester statistics:\n");
6136
6137 fprintf (dump_file, "Number of dependence tests: %d\n",
6138 dependence_stats.num_dependence_tests);
6139 fprintf (dump_file, "Number of dependence tests classified dependent: %d\n",
6140 dependence_stats.num_dependence_dependent);
6141 fprintf (dump_file, "Number of dependence tests classified independent: %d\n",
6142 dependence_stats.num_dependence_independent);
6143 fprintf (dump_file, "Number of undetermined dependence tests: %d\n",
6144 dependence_stats.num_dependence_undetermined);
6145
6146 fprintf (dump_file, "Number of subscript tests: %d\n",
6147 dependence_stats.num_subscript_tests);
6148 fprintf (dump_file, "Number of undetermined subscript tests: %d\n",
6149 dependence_stats.num_subscript_undetermined);
6150 fprintf (dump_file, "Number of same subscript function: %d\n",
6151 dependence_stats.num_same_subscript_function);
6152
6153 fprintf (dump_file, "Number of ziv tests: %d\n",
6154 dependence_stats.num_ziv);
6155 fprintf (dump_file, "Number of ziv tests returning dependent: %d\n",
6156 dependence_stats.num_ziv_dependent);
6157 fprintf (dump_file, "Number of ziv tests returning independent: %d\n",
6158 dependence_stats.num_ziv_independent);
6159 fprintf (dump_file, "Number of ziv tests unimplemented: %d\n",
6160 dependence_stats.num_ziv_unimplemented);
6161
6162 fprintf (dump_file, "Number of siv tests: %d\n",
6163 dependence_stats.num_siv);
6164 fprintf (dump_file, "Number of siv tests returning dependent: %d\n",
6165 dependence_stats.num_siv_dependent);
6166 fprintf (dump_file, "Number of siv tests returning independent: %d\n",
6167 dependence_stats.num_siv_independent);
6168 fprintf (dump_file, "Number of siv tests unimplemented: %d\n",
6169 dependence_stats.num_siv_unimplemented);
6170
6171 fprintf (dump_file, "Number of miv tests: %d\n",
6172 dependence_stats.num_miv);
6173 fprintf (dump_file, "Number of miv tests returning dependent: %d\n",
6174 dependence_stats.num_miv_dependent);
6175 fprintf (dump_file, "Number of miv tests returning independent: %d\n",
6176 dependence_stats.num_miv_independent);
6177 fprintf (dump_file, "Number of miv tests unimplemented: %d\n",
6178 dependence_stats.num_miv_unimplemented);
6179 }
6180
6181 return res;
6182 }
6183
6184 /* Free the memory used by a data dependence relation DDR. */
6185
6186 void
6187 free_dependence_relation (struct data_dependence_relation *ddr)
6188 {
6189 if (ddr == NULL)
6190 return;
6191
6192 if (DDR_SUBSCRIPTS (ddr).exists ())
6193 free_subscripts (DDR_SUBSCRIPTS (ddr));
6194 DDR_DIST_VECTS (ddr).release ();
6195 DDR_DIR_VECTS (ddr).release ();
6196
6197 free (ddr);
6198 }
6199
6200 /* Free the memory used by the data dependence relations from
6201 DEPENDENCE_RELATIONS. */
6202
6203 void
6204 free_dependence_relations (vec<ddr_p> dependence_relations)
6205 {
6206 unsigned int i;
6207 struct data_dependence_relation *ddr;
6208
6209 FOR_EACH_VEC_ELT (dependence_relations, i, ddr)
6210 if (ddr)
6211 free_dependence_relation (ddr);
6212
6213 dependence_relations.release ();
6214 }
6215
6216 /* Free the memory used by the data references from DATAREFS. */
6217
6218 void
6219 free_data_refs (vec<data_reference_p> datarefs)
6220 {
6221 unsigned int i;
6222 struct data_reference *dr;
6223
6224 FOR_EACH_VEC_ELT (datarefs, i, dr)
6225 free_data_ref (dr);
6226 datarefs.release ();
6227 }
6228
6229 /* Common routine implementing both dr_direction_indicator and
6230 dr_zero_step_indicator. Return USEFUL_MIN if the indicator is known
6231 to be >= USEFUL_MIN and -1 if the indicator is known to be negative.
6232 Return the step as the indicator otherwise. */
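/* An example of the scaling logic below, with illustrative values:
   for a step of the form (ssizetype) i_1 * 4 the factor is 4, and
   the unscaled i_1 can be tested instead of the full step as long as
   its value range lies within [ssizetype_min / 4, ssizetype_max / 4],
   i.e. as long as the multiplication cannot overflow and flip the
   sign.  */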
6233
6234 static tree
6235 dr_step_indicator (struct data_reference *dr, int useful_min)
6236 {
6237 tree step = DR_STEP (dr);
6238 if (!step)
6239 return NULL_TREE;
6240 STRIP_NOPS (step);
6241 /* Look for cases where the step is scaled by a positive constant
6242 integer, which will often be the access size. If the multiplication
6243 doesn't change the sign (due to overflow effects) then we can
6244 test the unscaled value instead. */
6245 if (TREE_CODE (step) == MULT_EXPR
6246 && TREE_CODE (TREE_OPERAND (step, 1)) == INTEGER_CST
6247 && tree_int_cst_sgn (TREE_OPERAND (step, 1)) > 0)
6248 {
6249 tree factor = TREE_OPERAND (step, 1);
6250 step = TREE_OPERAND (step, 0);
6251
6252 /* Strip widening and truncating conversions as well as nops. */
6253 if (CONVERT_EXPR_P (step)
6254 && INTEGRAL_TYPE_P (TREE_TYPE (TREE_OPERAND (step, 0))))
6255 step = TREE_OPERAND (step, 0);
6256 tree type = TREE_TYPE (step);
6257
6258 /* Get the range of step values that would not cause overflow. */
6259 widest_int minv = (wi::to_widest (TYPE_MIN_VALUE (ssizetype))
6260 / wi::to_widest (factor));
6261 widest_int maxv = (wi::to_widest (TYPE_MAX_VALUE (ssizetype))
6262 / wi::to_widest (factor));
6263
6264 /* Get the range of values that the unconverted step actually has. */
6265 wide_int step_min, step_max;
6266 if (TREE_CODE (step) != SSA_NAME
6267 || get_range_info (step, &step_min, &step_max) != VR_RANGE)
6268 {
6269 step_min = wi::to_wide (TYPE_MIN_VALUE (type));
6270 step_max = wi::to_wide (TYPE_MAX_VALUE (type));
6271 }
6272
6273 /* Check whether the unconverted step has an acceptable range. */
6274 signop sgn = TYPE_SIGN (type);
6275 if (wi::les_p (minv, widest_int::from (step_min, sgn))
6276 && wi::ges_p (maxv, widest_int::from (step_max, sgn)))
6277 {
6278 if (wi::ge_p (step_min, useful_min, sgn))
6279 return ssize_int (useful_min);
6280 else if (wi::lt_p (step_max, 0, sgn))
6281 return ssize_int (-1);
6282 else
6283 return fold_convert (ssizetype, step);
6284 }
6285 }
6286 return DR_STEP (dr);
6287 }
6288
6289 /* Return a value that is negative iff DR has a negative step. */
6290
6291 tree
6292 dr_direction_indicator (struct data_reference *dr)
6293 {
6294 return dr_step_indicator (dr, 0);
6295 }
6296
6297 /* Return a value that is zero iff DR has a zero step. */
6298
6299 tree
6300 dr_zero_step_indicator (struct data_reference *dr)
6301 {
6302 return dr_step_indicator (dr, 1);
6303 }
6304
6305 /* Return true if DR is known to have a nonnegative (but possibly zero)
6306 step. */
6307
6308 bool
6309 dr_known_forward_stride_p (struct data_reference *dr)
6310 {
6311 tree indicator = dr_direction_indicator (dr);
6312 tree neg_step_val = fold_binary (LT_EXPR, boolean_type_node,
6313 fold_convert (ssizetype, indicator),
6314 ssize_int (0));
6315 return neg_step_val && integer_zerop (neg_step_val);
6316 }