/* Data references and dependences detectors.
   Copyright (C) 2003-2021 Free Software Foundation, Inc.
   Contributed by Sebastian Pop <pop@cri.ensmp.fr>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* This pass walks a given loop structure searching for array
   references.  The information about the array accesses is recorded
   in DATA_REFERENCE structures.

   The basic test for determining the dependences is:
   given two access functions chrec1 and chrec2 to a same array, and
   x and y two vectors from the iteration domain, the same element of
   the array is accessed twice at iterations x and y if and only if:
   |             chrec1 (x) == chrec2 (y).
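
   For example (an illustrative sketch of the test above), in:

   | for (i = 0; i < N; i++)
   |   A[i + 1] = A[i];

   the write has access function chrec1 = {1, +, 1}_1 and the read has
   chrec2 = {0, +, 1}_1, so chrec1 (x) == chrec2 (y) reduces to the
   equation x + 1 == y, i.e. a loop carried dependence of distance 1.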

   The goals of this analysis are:

   - to determine the independence: the relation between two
     independent accesses is qualified with the chrec_known (this
     information allows a loop parallelization),

   - when two data references access the same data, to qualify the
     dependence relation with classic dependence representations:

       - distance vectors
       - direction vectors
       - loop carried level dependence
       - polyhedron dependence
     or with the chains of recurrences based representation,

   - to define a knowledge base for storing the data dependence
     information,

   - to define an interface to access this data.


   Definitions:

   - subscript: given two array accesses a subscript is the tuple
   composed of the access functions for a given dimension.  Example:
   Given A[f1][f2][f3] and B[g1][g2][g3], there are three subscripts:
   (f1, g1), (f2, g2), (f3, g3).

   - Diophantine equation: an equation whose coefficients and
   solutions are integer constants, for example the equation
   |   3*x + 2*y = 1
   has an integer solution x = 1 and y = -1.


   References:

   - "Advanced Compilation for High Performance Computing" by Randy
   Allen and Ken Kennedy.
   http://citeseer.ist.psu.edu/goff91practical.html

   - "Loop Transformations for Restructuring Compilers - The Foundations"
   by Utpal Banerjee.

*/

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "gimple-pretty-print.h"
#include "alias.h"
#include "fold-const.h"
#include "expr.h"
#include "gimple-iterator.h"
#include "tree-ssa-loop-niter.h"
#include "tree-ssa-loop.h"
#include "tree-ssa.h"
#include "cfgloop.h"
#include "tree-data-ref.h"
#include "tree-scalar-evolution.h"
#include "dumpfile.h"
#include "tree-affine.h"
#include "builtins.h"
#include "tree-eh.h"
#include "ssa.h"
#include "internal-fn.h"
#include "range-op.h"
#include "vr-values.h"

static struct datadep_stats
{
  int num_dependence_tests;
  int num_dependence_dependent;
  int num_dependence_independent;
  int num_dependence_undetermined;

  int num_subscript_tests;
  int num_subscript_undetermined;
  int num_same_subscript_function;

  int num_ziv;
  int num_ziv_independent;
  int num_ziv_dependent;
  int num_ziv_unimplemented;

  int num_siv;
  int num_siv_independent;
  int num_siv_dependent;
  int num_siv_unimplemented;

  int num_miv;
  int num_miv_independent;
  int num_miv_dependent;
  int num_miv_unimplemented;
} dependence_stats;

static bool subscript_dependence_tester_1 (struct data_dependence_relation *,
					   unsigned int, unsigned int,
					   class loop *);

/* Returns true iff A divides B.  */

static inline bool
tree_fold_divides_p (const_tree a, const_tree b)
{
  gcc_assert (TREE_CODE (a) == INTEGER_CST);
  gcc_assert (TREE_CODE (b) == INTEGER_CST);
  return integer_zerop (int_const_binop (TRUNC_MOD_EXPR, b, a));
}

/* Returns true iff A divides B.  */

static inline bool
int_divides_p (lambda_int a, lambda_int b)
{
  return ((b % a) == 0);
}

/* Return true if reference REF contains a union access.  */

static bool
ref_contains_union_access_p (tree ref)
{
  while (handled_component_p (ref))
    {
      ref = TREE_OPERAND (ref, 0);
      if (TREE_CODE (TREE_TYPE (ref)) == UNION_TYPE
	  || TREE_CODE (TREE_TYPE (ref)) == QUAL_UNION_TYPE)
	return true;
    }
  return false;
}

/* Dump into FILE all the data references from DATAREFS.  */

static void
dump_data_references (FILE *file, vec<data_reference_p> datarefs)
{
  unsigned int i;
  struct data_reference *dr;

  FOR_EACH_VEC_ELT (datarefs, i, dr)
    dump_data_reference (file, dr);
}

/* Unified dump into FILE all the data references from DATAREFS.  */

DEBUG_FUNCTION void
debug (vec<data_reference_p> &ref)
{
  dump_data_references (stderr, ref);
}

DEBUG_FUNCTION void
debug (vec<data_reference_p> *ptr)
{
  if (ptr)
    debug (*ptr);
  else
    fprintf (stderr, "<nil>\n");
}

/* Dump into STDERR all the data references from DATAREFS.  */

DEBUG_FUNCTION void
debug_data_references (vec<data_reference_p> datarefs)
{
  dump_data_references (stderr, datarefs);
}

/* Print to STDERR the data_reference DR.  */

DEBUG_FUNCTION void
debug_data_reference (struct data_reference *dr)
{
  dump_data_reference (stderr, dr);
}

/* Dump function for a DATA_REFERENCE structure.  */

void
dump_data_reference (FILE *outf,
		     struct data_reference *dr)
{
  unsigned int i;

  fprintf (outf, "#(Data Ref: \n");
  fprintf (outf, "#  bb: %d \n", gimple_bb (DR_STMT (dr))->index);
  fprintf (outf, "#  stmt: ");
  print_gimple_stmt (outf, DR_STMT (dr), 0);
  fprintf (outf, "#  ref: ");
  print_generic_stmt (outf, DR_REF (dr));
  fprintf (outf, "#  base_object: ");
  print_generic_stmt (outf, DR_BASE_OBJECT (dr));

  for (i = 0; i < DR_NUM_DIMENSIONS (dr); i++)
    {
      fprintf (outf, "#  Access function %d: ", i);
      print_generic_stmt (outf, DR_ACCESS_FN (dr, i));
    }
  fprintf (outf, "#)\n");
}

/* Unified dump function for a DATA_REFERENCE structure.  */

DEBUG_FUNCTION void
debug (data_reference &ref)
{
  dump_data_reference (stderr, &ref);
}

DEBUG_FUNCTION void
debug (data_reference *ptr)
{
  if (ptr)
    debug (*ptr);
  else
    fprintf (stderr, "<nil>\n");
}

/* Dumps the affine function described by FN to the file OUTF.  */

static void
dump_affine_function (FILE *outf, affine_fn fn)
{
  unsigned i;
  tree coef;

  print_generic_expr (outf, fn[0], TDF_SLIM);
  for (i = 1; fn.iterate (i, &coef); i++)
    {
      fprintf (outf, " + ");
      print_generic_expr (outf, coef, TDF_SLIM);
      fprintf (outf, " * x_%u", i);
    }
}

/* Dumps the conflict function CF to the file OUTF.  */

static void
dump_conflict_function (FILE *outf, conflict_function *cf)
{
  unsigned i;

  if (cf->n == NO_DEPENDENCE)
    fprintf (outf, "no dependence");
  else if (cf->n == NOT_KNOWN)
    fprintf (outf, "not known");
  else
    {
      for (i = 0; i < cf->n; i++)
	{
	  if (i != 0)
	    fprintf (outf, " ");
	  fprintf (outf, "[");
	  dump_affine_function (outf, cf->fns[i]);
	  fprintf (outf, "]");
	}
    }
}

/* Dump function for a SUBSCRIPT structure.  */

static void
dump_subscript (FILE *outf, struct subscript *subscript)
{
  conflict_function *cf = SUB_CONFLICTS_IN_A (subscript);

  fprintf (outf, "\n (subscript \n");
  fprintf (outf, "  iterations_that_access_an_element_twice_in_A: ");
  dump_conflict_function (outf, cf);
  if (CF_NONTRIVIAL_P (cf))
    {
      tree last_iteration = SUB_LAST_CONFLICT (subscript);
      fprintf (outf, "\n  last_conflict: ");
      print_generic_expr (outf, last_iteration);
    }

  cf = SUB_CONFLICTS_IN_B (subscript);
  fprintf (outf, "\n  iterations_that_access_an_element_twice_in_B: ");
  dump_conflict_function (outf, cf);
  if (CF_NONTRIVIAL_P (cf))
    {
      tree last_iteration = SUB_LAST_CONFLICT (subscript);
      fprintf (outf, "\n  last_conflict: ");
      print_generic_expr (outf, last_iteration);
    }

  fprintf (outf, "\n  (Subscript distance: ");
  print_generic_expr (outf, SUB_DISTANCE (subscript));
  fprintf (outf, " ))\n");
}

/* Print the classic direction vector DIRV to OUTF.  */

void
print_direction_vector (FILE *outf,
			lambda_vector dirv,
			int length)
{
  int eq;

  for (eq = 0; eq < length; eq++)
    {
      enum data_dependence_direction dir = ((enum data_dependence_direction)
					    dirv[eq]);

      switch (dir)
	{
	case dir_positive:
	  fprintf (outf, "    +");
	  break;
	case dir_negative:
	  fprintf (outf, "    -");
	  break;
	case dir_equal:
	  fprintf (outf, "    =");
	  break;
	case dir_positive_or_equal:
	  fprintf (outf, "   +=");
	  break;
	case dir_positive_or_negative:
	  fprintf (outf, "   +-");
	  break;
	case dir_negative_or_equal:
	  fprintf (outf, "   -=");
	  break;
	case dir_star:
	  fprintf (outf, "    *");
	  break;
	default:
	  fprintf (outf, "indep");
	  break;
	}
    }
  fprintf (outf, "\n");
}

/* Print a vector of direction vectors.  */

void
print_dir_vectors (FILE *outf, vec<lambda_vector> dir_vects,
		   int length)
{
  unsigned j;
  lambda_vector v;

  FOR_EACH_VEC_ELT (dir_vects, j, v)
    print_direction_vector (outf, v, length);
}

/* Print out a vector VEC of length N to OUTFILE.  */

void
print_lambda_vector (FILE * outfile, lambda_vector vector, int n)
{
  int i;

  for (i = 0; i < n; i++)
    fprintf (outfile, "%3d ", (int)vector[i]);
  fprintf (outfile, "\n");
}

/* Print a vector of distance vectors.  */

void
print_dist_vectors (FILE *outf, vec<lambda_vector> dist_vects,
		    int length)
{
  unsigned j;
  lambda_vector v;

  FOR_EACH_VEC_ELT (dist_vects, j, v)
    print_lambda_vector (outf, v, length);
}

/* Dump function for a DATA_DEPENDENCE_RELATION structure.  */

void
dump_data_dependence_relation (FILE *outf,
			       struct data_dependence_relation *ddr)
{
  struct data_reference *dra, *drb;

  fprintf (outf, "(Data Dep: \n");

  if (!ddr || DDR_ARE_DEPENDENT (ddr) == chrec_dont_know)
    {
      if (ddr)
	{
	  dra = DDR_A (ddr);
	  drb = DDR_B (ddr);
	  if (dra)
	    dump_data_reference (outf, dra);
	  else
	    fprintf (outf, "    (nil)\n");
	  if (drb)
	    dump_data_reference (outf, drb);
	  else
	    fprintf (outf, "    (nil)\n");
	}
      fprintf (outf, "    (don't know)\n)\n");
      return;
    }

  dra = DDR_A (ddr);
  drb = DDR_B (ddr);
  dump_data_reference (outf, dra);
  dump_data_reference (outf, drb);

  if (DDR_ARE_DEPENDENT (ddr) == chrec_known)
    fprintf (outf, "    (no dependence)\n");

  else if (DDR_ARE_DEPENDENT (ddr) == NULL_TREE)
    {
      unsigned int i;
      class loop *loopi;

      subscript *sub;
      FOR_EACH_VEC_ELT (DDR_SUBSCRIPTS (ddr), i, sub)
	{
	  fprintf (outf, "  access_fn_A: ");
	  print_generic_stmt (outf, SUB_ACCESS_FN (sub, 0));
	  fprintf (outf, "  access_fn_B: ");
	  print_generic_stmt (outf, SUB_ACCESS_FN (sub, 1));
	  dump_subscript (outf, sub);
	}

      fprintf (outf, "  loop nest: (");
      FOR_EACH_VEC_ELT (DDR_LOOP_NEST (ddr), i, loopi)
	fprintf (outf, "%d ", loopi->num);
      fprintf (outf, ")\n");

      for (i = 0; i < DDR_NUM_DIST_VECTS (ddr); i++)
	{
	  fprintf (outf, "  distance_vector: ");
	  print_lambda_vector (outf, DDR_DIST_VECT (ddr, i),
			       DDR_NB_LOOPS (ddr));
	}

      for (i = 0; i < DDR_NUM_DIR_VECTS (ddr); i++)
	{
	  fprintf (outf, "  direction_vector: ");
	  print_direction_vector (outf, DDR_DIR_VECT (ddr, i),
				  DDR_NB_LOOPS (ddr));
	}
    }

  fprintf (outf, ")\n");
}

/* Debug version.  */

DEBUG_FUNCTION void
debug_data_dependence_relation (struct data_dependence_relation *ddr)
{
  dump_data_dependence_relation (stderr, ddr);
}

/* Dump into FILE all the dependence relations from DDRS.  */

void
dump_data_dependence_relations (FILE *file,
				vec<ddr_p> ddrs)
{
  unsigned int i;
  struct data_dependence_relation *ddr;

  FOR_EACH_VEC_ELT (ddrs, i, ddr)
    dump_data_dependence_relation (file, ddr);
}

DEBUG_FUNCTION void
debug (vec<ddr_p> &ref)
{
  dump_data_dependence_relations (stderr, ref);
}

DEBUG_FUNCTION void
debug (vec<ddr_p> *ptr)
{
  if (ptr)
    debug (*ptr);
  else
    fprintf (stderr, "<nil>\n");
}

/* Dump to STDERR all the dependence relations from DDRS.  */

DEBUG_FUNCTION void
debug_data_dependence_relations (vec<ddr_p> ddrs)
{
  dump_data_dependence_relations (stderr, ddrs);
}

/* Dumps the distance and direction vectors in FILE.  DDRS contains
   the dependence relations, and VECT_SIZE is the size of the
   dependence vectors, or in other words the number of loops in the
   considered nest.  */

DEBUG_FUNCTION void
dump_dist_dir_vectors (FILE *file, vec<ddr_p> ddrs)
{
  unsigned int i, j;
  struct data_dependence_relation *ddr;
  lambda_vector v;

  FOR_EACH_VEC_ELT (ddrs, i, ddr)
    if (DDR_ARE_DEPENDENT (ddr) == NULL_TREE && DDR_AFFINE_P (ddr))
      {
	FOR_EACH_VEC_ELT (DDR_DIST_VECTS (ddr), j, v)
	  {
	    fprintf (file, "DISTANCE_V (");
	    print_lambda_vector (file, v, DDR_NB_LOOPS (ddr));
	    fprintf (file, ")\n");
	  }

	FOR_EACH_VEC_ELT (DDR_DIR_VECTS (ddr), j, v)
	  {
	    fprintf (file, "DIRECTION_V (");
	    print_direction_vector (file, v, DDR_NB_LOOPS (ddr));
	    fprintf (file, ")\n");
	  }
      }

  fprintf (file, "\n\n");
}

/* Dumps the data dependence relations DDRS in FILE.  */

DEBUG_FUNCTION void
dump_ddrs (FILE *file, vec<ddr_p> ddrs)
{
  unsigned int i;
  struct data_dependence_relation *ddr;

  FOR_EACH_VEC_ELT (ddrs, i, ddr)
    dump_data_dependence_relation (file, ddr);

  fprintf (file, "\n\n");
}

DEBUG_FUNCTION void
debug_ddrs (vec<ddr_p> ddrs)
{
  dump_ddrs (stderr, ddrs);
}

/* If RESULT_RANGE is nonnull, set *RESULT_RANGE to the range of
   OP0 CODE OP1, where:

   - OP0 CODE OP1 has integral type TYPE
   - the range of OP0 is given by OP0_RANGE and
   - the range of OP1 is given by OP1_RANGE.

   Independently of RESULT_RANGE, try to compute:

     DELTA = ((sizetype) OP0 CODE (sizetype) OP1)
	     - (sizetype) (OP0 CODE OP1)

   as a constant and subtract DELTA from the ssizetype constant in *OFF.
   Return true on success, or false if DELTA is not known at compile time.

   Truncation and sign changes are known to distribute over CODE, i.e.

     (itype) (A CODE B) == (itype) A CODE (itype) B

   for any integral type ITYPE whose precision is no greater than the
   precision of A and B.  */
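
/* As a worked illustration (with made-up numbers): assume TYPE is an
   8-bit unsigned type, OP0 has range [250, 250], OP1 has range [10, 10]
   and CODE is PLUS_EXPR.  The addition wraps to 4 in TYPE while the
   sizetype computation gives 260, so DELTA == 256 and 256 is subtracted
   from *OFF.  */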

static bool
compute_distributive_range (tree type, value_range &op0_range,
			    tree_code code, value_range &op1_range,
			    tree *off, value_range *result_range)
{
  gcc_assert (INTEGRAL_TYPE_P (type) && !TYPE_OVERFLOW_TRAPS (type));
  if (result_range)
    {
      range_operator *op = range_op_handler (code, type);
      op->fold_range (*result_range, type, op0_range, op1_range);
    }

  /* The distributive property guarantees that if TYPE is no narrower
     than SIZETYPE,

       (sizetype) (OP0 CODE OP1) == (sizetype) OP0 CODE (sizetype) OP1

     and so we can treat DELTA as zero.  */
  if (TYPE_PRECISION (type) >= TYPE_PRECISION (sizetype))
    return true;

  /* If overflow is undefined, we can assume that:

       X == (ssizetype) OP0 CODE (ssizetype) OP1

     is within the range of TYPE, i.e.:

       X == (ssizetype) (TYPE) X

     Distributing the (TYPE) truncation over X gives:

       X == (ssizetype) (OP0 CODE OP1)

     Casting both sides to sizetype and distributing the sizetype cast
     over X gives:

       (sizetype) OP0 CODE (sizetype) OP1 == (sizetype) (OP0 CODE OP1)

     and so we can treat DELTA as zero.  */
  if (TYPE_OVERFLOW_UNDEFINED (type))
    return true;

  /* Compute the range of:

       (ssizetype) OP0 CODE (ssizetype) OP1

     The distributive property guarantees that this has the same bitpattern as:

       (sizetype) OP0 CODE (sizetype) OP1

     but its range is more conducive to analysis.  */
  range_cast (op0_range, ssizetype);
  range_cast (op1_range, ssizetype);
  value_range wide_range;
  range_operator *op = range_op_handler (code, ssizetype);
  bool saved_flag_wrapv = flag_wrapv;
  flag_wrapv = 1;
  op->fold_range (wide_range, ssizetype, op0_range, op1_range);
  flag_wrapv = saved_flag_wrapv;
  if (wide_range.num_pairs () != 1 || !range_int_cst_p (&wide_range))
    return false;

  wide_int lb = wide_range.lower_bound ();
  wide_int ub = wide_range.upper_bound ();

  /* Calculate the number of times that each end of the range overflows or
     underflows TYPE.  We can only calculate DELTA if the numbers match.  */
  unsigned int precision = TYPE_PRECISION (type);
  if (!TYPE_UNSIGNED (type))
    {
      wide_int type_min = wi::mask (precision - 1, true, lb.get_precision ());
      lb -= type_min;
      ub -= type_min;
    }
  wide_int upper_bits = wi::mask (precision, true, lb.get_precision ());
  lb &= upper_bits;
  ub &= upper_bits;
  if (lb != ub)
    return false;

  /* OP0 CODE OP1 overflows exactly arshift (LB, PRECISION) times, with
     negative values indicating underflow.  The low PRECISION bits of LB
     are clear, so DELTA is therefore LB (== UB).  */
  *off = wide_int_to_tree (ssizetype, wi::to_wide (*off) - lb);
  return true;
}

/* Return true if (sizetype) OP == (sizetype) (TO_TYPE) OP,
   given that OP has type FROM_TYPE and range RANGE.  Both TO_TYPE and
   FROM_TYPE are integral types.  */
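
/* For instance (an illustrative case, not exhaustive): truncating a
   32-bit OP whose range is known to be [0, 100] to a 16-bit TO_TYPE
   cannot change the value, so the conversion can be ignored when
   splitting off a constant offset.  */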

static bool
nop_conversion_for_offset_p (tree to_type, tree from_type, value_range &range)
{
  gcc_assert (INTEGRAL_TYPE_P (to_type)
	      && INTEGRAL_TYPE_P (from_type)
	      && !TYPE_OVERFLOW_TRAPS (to_type)
	      && !TYPE_OVERFLOW_TRAPS (from_type));

  /* Converting to something no narrower than sizetype and then to sizetype
     is equivalent to converting directly to sizetype.  */
  if (TYPE_PRECISION (to_type) >= TYPE_PRECISION (sizetype))
    return true;

  /* Check whether TO_TYPE can represent all values that FROM_TYPE can.  */
  if (TYPE_PRECISION (from_type) < TYPE_PRECISION (to_type)
      && (TYPE_UNSIGNED (from_type) || !TYPE_UNSIGNED (to_type)))
    return true;

  /* For narrowing conversions, we could in principle test whether
     the bits in FROM_TYPE but not in TO_TYPE have a fixed value
     and apply a constant adjustment.

     For other conversions (which involve a sign change) we could
     check that the signs are always equal, and apply a constant
     adjustment if the signs are negative.

     However, both cases should be rare.  */
  return range_fits_type_p (&range, TYPE_PRECISION (to_type),
			    TYPE_SIGN (to_type));
}

static void
split_constant_offset (tree type, tree *var, tree *off,
		       value_range *result_range,
		       hash_map<tree, std::pair<tree, tree> > &cache,
		       unsigned *limit);

/* Helper function for split_constant_offset.  If TYPE is a pointer type,
   try to express OP0 CODE OP1 as:

     POINTER_PLUS <*VAR, (sizetype) *OFF>

   where:

   - *VAR has type TYPE
   - *OFF is a constant of type ssizetype.

   If TYPE is an integral type, try to express (sizetype) (OP0 CODE OP1) as:

     *VAR + (sizetype) *OFF

   where:

   - *VAR has type sizetype
   - *OFF is a constant of type ssizetype.

   In both cases, OP0 CODE OP1 has type TYPE.

   Return true on success.  A false return value indicates that we can't
   do better than set *OFF to zero.

   When returning true, set RESULT_RANGE to the range of OP0 CODE OP1,
   if RESULT_RANGE is nonnull and if we can do better than assume VR_VARYING.

   CACHE caches {*VAR, *OFF} pairs for SSA names that we've previously
   visited.  LIMIT counts down the number of SSA names that we are
   allowed to process before giving up.  */
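
/* For instance (an illustrative sketch): with a pointer-typed OP0 = p_1,
   CODE == POINTER_PLUS_EXPR and OP1 = 16, the split produces *OFF = 16
   plus whatever constant part can be peeled from the definition of p_1,
   with *VAR holding the remaining variable part.  */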

static bool
split_constant_offset_1 (tree type, tree op0, enum tree_code code, tree op1,
			 tree *var, tree *off, value_range *result_range,
			 hash_map<tree, std::pair<tree, tree> > &cache,
			 unsigned *limit)
{
  tree var0, var1;
  tree off0, off1;
  value_range op0_range, op1_range;

  *var = NULL_TREE;
  *off = NULL_TREE;

  switch (code)
    {
    case INTEGER_CST:
      *var = size_int (0);
      *off = fold_convert (ssizetype, op0);
      if (result_range)
	result_range->set (op0, op0);
      return true;

    case POINTER_PLUS_EXPR:
      split_constant_offset (op0, &var0, &off0, nullptr, cache, limit);
      split_constant_offset (op1, &var1, &off1, nullptr, cache, limit);
      *var = fold_build2 (POINTER_PLUS_EXPR, type, var0, var1);
      *off = size_binop (PLUS_EXPR, off0, off1);
      return true;

    case PLUS_EXPR:
    case MINUS_EXPR:
      split_constant_offset (op0, &var0, &off0, &op0_range, cache, limit);
      split_constant_offset (op1, &var1, &off1, &op1_range, cache, limit);
      *off = size_binop (code, off0, off1);
      if (!compute_distributive_range (type, op0_range, code, op1_range,
				       off, result_range))
	return false;
      *var = fold_build2 (code, sizetype, var0, var1);
      return true;

    case MULT_EXPR:
      if (TREE_CODE (op1) != INTEGER_CST)
	return false;

      split_constant_offset (op0, &var0, &off0, &op0_range, cache, limit);
      op1_range.set (op1, op1);
      *off = size_binop (MULT_EXPR, off0, fold_convert (ssizetype, op1));
      if (!compute_distributive_range (type, op0_range, code, op1_range,
				       off, result_range))
	return false;
      *var = fold_build2 (MULT_EXPR, sizetype, var0,
			  fold_convert (sizetype, op1));
      return true;

    case ADDR_EXPR:
      {
	tree base, poffset;
	poly_int64 pbitsize, pbitpos, pbytepos;
	machine_mode pmode;
	int punsignedp, preversep, pvolatilep;

	op0 = TREE_OPERAND (op0, 0);
	base
	  = get_inner_reference (op0, &pbitsize, &pbitpos, &poffset, &pmode,
				 &punsignedp, &preversep, &pvolatilep);

	if (!multiple_p (pbitpos, BITS_PER_UNIT, &pbytepos))
	  return false;
	base = build_fold_addr_expr (base);
	off0 = ssize_int (pbytepos);

	if (poffset)
	  {
	    split_constant_offset (poffset, &poffset, &off1, nullptr,
				   cache, limit);
	    off0 = size_binop (PLUS_EXPR, off0, off1);
	    base = fold_build_pointer_plus (base, poffset);
	  }

	var0 = fold_convert (type, base);

	/* If variable length types are involved, punt, otherwise casts
	   might be converted into ARRAY_REFs in gimplify_conversion.
	   To compute that ARRAY_REF's element size TYPE_SIZE_UNIT, which
	   possibly no longer appears in current GIMPLE, might resurface.
	   This perhaps could run
	   if (CONVERT_EXPR_P (var0))
	     {
	       gimplify_conversion (&var0);
	       // Attempt to fill in any within var0 found ARRAY_REF's
	       // element size from corresponding op embedded ARRAY_REF,
	       // if unsuccessful, just punt.
	     }  */
	while (POINTER_TYPE_P (type))
	  type = TREE_TYPE (type);
	if (int_size_in_bytes (type) < 0)
	  return false;

	*var = var0;
	*off = off0;
	return true;
      }

    case SSA_NAME:
      {
	if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (op0))
	  return false;

	gimple *def_stmt = SSA_NAME_DEF_STMT (op0);
	enum tree_code subcode;

	if (gimple_code (def_stmt) != GIMPLE_ASSIGN)
	  return false;

	subcode = gimple_assign_rhs_code (def_stmt);

	/* We are using a cache to avoid un-CSEing large amounts of code.  */
	bool use_cache = false;
	if (!has_single_use (op0)
	    && (subcode == POINTER_PLUS_EXPR
		|| subcode == PLUS_EXPR
		|| subcode == MINUS_EXPR
		|| subcode == MULT_EXPR
		|| subcode == ADDR_EXPR
		|| CONVERT_EXPR_CODE_P (subcode)))
	  {
	    use_cache = true;
	    bool existed;
	    std::pair<tree, tree> &e = cache.get_or_insert (op0, &existed);
	    if (existed)
	      {
		if (integer_zerop (e.second))
		  return false;
		*var = e.first;
		*off = e.second;
		/* The caller sets the range in this case.  */
		return true;
	      }
	    e = std::make_pair (op0, ssize_int (0));
	  }

	if (*limit == 0)
	  return false;
	--*limit;

	var0 = gimple_assign_rhs1 (def_stmt);
	var1 = gimple_assign_rhs2 (def_stmt);

	bool res = split_constant_offset_1 (type, var0, subcode, var1,
					    var, off, nullptr, cache, limit);
	if (res && use_cache)
	  *cache.get (op0) = std::make_pair (*var, *off);
	/* The caller sets the range in this case.  */
	return res;
      }

    CASE_CONVERT:
      {
	/* We can only handle the following conversions:

	   - Conversions from one pointer type to another pointer type.

	   - Conversions from one non-trapping integral type to another
	     non-trapping integral type.  In this case, the recursive
	     call makes sure that:

	       (sizetype) OP0

	     can be expressed as a sizetype operation involving VAR and OFF,
	     and all we need to do is check whether:

	       (sizetype) OP0 == (sizetype) (TYPE) OP0

	   - Conversions from a non-trapping sizetype-size integral type to
	     a like-sized pointer type.  In this case, the recursive call
	     makes sure that:

	       (sizetype) OP0 == *VAR + (sizetype) *OFF

	     and we can convert that to:

	       POINTER_PLUS <(TYPE) *VAR, (sizetype) *OFF>

	   - Conversions from a sizetype-sized pointer type to a like-sized
	     non-trapping integral type.  In this case, the recursive call
	     makes sure that:

	       OP0 == POINTER_PLUS <*VAR, (sizetype) *OFF>

	     where the POINTER_PLUS and *VAR have the same precision as
	     TYPE (and the same precision as sizetype).  Then:

	       (sizetype) (TYPE) OP0 == (sizetype) *VAR + (sizetype) *OFF.  */
	tree itype = TREE_TYPE (op0);
	if ((POINTER_TYPE_P (itype)
	     || (INTEGRAL_TYPE_P (itype) && !TYPE_OVERFLOW_TRAPS (itype)))
	    && (POINTER_TYPE_P (type)
		|| (INTEGRAL_TYPE_P (type) && !TYPE_OVERFLOW_TRAPS (type)))
	    && (POINTER_TYPE_P (type) == POINTER_TYPE_P (itype)
		|| (TYPE_PRECISION (type) == TYPE_PRECISION (sizetype)
		    && TYPE_PRECISION (itype) == TYPE_PRECISION (sizetype))))
	  {
	    if (POINTER_TYPE_P (type))
	      {
		split_constant_offset (op0, var, off, nullptr, cache, limit);
		*var = fold_convert (type, *var);
	      }
	    else if (POINTER_TYPE_P (itype))
	      {
		split_constant_offset (op0, var, off, nullptr, cache, limit);
		*var = fold_convert (sizetype, *var);
	      }
	    else
	      {
		split_constant_offset (op0, var, off, &op0_range,
				       cache, limit);
		if (!nop_conversion_for_offset_p (type, itype, op0_range))
		  return false;
		if (result_range)
		  {
		    *result_range = op0_range;
		    range_cast (*result_range, type);
		  }
	      }
	    return true;
	  }
	return false;
      }

    default:
      return false;
    }
}

/* If EXP has pointer type, try to express it as:

     POINTER_PLUS <*VAR, (sizetype) *OFF>

   where:

   - *VAR has the same type as EXP
   - *OFF is a constant of type ssizetype.

   If EXP has an integral type, try to express (sizetype) EXP as:

     *VAR + (sizetype) *OFF

   where:

   - *VAR has type sizetype
   - *OFF is a constant of type ssizetype.

   If EXP_RANGE is nonnull, set it to the range of EXP.

   CACHE caches {*VAR, *OFF} pairs for SSA names that we've previously
   visited.  LIMIT counts down the number of SSA names that we are
   allowed to process before giving up.  */

static void
split_constant_offset (tree exp, tree *var, tree *off, value_range *exp_range,
		       hash_map<tree, std::pair<tree, tree> > &cache,
		       unsigned *limit)
{
  tree type = TREE_TYPE (exp), op0, op1;
  enum tree_code code;

  code = TREE_CODE (exp);
  if (exp_range)
    {
      exp_range->set_varying (type);
      if (code == SSA_NAME)
	{
	  wide_int var_min, var_max;
	  value_range_kind vr_kind = get_range_info (exp, &var_min, &var_max);
	  wide_int var_nonzero = get_nonzero_bits (exp);
	  vr_kind = intersect_range_with_nonzero_bits (vr_kind,
						       &var_min, &var_max,
						       var_nonzero,
						       TYPE_SIGN (type));
	  if (vr_kind == VR_RANGE)
	    *exp_range = value_range (type, var_min, var_max);
	}
    }

  if (!tree_is_chrec (exp)
      && get_gimple_rhs_class (TREE_CODE (exp)) != GIMPLE_TERNARY_RHS)
    {
      extract_ops_from_tree (exp, &code, &op0, &op1);
      if (split_constant_offset_1 (type, op0, code, op1, var, off,
				   exp_range, cache, limit))
	return;
    }

  *var = exp;
  if (INTEGRAL_TYPE_P (type))
    *var = fold_convert (sizetype, *var);
  *off = ssize_int (0);
  if (exp_range && code != SSA_NAME)
    {
      wide_int var_min, var_max;
      if (determine_value_range (exp, &var_min, &var_max) == VR_RANGE)
	*exp_range = value_range (type, var_min, var_max);
    }
}

/* Expresses EXP as VAR + OFF, where OFF is a constant.  VAR has the same
   type as EXP while OFF has type ssizetype.  */
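
/* For instance (an illustrative sketch): for EXP = &a[i + 4] with
   4-byte array elements, the expected result is *OFF = 16, with *VAR
   holding the address computation based on &a[i].  */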

void
split_constant_offset (tree exp, tree *var, tree *off)
{
  unsigned limit = param_ssa_name_def_chain_limit;
  static hash_map<tree, std::pair<tree, tree> > *cache;
  if (!cache)
    cache = new hash_map<tree, std::pair<tree, tree> > (37);
  split_constant_offset (exp, var, off, nullptr, *cache, &limit);
  *var = fold_convert (TREE_TYPE (exp), *var);
  cache->empty ();
}

/* Returns the address ADDR of an object in a canonical shape (without nop
   casts, and with type of pointer to the object).  */

static tree
canonicalize_base_object_address (tree addr)
{
  tree orig = addr;

  STRIP_NOPS (addr);

  /* The base address may be obtained by casting from integer, in that case
     keep the cast.  */
  if (!POINTER_TYPE_P (TREE_TYPE (addr)))
    return orig;

  if (TREE_CODE (addr) != ADDR_EXPR)
    return addr;

  return build_fold_addr_expr (TREE_OPERAND (addr, 0));
}

/* Analyze the behavior of memory reference REF within STMT.
   There are two modes:

   - BB analysis.  In this case we simply split the address into base,
     init and offset components, without reference to any containing loop.
     The resulting base and offset are general expressions and they can
     vary arbitrarily from one iteration of the containing loop to the next.
     The step is always zero.

   - loop analysis.  In this case we analyze the reference both wrt LOOP
     and on the basis that the reference occurs (is "used") in LOOP;
     see the comment above analyze_scalar_evolution_in_loop for more
     information about this distinction.  The base, init, offset and
     step fields are all invariant in LOOP.

   Perform BB analysis if LOOP is null, or if LOOP is the function's
   dummy outermost loop.  In other cases perform loop analysis.

   Return true if the analysis succeeded and store the results in DRB if so.
   BB analysis can only fail for bitfield or reversed-storage accesses.  */
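
/* For instance (an illustrative sketch): for a 4-byte int access a[i]
   inside a loop over i, loop analysis would typically produce
   base_address = &a, offset = 0, init = 0 and step = 4; the per-dimension
   access function {0, +, 1}_1 is recorded separately by
   dr_analyze_indices.  */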

opt_result
dr_analyze_innermost (innermost_loop_behavior *drb, tree ref,
		      class loop *loop, const gimple *stmt)
{
  poly_int64 pbitsize, pbitpos;
  tree base, poffset;
  machine_mode pmode;
  int punsignedp, preversep, pvolatilep;
  affine_iv base_iv, offset_iv;
  tree init, dinit, step;
  bool in_loop = (loop && loop->num);

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "analyze_innermost: ");

  base = get_inner_reference (ref, &pbitsize, &pbitpos, &poffset, &pmode,
			      &punsignedp, &preversep, &pvolatilep);
  gcc_assert (base != NULL_TREE);

  poly_int64 pbytepos;
  if (!multiple_p (pbitpos, BITS_PER_UNIT, &pbytepos))
    return opt_result::failure_at (stmt,
				   "failed: bit offset alignment.\n");

  if (preversep)
    return opt_result::failure_at (stmt,
				   "failed: reverse storage order.\n");

  /* Calculate the alignment and misalignment for the inner reference.  */
  unsigned HOST_WIDE_INT bit_base_misalignment;
  unsigned int bit_base_alignment;
  get_object_alignment_1 (base, &bit_base_alignment, &bit_base_misalignment);

  /* There are no bitfield references remaining in BASE, so the values
     we got back must be whole bytes.  */
  gcc_assert (bit_base_alignment % BITS_PER_UNIT == 0
	      && bit_base_misalignment % BITS_PER_UNIT == 0);
  unsigned int base_alignment = bit_base_alignment / BITS_PER_UNIT;
  poly_int64 base_misalignment = bit_base_misalignment / BITS_PER_UNIT;

  if (TREE_CODE (base) == MEM_REF)
    {
      if (!integer_zerop (TREE_OPERAND (base, 1)))
	{
	  /* Subtract MOFF from the base and add it to POFFSET instead.
	     Adjust the misalignment to reflect the amount we subtracted.  */
	  poly_offset_int moff = mem_ref_offset (base);
	  base_misalignment -= moff.force_shwi ();
	  tree mofft = wide_int_to_tree (sizetype, moff);
	  if (!poffset)
	    poffset = mofft;
	  else
	    poffset = size_binop (PLUS_EXPR, poffset, mofft);
	}
      base = TREE_OPERAND (base, 0);
    }
  else
    base = build_fold_addr_expr (base);

  if (in_loop)
    {
      if (!simple_iv (loop, loop, base, &base_iv, true))
	return opt_result::failure_at
	  (stmt, "failed: evolution of base is not affine.\n");
    }
  else
    {
      base_iv.base = base;
      base_iv.step = ssize_int (0);
      base_iv.no_overflow = true;
    }

  if (!poffset)
    {
      offset_iv.base = ssize_int (0);
      offset_iv.step = ssize_int (0);
    }
  else
    {
      if (!in_loop)
	{
	  offset_iv.base = poffset;
	  offset_iv.step = ssize_int (0);
	}
      else if (!simple_iv (loop, loop, poffset, &offset_iv, true))
	return opt_result::failure_at
	  (stmt, "failed: evolution of offset is not affine.\n");
    }

  init = ssize_int (pbytepos);

  /* Subtract any constant component from the base and add it to INIT instead.
     Adjust the misalignment to reflect the amount we subtracted.  */
  split_constant_offset (base_iv.base, &base_iv.base, &dinit);
  init = size_binop (PLUS_EXPR, init, dinit);
  base_misalignment -= TREE_INT_CST_LOW (dinit);

  split_constant_offset (offset_iv.base, &offset_iv.base, &dinit);
  init = size_binop (PLUS_EXPR, init, dinit);

  step = size_binop (PLUS_EXPR,
		     fold_convert (ssizetype, base_iv.step),
		     fold_convert (ssizetype, offset_iv.step));

  base = canonicalize_base_object_address (base_iv.base);

  /* See if get_pointer_alignment can guarantee a higher alignment than
     the one we calculated above.  */
  unsigned HOST_WIDE_INT alt_misalignment;
  unsigned int alt_alignment;
  get_pointer_alignment_1 (base, &alt_alignment, &alt_misalignment);

  /* As above, these values must be whole bytes.  */
  gcc_assert (alt_alignment % BITS_PER_UNIT == 0
	      && alt_misalignment % BITS_PER_UNIT == 0);
  alt_alignment /= BITS_PER_UNIT;
  alt_misalignment /= BITS_PER_UNIT;

  if (base_alignment < alt_alignment)
    {
      base_alignment = alt_alignment;
      base_misalignment = alt_misalignment;
    }

  drb->base_address = base;
  drb->offset = fold_convert (ssizetype, offset_iv.base);
  drb->init = init;
  drb->step = step;
  if (known_misalignment (base_misalignment, base_alignment,
			  &drb->base_misalignment))
    drb->base_alignment = base_alignment;
  else
    {
      drb->base_alignment = known_alignment (base_misalignment);
      drb->base_misalignment = 0;
    }
  drb->offset_alignment = highest_pow2_factor (offset_iv.base);
  drb->step_alignment = highest_pow2_factor (step);

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "success.\n");

  return opt_result::success ();
}

/* Return true if OP is a valid component reference for a DR access
   function.  This accepts a subset of what handled_component_p accepts.  */

static bool
access_fn_component_p (tree op)
{
  switch (TREE_CODE (op))
    {
    case REALPART_EXPR:
    case IMAGPART_EXPR:
    case ARRAY_REF:
      return true;

    case COMPONENT_REF:
      return TREE_CODE (TREE_TYPE (TREE_OPERAND (op, 0))) == RECORD_TYPE;

    default:
      return false;
    }
}

/* Returns whether BASE can have an access_fn_component_p with BASE
   as base.  */

static bool
base_supports_access_fn_components_p (tree base)
{
  switch (TREE_CODE (TREE_TYPE (base)))
    {
    case COMPLEX_TYPE:
    case ARRAY_TYPE:
    case RECORD_TYPE:
      return true;
    default:
      return false;
    }
}

/* Determines the base object and the list of indices of memory reference
   DR, analyzed in LOOP and instantiated before NEST.  */
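
/* For instance (an illustrative sketch): for a[i][j] in a loop nest over
   i and j, the recorded access functions would typically be {0, +, 1}_1
   and {0, +, 1}_2, one per array dimension, with DR_BASE_OBJECT covering
   the underlying object a.  */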

static void
dr_analyze_indices (struct data_reference *dr, edge nest, loop_p loop)
{
  vec<tree> access_fns = vNULL;
  tree ref, op;
  tree base, off, access_fn;

  /* If analyzing a basic-block there are no indices to analyze
     and thus no access functions.  */
  if (!nest)
    {
      DR_BASE_OBJECT (dr) = DR_REF (dr);
      DR_ACCESS_FNS (dr).create (0);
      return;
    }

  ref = DR_REF (dr);

  /* REALPART_EXPR and IMAGPART_EXPR can be handled like accesses
     into a two element array with a constant index.  The base is
     then just the immediate underlying object.  */
  if (TREE_CODE (ref) == REALPART_EXPR)
    {
      ref = TREE_OPERAND (ref, 0);
      access_fns.safe_push (integer_zero_node);
    }
  else if (TREE_CODE (ref) == IMAGPART_EXPR)
    {
      ref = TREE_OPERAND (ref, 0);
      access_fns.safe_push (integer_one_node);
    }

  /* Analyze access functions of dimensions we know to be independent.
     The list of component references handled here should be kept in
     sync with access_fn_component_p.  */
  while (handled_component_p (ref))
    {
      if (TREE_CODE (ref) == ARRAY_REF)
	{
	  op = TREE_OPERAND (ref, 1);
	  access_fn = analyze_scalar_evolution (loop, op);
	  access_fn = instantiate_scev (nest, loop, access_fn);
	  access_fns.safe_push (access_fn);
	}
      else if (TREE_CODE (ref) == COMPONENT_REF
	       && TREE_CODE (TREE_TYPE (TREE_OPERAND (ref, 0))) == RECORD_TYPE)
	{
	  /* For COMPONENT_REFs of records (but not unions!) use the
	     FIELD_DECL offset as constant access function so we can
	     disambiguate a[i].f1 and a[i].f2.  */
	  tree off = component_ref_field_offset (ref);
	  off = size_binop (PLUS_EXPR,
			    size_binop (MULT_EXPR,
					fold_convert (bitsizetype, off),
					bitsize_int (BITS_PER_UNIT)),
			    DECL_FIELD_BIT_OFFSET (TREE_OPERAND (ref, 1)));
	  access_fns.safe_push (off);
	}
      else
	/* If we have an unhandled component we could not translate
	   to an access function stop analyzing.  We have determined
	   our base object in this case.  */
	break;

      ref = TREE_OPERAND (ref, 0);
    }

  /* If the address operand of a MEM_REF base has an evolution in the
     analyzed nest, add it as an additional independent access-function.  */
  if (TREE_CODE (ref) == MEM_REF)
    {
      op = TREE_OPERAND (ref, 0);
      access_fn = analyze_scalar_evolution (loop, op);
      access_fn = instantiate_scev (nest, loop, access_fn);
      if (TREE_CODE (access_fn) == POLYNOMIAL_CHREC)
	{
	  tree orig_type;
	  tree memoff = TREE_OPERAND (ref, 1);
	  base = initial_condition (access_fn);
	  orig_type = TREE_TYPE (base);
	  STRIP_USELESS_TYPE_CONVERSION (base);
	  split_constant_offset (base, &base, &off);
	  STRIP_USELESS_TYPE_CONVERSION (base);
	  /* Fold the MEM_REF offset into the evolutions initial
	     value to make more bases comparable.  */
	  if (!integer_zerop (memoff))
	    {
	      off = size_binop (PLUS_EXPR, off,
				fold_convert (ssizetype, memoff));
	      memoff = build_int_cst (TREE_TYPE (memoff), 0);
	    }
	  /* Adjust the offset so it is a multiple of the access type
	     size and thus we separate bases that can possibly be used
	     to produce partial overlaps (which the access_fn machinery
	     cannot handle).  */
	  wide_int rem;
	  if (TYPE_SIZE_UNIT (TREE_TYPE (ref))
	      && TREE_CODE (TYPE_SIZE_UNIT (TREE_TYPE (ref))) == INTEGER_CST
	      && !integer_zerop (TYPE_SIZE_UNIT (TREE_TYPE (ref))))
	    rem = wi::mod_trunc
	      (wi::to_wide (off),
	       wi::to_wide (TYPE_SIZE_UNIT (TREE_TYPE (ref))),
	       SIGNED);
	  else
	    /* If we can't compute the remainder simply force the initial
	       condition to zero.  */
	    rem = wi::to_wide (off);
	  off = wide_int_to_tree (ssizetype, wi::to_wide (off) - rem);
	  memoff = wide_int_to_tree (TREE_TYPE (memoff), rem);
	  /* And finally replace the initial condition.  */
	  access_fn = chrec_replace_initial_condition
	      (access_fn, fold_convert (orig_type, off));
	  /* ???  This is still not a suitable base object for
	     dr_may_alias_p - the base object needs to be an
	     access that covers the object as whole.  With
	     an evolution in the pointer this cannot be
	     guaranteed.
	     As a band-aid, mark the access so we can special-case
	     it in dr_may_alias_p.  */
	  tree old = ref;
	  ref = fold_build2_loc (EXPR_LOCATION (ref),
				 MEM_REF, TREE_TYPE (ref),
				 base, memoff);
	  MR_DEPENDENCE_CLIQUE (ref) = MR_DEPENDENCE_CLIQUE (old);
	  MR_DEPENDENCE_BASE (ref) = MR_DEPENDENCE_BASE (old);
	  DR_UNCONSTRAINED_BASE (dr) = true;
	  access_fns.safe_push (access_fn);
	}
    }
  else if (DECL_P (ref))
    {
      /* Canonicalize DR_BASE_OBJECT to MEM_REF form.  */
      ref = build2 (MEM_REF, TREE_TYPE (ref),
		    build_fold_addr_expr (ref),
		    build_int_cst (reference_alias_ptr_type (ref), 0));
    }

  DR_BASE_OBJECT (dr) = ref;
  DR_ACCESS_FNS (dr) = access_fns;
}

/* Extracts the alias analysis information from the memory reference DR.  */

static void
dr_analyze_alias (struct data_reference *dr)
{
  tree ref = DR_REF (dr);
  tree base = get_base_address (ref), addr;

  if (INDIRECT_REF_P (base)
      || TREE_CODE (base) == MEM_REF)
    {
      addr = TREE_OPERAND (base, 0);
      if (TREE_CODE (addr) == SSA_NAME)
	DR_PTR_INFO (dr) = SSA_NAME_PTR_INFO (addr);
    }
}

/* Frees data reference DR.  */

void
free_data_ref (data_reference_p dr)
{
  DR_ACCESS_FNS (dr).release ();
  free (dr);
}

/* Analyze memory reference MEMREF, which is accessed in STMT.
   The reference is a read if IS_READ is true, otherwise it is a write.
   IS_CONDITIONAL_IN_STMT indicates that the reference is conditional
   within STMT, i.e. that it might not occur even if STMT is executed
   and runs to completion.

   Return the data_reference description of MEMREF.  NEST is the outermost
   loop in which the reference should be instantiated, LOOP is the loop
   in which the data reference should be analyzed.  */

struct data_reference *
create_data_ref (edge nest, loop_p loop, tree memref, gimple *stmt,
		 bool is_read, bool is_conditional_in_stmt)
{
  struct data_reference *dr;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Creating dr for ");
      print_generic_expr (dump_file, memref, TDF_SLIM);
      fprintf (dump_file, "\n");
    }

  dr = XCNEW (struct data_reference);
  DR_STMT (dr) = stmt;
  DR_REF (dr) = memref;
  DR_IS_READ (dr) = is_read;
  DR_IS_CONDITIONAL_IN_STMT (dr) = is_conditional_in_stmt;

  dr_analyze_innermost (&DR_INNERMOST (dr), memref,
			nest != NULL ? loop : NULL, stmt);
  dr_analyze_indices (dr, nest, loop);
  dr_analyze_alias (dr);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      unsigned i;
      fprintf (dump_file, "\tbase_address: ");
      print_generic_expr (dump_file, DR_BASE_ADDRESS (dr), TDF_SLIM);
      fprintf (dump_file, "\n\toffset from base address: ");
      print_generic_expr (dump_file, DR_OFFSET (dr), TDF_SLIM);
      fprintf (dump_file, "\n\tconstant offset from base address: ");
      print_generic_expr (dump_file, DR_INIT (dr), TDF_SLIM);
      fprintf (dump_file, "\n\tstep: ");
      print_generic_expr (dump_file, DR_STEP (dr), TDF_SLIM);
      fprintf (dump_file, "\n\tbase alignment: %d", DR_BASE_ALIGNMENT (dr));
      fprintf (dump_file, "\n\tbase misalignment: %d",
	       DR_BASE_MISALIGNMENT (dr));
      fprintf (dump_file, "\n\toffset alignment: %d",
	       DR_OFFSET_ALIGNMENT (dr));
      fprintf (dump_file, "\n\tstep alignment: %d", DR_STEP_ALIGNMENT (dr));
      fprintf (dump_file, "\n\tbase_object: ");
      print_generic_expr (dump_file, DR_BASE_OBJECT (dr), TDF_SLIM);
      fprintf (dump_file, "\n");
      for (i = 0; i < DR_NUM_DIMENSIONS (dr); i++)
	{
	  fprintf (dump_file, "\tAccess function %d: ", i);
	  print_generic_stmt (dump_file, DR_ACCESS_FN (dr, i), TDF_SLIM);
	}
    }

  return dr;
}

/* A helper function computes order between two tree expressions T1 and T2.
   This is used in comparator functions sorting objects based on the order
   of tree expressions.  The function returns -1, 0, or 1.  */

int
data_ref_compare_tree (tree t1, tree t2)
{
  int i, cmp;
  enum tree_code code;
  char tclass;

  if (t1 == t2)
    return 0;
  if (t1 == NULL)
    return -1;
  if (t2 == NULL)
    return 1;

  STRIP_USELESS_TYPE_CONVERSION (t1);
  STRIP_USELESS_TYPE_CONVERSION (t2);
  if (t1 == t2)
    return 0;

  if (TREE_CODE (t1) != TREE_CODE (t2)
      && ! (CONVERT_EXPR_P (t1) && CONVERT_EXPR_P (t2)))
    return TREE_CODE (t1) < TREE_CODE (t2) ? -1 : 1;

  code = TREE_CODE (t1);
  switch (code)
    {
    case INTEGER_CST:
      return tree_int_cst_compare (t1, t2);

    case STRING_CST:
      if (TREE_STRING_LENGTH (t1) != TREE_STRING_LENGTH (t2))
	return TREE_STRING_LENGTH (t1) < TREE_STRING_LENGTH (t2) ? -1 : 1;
      return memcmp (TREE_STRING_POINTER (t1), TREE_STRING_POINTER (t2),
		     TREE_STRING_LENGTH (t1));

    case SSA_NAME:
      if (SSA_NAME_VERSION (t1) != SSA_NAME_VERSION (t2))
	return SSA_NAME_VERSION (t1) < SSA_NAME_VERSION (t2) ? -1 : 1;
      break;

    default:
      if (POLY_INT_CST_P (t1))
	return compare_sizes_for_sort (wi::to_poly_widest (t1),
				       wi::to_poly_widest (t2));

      tclass = TREE_CODE_CLASS (code);

      /* For decls, compare their UIDs.  */
      if (tclass == tcc_declaration)
	{
	  if (DECL_UID (t1) != DECL_UID (t2))
	    return DECL_UID (t1) < DECL_UID (t2) ? -1 : 1;
	  break;
	}
      /* For expressions, compare their operands recursively.  */
      else if (IS_EXPR_CODE_CLASS (tclass))
	{
	  for (i = TREE_OPERAND_LENGTH (t1) - 1; i >= 0; --i)
	    {
	      cmp = data_ref_compare_tree (TREE_OPERAND (t1, i),
					   TREE_OPERAND (t2, i));
	      if (cmp != 0)
		return cmp;
	    }
	}
      else
	gcc_unreachable ();
    }

  return 0;
}

/* Return TRUE if it's possible to resolve data dependence DDR by a runtime
   alias check.  */

opt_result
runtime_alias_check_p (ddr_p ddr, class loop *loop, bool speed_p)
{
  if (dump_enabled_p ())
    dump_printf (MSG_NOTE,
		 "consider run-time aliasing test between %T and %T\n",
		 DR_REF (DDR_A (ddr)), DR_REF (DDR_B (ddr)));

  if (!speed_p)
    return opt_result::failure_at (DR_STMT (DDR_A (ddr)),
				   "runtime alias check not supported when"
				   " optimizing for size.\n");

  /* FORNOW: We don't support versioning with outer-loop in either
     vectorization or loop distribution.  */
  if (loop != NULL && loop->inner != NULL)
    return opt_result::failure_at (DR_STMT (DDR_A (ddr)),
				   "runtime alias check not supported for"
				   " outer loop.\n");

  return opt_result::success ();
}

/* Operator == between two dr_with_seg_len objects.

   This equality operator is used to make sure two data refs
   are the same one so that we will consider to combine the
   aliasing checks of those two pairs of data dependent data
   refs.  */

static bool
operator == (const dr_with_seg_len& d1,
	     const dr_with_seg_len& d2)
{
  return (operand_equal_p (DR_BASE_ADDRESS (d1.dr),
			   DR_BASE_ADDRESS (d2.dr), 0)
	  && data_ref_compare_tree (DR_OFFSET (d1.dr), DR_OFFSET (d2.dr)) == 0
	  && data_ref_compare_tree (DR_INIT (d1.dr), DR_INIT (d2.dr)) == 0
	  && data_ref_compare_tree (d1.seg_len, d2.seg_len) == 0
	  && known_eq (d1.access_size, d2.access_size)
	  && d1.align == d2.align);
}

/* Comparison function for sorting objects of dr_with_seg_len_pair_t
   so that we can combine aliasing checks in one scan.  */

static int
comp_dr_with_seg_len_pair (const void *pa_, const void *pb_)
{
  const dr_with_seg_len_pair_t * pa = (const dr_with_seg_len_pair_t *) pa_;
  const dr_with_seg_len_pair_t * pb = (const dr_with_seg_len_pair_t *) pb_;
  const dr_with_seg_len &a1 = pa->first, &a2 = pa->second;
  const dr_with_seg_len &b1 = pb->first, &b2 = pb->second;

  /* For DR pairs (a, b) and (c, d), we only consider to merge the alias checks
     if a and c have the same basic address and step, and b and d have the same
     address and step.  Therefore, if any a&c or b&d don't have the same address
     and step, we don't care about the order of those two pairs after sorting.  */
  int comp_res;

  if ((comp_res = data_ref_compare_tree (DR_BASE_ADDRESS (a1.dr),
					 DR_BASE_ADDRESS (b1.dr))) != 0)
    return comp_res;
  if ((comp_res = data_ref_compare_tree (DR_BASE_ADDRESS (a2.dr),
					 DR_BASE_ADDRESS (b2.dr))) != 0)
    return comp_res;
  if ((comp_res = data_ref_compare_tree (DR_STEP (a1.dr),
					 DR_STEP (b1.dr))) != 0)
    return comp_res;
  if ((comp_res = data_ref_compare_tree (DR_STEP (a2.dr),
					 DR_STEP (b2.dr))) != 0)
    return comp_res;
  if ((comp_res = data_ref_compare_tree (DR_OFFSET (a1.dr),
					 DR_OFFSET (b1.dr))) != 0)
    return comp_res;
  if ((comp_res = data_ref_compare_tree (DR_INIT (a1.dr),
					 DR_INIT (b1.dr))) != 0)
    return comp_res;
  if ((comp_res = data_ref_compare_tree (DR_OFFSET (a2.dr),
					 DR_OFFSET (b2.dr))) != 0)
    return comp_res;
  if ((comp_res = data_ref_compare_tree (DR_INIT (a2.dr),
					 DR_INIT (b2.dr))) != 0)
    return comp_res;

  return 0;
}

/* Dump information about ALIAS_PAIR, indenting each line by INDENT.  */

static void
dump_alias_pair (dr_with_seg_len_pair_t *alias_pair, const char *indent)
{
  dump_printf (MSG_NOTE, "%sreference:      %T vs. %T\n", indent,
	       DR_REF (alias_pair->first.dr),
	       DR_REF (alias_pair->second.dr));

  dump_printf (MSG_NOTE, "%ssegment length: %T", indent,
	       alias_pair->first.seg_len);
  if (!operand_equal_p (alias_pair->first.seg_len,
			alias_pair->second.seg_len, 0))
    dump_printf (MSG_NOTE, " vs. %T", alias_pair->second.seg_len);

  dump_printf (MSG_NOTE, "\n%saccess size:    ", indent);
  dump_dec (MSG_NOTE, alias_pair->first.access_size);
  if (maybe_ne (alias_pair->first.access_size, alias_pair->second.access_size))
    {
      dump_printf (MSG_NOTE, " vs. ");
      dump_dec (MSG_NOTE, alias_pair->second.access_size);
    }

  dump_printf (MSG_NOTE, "\n%salignment:      %d", indent,
	       alias_pair->first.align);
  if (alias_pair->first.align != alias_pair->second.align)
    dump_printf (MSG_NOTE, " vs. %d", alias_pair->second.align);

  dump_printf (MSG_NOTE, "\n%sflags:         ", indent);
  if (alias_pair->flags & DR_ALIAS_RAW)
    dump_printf (MSG_NOTE, " RAW");
  if (alias_pair->flags & DR_ALIAS_WAR)
    dump_printf (MSG_NOTE, " WAR");
  if (alias_pair->flags & DR_ALIAS_WAW)
    dump_printf (MSG_NOTE, " WAW");
  if (alias_pair->flags & DR_ALIAS_ARBITRARY)
    dump_printf (MSG_NOTE, " ARBITRARY");
  if (alias_pair->flags & DR_ALIAS_SWAPPED)
    dump_printf (MSG_NOTE, " SWAPPED");
  if (alias_pair->flags & DR_ALIAS_UNSWAPPED)
    dump_printf (MSG_NOTE, " UNSWAPPED");
  if (alias_pair->flags & DR_ALIAS_MIXED_STEPS)
    dump_printf (MSG_NOTE, " MIXED_STEPS");
  if (alias_pair->flags == 0)
    dump_printf (MSG_NOTE, " <none>");
  dump_printf (MSG_NOTE, "\n");
}

/* Merge alias checks recorded in ALIAS_PAIRS and remove redundant ones.
   FACTOR is number of iterations that each data reference is accessed.

   Basically, for each pair of dependent data refs store_ptr_0 & load_ptr_0,
   we create an expression:

   ((store_ptr_0 + store_segment_length_0) <= load_ptr_0)
   || (load_ptr_0 + load_segment_length_0) <= store_ptr_0))

   for aliasing checks.  However, in some cases we can decrease the number
   of checks by combining two checks into one.  For example, suppose we have
   another pair of data refs store_ptr_0 & load_ptr_1, and if the following
   condition is satisfied:

   load_ptr_0 < load_ptr_1  &&
   load_ptr_1 - load_ptr_0 - load_segment_length_0 < store_segment_length_0

   (this condition means, in each iteration of vectorized loop, the accessed
   memory of store_ptr_0 cannot be between the memory of load_ptr_0 and
   load_ptr_1.)

   we then can use only the following expression to finish the aliasing checks
   between store_ptr_0 & load_ptr_0 and store_ptr_0 & load_ptr_1:

   ((store_ptr_0 + store_segment_length_0) <= load_ptr_0)
   || (load_ptr_1 + load_segment_length_1 <= store_ptr_0))

   Note that we only consider that load_ptr_0 and load_ptr_1 have the same
   basic address.  */

void
prune_runtime_alias_test_list (vec<dr_with_seg_len_pair_t> *alias_pairs,
			       poly_uint64)
{
  if (alias_pairs->is_empty ())
    return;

  /* Canonicalize each pair so that the base components are ordered wrt
     data_ref_compare_tree.  This allows the loop below to merge more
     cases.  */
  unsigned int i;
  dr_with_seg_len_pair_t *alias_pair;
  FOR_EACH_VEC_ELT (*alias_pairs, i, alias_pair)
    {
      data_reference_p dr_a = alias_pair->first.dr;
      data_reference_p dr_b = alias_pair->second.dr;
      int comp_res = data_ref_compare_tree (DR_BASE_ADDRESS (dr_a),
					    DR_BASE_ADDRESS (dr_b));
      if (comp_res == 0)
	comp_res = data_ref_compare_tree (DR_OFFSET (dr_a), DR_OFFSET (dr_b));
      if (comp_res == 0)
	comp_res = data_ref_compare_tree (DR_INIT (dr_a), DR_INIT (dr_b));
      if (comp_res > 0)
	{
	  std::swap (alias_pair->first, alias_pair->second);
	  alias_pair->flags |= DR_ALIAS_SWAPPED;
	}
      else
	alias_pair->flags |= DR_ALIAS_UNSWAPPED;
    }

  /* Sort the collected data ref pairs so that we can scan them once to
     combine all possible aliasing checks.  */
  alias_pairs->qsort (comp_dr_with_seg_len_pair);

  /* Scan the sorted dr pairs and check if we can combine alias checks
     of two neighboring dr pairs.  */
  unsigned int last = 0;
  for (i = 1; i < alias_pairs->length (); ++i)
    {
      /* Deal with two ddrs (dr_a1, dr_b1) and (dr_a2, dr_b2).  */
      dr_with_seg_len_pair_t *alias_pair1 = &(*alias_pairs)[last];
      dr_with_seg_len_pair_t *alias_pair2 = &(*alias_pairs)[i];

      dr_with_seg_len *dr_a1 = &alias_pair1->first;
      dr_with_seg_len *dr_b1 = &alias_pair1->second;
      dr_with_seg_len *dr_a2 = &alias_pair2->first;
      dr_with_seg_len *dr_b2 = &alias_pair2->second;

      /* Remove duplicate data ref pairs.  */
      if (*dr_a1 == *dr_a2 && *dr_b1 == *dr_b2)
	{
	  if (dump_enabled_p ())
	    dump_printf (MSG_NOTE, "found equal ranges %T, %T and %T, %T\n",
			 DR_REF (dr_a1->dr), DR_REF (dr_b1->dr),
			 DR_REF (dr_a2->dr), DR_REF (dr_b2->dr));
	  alias_pair1->flags |= alias_pair2->flags;
	  continue;
	}

      /* Assume that we won't be able to merge the pairs, then correct
	 if we do.  */
      last += 1;
      (*alias_pairs)[last] = (*alias_pairs)[i];

      if (*dr_a1 == *dr_a2 || *dr_b1 == *dr_b2)
	{
	  /* We consider the case that DR_B1 and DR_B2 are same memrefs,
	     and DR_A1 and DR_A2 are two consecutive memrefs.  */
	  if (*dr_a1 == *dr_a2)
	    {
	      std::swap (dr_a1, dr_b1);
	      std::swap (dr_a2, dr_b2);
	    }

	  poly_int64 init_a1, init_a2;
	  /* Only consider cases in which the distance between the initial
	     DR_A1 and the initial DR_A2 is known at compile time.  */
	  if (!operand_equal_p (DR_BASE_ADDRESS (dr_a1->dr),
				DR_BASE_ADDRESS (dr_a2->dr), 0)
	      || !operand_equal_p (DR_OFFSET (dr_a1->dr),
				   DR_OFFSET (dr_a2->dr), 0)
	      || !poly_int_tree_p (DR_INIT (dr_a1->dr), &init_a1)
	      || !poly_int_tree_p (DR_INIT (dr_a2->dr), &init_a2))
	    continue;

	  /* Don't combine if we can't tell which one comes first.  */
	  if (!ordered_p (init_a1, init_a2))
	    continue;

	  /* Work out what the segment length would be if we did combine
	     DR_A1 and DR_A2:

	     - If DR_A1 and DR_A2 have equal lengths, that length is
	       also the combined length.

	     - If DR_A1 and DR_A2 both have negative "lengths", the combined
	       length is the lower bound on those lengths.

	     - If DR_A1 and DR_A2 both have positive lengths, the combined
	       length is the upper bound on those lengths.

	     Other cases are unlikely to give a useful combination.

	     The lengths both have sizetype, so the sign is taken from
	     the step instead.  */
	  poly_uint64 new_seg_len = 0;
	  bool new_seg_len_p = !operand_equal_p (dr_a1->seg_len,
						 dr_a2->seg_len, 0);
	  if (new_seg_len_p)
	    {
	      poly_uint64 seg_len_a1, seg_len_a2;
	      if (!poly_int_tree_p (dr_a1->seg_len, &seg_len_a1)
		  || !poly_int_tree_p (dr_a2->seg_len, &seg_len_a2))
		continue;

	      tree indicator_a = dr_direction_indicator (dr_a1->dr);
	      if (TREE_CODE (indicator_a) != INTEGER_CST)
		continue;

	      tree indicator_b = dr_direction_indicator (dr_a2->dr);
	      if (TREE_CODE (indicator_b) != INTEGER_CST)
		continue;

	      int sign_a = tree_int_cst_sgn (indicator_a);
	      int sign_b = tree_int_cst_sgn (indicator_b);

	      if (sign_a <= 0 && sign_b <= 0)
		new_seg_len = lower_bound (seg_len_a1, seg_len_a2);
	      else if (sign_a >= 0 && sign_b >= 0)
		new_seg_len = upper_bound (seg_len_a1, seg_len_a2);
	      else
		continue;
	    }
	  /* At this point we're committed to merging the refs.  */

	  /* Make sure dr_a1 starts left of dr_a2.  */
	  if (maybe_gt (init_a1, init_a2))
	    {
	      std::swap (*dr_a1, *dr_a2);
	      std::swap (init_a1, init_a2);
	    }

	  /* The DR_Bs are equal, so only the DR_As can introduce
	     mixed steps.  */
	  if (!operand_equal_p (DR_STEP (dr_a1->dr), DR_STEP (dr_a2->dr), 0))
	    alias_pair1->flags |= DR_ALIAS_MIXED_STEPS;

	  if (new_seg_len_p)
	    {
	      dr_a1->seg_len = build_int_cst (TREE_TYPE (dr_a1->seg_len),
					      new_seg_len);
	      dr_a1->align = MIN (dr_a1->align, known_alignment (new_seg_len));
	    }

	  /* This is always positive due to the swap above.  */
	  poly_uint64 diff = init_a2 - init_a1;

	  /* The new check will start at DR_A1.  Make sure that its access
	     size encompasses the initial DR_A2.  */
	  if (maybe_lt (dr_a1->access_size, diff + dr_a2->access_size))
	    {
	      dr_a1->access_size = upper_bound (dr_a1->access_size,
						diff + dr_a2->access_size);
	      unsigned int new_align = known_alignment (dr_a1->access_size);
	      dr_a1->align = MIN (dr_a1->align, new_align);
	    }
	  if (dump_enabled_p ())
	    dump_printf (MSG_NOTE, "merging ranges for %T, %T and %T, %T\n",
			 DR_REF (dr_a1->dr), DR_REF (dr_b1->dr),
			 DR_REF (dr_a2->dr), DR_REF (dr_b2->dr));
	  alias_pair1->flags |= alias_pair2->flags;
	  last -= 1;
	}
    }
  alias_pairs->truncate (last + 1);

  /* Try to restore the original dr_with_seg_len order within each
     dr_with_seg_len_pair_t.  If we ended up combining swapped and
     unswapped pairs into the same check, we have to invalidate any
     RAW, WAR and WAW information for it.  */
  if (dump_enabled_p ())
    dump_printf (MSG_NOTE, "merged alias checks:\n");
  FOR_EACH_VEC_ELT (*alias_pairs, i, alias_pair)
    {
      unsigned int swap_mask = (DR_ALIAS_SWAPPED | DR_ALIAS_UNSWAPPED);
      unsigned int swapped = (alias_pair->flags & swap_mask);
      if (swapped == DR_ALIAS_SWAPPED)
	std::swap (alias_pair->first, alias_pair->second);
      else if (swapped != DR_ALIAS_UNSWAPPED)
	alias_pair->flags |= DR_ALIAS_ARBITRARY;
      alias_pair->flags &= ~swap_mask;
      if (dump_enabled_p ())
	dump_alias_pair (alias_pair, "  ");
    }
}

/* A subroutine of create_intersect_range_checks, with a subset of the
   same arguments.  Try to use IFN_CHECK_RAW_PTRS and IFN_CHECK_WAR_PTRS
   to optimize cases in which the references form a simple RAW, WAR or
   WAW dependence.  */
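
/* For instance (an illustrative sketch): for a simple vectorized copy
   loop a[i] = b[i] with a known read-after-write dependence, the whole
   address-based test can collapse into a single
   IFN_CHECK_RAW_PTRS (addr_a, addr_b, length, align) call, provided the
   target supports that internal function.  */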

static bool
create_ifn_alias_checks (tree *cond_expr,
			 const dr_with_seg_len_pair_t &alias_pair)
{
  const dr_with_seg_len& dr_a = alias_pair.first;
  const dr_with_seg_len& dr_b = alias_pair.second;

  /* Check for cases in which:

     (a) we have a known RAW, WAR or WAW dependence
     (b) the accesses are well-ordered in both the original and new code
	 (see the comment above the DR_ALIAS_* flags for details); and
     (c) the DR_STEPs describe all access pairs covered by ALIAS_PAIR.  */
  if (alias_pair.flags & ~(DR_ALIAS_RAW | DR_ALIAS_WAR | DR_ALIAS_WAW))
    return false;

  /* Make sure that both DRs access the same pattern of bytes,
     with a constant length and step.  */
  poly_uint64 seg_len;
  if (!operand_equal_p (dr_a.seg_len, dr_b.seg_len, 0)
      || !poly_int_tree_p (dr_a.seg_len, &seg_len)
      || maybe_ne (dr_a.access_size, dr_b.access_size)
      || !operand_equal_p (DR_STEP (dr_a.dr), DR_STEP (dr_b.dr), 0)
      || !tree_fits_uhwi_p (DR_STEP (dr_a.dr)))
    return false;

  unsigned HOST_WIDE_INT bytes = tree_to_uhwi (DR_STEP (dr_a.dr));
  tree addr_a = DR_BASE_ADDRESS (dr_a.dr);
  tree addr_b = DR_BASE_ADDRESS (dr_b.dr);

  /* See whether the target supports what we want to do.  WAW checks are
     equivalent to WAR checks here.  */
  internal_fn ifn = (alias_pair.flags & DR_ALIAS_RAW
		     ? IFN_CHECK_RAW_PTRS
		     : IFN_CHECK_WAR_PTRS);
  unsigned int align = MIN (dr_a.align, dr_b.align);
  poly_uint64 full_length = seg_len + bytes;
  if (!internal_check_ptrs_fn_supported_p (ifn, TREE_TYPE (addr_a),
					   full_length, align))
    {
      full_length = seg_len + dr_a.access_size;
      if (!internal_check_ptrs_fn_supported_p (ifn, TREE_TYPE (addr_a),
					       full_length, align))
	return false;
    }

  /* Commit to using this form of test.  */
  addr_a = fold_build_pointer_plus (addr_a, DR_OFFSET (dr_a.dr));
  addr_a = fold_build_pointer_plus (addr_a, DR_INIT (dr_a.dr));

  addr_b = fold_build_pointer_plus (addr_b, DR_OFFSET (dr_b.dr));
  addr_b = fold_build_pointer_plus (addr_b, DR_INIT (dr_b.dr));

  *cond_expr = build_call_expr_internal_loc (UNKNOWN_LOCATION,
					     ifn, boolean_type_node,
					     4, addr_a, addr_b,
					     size_int (full_length),
					     size_int (align));

  if (dump_enabled_p ())
    {
      if (ifn == IFN_CHECK_RAW_PTRS)
	dump_printf (MSG_NOTE, "using an IFN_CHECK_RAW_PTRS test\n");
      else
	dump_printf (MSG_NOTE, "using an IFN_CHECK_WAR_PTRS test\n");
    }
  return true;
}

/* Try to generate a runtime condition that is true if ALIAS_PAIR is
   free of aliases, using a condition based on index values instead
   of a condition based on addresses.  Return true on success,
   storing the condition in *COND_EXPR.

   This can only be done if the two data references in ALIAS_PAIR access
   the same array object and the index is the only difference.  For example,
   if the two data references are DR_A and DR_B:

		       DR_A                           DR_B
      data-ref         arr[i]                         arr[j]
      base_object      arr                            arr
      index            {i_0, +, 1}_loop               {j_0, +, 1}_loop

   The addresses and their index are like:

	|<- ADDR_A    ->|          |<- ADDR_B    ->|
     ------------------------------------------------------->
	|   |   |   |   |          |   |   |   |   |
     ------------------------------------------------------->
	i_0 ...         i_0+4      j_0 ...         j_0+4

   We can create expression based on index rather than address:

     (unsigned) (i_0 - j_0 + 3) <= 6

   i.e. the indices are less than 4 apart.

   Note evolution step of index needs to be considered in comparison.  */
2093 create_intersect_range_checks_index (class loop
*loop
, tree
*cond_expr
,
2094 const dr_with_seg_len_pair_t
&alias_pair
)
2096 const dr_with_seg_len
&dr_a
= alias_pair
.first
;
2097 const dr_with_seg_len
&dr_b
= alias_pair
.second
;
2098 if ((alias_pair
.flags
& DR_ALIAS_MIXED_STEPS
)
2099 || integer_zerop (DR_STEP (dr_a
.dr
))
2100 || integer_zerop (DR_STEP (dr_b
.dr
))
2101 || DR_NUM_DIMENSIONS (dr_a
.dr
) != DR_NUM_DIMENSIONS (dr_b
.dr
))
2104 poly_uint64 seg_len1
, seg_len2
;
2105 if (!poly_int_tree_p (dr_a
.seg_len
, &seg_len1
)
2106 || !poly_int_tree_p (dr_b
.seg_len
, &seg_len2
))
2109 if (!tree_fits_shwi_p (DR_STEP (dr_a
.dr
)))
2112 if (!operand_equal_p (DR_BASE_OBJECT (dr_a
.dr
), DR_BASE_OBJECT (dr_b
.dr
), 0))
2115 if (!operand_equal_p (DR_STEP (dr_a
.dr
), DR_STEP (dr_b
.dr
), 0))
2118 gcc_assert (TREE_CODE (DR_STEP (dr_a
.dr
)) == INTEGER_CST
);
2120 bool neg_step
= tree_int_cst_compare (DR_STEP (dr_a
.dr
), size_zero_node
) < 0;
2121 unsigned HOST_WIDE_INT abs_step
= tree_to_shwi (DR_STEP (dr_a
.dr
));
2124 abs_step
= -abs_step
;
2125 seg_len1
= (-wi::to_poly_wide (dr_a
.seg_len
)).force_uhwi ();
2126 seg_len2
= (-wi::to_poly_wide (dr_b
.seg_len
)).force_uhwi ();
2129 /* Infer the number of iterations with which the memory segment is accessed
2130 by DR. In other words, alias is checked if memory segment accessed by
2131 DR_A in some iterations intersect with memory segment accessed by DR_B
2132 in the same amount iterations.
2133 Note segnment length is a linear function of number of iterations with
2134 DR_STEP as the coefficient. */
2135 poly_uint64 niter_len1
, niter_len2
;
2136 if (!can_div_trunc_p (seg_len1
+ abs_step
- 1, abs_step
, &niter_len1
)
2137 || !can_div_trunc_p (seg_len2
+ abs_step
- 1, abs_step
, &niter_len2
))
2140 /* Divide each access size by the byte step, rounding up. */
2141 poly_uint64 niter_access1
, niter_access2
;
2142 if (!can_div_trunc_p (dr_a
.access_size
+ abs_step
- 1,
2143 abs_step
, &niter_access1
)
2144 || !can_div_trunc_p (dr_b
.access_size
+ abs_step
- 1,
2145 abs_step
, &niter_access2
))
2148 bool waw_or_war_p
= (alias_pair
.flags
& ~(DR_ALIAS_WAR
| DR_ALIAS_WAW
)) == 0;
2151 for (i
= 0; i
< DR_NUM_DIMENSIONS (dr_a
.dr
); i
++)
2153 tree access1
= DR_ACCESS_FN (dr_a
.dr
, i
);
2154 tree access2
= DR_ACCESS_FN (dr_b
.dr
, i
);
2155 /* Two indices must be the same if they are not scev, or not scev wrto
2156 current loop being vecorized. */
2157 if (TREE_CODE (access1
) != POLYNOMIAL_CHREC
2158 || TREE_CODE (access2
) != POLYNOMIAL_CHREC
2159 || CHREC_VARIABLE (access1
) != (unsigned)loop
->num
2160 || CHREC_VARIABLE (access2
) != (unsigned)loop
->num
)
2162 if (operand_equal_p (access1
, access2
, 0))
2167 /* The two indices must have the same step. */
2168 if (!operand_equal_p (CHREC_RIGHT (access1
), CHREC_RIGHT (access2
), 0))
2171 tree idx_step
= CHREC_RIGHT (access1
);
2172 /* Index must have const step, otherwise DR_STEP won't be constant. */
2173 gcc_assert (TREE_CODE (idx_step
) == INTEGER_CST
);
2174 /* Index must evaluate in the same direction as DR. */
2175 gcc_assert (!neg_step
|| tree_int_cst_sign_bit (idx_step
) == 1);
2177 tree min1
= CHREC_LEFT (access1
);
2178 tree min2
= CHREC_LEFT (access2
);
2179 if (!types_compatible_p (TREE_TYPE (min1
), TREE_TYPE (min2
)))
2182 /* Ideally, alias can be checked against loop's control IV, but we
2183 need to prove linear mapping between control IV and reference
2184 index. Although that should be true, we check against (array)
2185 index of data reference. Like segment length, index length is
2186 linear function of the number of iterations with index_step as
2187 the coefficient, i.e, niter_len * idx_step. */
2188 offset_int abs_idx_step
= offset_int::from (wi::to_wide (idx_step
),
2191 abs_idx_step
= -abs_idx_step
;
2192 poly_offset_int idx_len1
= abs_idx_step
* niter_len1
;
2193 poly_offset_int idx_len2
= abs_idx_step
* niter_len2
;
2194 poly_offset_int idx_access1
= abs_idx_step
* niter_access1
;
2195 poly_offset_int idx_access2
= abs_idx_step
* niter_access2
;
2197 gcc_assert (known_ge (idx_len1
, 0)
2198 && known_ge (idx_len2
, 0)
2199 && known_ge (idx_access1
, 0)
2200 && known_ge (idx_access2
, 0));
2202 /* Each access has the following pattern, with lengths measured
2206 <--- A: -ve step --->
2207 +-----+-------+-----+-------+-----+
2208 | n-1 | ..... | 0 | ..... | n-1 |
2209 +-----+-------+-----+-------+-----+
2210 <--- B: +ve step --->
2215 where "n" is the number of scalar iterations covered by the segment
2216 and where each access spans idx_access units.
2218 A is the range of bytes accessed when the step is negative,
2219 B is the range when the step is positive.
2221 When checking for general overlap, we need to test whether
2224 [min1 + low_offset1, min2 + high_offset1 + idx_access1 - 1]
2228 [min2 + low_offset2, min2 + high_offset2 + idx_access2 - 1]
2232 low_offsetN = +ve step ? 0 : -idx_lenN;
2233 high_offsetN = +ve step ? idx_lenN : 0;
2235 This is equivalent to testing whether:
2237 min1 + low_offset1 <= min2 + high_offset2 + idx_access2 - 1
2238 && min2 + low_offset2 <= min1 + high_offset1 + idx_access1 - 1
2240 Converting this into a single test, there is an overlap if:
2242 0 <= min2 - min1 + bias <= limit
2244 where bias = high_offset2 + idx_access2 - 1 - low_offset1
2245 limit = (high_offset1 - low_offset1 + idx_access1 - 1)
2246 + (high_offset2 - low_offset2 + idx_access2 - 1)
2247 i.e. limit = idx_len1 + idx_access1 - 1 + idx_len2 + idx_access2 - 1
2249 Combining the tests requires limit to be computable in an unsigned
2250 form of the index type; if it isn't, we fall back to the usual
2251 pointer-based checks.
2253 We can do better if DR_B is a write and if DR_A and DR_B are
2254 well-ordered in both the original and the new code (see the
2255 comment above the DR_ALIAS_* flags for details). In this case
2256 we know that for each i in [0, n-1], the write performed by
2257 access i of DR_B occurs after access numbers j<=i of DR_A in
2258 both the original and the new code. Any write or anti
2259 dependencies wrt those DR_A accesses are therefore maintained.
2261 We just need to make sure that each individual write in DR_B does not
2262 overlap any higher-indexed access in DR_A; such DR_A accesses happen
2263 after the DR_B access in the original code but happen before it in
2266 We know the steps for both accesses are equal, so by induction, we
2267 just need to test whether the first write of DR_B overlaps a later
2268 access of DR_A. In other words, we need to move min1 along by
2271 min1' = min1 + idx_step
2275 [min1' + low_offset1', min1' + high_offset1' + idx_access1 - 1]
2279 [min2, min2 + idx_access2 - 1]
2283 low_offset1' = +ve step ? 0 : -(idx_len1 - |idx_step|)
2284 high_offset1' = +ve_step ? idx_len1 - |idx_step| : 0. */
2286 idx_len1
-= abs_idx_step
;
2288 poly_offset_int limit
= idx_len1
+ idx_access1
- 1 + idx_access2
- 1;
2292 tree utype
= unsigned_type_for (TREE_TYPE (min1
));
2293 if (!wi::fits_to_tree_p (limit
, utype
))
2296 poly_offset_int low_offset1
= neg_step
? -idx_len1
: 0;
2297 poly_offset_int high_offset2
= neg_step
|| waw_or_war_p
? 0 : idx_len2
;
2298 poly_offset_int bias
= high_offset2
+ idx_access2
- 1 - low_offset1
;
2299 /* Equivalent to adding IDX_STEP to MIN1. */
2301 bias
-= wi::to_offset (idx_step
);
2303 tree subject
= fold_build2 (MINUS_EXPR
, utype
,
2304 fold_convert (utype
, min2
),
2305 fold_convert (utype
, min1
));
2306 subject
= fold_build2 (PLUS_EXPR
, utype
, subject
,
2307 wide_int_to_tree (utype
, bias
));
2308 tree part_cond_expr
= fold_build2 (GT_EXPR
, boolean_type_node
, subject
,
2309 wide_int_to_tree (utype
, limit
));
2311 *cond_expr
= fold_build2 (TRUTH_AND_EXPR
, boolean_type_node
,
2312 *cond_expr
, part_cond_expr
);
2314 *cond_expr
= part_cond_expr
;
2316 if (dump_enabled_p ())
2319 dump_printf (MSG_NOTE
, "using an index-based WAR/WAW test\n");
2321 dump_printf (MSG_NOTE
, "using an index-based overlap test\n");
2326 /* A subroutine of create_intersect_range_checks, with a subset of the
2327 same arguments. Try to optimize cases in which the second access
2328 is a write and in which some overlap is valid. */
2331 create_waw_or_war_checks (tree
*cond_expr
,
2332 const dr_with_seg_len_pair_t
&alias_pair
)
2334 const dr_with_seg_len
& dr_a
= alias_pair
.first
;
2335 const dr_with_seg_len
& dr_b
= alias_pair
.second
;
2337 /* Check for cases in which:
2339 (a) DR_B is always a write;
2340 (b) the accesses are well-ordered in both the original and new code
2341 (see the comment above the DR_ALIAS_* flags for details); and
2342 (c) the DR_STEPs describe all access pairs covered by ALIAS_PAIR. */
2343 if (alias_pair
.flags
& ~(DR_ALIAS_WAR
| DR_ALIAS_WAW
))
2346 /* Check for equal (but possibly variable) steps. */
2347 tree step
= DR_STEP (dr_a
.dr
);
2348 if (!operand_equal_p (step
, DR_STEP (dr_b
.dr
)))
2351 /* Make sure that we can operate on sizetype without loss of precision. */
2352 tree addr_type
= TREE_TYPE (DR_BASE_ADDRESS (dr_a
.dr
));
2353 if (TYPE_PRECISION (addr_type
) != TYPE_PRECISION (sizetype
))
2356 /* All addresses involved are known to have a common alignment ALIGN.
2357 We can therefore subtract ALIGN from an exclusive endpoint to get
2358 an inclusive endpoint. In the best (and common) case, ALIGN is the
2359 same as the access sizes of both DRs, and so subtracting ALIGN
2360 cancels out the addition of an access size. */
2361 unsigned int align
= MIN (dr_a
.align
, dr_b
.align
);
2362 poly_uint64 last_chunk_a
= dr_a
.access_size
- align
;
2363 poly_uint64 last_chunk_b
= dr_b
.access_size
- align
;
2365 /* Get a boolean expression that is true when the step is negative. */
2366 tree indicator
= dr_direction_indicator (dr_a
.dr
);
2367 tree neg_step
= fold_build2 (LT_EXPR
, boolean_type_node
,
2368 fold_convert (ssizetype
, indicator
),
2371 /* Get lengths in sizetype. */
2373 = fold_convert (sizetype
, rewrite_to_non_trapping_overflow (dr_a
.seg_len
));
2374 step
= fold_convert (sizetype
, rewrite_to_non_trapping_overflow (step
));
2376 /* Each access has the following pattern:
2379 <--- A: -ve step --->
2380 +-----+-------+-----+-------+-----+
2381 | n-1 | ..... | 0 | ..... | n-1 |
2382 +-----+-------+-----+-------+-----+
2383 <--- B: +ve step --->
2388 where "n" is the number of scalar iterations covered by the segment.
2390 A is the range of bytes accessed when the step is negative,
2391 B is the range when the step is positive.
2393 We know that DR_B is a write. We also know (from checking that
2394 DR_A and DR_B are well-ordered) that for each i in [0, n-1],
2395 the write performed by access i of DR_B occurs after access numbers
2396 j<=i of DR_A in both the original and the new code. Any write or
2397 anti dependencies wrt those DR_A accesses are therefore maintained.
2399 We just need to make sure that each individual write in DR_B does not
2400 overlap any higher-indexed access in DR_A; such DR_A accesses happen
2401 after the DR_B access in the original code but happen before it in
2404 We know the steps for both accesses are equal, so by induction, we
2405 just need to test whether the first write of DR_B overlaps a later
2406 access of DR_A. In other words, we need to move addr_a along by
2409 addr_a' = addr_a + step
2413 [addr_b, addr_b + last_chunk_b]
2417 [addr_a' + low_offset_a, addr_a' + high_offset_a + last_chunk_a]
2419 where [low_offset_a, high_offset_a] spans accesses [1, n-1]. I.e.:
2421 low_offset_a = +ve step ? 0 : seg_len_a - step
2422 high_offset_a = +ve step ? seg_len_a - step : 0
2424 This is equivalent to testing whether:
2426 addr_a' + low_offset_a <= addr_b + last_chunk_b
2427 && addr_b <= addr_a' + high_offset_a + last_chunk_a
2429 Converting this into a single test, there is an overlap if:
2431 0 <= addr_b + last_chunk_b - addr_a' - low_offset_a <= limit
2433 where limit = high_offset_a - low_offset_a + last_chunk_a + last_chunk_b
2435 If DR_A is performed, limit + |step| - last_chunk_b is known to be
2436 less than the size of the object underlying DR_A. We also know
2437 that last_chunk_b <= |step|; this is checked elsewhere if it isn't
2438 guaranteed at compile time. There can therefore be no overflow if
2439 "limit" is calculated in an unsigned type with pointer precision. */
2440 tree addr_a
= fold_build_pointer_plus (DR_BASE_ADDRESS (dr_a
.dr
),
2441 DR_OFFSET (dr_a
.dr
));
2442 addr_a
= fold_build_pointer_plus (addr_a
, DR_INIT (dr_a
.dr
));
2444 tree addr_b
= fold_build_pointer_plus (DR_BASE_ADDRESS (dr_b
.dr
),
2445 DR_OFFSET (dr_b
.dr
));
2446 addr_b
= fold_build_pointer_plus (addr_b
, DR_INIT (dr_b
.dr
));
2448 /* Advance ADDR_A by one iteration and adjust the length to compensate. */
2449 addr_a
= fold_build_pointer_plus (addr_a
, step
);
2450 tree seg_len_a_minus_step
= fold_build2 (MINUS_EXPR
, sizetype
,
2452 if (!CONSTANT_CLASS_P (seg_len_a_minus_step
))
2453 seg_len_a_minus_step
= build1 (SAVE_EXPR
, sizetype
, seg_len_a_minus_step
);
2455 tree low_offset_a
= fold_build3 (COND_EXPR
, sizetype
, neg_step
,
2456 seg_len_a_minus_step
, size_zero_node
);
2457 if (!CONSTANT_CLASS_P (low_offset_a
))
2458 low_offset_a
= build1 (SAVE_EXPR
, sizetype
, low_offset_a
);
2460 /* We could use COND_EXPR <neg_step, size_zero_node, seg_len_a_minus_step>,
2461 but it's usually more efficient to reuse the LOW_OFFSET_A result. */
2462 tree high_offset_a
= fold_build2 (MINUS_EXPR
, sizetype
, seg_len_a_minus_step
,
2465 /* The amount added to addr_b - addr_a'. */
2466 tree bias
= fold_build2 (MINUS_EXPR
, sizetype
,
2467 size_int (last_chunk_b
), low_offset_a
);
2469 tree limit
= fold_build2 (MINUS_EXPR
, sizetype
, high_offset_a
, low_offset_a
);
2470 limit
= fold_build2 (PLUS_EXPR
, sizetype
, limit
,
2471 size_int (last_chunk_a
+ last_chunk_b
));
2473 tree subject
= fold_build2 (POINTER_DIFF_EXPR
, ssizetype
, addr_b
, addr_a
);
2474 subject
= fold_build2 (PLUS_EXPR
, sizetype
,
2475 fold_convert (sizetype
, subject
), bias
);
2477 *cond_expr
= fold_build2 (GT_EXPR
, boolean_type_node
, subject
, limit
);
2478 if (dump_enabled_p ())
2479 dump_printf (MSG_NOTE
, "using an address-based WAR/WAW test\n");
2483 /* If ALIGN is nonzero, set up *SEQ_MIN_OUT and *SEQ_MAX_OUT so that for
2484 every address ADDR accessed by D:
2486 *SEQ_MIN_OUT <= ADDR (== ADDR & -ALIGN) <= *SEQ_MAX_OUT
2488 In this case, every element accessed by D is aligned to at least
2491 If ALIGN is zero then instead set *SEG_MAX_OUT so that:
2493 *SEQ_MIN_OUT <= ADDR < *SEQ_MAX_OUT. */
2496 get_segment_min_max (const dr_with_seg_len
&d
, tree
*seg_min_out
,
2497 tree
*seg_max_out
, HOST_WIDE_INT align
)
2499 /* Each access has the following pattern:
2502 <--- A: -ve step --->
2503 +-----+-------+-----+-------+-----+
2504 | n-1 | ,.... | 0 | ..... | n-1 |
2505 +-----+-------+-----+-------+-----+
2506 <--- B: +ve step --->
2511 where "n" is the number of scalar iterations covered by the segment.
2512 (This should be VF for a particular pair if we know that both steps
2513 are the same, otherwise it will be the full number of scalar loop
2516 A is the range of bytes accessed when the step is negative,
2517 B is the range when the step is positive.
2519 If the access size is "access_size" bytes, the lowest addressed byte is:
2521 base + (step < 0 ? seg_len : 0) [LB]
2523 and the highest addressed byte is always below:
2525 base + (step < 0 ? 0 : seg_len) + access_size [UB]
2531 If ALIGN is nonzero, all three values are aligned to at least ALIGN
2534 LB <= ADDR <= UB - ALIGN
2536 where "- ALIGN" folds naturally with the "+ access_size" and often
2539 We don't try to simplify LB and UB beyond this (e.g. by using
2540 MIN and MAX based on whether seg_len rather than the stride is
2541 negative) because it is possible for the absolute size of the
2542 segment to overflow the range of a ssize_t.
2544 Keeping the pointer_plus outside of the cond_expr should allow
2545 the cond_exprs to be shared with other alias checks. */
2546 tree indicator
= dr_direction_indicator (d
.dr
);
2547 tree neg_step
= fold_build2 (LT_EXPR
, boolean_type_node
,
2548 fold_convert (ssizetype
, indicator
),
2550 tree addr_base
= fold_build_pointer_plus (DR_BASE_ADDRESS (d
.dr
),
2552 addr_base
= fold_build_pointer_plus (addr_base
, DR_INIT (d
.dr
));
2554 = fold_convert (sizetype
, rewrite_to_non_trapping_overflow (d
.seg_len
));
2556 tree min_reach
= fold_build3 (COND_EXPR
, sizetype
, neg_step
,
2557 seg_len
, size_zero_node
);
2558 tree max_reach
= fold_build3 (COND_EXPR
, sizetype
, neg_step
,
2559 size_zero_node
, seg_len
);
2560 max_reach
= fold_build2 (PLUS_EXPR
, sizetype
, max_reach
,
2561 size_int (d
.access_size
- align
));
2563 *seg_min_out
= fold_build_pointer_plus (addr_base
, min_reach
);
2564 *seg_max_out
= fold_build_pointer_plus (addr_base
, max_reach
);
2567 /* Generate a runtime condition that is true if ALIAS_PAIR is free of aliases,
2568 storing the condition in *COND_EXPR. The fallback is to generate a
2569 a test that the two accesses do not overlap:
2571 end_a <= start_b || end_b <= start_a. */
2574 create_intersect_range_checks (class loop
*loop
, tree
*cond_expr
,
2575 const dr_with_seg_len_pair_t
&alias_pair
)
2577 const dr_with_seg_len
& dr_a
= alias_pair
.first
;
2578 const dr_with_seg_len
& dr_b
= alias_pair
.second
;
2579 *cond_expr
= NULL_TREE
;
2580 if (create_intersect_range_checks_index (loop
, cond_expr
, alias_pair
))
2583 if (create_ifn_alias_checks (cond_expr
, alias_pair
))
2586 if (create_waw_or_war_checks (cond_expr
, alias_pair
))
2589 unsigned HOST_WIDE_INT min_align
;
2591 /* We don't have to check DR_ALIAS_MIXED_STEPS here, since both versions
2592 are equivalent. This is just an optimization heuristic. */
2593 if (TREE_CODE (DR_STEP (dr_a
.dr
)) == INTEGER_CST
2594 && TREE_CODE (DR_STEP (dr_b
.dr
)) == INTEGER_CST
)
2596 /* In this case adding access_size to seg_len is likely to give
2597 a simple X * step, where X is either the number of scalar
2598 iterations or the vectorization factor. We're better off
2599 keeping that, rather than subtracting an alignment from it.
2601 In this case the maximum values are exclusive and so there is
2602 no alias if the maximum of one segment equals the minimum
2609 /* Calculate the minimum alignment shared by all four pointers,
2610 then arrange for this alignment to be subtracted from the
2611 exclusive maximum values to get inclusive maximum values.
2612 This "- min_align" is cumulative with a "+ access_size"
2613 in the calculation of the maximum values. In the best
2614 (and common) case, the two cancel each other out, leaving
2615 us with an inclusive bound based only on seg_len. In the
2616 worst case we're simply adding a smaller number than before.
2618 Because the maximum values are inclusive, there is an alias
2619 if the maximum value of one segment is equal to the minimum
2620 value of the other. */
2621 min_align
= MIN (dr_a
.align
, dr_b
.align
);
2625 tree seg_a_min
, seg_a_max
, seg_b_min
, seg_b_max
;
2626 get_segment_min_max (dr_a
, &seg_a_min
, &seg_a_max
, min_align
);
2627 get_segment_min_max (dr_b
, &seg_b_min
, &seg_b_max
, min_align
);
2630 = fold_build2 (TRUTH_OR_EXPR
, boolean_type_node
,
2631 fold_build2 (cmp_code
, boolean_type_node
, seg_a_max
, seg_b_min
),
2632 fold_build2 (cmp_code
, boolean_type_node
, seg_b_max
, seg_a_min
));
2633 if (dump_enabled_p ())
2634 dump_printf (MSG_NOTE
, "using an address-based overlap test\n");
2637 /* Create a conditional expression that represents the run-time checks for
2638 overlapping of address ranges represented by a list of data references
2639 pairs passed in ALIAS_PAIRS. Data references are in LOOP. The returned
2640 COND_EXPR is the conditional expression to be used in the if statement
2641 that controls which version of the loop gets executed at runtime. */
2644 create_runtime_alias_checks (class loop
*loop
,
2645 vec
<dr_with_seg_len_pair_t
> *alias_pairs
,
2648 tree part_cond_expr
;
2650 fold_defer_overflow_warnings ();
2651 dr_with_seg_len_pair_t
*alias_pair
;
2653 FOR_EACH_VEC_ELT (*alias_pairs
, i
, alias_pair
)
2655 gcc_assert (alias_pair
->flags
);
2656 if (dump_enabled_p ())
2657 dump_printf (MSG_NOTE
,
2658 "create runtime check for data references %T and %T\n",
2659 DR_REF (alias_pair
->first
.dr
),
2660 DR_REF (alias_pair
->second
.dr
));
2662 /* Create condition expression for each pair data references. */
2663 create_intersect_range_checks (loop
, &part_cond_expr
, *alias_pair
);
2665 *cond_expr
= fold_build2 (TRUTH_AND_EXPR
, boolean_type_node
,
2666 *cond_expr
, part_cond_expr
);
2668 *cond_expr
= part_cond_expr
;
2670 fold_undefer_and_ignore_overflow_warnings ();
2673 /* Check if OFFSET1 and OFFSET2 (DR_OFFSETs of some data-refs) are identical
2676 dr_equal_offsets_p1 (tree offset1
, tree offset2
)
2680 STRIP_NOPS (offset1
);
2681 STRIP_NOPS (offset2
);
2683 if (offset1
== offset2
)
2686 if (TREE_CODE (offset1
) != TREE_CODE (offset2
)
2687 || (!BINARY_CLASS_P (offset1
) && !UNARY_CLASS_P (offset1
)))
2690 res
= dr_equal_offsets_p1 (TREE_OPERAND (offset1
, 0),
2691 TREE_OPERAND (offset2
, 0));
2693 if (!res
|| !BINARY_CLASS_P (offset1
))
2696 res
= dr_equal_offsets_p1 (TREE_OPERAND (offset1
, 1),
2697 TREE_OPERAND (offset2
, 1));
2702 /* Check if DRA and DRB have equal offsets. */
2704 dr_equal_offsets_p (struct data_reference
*dra
,
2705 struct data_reference
*drb
)
2707 tree offset1
, offset2
;
2709 offset1
= DR_OFFSET (dra
);
2710 offset2
= DR_OFFSET (drb
);
2712 return dr_equal_offsets_p1 (offset1
, offset2
);
2715 /* Returns true if FNA == FNB. */
2718 affine_function_equal_p (affine_fn fna
, affine_fn fnb
)
2720 unsigned i
, n
= fna
.length ();
2722 if (n
!= fnb
.length ())
2725 for (i
= 0; i
< n
; i
++)
2726 if (!operand_equal_p (fna
[i
], fnb
[i
], 0))
2732 /* If all the functions in CF are the same, returns one of them,
2733 otherwise returns NULL. */
2736 common_affine_function (conflict_function
*cf
)
2741 if (!CF_NONTRIVIAL_P (cf
))
2742 return affine_fn ();
2746 for (i
= 1; i
< cf
->n
; i
++)
2747 if (!affine_function_equal_p (comm
, cf
->fns
[i
]))
2748 return affine_fn ();
2753 /* Returns the base of the affine function FN. */
2756 affine_function_base (affine_fn fn
)
2761 /* Returns true if FN is a constant. */
2764 affine_function_constant_p (affine_fn fn
)
2769 for (i
= 1; fn
.iterate (i
, &coef
); i
++)
2770 if (!integer_zerop (coef
))
2776 /* Returns true if FN is the zero constant function. */
2779 affine_function_zero_p (affine_fn fn
)
2781 return (integer_zerop (affine_function_base (fn
))
2782 && affine_function_constant_p (fn
));
2785 /* Returns a signed integer type with the largest precision from TA
2789 signed_type_for_types (tree ta
, tree tb
)
2791 if (TYPE_PRECISION (ta
) > TYPE_PRECISION (tb
))
2792 return signed_type_for (ta
);
2794 return signed_type_for (tb
);
2797 /* Applies operation OP on affine functions FNA and FNB, and returns the
2801 affine_fn_op (enum tree_code op
, affine_fn fna
, affine_fn fnb
)
2807 if (fnb
.length () > fna
.length ())
2819 for (i
= 0; i
< n
; i
++)
2821 tree type
= signed_type_for_types (TREE_TYPE (fna
[i
]),
2822 TREE_TYPE (fnb
[i
]));
2823 ret
.quick_push (fold_build2 (op
, type
, fna
[i
], fnb
[i
]));
2826 for (; fna
.iterate (i
, &coef
); i
++)
2827 ret
.quick_push (fold_build2 (op
, signed_type_for (TREE_TYPE (coef
)),
2828 coef
, integer_zero_node
));
2829 for (; fnb
.iterate (i
, &coef
); i
++)
2830 ret
.quick_push (fold_build2 (op
, signed_type_for (TREE_TYPE (coef
)),
2831 integer_zero_node
, coef
));
2836 /* Returns the sum of affine functions FNA and FNB. */
2839 affine_fn_plus (affine_fn fna
, affine_fn fnb
)
2841 return affine_fn_op (PLUS_EXPR
, fna
, fnb
);
2844 /* Returns the difference of affine functions FNA and FNB. */
2847 affine_fn_minus (affine_fn fna
, affine_fn fnb
)
2849 return affine_fn_op (MINUS_EXPR
, fna
, fnb
);
2852 /* Frees affine function FN. */
2855 affine_fn_free (affine_fn fn
)
2860 /* Determine for each subscript in the data dependence relation DDR
2864 compute_subscript_distance (struct data_dependence_relation
*ddr
)
2866 conflict_function
*cf_a
, *cf_b
;
2867 affine_fn fn_a
, fn_b
, diff
;
2869 if (DDR_ARE_DEPENDENT (ddr
) == NULL_TREE
)
2873 for (i
= 0; i
< DDR_NUM_SUBSCRIPTS (ddr
); i
++)
2875 struct subscript
*subscript
;
2877 subscript
= DDR_SUBSCRIPT (ddr
, i
);
2878 cf_a
= SUB_CONFLICTS_IN_A (subscript
);
2879 cf_b
= SUB_CONFLICTS_IN_B (subscript
);
2881 fn_a
= common_affine_function (cf_a
);
2882 fn_b
= common_affine_function (cf_b
);
2883 if (!fn_a
.exists () || !fn_b
.exists ())
2885 SUB_DISTANCE (subscript
) = chrec_dont_know
;
2888 diff
= affine_fn_minus (fn_a
, fn_b
);
2890 if (affine_function_constant_p (diff
))
2891 SUB_DISTANCE (subscript
) = affine_function_base (diff
);
2893 SUB_DISTANCE (subscript
) = chrec_dont_know
;
2895 affine_fn_free (diff
);
2900 /* Returns the conflict function for "unknown". */
2902 static conflict_function
*
2903 conflict_fn_not_known (void)
2905 conflict_function
*fn
= XCNEW (conflict_function
);
2911 /* Returns the conflict function for "independent". */
2913 static conflict_function
*
2914 conflict_fn_no_dependence (void)
2916 conflict_function
*fn
= XCNEW (conflict_function
);
2917 fn
->n
= NO_DEPENDENCE
;
2922 /* Returns true if the address of OBJ is invariant in LOOP. */
2925 object_address_invariant_in_loop_p (const class loop
*loop
, const_tree obj
)
2927 while (handled_component_p (obj
))
2929 if (TREE_CODE (obj
) == ARRAY_REF
)
2931 for (int i
= 1; i
< 4; ++i
)
2932 if (chrec_contains_symbols_defined_in_loop (TREE_OPERAND (obj
, i
),
2936 else if (TREE_CODE (obj
) == COMPONENT_REF
)
2938 if (chrec_contains_symbols_defined_in_loop (TREE_OPERAND (obj
, 2),
2942 obj
= TREE_OPERAND (obj
, 0);
2945 if (!INDIRECT_REF_P (obj
)
2946 && TREE_CODE (obj
) != MEM_REF
)
2949 return !chrec_contains_symbols_defined_in_loop (TREE_OPERAND (obj
, 0),
2953 /* Returns false if we can prove that data references A and B do not alias,
2954 true otherwise. If LOOP_NEST is false no cross-iteration aliases are
2958 dr_may_alias_p (const struct data_reference
*a
, const struct data_reference
*b
,
2959 class loop
*loop_nest
)
2961 tree addr_a
= DR_BASE_OBJECT (a
);
2962 tree addr_b
= DR_BASE_OBJECT (b
);
2964 /* If we are not processing a loop nest but scalar code we
2965 do not need to care about possible cross-iteration dependences
2966 and thus can process the full original reference. Do so,
2967 similar to how loop invariant motion applies extra offset-based
2971 aff_tree off1
, off2
;
2972 poly_widest_int size1
, size2
;
2973 get_inner_reference_aff (DR_REF (a
), &off1
, &size1
);
2974 get_inner_reference_aff (DR_REF (b
), &off2
, &size2
);
2975 aff_combination_scale (&off1
, -1);
2976 aff_combination_add (&off2
, &off1
);
2977 if (aff_comb_cannot_overlap_p (&off2
, size1
, size2
))
2981 if ((TREE_CODE (addr_a
) == MEM_REF
|| TREE_CODE (addr_a
) == TARGET_MEM_REF
)
2982 && (TREE_CODE (addr_b
) == MEM_REF
|| TREE_CODE (addr_b
) == TARGET_MEM_REF
)
2983 /* For cross-iteration dependences the cliques must be valid for the
2984 whole loop, not just individual iterations. */
2986 || MR_DEPENDENCE_CLIQUE (addr_a
) == 1
2987 || MR_DEPENDENCE_CLIQUE (addr_a
) == loop_nest
->owned_clique
)
2988 && MR_DEPENDENCE_CLIQUE (addr_a
) == MR_DEPENDENCE_CLIQUE (addr_b
)
2989 && MR_DEPENDENCE_BASE (addr_a
) != MR_DEPENDENCE_BASE (addr_b
))
2992 /* If we had an evolution in a pointer-based MEM_REF BASE_OBJECT we
2993 do not know the size of the base-object. So we cannot do any
2994 offset/overlap based analysis but have to rely on points-to
2995 information only. */
2996 if (TREE_CODE (addr_a
) == MEM_REF
2997 && (DR_UNCONSTRAINED_BASE (a
)
2998 || TREE_CODE (TREE_OPERAND (addr_a
, 0)) == SSA_NAME
))
3000 /* For true dependences we can apply TBAA. */
3001 if (flag_strict_aliasing
3002 && DR_IS_WRITE (a
) && DR_IS_READ (b
)
3003 && !alias_sets_conflict_p (get_alias_set (DR_REF (a
)),
3004 get_alias_set (DR_REF (b
))))
3006 if (TREE_CODE (addr_b
) == MEM_REF
)
3007 return ptr_derefs_may_alias_p (TREE_OPERAND (addr_a
, 0),
3008 TREE_OPERAND (addr_b
, 0));
3010 return ptr_derefs_may_alias_p (TREE_OPERAND (addr_a
, 0),
3011 build_fold_addr_expr (addr_b
));
3013 else if (TREE_CODE (addr_b
) == MEM_REF
3014 && (DR_UNCONSTRAINED_BASE (b
)
3015 || TREE_CODE (TREE_OPERAND (addr_b
, 0)) == SSA_NAME
))
3017 /* For true dependences we can apply TBAA. */
3018 if (flag_strict_aliasing
3019 && DR_IS_WRITE (a
) && DR_IS_READ (b
)
3020 && !alias_sets_conflict_p (get_alias_set (DR_REF (a
)),
3021 get_alias_set (DR_REF (b
))))
3023 if (TREE_CODE (addr_a
) == MEM_REF
)
3024 return ptr_derefs_may_alias_p (TREE_OPERAND (addr_a
, 0),
3025 TREE_OPERAND (addr_b
, 0));
3027 return ptr_derefs_may_alias_p (build_fold_addr_expr (addr_a
),
3028 TREE_OPERAND (addr_b
, 0));
3031 /* Otherwise DR_BASE_OBJECT is an access that covers the whole object
3032 that is being subsetted in the loop nest. */
3033 if (DR_IS_WRITE (a
) && DR_IS_WRITE (b
))
3034 return refs_output_dependent_p (addr_a
, addr_b
);
3035 else if (DR_IS_READ (a
) && DR_IS_WRITE (b
))
3036 return refs_anti_dependent_p (addr_a
, addr_b
);
3037 return refs_may_alias_p (addr_a
, addr_b
);
3040 /* REF_A and REF_B both satisfy access_fn_component_p. Return true
3041 if it is meaningful to compare their associated access functions
3042 when checking for dependencies. */
3045 access_fn_components_comparable_p (tree ref_a
, tree ref_b
)
3047 /* Allow pairs of component refs from the following sets:
3049 { REALPART_EXPR, IMAGPART_EXPR }
3052 tree_code code_a
= TREE_CODE (ref_a
);
3053 tree_code code_b
= TREE_CODE (ref_b
);
3054 if (code_a
== IMAGPART_EXPR
)
3055 code_a
= REALPART_EXPR
;
3056 if (code_b
== IMAGPART_EXPR
)
3057 code_b
= REALPART_EXPR
;
3058 if (code_a
!= code_b
)
3061 if (TREE_CODE (ref_a
) == COMPONENT_REF
)
3062 /* ??? We cannot simply use the type of operand #0 of the refs here as
3063 the Fortran compiler smuggles type punning into COMPONENT_REFs.
3064 Use the DECL_CONTEXT of the FIELD_DECLs instead. */
3065 return (DECL_CONTEXT (TREE_OPERAND (ref_a
, 1))
3066 == DECL_CONTEXT (TREE_OPERAND (ref_b
, 1)));
3068 return types_compatible_p (TREE_TYPE (TREE_OPERAND (ref_a
, 0)),
3069 TREE_TYPE (TREE_OPERAND (ref_b
, 0)));
3072 /* Initialize a data dependence relation between data accesses A and
3073 B. NB_LOOPS is the number of loops surrounding the references: the
3074 size of the classic distance/direction vectors. */
3076 struct data_dependence_relation
*
3077 initialize_data_dependence_relation (struct data_reference
*a
,
3078 struct data_reference
*b
,
3079 vec
<loop_p
> loop_nest
)
3081 struct data_dependence_relation
*res
;
3084 res
= XCNEW (struct data_dependence_relation
);
3087 DDR_LOOP_NEST (res
).create (0);
3088 DDR_SUBSCRIPTS (res
).create (0);
3089 DDR_DIR_VECTS (res
).create (0);
3090 DDR_DIST_VECTS (res
).create (0);
3092 if (a
== NULL
|| b
== NULL
)
3094 DDR_ARE_DEPENDENT (res
) = chrec_dont_know
;
3098 /* If the data references do not alias, then they are independent. */
3099 if (!dr_may_alias_p (a
, b
, loop_nest
.exists () ? loop_nest
[0] : NULL
))
3101 DDR_ARE_DEPENDENT (res
) = chrec_known
;
3105 unsigned int num_dimensions_a
= DR_NUM_DIMENSIONS (a
);
3106 unsigned int num_dimensions_b
= DR_NUM_DIMENSIONS (b
);
3107 if (num_dimensions_a
== 0 || num_dimensions_b
== 0)
3109 DDR_ARE_DEPENDENT (res
) = chrec_dont_know
;
3113 /* For unconstrained bases, the root (highest-indexed) subscript
3114 describes a variation in the base of the original DR_REF rather
3115 than a component access. We have no type that accurately describes
3116 the new DR_BASE_OBJECT (whose TREE_TYPE describes the type *after*
3117 applying this subscript) so limit the search to the last real
3123 f (int a[][8], int b[][8])
3125 for (int i = 0; i < 8; ++i)
3126 a[i * 2][0] = b[i][0];
3129 the a and b accesses have a single ARRAY_REF component reference [0]
3130 but have two subscripts. */
3131 if (DR_UNCONSTRAINED_BASE (a
))
3132 num_dimensions_a
-= 1;
3133 if (DR_UNCONSTRAINED_BASE (b
))
3134 num_dimensions_b
-= 1;
3136 /* These structures describe sequences of component references in
3137 DR_REF (A) and DR_REF (B). Each component reference is tied to a
3138 specific access function. */
3140 /* The sequence starts at DR_ACCESS_FN (A, START_A) of A and
3141 DR_ACCESS_FN (B, START_B) of B (inclusive) and extends to higher
3142 indices. In C notation, these are the indices of the rightmost
3143 component references; e.g. for a sequence .b.c.d, the start
3145 unsigned int start_a
;
3146 unsigned int start_b
;
3148 /* The sequence contains LENGTH consecutive access functions from
3150 unsigned int length
;
3152 /* The enclosing objects for the A and B sequences respectively,
3153 i.e. the objects to which DR_ACCESS_FN (A, START_A + LENGTH - 1)
3154 and DR_ACCESS_FN (B, START_B + LENGTH - 1) are applied. */
3157 } full_seq
= {}, struct_seq
= {};
3159 /* Before each iteration of the loop:
3161 - REF_A is what you get after applying DR_ACCESS_FN (A, INDEX_A) and
3162 - REF_B is what you get after applying DR_ACCESS_FN (B, INDEX_B). */
3163 unsigned int index_a
= 0;
3164 unsigned int index_b
= 0;
3165 tree ref_a
= DR_REF (a
);
3166 tree ref_b
= DR_REF (b
);
3168 /* Now walk the component references from the final DR_REFs back up to
3169 the enclosing base objects. Each component reference corresponds
3170 to one access function in the DR, with access function 0 being for
3171 the final DR_REF and the highest-indexed access function being the
3172 one that is applied to the base of the DR.
3174 Look for a sequence of component references whose access functions
3175 are comparable (see access_fn_components_comparable_p). If more
3176 than one such sequence exists, pick the one nearest the base
3177 (which is the leftmost sequence in C notation). Store this sequence
3180 For example, if we have:
3182 struct foo { struct bar s; ... } (*a)[10], (*b)[10];
3185 B: __real b[0][i].s.e[i].f
3187 (where d is the same type as the real component of f) then the access
3194 B: __real .f [i] .e .s [i]
3196 The A0/B2 column isn't comparable, since .d is a COMPONENT_REF
3197 and [i] is an ARRAY_REF. However, the A1/B3 column contains two
3198 COMPONENT_REF accesses for struct bar, so is comparable. Likewise
3199 the A2/B4 column contains two COMPONENT_REF accesses for struct foo,
3200 so is comparable. The A3/B5 column contains two ARRAY_REFs that
3201 index foo[10] arrays, so is again comparable. The sequence is
3204 A: [1, 3] (i.e. [i].s.c)
3205 B: [3, 5] (i.e. [i].s.e)
3207 Also look for sequences of component references whose access
3208 functions are comparable and whose enclosing objects have the same
3209 RECORD_TYPE. Store this sequence in STRUCT_SEQ. In the above
3210 example, STRUCT_SEQ would be:
3212 A: [1, 2] (i.e. s.c)
3213 B: [3, 4] (i.e. s.e) */
3214 while (index_a
< num_dimensions_a
&& index_b
< num_dimensions_b
)
3216 /* REF_A and REF_B must be one of the component access types
3217 allowed by dr_analyze_indices. */
3218 gcc_checking_assert (access_fn_component_p (ref_a
));
3219 gcc_checking_assert (access_fn_component_p (ref_b
));
3221 /* Get the immediately-enclosing objects for REF_A and REF_B,
3222 i.e. the references *before* applying DR_ACCESS_FN (A, INDEX_A)
3223 and DR_ACCESS_FN (B, INDEX_B). */
3224 tree object_a
= TREE_OPERAND (ref_a
, 0);
3225 tree object_b
= TREE_OPERAND (ref_b
, 0);
3227 tree type_a
= TREE_TYPE (object_a
);
3228 tree type_b
= TREE_TYPE (object_b
);
3229 if (access_fn_components_comparable_p (ref_a
, ref_b
))
3231 /* This pair of component accesses is comparable for dependence
3232 analysis, so we can include DR_ACCESS_FN (A, INDEX_A) and
3233 DR_ACCESS_FN (B, INDEX_B) in the sequence. */
3234 if (full_seq
.start_a
+ full_seq
.length
!= index_a
3235 || full_seq
.start_b
+ full_seq
.length
!= index_b
)
3237 /* The accesses don't extend the current sequence,
3238 so start a new one here. */
3239 full_seq
.start_a
= index_a
;
3240 full_seq
.start_b
= index_b
;
3241 full_seq
.length
= 0;
3244 /* Add this pair of references to the sequence. */
3245 full_seq
.length
+= 1;
3246 full_seq
.object_a
= object_a
;
3247 full_seq
.object_b
= object_b
;
3249 /* If the enclosing objects are structures (and thus have the
3250 same RECORD_TYPE), record the new sequence in STRUCT_SEQ. */
3251 if (TREE_CODE (type_a
) == RECORD_TYPE
)
3252 struct_seq
= full_seq
;
3254 /* Move to the next containing reference for both A and B. */
3262 /* Try to approach equal type sizes. */
3263 if (!COMPLETE_TYPE_P (type_a
)
3264 || !COMPLETE_TYPE_P (type_b
)
3265 || !tree_fits_uhwi_p (TYPE_SIZE_UNIT (type_a
))
3266 || !tree_fits_uhwi_p (TYPE_SIZE_UNIT (type_b
)))
3269 unsigned HOST_WIDE_INT size_a
= tree_to_uhwi (TYPE_SIZE_UNIT (type_a
));
3270 unsigned HOST_WIDE_INT size_b
= tree_to_uhwi (TYPE_SIZE_UNIT (type_b
));
3271 if (size_a
<= size_b
)
3276 if (size_b
<= size_a
)
3283 /* See whether FULL_SEQ ends at the base and whether the two bases
3284 are equal. We do not care about TBAA or alignment info so we can
3285 use OEP_ADDRESS_OF to avoid false negatives. */
3286 tree base_a
= DR_BASE_OBJECT (a
);
3287 tree base_b
= DR_BASE_OBJECT (b
);
3288 bool same_base_p
= (full_seq
.start_a
+ full_seq
.length
== num_dimensions_a
3289 && full_seq
.start_b
+ full_seq
.length
== num_dimensions_b
3290 && DR_UNCONSTRAINED_BASE (a
) == DR_UNCONSTRAINED_BASE (b
)
3291 && operand_equal_p (base_a
, base_b
, OEP_ADDRESS_OF
)
3292 && (types_compatible_p (TREE_TYPE (base_a
),
3294 || (!base_supports_access_fn_components_p (base_a
)
3295 && !base_supports_access_fn_components_p (base_b
)
3297 (TYPE_SIZE (TREE_TYPE (base_a
)),
3298 TYPE_SIZE (TREE_TYPE (base_b
)), 0)))
3299 && (!loop_nest
.exists ()
3300 || (object_address_invariant_in_loop_p
3301 (loop_nest
[0], base_a
))));
3303 /* If the bases are the same, we can include the base variation too.
3304 E.g. the b accesses in:
3306 for (int i = 0; i < n; ++i)
3307 b[i + 4][0] = b[i][0];
3309 have a definite dependence distance of 4, while for:
3311 for (int i = 0; i < n; ++i)
3312 a[i + 4][0] = b[i][0];
3314 the dependence distance depends on the gap between a and b.
3316 If the bases are different then we can only rely on the sequence
3317 rooted at a structure access, since arrays are allowed to overlap
3318 arbitrarily and change shape arbitrarily. E.g. we treat this as
3323 ((int (*)[4][3]) &a[1])[i][0] += ((int (*)[4][3]) &a[2])[i][0];
3325 where two lvalues with the same int[4][3] type overlap, and where
3326 both lvalues are distinct from the object's declared type. */
3329 if (DR_UNCONSTRAINED_BASE (a
))
3330 full_seq
.length
+= 1;
3333 full_seq
= struct_seq
;
3335 /* Punt if we didn't find a suitable sequence. */
3336 if (full_seq
.length
== 0)
3338 DDR_ARE_DEPENDENT (res
) = chrec_dont_know
;
3344 /* Partial overlap is possible for different bases when strict aliasing
3345 is not in effect. It's also possible if either base involves a union
3348 struct s1 { int a[2]; };
3349 struct s2 { struct s1 b; int c; };
3350 struct s3 { int d; struct s1 e; };
3351 union u { struct s2 f; struct s3 g; } *p, *q;
3353 the s1 at "p->f.b" (base "p->f") partially overlaps the s1 at
3354 "p->g.e" (base "p->g") and might partially overlap the s1 at
3355 "q->g.e" (base "q->g"). */
3356 if (!flag_strict_aliasing
3357 || ref_contains_union_access_p (full_seq
.object_a
)
3358 || ref_contains_union_access_p (full_seq
.object_b
))
3360 DDR_ARE_DEPENDENT (res
) = chrec_dont_know
;
3364 DDR_COULD_BE_INDEPENDENT_P (res
) = true;
3365 if (!loop_nest
.exists ()
3366 || (object_address_invariant_in_loop_p (loop_nest
[0],
3368 && object_address_invariant_in_loop_p (loop_nest
[0],
3369 full_seq
.object_b
)))
3371 DDR_OBJECT_A (res
) = full_seq
.object_a
;
3372 DDR_OBJECT_B (res
) = full_seq
.object_b
;
3376 DDR_AFFINE_P (res
) = true;
3377 DDR_ARE_DEPENDENT (res
) = NULL_TREE
;
3378 DDR_SUBSCRIPTS (res
).create (full_seq
.length
);
3379 DDR_LOOP_NEST (res
) = loop_nest
;
3380 DDR_SELF_REFERENCE (res
) = false;
3382 for (i
= 0; i
< full_seq
.length
; ++i
)
3384 struct subscript
*subscript
;
3386 subscript
= XNEW (struct subscript
);
3387 SUB_ACCESS_FN (subscript
, 0) = DR_ACCESS_FN (a
, full_seq
.start_a
+ i
);
3388 SUB_ACCESS_FN (subscript
, 1) = DR_ACCESS_FN (b
, full_seq
.start_b
+ i
);
3389 SUB_CONFLICTS_IN_A (subscript
) = conflict_fn_not_known ();
3390 SUB_CONFLICTS_IN_B (subscript
) = conflict_fn_not_known ();
3391 SUB_LAST_CONFLICT (subscript
) = chrec_dont_know
;
3392 SUB_DISTANCE (subscript
) = chrec_dont_know
;
3393 DDR_SUBSCRIPTS (res
).safe_push (subscript
);
3399 /* Frees memory used by the conflict function F. */
3402 free_conflict_function (conflict_function
*f
)
3406 if (CF_NONTRIVIAL_P (f
))
3408 for (i
= 0; i
< f
->n
; i
++)
3409 affine_fn_free (f
->fns
[i
]);
3414 /* Frees memory used by SUBSCRIPTS. */
3417 free_subscripts (vec
<subscript_p
> subscripts
)
3422 FOR_EACH_VEC_ELT (subscripts
, i
, s
)
3424 free_conflict_function (s
->conflicting_iterations_in_a
);
3425 free_conflict_function (s
->conflicting_iterations_in_b
);
3428 subscripts
.release ();
3431 /* Set DDR_ARE_DEPENDENT to CHREC and finalize the subscript overlap
3435 finalize_ddr_dependent (struct data_dependence_relation
*ddr
,
3438 DDR_ARE_DEPENDENT (ddr
) = chrec
;
3439 free_subscripts (DDR_SUBSCRIPTS (ddr
));
3440 DDR_SUBSCRIPTS (ddr
).create (0);
3443 /* The dependence relation DDR cannot be represented by a distance
3447 non_affine_dependence_relation (struct data_dependence_relation
*ddr
)
3449 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
3450 fprintf (dump_file
, "(Dependence relation cannot be represented by distance vector.) \n");
3452 DDR_AFFINE_P (ddr
) = false;
3457 /* This section contains the classic Banerjee tests. */
3459 /* Returns true iff CHREC_A and CHREC_B are not dependent on any index
3460 variables, i.e., if the ZIV (Zero Index Variable) test is true. */
3463 ziv_subscript_p (const_tree chrec_a
, const_tree chrec_b
)
3465 return (evolution_function_is_constant_p (chrec_a
)
3466 && evolution_function_is_constant_p (chrec_b
));
3469 /* Returns true iff CHREC_A and CHREC_B are dependent on an index
3470 variable, i.e., if the SIV (Single Index Variable) test is true. */
3473 siv_subscript_p (const_tree chrec_a
, const_tree chrec_b
)
3475 if ((evolution_function_is_constant_p (chrec_a
)
3476 && evolution_function_is_univariate_p (chrec_b
))
3477 || (evolution_function_is_constant_p (chrec_b
)
3478 && evolution_function_is_univariate_p (chrec_a
)))
3481 if (evolution_function_is_univariate_p (chrec_a
)
3482 && evolution_function_is_univariate_p (chrec_b
))
3484 switch (TREE_CODE (chrec_a
))
3486 case POLYNOMIAL_CHREC
:
3487 switch (TREE_CODE (chrec_b
))
3489 case POLYNOMIAL_CHREC
:
3490 if (CHREC_VARIABLE (chrec_a
) != CHREC_VARIABLE (chrec_b
))
3506 /* Creates a conflict function with N dimensions. The affine functions
3507 in each dimension follow. */
3509 static conflict_function
*
3510 conflict_fn (unsigned n
, ...)
3513 conflict_function
*ret
= XCNEW (conflict_function
);
3516 gcc_assert (n
> 0 && n
<= MAX_DIM
);
3520 for (i
= 0; i
< n
; i
++)
3521 ret
->fns
[i
] = va_arg (ap
, affine_fn
);
3527 /* Returns constant affine function with value CST. */
3530 affine_fn_cst (tree cst
)
3534 fn
.quick_push (cst
);
3538 /* Returns affine function with single variable, CST + COEF * x_DIM. */
3541 affine_fn_univar (tree cst
, unsigned dim
, tree coef
)
3544 fn
.create (dim
+ 1);
3547 gcc_assert (dim
> 0);
3548 fn
.quick_push (cst
);
3549 for (i
= 1; i
< dim
; i
++)
3550 fn
.quick_push (integer_zero_node
);
3551 fn
.quick_push (coef
);
3555 /* Analyze a ZIV (Zero Index Variable) subscript. *OVERLAPS_A and
3556 *OVERLAPS_B are initialized to the functions that describe the
3557 relation between the elements accessed twice by CHREC_A and
3558 CHREC_B. For k >= 0, the following property is verified:
3560 CHREC_A (*OVERLAPS_A (k)) = CHREC_B (*OVERLAPS_B (k)). */
3563 analyze_ziv_subscript (tree chrec_a
,
3565 conflict_function
**overlaps_a
,
3566 conflict_function
**overlaps_b
,
3567 tree
*last_conflicts
)
3569 tree type
, difference
;
3570 dependence_stats
.num_ziv
++;
3572 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
3573 fprintf (dump_file
, "(analyze_ziv_subscript \n");
3575 type
= signed_type_for_types (TREE_TYPE (chrec_a
), TREE_TYPE (chrec_b
));
3576 chrec_a
= chrec_convert (type
, chrec_a
, NULL
);
3577 chrec_b
= chrec_convert (type
, chrec_b
, NULL
);
3578 difference
= chrec_fold_minus (type
, chrec_a
, chrec_b
);
3580 switch (TREE_CODE (difference
))
3583 if (integer_zerop (difference
))
3585 /* The difference is equal to zero: the accessed index
3586 overlaps for each iteration in the loop. */
3587 *overlaps_a
= conflict_fn (1, affine_fn_cst (integer_zero_node
));
3588 *overlaps_b
= conflict_fn (1, affine_fn_cst (integer_zero_node
));
3589 *last_conflicts
= chrec_dont_know
;
3590 dependence_stats
.num_ziv_dependent
++;
3594 /* The accesses do not overlap. */
3595 *overlaps_a
= conflict_fn_no_dependence ();
3596 *overlaps_b
= conflict_fn_no_dependence ();
3597 *last_conflicts
= integer_zero_node
;
3598 dependence_stats
.num_ziv_independent
++;
3603 /* We're not sure whether the indexes overlap. For the moment,
3604 conservatively answer "don't know". */
3605 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
3606 fprintf (dump_file
, "ziv test failed: difference is non-integer.\n");
3608 *overlaps_a
= conflict_fn_not_known ();
3609 *overlaps_b
= conflict_fn_not_known ();
3610 *last_conflicts
= chrec_dont_know
;
3611 dependence_stats
.num_ziv_unimplemented
++;
3615 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
3616 fprintf (dump_file
, ")\n");
3619 /* Similar to max_stmt_executions_int, but returns the bound as a tree,
3620 and only if it fits to the int type. If this is not the case, or the
3621 bound on the number of iterations of LOOP could not be derived, returns
3625 max_stmt_executions_tree (class loop
*loop
)
3629 if (!max_stmt_executions (loop
, &nit
))
3630 return chrec_dont_know
;
3632 if (!wi::fits_to_tree_p (nit
, unsigned_type_node
))
3633 return chrec_dont_know
;
3635 return wide_int_to_tree (unsigned_type_node
, nit
);
3638 /* Determine whether the CHREC is always positive/negative. If the expression
3639 cannot be statically analyzed, return false, otherwise set the answer into
3643 chrec_is_positive (tree chrec
, bool *value
)
3645 bool value0
, value1
, value2
;
3646 tree end_value
, nb_iter
;
3648 switch (TREE_CODE (chrec
))
3650 case POLYNOMIAL_CHREC
:
3651 if (!chrec_is_positive (CHREC_LEFT (chrec
), &value0
)
3652 || !chrec_is_positive (CHREC_RIGHT (chrec
), &value1
))
3655 /* FIXME -- overflows. */
3656 if (value0
== value1
)
3662 /* Otherwise the chrec is under the form: "{-197, +, 2}_1",
3663 and the proof consists in showing that the sign never
3664 changes during the execution of the loop, from 0 to
3665 loop->nb_iterations. */
3666 if (!evolution_function_is_affine_p (chrec
))
3669 nb_iter
= number_of_latch_executions (get_chrec_loop (chrec
));
3670 if (chrec_contains_undetermined (nb_iter
))
3674 /* TODO -- If the test is after the exit, we may decrease the number of
3675 iterations by one. */
3677 nb_iter
= chrec_fold_minus (type
, nb_iter
, build_int_cst (type
, 1));
3680 end_value
= chrec_apply (CHREC_VARIABLE (chrec
), chrec
, nb_iter
);
3682 if (!chrec_is_positive (end_value
, &value2
))
3686 return value0
== value1
;
3689 switch (tree_int_cst_sgn (chrec
))
3708 /* Analyze a SIV (Single Index Variable) subscript where CHREC_A is a
3709 constant, and CHREC_B is an affine function. *OVERLAPS_A and
3710 *OVERLAPS_B are initialized to the functions that describe the
3711 relation between the elements accessed twice by CHREC_A and
3712 CHREC_B. For k >= 0, the following property is verified:
3714 CHREC_A (*OVERLAPS_A (k)) = CHREC_B (*OVERLAPS_B (k)). */
3717 analyze_siv_subscript_cst_affine (tree chrec_a
,
3719 conflict_function
**overlaps_a
,
3720 conflict_function
**overlaps_b
,
3721 tree
*last_conflicts
)
3723 bool value0
, value1
, value2
;
3724 tree type
, difference
, tmp
;
3726 type
= signed_type_for_types (TREE_TYPE (chrec_a
), TREE_TYPE (chrec_b
));
3727 chrec_a
= chrec_convert (type
, chrec_a
, NULL
);
3728 chrec_b
= chrec_convert (type
, chrec_b
, NULL
);
3729 difference
= chrec_fold_minus (type
, initial_condition (chrec_b
), chrec_a
);
3731 /* Special case overlap in the first iteration. */
3732 if (integer_zerop (difference
))
3734 *overlaps_a
= conflict_fn (1, affine_fn_cst (integer_zero_node
));
3735 *overlaps_b
= conflict_fn (1, affine_fn_cst (integer_zero_node
));
3736 *last_conflicts
= integer_one_node
;
3740 if (!chrec_is_positive (initial_condition (difference
), &value0
))
3742 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
3743 fprintf (dump_file
, "siv test failed: chrec is not positive.\n");
3745 dependence_stats
.num_siv_unimplemented
++;
3746 *overlaps_a
= conflict_fn_not_known ();
3747 *overlaps_b
= conflict_fn_not_known ();
3748 *last_conflicts
= chrec_dont_know
;
3753 if (value0
== false)
3755 if (TREE_CODE (chrec_b
) != POLYNOMIAL_CHREC
3756 || !chrec_is_positive (CHREC_RIGHT (chrec_b
), &value1
))
3758 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
3759 fprintf (dump_file
, "siv test failed: chrec not positive.\n");
3761 *overlaps_a
= conflict_fn_not_known ();
3762 *overlaps_b
= conflict_fn_not_known ();
3763 *last_conflicts
= chrec_dont_know
;
3764 dependence_stats
.num_siv_unimplemented
++;
3773 chrec_b = {10, +, 1}
3776 if (tree_fold_divides_p (CHREC_RIGHT (chrec_b
), difference
))
3778 HOST_WIDE_INT numiter
;
3779 class loop
*loop
= get_chrec_loop (chrec_b
);
3781 *overlaps_a
= conflict_fn (1, affine_fn_cst (integer_zero_node
));
3782 tmp
= fold_build2 (EXACT_DIV_EXPR
, type
,
3783 fold_build1 (ABS_EXPR
, type
, difference
),
3784 CHREC_RIGHT (chrec_b
));
3785 *overlaps_b
= conflict_fn (1, affine_fn_cst (tmp
));
3786 *last_conflicts
= integer_one_node
;
3789 /* Perform weak-zero siv test to see if overlap is
3790 outside the loop bounds. */
3791 numiter
= max_stmt_executions_int (loop
);
3794 && compare_tree_int (tmp
, numiter
) > 0)
3796 free_conflict_function (*overlaps_a
);
3797 free_conflict_function (*overlaps_b
);
3798 *overlaps_a
= conflict_fn_no_dependence ();
3799 *overlaps_b
= conflict_fn_no_dependence ();
3800 *last_conflicts
= integer_zero_node
;
3801 dependence_stats
.num_siv_independent
++;
3804 dependence_stats
.num_siv_dependent
++;
3808 /* When the step does not divide the difference, there are
3812 *overlaps_a
= conflict_fn_no_dependence ();
3813 *overlaps_b
= conflict_fn_no_dependence ();
3814 *last_conflicts
= integer_zero_node
;
3815 dependence_stats
.num_siv_independent
++;
3824 chrec_b = {10, +, -1}
3826 In this case, chrec_a will not overlap with chrec_b. */
3827 *overlaps_a
= conflict_fn_no_dependence ();
3828 *overlaps_b
= conflict_fn_no_dependence ();
3829 *last_conflicts
= integer_zero_node
;
3830 dependence_stats
.num_siv_independent
++;
3837 if (TREE_CODE (chrec_b
) != POLYNOMIAL_CHREC
3838 || !chrec_is_positive (CHREC_RIGHT (chrec_b
), &value2
))
3840 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
3841 fprintf (dump_file
, "siv test failed: chrec not positive.\n");
3843 *overlaps_a
= conflict_fn_not_known ();
3844 *overlaps_b
= conflict_fn_not_known ();
3845 *last_conflicts
= chrec_dont_know
;
3846 dependence_stats
.num_siv_unimplemented
++;
3851 if (value2
== false)
3855 chrec_b = {10, +, -1}
3857 if (tree_fold_divides_p (CHREC_RIGHT (chrec_b
), difference
))
3859 HOST_WIDE_INT numiter
;
3860 class loop
*loop
= get_chrec_loop (chrec_b
);
3862 *overlaps_a
= conflict_fn (1, affine_fn_cst (integer_zero_node
));
3863 tmp
= fold_build2 (EXACT_DIV_EXPR
, type
, difference
,
3864 CHREC_RIGHT (chrec_b
));
3865 *overlaps_b
= conflict_fn (1, affine_fn_cst (tmp
));
3866 *last_conflicts
= integer_one_node
;
3868 /* Perform weak-zero siv test to see if overlap is
3869 outside the loop bounds. */
3870 numiter
= max_stmt_executions_int (loop
);
3873 && compare_tree_int (tmp
, numiter
) > 0)
3875 free_conflict_function (*overlaps_a
);
3876 free_conflict_function (*overlaps_b
);
3877 *overlaps_a
= conflict_fn_no_dependence ();
3878 *overlaps_b
= conflict_fn_no_dependence ();
3879 *last_conflicts
= integer_zero_node
;
3880 dependence_stats
.num_siv_independent
++;
3883 dependence_stats
.num_siv_dependent
++;
3887 /* When the step does not divide the difference, there
3891 *overlaps_a
= conflict_fn_no_dependence ();
3892 *overlaps_b
= conflict_fn_no_dependence ();
3893 *last_conflicts
= integer_zero_node
;
3894 dependence_stats
.num_siv_independent
++;
3904 In this case, chrec_a will not overlap with chrec_b. */
3905 *overlaps_a
= conflict_fn_no_dependence ();
3906 *overlaps_b
= conflict_fn_no_dependence ();
3907 *last_conflicts
= integer_zero_node
;
3908 dependence_stats
.num_siv_independent
++;
3916 /* Helper recursive function for initializing the matrix A. Returns
3917 the initial value of CHREC. */
3920 initialize_matrix_A (lambda_matrix A
, tree chrec
, unsigned index
, int mult
)
3924 switch (TREE_CODE (chrec
))
3926 case POLYNOMIAL_CHREC
:
3927 if (!cst_and_fits_in_hwi (CHREC_RIGHT (chrec
)))
3928 return chrec_dont_know
;
3929 A
[index
][0] = mult
* int_cst_value (CHREC_RIGHT (chrec
));
3930 return initialize_matrix_A (A
, CHREC_LEFT (chrec
), index
+ 1, mult
);
3936 tree op0
= initialize_matrix_A (A
, TREE_OPERAND (chrec
, 0), index
, mult
);
3937 tree op1
= initialize_matrix_A (A
, TREE_OPERAND (chrec
, 1), index
, mult
);
3939 return chrec_fold_op (TREE_CODE (chrec
), chrec_type (chrec
), op0
, op1
);
3944 tree op
= initialize_matrix_A (A
, TREE_OPERAND (chrec
, 0), index
, mult
);
3945 return chrec_convert (chrec_type (chrec
), op
, NULL
);
3950 /* Handle ~X as -1 - X. */
3951 tree op
= initialize_matrix_A (A
, TREE_OPERAND (chrec
, 0), index
, mult
);
3952 return chrec_fold_op (MINUS_EXPR
, chrec_type (chrec
),
3953 build_int_cst (TREE_TYPE (chrec
), -1), op
);
3965 #define FLOOR_DIV(x,y) ((x) / (y))
3967 /* Solves the special case of the Diophantine equation:
3968 | {0, +, STEP_A}_x (OVERLAPS_A) = {0, +, STEP_B}_y (OVERLAPS_B)
3970 Computes the descriptions OVERLAPS_A and OVERLAPS_B. NITER is the
3971 number of iterations that loops X and Y run. The overlaps will be
3972 constructed as evolutions in dimension DIM. */
3975 compute_overlap_steps_for_affine_univar (HOST_WIDE_INT niter
,
3976 HOST_WIDE_INT step_a
,
3977 HOST_WIDE_INT step_b
,
3978 affine_fn
*overlaps_a
,
3979 affine_fn
*overlaps_b
,
3980 tree
*last_conflicts
, int dim
)
3982 if (((step_a
> 0 && step_b
> 0)
3983 || (step_a
< 0 && step_b
< 0)))
3985 HOST_WIDE_INT step_overlaps_a
, step_overlaps_b
;
3986 HOST_WIDE_INT gcd_steps_a_b
, last_conflict
, tau2
;
3988 gcd_steps_a_b
= gcd (step_a
, step_b
);
3989 step_overlaps_a
= step_b
/ gcd_steps_a_b
;
3990 step_overlaps_b
= step_a
/ gcd_steps_a_b
;
3994 tau2
= FLOOR_DIV (niter
, step_overlaps_a
);
3995 tau2
= MIN (tau2
, FLOOR_DIV (niter
, step_overlaps_b
));
3996 last_conflict
= tau2
;
3997 *last_conflicts
= build_int_cst (NULL_TREE
, last_conflict
);
4000 *last_conflicts
= chrec_dont_know
;
4002 *overlaps_a
= affine_fn_univar (integer_zero_node
, dim
,
4003 build_int_cst (NULL_TREE
,
4005 *overlaps_b
= affine_fn_univar (integer_zero_node
, dim
,
4006 build_int_cst (NULL_TREE
,
4012 *overlaps_a
= affine_fn_cst (integer_zero_node
);
4013 *overlaps_b
= affine_fn_cst (integer_zero_node
);
4014 *last_conflicts
= integer_zero_node
;
/* Solves the special case of a Diophantine equation where CHREC_A is
   an affine bivariate function, and CHREC_B is an affine univariate
   function.  For example,

   | {{0, +, 1}_x, +, 1335}_y = {0, +, 1336}_z

   has the following overlapping functions:

   | x (t, u, v) = {{0, +, 1336}_t, +, 1}_v
   | y (t, u, v) = {{0, +, 1336}_u, +, 1}_v
   | z (t, u, v) = {{{0, +, 1}_t, +, 1335}_u, +, 1}_v

   FORNOW: This is a specialized implementation for a case occurring in
   a common benchmark.  Implement the general algorithm.  */

static void
compute_overlap_steps_for_affine_1_2 (tree chrec_a, tree chrec_b,
                                      conflict_function **overlaps_a,
                                      conflict_function **overlaps_b,
                                      tree *last_conflicts)
{
  bool xz_p, yz_p, xyz_p;
  HOST_WIDE_INT step_x, step_y, step_z;
  HOST_WIDE_INT niter_x, niter_y, niter_z, niter;
  affine_fn overlaps_a_xz, overlaps_b_xz;
  affine_fn overlaps_a_yz, overlaps_b_yz;
  affine_fn overlaps_a_xyz, overlaps_b_xyz;
  affine_fn ova1, ova2, ovb;
  tree last_conflicts_xz, last_conflicts_yz, last_conflicts_xyz;

  step_x = int_cst_value (CHREC_RIGHT (CHREC_LEFT (chrec_a)));
  step_y = int_cst_value (CHREC_RIGHT (chrec_a));
  step_z = int_cst_value (CHREC_RIGHT (chrec_b));

  niter_x = max_stmt_executions_int (get_chrec_loop (CHREC_LEFT (chrec_a)));
  niter_y = max_stmt_executions_int (get_chrec_loop (chrec_a));
  niter_z = max_stmt_executions_int (get_chrec_loop (chrec_b));

  if (niter_x < 0 || niter_y < 0 || niter_z < 0)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file, "overlap steps test failed: no iteration counts.\n");

      *overlaps_a = conflict_fn_not_known ();
      *overlaps_b = conflict_fn_not_known ();
      *last_conflicts = chrec_dont_know;
      return;
    }

  niter = MIN (niter_x, niter_z);
  compute_overlap_steps_for_affine_univar (niter, step_x, step_z,
                                           &overlaps_a_xz,
                                           &overlaps_b_xz,
                                           &last_conflicts_xz, 1);
  niter = MIN (niter_y, niter_z);
  compute_overlap_steps_for_affine_univar (niter, step_y, step_z,
                                           &overlaps_a_yz,
                                           &overlaps_b_yz,
                                           &last_conflicts_yz, 2);
  niter = MIN (niter_x, niter_z);
  niter = MIN (niter_y, niter);
  compute_overlap_steps_for_affine_univar (niter, step_x + step_y, step_z,
                                           &overlaps_a_xyz,
                                           &overlaps_b_xyz,
                                           &last_conflicts_xyz, 3);

  xz_p = !integer_zerop (last_conflicts_xz);
  yz_p = !integer_zerop (last_conflicts_yz);
  xyz_p = !integer_zerop (last_conflicts_xyz);

  if (xz_p || yz_p || xyz_p)
    {
      ova1 = affine_fn_cst (integer_zero_node);
      ova2 = affine_fn_cst (integer_zero_node);
      ovb = affine_fn_cst (integer_zero_node);
      if (xz_p)
        {
          affine_fn t0 = ova1;
          affine_fn t2 = ovb;

          ova1 = affine_fn_plus (ova1, overlaps_a_xz);
          ovb = affine_fn_plus (ovb, overlaps_b_xz);
          affine_fn_free (t0);
          affine_fn_free (t2);
          *last_conflicts = last_conflicts_xz;
        }
      if (yz_p)
        {
          affine_fn t0 = ova2;
          affine_fn t2 = ovb;

          ova2 = affine_fn_plus (ova2, overlaps_a_yz);
          ovb = affine_fn_plus (ovb, overlaps_b_yz);
          affine_fn_free (t0);
          affine_fn_free (t2);
          *last_conflicts = last_conflicts_yz;
        }
      if (xyz_p)
        {
          affine_fn t0 = ova1;
          affine_fn t2 = ova2;
          affine_fn t4 = ovb;

          ova1 = affine_fn_plus (ova1, overlaps_a_xyz);
          ova2 = affine_fn_plus (ova2, overlaps_a_xyz);
          ovb = affine_fn_plus (ovb, overlaps_b_xyz);
          affine_fn_free (t0);
          affine_fn_free (t2);
          affine_fn_free (t4);
          *last_conflicts = last_conflicts_xyz;
        }
      *overlaps_a = conflict_fn (2, ova1, ova2);
      *overlaps_b = conflict_fn (1, ovb);
    }
  else
    {
      *overlaps_a = conflict_fn (1, affine_fn_cst (integer_zero_node));
      *overlaps_b = conflict_fn (1, affine_fn_cst (integer_zero_node));
      *last_conflicts = integer_zero_node;
    }

  affine_fn_free (overlaps_a_xz);
  affine_fn_free (overlaps_b_xz);
  affine_fn_free (overlaps_a_yz);
  affine_fn_free (overlaps_b_yz);
  affine_fn_free (overlaps_a_xyz);
  affine_fn_free (overlaps_b_xyz);
}
/* Copy the elements of vector VEC1 with length SIZE to VEC2.  */

static void
lambda_vector_copy (lambda_vector vec1, lambda_vector vec2,
                    int size)
{
  memcpy (vec2, vec1, size * sizeof (*vec1));
}

/* Copy the elements of M x N matrix MAT1 to MAT2.  */

static void
lambda_matrix_copy (lambda_matrix mat1, lambda_matrix mat2,
                    int m, int n)
{
  int i;

  for (i = 0; i < m; i++)
    lambda_vector_copy (mat1[i], mat2[i], n);
}

/* Store the N x N identity matrix in MAT.  */

static void
lambda_matrix_id (lambda_matrix mat, int size)
{
  int i, j;

  for (i = 0; i < size; i++)
    for (j = 0; j < size; j++)
      mat[i][j] = (i == j) ? 1 : 0;
}

/* Return the index of the first nonzero element of vector VEC1 between
   START and N.  We must have START <= N.
   Returns N if VEC1 is the zero vector.  */

static int
lambda_vector_first_nz (lambda_vector vec1, int n, int start)
{
  int j = start;
  while (j < n && vec1[j] == 0)
    j++;
  return j;
}

/* Add a multiple of row R1 of matrix MAT with N columns to row R2:
   R2 = R2 + CONST1 * R1.  */

static void
lambda_matrix_row_add (lambda_matrix mat, int n, int r1, int r2,
                       lambda_int const1)
{
  int i;

  if (const1 == 0)
    return;

  for (i = 0; i < n; i++)
    mat[r2][i] += const1 * mat[r1][i];
}

/* Multiply vector VEC1 of length SIZE by a constant CONST1,
   and store the result in VEC2.  */

static void
lambda_vector_mult_const (lambda_vector vec1, lambda_vector vec2,
                          int size, lambda_int const1)
{
  int i;

  if (const1 == 0)
    lambda_vector_clear (vec2, size);
  else
    for (i = 0; i < size; i++)
      vec2[i] = const1 * vec1[i];
}

/* Negate vector VEC1 with length SIZE and store it in VEC2.  */

static void
lambda_vector_negate (lambda_vector vec1, lambda_vector vec2,
                      int size)
{
  lambda_vector_mult_const (vec1, vec2, size, -1);
}

/* Negate row R1 of matrix MAT which has N columns.  */

static void
lambda_matrix_row_negate (lambda_matrix mat, int n, int r1)
{
  lambda_vector_negate (mat[r1], mat[r1], n);
}

/* Return true if two vectors are equal.  */

static bool
lambda_vector_equal (lambda_vector vec1, lambda_vector vec2, int size)
{
  int i;

  for (i = 0; i < size; i++)
    if (vec1[i] != vec2[i])
      return false;
  return true;
}
/* Given an M x N integer matrix A, this function determines an M x
   M unimodular matrix U, and an M x N echelon matrix S such that
   "U.A = S".  This decomposition is also known as "right Hermite".

   Ref: Algorithm 2.1 page 33 in "Loop Transformations for
   Restructuring Compilers" Utpal Banerjee.  */

static void
lambda_matrix_right_hermite (lambda_matrix A, int m, int n,
                             lambda_matrix S, lambda_matrix U)
{
  int i, j, i0 = 0;

  lambda_matrix_copy (A, S, m, n);
  lambda_matrix_id (U, m);

  for (j = 0; j < n; j++)
    if (lambda_vector_first_nz (S[j], m, i0) < m)
      {
        ++i0;
        for (i = m - 1; i >= i0; i--)
          {
            while (S[i][j] != 0)
              {
                lambda_int sigma, factor, a, b;

                a = S[i-1][j];
                b = S[i][j];
                sigma = ((a < 0) ^ (b < 0)) ? -1: 1;
                unsigned HOST_WIDE_INT abs_a = absu_hwi (a);
                unsigned HOST_WIDE_INT abs_b = absu_hwi (b);
                factor = sigma * (lambda_int)(abs_a / abs_b);

                lambda_matrix_row_add (S, n, i, i-1, -factor);
                std::swap (S[i], S[i-1]);

                lambda_matrix_row_add (U, m, i, i-1, -factor);
                std::swap (U[i], U[i-1]);
              }
          }
      }
}
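/* A worked example (editor's illustration): for the 2 x 1 matrix
   A = [4 -6]^T, the Euclidean reduction above yields

   | U = [ 1  1 ]      S = [ -2 ]
   |     [ 3  2 ]          [  0 ]

   U is unimodular (det U = -1), "U.A = S" holds, and S[0][0] = -2 is
   (up to sign) the gcd of the entries of A, as exploited by the
   gcd-test in analyze_subscript_affine_affine below.  */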
/* Determines the overlapping elements due to accesses CHREC_A and
   CHREC_B, that are affine functions.  This function cannot handle
   symbolic evolution functions, i.e. when initial conditions are
   parameters, because it uses lambda matrices of integers.  */

static void
analyze_subscript_affine_affine (tree chrec_a,
                                 tree chrec_b,
                                 conflict_function **overlaps_a,
                                 conflict_function **overlaps_b,
                                 tree *last_conflicts)
{
  unsigned nb_vars_a, nb_vars_b, dim;
  lambda_int gamma, gcd_alpha_beta;
  lambda_matrix A, U, S;
  struct obstack scratch_obstack;

  if (eq_evolutions_p (chrec_a, chrec_b))
    {
      /* The accessed index overlaps for each iteration in the
         loop.  */
      *overlaps_a = conflict_fn (1, affine_fn_cst (integer_zero_node));
      *overlaps_b = conflict_fn (1, affine_fn_cst (integer_zero_node));
      *last_conflicts = chrec_dont_know;
      return;
    }
  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "(analyze_subscript_affine_affine \n");

  /* For determining the initial intersection, we have to solve a
     Diophantine equation.  This is the most time consuming part.

     For answering to the question: "Is there a dependence?" we have
     to prove that there exists a solution to the Diophantine
     equation, and that the solution is in the iteration domain,
     i.e. the solution is positive or zero, and that the solution
     happens before the upper bound loop.nb_iterations.  Otherwise
     there is no dependence.  This function outputs a description of
     the iterations that hold the intersections.  */

  nb_vars_a = nb_vars_in_chrec (chrec_a);
  nb_vars_b = nb_vars_in_chrec (chrec_b);

  gcc_obstack_init (&scratch_obstack);

  dim = nb_vars_a + nb_vars_b;
  U = lambda_matrix_new (dim, dim, &scratch_obstack);
  A = lambda_matrix_new (dim, 1, &scratch_obstack);
  S = lambda_matrix_new (dim, 1, &scratch_obstack);

  tree init_a = initialize_matrix_A (A, chrec_a, 0, 1);
  tree init_b = initialize_matrix_A (A, chrec_b, nb_vars_a, -1);
  if (init_a == chrec_dont_know
      || init_b == chrec_dont_know)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file, "affine-affine test failed: "
                 "representation issue.\n");
      *overlaps_a = conflict_fn_not_known ();
      *overlaps_b = conflict_fn_not_known ();
      *last_conflicts = chrec_dont_know;
      goto end_analyze_subs_aa;
    }
  gamma = int_cst_value (init_b) - int_cst_value (init_a);

  /* Don't do all the hard work of solving the Diophantine equation
     when we already know the solution: for example,
     | {3, +, 1}_1
     | {3, +, 4}_2
     | gamma = 3 - 3 = 0.
     Then the first overlap occurs during the first iterations:
     | {3, +, 1}_1 ({0, +, 4}_x) = {3, +, 4}_2 ({0, +, 1}_x)
  */
  if (gamma == 0)
    {
      if (nb_vars_a == 1 && nb_vars_b == 1)
        {
          HOST_WIDE_INT step_a, step_b;
          HOST_WIDE_INT niter, niter_a, niter_b;
          affine_fn ova, ovb;

          niter_a = max_stmt_executions_int (get_chrec_loop (chrec_a));
          niter_b = max_stmt_executions_int (get_chrec_loop (chrec_b));
          niter = MIN (niter_a, niter_b);
          step_a = int_cst_value (CHREC_RIGHT (chrec_a));
          step_b = int_cst_value (CHREC_RIGHT (chrec_b));

          compute_overlap_steps_for_affine_univar (niter, step_a, step_b,
                                                   &ova, &ovb,
                                                   last_conflicts, 1);
          *overlaps_a = conflict_fn (1, ova);
          *overlaps_b = conflict_fn (1, ovb);
        }

      else if (nb_vars_a == 2 && nb_vars_b == 1)
        compute_overlap_steps_for_affine_1_2
          (chrec_a, chrec_b, overlaps_a, overlaps_b, last_conflicts);

      else if (nb_vars_a == 1 && nb_vars_b == 2)
        compute_overlap_steps_for_affine_1_2
          (chrec_b, chrec_a, overlaps_b, overlaps_a, last_conflicts);

      else
        {
          if (dump_file && (dump_flags & TDF_DETAILS))
            fprintf (dump_file, "affine-affine test failed: too many variables.\n");
          *overlaps_a = conflict_fn_not_known ();
          *overlaps_b = conflict_fn_not_known ();
          *last_conflicts = chrec_dont_know;
        }
      goto end_analyze_subs_aa;
    }

  /* U.A = S */
  lambda_matrix_right_hermite (A, dim, 1, S, U);

  if (S[0][0] < 0)
    {
      S[0][0] *= -1;
      lambda_matrix_row_negate (U, dim, 0);
    }
  gcd_alpha_beta = S[0][0];

  /* Something went wrong: for example in {1, +, 0}_5 vs. {0, +, 0}_5,
     but that is a quite strange case.  Instead of ICEing, answer
     don't know.  */
  if (gcd_alpha_beta == 0)
    {
      *overlaps_a = conflict_fn_not_known ();
      *overlaps_b = conflict_fn_not_known ();
      *last_conflicts = chrec_dont_know;
      goto end_analyze_subs_aa;
    }

  /* The classic "gcd-test".  */
  if (!int_divides_p (gcd_alpha_beta, gamma))
    {
      /* The "gcd-test" has determined that there is no integer
         solution, i.e. there is no dependence.  */
      *overlaps_a = conflict_fn_no_dependence ();
      *overlaps_b = conflict_fn_no_dependence ();
      *last_conflicts = integer_zero_node;
    }

  /* Both access functions are univariate.  This includes SIV and MIV cases.  */
  else if (nb_vars_a == 1 && nb_vars_b == 1)
    {
      /* Both functions should have the same evolution sign.  */
      if (((A[0][0] > 0 && -A[1][0] > 0)
           || (A[0][0] < 0 && -A[1][0] < 0)))
        {
          /* The solutions are given by:

             | [GAMMA/GCD_ALPHA_BETA  t].[u11 u12]  = [x0]
             |                           [u21 u22]    [y0]

             For a given integer t.  Using the following variables,

             | i0 = u11 * gamma / gcd_alpha_beta
             | j0 = u12 * gamma / gcd_alpha_beta
             | i1 = u21
             | j1 = u22

             the solutions are:

             | x0 = i0 + i1 * t,
             | y0 = j0 + j1 * t.  */
          HOST_WIDE_INT i0, j0, i1, j1;

          i0 = U[0][0] * gamma / gcd_alpha_beta;
          j0 = U[0][1] * gamma / gcd_alpha_beta;
          i1 = U[1][0];
          j1 = U[1][1];

          if ((i1 == 0 && i0 < 0)
              || (j1 == 0 && j0 < 0))
            {
              /* There is no solution.
                 FIXME: The case "i0 > nb_iterations, j0 > nb_iterations"
                 falls in here, but for the moment we don't look at the
                 upper bound of the iteration domain.  */
              *overlaps_a = conflict_fn_no_dependence ();
              *overlaps_b = conflict_fn_no_dependence ();
              *last_conflicts = integer_zero_node;
              goto end_analyze_subs_aa;
            }

          if (i1 > 0 && j1 > 0)
            {
              HOST_WIDE_INT niter_a
                = max_stmt_executions_int (get_chrec_loop (chrec_a));
              HOST_WIDE_INT niter_b
                = max_stmt_executions_int (get_chrec_loop (chrec_b));
              HOST_WIDE_INT niter = MIN (niter_a, niter_b);

              /* (X0, Y0) is a solution of the Diophantine equation:
                 "chrec_a (X0) = chrec_b (Y0)".  */
              HOST_WIDE_INT tau1 = MAX (CEIL (-i0, i1),
                                        CEIL (-j0, j1));
              HOST_WIDE_INT x0 = i1 * tau1 + i0;
              HOST_WIDE_INT y0 = j1 * tau1 + j0;

              /* (X1, Y1) is the smallest positive solution of the eq
                 "chrec_a (X1) = chrec_b (Y1)", i.e. this is where the
                 first conflict occurs.  */
              HOST_WIDE_INT min_multiple = MIN (x0 / i1, y0 / j1);
              HOST_WIDE_INT x1 = x0 - i1 * min_multiple;
              HOST_WIDE_INT y1 = y0 - j1 * min_multiple;

              if (niter > 0)
                {
                  /* If the overlap occurs outside of the bounds of the
                     loop, there is no dependence.  */
                  if (x1 >= niter_a || y1 >= niter_b)
                    {
                      *overlaps_a = conflict_fn_no_dependence ();
                      *overlaps_b = conflict_fn_no_dependence ();
                      *last_conflicts = integer_zero_node;
                      goto end_analyze_subs_aa;
                    }

                  /* max stmt executions can get quite large, avoid
                     overflows by using wide ints here.  */
                  widest_int tau2
                    = wi::smin (wi::sdiv_floor (wi::sub (niter_a, i0), i1),
                                wi::sdiv_floor (wi::sub (niter_b, j0), j1));
                  widest_int last_conflict = wi::sub (tau2, (x1 - i0)/i1);
                  if (wi::min_precision (last_conflict, SIGNED)
                      <= TYPE_PRECISION (integer_type_node))
                    *last_conflicts
                      = build_int_cst (integer_type_node,
                                       last_conflict.to_shwi ());
                  else
                    *last_conflicts = chrec_dont_know;
                }
              else
                *last_conflicts = chrec_dont_know;

              *overlaps_a
                = conflict_fn (1,
                               affine_fn_univar (build_int_cst (NULL_TREE, x1),
                                                 1,
                                                 build_int_cst (NULL_TREE, i1)));
              *overlaps_b
                = conflict_fn (1,
                               affine_fn_univar (build_int_cst (NULL_TREE, y1),
                                                 1,
                                                 build_int_cst (NULL_TREE, j1)));
            }
          else
            {
              /* FIXME: For the moment, the upper bound of the
                 iteration domain for i and j is not checked.  */
              if (dump_file && (dump_flags & TDF_DETAILS))
                fprintf (dump_file, "affine-affine test failed: unimplemented.\n");
              *overlaps_a = conflict_fn_not_known ();
              *overlaps_b = conflict_fn_not_known ();
              *last_conflicts = chrec_dont_know;
            }
        }
      else
        {
          if (dump_file && (dump_flags & TDF_DETAILS))
            fprintf (dump_file, "affine-affine test failed: unimplemented.\n");
          *overlaps_a = conflict_fn_not_known ();
          *overlaps_b = conflict_fn_not_known ();
          *last_conflicts = chrec_dont_know;
        }
    }
  else
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file, "affine-affine test failed: unimplemented.\n");
      *overlaps_a = conflict_fn_not_known ();
      *overlaps_b = conflict_fn_not_known ();
      *last_conflicts = chrec_dont_know;
    }

end_analyze_subs_aa:
  obstack_free (&scratch_obstack, NULL);
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "  (overlaps_a = ");
      dump_conflict_function (dump_file, *overlaps_a);
      fprintf (dump_file, ")\n  (overlaps_b = ");
      dump_conflict_function (dump_file, *overlaps_b);
      fprintf (dump_file, "))\n");
    }
}
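/* A worked instance of the gcd-test above (editor's illustration):
   for chrec_a = {3, +, 4}_1 and chrec_b = {1, +, 6}_1 the matrix
   A = [4 -6]^T gives gcd_alpha_beta = 2 and gamma = 1 - 3 = -2; since
   2 divides -2 a dependence is possible, and indeed
   3 + 4*1 = 1 + 6*1 = 7.  With chrec_b = {0, +, 6}_1 instead,
   gamma = -3 is not divisible by 2, so the accesses are
   independent.  */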
/* Returns true when analyze_subscript_affine_affine can be used for
   determining the dependence relation between chrec_a and chrec_b,
   that contain symbols.  This function modifies chrec_a and chrec_b
   such that the analysis result is the same, and such that they don't
   contain symbols, and then can safely be passed to the analyzer.

   Example: The analysis of the following tuples of evolutions produce
   the same results: {x+1, +, 1}_1 vs. {x+3, +, 1}_1, and {-2, +, 1}_1
   vs. {0, +, 1}_1

   {x+1, +, 1}_1 ({2, +, 1}_1) = {x+3, +, 1}_1 ({0, +, 1}_1)
   {-2, +, 1}_1 ({2, +, 1}_1) = {0, +, 1}_1 ({0, +, 1}_1)
*/

static bool
can_use_analyze_subscript_affine_affine (tree *chrec_a, tree *chrec_b)
{
  tree diff, type, left_a, left_b, right_b;

  if (chrec_contains_symbols (CHREC_RIGHT (*chrec_a))
      || chrec_contains_symbols (CHREC_RIGHT (*chrec_b)))
    /* FIXME: For the moment not handled.  Might be refined later.  */
    return false;

  type = chrec_type (*chrec_a);
  left_a = CHREC_LEFT (*chrec_a);
  left_b = chrec_convert (type, CHREC_LEFT (*chrec_b), NULL);
  diff = chrec_fold_minus (type, left_a, left_b);

  if (!evolution_function_is_constant_p (diff))
    return false;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "can_use_subscript_aff_aff_for_symbolic \n");

  *chrec_a = build_polynomial_chrec (CHREC_VARIABLE (*chrec_a),
                                     diff, CHREC_RIGHT (*chrec_a));
  right_b = chrec_convert (type, CHREC_RIGHT (*chrec_b), NULL);
  *chrec_b = build_polynomial_chrec (CHREC_VARIABLE (*chrec_b),
                                     build_int_cst (type, 0),
                                     right_b);
  return true;
}
/* Analyze a SIV (Single Index Variable) subscript.  *OVERLAPS_A and
   *OVERLAPS_B are initialized to the functions that describe the
   relation between the elements accessed twice by CHREC_A and
   CHREC_B.  For k >= 0, the following property is verified:

   CHREC_A (*OVERLAPS_A (k)) = CHREC_B (*OVERLAPS_B (k)).  */

static void
analyze_siv_subscript (tree chrec_a,
                       tree chrec_b,
                       conflict_function **overlaps_a,
                       conflict_function **overlaps_b,
                       tree *last_conflicts,
                       int loop_nest_num)
{
  dependence_stats.num_siv++;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "(analyze_siv_subscript \n");

  if (evolution_function_is_constant_p (chrec_a)
      && evolution_function_is_affine_in_loop (chrec_b, loop_nest_num))
    analyze_siv_subscript_cst_affine (chrec_a, chrec_b,
                                      overlaps_a, overlaps_b, last_conflicts);

  else if (evolution_function_is_affine_in_loop (chrec_a, loop_nest_num)
           && evolution_function_is_constant_p (chrec_b))
    analyze_siv_subscript_cst_affine (chrec_b, chrec_a,
                                      overlaps_b, overlaps_a, last_conflicts);

  else if (evolution_function_is_affine_in_loop (chrec_a, loop_nest_num)
           && evolution_function_is_affine_in_loop (chrec_b, loop_nest_num))
    {
      if (!chrec_contains_symbols (chrec_a)
          && !chrec_contains_symbols (chrec_b))
        {
          analyze_subscript_affine_affine (chrec_a, chrec_b,
                                           overlaps_a, overlaps_b,
                                           last_conflicts);

          if (CF_NOT_KNOWN_P (*overlaps_a)
              || CF_NOT_KNOWN_P (*overlaps_b))
            dependence_stats.num_siv_unimplemented++;
          else if (CF_NO_DEPENDENCE_P (*overlaps_a)
                   || CF_NO_DEPENDENCE_P (*overlaps_b))
            dependence_stats.num_siv_independent++;
          else
            dependence_stats.num_siv_dependent++;
        }
      else if (can_use_analyze_subscript_affine_affine (&chrec_a,
                                                        &chrec_b))
        {
          analyze_subscript_affine_affine (chrec_a, chrec_b,
                                           overlaps_a, overlaps_b,
                                           last_conflicts);

          if (CF_NOT_KNOWN_P (*overlaps_a)
              || CF_NOT_KNOWN_P (*overlaps_b))
            dependence_stats.num_siv_unimplemented++;
          else if (CF_NO_DEPENDENCE_P (*overlaps_a)
                   || CF_NO_DEPENDENCE_P (*overlaps_b))
            dependence_stats.num_siv_independent++;
          else
            dependence_stats.num_siv_dependent++;
        }
      else
        goto siv_subscript_dontknow;
    }

  else
    {
    siv_subscript_dontknow:;
      if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file, "siv test failed: unimplemented");
      *overlaps_a = conflict_fn_not_known ();
      *overlaps_b = conflict_fn_not_known ();
      *last_conflicts = chrec_dont_know;
      dependence_stats.num_siv_unimplemented++;
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, ")\n");
}
/* Returns false if we can prove that the greatest common divisor of the steps
   of CHREC does not divide CST, true otherwise.  */

static bool
gcd_of_steps_may_divide_p (const_tree chrec, const_tree cst)
{
  HOST_WIDE_INT cd = 0, val;
  tree step;

  if (!tree_fits_shwi_p (cst))
    return true;
  val = tree_to_shwi (cst);

  while (TREE_CODE (chrec) == POLYNOMIAL_CHREC)
    {
      step = CHREC_RIGHT (chrec);
      if (!tree_fits_shwi_p (step))
        return true;
      cd = gcd (cd, tree_to_shwi (step));
      chrec = CHREC_LEFT (chrec);
    }

  return val % cd == 0;
}
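/* For example (editor's illustration): for
   CHREC = {{21, +, 2}_1, +, -2}_2 the steps are 2 and -2, whose gcd
   is 2.  With CST = 1 the function returns false since 1 % 2 != 0,
   which proves independence in analyze_miv_subscript below; with
   CST = 4 it returns true.  */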
/* Analyze a MIV (Multiple Index Variable) subscript with respect to
   LOOP_NEST.  *OVERLAPS_A and *OVERLAPS_B are initialized to the
   functions that describe the relation between the elements accessed
   twice by CHREC_A and CHREC_B.  For k >= 0, the following property
   is verified:

   CHREC_A (*OVERLAPS_A (k)) = CHREC_B (*OVERLAPS_B (k)).  */

static void
analyze_miv_subscript (tree chrec_a,
                       tree chrec_b,
                       conflict_function **overlaps_a,
                       conflict_function **overlaps_b,
                       tree *last_conflicts,
                       class loop *loop_nest)
{
  tree type, difference;

  dependence_stats.num_miv++;
  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "(analyze_miv_subscript \n");

  type = signed_type_for_types (TREE_TYPE (chrec_a), TREE_TYPE (chrec_b));
  chrec_a = chrec_convert (type, chrec_a, NULL);
  chrec_b = chrec_convert (type, chrec_b, NULL);
  difference = chrec_fold_minus (type, chrec_a, chrec_b);

  if (eq_evolutions_p (chrec_a, chrec_b))
    {
      /* Access functions are the same: all the elements are accessed
         in the same order.  */
      *overlaps_a = conflict_fn (1, affine_fn_cst (integer_zero_node));
      *overlaps_b = conflict_fn (1, affine_fn_cst (integer_zero_node));
      *last_conflicts = max_stmt_executions_tree (get_chrec_loop (chrec_a));
      dependence_stats.num_miv_dependent++;
    }

  else if (evolution_function_is_constant_p (difference)
           && evolution_function_is_affine_multivariate_p (chrec_a,
                                                           loop_nest->num)
           && !gcd_of_steps_may_divide_p (chrec_a, difference))
    {
      /* testsuite/.../ssa-chrec-33.c
         {{21, +, 2}_1, +, -2}_2  vs.  {{20, +, 2}_1, +, -2}_2

         The difference is 1, and all the evolution steps are multiples
         of 2, consequently there are no overlapping elements.  */
      *overlaps_a = conflict_fn_no_dependence ();
      *overlaps_b = conflict_fn_no_dependence ();
      *last_conflicts = integer_zero_node;
      dependence_stats.num_miv_independent++;
    }

  else if (evolution_function_is_affine_in_loop (chrec_a, loop_nest->num)
           && !chrec_contains_symbols (chrec_a, loop_nest)
           && evolution_function_is_affine_in_loop (chrec_b, loop_nest->num)
           && !chrec_contains_symbols (chrec_b, loop_nest))
    {
      /* testsuite/.../ssa-chrec-35.c
         {0, +, 1}_2  vs.  {0, +, 1}_3
         the overlapping elements are respectively located at iterations:
         {0, +, 1}_x and {0, +, 1}_x,
         in other words, we have the equality:
         {0, +, 1}_2 ({0, +, 1}_x) = {0, +, 1}_3 ({0, +, 1}_x)

         Other examples:
         {{0, +, 1}_1, +, 2}_2 ({0, +, 1}_x, {0, +, 1}_y) =
         {0, +, 1}_1 ({{0, +, 1}_x, +, 2}_y)

         {{0, +, 2}_1, +, 3}_2 ({0, +, 1}_y, {0, +, 1}_x) =
         {{0, +, 3}_1, +, 2}_2 ({0, +, 1}_x, {0, +, 1}_y)
      */
      analyze_subscript_affine_affine (chrec_a, chrec_b,
                                       overlaps_a, overlaps_b, last_conflicts);

      if (CF_NOT_KNOWN_P (*overlaps_a)
          || CF_NOT_KNOWN_P (*overlaps_b))
        dependence_stats.num_miv_unimplemented++;
      else if (CF_NO_DEPENDENCE_P (*overlaps_a)
               || CF_NO_DEPENDENCE_P (*overlaps_b))
        dependence_stats.num_miv_independent++;
      else
        dependence_stats.num_miv_dependent++;
    }

  else
    {
      /* When the analysis is too difficult, answer "don't know".  */
      if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file, "analyze_miv_subscript test failed: unimplemented.\n");

      *overlaps_a = conflict_fn_not_known ();
      *overlaps_b = conflict_fn_not_known ();
      *last_conflicts = chrec_dont_know;
      dependence_stats.num_miv_unimplemented++;
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, ")\n");
}
/* Determines the iterations for which CHREC_A is equal to CHREC_B
   with respect to LOOP_NEST.  OVERLAP_ITERATIONS_A and
   OVERLAP_ITERATIONS_B are initialized with two functions that
   describe the iterations that contain conflicting elements.

   Remark: For an integer k >= 0, the following equality is true:

   CHREC_A (OVERLAP_ITERATIONS_A (k)) == CHREC_B (OVERLAP_ITERATIONS_B (k)).
*/

static void
analyze_overlapping_iterations (tree chrec_a,
                                tree chrec_b,
                                conflict_function **overlap_iterations_a,
                                conflict_function **overlap_iterations_b,
                                tree *last_conflicts, class loop *loop_nest)
{
  unsigned int lnn = loop_nest->num;

  dependence_stats.num_subscript_tests++;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "(analyze_overlapping_iterations \n");
      fprintf (dump_file, "  (chrec_a = ");
      print_generic_expr (dump_file, chrec_a);
      fprintf (dump_file, ")\n  (chrec_b = ");
      print_generic_expr (dump_file, chrec_b);
      fprintf (dump_file, ")\n");
    }

  if (chrec_a == NULL_TREE
      || chrec_b == NULL_TREE
      || chrec_contains_undetermined (chrec_a)
      || chrec_contains_undetermined (chrec_b))
    {
      dependence_stats.num_subscript_undetermined++;

      *overlap_iterations_a = conflict_fn_not_known ();
      *overlap_iterations_b = conflict_fn_not_known ();
    }

  /* If they are the same chrec, and are affine, they overlap
     on every iteration.  */
  else if (eq_evolutions_p (chrec_a, chrec_b)
           && (evolution_function_is_affine_multivariate_p (chrec_a, lnn)
               || operand_equal_p (chrec_a, chrec_b, 0)))
    {
      dependence_stats.num_same_subscript_function++;
      *overlap_iterations_a = conflict_fn (1, affine_fn_cst (integer_zero_node));
      *overlap_iterations_b = conflict_fn (1, affine_fn_cst (integer_zero_node));
      *last_conflicts = chrec_dont_know;
    }

  /* If they aren't the same, and aren't affine, we can't do anything
     yet.  */
  else if ((chrec_contains_symbols (chrec_a)
            || chrec_contains_symbols (chrec_b))
           && (!evolution_function_is_affine_multivariate_p (chrec_a, lnn)
               || !evolution_function_is_affine_multivariate_p (chrec_b, lnn)))
    {
      dependence_stats.num_subscript_undetermined++;
      *overlap_iterations_a = conflict_fn_not_known ();
      *overlap_iterations_b = conflict_fn_not_known ();
    }

  else if (ziv_subscript_p (chrec_a, chrec_b))
    analyze_ziv_subscript (chrec_a, chrec_b,
                           overlap_iterations_a, overlap_iterations_b,
                           last_conflicts);

  else if (siv_subscript_p (chrec_a, chrec_b))
    analyze_siv_subscript (chrec_a, chrec_b,
                           overlap_iterations_a, overlap_iterations_b,
                           last_conflicts, lnn);

  else
    analyze_miv_subscript (chrec_a, chrec_b,
                           overlap_iterations_a, overlap_iterations_b,
                           last_conflicts, loop_nest);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "  (overlap_iterations_a = ");
      dump_conflict_function (dump_file, *overlap_iterations_a);
      fprintf (dump_file, ")\n  (overlap_iterations_b = ");
      dump_conflict_function (dump_file, *overlap_iterations_b);
      fprintf (dump_file, "))\n");
    }
}
/* Helper function for uniquely inserting distance vectors.  */

static void
save_dist_v (struct data_dependence_relation *ddr, lambda_vector dist_v)
{
  unsigned i;
  lambda_vector v;

  FOR_EACH_VEC_ELT (DDR_DIST_VECTS (ddr), i, v)
    if (lambda_vector_equal (v, dist_v, DDR_NB_LOOPS (ddr)))
      return;

  DDR_DIST_VECTS (ddr).safe_push (dist_v);
}

/* Helper function for uniquely inserting direction vectors.  */

static void
save_dir_v (struct data_dependence_relation *ddr, lambda_vector dir_v)
{
  unsigned i;
  lambda_vector v;

  FOR_EACH_VEC_ELT (DDR_DIR_VECTS (ddr), i, v)
    if (lambda_vector_equal (v, dir_v, DDR_NB_LOOPS (ddr)))
      return;

  DDR_DIR_VECTS (ddr).safe_push (dir_v);
}

/* Add a distance of 1 on all the loops outer than INDEX.  If we
   haven't yet determined a distance for this outer loop, push a new
   distance vector composed of the previous distance, and a distance
   of 1 for this outer loop.  Example:

   | loop_1
   |   loop_2
   |     A[10]
   |   endloop_2
   | endloop_1

   Saved vectors are of the form (dist_in_1, dist_in_2).  First, we
   save (0, 1), then we have to save (1, 0).  */

static void
add_outer_distances (struct data_dependence_relation *ddr,
                     lambda_vector dist_v, int index)
{
  /* For each outer loop where init_v is not set, the accesses are
     in dependence of distance 1 in the loop.  */
  while (--index >= 0)
    {
      lambda_vector save_v = lambda_vector_new (DDR_NB_LOOPS (ddr));
      lambda_vector_copy (dist_v, save_v, DDR_NB_LOOPS (ddr));
      save_v[index] = 1;
      save_dist_v (ddr, save_v);
    }
}
/* Return false when we fail to represent the data dependence as a
   distance vector.  A_INDEX is the index of the first reference
   (0 for DDR_A, 1 for DDR_B) and B_INDEX is the index of the
   second reference.  INIT_B is set to true when a component has been
   added to the distance vector DIST_V.  INDEX_CARRY is then set to
   the index in DIST_V that carries the dependence.  */

static bool
build_classic_dist_vector_1 (struct data_dependence_relation *ddr,
                             unsigned int a_index, unsigned int b_index,
                             lambda_vector dist_v, bool *init_b,
                             int *index_carry)
{
  unsigned i;
  lambda_vector init_v = lambda_vector_new (DDR_NB_LOOPS (ddr));
  class loop *loop = DDR_LOOP_NEST (ddr)[0];

  for (i = 0; i < DDR_NUM_SUBSCRIPTS (ddr); i++)
    {
      tree access_fn_a, access_fn_b;
      struct subscript *subscript = DDR_SUBSCRIPT (ddr, i);

      if (chrec_contains_undetermined (SUB_DISTANCE (subscript)))
        {
          non_affine_dependence_relation (ddr);
          return false;
        }

      access_fn_a = SUB_ACCESS_FN (subscript, a_index);
      access_fn_b = SUB_ACCESS_FN (subscript, b_index);

      if (TREE_CODE (access_fn_a) == POLYNOMIAL_CHREC
          && TREE_CODE (access_fn_b) == POLYNOMIAL_CHREC)
        {
          HOST_WIDE_INT dist;
          int index;
          int var_a = CHREC_VARIABLE (access_fn_a);
          int var_b = CHREC_VARIABLE (access_fn_b);

          if (var_a != var_b
              || chrec_contains_undetermined (SUB_DISTANCE (subscript)))
            {
              non_affine_dependence_relation (ddr);
              return false;
            }

          /* When data references are collected in a loop while data
             dependences are analyzed in a loop nest nested in that loop,
             there may be more access functions than loops.  Skip access
             functions of loops not in the loop nest.

             See PR89725 for more information.  */
          if (flow_loop_nested_p (get_loop (cfun, var_a), loop))
            continue;

          dist = int_cst_value (SUB_DISTANCE (subscript));
          index = index_in_loop_nest (var_a, DDR_LOOP_NEST (ddr));
          *index_carry = MIN (index, *index_carry);

          /* This is the subscript coupling test.  If we have already
             recorded a distance for this loop (a distance coming from
             another subscript), it should be the same.  For example,
             in the following code, there is no dependence:

             | loop i = 0, N, 1
             |   T[i+1][i] = ...
             |   ... = T[i][i]
             | endloop
          */
          if (init_v[index] != 0 && dist_v[index] != dist)
            {
              finalize_ddr_dependent (ddr, chrec_known);
              return false;
            }

          dist_v[index] = dist;
          init_v[index] = 1;
          *init_b = true;
        }
      else if (!operand_equal_p (access_fn_a, access_fn_b, 0))
        {
          /* This can be for example an affine vs. constant dependence
             (T[i] vs. T[3]) that is not an affine dependence and is
             not representable as a distance vector.  */
          non_affine_dependence_relation (ddr);
          return false;
        }
    }

  return true;
}
/* Return true when the DDR contains only access functions that are
   invariant with respect to loop number LNUM.  */

static bool
invariant_access_functions (const struct data_dependence_relation *ddr,
                            int lnum)
{
  unsigned i;
  subscript *sub;

  FOR_EACH_VEC_ELT (DDR_SUBSCRIPTS (ddr), i, sub)
    if (!evolution_function_is_invariant_p (SUB_ACCESS_FN (sub, 0), lnum)
        || !evolution_function_is_invariant_p (SUB_ACCESS_FN (sub, 1), lnum))
      return false;

  return true;
}

/* Helper function for the case where DDR_A and DDR_B are the same
   multivariate access function with a constant step.  For an example
   see pr34635-1.c.  */

static void
add_multivariate_self_dist (struct data_dependence_relation *ddr, tree c_2)
{
  int x_1, x_2;
  tree c_1 = CHREC_LEFT (c_2);
  tree c_0 = CHREC_LEFT (c_1);
  lambda_vector dist_v;
  HOST_WIDE_INT v1, v2, cd;

  /* Polynomials with more than 2 variables are not handled yet.  When
     the evolution steps are parameters, it is not possible to
     represent the dependence using classical distance vectors.  */
  if (TREE_CODE (c_0) != INTEGER_CST
      || TREE_CODE (CHREC_RIGHT (c_1)) != INTEGER_CST
      || TREE_CODE (CHREC_RIGHT (c_2)) != INTEGER_CST)
    {
      DDR_AFFINE_P (ddr) = false;
      return;
    }

  x_2 = index_in_loop_nest (CHREC_VARIABLE (c_2), DDR_LOOP_NEST (ddr));
  x_1 = index_in_loop_nest (CHREC_VARIABLE (c_1), DDR_LOOP_NEST (ddr));

  /* For "{{0, +, 2}_1, +, 3}_2" the distance vector is (3, -2).  */
  dist_v = lambda_vector_new (DDR_NB_LOOPS (ddr));
  v1 = int_cst_value (CHREC_RIGHT (c_1));
  v2 = int_cst_value (CHREC_RIGHT (c_2));
  cd = gcd (v1, v2);
  v1 /= cd;
  v2 /= cd;

  if (v2 < 0)
    {
      v2 = -v2;
      v1 = -v1;
    }

  dist_v[x_1] = v2;
  dist_v[x_2] = -v1;
  save_dist_v (ddr, dist_v);

  add_outer_distances (ddr, dist_v, x_1);
}
/* Helper function for the case where DDR_A and DDR_B are the same
   access functions.  */

static void
add_other_self_distances (struct data_dependence_relation *ddr)
{
  lambda_vector dist_v;
  unsigned i;
  int index_carry = DDR_NB_LOOPS (ddr);
  subscript *sub;
  class loop *loop = DDR_LOOP_NEST (ddr)[0];

  FOR_EACH_VEC_ELT (DDR_SUBSCRIPTS (ddr), i, sub)
    {
      tree access_fun = SUB_ACCESS_FN (sub, 0);

      if (TREE_CODE (access_fun) == POLYNOMIAL_CHREC)
        {
          if (!evolution_function_is_univariate_p (access_fun, loop->num))
            {
              if (DDR_NUM_SUBSCRIPTS (ddr) != 1)
                {
                  DDR_ARE_DEPENDENT (ddr) = chrec_dont_know;
                  return;
                }

              access_fun = SUB_ACCESS_FN (DDR_SUBSCRIPT (ddr, 0), 0);

              if (TREE_CODE (CHREC_LEFT (access_fun)) == POLYNOMIAL_CHREC)
                add_multivariate_self_dist (ddr, access_fun);
              else
                /* The evolution step is not constant: it varies in
                   the outer loop, so this cannot be represented by a
                   distance vector.  For example in pr34635.c the
                   evolution is {0, +, {0, +, 4}_1}_2.  */
                DDR_AFFINE_P (ddr) = false;

              return;
            }

          /* When data references are collected in a loop while data
             dependences are analyzed in a loop nest nested in that loop,
             there may be more access functions than loops.  Skip access
             functions of loops not in the loop nest.

             See PR89725 for more information.  */
          if (flow_loop_nested_p (get_loop (cfun, CHREC_VARIABLE (access_fun)),
                                  loop))
            continue;

          index_carry = MIN (index_carry,
                             index_in_loop_nest (CHREC_VARIABLE (access_fun),
                                                 DDR_LOOP_NEST (ddr)));
        }
    }

  dist_v = lambda_vector_new (DDR_NB_LOOPS (ddr));
  add_outer_distances (ddr, dist_v, index_carry);
}

static void
insert_innermost_unit_dist_vector (struct data_dependence_relation *ddr)
{
  lambda_vector dist_v = lambda_vector_new (DDR_NB_LOOPS (ddr));

  dist_v[0] = 1;
  save_dist_v (ddr, dist_v);
}
/* Adds a unit distance vector to DDR when there is a 0 overlap.  This
   is the case for example when access functions are the same and
   equal to a constant, as in:

   | loop_1
   |   A[3] = ...
   |   ... = A[3]
   | endloop_1

   in which case the distance vectors are (0) and (1).  */

static void
add_distance_for_zero_overlaps (struct data_dependence_relation *ddr)
{
  unsigned i, j;

  for (i = 0; i < DDR_NUM_SUBSCRIPTS (ddr); i++)
    {
      subscript_p sub = DDR_SUBSCRIPT (ddr, i);
      conflict_function *ca = SUB_CONFLICTS_IN_A (sub);
      conflict_function *cb = SUB_CONFLICTS_IN_B (sub);

      for (j = 0; j < ca->n; j++)
        if (affine_function_zero_p (ca->fns[j]))
          {
            insert_innermost_unit_dist_vector (ddr);
            return;
          }

      for (j = 0; j < cb->n; j++)
        if (affine_function_zero_p (cb->fns[j]))
          {
            insert_innermost_unit_dist_vector (ddr);
            return;
          }
    }
}

/* Return true when the DDR contains two data references that have the
   same access functions.  */

static inline bool
same_access_functions (const struct data_dependence_relation *ddr)
{
  unsigned i;
  subscript *sub;

  FOR_EACH_VEC_ELT (DDR_SUBSCRIPTS (ddr), i, sub)
    if (!eq_evolutions_p (SUB_ACCESS_FN (sub, 0),
                          SUB_ACCESS_FN (sub, 1)))
      return false;

  return true;
}
/* Compute the classic per loop distance vector.  DDR is the data
   dependence relation to build a vector from.  Return false when we
   fail to represent the data dependence as a distance vector.  */

static bool
build_classic_dist_vector (struct data_dependence_relation *ddr,
                           class loop *loop_nest)
{
  bool init_b = false;
  int index_carry = DDR_NB_LOOPS (ddr);
  lambda_vector dist_v;

  if (DDR_ARE_DEPENDENT (ddr) != NULL_TREE)
    return false;

  if (same_access_functions (ddr))
    {
      /* Save the 0 vector.  */
      dist_v = lambda_vector_new (DDR_NB_LOOPS (ddr));
      save_dist_v (ddr, dist_v);

      if (invariant_access_functions (ddr, loop_nest->num))
        add_distance_for_zero_overlaps (ddr);

      if (DDR_NB_LOOPS (ddr) > 1)
        add_other_self_distances (ddr);

      return true;
    }

  dist_v = lambda_vector_new (DDR_NB_LOOPS (ddr));
  if (!build_classic_dist_vector_1 (ddr, 0, 1, dist_v, &init_b, &index_carry))
    return false;

  /* Save the distance vector if we initialized one.  */
  if (init_b)
    {
      /* Verify a basic constraint: classic distance vectors should
         always be lexicographically positive.

         Data references are collected in the order of execution of
         the program, thus for the following loop

         | for (i = 1; i < 100; i++)
         |   for (j = 1; j < 100; j++)
         |     {
         |       t = T[j+1][i-1];  // A
         |       T[j][i] = t + 2;  // B
         |     }

         references are collected following the direction of the wind:
         A then B.  The data dependence tests are performed also
         following this order, such that we're looking at the distance
         separating the elements accessed by A from the elements later
         accessed by B.  But in this example, the distance returned by
         test_dep (A, B) is lexicographically negative (-1, 1), that
         means that the access A occurs later than B with respect to
         the outer loop, ie. we're actually looking upwind.  In this
         case we solve test_dep (B, A) looking downwind to the
         lexicographically positive solution, that returns the
         distance vector (1, -1).  */
      if (!lambda_vector_lexico_pos (dist_v, DDR_NB_LOOPS (ddr)))
        {
          lambda_vector save_v = lambda_vector_new (DDR_NB_LOOPS (ddr));
          if (!subscript_dependence_tester_1 (ddr, 1, 0, loop_nest))
            return false;
          compute_subscript_distance (ddr);
          if (!build_classic_dist_vector_1 (ddr, 1, 0, save_v, &init_b,
                                            &index_carry))
            return false;
          save_dist_v (ddr, save_v);
          DDR_REVERSED_P (ddr) = true;

          /* In this case there is a dependence forward for all the
             outer loops:

             | for (k = 1; k < 100; k++)
             |  for (i = 1; i < 100; i++)
             |   for (j = 1; j < 100; j++)
             |     {
             |       t = T[j+1][i-1];  // A
             |       T[j][i] = t + 2;  // B
             |     }
          */
          if (DDR_NB_LOOPS (ddr) > 1)
            {
              add_outer_distances (ddr, save_v, index_carry);
              add_outer_distances (ddr, dist_v, index_carry);
            }
        }
      else
        {
          lambda_vector save_v = lambda_vector_new (DDR_NB_LOOPS (ddr));
          lambda_vector_copy (dist_v, save_v, DDR_NB_LOOPS (ddr));

          if (DDR_NB_LOOPS (ddr) > 1)
            {
              lambda_vector opposite_v = lambda_vector_new (DDR_NB_LOOPS (ddr));

              if (!subscript_dependence_tester_1 (ddr, 1, 0, loop_nest))
                return false;
              compute_subscript_distance (ddr);
              if (!build_classic_dist_vector_1 (ddr, 1, 0, opposite_v, &init_b,
                                                &index_carry))
                return false;

              save_dist_v (ddr, save_v);
              add_outer_distances (ddr, dist_v, index_carry);
              add_outer_distances (ddr, opposite_v, index_carry);
            }
          else
            save_dist_v (ddr, save_v);
        }
    }
  else
    {
      /* There is a distance of 1 on all the outer loops: Example:
         there is a dependence of distance 1 on loop_1 for the array A.

         | loop_1
         |   A[5] = ...
         | endloop
      */
      add_outer_distances (ddr, dist_v,
                           lambda_vector_first_nz (dist_v,
                                                   DDR_NB_LOOPS (ddr), 0));
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      unsigned i;

      fprintf (dump_file, "(build_classic_dist_vector\n");
      for (i = 0; i < DDR_NUM_DIST_VECTS (ddr); i++)
        {
          fprintf (dump_file, "  dist_vector = (");
          print_lambda_vector (dump_file, DDR_DIST_VECT (ddr, i),
                               DDR_NB_LOOPS (ddr));
          fprintf (dump_file, "  )\n");
        }
      fprintf (dump_file, ")\n");
    }

  return true;
}
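/* Editor's illustration of the simplest case: in

   | for (i = 1; i < 100; i++)
   |   A[i] = A[i-1] + 1;

   the read A[i-1] has access function {0, +, 1}_1 and the write A[i]
   has {1, +, 1}_1; the function above records the distance vector
   (1): the element written at iteration i is read again at iteration
   i + 1.  */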
/* Return the direction for a given distance.
   FIXME: Computing dir this way is suboptimal, since dir can catch
   cases that dist is unable to represent.  */

static inline enum data_dependence_direction
dir_from_dist (int dist)
{
  if (dist > 0)
    return dir_positive;
  else if (dist < 0)
    return dir_negative;
  else
    return dir_equal;
}
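/* E.g. (editor's note) the distance vector (2, 0, -1) maps to the
   direction vector (dir_positive, dir_equal, dir_negative).  */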
/* Compute the classic per loop direction vector.  DDR is the data
   dependence relation to build a vector from.  */

static void
build_classic_dir_vector (struct data_dependence_relation *ddr)
{
  unsigned i, j;
  lambda_vector dist_v;

  FOR_EACH_VEC_ELT (DDR_DIST_VECTS (ddr), i, dist_v)
    {
      lambda_vector dir_v = lambda_vector_new (DDR_NB_LOOPS (ddr));

      for (j = 0; j < DDR_NB_LOOPS (ddr); j++)
        dir_v[j] = dir_from_dist (dist_v[j]);

      save_dir_v (ddr, dir_v);
    }
}
/* Helper function.  Returns true when there is a dependence between the
   data references.  A_INDEX is the index of the first reference (0 for
   DDR_A, 1 for DDR_B) and B_INDEX is the index of the second reference.  */

static bool
subscript_dependence_tester_1 (struct data_dependence_relation *ddr,
                               unsigned int a_index, unsigned int b_index,
                               class loop *loop_nest)
{
  unsigned int i;
  tree last_conflicts;
  struct subscript *subscript;
  tree res = NULL_TREE;

  for (i = 0; DDR_SUBSCRIPTS (ddr).iterate (i, &subscript); i++)
    {
      conflict_function *overlaps_a, *overlaps_b;

      analyze_overlapping_iterations (SUB_ACCESS_FN (subscript, a_index),
                                      SUB_ACCESS_FN (subscript, b_index),
                                      &overlaps_a, &overlaps_b,
                                      &last_conflicts, loop_nest);

      if (SUB_CONFLICTS_IN_A (subscript))
        free_conflict_function (SUB_CONFLICTS_IN_A (subscript));
      if (SUB_CONFLICTS_IN_B (subscript))
        free_conflict_function (SUB_CONFLICTS_IN_B (subscript));

      SUB_CONFLICTS_IN_A (subscript) = overlaps_a;
      SUB_CONFLICTS_IN_B (subscript) = overlaps_b;
      SUB_LAST_CONFLICT (subscript) = last_conflicts;

      /* If there is any undetermined conflict function we have to
         give a conservative answer in case we cannot prove that
         no dependence exists when analyzing another subscript.  */
      if (CF_NOT_KNOWN_P (overlaps_a)
          || CF_NOT_KNOWN_P (overlaps_b))
        {
          res = chrec_dont_know;
          continue;
        }

      /* When there is a subscript with no dependence we can stop.  */
      else if (CF_NO_DEPENDENCE_P (overlaps_a)
               || CF_NO_DEPENDENCE_P (overlaps_b))
        {
          res = chrec_known;
          break;
        }
    }

  if (res == NULL_TREE)
    return true;

  if (res == chrec_known)
    dependence_stats.num_dependence_independent++;
  else
    dependence_stats.num_dependence_undetermined++;
  finalize_ddr_dependent (ddr, res);
  return false;
}
/* Computes the conflicting iterations in LOOP_NEST, and initialize DDR.  */

static void
subscript_dependence_tester (struct data_dependence_relation *ddr,
                             class loop *loop_nest)
{
  if (subscript_dependence_tester_1 (ddr, 0, 1, loop_nest))
    dependence_stats.num_dependence_dependent++;

  compute_subscript_distance (ddr);
  if (build_classic_dist_vector (ddr, loop_nest))
    build_classic_dir_vector (ddr);
}

/* Returns true when all the access functions of A are affine or
   constant with respect to LOOP_NEST.  */

static bool
access_functions_are_affine_or_constant_p (const struct data_reference *a,
                                           const class loop *loop_nest)
{
  unsigned int i;
  vec<tree> fns = DR_ACCESS_FNS (a);
  tree t;

  FOR_EACH_VEC_ELT (fns, i, t)
    if (!evolution_function_is_invariant_p (t, loop_nest->num)
        && !evolution_function_is_affine_multivariate_p (t, loop_nest->num))
      return false;

  return true;
}
/* This computes the affine dependence relation between A and B with
   respect to LOOP_NEST.  CHREC_KNOWN is used for representing the
   independence between two accesses, while CHREC_DONT_KNOW is used
   for representing the unknown relation.

   Note that it is possible to stop the computation of the dependence
   relation the first time we detect a CHREC_KNOWN element for a given
   subscript.  */

void
compute_affine_dependence (struct data_dependence_relation *ddr,
                           class loop *loop_nest)
{
  struct data_reference *dra = DDR_A (ddr);
  struct data_reference *drb = DDR_B (ddr);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "(compute_affine_dependence\n");
      fprintf (dump_file, "  stmt_a: ");
      print_gimple_stmt (dump_file, DR_STMT (dra), 0, TDF_SLIM);
      fprintf (dump_file, "  stmt_b: ");
      print_gimple_stmt (dump_file, DR_STMT (drb), 0, TDF_SLIM);
    }

  /* Analyze only when the dependence relation is not yet known.  */
  if (DDR_ARE_DEPENDENT (ddr) == NULL_TREE)
    {
      dependence_stats.num_dependence_tests++;

      if (access_functions_are_affine_or_constant_p (dra, loop_nest)
          && access_functions_are_affine_or_constant_p (drb, loop_nest))
        subscript_dependence_tester (ddr, loop_nest);

      /* As a last case, if the dependence cannot be determined, or if
         the dependence is considered too difficult to determine, answer
         "don't know".  */
      else
        {
          dependence_stats.num_dependence_undetermined++;

          if (dump_file && (dump_flags & TDF_DETAILS))
            {
              fprintf (dump_file, "Data ref a:\n");
              dump_data_reference (dump_file, dra);
              fprintf (dump_file, "Data ref b:\n");
              dump_data_reference (dump_file, drb);
              fprintf (dump_file, "affine dependence test not usable: access function not affine or constant.\n");
            }
          finalize_ddr_dependent (ddr, chrec_dont_know);
        }
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      if (DDR_ARE_DEPENDENT (ddr) == chrec_known)
        fprintf (dump_file, ") -> no dependence\n");
      else if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know)
        fprintf (dump_file, ") -> dependence analysis failed\n");
      else
        fprintf (dump_file, ")\n");
    }
}
/* Compute in DEPENDENCE_RELATIONS the data dependence graph for all
   the data references in DATAREFS, in the LOOP_NEST.  When
   COMPUTE_SELF_AND_RR is FALSE, don't compute read-read and self
   relations.  Return true when successful, i.e. when the number of
   data references is small enough to be handled.  */

bool
compute_all_dependences (vec<data_reference_p> datarefs,
                         vec<ddr_p> *dependence_relations,
                         vec<loop_p> loop_nest,
                         bool compute_self_and_rr)
{
  struct data_dependence_relation *ddr;
  struct data_reference *a, *b;
  unsigned int i, j;

  if ((int) datarefs.length ()
      > param_loop_max_datarefs_for_datadeps)
    {
      struct data_dependence_relation *ddr;

      /* Insert a single relation into dependence_relations:
         chrec_dont_know.  */
      ddr = initialize_data_dependence_relation (NULL, NULL, loop_nest);
      dependence_relations->safe_push (ddr);
      return false;
    }

  FOR_EACH_VEC_ELT (datarefs, i, a)
    for (j = i + 1; datarefs.iterate (j, &b); j++)
      if (DR_IS_WRITE (a) || DR_IS_WRITE (b) || compute_self_and_rr)
        {
          ddr = initialize_data_dependence_relation (a, b, loop_nest);
          dependence_relations->safe_push (ddr);
          if (loop_nest.exists ())
            compute_affine_dependence (ddr, loop_nest[0]);
        }

  if (compute_self_and_rr)
    FOR_EACH_VEC_ELT (datarefs, i, a)
      {
        ddr = initialize_data_dependence_relation (a, a, loop_nest);
        dependence_relations->safe_push (ddr);
        if (loop_nest.exists ())
          compute_affine_dependence (ddr, loop_nest[0]);
      }

  return true;
}
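/* A minimal usage sketch (editor's illustration, mirroring what
   compute_data_dependences_for_loop does below):

   | auto_vec<loop_p, 3> loop_nest;
   | auto_vec<data_reference_p> datarefs;
   | auto_vec<ddr_p> ddrs;
   | if (find_loop_nest (loop, &loop_nest)
   |     && find_data_references_in_loop (loop, &datarefs) != chrec_dont_know)
   |   compute_all_dependences (datarefs, &ddrs, loop_nest, false);
*/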
/* Describes a location of a memory reference.  */

struct data_ref_loc
{
  /* The memory reference.  */
  tree ref;

  /* True if the memory reference is read.  */
  bool is_read;

  /* True if the data reference is conditional within the containing
     statement, i.e. if it might not occur even when the statement
     is executed and runs to completion.  */
  bool is_conditional_in_stmt;
};
/* Stores the locations of memory references in STMT to REFERENCES.  Returns
   true if STMT clobbers memory, false otherwise.  */

static bool
get_references_in_stmt (gimple *stmt, vec<data_ref_loc, va_heap> *references)
{
  bool clobbers_memory = false;
  data_ref_loc ref;
  tree op0, op1;
  enum gimple_code stmt_code = gimple_code (stmt);

  /* ASM_EXPR and CALL_EXPR may embed arbitrary side effects.
     As we cannot model data-references to not spelled out
     accesses give up if they may occur.  */
  if (stmt_code == GIMPLE_CALL
      && !(gimple_call_flags (stmt) & ECF_CONST))
    {
      /* Allow IFN_GOMP_SIMD_LANE in their own loops.  */
      if (gimple_call_internal_p (stmt))
        switch (gimple_call_internal_fn (stmt))
          {
          case IFN_GOMP_SIMD_LANE:
            {
              class loop *loop = gimple_bb (stmt)->loop_father;
              tree uid = gimple_call_arg (stmt, 0);
              gcc_assert (TREE_CODE (uid) == SSA_NAME);
              if (loop == NULL
                  || loop->simduid != SSA_NAME_VAR (uid))
                clobbers_memory = true;
              break;
            }
          case IFN_MASK_LOAD:
          case IFN_MASK_STORE:
            break;
          default:
            clobbers_memory = true;
            break;
          }
      else
        clobbers_memory = true;
    }
  else if (stmt_code == GIMPLE_ASM
           && (gimple_asm_volatile_p (as_a <gasm *> (stmt))
               || gimple_vuse (stmt)))
    clobbers_memory = true;

  if (!gimple_vuse (stmt))
    return clobbers_memory;

  if (stmt_code == GIMPLE_ASSIGN)
    {
      tree base;
      op0 = gimple_assign_lhs (stmt);
      op1 = gimple_assign_rhs1 (stmt);

      if (DECL_P (op1)
          || (REFERENCE_CLASS_P (op1)
              && (base = get_base_address (op1))
              && TREE_CODE (base) != SSA_NAME
              && !is_gimple_min_invariant (base)))
        {
          ref.ref = op1;
          ref.is_read = true;
          ref.is_conditional_in_stmt = false;
          references->safe_push (ref);
        }
    }
  else if (stmt_code == GIMPLE_CALL)
    {
      unsigned i, n;
      tree ptr, type;
      unsigned int align;

      ref.is_read = false;
      if (gimple_call_internal_p (stmt))
        switch (gimple_call_internal_fn (stmt))
          {
          case IFN_MASK_LOAD:
            if (gimple_call_lhs (stmt) == NULL_TREE)
              break;
            ref.is_read = true;
            /* FALLTHRU */
          case IFN_MASK_STORE:
            ptr = build_int_cst (TREE_TYPE (gimple_call_arg (stmt, 1)), 0);
            align = tree_to_shwi (gimple_call_arg (stmt, 1));
            if (ref.is_read)
              type = TREE_TYPE (gimple_call_lhs (stmt));
            else
              type = TREE_TYPE (gimple_call_arg (stmt, 3));
            if (TYPE_ALIGN (type) != align)
              type = build_aligned_type (type, align);
            ref.is_conditional_in_stmt = true;
            ref.ref = fold_build2 (MEM_REF, type, gimple_call_arg (stmt, 0),
                                   ptr);
            references->safe_push (ref);
            return false;
          default:
            break;
          }

      op0 = gimple_call_lhs (stmt);
      n = gimple_call_num_args (stmt);
      for (i = 0; i < n; i++)
        {
          op1 = gimple_call_arg (stmt, i);

          if (DECL_P (op1)
              || (REFERENCE_CLASS_P (op1) && get_base_address (op1)))
            {
              ref.ref = op1;
              ref.is_read = true;
              ref.is_conditional_in_stmt = false;
              references->safe_push (ref);
            }
        }
    }
  else
    return clobbers_memory;

  if (op0
      && (DECL_P (op0)
          || (REFERENCE_CLASS_P (op0) && get_base_address (op0))))
    {
      ref.ref = op0;
      ref.is_read = false;
      ref.is_conditional_in_stmt = false;
      references->safe_push (ref);
    }
  return clobbers_memory;
}
/* Returns true if the loop-nest has any data reference.  */

bool
loop_nest_has_data_refs (loop_p loop)
{
  basic_block *bbs = get_loop_body (loop);
  auto_vec<data_ref_loc, 3> references;

  for (unsigned i = 0; i < loop->num_nodes; i++)
    {
      basic_block bb = bbs[i];
      gimple_stmt_iterator bsi;

      for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
        {
          gimple *stmt = gsi_stmt (bsi);
          get_references_in_stmt (stmt, &references);
          if (references.length ())
            {
              free (bbs);
              return true;
            }
        }
    }
  free (bbs);
  return false;
}
/* Stores the data references in STMT to DATAREFS.  If there is an
   unanalyzable reference, returns failure, otherwise returns success.
   NEST is the outermost loop of the loop nest in which the references
   should be analyzed.  */

opt_result
find_data_references_in_stmt (class loop *nest, gimple *stmt,
                              vec<data_reference_p> *datarefs)
{
  unsigned i;
  auto_vec<data_ref_loc, 2> references;
  data_ref_loc *ref;
  data_reference_p dr;

  if (get_references_in_stmt (stmt, &references))
    return opt_result::failure_at (stmt, "statement clobbers memory: %G",
                                   stmt);

  FOR_EACH_VEC_ELT (references, i, ref)
    {
      dr = create_data_ref (nest ? loop_preheader_edge (nest) : NULL,
                            loop_containing_stmt (stmt), ref->ref,
                            stmt, ref->is_read, ref->is_conditional_in_stmt);
      gcc_assert (dr != NULL);
      datarefs->safe_push (dr);
    }

  return opt_result::success ();
}
/* Stores the data references in STMT to DATAREFS.  If there is an
   unanalyzable reference, returns false, otherwise returns true.
   NEST is the outermost loop of the loop nest in which the references
   should be instantiated, LOOP is the loop in which the references
   should be analyzed.  */

bool
graphite_find_data_references_in_stmt (edge nest, loop_p loop, gimple *stmt,
                                       vec<data_reference_p> *datarefs)
{
  unsigned i;
  auto_vec<data_ref_loc, 2> references;
  data_ref_loc *ref;
  bool ret = true;
  data_reference_p dr;

  if (get_references_in_stmt (stmt, &references))
    return false;

  FOR_EACH_VEC_ELT (references, i, ref)
    {
      dr = create_data_ref (nest, loop, ref->ref, stmt, ref->is_read,
                            ref->is_conditional_in_stmt);
      gcc_assert (dr != NULL);
      datarefs->safe_push (dr);
    }

  return ret;
}
/* Search the data references in BB, and record the information into
   DATAREFS.  Returns chrec_dont_know when failing to analyze a
   difficult case, returns NULL_TREE otherwise.  */

tree
find_data_references_in_bb (class loop *loop, basic_block bb,
                            vec<data_reference_p> *datarefs)
{
  gimple_stmt_iterator bsi;

  for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
    {
      gimple *stmt = gsi_stmt (bsi);

      if (!find_data_references_in_stmt (loop, stmt, datarefs))
        {
          struct data_reference *res;
          res = XCNEW (struct data_reference);
          datarefs->safe_push (res);

          return chrec_dont_know;
        }
    }

  return NULL_TREE;
}
/* Search the data references in LOOP, and record the information into
   DATAREFS.  Returns chrec_dont_know when failing to analyze a
   difficult case, returns NULL_TREE otherwise.

   TODO: This function should be made smarter so that it can handle address
   arithmetic as if they were array accesses, etc.  */

tree
find_data_references_in_loop (class loop *loop,
                              vec<data_reference_p> *datarefs)
{
  basic_block bb, *bbs;
  unsigned int i;

  bbs = get_loop_body_in_dom_order (loop);

  for (i = 0; i < loop->num_nodes; i++)
    {
      bb = bbs[i];

      if (find_data_references_in_bb (loop, bb, datarefs) == chrec_dont_know)
        {
          free (bbs);
          return chrec_dont_know;
        }
    }
  free (bbs);

  return NULL_TREE;
}
/* Return the alignment in bytes that DRB is guaranteed to have at all
   times.  */

unsigned int
dr_alignment (innermost_loop_behavior *drb)
{
  /* Get the alignment of BASE_ADDRESS + INIT.  */
  unsigned int alignment = drb->base_alignment;
  unsigned int misalignment = (drb->base_misalignment
                               + TREE_INT_CST_LOW (drb->init));
  if (misalignment != 0)
    alignment = MIN (alignment, misalignment & -misalignment);

  /* Cap it to the alignment of OFFSET.  */
  if (!integer_zerop (drb->offset))
    alignment = MIN (alignment, drb->offset_alignment);

  /* Cap it to the alignment of STEP.  */
  if (!integer_zerop (drb->step))
    alignment = MIN (alignment, drb->step_alignment);

  return alignment;
}
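/* A worked example (editor's illustration): for a reference with
   BASE_ALIGNMENT = 16, BASE_MISALIGNMENT = 0 and INIT = 4, and with
   an OFFSET aligned to 8 bytes and a STEP aligned to 4 bytes:

   | MIN (16, 4 & -4) = 4     from BASE_ADDRESS + INIT
   | MIN (4, 8)       = 4     capped to the OFFSET alignment
   | MIN (4, 4)       = 4     capped to the STEP alignment

   so the reference is guaranteed to be 4-byte aligned at all times.  */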
/* If BASE is a pointer-typed SSA name, try to find the object that it
   is based on.  Return this object X on success and store the alignment
   in bytes of BASE - &X in *ALIGNMENT_OUT.  */

static tree
get_base_for_alignment_1 (tree base, unsigned int *alignment_out)
{
  if (TREE_CODE (base) != SSA_NAME || !POINTER_TYPE_P (TREE_TYPE (base)))
    return NULL_TREE;

  gimple *def = SSA_NAME_DEF_STMT (base);
  base = analyze_scalar_evolution (loop_containing_stmt (def), base);

  /* Peel chrecs and record the minimum alignment preserved by
     all steps.  */
  unsigned int alignment = MAX_OFILE_ALIGNMENT / BITS_PER_UNIT;
  while (TREE_CODE (base) == POLYNOMIAL_CHREC)
    {
      unsigned int step_alignment = highest_pow2_factor (CHREC_RIGHT (base));
      alignment = MIN (alignment, step_alignment);
      base = CHREC_LEFT (base);
    }

  /* Punt if the expression is too complicated to handle.  */
  if (tree_contains_chrecs (base, NULL) || !POINTER_TYPE_P (TREE_TYPE (base)))
    return NULL_TREE;

  /* The only useful cases are those for which a dereference folds to something
     other than an INDIRECT_REF.  */
  tree ref_type = TREE_TYPE (TREE_TYPE (base));
  tree ref = fold_indirect_ref_1 (UNKNOWN_LOCATION, ref_type, base);
  if (!ref)
    return NULL_TREE;

  /* Analyze the base to which the steps we peeled were applied.  */
  poly_int64 bitsize, bitpos, bytepos;
  machine_mode mode;
  int unsignedp, reversep, volatilep;
  tree offset;
  base = get_inner_reference (ref, &bitsize, &bitpos, &offset, &mode,
                              &unsignedp, &reversep, &volatilep);
  if (!base || !multiple_p (bitpos, BITS_PER_UNIT, &bytepos))
    return NULL_TREE;

  /* Restrict the alignment to that guaranteed by the offsets.  */
  unsigned int bytepos_alignment = known_alignment (bytepos);
  if (bytepos_alignment != 0)
    alignment = MIN (alignment, bytepos_alignment);
  if (offset)
    {
      unsigned int offset_alignment = highest_pow2_factor (offset);
      alignment = MIN (alignment, offset_alignment);
    }

  *alignment_out = alignment;
  return base;
}
/* Return the object whose alignment would need to be changed in order
   to increase the alignment of ADDR.  Store the maximum achievable
   alignment in *MAX_ALIGNMENT.  */

tree
get_base_for_alignment (tree addr, unsigned int *max_alignment)
{
  tree base = get_base_for_alignment_1 (addr, max_alignment);
  if (base)
    return base;

  if (TREE_CODE (addr) == ADDR_EXPR)
    addr = TREE_OPERAND (addr, 0);
  *max_alignment = MAX_OFILE_ALIGNMENT / BITS_PER_UNIT;
  return addr;
}
/* Recursive helper function.  */

static bool
find_loop_nest_1 (class loop *loop, vec<loop_p> *loop_nest)
{
  /* Inner loops of the nest should not contain siblings.  Example:
     when there are two consecutive loops,

     | loop_0
     |   loop_1
     |     A[{0, +, 1}_1]
     |   endloop_1
     |   loop_2
     |     A[{0, +, 1}_2]
     |   endloop_2
     | endloop_0

     the dependence relation cannot be captured by the distance
     abstraction.  */
  if (loop->next)
    return false;

  loop_nest->safe_push (loop);
  if (loop->inner)
    return find_loop_nest_1 (loop->inner, loop_nest);
  return true;
}

/* Return false when the LOOP is not well nested.  Otherwise return
   true and insert in LOOP_NEST the loops of the nest.  LOOP_NEST will
   contain the loops from the outermost to the innermost, as they will
   appear in the classic distance vector.  */

bool
find_loop_nest (class loop *loop, vec<loop_p> *loop_nest)
{
  loop_nest->safe_push (loop);
  if (loop->inner)
    return find_loop_nest_1 (loop->inner, loop_nest);
  return true;
}
/* Returns true when the data dependences have been computed, false otherwise.
   Given a loop nest LOOP, the following vectors are returned:
   DATAREFS is initialized to all the array elements contained in this loop,
   DEPENDENCE_RELATIONS contains the relations between the data references.
   Compute read-read and self relations if
   COMPUTE_SELF_AND_READ_READ_DEPENDENCES is TRUE.  */
bool
compute_data_dependences_for_loop (class loop *loop,
				   bool compute_self_and_read_read_dependences,
				   vec<loop_p> *loop_nest,
				   vec<data_reference_p> *datarefs,
				   vec<ddr_p> *dependence_relations)
{
  bool res = true;

  memset (&dependence_stats, 0, sizeof (dependence_stats));

  /* If the loop nest is not well formed, or one of the data references
     is not computable, give up without spending time to compute other
     dependences.  */
  if (!loop
      || !find_loop_nest (loop, loop_nest)
      || find_data_references_in_loop (loop, datarefs) == chrec_dont_know
      || !compute_all_dependences (*datarefs, dependence_relations, *loop_nest,
				   compute_self_and_read_read_dependences))
    res = false;
  if (dump_file && (dump_flags & TDF_STATS))
    {
      fprintf (dump_file, "Dependence tester statistics:\n");

      fprintf (dump_file, "Number of dependence tests: %d\n",
	       dependence_stats.num_dependence_tests);
      fprintf (dump_file, "Number of dependence tests classified dependent: %d\n",
	       dependence_stats.num_dependence_dependent);
      fprintf (dump_file, "Number of dependence tests classified independent: %d\n",
	       dependence_stats.num_dependence_independent);
      fprintf (dump_file, "Number of undetermined dependence tests: %d\n",
	       dependence_stats.num_dependence_undetermined);

      fprintf (dump_file, "Number of subscript tests: %d\n",
	       dependence_stats.num_subscript_tests);
      fprintf (dump_file, "Number of undetermined subscript tests: %d\n",
	       dependence_stats.num_subscript_undetermined);
      fprintf (dump_file, "Number of same subscript function: %d\n",
	       dependence_stats.num_same_subscript_function);

      fprintf (dump_file, "Number of ziv tests: %d\n",
	       dependence_stats.num_ziv);
      fprintf (dump_file, "Number of ziv tests returning dependent: %d\n",
	       dependence_stats.num_ziv_dependent);
      fprintf (dump_file, "Number of ziv tests returning independent: %d\n",
	       dependence_stats.num_ziv_independent);
      fprintf (dump_file, "Number of ziv tests unimplemented: %d\n",
	       dependence_stats.num_ziv_unimplemented);

      fprintf (dump_file, "Number of siv tests: %d\n",
	       dependence_stats.num_siv);
      fprintf (dump_file, "Number of siv tests returning dependent: %d\n",
	       dependence_stats.num_siv_dependent);
      fprintf (dump_file, "Number of siv tests returning independent: %d\n",
	       dependence_stats.num_siv_independent);
      fprintf (dump_file, "Number of siv tests unimplemented: %d\n",
	       dependence_stats.num_siv_unimplemented);

      fprintf (dump_file, "Number of miv tests: %d\n",
	       dependence_stats.num_miv);
      fprintf (dump_file, "Number of miv tests returning dependent: %d\n",
	       dependence_stats.num_miv_dependent);
      fprintf (dump_file, "Number of miv tests returning independent: %d\n",
	       dependence_stats.num_miv_independent);
      fprintf (dump_file, "Number of miv tests unimplemented: %d\n",
	       dependence_stats.num_miv_unimplemented);
    }

  return res;
}
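
/* A minimal usage sketch (error handling elided; the variables are
   illustrative): callers own the three vectors and must free them with
   the routines below:

   | vec<loop_p> loop_nest = vNULL;
   | vec<data_reference_p> datarefs = vNULL;
   | vec<ddr_p> dependence_relations = vNULL;
   | if (compute_data_dependences_for_loop (loop, true, &loop_nest,
   |					    &datarefs,
   |					    &dependence_relations))
   |   ...  inspect DDR_ARE_DEPENDENT and DDR_DIST_VECTS here ...
   | free_dependence_relations (dependence_relations);
   | free_data_refs (datarefs);
   | loop_nest.release ();  */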
/* Free the memory used by a data dependence relation DDR.  */

void
free_dependence_relation (struct data_dependence_relation *ddr)
{
  if (ddr == NULL)
    return;

  if (DDR_SUBSCRIPTS (ddr).exists ())
    free_subscripts (DDR_SUBSCRIPTS (ddr));
  DDR_DIST_VECTS (ddr).release ();
  DDR_DIR_VECTS (ddr).release ();

  free (ddr);
}
/* Free the memory used by the data dependence relations from
   DEPENDENCE_RELATIONS.  */

void
free_dependence_relations (vec<ddr_p> dependence_relations)
{
  unsigned int i;
  struct data_dependence_relation *ddr;

  FOR_EACH_VEC_ELT (dependence_relations, i, ddr)
    if (ddr)
      free_dependence_relation (ddr);

  dependence_relations.release ();
}
/* Free the memory used by the data references from DATAREFS.  */

void
free_data_refs (vec<data_reference_p> datarefs)
{
  unsigned int i;
  struct data_reference *dr;

  FOR_EACH_VEC_ELT (datarefs, i, dr)
    free_data_ref (dr);
  datarefs.release ();
}
/* Common routine implementing both dr_direction_indicator and
   dr_zero_step_indicator.  Return USEFUL_MIN if the indicator is known
   to be >= USEFUL_MIN and -1 if the indicator is known to be negative.
   Return the step as the indicator otherwise.  */
static tree
dr_step_indicator (struct data_reference *dr, int useful_min)
{
  tree step = DR_STEP (dr);
  STRIP_NOPS (step);

  /* Look for cases where the step is scaled by a positive constant
     integer, which will often be the access size.  If the multiplication
     doesn't change the sign (due to overflow effects) then we can
     test the unscaled value instead.  */
  if (TREE_CODE (step) == MULT_EXPR
      && TREE_CODE (TREE_OPERAND (step, 1)) == INTEGER_CST
      && tree_int_cst_sgn (TREE_OPERAND (step, 1)) > 0)
    {
      tree factor = TREE_OPERAND (step, 1);
      step = TREE_OPERAND (step, 0);

      /* Strip widening and truncating conversions as well as nops.  */
      if (CONVERT_EXPR_P (step)
	  && INTEGRAL_TYPE_P (TREE_TYPE (TREE_OPERAND (step, 0))))
	step = TREE_OPERAND (step, 0);
      tree type = TREE_TYPE (step);

      /* Get the range of step values that would not cause overflow.  */
      widest_int minv = (wi::to_widest (TYPE_MIN_VALUE (ssizetype))
			 / wi::to_widest (factor));
      widest_int maxv = (wi::to_widest (TYPE_MAX_VALUE (ssizetype))
			 / wi::to_widest (factor));

      /* Get the range of values that the unconverted step actually has.  */
      wide_int step_min, step_max;
      if (TREE_CODE (step) != SSA_NAME
	  || get_range_info (step, &step_min, &step_max) != VR_RANGE)
	{
	  step_min = wi::to_wide (TYPE_MIN_VALUE (type));
	  step_max = wi::to_wide (TYPE_MAX_VALUE (type));
	}

      /* Check whether the unconverted step has an acceptable range.  */
      signop sgn = TYPE_SIGN (type);
      if (wi::les_p (minv, widest_int::from (step_min, sgn))
	  && wi::ges_p (maxv, widest_int::from (step_max, sgn)))
	{
	  if (wi::ge_p (step_min, useful_min, sgn))
	    return ssize_int (useful_min);
	  else if (wi::lt_p (step_max, 0, sgn))
	    return ssize_int (-1);
	  else
	    return fold_convert (ssizetype, step);
	}
    }
  return DR_STEP (dr);
}
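
/* For example (hypothetical GIMPLE, 64-bit ssizetype assumed): if DR's
   step is

   |   step = (ssizetype) n_1 * 4

   for a 32-bit signed SSA name n_1, no value of n_1 can overflow
   ssizetype after scaling by 4, so the sign of n_1 itself decides the
   answer: with USEFUL_MIN == 0 this returns 0 when n_1's recorded range
   is nonnegative, -1 when the range is entirely negative, and
   (ssizetype) n_1 when the sign is unknown.  */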
/* Return a value that is negative iff DR has a negative step.  */

tree
dr_direction_indicator (struct data_reference *dr)
{
  return dr_step_indicator (dr, 0);
}
/* Return a value that is zero iff DR has a zero step.  */

tree
dr_zero_step_indicator (struct data_reference *dr)
{
  return dr_step_indicator (dr, 1);
}
/* Return true if DR is known to have a nonnegative (but possibly zero)
   step.  */

bool
dr_known_forward_stride_p (struct data_reference *dr)
{
  tree indicator = dr_direction_indicator (dr);
  tree neg_step_val = fold_binary (LT_EXPR, boolean_type_node,
				   fold_convert (ssizetype, indicator),
				   ssize_int (0));
  return neg_step_val && integer_zerop (neg_step_val);
}