gcc/tree-ssa-uninit.c
1 /* Predicate aware uninitialized variable warning.
2 Copyright (C) 2001-2021 Free Software Foundation, Inc.
3 Contributed by Xinliang David Li <davidxl@google.com>
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 #define INCLUDE_STRING
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "backend.h"
26 #include "tree.h"
27 #include "gimple.h"
28 #include "tree-pass.h"
29 #include "ssa.h"
30 #include "gimple-pretty-print.h"
31 #include "diagnostic-core.h"
32 #include "fold-const.h"
33 #include "gimple-iterator.h"
34 #include "tree-ssa.h"
35 #include "tree-cfg.h"
36 #include "cfghooks.h"
37 #include "attribs.h"
38 #include "builtins.h"
39 #include "calls.h"
40
41 /* This implements the pass that does predicate aware warning on uses of
42 possibly uninitialized variables. The pass first collects the set of
43 possibly uninitialized SSA names. For each such name, it walks through
44 all its immediate uses. For each immediate use, it rebuilds the condition
45 expression (the predicate) that guards the use. The predicate is then
46 examined to see if the variable is always defined under that same condition.
47 This is done either by pruning the unrealizable paths that lead to the
48 default definitions or by checking if the predicate set that guards the
49 defining paths is a superset of the use predicate. */
50
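/* A minimal illustrative sketch (hypothetical code, not from the GCC
   testsuite) of the kind of situation the predicate analysis handles:
   'x' is only conditionally initialized, but every use of it is guarded
   by the same condition, so no warning should be issued.

       int x;
       bool flag = false;
       if (some_cond)
         {
           x = 1;
           flag = true;
         }
       ...
       if (flag)
         use (x);

   The predicate guarding the use (flag != 0) is compared against the
   predicates guarding the paths that define 'x'; if the use predicate
   is a subset of the definition predicates (or the uninitialized paths
   can be pruned), the use is considered properly guarded.  */
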
51 /* Max PHI args we can handle in pass. */
52 const unsigned max_phi_args = 32;
53
54 /* Pointer set of potentially undefined ssa names, i.e.,
55 ssa names that are defined by phi with operands that
56 are not defined or potentially undefined. */
57 static hash_set<tree> *possibly_undefined_names = 0;
58
59 /* Bit mask handling macros. */
60 #define MASK_SET_BIT(mask, pos) mask |= (1 << pos)
61 #define MASK_TEST_BIT(mask, pos) (mask & (1 << pos))
62 #define MASK_EMPTY(mask) (mask == 0)
63
64 /* Returns the first bit position (starting from LSB)
65    in mask that is nonzero.  Returns -1 if the mask is empty.  */
66 static int
67 get_mask_first_set_bit (unsigned mask)
68 {
69 int pos = 0;
70 if (mask == 0)
71 return -1;
72
73 while ((mask & (1 << pos)) == 0)
74 pos++;
75
76 return pos;
77 }
78 #define MASK_FIRST_SET_BIT(mask) get_mask_first_set_bit (mask)
79
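/* For example (hypothetical SSA names), for a phi such as

       x_3 = PHI <x_1(D), x_2>

   where only argument 0 is possibly undefined, the mask computed by
   compute_uninit_opnds_pos below is 0x1: MASK_TEST_BIT (mask, 0) is
   nonzero, MASK_TEST_BIT (mask, 1) is zero, and
   MASK_FIRST_SET_BIT (mask) is 0.  */
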
80 /* Return true if T, an SSA_NAME, has an undefined value. */
81 static bool
82 has_undefined_value_p (tree t)
83 {
84 return (ssa_undefined_value_p (t)
85 || (possibly_undefined_names
86 && possibly_undefined_names->contains (t)));
87 }
88
89 /* Like has_undefined_value_p, but don't return true if TREE_NO_WARNING
90 is set on SSA_NAME_VAR. */
91
92 static inline bool
93 uninit_undefined_value_p (tree t)
94 {
95 if (!has_undefined_value_p (t))
96 return false;
97 if (SSA_NAME_VAR (t) && TREE_NO_WARNING (SSA_NAME_VAR (t)))
98 return false;
99 return true;
100 }
101
102 /* Emit warnings for uninitialized variables. This is done in two passes.
103
104 The first pass notices real uses of SSA names with undefined values.
105 Such uses are unconditionally uninitialized, and we can be certain that
106 such a use is a mistake. This pass is run before most optimizations,
107 so that we catch as many as we can.
108
109 The second pass follows PHI nodes to find uses that are potentially
110 uninitialized. In this case we can't necessarily prove that the use
111 is really uninitialized. This pass is run after most optimizations,
112    so that we thread as many jumps as possible, and delete as much dead
113 code as possible, in order to reduce false positives. We also look
114 again for plain uninitialized variables, since optimization may have
115 changed conditionally uninitialized to unconditionally uninitialized. */
116
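/* For instance (hypothetical examples), the early pass catches

       int f (void) { int x; return x; }

   which is diagnosed with -Wuninitialized, while the late pass, running
   after jump threading and dead code elimination, handles the
   conditional case

       int g (int c) { int x; if (c) x = 1; return x; }

   which is diagnosed with -Wmaybe-uninitialized.  */
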
117 /* Emit a warning for EXPR based on variable VAR at the point in the
118    program where T, an SSA_NAME, is used uninitialized.  The exact
119    warning text is in GMSGID and DATA is the gimple stmt with info about
120 the location in source code. When DATA is a GIMPLE_PHI, PHIARG_IDX
121 gives which argument of the phi node to take the location from. WC
122 is the warning code. */
123
124 static void
125 warn_uninit (enum opt_code wc, tree t, tree expr, tree var,
126 const char *gmsgid, void *data, location_t phiarg_loc)
127 {
128 gimple *context = (gimple *) data;
129 location_t location, cfun_loc;
130 expanded_location xloc, floc;
131
132   /* Ignore COMPLEX_EXPR, as initializing only a part of a complex
133      turns into a COMPLEX_EXPR with the uninitialized part being
134      set to its previous (undefined) value.  */
135 if (is_gimple_assign (context)
136 && gimple_assign_rhs_code (context) == COMPLEX_EXPR)
137 return;
138 if (!has_undefined_value_p (t))
139 return;
140
141 /* Anonymous SSA_NAMEs shouldn't be uninitialized, but ssa_undefined_value_p
142      can return true if the def stmt of an anonymous SSA_NAME is a
143      COMPLEX_EXPR created for conversion from scalar to complex.  Use the
144      underlying var of the COMPLEX_EXPR's real part in that case.  See PR71581.  */
145 if (expr == NULL_TREE
146 && var == NULL_TREE
147 && SSA_NAME_VAR (t) == NULL_TREE
148 && is_gimple_assign (SSA_NAME_DEF_STMT (t))
149 && gimple_assign_rhs_code (SSA_NAME_DEF_STMT (t)) == COMPLEX_EXPR)
150 {
151 tree v = gimple_assign_rhs1 (SSA_NAME_DEF_STMT (t));
152 if (TREE_CODE (v) == SSA_NAME
153 && has_undefined_value_p (v)
154 && zerop (gimple_assign_rhs2 (SSA_NAME_DEF_STMT (t))))
155 {
156 expr = SSA_NAME_VAR (v);
157 var = expr;
158 }
159 }
160
161 if (expr == NULL_TREE)
162 return;
163
164 /* TREE_NO_WARNING either means we already warned, or the front end
165 wishes to suppress the warning. */
166 if ((context
167 && (gimple_no_warning_p (context)
168 || (gimple_assign_single_p (context)
169 && TREE_NO_WARNING (gimple_assign_rhs1 (context)))))
170 || TREE_NO_WARNING (expr))
171 return;
172
173 if (context != NULL && gimple_has_location (context))
174 location = gimple_location (context);
175 else if (phiarg_loc != UNKNOWN_LOCATION)
176 location = phiarg_loc;
177 else
178 location = DECL_SOURCE_LOCATION (var);
179 location = linemap_resolve_location (line_table, location,
180 LRK_SPELLING_LOCATION, NULL);
181 cfun_loc = DECL_SOURCE_LOCATION (cfun->decl);
182 xloc = expand_location (location);
183 floc = expand_location (cfun_loc);
184 auto_diagnostic_group d;
185 if (warning_at (location, wc, gmsgid, expr))
186 {
187 TREE_NO_WARNING (expr) = 1;
188
189 if (location == DECL_SOURCE_LOCATION (var))
190 return;
191 if (xloc.file != floc.file
192 || linemap_location_before_p (line_table, location, cfun_loc)
193 || linemap_location_before_p (line_table, cfun->function_end_locus,
194 location))
195 inform (DECL_SOURCE_LOCATION (var), "%qD was declared here", var);
196 }
197 }
198
199 struct check_defs_data
200 {
201 /* If we found any may-defs besides must-def clobbers. */
202 bool found_may_defs;
203 };
204
205 /* Callback for walk_aliased_vdefs. */
206
207 static bool
208 check_defs (ao_ref *ref, tree vdef, void *data_)
209 {
210 check_defs_data *data = (check_defs_data *)data_;
211 gimple *def_stmt = SSA_NAME_DEF_STMT (vdef);
212   /* If this is a clobber, walk past it unless it is a kill.  */
213 if (gimple_clobber_p (def_stmt))
214 {
215 if (stmt_kills_ref_p (def_stmt, ref))
216 return true;
217 return false;
218 }
219 /* Found a may-def on this path. */
220 data->found_may_defs = true;
221 return true;
222 }
223
224 /* Counters and limits controlling the depth of analysis and
225 strictness of the warning. */
226 struct wlimits
227 {
228 /* Number of VDEFs encountered. */
229 unsigned int vdef_cnt;
230 /* Number of statements examined by walk_aliased_vdefs. */
231 unsigned int oracle_cnt;
232 /* Limit on the number of statements visited by walk_aliased_vdefs. */
233 unsigned limit;
234 /* Set when basic block with statement is executed unconditionally. */
235 bool always_executed;
236 /* Set to issue -Wmaybe-uninitialized. */
237 bool wmaybe_uninit;
238 };
239
240 /* Determine if REF references an uninitialized operand and diagnose
241 it if so. */
242
243 static tree
244 maybe_warn_operand (ao_ref &ref, gimple *stmt, tree lhs, tree rhs,
245 wlimits &wlims)
246 {
247 bool has_bit_insert = false;
248 use_operand_p luse_p;
249 imm_use_iterator liter;
250
251 if (TREE_NO_WARNING (rhs))
252 return NULL_TREE;
253
254 /* Do not warn if the base was marked so or this is a
255 hard register var. */
256 tree base = ao_ref_base (&ref);
257 if ((VAR_P (base)
258 && DECL_HARD_REGISTER (base))
259 || TREE_NO_WARNING (base))
260 return NULL_TREE;
261
262 /* Do not warn if the access is fully outside of the variable. */
263 poly_int64 decl_size;
264 if (DECL_P (base)
265 && ((known_size_p (ref.size)
266 && known_eq (ref.max_size, ref.size)
267 && known_le (ref.offset + ref.size, 0))
268 || (known_ge (ref.offset, 0)
269 && DECL_SIZE (base)
270 && poly_int_tree_p (DECL_SIZE (base), &decl_size)
271 && known_le (decl_size, ref.offset))))
272 return NULL_TREE;
273
274 /* Do not warn if the result of the access is then used for
275 a BIT_INSERT_EXPR. */
276 if (lhs && TREE_CODE (lhs) == SSA_NAME)
277 FOR_EACH_IMM_USE_FAST (luse_p, liter, lhs)
278 {
279 gimple *use_stmt = USE_STMT (luse_p);
280 /* BIT_INSERT_EXPR first operand should not be considered
281 a use for the purpose of uninit warnings. */
282 if (gassign *ass = dyn_cast <gassign *> (use_stmt))
283 {
284 if (gimple_assign_rhs_code (ass) == BIT_INSERT_EXPR
285 && luse_p->use == gimple_assign_rhs1_ptr (ass))
286 {
287 has_bit_insert = true;
288 break;
289 }
290 }
291 }
292
293 if (has_bit_insert)
294 return NULL_TREE;
295
296   /* Limit the walking to a constant number of stmts after we have
297      already allowed quadratic behavior for small functions, so that
298      the overall behavior stays roughly O(n).  */
299 if (wlims.oracle_cnt > 128 * 128
300 && wlims.oracle_cnt > wlims.vdef_cnt * 2)
301 wlims.limit = 32;
302
303 check_defs_data data;
304 bool fentry_reached = false;
305 data.found_may_defs = false;
306 tree use = gimple_vuse (stmt);
307 if (!use)
308 return NULL_TREE;
309 int res = walk_aliased_vdefs (&ref, use,
310 check_defs, &data, NULL,
311 &fentry_reached, wlims.limit);
312 if (res == -1)
313 {
314 wlims.oracle_cnt += wlims.limit;
315 return NULL_TREE;
316 }
317
318 wlims.oracle_cnt += res;
319 if (data.found_may_defs)
320 return NULL_TREE;
321
322 bool found_alloc = false;
323
324 if (fentry_reached)
325 {
326 if (TREE_CODE (base) == MEM_REF)
327 base = TREE_OPERAND (base, 0);
328
329 /* Follow the chain of SSA_NAME assignments looking for an alloca
330 call (or VLA) or malloc/realloc, or for decls. If any is found
331 (and in the latter case, the operand is a local variable) issue
332 a warning. */
333 while (TREE_CODE (base) == SSA_NAME)
334 {
335 gimple *def_stmt = SSA_NAME_DEF_STMT (base);
336
337 if (is_gimple_call (def_stmt)
338 && gimple_call_builtin_p (def_stmt))
339 {
340 /* Detect uses of uninitialized alloca/VLAs. */
341 tree fndecl = gimple_call_fndecl (def_stmt);
342 const built_in_function fncode = DECL_FUNCTION_CODE (fndecl);
343 if (fncode == BUILT_IN_ALLOCA
344 || fncode == BUILT_IN_ALLOCA_WITH_ALIGN
345 || fncode == BUILT_IN_MALLOC)
346 found_alloc = true;
347 break;
348 }
349
350 if (!is_gimple_assign (def_stmt))
351 break;
352
353 tree_code code = gimple_assign_rhs_code (def_stmt);
354 if (code != ADDR_EXPR && code != POINTER_PLUS_EXPR)
355 break;
356
357 base = gimple_assign_rhs1 (def_stmt);
358 if (TREE_CODE (base) == ADDR_EXPR)
359 base = TREE_OPERAND (base, 0);
360
361 if (DECL_P (base)
362 || TREE_CODE (base) == COMPONENT_REF)
363 rhs = base;
364
365 if (TREE_CODE (base) == MEM_REF)
366 base = TREE_OPERAND (base, 0);
367
368 if (tree ba = get_base_address (base))
369 base = ba;
370 }
371
372 /* Replace the RHS expression with BASE so that it
373 refers to it in the diagnostic (instead of to
374 '<unknown>'). */
375 if (DECL_P (base)
376 && EXPR_P (rhs)
377 && TREE_CODE (rhs) != COMPONENT_REF)
378 rhs = base;
379 }
380
381 /* Do not warn if it can be initialized outside this function.
382 If we did not reach function entry then we found killing
383 clobbers on all paths to entry. */
384 if (!found_alloc
385 && fentry_reached
386 /* ??? We'd like to use ref_may_alias_global_p but that
387 excludes global readonly memory and thus we get bogus
388 warnings from p = cond ? "a" : "b" for example. */
389 && (!VAR_P (base)
390 || is_global_var (base)))
391 return NULL_TREE;
392
393 /* Strip the address-of expression from arrays passed to functions. */
394 if (TREE_CODE (rhs) == ADDR_EXPR)
395 rhs = TREE_OPERAND (rhs, 0);
396
397 /* Check again since RHS may have changed above. */
398 if (TREE_NO_WARNING (rhs))
399 return NULL_TREE;
400
401 /* Avoid warning about empty types such as structs with no members.
402 The first_field() test is important for C++ where the predicate
403 alone isn't always sufficient. */
404 tree rhstype = TREE_TYPE (rhs);
405 if (POINTER_TYPE_P (rhstype))
406 rhstype = TREE_TYPE (rhstype);
407 if (is_empty_type (rhstype))
408 return NULL_TREE;
409
410 bool warned = false;
411   /* We didn't find any may-defs, so on all paths we either reached
412      function entry or a killing clobber.  */
413 location_t location
414 = linemap_resolve_location (line_table, gimple_location (stmt),
415 LRK_SPELLING_LOCATION, NULL);
416 if (wlims.always_executed)
417 {
418 if (warning_at (location, OPT_Wuninitialized,
419 "%G%qE is used uninitialized", stmt, rhs))
420 {
421 /* ??? This is only effective for decls as in
422 gcc.dg/uninit-B-O0.c. Avoid doing this for maybe-uninit
423 uses or accesses by functions as it may hide important
424 locations. */
425 if (lhs)
426 TREE_NO_WARNING (rhs) = 1;
427 warned = true;
428 }
429 }
430 else if (wlims.wmaybe_uninit)
431 warned = warning_at (location, OPT_Wmaybe_uninitialized,
432 "%G%qE may be used uninitialized", stmt, rhs);
433
434 return warned ? base : NULL_TREE;
435 }
436
437
438 /* Diagnose passing addresses of uninitialized objects to either const
439 pointer arguments to functions, or to functions declared with attribute
440 access implying read access to those objects. */
441
442 static void
443 maybe_warn_pass_by_reference (gcall *stmt, wlimits &wlims)
444 {
445 if (!wlims.wmaybe_uninit)
446 return;
447
448 unsigned nargs = gimple_call_num_args (stmt);
449 if (!nargs)
450 return;
451
452 tree fndecl = gimple_call_fndecl (stmt);
453 tree fntype = gimple_call_fntype (stmt);
454 if (!fntype)
455 return;
456
457   /* Const functions do not read their arguments.  */
458 if (gimple_call_flags (stmt) & ECF_CONST)
459 return;
460
461 const built_in_function fncode
462 = (fndecl && gimple_call_builtin_p (stmt, BUILT_IN_NORMAL)
463 ? DECL_FUNCTION_CODE (fndecl) : (built_in_function)BUILT_IN_LAST);
464
465 if (fncode == BUILT_IN_MEMCPY || fncode == BUILT_IN_MEMMOVE)
466 /* Avoid diagnosing calls to raw memory functions (this is overly
467 permissive; consider tightening it up). */
468 return;
469
470   /* Save the current warning setting and replace it with either a "maybe"
471 when passing addresses of uninitialized variables to const-qualified
472 pointers or arguments declared with attribute read_write, or with
473 a "certain" when passing them to arguments declared with attribute
474 read_only. */
475 const bool save_always_executed = wlims.always_executed;
476
477 /* Initialize a map of attribute access specifications for arguments
478      to the function call.  */
479 rdwr_map rdwr_idx;
480 init_attr_rdwr_indices (&rdwr_idx, TYPE_ATTRIBUTES (fntype));
481
482 tree argtype;
483 unsigned argno = 0;
484 function_args_iterator it;
485
486 FOREACH_FUNCTION_ARGS (fntype, argtype, it)
487 {
488 ++argno;
489
490 if (!POINTER_TYPE_P (argtype))
491 continue;
492
493 tree access_size = NULL_TREE;
494 const attr_access* access = rdwr_idx.get (argno - 1);
495 if (access)
496 {
497 if (access->mode == access_none
498 || access->mode == access_write_only)
499 continue;
500
501 if (access->mode == access_deferred
502 && !TYPE_READONLY (TREE_TYPE (argtype)))
503 continue;
504
505 if (save_always_executed && access->mode == access_read_only)
506 /* Attribute read_only arguments imply read access. */
507 wlims.always_executed = true;
508 else
509 /* Attribute read_write arguments are documented as requiring
510 initialized objects but it's expected that aggregates may
511 be only partially initialized regardless. */
512 wlims.always_executed = false;
513
514 if (access->sizarg < nargs)
515 access_size = gimple_call_arg (stmt, access->sizarg);
516 }
517 else if (!TYPE_READONLY (TREE_TYPE (argtype)))
518 continue;
519 else if (save_always_executed && fncode != BUILT_IN_LAST)
520 /* Const-qualified arguments to built-ins imply read access. */
521 wlims.always_executed = true;
522 else
523 /* Const-qualified arguments to ordinary functions imply a likely
524 (but not definitive) read access. */
525 wlims.always_executed = false;
526
527 /* Ignore args we are not going to read from. */
528 if (gimple_call_arg_flags (stmt, argno - 1) & EAF_UNUSED)
529 continue;
530
531 tree arg = gimple_call_arg (stmt, argno - 1);
532
533 ao_ref ref;
534 ao_ref_init_from_ptr_and_size (&ref, arg, access_size);
535 tree argbase = maybe_warn_operand (ref, stmt, NULL_TREE, arg, wlims);
536 if (!argbase)
537 continue;
538
539 if (access && access->mode != access_deferred)
540 {
541 const char* const access_str =
542 TREE_STRING_POINTER (access->to_external_string ());
543
544 if (fndecl)
545 {
546 location_t loc = DECL_SOURCE_LOCATION (fndecl);
547 inform (loc, "in a call to %qD declared with "
548 "attribute %<%s%> here", fndecl, access_str);
549 }
550 else
551 {
552 /* Handle calls through function pointers. */
553 location_t loc = gimple_location (stmt);
554 inform (loc, "in a call to %qT declared with "
555 "attribute %<%s%>", fntype, access_str);
556 }
557 }
558 else
559 {
560 /* For a declaration with no relevant attribute access create
561 a dummy object and use the formatting function to avoid
562 having to complicate things here. */
563 attr_access ptr_access = { };
564 if (!access)
565 access = &ptr_access;
566 const std::string argtypestr = access->array_as_string (argtype);
567 if (fndecl)
568 {
569 location_t loc (DECL_SOURCE_LOCATION (fndecl));
570 inform (loc, "by argument %u of type %s to %qD "
571 "declared here",
572 argno, argtypestr.c_str (), fndecl);
573 }
574 else
575 {
576 /* Handle calls through function pointers. */
577 location_t loc (gimple_location (stmt));
578 inform (loc, "by argument %u of type %s to %qT",
579 argno, argtypestr.c_str (), fntype);
580 }
581 }
582
583 if (DECL_P (argbase))
584 {
585 location_t loc = DECL_SOURCE_LOCATION (argbase);
586 inform (loc, "%qD declared here", argbase);
587 }
588 }
589
590 wlims.always_executed = save_always_executed;
591 }
592
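/* As an illustration (hypothetical declarations), both calls below are
   candidates for a diagnostic from maybe_warn_pass_by_reference:

       __attribute__ ((access (read_only, 1))) void consume (int *);
       void show (const char *);

       void f (void)
       {
         int i;
         char buf[8];
         consume (&i);   // attribute read_only: definite read of 'i'
         show (buf);     // const-qualified pointer: likely read of 'buf'
       }

   The read_only case is treated as a definite read (-Wuninitialized when
   the call is always executed); the plain const-pointer case is treated
   only as a likely read (-Wmaybe-uninitialized).  */
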
593
594 static unsigned int
595 warn_uninitialized_vars (bool wmaybe_uninit)
596 {
597   /* Counters and limits controlling the depth of the warning.  */
598 wlimits wlims = { };
599 wlims.wmaybe_uninit = wmaybe_uninit;
600
601 gimple_stmt_iterator gsi;
602 basic_block bb;
603 FOR_EACH_BB_FN (bb, cfun)
604 {
605 basic_block succ = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));
606 wlims.always_executed = dominated_by_p (CDI_POST_DOMINATORS, succ, bb);
607 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
608 {
609 gimple *stmt = gsi_stmt (gsi);
610 use_operand_p use_p;
611 ssa_op_iter op_iter;
612 tree use;
613
614 if (is_gimple_debug (stmt))
615 continue;
616
617 /* We only do data flow with SSA_NAMEs, so that's all we
618 can warn about. */
619 FOR_EACH_SSA_USE_OPERAND (use_p, stmt, op_iter, SSA_OP_USE)
620 {
621 /* BIT_INSERT_EXPR first operand should not be considered
622 a use for the purpose of uninit warnings. */
623 if (gassign *ass = dyn_cast <gassign *> (stmt))
624 {
625 if (gimple_assign_rhs_code (ass) == BIT_INSERT_EXPR
626 && use_p->use == gimple_assign_rhs1_ptr (ass))
627 continue;
628 }
629 use = USE_FROM_PTR (use_p);
630 if (wlims.always_executed)
631 warn_uninit (OPT_Wuninitialized, use, SSA_NAME_VAR (use),
632 SSA_NAME_VAR (use),
633 "%qD is used uninitialized", stmt,
634 UNKNOWN_LOCATION);
635 else if (wmaybe_uninit)
636 warn_uninit (OPT_Wmaybe_uninitialized, use, SSA_NAME_VAR (use),
637 SSA_NAME_VAR (use),
638 "%qD may be used uninitialized",
639 stmt, UNKNOWN_LOCATION);
640 }
641
642 /* For limiting the alias walk below we count all
643 vdefs in the function. */
644 if (gimple_vdef (stmt))
645 wlims.vdef_cnt++;
646
647 if (gcall *call = dyn_cast <gcall *> (stmt))
648 maybe_warn_pass_by_reference (call, wlims);
649 else if (gimple_assign_load_p (stmt)
650 && gimple_has_location (stmt))
651 {
652 tree rhs = gimple_assign_rhs1 (stmt);
653 tree lhs = gimple_assign_lhs (stmt);
654
655 ao_ref ref;
656 ao_ref_init (&ref, rhs);
657 tree var = maybe_warn_operand (ref, stmt, lhs, rhs, wlims);
658 if (!var)
659 continue;
660
661 if (DECL_P (var))
662 {
663 location_t loc = DECL_SOURCE_LOCATION (var);
664 inform (loc, "%qD declared here", var);
665 }
666 }
667 }
668 }
669
670 return 0;
671 }
672
673 /* Checks if the operand OPND of PHI is defined by
674    another phi that has one operand defined by this PHI
675    while all of its remaining operands are defined.  If so,
676 returns true to skip this operand as being
677 redundant. Can be enhanced to be more general. */
678
679 static bool
680 can_skip_redundant_opnd (tree opnd, gimple *phi)
681 {
682 gimple *op_def;
683 tree phi_def;
684 int i, n;
685
686 phi_def = gimple_phi_result (phi);
687 op_def = SSA_NAME_DEF_STMT (opnd);
688 if (gimple_code (op_def) != GIMPLE_PHI)
689 return false;
690 n = gimple_phi_num_args (op_def);
691 for (i = 0; i < n; ++i)
692 {
693 tree op = gimple_phi_arg_def (op_def, i);
694 if (TREE_CODE (op) != SSA_NAME)
695 continue;
696 if (op != phi_def && uninit_undefined_value_p (op))
697 return false;
698 }
699
700 return true;
701 }
702
703 /* Returns a bit mask holding the positions of arguments in PHI
704 that have empty (or possibly empty) definitions. */
705
706 static unsigned
707 compute_uninit_opnds_pos (gphi *phi)
708 {
709 size_t i, n;
710 unsigned uninit_opnds = 0;
711
712 n = gimple_phi_num_args (phi);
713 /* Bail out for phi with too many args. */
714 if (n > max_phi_args)
715 return 0;
716
717 for (i = 0; i < n; ++i)
718 {
719 tree op = gimple_phi_arg_def (phi, i);
720 if (TREE_CODE (op) == SSA_NAME
721 && uninit_undefined_value_p (op)
722 && !can_skip_redundant_opnd (op, phi))
723 {
724 if (cfun->has_nonlocal_label || cfun->calls_setjmp)
725 {
726 /* Ignore SSA_NAMEs that appear on abnormal edges
727 somewhere. */
728 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (op))
729 continue;
730 }
731 MASK_SET_BIT (uninit_opnds, i);
732 }
733 }
734 return uninit_opnds;
735 }
736
737 /* Find the immediate postdominator PDOM of the specified
738 basic block BLOCK. */
739
740 static inline basic_block
741 find_pdom (basic_block block)
742 {
743 if (block == EXIT_BLOCK_PTR_FOR_FN (cfun))
744 return EXIT_BLOCK_PTR_FOR_FN (cfun);
745 else
746 {
747 basic_block bb = get_immediate_dominator (CDI_POST_DOMINATORS, block);
748 if (!bb)
749 return EXIT_BLOCK_PTR_FOR_FN (cfun);
750 return bb;
751 }
752 }
753
754 /* Find the immediate DOM of the specified basic block BLOCK. */
755
756 static inline basic_block
757 find_dom (basic_block block)
758 {
759 if (block == ENTRY_BLOCK_PTR_FOR_FN (cfun))
760 return ENTRY_BLOCK_PTR_FOR_FN (cfun);
761 else
762 {
763 basic_block bb = get_immediate_dominator (CDI_DOMINATORS, block);
764 if (!bb)
765 return ENTRY_BLOCK_PTR_FOR_FN (cfun);
766 return bb;
767 }
768 }
769
770 /* Returns true if BB1 postdominates BB2 and BB1 is
771 not a loop exit bb. The loop exit bb check is simple and does
772 not cover all cases. */
773
774 static bool
775 is_non_loop_exit_postdominating (basic_block bb1, basic_block bb2)
776 {
777 if (!dominated_by_p (CDI_POST_DOMINATORS, bb2, bb1))
778 return false;
779
780 if (single_pred_p (bb1) && !single_succ_p (bb2))
781 return false;
782
783 return true;
784 }
785
786 /* Find the closest postdominator of a specified BB, which is control
787 equivalent to BB. */
788
789 static inline basic_block
790 find_control_equiv_block (basic_block bb)
791 {
792 basic_block pdom;
793
794 pdom = find_pdom (bb);
795
796 /* Skip the postdominating bb that is also loop exit. */
797 if (!is_non_loop_exit_postdominating (pdom, bb))
798 return NULL;
799
800 if (dominated_by_p (CDI_DOMINATORS, pdom, bb))
801 return pdom;
802
803 return NULL;
804 }
805
806 #define MAX_NUM_CHAINS 8
807 #define MAX_CHAIN_LEN 5
808 #define MAX_POSTDOM_CHECK 8
809 #define MAX_SWITCH_CASES 40
810
811 /* Computes the control dependence chains (paths of edges)
812 for DEP_BB up to the dominating basic block BB (the head node of a
813    chain should be dominated by it).  CD_CHAINS is a pointer to an
814    array holding the result chains.  CUR_CD_CHAIN is the current
815    chain being computed.  *NUM_CHAINS is the total number of chains.  The
816    function returns true if the information is successfully computed,
817    and false if there is no control dependence or it cannot be computed.  */
818
819 static bool
820 compute_control_dep_chain (basic_block bb, basic_block dep_bb,
821 vec<edge> *cd_chains,
822 size_t *num_chains,
823 vec<edge> *cur_cd_chain,
824 int *num_calls)
825 {
826 edge_iterator ei;
827 edge e;
828 size_t i;
829 bool found_cd_chain = false;
830 size_t cur_chain_len = 0;
831
832 if (*num_calls > param_uninit_control_dep_attempts)
833 return false;
834 ++*num_calls;
835
836 /* Could use a set instead. */
837 cur_chain_len = cur_cd_chain->length ();
838 if (cur_chain_len > MAX_CHAIN_LEN)
839 return false;
840
841 for (i = 0; i < cur_chain_len; i++)
842 {
843 edge e = (*cur_cd_chain)[i];
844 /* Cycle detected. */
845 if (e->src == bb)
846 return false;
847 }
848
849 FOR_EACH_EDGE (e, ei, bb->succs)
850 {
851 basic_block cd_bb;
852 int post_dom_check = 0;
853 if (e->flags & (EDGE_FAKE | EDGE_ABNORMAL))
854 continue;
855
856 cd_bb = e->dest;
857 cur_cd_chain->safe_push (e);
858 while (!is_non_loop_exit_postdominating (cd_bb, bb))
859 {
860 if (cd_bb == dep_bb)
861 {
862 /* Found a direct control dependence. */
863 if (*num_chains < MAX_NUM_CHAINS)
864 {
865 cd_chains[*num_chains] = cur_cd_chain->copy ();
866 (*num_chains)++;
867 }
868 found_cd_chain = true;
869 /* Check path from next edge. */
870 break;
871 }
872
873 /* Now check if DEP_BB is indirectly control dependent on BB. */
874 if (compute_control_dep_chain (cd_bb, dep_bb, cd_chains, num_chains,
875 cur_cd_chain, num_calls))
876 {
877 found_cd_chain = true;
878 break;
879 }
880
881 cd_bb = find_pdom (cd_bb);
882 post_dom_check++;
883 if (cd_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
884 || post_dom_check > MAX_POSTDOM_CHECK)
885 break;
886 }
887 cur_cd_chain->pop ();
888 gcc_assert (cur_cd_chain->length () == cur_chain_len);
889 }
890 gcc_assert (cur_cd_chain->length () == cur_chain_len);
891
892 return found_cd_chain;
893 }
894
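/* A small illustration (hypothetical CFG): for

       bb2:  if (a_1 != 0) goto bb3; else goto bb5;
       bb3:  if (b_2 != 0) goto bb4; else goto bb5;
       bb4:  x_3 = 1;
       bb5:  ...

   bb4 is control dependent on the chain of edges [bb2->bb3, bb3->bb4],
   which compute_control_dep_chain records as one entry of CD_CHAINS.
   convert_control_dep_chain_into_preds below turns that chain into the
   composite predicate (a_1 != 0) .AND. (b_2 != 0).  */
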
895 /* The type to represent a simple predicate. */
896
897 struct pred_info
898 {
899 tree pred_lhs;
900 tree pred_rhs;
901 enum tree_code cond_code;
902 bool invert;
903 };
904
905 /* The type to represent a sequence of predicates grouped
906 with .AND. operation. */
907
908 typedef vec<pred_info, va_heap, vl_ptr> pred_chain;
909
910 /* The type to represent a sequence of pred_chains grouped
911 with .OR. operation. */
912
913 typedef vec<pred_chain, va_heap, vl_ptr> pred_chain_union;
914
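/* For example (hypothetical predicates), the guard in

       if (n_1 > 0)
         if (m_2 < 2)
           ... = x_3;

   is represented by the pred_chain { (n_1 > 0), (m_2 < 2) }, and a use
   reachable under either of two independent guards is represented by the
   pred_chain_union { { (n_1 > 0), (m_2 < 2) }, { (k_4 == 0) } }, read as
   (n_1 > 0 .AND. m_2 < 2) .OR. (k_4 == 0).  */
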
915 /* Converts the chains of control dependence edges into a set of
916    predicates.  A control dependence chain is represented by a vector of
917    edges.  DEP_CHAINS points to an array of dependence chains.
918    NUM_CHAINS is the size of the chain array.  One edge in a dependence
919    chain is mapped to a predicate expression represented by the pred_info
920    type.  One dependence chain is converted to a composite predicate that
921    is the result of the AND operation of the pred_info mapped to each edge.
922    A composite predicate is represented by a vector of pred_info.  On
923    return, *PREDS points to the resulting array of composite predicates;
924    its length gives the number of composite predicates.  */
925
926 static bool
927 convert_control_dep_chain_into_preds (vec<edge> *dep_chains,
928 size_t num_chains,
929 pred_chain_union *preds)
930 {
931 bool has_valid_pred = false;
932 size_t i, j;
933 if (num_chains == 0 || num_chains >= MAX_NUM_CHAINS)
934 return false;
935
936 /* Now convert the control dep chain into a set
937 of predicates. */
938 preds->reserve (num_chains);
939
940 for (i = 0; i < num_chains; i++)
941 {
942 vec<edge> one_cd_chain = dep_chains[i];
943
944 has_valid_pred = false;
945 pred_chain t_chain = vNULL;
946 for (j = 0; j < one_cd_chain.length (); j++)
947 {
948 gimple *cond_stmt;
949 gimple_stmt_iterator gsi;
950 basic_block guard_bb;
951 pred_info one_pred;
952 edge e;
953
954 e = one_cd_chain[j];
955 guard_bb = e->src;
956 gsi = gsi_last_bb (guard_bb);
957 /* Ignore empty forwarder blocks. */
958 if (empty_block_p (guard_bb) && single_succ_p (guard_bb))
959 continue;
960 /* An empty basic block here is likely a PHI, and is not one
961 of the cases we handle below. */
962 if (gsi_end_p (gsi))
963 {
964 has_valid_pred = false;
965 break;
966 }
967 cond_stmt = gsi_stmt (gsi);
968 if (is_gimple_call (cond_stmt) && EDGE_COUNT (e->src->succs) >= 2)
969 /* Ignore EH edge. Can add assertion on the other edge's flag. */
970 continue;
971 	  /* Skip if there is essentially one successor.  */
972 if (EDGE_COUNT (e->src->succs) == 2)
973 {
974 edge e1;
975 edge_iterator ei1;
976 bool skip = false;
977
978 FOR_EACH_EDGE (e1, ei1, e->src->succs)
979 {
980 if (EDGE_COUNT (e1->dest->succs) == 0)
981 {
982 skip = true;
983 break;
984 }
985 }
986 if (skip)
987 continue;
988 }
989 if (gimple_code (cond_stmt) == GIMPLE_COND)
990 {
991 one_pred.pred_lhs = gimple_cond_lhs (cond_stmt);
992 one_pred.pred_rhs = gimple_cond_rhs (cond_stmt);
993 one_pred.cond_code = gimple_cond_code (cond_stmt);
994 one_pred.invert = !!(e->flags & EDGE_FALSE_VALUE);
995 t_chain.safe_push (one_pred);
996 has_valid_pred = true;
997 }
998 else if (gswitch *gs = dyn_cast<gswitch *> (cond_stmt))
999 {
1000 /* Avoid quadratic behavior. */
1001 if (gimple_switch_num_labels (gs) > MAX_SWITCH_CASES)
1002 {
1003 has_valid_pred = false;
1004 break;
1005 }
1006 /* Find the case label. */
1007 tree l = NULL_TREE;
1008 unsigned idx;
1009 for (idx = 0; idx < gimple_switch_num_labels (gs); ++idx)
1010 {
1011 tree tl = gimple_switch_label (gs, idx);
1012 if (e->dest == label_to_block (cfun, CASE_LABEL (tl)))
1013 {
1014 if (!l)
1015 l = tl;
1016 else
1017 {
1018 l = NULL_TREE;
1019 break;
1020 }
1021 }
1022 }
1023 /* If more than one label reaches this block or the case
1024 label doesn't have a single value (like the default one)
1025 fail. */
1026 if (!l
1027 || !CASE_LOW (l)
1028 || (CASE_HIGH (l)
1029 && !operand_equal_p (CASE_LOW (l), CASE_HIGH (l), 0)))
1030 {
1031 has_valid_pred = false;
1032 break;
1033 }
1034 one_pred.pred_lhs = gimple_switch_index (gs);
1035 one_pred.pred_rhs = CASE_LOW (l);
1036 one_pred.cond_code = EQ_EXPR;
1037 one_pred.invert = false;
1038 t_chain.safe_push (one_pred);
1039 has_valid_pred = true;
1040 }
1041 else
1042 {
1043 has_valid_pred = false;
1044 break;
1045 }
1046 }
1047
1048 if (!has_valid_pred)
1049 break;
1050 else
1051 preds->safe_push (t_chain);
1052 }
1053 return has_valid_pred;
1054 }
1055
1056 /* Computes all control dependence chains for USE_BB. The control
1057 dependence chains are then converted to an array of composite
1058 predicates pointed to by PREDS. PHI_BB is the basic block of
1059 the phi whose result is used in USE_BB. */
1060
1061 static bool
1062 find_predicates (pred_chain_union *preds,
1063 basic_block phi_bb,
1064 basic_block use_bb)
1065 {
1066 size_t num_chains = 0, i;
1067 int num_calls = 0;
1068 vec<edge> dep_chains[MAX_NUM_CHAINS];
1069 auto_vec<edge, MAX_CHAIN_LEN + 1> cur_chain;
1070 bool has_valid_pred = false;
1071 basic_block cd_root = 0;
1072
1073 /* First find the closest bb that is control equivalent to PHI_BB
1074 that also dominates USE_BB. */
1075 cd_root = phi_bb;
1076 while (dominated_by_p (CDI_DOMINATORS, use_bb, cd_root))
1077 {
1078 basic_block ctrl_eq_bb = find_control_equiv_block (cd_root);
1079 if (ctrl_eq_bb && dominated_by_p (CDI_DOMINATORS, use_bb, ctrl_eq_bb))
1080 cd_root = ctrl_eq_bb;
1081 else
1082 break;
1083 }
1084
1085 compute_control_dep_chain (cd_root, use_bb, dep_chains, &num_chains,
1086 &cur_chain, &num_calls);
1087
1088 has_valid_pred
1089 = convert_control_dep_chain_into_preds (dep_chains, num_chains, preds);
1090 for (i = 0; i < num_chains; i++)
1091 dep_chains[i].release ();
1092 return has_valid_pred;
1093 }
1094
1095 /* Computes the set of incoming edges of PHI that have non-empty
1096 definitions of a phi chain. The collection will be done
1097 recursively on operands that are defined by phis. CD_ROOT
1098 is the control dependence root. *EDGES holds the result, and
1099 VISITED_PHIS is a pointer set for detecting cycles. */
1100
1101 static void
1102 collect_phi_def_edges (gphi *phi, basic_block cd_root,
1103 auto_vec<edge> *edges,
1104 hash_set<gimple *> *visited_phis)
1105 {
1106 size_t i, n;
1107 edge opnd_edge;
1108 tree opnd;
1109
1110 if (visited_phis->add (phi))
1111 return;
1112
1113 n = gimple_phi_num_args (phi);
1114 for (i = 0; i < n; i++)
1115 {
1116 opnd_edge = gimple_phi_arg_edge (phi, i);
1117 opnd = gimple_phi_arg_def (phi, i);
1118
1119 if (TREE_CODE (opnd) != SSA_NAME)
1120 {
1121 if (dump_file && (dump_flags & TDF_DETAILS))
1122 {
1123 fprintf (dump_file, "\n[CHECK] Found def edge %d in ", (int) i);
1124 print_gimple_stmt (dump_file, phi, 0);
1125 }
1126 edges->safe_push (opnd_edge);
1127 }
1128 else
1129 {
1130 gimple *def = SSA_NAME_DEF_STMT (opnd);
1131
1132 if (gimple_code (def) == GIMPLE_PHI
1133 && dominated_by_p (CDI_DOMINATORS, gimple_bb (def), cd_root))
1134 collect_phi_def_edges (as_a<gphi *> (def), cd_root, edges,
1135 visited_phis);
1136 else if (!uninit_undefined_value_p (opnd))
1137 {
1138 if (dump_file && (dump_flags & TDF_DETAILS))
1139 {
1140 fprintf (dump_file, "\n[CHECK] Found def edge %d in ",
1141 (int) i);
1142 print_gimple_stmt (dump_file, phi, 0);
1143 }
1144 edges->safe_push (opnd_edge);
1145 }
1146 }
1147 }
1148 }
1149
1150 /* For each use edge of PHI, computes all control dependence chains.
1151 The control dependence chains are then converted to an array of
1152 composite predicates pointed to by PREDS. */
1153
1154 static bool
1155 find_def_preds (pred_chain_union *preds, gphi *phi)
1156 {
1157 size_t num_chains = 0, i, n;
1158 vec<edge> dep_chains[MAX_NUM_CHAINS];
1159 auto_vec<edge, MAX_CHAIN_LEN + 1> cur_chain;
1160 auto_vec<edge> def_edges;
1161 bool has_valid_pred = false;
1162 basic_block phi_bb, cd_root = 0;
1163
1164 phi_bb = gimple_bb (phi);
1165 /* First find the closest dominating bb to be
1166 the control dependence root. */
1167 cd_root = find_dom (phi_bb);
1168 if (!cd_root)
1169 return false;
1170
1171 hash_set<gimple *> visited_phis;
1172 collect_phi_def_edges (phi, cd_root, &def_edges, &visited_phis);
1173
1174 n = def_edges.length ();
1175 if (n == 0)
1176 return false;
1177
1178 for (i = 0; i < n; i++)
1179 {
1180 size_t prev_nc, j;
1181 int num_calls = 0;
1182 edge opnd_edge;
1183
1184 opnd_edge = def_edges[i];
1185 prev_nc = num_chains;
1186 compute_control_dep_chain (cd_root, opnd_edge->src, dep_chains,
1187 &num_chains, &cur_chain, &num_calls);
1188
1189 /* Now update the newly added chains with
1190 the phi operand edge: */
1191 if (EDGE_COUNT (opnd_edge->src->succs) > 1)
1192 {
1193 if (prev_nc == num_chains && num_chains < MAX_NUM_CHAINS)
1194 dep_chains[num_chains++] = vNULL;
1195 for (j = prev_nc; j < num_chains; j++)
1196 dep_chains[j].safe_push (opnd_edge);
1197 }
1198 }
1199
1200 has_valid_pred
1201 = convert_control_dep_chain_into_preds (dep_chains, num_chains, preds);
1202 for (i = 0; i < num_chains; i++)
1203 dep_chains[i].release ();
1204 return has_valid_pred;
1205 }
1206
1207 /* Dump a pred_info. */
1208
1209 static void
1210 dump_pred_info (pred_info one_pred)
1211 {
1212 if (one_pred.invert)
1213 fprintf (dump_file, " (.NOT.) ");
1214 print_generic_expr (dump_file, one_pred.pred_lhs);
1215 fprintf (dump_file, " %s ", op_symbol_code (one_pred.cond_code));
1216 print_generic_expr (dump_file, one_pred.pred_rhs);
1217 }
1218
1219 /* Dump a pred_chain. */
1220
1221 static void
1222 dump_pred_chain (pred_chain one_pred_chain)
1223 {
1224 size_t np = one_pred_chain.length ();
1225 for (size_t j = 0; j < np; j++)
1226 {
1227 dump_pred_info (one_pred_chain[j]);
1228 if (j < np - 1)
1229 fprintf (dump_file, " (.AND.) ");
1230 else
1231 fprintf (dump_file, "\n");
1232 }
1233 }
1234
1235 /* Dumps the predicates (PREDS) for USESTMT. */
1236
1237 static void
1238 dump_predicates (gimple *usestmt, pred_chain_union preds, const char *msg)
1239 {
1240 fprintf (dump_file, "%s", msg);
1241 if (usestmt)
1242 {
1243 print_gimple_stmt (dump_file, usestmt, 0);
1244 fprintf (dump_file, "is guarded by :\n\n");
1245 }
1246 size_t num_preds = preds.length ();
1247 for (size_t i = 0; i < num_preds; i++)
1248 {
1249 dump_pred_chain (preds[i]);
1250 if (i < num_preds - 1)
1251 fprintf (dump_file, "(.OR.)\n");
1252 else
1253 fprintf (dump_file, "\n\n");
1254 }
1255 }
1256
1257 /* Destroys the predicate set *PREDS. */
1258
1259 static void
1260 destroy_predicate_vecs (pred_chain_union *preds)
1261 {
1262 size_t i;
1263
1264 size_t n = preds->length ();
1265 for (i = 0; i < n; i++)
1266 (*preds)[i].release ();
1267 preds->release ();
1268 }
1269
1270 /* Computes the 'normalized' conditional code with operand
1271 swapping and condition inversion. */
1272
1273 static enum tree_code
1274 get_cmp_code (enum tree_code orig_cmp_code, bool swap_cond, bool invert)
1275 {
1276 enum tree_code tc = orig_cmp_code;
1277
1278 if (swap_cond)
1279 tc = swap_tree_comparison (orig_cmp_code);
1280 if (invert)
1281 tc = invert_tree_comparison (tc, false);
1282
1283 switch (tc)
1284 {
1285 case LT_EXPR:
1286 case LE_EXPR:
1287 case GT_EXPR:
1288 case GE_EXPR:
1289 case EQ_EXPR:
1290 case NE_EXPR:
1291 break;
1292 default:
1293 return ERROR_MARK;
1294 }
1295 return tc;
1296 }
1297
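/* For instance, GT_EXPR with SWAP_COND becomes LT_EXPR, and additionally
   inverting that yields GE_EXPR; codes outside the six comparisons above
   (e.g. UNORDERED_EXPR) are rejected as ERROR_MARK.  */
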
1298 /* Returns whether VAL CMPC BOUNDARY is true. */
1299
1300 static bool
1301 is_value_included_in (tree val, tree boundary, enum tree_code cmpc)
1302 {
1303 bool inverted = false;
1304 bool result;
1305
1306   /* Only handle integer constants here.  */
1307 if (TREE_CODE (val) != INTEGER_CST || TREE_CODE (boundary) != INTEGER_CST)
1308 return true;
1309
1310 if (cmpc == GE_EXPR || cmpc == GT_EXPR || cmpc == NE_EXPR)
1311 {
1312 cmpc = invert_tree_comparison (cmpc, false);
1313 inverted = true;
1314 }
1315
1316 if (cmpc == EQ_EXPR)
1317 result = tree_int_cst_equal (val, boundary);
1318 else if (cmpc == LT_EXPR)
1319 result = tree_int_cst_lt (val, boundary);
1320 else
1321 {
1322 gcc_assert (cmpc == LE_EXPR);
1323 result = tree_int_cst_le (val, boundary);
1324 }
1325
1326 if (inverted)
1327 result ^= 1;
1328
1329 return result;
1330 }
1331
1332 /* Returns whether VAL satisfies (x CMPC BOUNDARY) predicate. CMPC can be
1333 either one of the range comparison codes ({GE,LT,EQ,NE}_EXPR and the like),
1334 or BIT_AND_EXPR. EXACT_P is only meaningful for the latter. It modifies the
1335 question from whether VAL & BOUNDARY != 0 to whether VAL & BOUNDARY == VAL.
1336 For other values of CMPC, EXACT_P is ignored. */
1337
1338 static bool
1339 value_sat_pred_p (tree val, tree boundary, enum tree_code cmpc,
1340 bool exact_p = false)
1341 {
1342 if (cmpc != BIT_AND_EXPR)
1343 return is_value_included_in (val, boundary, cmpc);
1344
1345 wide_int andw = wi::to_wide (val) & wi::to_wide (boundary);
1346 if (exact_p)
1347 return andw == wi::to_wide (val);
1348 else
1349 return andw.to_uhwi ();
1350 }
1351
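/* A couple of worked cases (operands written as plain integers for
   brevity): is_value_included_in (3, 5, LT_EXPR) is true since 3 < 5,
   while is_value_included_in (3, 5, GE_EXPR) inverts the code to LT_EXPR
   and negates the result, giving false.  For the BIT_AND_EXPR form,
   value_sat_pred_p (0x6, 0x4, BIT_AND_EXPR) is true since 0x6 & 0x4 is
   nonzero, but with EXACT_P it is false since 0x6 & 0x4 != 0x6.  */
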
1352 /* Returns true if PRED is common among all the predicate
1353 chains (PREDS) (and therefore can be factored out). */
1354
1355 static bool
1356 find_matching_predicate_in_rest_chains (pred_info pred, pred_chain_union preds)
1357 {
1358 size_t i, j, n;
1359
1360   /* Trivial case.  */
1361 if (preds.length () == 1)
1362 return true;
1363
1364 for (i = 1; i < preds.length (); i++)
1365 {
1366 bool found = false;
1367 pred_chain one_chain = preds[i];
1368 n = one_chain.length ();
1369 for (j = 0; j < n; j++)
1370 {
1371 pred_info pred2 = one_chain[j];
1372 /* Can relax the condition comparison to not
1373 use address comparison. However, the most common
1374 case is that multiple control dependent paths share
1375 a common path prefix, so address comparison should
1376 be ok. */
1377
1378 if (operand_equal_p (pred2.pred_lhs, pred.pred_lhs, 0)
1379 && operand_equal_p (pred2.pred_rhs, pred.pred_rhs, 0)
1380 && pred2.invert == pred.invert)
1381 {
1382 found = true;
1383 break;
1384 }
1385 }
1386 if (!found)
1387 return false;
1388 }
1389 return true;
1390 }
1391
1392 /* Forward declaration. */
1393 static bool is_use_properly_guarded (gimple *use_stmt,
1394 basic_block use_bb,
1395 gphi *phi,
1396 unsigned uninit_opnds,
1397 pred_chain_union *def_preds,
1398 hash_set<gphi *> *visited_phis);
1399
1400 /* Returns true if all uninitialized opnds are pruned. Returns false
1401 otherwise. PHI is the phi node with uninitialized operands,
1402    UNINIT_OPNDS is the bitmap of the uninitialized operand positions,
1403 FLAG_DEF is the statement defining the flag guarding the use of the
1404 PHI output, BOUNDARY_CST is the const value used in the predicate
1405 associated with the flag, CMP_CODE is the comparison code used in
1406 the predicate, VISITED_PHIS is the pointer set of phis visited, and
1407 VISITED_FLAG_PHIS is the pointer to the pointer set of flag definitions
1408 that are also phis.
1409
1410 Example scenario:
1411
1412 BB1:
1413 flag_1 = phi <0, 1> // (1)
1414 var_1 = phi <undef, some_val>
1415
1416
1417 BB2:
1418 flag_2 = phi <0, flag_1, flag_1> // (2)
1419 var_2 = phi <undef, var_1, var_1>
1420 if (flag_2 == 1)
1421 goto BB3;
1422
1423 BB3:
1424 use of var_2 // (3)
1425
1426 Because some flag arg in (1) is not constant, if we do not look into the
1427 flag phis recursively, it is conservatively treated as unknown and var_1
1428    is assumed to flow into the use at (3).  Since var_1 is potentially
1429    uninitialized, a false warning will be emitted.
1430 Checking recursively into (1), the compiler can find out that only some_val
1431 (which is defined) can flow into (3) which is OK. */
1432
1433 static bool
1434 prune_uninit_phi_opnds (gphi *phi, unsigned uninit_opnds, gphi *flag_def,
1435 tree boundary_cst, enum tree_code cmp_code,
1436 hash_set<gphi *> *visited_phis,
1437 bitmap *visited_flag_phis)
1438 {
1439 unsigned i;
1440
1441 for (i = 0; i < MIN (max_phi_args, gimple_phi_num_args (flag_def)); i++)
1442 {
1443 tree flag_arg;
1444
1445 if (!MASK_TEST_BIT (uninit_opnds, i))
1446 continue;
1447
1448 flag_arg = gimple_phi_arg_def (flag_def, i);
1449 if (!is_gimple_constant (flag_arg))
1450 {
1451 gphi *flag_arg_def, *phi_arg_def;
1452 tree phi_arg;
1453 unsigned uninit_opnds_arg_phi;
1454
1455 if (TREE_CODE (flag_arg) != SSA_NAME)
1456 return false;
1457 flag_arg_def = dyn_cast<gphi *> (SSA_NAME_DEF_STMT (flag_arg));
1458 if (!flag_arg_def)
1459 return false;
1460
1461 phi_arg = gimple_phi_arg_def (phi, i);
1462 if (TREE_CODE (phi_arg) != SSA_NAME)
1463 return false;
1464
1465 phi_arg_def = dyn_cast<gphi *> (SSA_NAME_DEF_STMT (phi_arg));
1466 if (!phi_arg_def)
1467 return false;
1468
1469 if (gimple_bb (phi_arg_def) != gimple_bb (flag_arg_def))
1470 return false;
1471
1472 if (!*visited_flag_phis)
1473 *visited_flag_phis = BITMAP_ALLOC (NULL);
1474
1475 tree phi_result = gimple_phi_result (flag_arg_def);
1476 if (bitmap_bit_p (*visited_flag_phis, SSA_NAME_VERSION (phi_result)))
1477 return false;
1478
1479 bitmap_set_bit (*visited_flag_phis,
1480 SSA_NAME_VERSION (gimple_phi_result (flag_arg_def)));
1481
1482 /* Now recursively prune the uninitialized phi args. */
1483 uninit_opnds_arg_phi = compute_uninit_opnds_pos (phi_arg_def);
1484 if (!prune_uninit_phi_opnds
1485 (phi_arg_def, uninit_opnds_arg_phi, flag_arg_def, boundary_cst,
1486 cmp_code, visited_phis, visited_flag_phis))
1487 return false;
1488
1489 phi_result = gimple_phi_result (flag_arg_def);
1490 bitmap_clear_bit (*visited_flag_phis, SSA_NAME_VERSION (phi_result));
1491 continue;
1492 }
1493
1494 /* Now check if the constant is in the guarded range. */
1495 if (is_value_included_in (flag_arg, boundary_cst, cmp_code))
1496 {
1497 tree opnd;
1498 gimple *opnd_def;
1499
1500 	      /* Now we know that this undefined edge is not pruned.
1501 		 If the operand is defined by another phi, we can further
1502 		 prune the incoming edges of that phi by checking
1503 		 the predicates of this operand.  */
1504
1505 opnd = gimple_phi_arg_def (phi, i);
1506 opnd_def = SSA_NAME_DEF_STMT (opnd);
1507 if (gphi *opnd_def_phi = dyn_cast <gphi *> (opnd_def))
1508 {
1509 edge opnd_edge;
1510 unsigned uninit_opnds2 = compute_uninit_opnds_pos (opnd_def_phi);
1511 if (!MASK_EMPTY (uninit_opnds2))
1512 {
1513 pred_chain_union def_preds = vNULL;
1514 bool ok;
1515 opnd_edge = gimple_phi_arg_edge (phi, i);
1516 ok = is_use_properly_guarded (phi,
1517 opnd_edge->src,
1518 opnd_def_phi,
1519 uninit_opnds2,
1520 &def_preds,
1521 visited_phis);
1522 destroy_predicate_vecs (&def_preds);
1523 if (!ok)
1524 return false;
1525 }
1526 }
1527 else
1528 return false;
1529 }
1530 }
1531
1532 return true;
1533 }
1534
1535 /* A helper function that finds the predicate to be examined against the
1536    uninit paths.  If there is no "flag_var cmp const" form predicate, the
1537    function tries to find a predicate of the form "flag_var cmp flag_var"
1538    with value range info.  PHI is the phi node whose incoming (undefined)
1539    paths need to be examined.  On success, the function returns the
1540    comparison code, sets FLAG_DEF to the defining gimple of the flag_var,
1541    and sets BOUNDARY_CST to the boundary constant.  On failure, it returns ERROR_MARK.  */
1542
1543 static enum tree_code
1544 find_var_cmp_const (pred_chain_union preds, gphi *phi, gimple **flag_def,
1545 tree *boundary_cst)
1546 {
1547 enum tree_code vrinfo_code = ERROR_MARK, code;
1548 gimple *vrinfo_def = NULL;
1549 tree vrinfo_cst = NULL, cond_lhs, cond_rhs;
1550
1551 gcc_assert (preds.length () > 0);
1552 pred_chain the_pred_chain = preds[0];
1553 for (unsigned i = 0; i < the_pred_chain.length (); i++)
1554 {
1555 bool use_vrinfo_p = false;
1556 pred_info the_pred = the_pred_chain[i];
1557 cond_lhs = the_pred.pred_lhs;
1558 cond_rhs = the_pred.pred_rhs;
1559 if (cond_lhs == NULL_TREE || cond_rhs == NULL_TREE)
1560 continue;
1561
1562 code = get_cmp_code (the_pred.cond_code, false, the_pred.invert);
1563 if (code == ERROR_MARK)
1564 continue;
1565
1566 if (TREE_CODE (cond_lhs) == SSA_NAME && is_gimple_constant (cond_rhs))
1567 ;
1568 else if (TREE_CODE (cond_rhs) == SSA_NAME
1569 && is_gimple_constant (cond_lhs))
1570 {
1571 std::swap (cond_lhs, cond_rhs);
1572 if ((code = get_cmp_code (code, true, false)) == ERROR_MARK)
1573 continue;
1574 }
1575 /* Check if we can take advantage of "flag_var comp flag_var" predicate
1576 	 with value range info.  Note only the first such case is handled.  */
1577 else if (vrinfo_code == ERROR_MARK
1578 && TREE_CODE (cond_lhs) == SSA_NAME
1579 && TREE_CODE (cond_rhs) == SSA_NAME)
1580 {
1581 gimple* lhs_def = SSA_NAME_DEF_STMT (cond_lhs);
1582 if (!lhs_def || gimple_code (lhs_def) != GIMPLE_PHI
1583 || gimple_bb (lhs_def) != gimple_bb (phi))
1584 {
1585 std::swap (cond_lhs, cond_rhs);
1586 if ((code = get_cmp_code (code, true, false)) == ERROR_MARK)
1587 continue;
1588 }
1589
1590 /* Check value range info of rhs, do following transforms:
1591 flag_var < [min, max] -> flag_var < max
1592 flag_var > [min, max] -> flag_var > min
1593
1594 We can also transform LE_EXPR/GE_EXPR to LT_EXPR/GT_EXPR:
1595 flag_var <= [min, max] -> flag_var < [min, max+1]
1596 flag_var >= [min, max] -> flag_var > [min-1, max]
1597 if no overflow/wrap. */
1598 wide_int min, max;
1599 tree type = TREE_TYPE (cond_lhs);
1600 if (!INTEGRAL_TYPE_P (type)
1601 || get_range_info (cond_rhs, &min, &max) != VR_RANGE)
1602 continue;
1603 if (code == LE_EXPR
1604 && max != wi::max_value (TYPE_PRECISION (type), TYPE_SIGN (type)))
1605 {
1606 code = LT_EXPR;
1607 max = max + 1;
1608 }
1609 if (code == GE_EXPR
1610 && min != wi::min_value (TYPE_PRECISION (type), TYPE_SIGN (type)))
1611 {
1612 code = GT_EXPR;
1613 min = min - 1;
1614 }
1615 if (code == LT_EXPR)
1616 cond_rhs = wide_int_to_tree (type, max);
1617 else if (code == GT_EXPR)
1618 cond_rhs = wide_int_to_tree (type, min);
1619 else
1620 continue;
1621
1622 use_vrinfo_p = true;
1623 }
1624 else
1625 continue;
1626
1627 if ((*flag_def = SSA_NAME_DEF_STMT (cond_lhs)) == NULL)
1628 continue;
1629
1630 if (gimple_code (*flag_def) != GIMPLE_PHI
1631 || gimple_bb (*flag_def) != gimple_bb (phi)
1632 || !find_matching_predicate_in_rest_chains (the_pred, preds))
1633 continue;
1634
1635 /* Return if any "flag_var comp const" predicate is found. */
1636 if (!use_vrinfo_p)
1637 {
1638 *boundary_cst = cond_rhs;
1639 return code;
1640 }
1641 /* Record if any "flag_var comp flag_var[vinfo]" predicate is found. */
1642 else if (vrinfo_code == ERROR_MARK)
1643 {
1644 vrinfo_code = code;
1645 vrinfo_def = *flag_def;
1646 vrinfo_cst = cond_rhs;
1647 }
1648 }
1649 /* Return the "flag_var cmp flag_var[vinfo]" predicate we found. */
1650 if (vrinfo_code != ERROR_MARK)
1651 {
1652 *flag_def = vrinfo_def;
1653 *boundary_cst = vrinfo_cst;
1654 }
1655 return vrinfo_code;
1656 }
1657
1658 /* A helper function that determines if the predicate set
1659    of the use does not overlap with that of the uninit paths.
1660    The most common scenario of a guarded use is in Example 1:
1661 Example 1:
1662 if (some_cond)
1663 {
1664 x = ...;
1665 flag = true;
1666 }
1667
1668 ... some code ...
1669
1670 if (flag)
1671 use (x);
1672
1673 The real world examples are usually more complicated, but similar
1674 and usually result from inlining:
1675
1676 bool init_func (int * x)
1677 {
1678 if (some_cond)
1679 return false;
1680 *x = ..
1681 return true;
1682 }
1683
1684 void foo (..)
1685 {
1686 int x;
1687
1688 if (!init_func (&x))
1689 return;
1690
1691 .. some_code ...
1692 use (x);
1693 }
1694
1695 Another possible use scenario is in the following trivial example:
1696
1697 Example 2:
1698 if (n > 0)
1699 x = 1;
1700 ...
1701 if (n > 0)
1702 {
1703 if (m < 2)
1704 .. = x;
1705 }
1706
1707 Predicate analysis needs to compute the composite predicate:
1708
1709 1) 'x' use predicate: (n > 0) .AND. (m < 2)
1710 2) 'x' default value (non-def) predicate: .NOT. (n > 0)
1711 (the predicate chain for phi operand defs can be computed
1712 starting from a bb that is control equivalent to the phi's
1713 bb and is dominating the operand def.)
1714
1715 and check overlapping:
1716 (n > 0) .AND. (m < 2) .AND. (.NOT. (n > 0))
1717 <==> false
1718
1719    This implementation provides a framework that can handle these
1720    scenarios.  (Note that many simple cases are handled properly
1721    without the predicate analysis -- this is due to the jump threading
1722    transformation, which eliminates the merge point and thus makes
1723    path-sensitive analysis unnecessary.)
1724
1725 PHI is the phi node whose incoming (undefined) paths need to be
1726 pruned, and UNINIT_OPNDS is the bitmap holding uninit operand
1727 positions. VISITED_PHIS is the pointer set of phi stmts being
1728 checked. */
1729
1730 static bool
1731 use_pred_not_overlap_with_undef_path_pred (pred_chain_union preds,
1732 gphi *phi, unsigned uninit_opnds,
1733 hash_set<gphi *> *visited_phis)
1734 {
1735 gimple *flag_def = 0;
1736 tree boundary_cst = 0;
1737 enum tree_code cmp_code;
1738 bitmap visited_flag_phis = NULL;
1739 bool all_pruned = false;
1740
1741 /* Find within the common prefix of multiple predicate chains
1742 a predicate that is a comparison of a flag variable against
1743 a constant. */
1744 cmp_code = find_var_cmp_const (preds, phi, &flag_def, &boundary_cst);
1745 if (cmp_code == ERROR_MARK)
1746 return false;
1747
1748   /* Now check that every uninit incoming edge has a constant flag value
1749 that is in conflict with the use guard/predicate. */
1750 all_pruned = prune_uninit_phi_opnds
1751 (phi, uninit_opnds, as_a<gphi *> (flag_def), boundary_cst, cmp_code,
1752 visited_phis, &visited_flag_phis);
1753
1754 if (visited_flag_phis)
1755 BITMAP_FREE (visited_flag_phis);
1756
1757 return all_pruned;
1758 }
1759
1760 /* The helper function returns true if two predicates X1 and X2
1761    are equivalent.  It assumes the expressions have already been
1762    properly re-associated.  */
1763
1764 static inline bool
1765 pred_equal_p (pred_info x1, pred_info x2)
1766 {
1767 enum tree_code c1, c2;
1768 if (!operand_equal_p (x1.pred_lhs, x2.pred_lhs, 0)
1769 || !operand_equal_p (x1.pred_rhs, x2.pred_rhs, 0))
1770 return false;
1771
1772 c1 = x1.cond_code;
1773 if (x1.invert != x2.invert
1774 && TREE_CODE_CLASS (x2.cond_code) == tcc_comparison)
1775 c2 = invert_tree_comparison (x2.cond_code, false);
1776 else
1777 c2 = x2.cond_code;
1778
1779 return c1 == c2;
1780 }
1781
1782 /* Returns true if the predicate is testing !=.  */
1783
1784 static inline bool
1785 is_neq_relop_p (pred_info pred)
1786 {
1787
1788 return ((pred.cond_code == NE_EXPR && !pred.invert)
1789 || (pred.cond_code == EQ_EXPR && pred.invert));
1790 }
1791
1792 /* Returns true if pred is of the form X != 0. */
1793
1794 static inline bool
1795 is_neq_zero_form_p (pred_info pred)
1796 {
1797 if (!is_neq_relop_p (pred) || !integer_zerop (pred.pred_rhs)
1798 || TREE_CODE (pred.pred_lhs) != SSA_NAME)
1799 return false;
1800 return true;
1801 }
1802
1803 /* The helper function returns true if the predicate X1
1804    is equivalent to X2 != 0.  */
1805
1806 static inline bool
1807 pred_expr_equal_p (pred_info x1, tree x2)
1808 {
1809 if (!is_neq_zero_form_p (x1))
1810 return false;
1811
1812 return operand_equal_p (x1.pred_lhs, x2, 0);
1813 }
1814
1815 /* Returns true if the domain of the single predicate expression
1816 EXPR1 is a subset of that of EXPR2. Returns false if it
1817 cannot be proved. */
1818
1819 static bool
1820 is_pred_expr_subset_of (pred_info expr1, pred_info expr2)
1821 {
1822 enum tree_code code1, code2;
1823
1824 if (pred_equal_p (expr1, expr2))
1825 return true;
1826
1827 if ((TREE_CODE (expr1.pred_rhs) != INTEGER_CST)
1828 || (TREE_CODE (expr2.pred_rhs) != INTEGER_CST))
1829 return false;
1830
1831 if (!operand_equal_p (expr1.pred_lhs, expr2.pred_lhs, 0))
1832 return false;
1833
1834 code1 = expr1.cond_code;
1835 if (expr1.invert)
1836 code1 = invert_tree_comparison (code1, false);
1837 code2 = expr2.cond_code;
1838 if (expr2.invert)
1839 code2 = invert_tree_comparison (code2, false);
1840
1841 if (code2 == NE_EXPR && code1 == NE_EXPR)
1842 return false;
1843
1844 if (code2 == NE_EXPR)
1845 return !value_sat_pred_p (expr2.pred_rhs, expr1.pred_rhs, code1);
1846
1847 if (code1 == EQ_EXPR)
1848 return value_sat_pred_p (expr1.pred_rhs, expr2.pred_rhs, code2);
1849
1850 if (code1 == code2)
1851 return value_sat_pred_p (expr1.pred_rhs, expr2.pred_rhs, code2,
1852 code1 == BIT_AND_EXPR);
1853
1854 return false;
1855 }
1856
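/* E.g. (x == 3) is a subset of (x < 10) since 3 < 10, and (x < 5) is a
   subset of (x < 10); (x < 10) is not provably a subset of (x < 5), so
   false is returned for that direction.  (Hypothetical predicates with
   integer constant RHS.)  */
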
1857 /* Returns true if the domain of PRED1 is a subset
1858 of that of PRED2. Returns false if it cannot be proved so. */
1859
1860 static bool
1861 is_pred_chain_subset_of (pred_chain pred1, pred_chain pred2)
1862 {
1863 size_t np1, np2, i1, i2;
1864
1865 np1 = pred1.length ();
1866 np2 = pred2.length ();
1867
1868 for (i2 = 0; i2 < np2; i2++)
1869 {
1870 bool found = false;
1871 pred_info info2 = pred2[i2];
1872 for (i1 = 0; i1 < np1; i1++)
1873 {
1874 pred_info info1 = pred1[i1];
1875 if (is_pred_expr_subset_of (info1, info2))
1876 {
1877 found = true;
1878 break;
1879 }
1880 }
1881 if (!found)
1882 return false;
1883 }
1884 return true;
1885 }
1886
1887 /* Returns true if the domain defined by
1888 one pred chain ONE_PRED is a subset of the domain
1889 of *PREDS. It returns false if ONE_PRED's domain is
1890 not a subset of any of the sub-domains of PREDS
1891    (corresponding to each individual chain in it), even
1892    though it may still be a subset of the whole domain
1893    of PREDS, which is the union (ORed) of all its subdomains.
1894 In other words, the result is conservative. */
1895
1896 static bool
1897 is_included_in (pred_chain one_pred, pred_chain_union preds)
1898 {
1899 size_t i;
1900 size_t n = preds.length ();
1901
1902 for (i = 0; i < n; i++)
1903 {
1904 if (is_pred_chain_subset_of (one_pred, preds[i]))
1905 return true;
1906 }
1907
1908 return false;
1909 }
1910
1911 /* Compares two predicate sets PREDS1 and PREDS2 and returns
1912 true if the domain defined by PREDS1 is a superset
1913 of PREDS2's domain. The implementation chooses not to
1914 build generic trees (relying on the folding capability
1915 of the compiler), but instead performs a brute-force
1916 comparison of the individual predicate chains (this is
1917 not a compile-time problem, as the chains are pretty
1918 short). When the function returns false, it does not
1919 necessarily mean PREDS1 is not a superset of PREDS2, only
1920 that the analysis cannot prove that it is. In such cases,
1921 false warnings may still be
1922 emitted. */
1923
1924 static bool
1925 is_superset_of (pred_chain_union preds1, pred_chain_union preds2)
1926 {
1927 size_t i, n2;
1928 pred_chain one_pred_chain = vNULL;
1929
1930 n2 = preds2.length ();
1931
1932 for (i = 0; i < n2; i++)
1933 {
1934 one_pred_chain = preds2[i];
1935 if (!is_included_in (one_pred_chain, preds1))
1936 return false;
1937 }
1938
1939 return true;
1940 }
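
/* For example (an illustrative sketch): with

     PREDS1 = (a_1 != 0)
     PREDS2 = (a_1 != 0 AND b_2 != 0) OR (a_1 != 0 AND c_3 != 0)

   each chain of PREDS2 contains a predicate whose domain is a subset
   of (a_1 != 0), so is_superset_of (PREDS1, PREDS2) returns true,
   whereas is_superset_of (PREDS2, PREDS1) returns false.  */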
1941
1942 /* Returns true if X1 is the negation of X2. */
1943
1944 static inline bool
1945 pred_neg_p (pred_info x1, pred_info x2)
1946 {
1947 enum tree_code c1, c2;
1948 if (!operand_equal_p (x1.pred_lhs, x2.pred_lhs, 0)
1949 || !operand_equal_p (x1.pred_rhs, x2.pred_rhs, 0))
1950 return false;
1951
1952 c1 = x1.cond_code;
1953 if (x1.invert == x2.invert)
1954 c2 = invert_tree_comparison (x2.cond_code, false);
1955 else
1956 c2 = x2.cond_code;
1957
1958 return c1 == c2;
1959 }
1960
1961 /* 1) ((x IOR y) != 0) AND (x != 0) is equivalent to (x != 0);
1962 2) (X AND Y) OR (!X AND Y) is equivalent to Y;
1963 3) X OR (!X AND Y) is equivalent to (X OR Y);
1964 4) ((x IAND y) != 0) OR (x != 0 AND y != 0) is equivalent to
1965 (x != 0 AND y != 0);
1966 5) (X AND Y) OR (!X AND Z) OR (!Y AND Z) is equivalent to
1967 (X AND Y) OR Z
1968
1969 PREDS is the set of predicate chains to be simplified. */
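
/* As a sketch of rule 1 at the GIMPLE level: given

     _t = x_1 | y_2;

   the AND chain (_t != 0) AND (x_1 != 0) is simplified by
   simplify_pred below into just (x_1 != 0), since x_1 != 0 already
   implies (x_1 | y_2) != 0.  */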
1970
1971 /* Helper function to implement rule 1 above. ONE_CHAIN is
1972 the AND predication to be simplified. */
1973
1974 static void
1975 simplify_pred (pred_chain *one_chain)
1976 {
1977 size_t i, j, n;
1978 bool simplified = false;
1979 pred_chain s_chain = vNULL;
1980
1981 n = one_chain->length ();
1982
1983 for (i = 0; i < n; i++)
1984 {
1985 pred_info *a_pred = &(*one_chain)[i];
1986
1987 if (!a_pred->pred_lhs)
1988 continue;
1989 if (!is_neq_zero_form_p (*a_pred))
1990 continue;
1991
1992 gimple *def_stmt = SSA_NAME_DEF_STMT (a_pred->pred_lhs);
1993 if (gimple_code (def_stmt) != GIMPLE_ASSIGN)
1994 continue;
1995 if (gimple_assign_rhs_code (def_stmt) == BIT_IOR_EXPR)
1996 {
1997 for (j = 0; j < n; j++)
1998 {
1999 pred_info *b_pred = &(*one_chain)[j];
2000
2001 if (!b_pred->pred_lhs)
2002 continue;
2003 if (!is_neq_zero_form_p (*b_pred))
2004 continue;
2005
2006 if (pred_expr_equal_p (*b_pred, gimple_assign_rhs1 (def_stmt))
2007 || pred_expr_equal_p (*b_pred, gimple_assign_rhs2 (def_stmt)))
2008 {
2009 /* Mark a_pred for removal. */
2010 a_pred->pred_lhs = NULL;
2011 a_pred->pred_rhs = NULL;
2012 simplified = true;
2013 break;
2014 }
2015 }
2016 }
2017 }
2018
2019 if (!simplified)
2020 return;
2021
2022 for (i = 0; i < n; i++)
2023 {
2024 pred_info *a_pred = &(*one_chain)[i];
2025 if (!a_pred->pred_lhs)
2026 continue;
2027 s_chain.safe_push (*a_pred);
2028 }
2029
2030 one_chain->release ();
2031 *one_chain = s_chain;
2032 }
2033
2034 /* The helper function implements rule 2 for the
2035 OR predicate PREDS.
2036
2037 2) (X AND Y) OR (!X AND Y) is equivalent to Y. */
2038
2039 static bool
2040 simplify_preds_2 (pred_chain_union *preds)
2041 {
2042 size_t i, j, n;
2043 bool simplified = false;
2044 pred_chain_union s_preds = vNULL;
2045
2046 /* (X AND Y) OR (!X AND Y) is equivalent to Y.
2047 (X AND Y) OR (X AND !Y) is equivalent to X. */
2048
2049 n = preds->length ();
2050 for (i = 0; i < n; i++)
2051 {
2052 pred_info x, y;
2053 pred_chain *a_chain = &(*preds)[i];
2054
2055 if (a_chain->length () != 2)
2056 continue;
2057
2058 x = (*a_chain)[0];
2059 y = (*a_chain)[1];
2060
2061 for (j = 0; j < n; j++)
2062 {
2063 pred_chain *b_chain;
2064 pred_info x2, y2;
2065
2066 if (j == i)
2067 continue;
2068
2069 b_chain = &(*preds)[j];
2070 if (b_chain->length () != 2)
2071 continue;
2072
2073 x2 = (*b_chain)[0];
2074 y2 = (*b_chain)[1];
2075
2076 if (pred_equal_p (x, x2) && pred_neg_p (y, y2))
2077 {
2078 /* Kill a_chain. */
2079 a_chain->release ();
2080 b_chain->release ();
2081 b_chain->safe_push (x);
2082 simplified = true;
2083 break;
2084 }
2085 if (pred_neg_p (x, x2) && pred_equal_p (y, y2))
2086 {
2087 /* Kill a_chain. */
2088 a_chain->release ();
2089 b_chain->release ();
2090 b_chain->safe_push (y);
2091 simplified = true;
2092 break;
2093 }
2094 }
2095 }
2096 /* Now clean up the chain. */
2097 if (simplified)
2098 {
2099 for (i = 0; i < n; i++)
2100 {
2101 if ((*preds)[i].is_empty ())
2102 continue;
2103 s_preds.safe_push ((*preds)[i]);
2104 }
2105 preds->release ();
2106 (*preds) = s_preds;
2107 s_preds = vNULL;
2108 }
2109
2110 return simplified;
2111 }
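
/* For instance (a sketch): the union

     (a_1 != 0 AND b_2 != 0) OR (a_1 != 0 AND b_2 == 0)

   is collapsed by simplify_preds_2 into the single chain (a_1 != 0),
   because the two chains agree on a_1 != 0 and are negations of each
   other on b_2.  */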
2112
2113 /* The helper function implements rule 3 for the
2114 OR predicate PREDS.
2115
2116 3) x OR (!x AND y) is equivalent to x OR y. */
2117
2118 static bool
2119 simplify_preds_3 (pred_chain_union *preds)
2120 {
2121 size_t i, j, n;
2122 bool simplified = false;
2123
2124 /* Now iteratively simplify X OR (!X AND Z ..)
2125 into X OR (Z ...). */
2126
2127 n = preds->length ();
2128 if (n < 2)
2129 return false;
2130
2131 for (i = 0; i < n; i++)
2132 {
2133 pred_info x;
2134 pred_chain *a_chain = &(*preds)[i];
2135
2136 if (a_chain->length () != 1)
2137 continue;
2138
2139 x = (*a_chain)[0];
2140
2141 for (j = 0; j < n; j++)
2142 {
2143 pred_chain *b_chain;
2144 pred_info x2;
2145 size_t k;
2146
2147 if (j == i)
2148 continue;
2149
2150 b_chain = &(*preds)[j];
2151 if (b_chain->length () < 2)
2152 continue;
2153
2154 for (k = 0; k < b_chain->length (); k++)
2155 {
2156 x2 = (*b_chain)[k];
2157 if (pred_neg_p (x, x2))
2158 {
2159 b_chain->unordered_remove (k);
2160 simplified = true;
2161 break;
2162 }
2163 }
2164 }
2165 }
2166 return simplified;
2167 }
2168
2169 /* The helper function implements rule 4 for the
2170 OR predicate PREDS.
2171
2172 4) ((x AND y) != 0) OR (x != 0 AND y != 0) is equivalent to
2173 (x != 0 AND y != 0). */
2174
2175 static bool
2176 simplify_preds_4 (pred_chain_union *preds)
2177 {
2178 size_t i, j, n;
2179 bool simplified = false;
2180 pred_chain_union s_preds = vNULL;
2181 gimple *def_stmt;
2182
2183 n = preds->length ();
2184 for (i = 0; i < n; i++)
2185 {
2186 pred_info z;
2187 pred_chain *a_chain = &(*preds)[i];
2188
2189 if (a_chain->length () != 1)
2190 continue;
2191
2192 z = (*a_chain)[0];
2193
2194 if (!is_neq_zero_form_p (z))
2195 continue;
2196
2197 def_stmt = SSA_NAME_DEF_STMT (z.pred_lhs);
2198 if (gimple_code (def_stmt) != GIMPLE_ASSIGN)
2199 continue;
2200
2201 if (gimple_assign_rhs_code (def_stmt) != BIT_AND_EXPR)
2202 continue;
2203
2204 for (j = 0; j < n; j++)
2205 {
2206 pred_chain *b_chain;
2207 pred_info x2, y2;
2208
2209 if (j == i)
2210 continue;
2211
2212 b_chain = &(*preds)[j];
2213 if (b_chain->length () != 2)
2214 continue;
2215
2216 x2 = (*b_chain)[0];
2217 y2 = (*b_chain)[1];
2218 if (!is_neq_zero_form_p (x2) || !is_neq_zero_form_p (y2))
2219 continue;
2220
2221 if ((pred_expr_equal_p (x2, gimple_assign_rhs1 (def_stmt))
2222 && pred_expr_equal_p (y2, gimple_assign_rhs2 (def_stmt)))
2223 || (pred_expr_equal_p (x2, gimple_assign_rhs2 (def_stmt))
2224 && pred_expr_equal_p (y2, gimple_assign_rhs1 (def_stmt))))
2225 {
2226 /* Kill a_chain. */
2227 a_chain->release ();
2228 simplified = true;
2229 break;
2230 }
2231 }
2232 }
2233 /* Now clean up the chain. */
2234 if (simplified)
2235 {
2236 for (i = 0; i < n; i++)
2237 {
2238 if ((*preds)[i].is_empty ())
2239 continue;
2240 s_preds.safe_push ((*preds)[i]);
2241 }
2242
2243 preds->release ();
2244 (*preds) = s_preds;
2245 s_preds = vNULL;
2246 }
2247
2248 return simplified;
2249 }
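
/* For instance (a sketch): given

     _t = x_1 & y_2;

   the union (_t != 0) OR (x_1 != 0 AND y_2 != 0) is simplified by
   simplify_preds_4 into just (x_1 != 0 AND y_2 != 0), since
   ((x_1 & y_2) != 0) implies both x_1 != 0 and y_2 != 0 and the
   first chain is therefore redundant in the union.  */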
2250
2251 /* This function simplifies predicates in PREDS. */
2252
2253 static void
2254 simplify_preds (pred_chain_union *preds, gimple *use_or_def, bool is_use)
2255 {
2256 size_t i, n;
2257 bool changed = false;
2258
2259 if (dump_file && dump_flags & TDF_DETAILS)
2260 {
2261 fprintf (dump_file, "[BEFORE SIMPLIFICATION -- ");
2262 dump_predicates (use_or_def, *preds, is_use ? "[USE]:\n" : "[DEF]:\n");
2263 }
2264
2265 for (i = 0; i < preds->length (); i++)
2266 simplify_pred (&(*preds)[i]);
2267
2268 n = preds->length ();
2269 if (n < 2)
2270 return;
2271
2272 do
2273 {
2274 changed = false;
2275 if (simplify_preds_2 (preds))
2276 changed = true;
2277
2278 /* Now iteratively simplify X OR (!X AND Z ..)
2279 into X OR (Z ...). */
2280 if (simplify_preds_3 (preds))
2281 changed = true;
2282
2283 if (simplify_preds_4 (preds))
2284 changed = true;
2285 }
2286 while (changed);
2287
2288 return;
2289 }
2290
2291 /* This is a helper function which attempts to normalize predicate chains
2292 by following UD chains. It basically builds up a big tree of either IOR
2293 operations or AND operations, and converts an IOR tree into a
2294 pred_chain_union and a BIT_AND tree into a pred_chain.
2295 Example:
2296
2297 _3 = _2 RELOP1 _1;
2298 _6 = _5 RELOP2 _4;
2299 _9 = _8 RELOP3 _7;
2300 _10 = _3 | _6;
2301 _12 = _9 | _0;
2302 _t = _10 | _12;
2303
2304 then _t != 0 will be normalized into a pred_chain_union
2305
2306 (_2 RELOP1 _1) OR (_5 RELOP2 _4) OR (_8 RELOP3 _7) OR (_0 != 0)
2307
2308 Similarly given,
2309
2310 _3 = _2 RELOP1 _1;
2311 _6 = _5 RELOP2 _4;
2312 _9 = _8 RELOP3 _7;
2313 _10 = _3 & _6;
2314 _12 = _9 & _0;
2315
2316 then _t != 0 will be normalized into a pred_chain:
2317 (_2 RELOP1 _1) AND (_5 RELOP2 _4) AND (_8 RELOP3 _7) AND (_0 != 0)
2318
2319 */
2320
2321 /* This is a helper function that stores a PRED into NORM_PREDS. */
2322
2323 inline static void
2324 push_pred (pred_chain_union *norm_preds, pred_info pred)
2325 {
2326 pred_chain pred_chain = vNULL;
2327 pred_chain.safe_push (pred);
2328 norm_preds->safe_push (pred_chain);
2329 }
2330
2331 /* A helper function that creates a predicate of the form
2332 OP != 0 and pushes it onto WORK_LIST. */
2333
2334 inline static void
2335 push_to_worklist (tree op, vec<pred_info, va_heap, vl_ptr> *work_list,
2336 hash_set<tree> *mark_set)
2337 {
2338 if (mark_set->contains (op))
2339 return;
2340 mark_set->add (op);
2341
2342 pred_info arg_pred;
2343 arg_pred.pred_lhs = op;
2344 arg_pred.pred_rhs = integer_zero_node;
2345 arg_pred.cond_code = NE_EXPR;
2346 arg_pred.invert = false;
2347 work_list->safe_push (arg_pred);
2348 }
2349
2350 /* A helper that generates a pred_info from a gimple assignment
2351 CMP_ASSIGN with comparison rhs. */
2352
2353 static pred_info
2354 get_pred_info_from_cmp (gimple *cmp_assign)
2355 {
2356 pred_info n_pred;
2357 n_pred.pred_lhs = gimple_assign_rhs1 (cmp_assign);
2358 n_pred.pred_rhs = gimple_assign_rhs2 (cmp_assign);
2359 n_pred.cond_code = gimple_assign_rhs_code (cmp_assign);
2360 n_pred.invert = false;
2361 return n_pred;
2362 }
2363
2364 /* Returns true if PHI is a degenerate phi whose arguments are
2365 all defined by the same comparison (relop). In that case, *PRED_P
2366 will be updated to that comparison. */
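
/* For example (a sketch): a phi such as

     p_7 = PHI <_3(4), _5(6)>

   where both arguments are defined by the same comparison,

     _3 = a_1 > b_2;
     _5 = a_1 > b_2;

   is treated as degenerate, and *PRED_P is set to (a_1 > b_2).  */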
2367
2368 static bool
2369 is_degenerated_phi (gimple *phi, pred_info *pred_p)
2370 {
2371 int i, n;
2372 tree op0;
2373 gimple *def0;
2374 pred_info pred0;
2375
2376 n = gimple_phi_num_args (phi);
2377 op0 = gimple_phi_arg_def (phi, 0);
2378
2379 if (TREE_CODE (op0) != SSA_NAME)
2380 return false;
2381
2382 def0 = SSA_NAME_DEF_STMT (op0);
2383 if (gimple_code (def0) != GIMPLE_ASSIGN)
2384 return false;
2385 if (TREE_CODE_CLASS (gimple_assign_rhs_code (def0)) != tcc_comparison)
2386 return false;
2387 pred0 = get_pred_info_from_cmp (def0);
2388
2389 for (i = 1; i < n; ++i)
2390 {
2391 gimple *def;
2392 pred_info pred;
2393 tree op = gimple_phi_arg_def (phi, i);
2394
2395 if (TREE_CODE (op) != SSA_NAME)
2396 return false;
2397
2398 def = SSA_NAME_DEF_STMT (op);
2399 if (gimple_code (def) != GIMPLE_ASSIGN)
2400 return false;
2401 if (TREE_CODE_CLASS (gimple_assign_rhs_code (def)) != tcc_comparison)
2402 return false;
2403 pred = get_pred_info_from_cmp (def);
2404 if (!pred_equal_p (pred, pred0))
2405 return false;
2406 }
2407
2408 *pred_p = pred0;
2409 return true;
2410 }
2411
2412 /* Normalize one predicate PRED
2413 1) if PRED cannot be normalized any further, put it into NORM_PREDS.
2414 2) otherwise if PRED is of the form x != 0, follow x's definition
2415 and put normalized predicates into WORK_LIST. */
2416
2417 static void
2418 normalize_one_pred_1 (pred_chain_union *norm_preds,
2419 pred_chain *norm_chain,
2420 pred_info pred,
2421 enum tree_code and_or_code,
2422 vec<pred_info, va_heap, vl_ptr> *work_list,
2423 hash_set<tree> *mark_set)
2424 {
2425 if (!is_neq_zero_form_p (pred))
2426 {
2427 if (and_or_code == BIT_IOR_EXPR)
2428 push_pred (norm_preds, pred);
2429 else
2430 norm_chain->safe_push (pred);
2431 return;
2432 }
2433
2434 gimple *def_stmt = SSA_NAME_DEF_STMT (pred.pred_lhs);
2435
2436 if (gimple_code (def_stmt) == GIMPLE_PHI
2437 && is_degenerated_phi (def_stmt, &pred))
2438 work_list->safe_push (pred);
2439 else if (gimple_code (def_stmt) == GIMPLE_PHI && and_or_code == BIT_IOR_EXPR)
2440 {
2441 int i, n;
2442 n = gimple_phi_num_args (def_stmt);
2443
2444 /* If we see a non-zero constant, we should punt. The predicate
2445 should be the one guarding the phi edge. */
2446 for (i = 0; i < n; ++i)
2447 {
2448 tree op = gimple_phi_arg_def (def_stmt, i);
2449 if (TREE_CODE (op) == INTEGER_CST && !integer_zerop (op))
2450 {
2451 push_pred (norm_preds, pred);
2452 return;
2453 }
2454 }
2455
2456 for (i = 0; i < n; ++i)
2457 {
2458 tree op = gimple_phi_arg_def (def_stmt, i);
2459 if (integer_zerop (op))
2460 continue;
2461
2462 push_to_worklist (op, work_list, mark_set);
2463 }
2464 }
2465 else if (gimple_code (def_stmt) != GIMPLE_ASSIGN)
2466 {
2467 if (and_or_code == BIT_IOR_EXPR)
2468 push_pred (norm_preds, pred);
2469 else
2470 norm_chain->safe_push (pred);
2471 }
2472 else if (gimple_assign_rhs_code (def_stmt) == and_or_code)
2473 {
2474 /* Avoid splitting up bit manipulations like x & 3 or y | 1. */
2475 if (is_gimple_min_invariant (gimple_assign_rhs2 (def_stmt)))
2476 {
2477 /* But treat x & 3 as condition. */
2478 if (and_or_code == BIT_AND_EXPR)
2479 {
2480 pred_info n_pred;
2481 n_pred.pred_lhs = gimple_assign_rhs1 (def_stmt);
2482 n_pred.pred_rhs = gimple_assign_rhs2 (def_stmt);
2483 n_pred.cond_code = and_or_code;
2484 n_pred.invert = false;
2485 norm_chain->safe_push (n_pred);
2486 }
2487 }
2488 else
2489 {
2490 push_to_worklist (gimple_assign_rhs1 (def_stmt), work_list, mark_set);
2491 push_to_worklist (gimple_assign_rhs2 (def_stmt), work_list, mark_set);
2492 }
2493 }
2494 else if (TREE_CODE_CLASS (gimple_assign_rhs_code (def_stmt))
2495 == tcc_comparison)
2496 {
2497 pred_info n_pred = get_pred_info_from_cmp (def_stmt);
2498 if (and_or_code == BIT_IOR_EXPR)
2499 push_pred (norm_preds, n_pred);
2500 else
2501 norm_chain->safe_push (n_pred);
2502 }
2503 else
2504 {
2505 if (and_or_code == BIT_IOR_EXPR)
2506 push_pred (norm_preds, pred);
2507 else
2508 norm_chain->safe_push (pred);
2509 }
2510 }
2511
2512 /* Normalize PRED and store the normalized predicates into NORM_PREDS. */
2513
2514 static void
2515 normalize_one_pred (pred_chain_union *norm_preds, pred_info pred)
2516 {
2517 vec<pred_info, va_heap, vl_ptr> work_list = vNULL;
2518 enum tree_code and_or_code = ERROR_MARK;
2519 pred_chain norm_chain = vNULL;
2520
2521 if (!is_neq_zero_form_p (pred))
2522 {
2523 push_pred (norm_preds, pred);
2524 return;
2525 }
2526
2527 gimple *def_stmt = SSA_NAME_DEF_STMT (pred.pred_lhs);
2528 if (gimple_code (def_stmt) == GIMPLE_ASSIGN)
2529 and_or_code = gimple_assign_rhs_code (def_stmt);
2530 if (and_or_code != BIT_IOR_EXPR && and_or_code != BIT_AND_EXPR)
2531 {
2532 if (TREE_CODE_CLASS (and_or_code) == tcc_comparison)
2533 {
2534 pred_info n_pred = get_pred_info_from_cmp (def_stmt);
2535 push_pred (norm_preds, n_pred);
2536 }
2537 else
2538 push_pred (norm_preds, pred);
2539 return;
2540 }
2541
2542 work_list.safe_push (pred);
2543 hash_set<tree> mark_set;
2544
2545 while (!work_list.is_empty ())
2546 {
2547 pred_info a_pred = work_list.pop ();
2548 normalize_one_pred_1 (norm_preds, &norm_chain, a_pred, and_or_code,
2549 &work_list, &mark_set);
2550 }
2551 if (and_or_code == BIT_AND_EXPR)
2552 norm_preds->safe_push (norm_chain);
2553
2554 work_list.release ();
2555 }
2556
2557 static void
2558 normalize_one_pred_chain (pred_chain_union *norm_preds, pred_chain one_chain)
2559 {
2560 vec<pred_info, va_heap, vl_ptr> work_list = vNULL;
2561 hash_set<tree> mark_set;
2562 pred_chain norm_chain = vNULL;
2563 size_t i;
2564
2565 for (i = 0; i < one_chain.length (); i++)
2566 {
2567 work_list.safe_push (one_chain[i]);
2568 mark_set.add (one_chain[i].pred_lhs);
2569 }
2570
2571 while (!work_list.is_empty ())
2572 {
2573 pred_info a_pred = work_list.pop ();
2574 normalize_one_pred_1 (0, &norm_chain, a_pred, BIT_AND_EXPR, &work_list,
2575 &mark_set);
2576 }
2577
2578 norm_preds->safe_push (norm_chain);
2579 work_list.release ();
2580 }
2581
2582 /* Normalize predicate chains PREDS and returns the normalized one. */
2583
2584 static pred_chain_union
2585 normalize_preds (pred_chain_union preds, gimple *use_or_def, bool is_use)
2586 {
2587 pred_chain_union norm_preds = vNULL;
2588 size_t n = preds.length ();
2589 size_t i;
2590
2591 if (dump_file && dump_flags & TDF_DETAILS)
2592 {
2593 fprintf (dump_file, "[BEFORE NORMALIZATION --");
2594 dump_predicates (use_or_def, preds, is_use ? "[USE]:\n" : "[DEF]:\n");
2595 }
2596
2597 for (i = 0; i < n; i++)
2598 {
2599 if (preds[i].length () != 1)
2600 normalize_one_pred_chain (&norm_preds, preds[i]);
2601 else
2602 {
2603 normalize_one_pred (&norm_preds, preds[i][0]);
2604 preds[i].release ();
2605 }
2606 }
2607
2608 if (dump_file)
2609 {
2610 fprintf (dump_file, "[AFTER NORMALIZATION -- ");
2611 dump_predicates (use_or_def, norm_preds,
2612 is_use ? "[USE]:\n" : "[DEF]:\n");
2613 }
2614
2615 destroy_predicate_vecs (&preds);
2616 return norm_preds;
2617 }
2618
2619 /* Return TRUE if PREDICATE can be invalidated by any individual
2620 predicate in USE_GUARD. */
2621
2622 static bool
2623 can_one_predicate_be_invalidated_p (pred_info predicate,
2624 pred_chain use_guard)
2625 {
2626 if (dump_file && dump_flags & TDF_DETAILS)
2627 {
2628 fprintf (dump_file, "Testing if this predicate: ");
2629 dump_pred_info (predicate);
2630 fprintf (dump_file, "\n...can be invalidated by a USE guard of: ");
2631 dump_pred_chain (use_guard);
2632 }
2633 for (size_t i = 0; i < use_guard.length (); ++i)
2634 {
2635 /* NOTE: This is a very simple check, and only understands an
2636 exact opposite. So, [i == 0] is currently only invalidated
2637 by [.NOT. i == 0] or [i != 0]. Ideally we should also
2638 invalidate with say [i > 5] or [i == 8]. There is certainly
2639 room for improvement here. */
2640 if (pred_neg_p (predicate, use_guard[i]))
2641 {
2642 if (dump_file && dump_flags & TDF_DETAILS)
2643 {
2644 fprintf (dump_file, " Predicate was invalidated by: ");
2645 dump_pred_info (use_guard[i]);
2646 fputc ('\n', dump_file);
2647 }
2648 return true;
2649 }
2650 }
2651 return false;
2652 }
2653
2654 /* Return TRUE if all predicates in UNINIT_PRED are invalidated by
2655 USE_GUARD being true. */
2656
2657 static bool
2658 can_chain_union_be_invalidated_p (pred_chain_union uninit_pred,
2659 pred_chain use_guard)
2660 {
2661 if (uninit_pred.is_empty ())
2662 return false;
2663 if (dump_file && dump_flags & TDF_DETAILS)
2664 dump_predicates (NULL, uninit_pred,
2665 "Testing if anything here can be invalidated: ");
2666 for (size_t i = 0; i < uninit_pred.length (); ++i)
2667 {
2668 pred_chain c = uninit_pred[i];
2669 size_t j;
2670 for (j = 0; j < c.length (); ++j)
2671 if (can_one_predicate_be_invalidated_p (c[j], use_guard))
2672 break;
2673
2674 /* If we were unable to invalidate any predicate in C, then there
2675 is a viable path from entry to the PHI where the PHI takes
2676 an uninitialized value and continues to a use of the PHI. */
2677 if (j == c.length ())
2678 return false;
2679 }
2680 return true;
2681 }
2682
2683 /* Return TRUE if none of the uninitialized operands in UNINIT_OPNDS
2684 can actually happen if we arrived at a use for PHI.
2685
2686 PHI_USE_GUARDS are the guard conditions for the use of the PHI. */
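
/* An illustrative sketch at the source level (not an actual test
   case):

     int x;
     if (flag)
       x = 1;
     ...
     if (flag)
       use (x);

   The uninitialized operand of the phi for x flows in under the
   negation of (flag != 0), while the only use guard is (flag != 0)
   itself.  Since the former is invalidated by the latter, the
   uninitialized value cannot reach the use and no warning is
   issued.  */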
2687
2688 static bool
2689 uninit_uses_cannot_happen (gphi *phi, unsigned uninit_opnds,
2690 pred_chain_union phi_use_guards)
2691 {
2692 unsigned phi_args = gimple_phi_num_args (phi);
2693 if (phi_args > max_phi_args)
2694 return false;
2695
2696 /* PHI_USE_GUARDS are OR'ed together. If we have more than one
2697 possible guard, there's no way of knowing which guard was true.
2698 Since we need to be absolutely sure that the uninitialized
2699 operands will be invalidated, bail. */
2700 if (phi_use_guards.length () != 1)
2701 return false;
2702
2703 /* Look for the control dependencies of all the uninitialized
2704 operands and build guard predicates describing them. */
2705 pred_chain_union uninit_preds;
2706 bool ret = true;
2707 for (unsigned i = 0; i < phi_args; ++i)
2708 {
2709 if (!MASK_TEST_BIT (uninit_opnds, i))
2710 continue;
2711
2712 edge e = gimple_phi_arg_edge (phi, i);
2713 vec<edge> dep_chains[MAX_NUM_CHAINS];
2714 auto_vec<edge, MAX_CHAIN_LEN + 1> cur_chain;
2715 size_t num_chains = 0;
2716 int num_calls = 0;
2717
2718 /* Build the control dependency chain for uninit operand `i'... */
2719 uninit_preds = vNULL;
2720 if (!compute_control_dep_chain (ENTRY_BLOCK_PTR_FOR_FN (cfun),
2721 e->src, dep_chains, &num_chains,
2722 &cur_chain, &num_calls))
2723 {
2724 ret = false;
2725 break;
2726 }
2727 /* ...and convert it into a set of predicates. */
2728 bool has_valid_preds
2729 = convert_control_dep_chain_into_preds (dep_chains, num_chains,
2730 &uninit_preds);
2731 for (size_t j = 0; j < num_chains; ++j)
2732 dep_chains[j].release ();
2733 if (!has_valid_preds)
2734 {
2735 ret = false;
2736 break;
2737 }
2738 simplify_preds (&uninit_preds, NULL, false);
2739 uninit_preds = normalize_preds (uninit_preds, NULL, false);
2740
2741 /* Can the guard for this uninitialized operand be invalidated
2742 by the PHI use? */
2743 if (!can_chain_union_be_invalidated_p (uninit_preds, phi_use_guards[0]))
2744 {
2745 ret = false;
2746 break;
2747 }
2748 }
2749 destroy_predicate_vecs (&uninit_preds);
2750 return ret;
2751 }
2752
2753 /* Computes the predicates that guard the use and checks
2754 if the incoming paths that have an empty (or possibly
2755 empty) definition can be pruned/filtered. The function returns
2756 true if it can be determined that the use of PHI's def in
2757 USE_STMT is guarded with a predicate set not overlapping with
2758 predicate sets of all runtime paths that do not have a definition.
2759
2760 Returns false if it is not or it cannot be determined. USE_BB is
2761 the bb of the use (for phi operand use, the bb is not the bb of
2762 the phi stmt, but the src bb of the operand edge).
2763
2764 UNINIT_OPNDS is a bit vector. If an operand of PHI is
2765 uninitialized, the corresponding bit in the vector is 1.
2767
2768 *DEF_PREDS contains the (memoized) defining predicate chains of PHI.
2769 If *DEF_PREDS is the empty vector, the defining predicate chains of
2770 PHI will be computed and stored into *DEF_PREDS as needed.
2771
2772 VISITED_PHIS is a pointer set of phis being visited. */
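
/* A sketch of the "superset" case handled at the end of this
   function:

     int x;
     if (a && b)
       x = 1;
     if (a && b && c)
       use (x);

   The defining predicate chain of the phi for x is roughly
   (a != 0 AND b != 0), while the use predicate is
   (a != 0 AND b != 0 AND c != 0); the former is a superset of the
   latter, so the use is considered properly guarded.  */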
2773
2774 static bool
2775 is_use_properly_guarded (gimple *use_stmt,
2776 basic_block use_bb,
2777 gphi *phi,
2778 unsigned uninit_opnds,
2779 pred_chain_union *def_preds,
2780 hash_set<gphi *> *visited_phis)
2781 {
2782 basic_block phi_bb;
2783 pred_chain_union preds = vNULL;
2784 bool has_valid_preds = false;
2785 bool is_properly_guarded = false;
2786
2787 if (visited_phis->add (phi))
2788 return false;
2789
2790 phi_bb = gimple_bb (phi);
2791
2792 if (is_non_loop_exit_postdominating (use_bb, phi_bb))
2793 return false;
2794
2795 has_valid_preds = find_predicates (&preds, phi_bb, use_bb);
2796
2797 if (!has_valid_preds)
2798 {
2799 destroy_predicate_vecs (&preds);
2800 return false;
2801 }
2802
2803 /* Try to prune the dead incoming phi edges. */
2804 is_properly_guarded
2805 = use_pred_not_overlap_with_undef_path_pred (preds, phi, uninit_opnds,
2806 visited_phis);
2807
2808 /* We might be able to prove that if the control dependencies
2809 for UNINIT_OPNDS are true, that the control dependencies for
2810 USE_STMT can never be true. */
2811 if (!is_properly_guarded)
2812 is_properly_guarded |= uninit_uses_cannot_happen (phi, uninit_opnds,
2813 preds);
2814
2815 if (is_properly_guarded)
2816 {
2817 destroy_predicate_vecs (&preds);
2818 return true;
2819 }
2820
2821 if (def_preds->is_empty ())
2822 {
2823 has_valid_preds = find_def_preds (def_preds, phi);
2824
2825 if (!has_valid_preds)
2826 {
2827 destroy_predicate_vecs (&preds);
2828 return false;
2829 }
2830
2831 simplify_preds (def_preds, phi, false);
2832 *def_preds = normalize_preds (*def_preds, phi, false);
2833 }
2834
2835 simplify_preds (&preds, use_stmt, true);
2836 preds = normalize_preds (preds, use_stmt, true);
2837
2838 is_properly_guarded = is_superset_of (*def_preds, preds);
2839
2840 destroy_predicate_vecs (&preds);
2841 return is_properly_guarded;
2842 }
2843
2844 /* Searches through all uses of a potentially
2845 uninitialized variable defined by PHI and returns a use
2846 statement if the use is not properly guarded. It returns
2847 NULL if all uses are guarded. UNINIT_OPNDS is a bitvector
2848 holding the position(s) of uninit PHI operands. WORKLIST
2849 is the vector of candidate phis that may be updated by this
2850 function. ADDED_TO_WORKLIST is the pointer set tracking
2851 if the new phi is already in the worklist. */
2852
2853 static gimple *
2854 find_uninit_use (gphi *phi, unsigned uninit_opnds,
2855 vec<gphi *> *worklist,
2856 hash_set<gphi *> *added_to_worklist)
2857 {
2858 tree phi_result;
2859 use_operand_p use_p;
2860 gimple *use_stmt;
2861 imm_use_iterator iter;
2862 pred_chain_union def_preds = vNULL;
2863 gimple *ret = NULL;
2864
2865 phi_result = gimple_phi_result (phi);
2866
2867 FOR_EACH_IMM_USE_FAST (use_p, iter, phi_result)
2868 {
2869 basic_block use_bb;
2870
2871 use_stmt = USE_STMT (use_p);
2872 if (is_gimple_debug (use_stmt))
2873 continue;
2874
2875 if (gphi *use_phi = dyn_cast<gphi *> (use_stmt))
2876 use_bb = gimple_phi_arg_edge (use_phi,
2877 PHI_ARG_INDEX_FROM_USE (use_p))->src;
2878 else
2879 use_bb = gimple_bb (use_stmt);
2880
2881 hash_set<gphi *> visited_phis;
2882 if (is_use_properly_guarded (use_stmt, use_bb, phi, uninit_opnds,
2883 &def_preds, &visited_phis))
2884 continue;
2885
2886 if (dump_file && (dump_flags & TDF_DETAILS))
2887 {
2888 fprintf (dump_file, "[CHECK]: Found unguarded use: ");
2889 print_gimple_stmt (dump_file, use_stmt, 0);
2890 }
2891 /* Found one real use, return. */
2892 if (gimple_code (use_stmt) != GIMPLE_PHI)
2893 {
2894 ret = use_stmt;
2895 break;
2896 }
2897
2898 /* Found a phi use that is not guarded,
2899 add the phi to the worklist. */
2900 if (!added_to_worklist->add (as_a<gphi *> (use_stmt)))
2901 {
2902 if (dump_file && (dump_flags & TDF_DETAILS))
2903 {
2904 fprintf (dump_file, "[WORKLIST]: Update worklist with phi: ");
2905 print_gimple_stmt (dump_file, use_stmt, 0);
2906 }
2907
2908 worklist->safe_push (as_a<gphi *> (use_stmt));
2909 possibly_undefined_names->add (phi_result);
2910 }
2911 }
2912
2913 destroy_predicate_vecs (&def_preds);
2914 return ret;
2915 }
2916
2917 /* Looks for inputs to PHI that are SSA_NAMEs that have empty definitions
2918 and gives a warning if there exists a runtime path from the entry to a
2919 use of the PHI def that does not contain a definition. In other words,
2920 the warning is on the real use. The more dead paths the compiler can
2921 prune, the fewer false positives the warning produces. WORKLIST
2922 is a vector of candidate phis to be examined. ADDED_TO_WORKLIST is
2923 a pointer set tracking if the new phi is added to the worklist or not. */
2924
2925 static void
2926 warn_uninitialized_phi (gphi *phi, vec<gphi *> *worklist,
2927 hash_set<gphi *> *added_to_worklist)
2928 {
2929 unsigned uninit_opnds;
2930 gimple *uninit_use_stmt = 0;
2931 tree uninit_op;
2932 int phiarg_index;
2933 location_t loc;
2934
2935 /* Don't look at virtual operands. */
2936 if (virtual_operand_p (gimple_phi_result (phi)))
2937 return;
2938
2939 uninit_opnds = compute_uninit_opnds_pos (phi);
2940
2941 if (MASK_EMPTY (uninit_opnds))
2942 return;
2943
2944 if (dump_file && (dump_flags & TDF_DETAILS))
2945 {
2946 fprintf (dump_file, "[CHECK]: examining phi: ");
2947 print_gimple_stmt (dump_file, phi, 0);
2948 }
2949
2950 /* Now check if we have any use of the value without proper guard. */
2951 uninit_use_stmt = find_uninit_use (phi, uninit_opnds,
2952 worklist, added_to_worklist);
2953
2954 /* All uses are properly guarded. */
2955 if (!uninit_use_stmt)
2956 return;
2957
2958 phiarg_index = MASK_FIRST_SET_BIT (uninit_opnds);
2959 uninit_op = gimple_phi_arg_def (phi, phiarg_index);
2960 if (SSA_NAME_VAR (uninit_op) == NULL_TREE)
2961 return;
2962 if (gimple_phi_arg_has_location (phi, phiarg_index))
2963 loc = gimple_phi_arg_location (phi, phiarg_index);
2964 else
2965 loc = UNKNOWN_LOCATION;
2966 warn_uninit (OPT_Wmaybe_uninitialized, uninit_op, SSA_NAME_VAR (uninit_op),
2967 SSA_NAME_VAR (uninit_op),
2968 "%qD may be used uninitialized in this function",
2969 uninit_use_stmt, loc);
2970 }
2971
2972 static bool
2973 gate_warn_uninitialized (void)
2974 {
2975 return warn_uninitialized || warn_maybe_uninitialized;
2976 }
2977
2978 namespace {
2979
2980 const pass_data pass_data_late_warn_uninitialized =
2981 {
2982 GIMPLE_PASS, /* type */
2983 "uninit", /* name */
2984 OPTGROUP_NONE, /* optinfo_flags */
2985 TV_NONE, /* tv_id */
2986 PROP_ssa, /* properties_required */
2987 0, /* properties_provided */
2988 0, /* properties_destroyed */
2989 0, /* todo_flags_start */
2990 0, /* todo_flags_finish */
2991 };
2992
2993 class pass_late_warn_uninitialized : public gimple_opt_pass
2994 {
2995 public:
2996 pass_late_warn_uninitialized (gcc::context *ctxt)
2997 : gimple_opt_pass (pass_data_late_warn_uninitialized, ctxt)
2998 {}
2999
3000 /* opt_pass methods: */
3001 opt_pass *clone () { return new pass_late_warn_uninitialized (m_ctxt); }
3002 virtual bool gate (function *) { return gate_warn_uninitialized (); }
3003 virtual unsigned int execute (function *);
3004
3005 }; // class pass_late_warn_uninitialized
3006
3007 unsigned int
3008 pass_late_warn_uninitialized::execute (function *fun)
3009 {
3010 basic_block bb;
3011 gphi_iterator gsi;
3012 vec<gphi *> worklist = vNULL;
3013
3014 calculate_dominance_info (CDI_DOMINATORS);
3015 calculate_dominance_info (CDI_POST_DOMINATORS);
3016 /* Re-do the plain uninitialized variable check, as optimization may have
3017 straightened control flow. Do this first so that we don't accidentally
3018 get a "may be" warning when we'd have seen an "is" warning later. */
3019 warn_uninitialized_vars (/*warn_maybe_uninitialized=*/1);
3020
3021 timevar_push (TV_TREE_UNINIT);
3022
3023 possibly_undefined_names = new hash_set<tree>;
3024 hash_set<gphi *> added_to_worklist;
3025
3026 /* Initialize the worklist. */
3027 FOR_EACH_BB_FN (bb, fun)
3028 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3029 {
3030 gphi *phi = gsi.phi ();
3031 size_t n, i;
3032
3033 n = gimple_phi_num_args (phi);
3034
3035 /* Don't look at virtual operands. */
3036 if (virtual_operand_p (gimple_phi_result (phi)))
3037 continue;
3038
3039 for (i = 0; i < n; ++i)
3040 {
3041 tree op = gimple_phi_arg_def (phi, i);
3042 if (TREE_CODE (op) == SSA_NAME && uninit_undefined_value_p (op))
3043 {
3044 worklist.safe_push (phi);
3045 added_to_worklist.add (phi);
3046 if (dump_file && (dump_flags & TDF_DETAILS))
3047 {
3048 fprintf (dump_file, "[WORKLIST]: add to initial list: ");
3049 print_gimple_stmt (dump_file, phi, 0);
3050 }
3051 break;
3052 }
3053 }
3054 }
3055
3056 while (worklist.length () != 0)
3057 {
3058 gphi *cur_phi = 0;
3059 cur_phi = worklist.pop ();
3060 warn_uninitialized_phi (cur_phi, &worklist, &added_to_worklist);
3061 }
3062
3063 worklist.release ();
3064 delete possibly_undefined_names;
3065 possibly_undefined_names = NULL;
3066 free_dominance_info (CDI_POST_DOMINATORS);
3067 timevar_pop (TV_TREE_UNINIT);
3068 return 0;
3069 }
3070
3071 } // anon namespace
3072
3073 gimple_opt_pass *
3074 make_pass_late_warn_uninitialized (gcc::context *ctxt)
3075 {
3076 return new pass_late_warn_uninitialized (ctxt);
3077 }
3078
3079 static unsigned int
3080 execute_early_warn_uninitialized (void)
3081 {
3082 /* Currently, this pass always runs, but
3083 execute_late_warn_uninitialized only runs with optimization. With
3084 optimization we want to warn about possibly uninitialized uses as late
3085 as possible, thus don't do it here. However, without
3086 optimization we need to warn here about "may be uninitialized". */
3087 calculate_dominance_info (CDI_POST_DOMINATORS);
3088
3089 warn_uninitialized_vars (/*warn_maybe_uninitialized=*/!optimize);
3090
3091 /* Post-dominator information cannot be reliably updated. Free it
3092 after the use. */
3093
3094 free_dominance_info (CDI_POST_DOMINATORS);
3095 return 0;
3096 }
3097
3098 namespace {
3099
3100 const pass_data pass_data_early_warn_uninitialized =
3101 {
3102 GIMPLE_PASS, /* type */
3103 "*early_warn_uninitialized", /* name */
3104 OPTGROUP_NONE, /* optinfo_flags */
3105 TV_TREE_UNINIT, /* tv_id */
3106 PROP_ssa, /* properties_required */
3107 0, /* properties_provided */
3108 0, /* properties_destroyed */
3109 0, /* todo_flags_start */
3110 0, /* todo_flags_finish */
3111 };
3112
3113 class pass_early_warn_uninitialized : public gimple_opt_pass
3114 {
3115 public:
3116 pass_early_warn_uninitialized (gcc::context *ctxt)
3117 : gimple_opt_pass (pass_data_early_warn_uninitialized, ctxt)
3118 {}
3119
3120 /* opt_pass methods: */
3121 virtual bool gate (function *) { return gate_warn_uninitialized (); }
3122 virtual unsigned int execute (function *)
3123 {
3124 return execute_early_warn_uninitialized ();
3125 }
3126
3127 }; // class pass_early_warn_uninitialized
3128
3129 } // anon namespace
3130
3131 gimple_opt_pass *
3132 make_pass_early_warn_uninitialized (gcc::context *ctxt)
3133 {
3134 return new pass_early_warn_uninitialized (ctxt);
3135 }