gcc/tree-ssa-phiopt.c
1 /* Optimization of PHI nodes by converting them into straightline code.
2 Copyright (C) 2004-2021 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the
8 Free Software Foundation; either version 3, or (at your option) any
9 later version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "backend.h"
24 #include "insn-codes.h"
25 #include "rtl.h"
26 #include "tree.h"
27 #include "gimple.h"
28 #include "cfghooks.h"
29 #include "tree-pass.h"
30 #include "ssa.h"
31 #include "optabs-tree.h"
32 #include "insn-config.h"
33 #include "gimple-pretty-print.h"
34 #include "fold-const.h"
35 #include "stor-layout.h"
36 #include "cfganal.h"
37 #include "gimplify.h"
38 #include "gimple-iterator.h"
39 #include "gimplify-me.h"
40 #include "tree-cfg.h"
41 #include "tree-dfa.h"
42 #include "domwalk.h"
43 #include "cfgloop.h"
44 #include "tree-data-ref.h"
45 #include "tree-scalar-evolution.h"
46 #include "tree-inline.h"
47 #include "case-cfn-macros.h"
48 #include "tree-eh.h"
49 #include "gimple-fold.h"
50 #include "internal-fn.h"
51
52 static unsigned int tree_ssa_phiopt_worker (bool, bool, bool);
53 static bool two_value_replacement (basic_block, basic_block, edge, gphi *,
54 tree, tree);
55 static bool conditional_replacement (basic_block, basic_block,
56 edge, edge, gphi *, tree, tree);
57 static gphi *factor_out_conditional_conversion (edge, edge, gphi *, tree, tree,
58 gimple *);
59 static int value_replacement (basic_block, basic_block,
60 edge, edge, gimple *, tree, tree);
61 static bool minmax_replacement (basic_block, basic_block,
62 edge, edge, gimple *, tree, tree);
63 static bool abs_replacement (basic_block, basic_block,
64 edge, edge, gimple *, tree, tree);
65 static bool xor_replacement (basic_block, basic_block,
66 edge, edge, gimple *, tree, tree);
67 static bool cond_removal_in_popcount_clz_ctz_pattern (basic_block, basic_block,
68 edge, edge, gimple *,
69 tree, tree);
70 static bool cond_store_replacement (basic_block, basic_block, edge, edge,
71 hash_set<tree> *);
72 static bool cond_if_else_store_replacement (basic_block, basic_block, basic_block);
73 static hash_set<tree> * get_non_trapping ();
74 static void replace_phi_edge_with_variable (basic_block, edge, gimple *, tree);
75 static void hoist_adjacent_loads (basic_block, basic_block,
76 basic_block, basic_block);
77 static bool gate_hoist_loads (void);
78
79 /* This pass tries to transform conditional stores into unconditional
80 ones, enabling further simplifications with the simpler then and else
81 blocks. In particular it replaces this:
82
83 bb0:
84 if (cond) goto bb2; else goto bb1;
85 bb1:
86 *p = RHS;
87 bb2:
88
89 with
90
91 bb0:
92 if (cond) goto bb1; else goto bb2;
93 bb1:
94 condtmp' = *p;
95 bb2:
96 condtmp = PHI <RHS, condtmp'>
97 *p = condtmp;
98
99 This transformation can only be done under several constraints,
100 documented below. It also replaces:
101
102 bb0:
103 if (cond) goto bb2; else goto bb1;
104 bb1:
105 *p = RHS1;
106 goto bb3;
107 bb2:
108 *p = RHS2;
109 bb3:
110
111 with
112
113 bb0:
114 if (cond) goto bb3; else goto bb1;
115 bb1:
116 bb3:
117 condtmp = PHI <RHS1, RHS2>
118 *p = condtmp; */
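/* Illustrative sketch (added by the editor, not part of the original file):
   at the C level the second shape above corresponds to

     if (cond)
       *p = rhs2;
     else
       *p = rhs1;

   which becomes a select of the stored value followed by one
   unconditional store:

     condtmp = cond ? rhs2 : rhs1;
     *p = condtmp;  */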
119
120 static unsigned int
121 tree_ssa_cs_elim (void)
122 {
123 unsigned todo;
124   /* ??? We are not interested in loop-related info, but the following
125 will create it, ICEing as we didn't init loops with pre-headers.
126 An interfacing issue of find_data_references_in_bb. */
127 loop_optimizer_init (LOOPS_NORMAL);
128 scev_initialize ();
129 todo = tree_ssa_phiopt_worker (true, false, false);
130 scev_finalize ();
131 loop_optimizer_finalize ();
132 return todo;
133 }
134
135 /* Return the single PHI in SEQ whose arguments for edges E0 and E1 differ
    (or the sole PHI if SEQ is a singleton); otherwise return NULL.  */
136
137 static gphi *
138 single_non_singleton_phi_for_edges (gimple_seq seq, edge e0, edge e1)
139 {
140 gimple_stmt_iterator i;
141 gphi *phi = NULL;
142 if (gimple_seq_singleton_p (seq))
143 return as_a <gphi *> (gsi_stmt (gsi_start (seq)));
144 for (i = gsi_start (seq); !gsi_end_p (i); gsi_next (&i))
145 {
146 gphi *p = as_a <gphi *> (gsi_stmt (i));
147 /* If the PHI arguments are equal then we can skip this PHI. */
148 if (operand_equal_for_phi_arg_p (gimple_phi_arg_def (p, e0->dest_idx),
149 gimple_phi_arg_def (p, e1->dest_idx)))
150 continue;
151
152       /* If we already have a PHI whose arguments for the two edges
153 	 differ, then there is no singleton for these edges; return NULL.  */
154 if (phi)
155 return NULL;
156
157 phi = p;
158 }
159 return phi;
160 }
161
162 /* The core routine of conditional store replacement and normal
163 phi optimizations. Both share much of the infrastructure in how
164 to match applicable basic block patterns. DO_STORE_ELIM is true
165 when we want to do conditional store replacement, false otherwise.
166    DO_HOIST_LOADS is true when we want to hoist adjacent loads out
167    of diamond control flow patterns, false otherwise.  EARLY_P is true
   when this is invoked from the early phiopt pass, in which case several
   of the replacements below are skipped.  */
168 static unsigned int
169 tree_ssa_phiopt_worker (bool do_store_elim, bool do_hoist_loads, bool early_p)
170 {
171 basic_block bb;
172 basic_block *bb_order;
173 unsigned n, i;
174 bool cfgchanged = false;
175 hash_set<tree> *nontrap = 0;
176
177 if (do_store_elim)
178 /* Calculate the set of non-trapping memory accesses. */
179 nontrap = get_non_trapping ();
180
181   /* Search every basic block for a COND_EXPR we may be able to optimize.
182
183      We walk the blocks in an order that guarantees that a block with
184 a single predecessor is processed before the predecessor.
185 This ensures that we collapse inner ifs before visiting the
186 outer ones, and also that we do not try to visit a removed
187 block. */
188 bb_order = single_pred_before_succ_order ();
189 n = n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS;
190
191 for (i = 0; i < n; i++)
192 {
193 gimple *cond_stmt;
194 gphi *phi;
195 basic_block bb1, bb2;
196 edge e1, e2;
197 tree arg0, arg1;
198
199 bb = bb_order[i];
200
201 cond_stmt = last_stmt (bb);
202 /* Check to see if the last statement is a GIMPLE_COND. */
203 if (!cond_stmt
204 || gimple_code (cond_stmt) != GIMPLE_COND)
205 continue;
206
207 e1 = EDGE_SUCC (bb, 0);
208 bb1 = e1->dest;
209 e2 = EDGE_SUCC (bb, 1);
210 bb2 = e2->dest;
211
212 /* We cannot do the optimization on abnormal edges. */
213 if ((e1->flags & EDGE_ABNORMAL) != 0
214 || (e2->flags & EDGE_ABNORMAL) != 0)
215 continue;
216
217       /* Punt if bb1 has no successors, bb2 is missing, or bb2 has no
	 successors.  */
218 if (EDGE_COUNT (bb1->succs) == 0
219 || bb2 == NULL
220 || EDGE_COUNT (bb2->succs) == 0)
221 continue;
222
223       /* Find the bb that falls through to the other.  */
224 if (EDGE_SUCC (bb1, 0)->dest == bb2)
225 ;
226 else if (EDGE_SUCC (bb2, 0)->dest == bb1)
227 {
228 std::swap (bb1, bb2);
229 std::swap (e1, e2);
230 }
231 else if (do_store_elim
232 && EDGE_SUCC (bb1, 0)->dest == EDGE_SUCC (bb2, 0)->dest)
233 {
234 basic_block bb3 = EDGE_SUCC (bb1, 0)->dest;
235
236 if (!single_succ_p (bb1)
237 || (EDGE_SUCC (bb1, 0)->flags & EDGE_FALLTHRU) == 0
238 || !single_succ_p (bb2)
239 || (EDGE_SUCC (bb2, 0)->flags & EDGE_FALLTHRU) == 0
240 || EDGE_COUNT (bb3->preds) != 2)
241 continue;
242 if (cond_if_else_store_replacement (bb1, bb2, bb3))
243 cfgchanged = true;
244 continue;
245 }
246 else if (do_hoist_loads
247 && EDGE_SUCC (bb1, 0)->dest == EDGE_SUCC (bb2, 0)->dest)
248 {
249 basic_block bb3 = EDGE_SUCC (bb1, 0)->dest;
250
251 if (!FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (cond_stmt)))
252 && single_succ_p (bb1)
253 && single_succ_p (bb2)
254 && single_pred_p (bb1)
255 && single_pred_p (bb2)
256 && EDGE_COUNT (bb->succs) == 2
257 && EDGE_COUNT (bb3->preds) == 2
258 /* If one edge or the other is dominant, a conditional move
259 is likely to perform worse than the well-predicted branch. */
260 && !predictable_edge_p (EDGE_SUCC (bb, 0))
261 && !predictable_edge_p (EDGE_SUCC (bb, 1)))
262 hoist_adjacent_loads (bb, bb1, bb2, bb3);
263 continue;
264 }
265 else
266 continue;
267
268 e1 = EDGE_SUCC (bb1, 0);
269
270 /* Make sure that bb1 is just a fall through. */
271 if (!single_succ_p (bb1)
272 || (e1->flags & EDGE_FALLTHRU) == 0)
273 continue;
274
275       /* Also make sure that bb1 has only one predecessor and that it
276 is bb. */
277 if (!single_pred_p (bb1)
278 || single_pred (bb1) != bb)
279 continue;
280
281 if (do_store_elim)
282 {
283 /* bb1 is the middle block, bb2 the join block, bb the split block,
284 e1 the fallthrough edge from bb1 to bb2. We can't do the
285 optimization if the join block has more than two predecessors. */
286 if (EDGE_COUNT (bb2->preds) > 2)
287 continue;
288 if (cond_store_replacement (bb1, bb2, e1, e2, nontrap))
289 cfgchanged = true;
290 }
291 else
292 {
293 gimple_seq phis = phi_nodes (bb2);
294 gimple_stmt_iterator gsi;
295 bool candorest = true;
296
297 	  /* Value replacement can work with more than one PHI,
298 	     so try that first.  */
299 if (!early_p)
300 for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
301 {
302 phi = as_a <gphi *> (gsi_stmt (gsi));
303 arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
304 arg1 = gimple_phi_arg_def (phi, e2->dest_idx);
305 if (value_replacement (bb, bb1, e1, e2, phi, arg0, arg1) == 2)
306 {
307 candorest = false;
308 cfgchanged = true;
309 break;
310 }
311 }
312
313 if (!candorest)
314 continue;
315
316 phi = single_non_singleton_phi_for_edges (phis, e1, e2);
317 if (!phi)
318 continue;
319
320 arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
321 arg1 = gimple_phi_arg_def (phi, e2->dest_idx);
322
323 /* Something is wrong if we cannot find the arguments in the PHI
324 node. */
325 gcc_assert (arg0 != NULL_TREE && arg1 != NULL_TREE);
326
327 gphi *newphi = factor_out_conditional_conversion (e1, e2, phi,
328 arg0, arg1,
329 cond_stmt);
330 if (newphi != NULL)
331 {
332 phi = newphi;
333 /* factor_out_conditional_conversion may create a new PHI in
334 BB2 and eliminate an existing PHI in BB2. Recompute values
335 that may be affected by that change. */
336 arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
337 arg1 = gimple_phi_arg_def (phi, e2->dest_idx);
338 gcc_assert (arg0 != NULL_TREE && arg1 != NULL_TREE);
339 }
340
341 /* Do the replacement of conditional if it can be done. */
342 if (!early_p && two_value_replacement (bb, bb1, e2, phi, arg0, arg1))
343 cfgchanged = true;
344 else if (!early_p
345 && conditional_replacement (bb, bb1, e1, e2, phi,
346 arg0, arg1))
347 cfgchanged = true;
348 else if (abs_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
349 cfgchanged = true;
350 else if (!early_p
351 && xor_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
352 cfgchanged = true;
353 else if (!early_p
354 && cond_removal_in_popcount_clz_ctz_pattern (bb, bb1, e1,
355 e2, phi, arg0,
356 arg1))
357 cfgchanged = true;
358 else if (minmax_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
359 cfgchanged = true;
360 }
361 }
362
363 free (bb_order);
364
365 if (do_store_elim)
366 delete nontrap;
367 /* If the CFG has changed, we should cleanup the CFG. */
368 if (cfgchanged && do_store_elim)
369 {
370 /* In cond-store replacement we have added some loads on edges
371 and new VOPS (as we moved the store, and created a load). */
372 gsi_commit_edge_inserts ();
373 return TODO_cleanup_cfg | TODO_update_ssa_only_virtuals;
374 }
375 else if (cfgchanged)
376 return TODO_cleanup_cfg;
377 return 0;
378 }
379
380 /* Replace PHI node element whose edge is E in block BB with variable NEW.
381 Remove the edge from COND_BLOCK which does not lead to BB (COND_BLOCK
382 is known to have two edges, one of which must reach BB). */
383
384 static void
385 replace_phi_edge_with_variable (basic_block cond_block,
386 edge e, gimple *phi, tree new_tree)
387 {
388 basic_block bb = gimple_bb (phi);
389 basic_block block_to_remove;
390 gimple_stmt_iterator gsi;
391
392 /* Change the PHI argument to new. */
393 SET_USE (PHI_ARG_DEF_PTR (phi, e->dest_idx), new_tree);
394
395 /* Remove the empty basic block. */
396 if (EDGE_SUCC (cond_block, 0)->dest == bb)
397 {
398 EDGE_SUCC (cond_block, 0)->flags |= EDGE_FALLTHRU;
399 EDGE_SUCC (cond_block, 0)->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
400 EDGE_SUCC (cond_block, 0)->probability = profile_probability::always ();
401
402 block_to_remove = EDGE_SUCC (cond_block, 1)->dest;
403 }
404 else
405 {
406 EDGE_SUCC (cond_block, 1)->flags |= EDGE_FALLTHRU;
407 EDGE_SUCC (cond_block, 1)->flags
408 &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
409 EDGE_SUCC (cond_block, 1)->probability = profile_probability::always ();
410
411 block_to_remove = EDGE_SUCC (cond_block, 0)->dest;
412 }
413 delete_basic_block (block_to_remove);
414
415 /* Eliminate the COND_EXPR at the end of COND_BLOCK. */
416 gsi = gsi_last_bb (cond_block);
417 gsi_remove (&gsi, true);
418
419 if (dump_file && (dump_flags & TDF_DETAILS))
420 fprintf (dump_file,
421 "COND_EXPR in block %d and PHI in block %d converted to straightline code.\n",
422 cond_block->index,
423 bb->index);
424 }
425
426 /* PR66726: Factor conversion out of COND_EXPR. If the arguments of the PHI
427    stmt are conversions, factor the conversion out and apply it to the
428    result of the PHI stmt.  COND_STMT is the controlling predicate.
429 Return the newly-created PHI, if any. */
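/* Illustrative sketch (editor addition): given source such as

     short a, b;
     long r = flag ? (long) a : (long) b;

   the cast is factored out of both PHI arguments, leaving

     short t = flag ? a : b;
     long r = (long) t;

   so a single conversion remains and the new PHI may enable the other
   replacements below (e.g. min/max).  */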
430
431 static gphi *
432 factor_out_conditional_conversion (edge e0, edge e1, gphi *phi,
433 tree arg0, tree arg1, gimple *cond_stmt)
434 {
435 gimple *arg0_def_stmt = NULL, *arg1_def_stmt = NULL, *new_stmt;
436 tree new_arg0 = NULL_TREE, new_arg1 = NULL_TREE;
437 tree temp, result;
438 gphi *newphi;
439 gimple_stmt_iterator gsi, gsi_for_def;
440 location_t locus = gimple_location (phi);
441 enum tree_code convert_code;
442
443 /* Handle only PHI statements with two arguments. TODO: If all
444 other arguments to PHI are INTEGER_CST or if their defining
445      statements have the same unary operation, we can handle more
446 than two arguments too. */
447 if (gimple_phi_num_args (phi) != 2)
448 return NULL;
449
450 /* First canonicalize to simplify tests. */
451 if (TREE_CODE (arg0) != SSA_NAME)
452 {
453 std::swap (arg0, arg1);
454 std::swap (e0, e1);
455 }
456
457 if (TREE_CODE (arg0) != SSA_NAME
458 || (TREE_CODE (arg1) != SSA_NAME
459 && TREE_CODE (arg1) != INTEGER_CST))
460 return NULL;
461
462 /* Check if arg0 is an SSA_NAME and the stmt which defines arg0 is
463 a conversion. */
464 arg0_def_stmt = SSA_NAME_DEF_STMT (arg0);
465 if (!gimple_assign_cast_p (arg0_def_stmt))
466 return NULL;
467
468 /* Use the RHS as new_arg0. */
469 convert_code = gimple_assign_rhs_code (arg0_def_stmt);
470 new_arg0 = gimple_assign_rhs1 (arg0_def_stmt);
471 if (convert_code == VIEW_CONVERT_EXPR)
472 {
473 new_arg0 = TREE_OPERAND (new_arg0, 0);
474 if (!is_gimple_reg_type (TREE_TYPE (new_arg0)))
475 return NULL;
476 }
477 if (TREE_CODE (new_arg0) == SSA_NAME
478 && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (new_arg0))
479 return NULL;
480
481 if (TREE_CODE (arg1) == SSA_NAME)
482 {
483 /* Check if arg1 is an SSA_NAME and the stmt which defines arg1
484 is a conversion. */
485 arg1_def_stmt = SSA_NAME_DEF_STMT (arg1);
486 if (!is_gimple_assign (arg1_def_stmt)
487 || gimple_assign_rhs_code (arg1_def_stmt) != convert_code)
488 return NULL;
489
490 /* Either arg1_def_stmt or arg0_def_stmt should be conditional. */
491 if (dominated_by_p (CDI_DOMINATORS, gimple_bb (phi), gimple_bb (arg0_def_stmt))
492 && dominated_by_p (CDI_DOMINATORS,
493 gimple_bb (phi), gimple_bb (arg1_def_stmt)))
494 return NULL;
495
496 /* Use the RHS as new_arg1. */
497 new_arg1 = gimple_assign_rhs1 (arg1_def_stmt);
498 if (convert_code == VIEW_CONVERT_EXPR)
499 new_arg1 = TREE_OPERAND (new_arg1, 0);
500 if (TREE_CODE (new_arg1) == SSA_NAME
501 && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (new_arg1))
502 return NULL;
503 }
504 else
505 {
506 /* arg0_def_stmt should be conditional. */
507 if (dominated_by_p (CDI_DOMINATORS, gimple_bb (phi), gimple_bb (arg0_def_stmt)))
508 return NULL;
509 /* If arg1 is an INTEGER_CST, fold it to new type. */
510 if (INTEGRAL_TYPE_P (TREE_TYPE (new_arg0))
511 && int_fits_type_p (arg1, TREE_TYPE (new_arg0)))
512 {
513 if (gimple_assign_cast_p (arg0_def_stmt))
514 {
515 /* For the INTEGER_CST case, we are just moving the
516 conversion from one place to another, which can often
517 hurt as the conversion moves further away from the
518 statement that computes the value. So, perform this
519 only if new_arg0 is an operand of COND_STMT, or
520 if arg0_def_stmt is the only non-debug stmt in
521 its basic block, because then it is possible this
522 could enable further optimizations (minmax replacement
523 etc.). See PR71016. */
524 if (new_arg0 != gimple_cond_lhs (cond_stmt)
525 && new_arg0 != gimple_cond_rhs (cond_stmt)
526 && gimple_bb (arg0_def_stmt) == e0->src)
527 {
528 gsi = gsi_for_stmt (arg0_def_stmt);
529 gsi_prev_nondebug (&gsi);
530 if (!gsi_end_p (gsi))
531 {
532 if (gassign *assign
533 = dyn_cast <gassign *> (gsi_stmt (gsi)))
534 {
535 tree lhs = gimple_assign_lhs (assign);
536 enum tree_code ass_code
537 = gimple_assign_rhs_code (assign);
538 if (ass_code != MAX_EXPR && ass_code != MIN_EXPR)
539 return NULL;
540 if (lhs != gimple_assign_rhs1 (arg0_def_stmt))
541 return NULL;
542 gsi_prev_nondebug (&gsi);
543 if (!gsi_end_p (gsi))
544 return NULL;
545 }
546 else
547 return NULL;
548 }
549 gsi = gsi_for_stmt (arg0_def_stmt);
550 gsi_next_nondebug (&gsi);
551 if (!gsi_end_p (gsi))
552 return NULL;
553 }
554 new_arg1 = fold_convert (TREE_TYPE (new_arg0), arg1);
555 }
556 else
557 return NULL;
558 }
559 else
560 return NULL;
561 }
562
563 /* If arg0/arg1 have > 1 use, then this transformation actually increases
564 the number of expressions evaluated at runtime. */
565 if (!has_single_use (arg0)
566 || (arg1_def_stmt && !has_single_use (arg1)))
567 return NULL;
568
569   /* If the types of new_arg0 and new_arg1 differ, bail out.  */
570 if (!types_compatible_p (TREE_TYPE (new_arg0), TREE_TYPE (new_arg1)))
571 return NULL;
572
573 /* Create a new PHI stmt. */
574 result = PHI_RESULT (phi);
575 temp = make_ssa_name (TREE_TYPE (new_arg0), NULL);
576 newphi = create_phi_node (temp, gimple_bb (phi));
577
578 if (dump_file && (dump_flags & TDF_DETAILS))
579 {
580 fprintf (dump_file, "PHI ");
581 print_generic_expr (dump_file, gimple_phi_result (phi));
582 fprintf (dump_file,
583 " changed to factor conversion out from COND_EXPR.\n");
584 fprintf (dump_file, "New stmt with CAST that defines ");
585 print_generic_expr (dump_file, result);
586 fprintf (dump_file, ".\n");
587 }
588
589   /* Remove the old cast(s), each of which has a single use.  */
590 gsi_for_def = gsi_for_stmt (arg0_def_stmt);
591 gsi_remove (&gsi_for_def, true);
592 release_defs (arg0_def_stmt);
593
594 if (arg1_def_stmt)
595 {
596 gsi_for_def = gsi_for_stmt (arg1_def_stmt);
597 gsi_remove (&gsi_for_def, true);
598 release_defs (arg1_def_stmt);
599 }
600
601 add_phi_arg (newphi, new_arg0, e0, locus);
602 add_phi_arg (newphi, new_arg1, e1, locus);
603
604 /* Create the conversion stmt and insert it. */
605 if (convert_code == VIEW_CONVERT_EXPR)
606 {
607 temp = fold_build1 (VIEW_CONVERT_EXPR, TREE_TYPE (result), temp);
608 new_stmt = gimple_build_assign (result, temp);
609 }
610 else
611 new_stmt = gimple_build_assign (result, convert_code, temp);
612 gsi = gsi_after_labels (gimple_bb (phi));
613 gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
614
615 /* Remove the original PHI stmt. */
616 gsi = gsi_for_stmt (phi);
617 gsi_remove (&gsi, true);
618 return newphi;
619 }
620
621 /* Optimize
622 # x_5 in range [cst1, cst2] where cst2 = cst1 + 1
623 if (x_5 op cstN) # where op is == or != and N is 1 or 2
624 goto bb3;
625 else
626 goto bb4;
627 bb3:
628 bb4:
629 # r_6 = PHI<cst3(2), cst4(3)> # where cst3 == cst4 + 1 or cst4 == cst3 + 1
630
631 to r_6 = x_5 + (min (cst3, cst4) - cst1) or
632 r_6 = (min (cst3, cst4) + cst1) - x_5 depending on op, N and which
633 of cst3 and cst4 is smaller. */
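/* Worked instance (editor addition): with x known to be in [0, 1],

     r = (x == 0) ? 3 : 4;   becomes   r = x + 3;
     r = (x == 0) ? 4 : 3;   becomes   r = 4 - x;

   eliminating the branch in favor of a single add or subtract.  */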
634
635 static bool
636 two_value_replacement (basic_block cond_bb, basic_block middle_bb,
637 edge e1, gphi *phi, tree arg0, tree arg1)
638 {
639 /* Only look for adjacent integer constants. */
640 if (!INTEGRAL_TYPE_P (TREE_TYPE (arg0))
641 || !INTEGRAL_TYPE_P (TREE_TYPE (arg1))
642 || TREE_CODE (arg0) != INTEGER_CST
643 || TREE_CODE (arg1) != INTEGER_CST
644 || (tree_int_cst_lt (arg0, arg1)
645 ? wi::to_widest (arg0) + 1 != wi::to_widest (arg1)
646 : wi::to_widest (arg1) + 1 != wi::to_widest (arg0)))
647 return false;
648
649 if (!empty_block_p (middle_bb))
650 return false;
651
652 gimple *stmt = last_stmt (cond_bb);
653 tree lhs = gimple_cond_lhs (stmt);
654 tree rhs = gimple_cond_rhs (stmt);
655
656 if (TREE_CODE (lhs) != SSA_NAME
657 || !INTEGRAL_TYPE_P (TREE_TYPE (lhs))
658 || TREE_CODE (rhs) != INTEGER_CST)
659 return false;
660
661 switch (gimple_cond_code (stmt))
662 {
663 case EQ_EXPR:
664 case NE_EXPR:
665 break;
666 default:
667 return false;
668 }
669
670 /* Defer boolean x ? 0 : {1,-1} or x ? {1,-1} : 0 to
671 conditional_replacement. */
672 if (TREE_CODE (TREE_TYPE (lhs)) == BOOLEAN_TYPE
673 && (integer_zerop (arg0)
674 || integer_zerop (arg1)
675 || TREE_CODE (TREE_TYPE (arg0)) == BOOLEAN_TYPE
676 || (TYPE_PRECISION (TREE_TYPE (arg0))
677 <= TYPE_PRECISION (TREE_TYPE (lhs)))))
678 return false;
679
680 wide_int min, max;
681 if (get_range_info (lhs, &min, &max) != VR_RANGE)
682 {
683 int prec = TYPE_PRECISION (TREE_TYPE (lhs));
684 signop sgn = TYPE_SIGN (TREE_TYPE (lhs));
685 min = wi::min_value (prec, sgn);
686 max = wi::max_value (prec, sgn);
687 }
688 if (min + 1 != max
689 || (wi::to_wide (rhs) != min
690 && wi::to_wide (rhs) != max))
691 return false;
692
693 /* We need to know which is the true edge and which is the false
694 edge so that we know when to invert the condition below. */
695 edge true_edge, false_edge;
696 extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
697 if ((gimple_cond_code (stmt) == EQ_EXPR)
698 ^ (wi::to_wide (rhs) == max)
699 ^ (e1 == false_edge))
700 std::swap (arg0, arg1);
701
702 tree type;
703 if (TYPE_PRECISION (TREE_TYPE (lhs)) == TYPE_PRECISION (TREE_TYPE (arg0)))
704 {
705       /* Avoid performing the arithmetic in a boolean type, which has
706 	 different semantics; otherwise prefer the unsigned type of the
707 	 two with the same precision.  */
708 if (TREE_CODE (TREE_TYPE (arg0)) == BOOLEAN_TYPE
709 || !TYPE_UNSIGNED (TREE_TYPE (arg0)))
710 type = TREE_TYPE (lhs);
711 else
712 type = TREE_TYPE (arg0);
713 }
714 else if (TYPE_PRECISION (TREE_TYPE (lhs)) > TYPE_PRECISION (TREE_TYPE (arg0)))
715 type = TREE_TYPE (lhs);
716 else
717 type = TREE_TYPE (arg0);
718
719 min = wide_int::from (min, TYPE_PRECISION (type),
720 TYPE_SIGN (TREE_TYPE (lhs)));
721 wide_int a = wide_int::from (wi::to_wide (arg0), TYPE_PRECISION (type),
722 TYPE_SIGN (TREE_TYPE (arg0)));
723 enum tree_code code;
724 wi::overflow_type ovf;
725 if (tree_int_cst_lt (arg0, arg1))
726 {
727 code = PLUS_EXPR;
728 a -= min;
729 if (!TYPE_UNSIGNED (type))
730 {
731 /* lhs is known to be in range [min, min+1] and we want to add a
732 to it. Check if that operation can overflow for those 2 values
733 and if yes, force unsigned type. */
734 wi::add (min + (wi::neg_p (a) ? 0 : 1), a, SIGNED, &ovf);
735 if (ovf)
736 type = unsigned_type_for (type);
737 }
738 }
739 else
740 {
741 code = MINUS_EXPR;
742 a += min;
743 if (!TYPE_UNSIGNED (type))
744 {
745 /* lhs is known to be in range [min, min+1] and we want to subtract
746 it from a. Check if that operation can overflow for those 2
747 values and if yes, force unsigned type. */
748 wi::sub (a, min + (wi::neg_p (min) ? 0 : 1), SIGNED, &ovf);
749 if (ovf)
750 type = unsigned_type_for (type);
751 }
752 }
753
754 tree arg = wide_int_to_tree (type, a);
755 gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
756 if (!useless_type_conversion_p (type, TREE_TYPE (lhs)))
757 lhs = gimplify_build1 (&gsi, NOP_EXPR, type, lhs);
758 tree new_rhs;
759 if (code == PLUS_EXPR)
760 new_rhs = gimplify_build2 (&gsi, PLUS_EXPR, type, lhs, arg);
761 else
762 new_rhs = gimplify_build2 (&gsi, MINUS_EXPR, type, arg, lhs);
763 if (!useless_type_conversion_p (TREE_TYPE (arg0), type))
764 new_rhs = gimplify_build1 (&gsi, NOP_EXPR, TREE_TYPE (arg0), new_rhs);
765
766 replace_phi_edge_with_variable (cond_bb, e1, phi, new_rhs);
767
768 /* Note that we optimized this PHI. */
769 return true;
770 }
771
772 /* The function conditional_replacement does the main work of doing the
773 conditional replacement. Return true if the replacement is done.
774 Otherwise return false.
775    BB is the basic block where the replacement is going to be done.  ARG0
776 is argument 0 from PHI. Likewise for ARG1. */
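/* Illustrative sketch (editor addition) of the shapes handled here:

     r = (a < b) ? 1 : 0;    becomes   r = (int) (a < b);
     r = (a < b) ? -1 : 0;   becomes   r = -(int) (a < b);
     r = (a < b) ? 4 : 0;    becomes   r = (int) (a < b) << 2;

   i.e. the PHI is replaced by the condition itself, possibly negated
   or shifted.  */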
777
778 static bool
779 conditional_replacement (basic_block cond_bb, basic_block middle_bb,
780 edge e0, edge e1, gphi *phi,
781 tree arg0, tree arg1)
782 {
783 tree result;
784 gimple *stmt;
785 gassign *new_stmt;
786 tree cond;
787 gimple_stmt_iterator gsi;
788 edge true_edge, false_edge;
789 tree new_var, new_var2;
790 bool neg = false;
791 int shift = 0;
792 tree nonzero_arg;
793
794 /* FIXME: Gimplification of complex type is too hard for now. */
795 /* We aren't prepared to handle vectors either (and it is a question
796 if it would be worthwhile anyway). */
797 if (!(INTEGRAL_TYPE_P (TREE_TYPE (arg0))
798 || POINTER_TYPE_P (TREE_TYPE (arg0)))
799 || !(INTEGRAL_TYPE_P (TREE_TYPE (arg1))
800 || POINTER_TYPE_P (TREE_TYPE (arg1))))
801 return false;
802
803   /* If the PHI arguments are the constants 0 and 1, 0 and -1, or 0 and
804      (1 << cst), then convert the PHI to a use of the condition.  */
805 if (integer_zerop (arg0))
806 nonzero_arg = arg1;
807 else if (integer_zerop (arg1))
808 nonzero_arg = arg0;
809 else
810 return false;
811 if (integer_all_onesp (nonzero_arg))
812 neg = true;
813 else if (integer_pow2p (nonzero_arg))
814 {
815 shift = tree_log2 (nonzero_arg);
816 if (shift && POINTER_TYPE_P (TREE_TYPE (nonzero_arg)))
817 return false;
818 }
819 else
820 return false;
821
822 if (!empty_block_p (middle_bb))
823 return false;
824
825 /* At this point we know we have a GIMPLE_COND with two successors.
826 One successor is BB, the other successor is an empty block which
827 falls through into BB.
828
829 There is a single PHI node at the join point (BB) and its arguments
830 are constants (0, 1) or (0, -1) or (0, (1 << shift)).
831
832 So, given the condition COND, and the two PHI arguments, we can
833 rewrite this PHI into non-branching code:
834
835 dest = (COND) or dest = COND' or dest = (COND) << shift
836
837 We use the condition as-is if the argument associated with the
838 true edge has the value one or the argument associated with the
839      false edge has the value zero.  Note that those conditions are not
840 the same since only one of the outgoing edges from the GIMPLE_COND
841 will directly reach BB and thus be associated with an argument. */
842
843 stmt = last_stmt (cond_bb);
844 result = PHI_RESULT (phi);
845
846 /* To handle special cases like floating point comparison, it is easier and
847      less error-prone to build a tree and gimplify it on the fly, though it is
848 less efficient. */
849 cond = fold_build2_loc (gimple_location (stmt),
850 gimple_cond_code (stmt), boolean_type_node,
851 gimple_cond_lhs (stmt), gimple_cond_rhs (stmt));
852
853 /* We need to know which is the true edge and which is the false
854 edge so that we know when to invert the condition below. */
855 extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
856 if ((e0 == true_edge && integer_zerop (arg0))
857 || (e0 == false_edge && !integer_zerop (arg0))
858 || (e1 == true_edge && integer_zerop (arg1))
859 || (e1 == false_edge && !integer_zerop (arg1)))
860 cond = fold_build1_loc (gimple_location (stmt),
861 TRUTH_NOT_EXPR, TREE_TYPE (cond), cond);
862
863 if (neg)
864 {
865 cond = fold_convert_loc (gimple_location (stmt),
866 TREE_TYPE (result), cond);
867 cond = fold_build1_loc (gimple_location (stmt),
868 NEGATE_EXPR, TREE_TYPE (cond), cond);
869 }
870 else if (shift)
871 {
872 cond = fold_convert_loc (gimple_location (stmt),
873 TREE_TYPE (result), cond);
874 cond = fold_build2_loc (gimple_location (stmt),
875 LSHIFT_EXPR, TREE_TYPE (cond), cond,
876 build_int_cst (integer_type_node, shift));
877 }
878
879 /* Insert our new statements at the end of conditional block before the
880 COND_STMT. */
881 gsi = gsi_for_stmt (stmt);
882 new_var = force_gimple_operand_gsi (&gsi, cond, true, NULL, true,
883 GSI_SAME_STMT);
884
885 if (!useless_type_conversion_p (TREE_TYPE (result), TREE_TYPE (new_var)))
886 {
887 location_t locus_0, locus_1;
888
889 new_var2 = make_ssa_name (TREE_TYPE (result));
890 new_stmt = gimple_build_assign (new_var2, CONVERT_EXPR, new_var);
891 gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
892 new_var = new_var2;
893
894       /* Set the locus to the first argument, unless it doesn't have one.  */
895 locus_0 = gimple_phi_arg_location (phi, 0);
896 locus_1 = gimple_phi_arg_location (phi, 1);
897 if (locus_0 == UNKNOWN_LOCATION)
898 locus_0 = locus_1;
899 gimple_set_location (new_stmt, locus_0);
900 }
901
902 replace_phi_edge_with_variable (cond_bb, e1, phi, new_var);
903
904 /* Note that we optimized this PHI. */
905 return true;
906 }
907
908 /* Update *ARG which is defined in STMT so that it contains the
909 computed value if that seems profitable. Return true if the
910 statement is made dead by that rewriting. */
911
912 static bool
913 jump_function_from_stmt (tree *arg, gimple *stmt)
914 {
915 enum tree_code code = gimple_assign_rhs_code (stmt);
916 if (code == ADDR_EXPR)
917 {
918 /* For arg = &p->i transform it to p, if possible. */
919 tree rhs1 = gimple_assign_rhs1 (stmt);
920 poly_int64 offset;
921 tree tem = get_addr_base_and_unit_offset (TREE_OPERAND (rhs1, 0),
922 &offset);
923 if (tem
924 && TREE_CODE (tem) == MEM_REF
925 && known_eq (mem_ref_offset (tem) + offset, 0))
926 {
927 *arg = TREE_OPERAND (tem, 0);
928 return true;
929 }
930 }
931 /* TODO: Much like IPA-CP jump-functions we want to handle constant
932 additions symbolically here, and we'd need to update the comparison
933 code that compares the arg + cst tuples in our caller. For now the
934 code above exactly handles the VEC_BASE pattern from vec.h. */
935 return false;
936 }
937
938 /* RHS is a source argument in a BIT_AND_EXPR which feeds a conditional
939 of the form SSA_NAME NE 0.
940
941 If RHS is fed by a simple EQ_EXPR comparison of two values, see if
942 the two input values of the EQ_EXPR match arg0 and arg1.
943
944 If so update *code and return TRUE. Otherwise return FALSE. */
945
946 static bool
947 rhs_is_fed_for_value_replacement (const_tree arg0, const_tree arg1,
948 enum tree_code *code, const_tree rhs)
949 {
950 /* Obviously if RHS is not an SSA_NAME, we can't look at the defining
951 statement. */
952 if (TREE_CODE (rhs) == SSA_NAME)
953 {
954 gimple *def1 = SSA_NAME_DEF_STMT (rhs);
955
956 /* Verify the defining statement has an EQ_EXPR on the RHS. */
957 if (is_gimple_assign (def1) && gimple_assign_rhs_code (def1) == EQ_EXPR)
958 {
959 /* Finally verify the source operands of the EQ_EXPR are equal
960 to arg0 and arg1. */
961 tree op0 = gimple_assign_rhs1 (def1);
962 tree op1 = gimple_assign_rhs2 (def1);
963 if ((operand_equal_for_phi_arg_p (arg0, op0)
964 && operand_equal_for_phi_arg_p (arg1, op1))
965 || (operand_equal_for_phi_arg_p (arg0, op1)
966 && operand_equal_for_phi_arg_p (arg1, op0)))
967 {
968 /* We will perform the optimization. */
969 *code = gimple_assign_rhs_code (def1);
970 return true;
971 }
972 }
973 }
974 return false;
975 }
976
977 /* Return TRUE if arg0/arg1 are equal to the rhs/lhs or lhs/rhs of COND.
978
979    Also return TRUE if arg0/arg1 are equal to the source arguments of
980 an EQ comparison feeding a BIT_AND_EXPR which feeds COND.
981
982 Return FALSE otherwise. */
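/* Editor-added sketch of the BIT_AND_EXPR form described above, in GIMPLE:

     _1 = a_2 == b_3;
     _4 = _1 & c_5;
     if (_4 != 0) ...

   With arg0/arg1 equal to a_2/b_3, the guard implies the equality that
   value replacement needs.  */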
983
984 static bool
985 operand_equal_for_value_replacement (const_tree arg0, const_tree arg1,
986 enum tree_code *code, gimple *cond)
987 {
988 gimple *def;
989 tree lhs = gimple_cond_lhs (cond);
990 tree rhs = gimple_cond_rhs (cond);
991
992 if ((operand_equal_for_phi_arg_p (arg0, lhs)
993 && operand_equal_for_phi_arg_p (arg1, rhs))
994 || (operand_equal_for_phi_arg_p (arg1, lhs)
995 && operand_equal_for_phi_arg_p (arg0, rhs)))
996 return true;
997
998 /* Now handle more complex case where we have an EQ comparison
999 which feeds a BIT_AND_EXPR which feeds COND.
1000
1001 First verify that COND is of the form SSA_NAME NE 0. */
1002 if (*code != NE_EXPR || !integer_zerop (rhs)
1003 || TREE_CODE (lhs) != SSA_NAME)
1004 return false;
1005
1006 /* Now ensure that SSA_NAME is set by a BIT_AND_EXPR. */
1007 def = SSA_NAME_DEF_STMT (lhs);
1008 if (!is_gimple_assign (def) || gimple_assign_rhs_code (def) != BIT_AND_EXPR)
1009 return false;
1010
1011 /* Now verify arg0/arg1 correspond to the source arguments of an
1012 EQ comparison feeding the BIT_AND_EXPR. */
1013
1014 tree tmp = gimple_assign_rhs1 (def);
1015 if (rhs_is_fed_for_value_replacement (arg0, arg1, code, tmp))
1016 return true;
1017
1018 tmp = gimple_assign_rhs2 (def);
1019 if (rhs_is_fed_for_value_replacement (arg0, arg1, code, tmp))
1020 return true;
1021
1022 return false;
1023 }
1024
1025 /* Returns true if ARG is a neutral element for operation CODE
1026 on the RIGHT side. */
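/* Editor-added examples: 0 is neutral for x + 0 on either side, but for
   x - 0 and x << 0 only on the right (0 - x and 0 << x differ from x);
   1 is neutral for x * 1 and, on the right, for x / 1; all-ones is
   neutral for x & -1.  */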
1027
1028 static bool
1029 neutral_element_p (tree_code code, tree arg, bool right)
1030 {
1031 switch (code)
1032 {
1033 case PLUS_EXPR:
1034 case BIT_IOR_EXPR:
1035 case BIT_XOR_EXPR:
1036 return integer_zerop (arg);
1037
1038 case LROTATE_EXPR:
1039 case RROTATE_EXPR:
1040 case LSHIFT_EXPR:
1041 case RSHIFT_EXPR:
1042 case MINUS_EXPR:
1043 case POINTER_PLUS_EXPR:
1044 return right && integer_zerop (arg);
1045
1046 case MULT_EXPR:
1047 return integer_onep (arg);
1048
1049 case TRUNC_DIV_EXPR:
1050 case CEIL_DIV_EXPR:
1051 case FLOOR_DIV_EXPR:
1052 case ROUND_DIV_EXPR:
1053 case EXACT_DIV_EXPR:
1054 return right && integer_onep (arg);
1055
1056 case BIT_AND_EXPR:
1057 return integer_all_onesp (arg);
1058
1059 default:
1060 return false;
1061 }
1062 }
1063
1064 /* Returns true if ARG is an absorbing element for operation CODE. */
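/* Editor-added examples: x * 0 and x & 0 are 0 for any x; 0 << x and
   0 >> x are 0 only when the 0 is the left operand; 0 / x and 0 % x
   are 0 only if x is provably nonzero, hence the RVAL test below.  */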
1065
1066 static bool
1067 absorbing_element_p (tree_code code, tree arg, bool right, tree rval)
1068 {
1069 switch (code)
1070 {
1071 case BIT_IOR_EXPR:
1072 return integer_all_onesp (arg);
1073
1074 case MULT_EXPR:
1075 case BIT_AND_EXPR:
1076 return integer_zerop (arg);
1077
1078 case LSHIFT_EXPR:
1079 case RSHIFT_EXPR:
1080 case LROTATE_EXPR:
1081 case RROTATE_EXPR:
1082 return !right && integer_zerop (arg);
1083
1084 case TRUNC_DIV_EXPR:
1085 case CEIL_DIV_EXPR:
1086 case FLOOR_DIV_EXPR:
1087 case ROUND_DIV_EXPR:
1088 case EXACT_DIV_EXPR:
1089 case TRUNC_MOD_EXPR:
1090 case CEIL_MOD_EXPR:
1091 case FLOOR_MOD_EXPR:
1092 case ROUND_MOD_EXPR:
1093 return (!right
1094 && integer_zerop (arg)
1095 && tree_single_nonzero_warnv_p (rval, NULL));
1096
1097 default:
1098 return false;
1099 }
1100 }
1101
1102 /* The function value_replacement does the main work of doing the value
1103 replacement. Return non-zero if the replacement is done. Otherwise return
1104 0. If we remove the middle basic block, return 2.
1105    BB is the basic block where the replacement is going to be done.  ARG0
1106 is argument 0 from the PHI. Likewise for ARG1. */
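/* Editor-added sketch of the simplest case handled here:

     r = (x == 5) ? 5 : x;

   Both arms yield the value of x, so the PHI reduces to r = x.  The
   code further below also handles e.g. (x != 0) ? x + y : y.  */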
1107
1108 static int
1109 value_replacement (basic_block cond_bb, basic_block middle_bb,
1110 edge e0, edge e1, gimple *phi,
1111 tree arg0, tree arg1)
1112 {
1113 gimple_stmt_iterator gsi;
1114 gimple *cond;
1115 edge true_edge, false_edge;
1116 enum tree_code code;
1117 bool empty_or_with_defined_p = true;
1118
1119 /* If the type says honor signed zeros we cannot do this
1120 optimization. */
1121 if (HONOR_SIGNED_ZEROS (arg1))
1122 return 0;
1123
1124 /* If there is a statement in MIDDLE_BB that defines one of the PHI
1125 arguments, then adjust arg0 or arg1. */
1126 gsi = gsi_start_nondebug_after_labels_bb (middle_bb);
1127 while (!gsi_end_p (gsi))
1128 {
1129 gimple *stmt = gsi_stmt (gsi);
1130 tree lhs;
1131 gsi_next_nondebug (&gsi);
1132 if (!is_gimple_assign (stmt))
1133 {
1134 if (gimple_code (stmt) != GIMPLE_PREDICT
1135 && gimple_code (stmt) != GIMPLE_NOP)
1136 empty_or_with_defined_p = false;
1137 continue;
1138 }
1139 /* Now try to adjust arg0 or arg1 according to the computation
1140 in the statement. */
1141 lhs = gimple_assign_lhs (stmt);
1142 if (!(lhs == arg0
1143 && jump_function_from_stmt (&arg0, stmt))
1144 || (lhs == arg1
1145 && jump_function_from_stmt (&arg1, stmt)))
1146 empty_or_with_defined_p = false;
1147 }
1148
1149 cond = last_stmt (cond_bb);
1150 code = gimple_cond_code (cond);
1151
1152 /* This transformation is only valid for equality comparisons. */
1153 if (code != NE_EXPR && code != EQ_EXPR)
1154 return 0;
1155
1156   /* We need to know which is the true edge and which is the false
1157      edge so that we know which PHI argument corresponds to each edge.  */
1158 extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
1159
1160 /* At this point we know we have a COND_EXPR with two successors.
1161 One successor is BB, the other successor is an empty block which
1162 falls through into BB.
1163
1164 The condition for the COND_EXPR is known to be NE_EXPR or EQ_EXPR.
1165
1166 There is a single PHI node at the join point (BB) with two arguments.
1167
1168 We now need to verify that the two arguments in the PHI node match
1169 the two arguments to the equality comparison. */
1170
1171 if (operand_equal_for_value_replacement (arg0, arg1, &code, cond))
1172 {
1173 edge e;
1174 tree arg;
1175
1176 /* For NE_EXPR, we want to build an assignment result = arg where
1177 arg is the PHI argument associated with the true edge. For
1178 EQ_EXPR we want the PHI argument associated with the false edge. */
1179 e = (code == NE_EXPR ? true_edge : false_edge);
1180
1181 /* Unfortunately, E may not reach BB (it may instead have gone to
1182 OTHER_BLOCK). If that is the case, then we want the single outgoing
1183 edge from OTHER_BLOCK which reaches BB and represents the desired
1184 path from COND_BLOCK. */
1185 if (e->dest == middle_bb)
1186 e = single_succ_edge (e->dest);
1187
1188 /* Now we know the incoming edge to BB that has the argument for the
1189 RHS of our new assignment statement. */
1190 if (e0 == e)
1191 arg = arg0;
1192 else
1193 arg = arg1;
1194
1195       /* If the middle basic block was empty or only defines the PHI
1196 	 arguments, and this is the single PHI whose arguments differ for
1197 	 edges e0 and e1, then we can remove the middle basic block.  */
1198 if (empty_or_with_defined_p
1199 && single_non_singleton_phi_for_edges (phi_nodes (gimple_bb (phi)),
1200 e0, e1) == phi)
1201 {
1202 replace_phi_edge_with_variable (cond_bb, e1, phi, arg);
1203 /* Note that we optimized this PHI. */
1204 return 2;
1205 }
1206 else
1207 {
1208 /* Replace the PHI arguments with arg. */
1209 SET_PHI_ARG_DEF (phi, e0->dest_idx, arg);
1210 SET_PHI_ARG_DEF (phi, e1->dest_idx, arg);
1211 if (dump_file && (dump_flags & TDF_DETAILS))
1212 {
1213 fprintf (dump_file, "PHI ");
1214 print_generic_expr (dump_file, gimple_phi_result (phi));
1215 fprintf (dump_file, " reduced for COND_EXPR in block %d to ",
1216 cond_bb->index);
1217 print_generic_expr (dump_file, arg);
1218 fprintf (dump_file, ".\n");
1219 }
1220 return 1;
1221 }
1222
1223 }
1224
1225 /* Now optimize (x != 0) ? x + y : y to just x + y. */
1226 gsi = gsi_last_nondebug_bb (middle_bb);
1227 if (gsi_end_p (gsi))
1228 return 0;
1229
1230 gimple *assign = gsi_stmt (gsi);
1231 if (!is_gimple_assign (assign)
1232 || gimple_assign_rhs_class (assign) != GIMPLE_BINARY_RHS
1233 || (!INTEGRAL_TYPE_P (TREE_TYPE (arg0))
1234 && !POINTER_TYPE_P (TREE_TYPE (arg0))))
1235 return 0;
1236
1237   /* Punt if there are (degenerate) PHIs in middle_bb; there should not be.  */
1238 if (!gimple_seq_empty_p (phi_nodes (middle_bb)))
1239 return 0;
1240
1241   /* Allow up to 2 cheap preparation statements that prepare the argument
1242 for assign, e.g.:
1243 if (y_4 != 0)
1244 goto <bb 3>;
1245 else
1246 goto <bb 4>;
1247 <bb 3>:
1248 _1 = (int) y_4;
1249 iftmp.0_6 = x_5(D) r<< _1;
1250 <bb 4>:
1251 # iftmp.0_2 = PHI <iftmp.0_6(3), x_5(D)(2)>
1252 or:
1253 if (y_3(D) == 0)
1254 goto <bb 4>;
1255 else
1256 goto <bb 3>;
1257 <bb 3>:
1258 y_4 = y_3(D) & 31;
1259 _1 = (int) y_4;
1260 _6 = x_5(D) r<< _1;
1261 <bb 4>:
1262 # _2 = PHI <x_5(D)(2), _6(3)> */
1263 gimple *prep_stmt[2] = { NULL, NULL };
1264 int prep_cnt;
1265 for (prep_cnt = 0; ; prep_cnt++)
1266 {
1267 gsi_prev_nondebug (&gsi);
1268 if (gsi_end_p (gsi))
1269 break;
1270
1271 gimple *g = gsi_stmt (gsi);
1272 if (gimple_code (g) == GIMPLE_LABEL)
1273 break;
1274
1275 if (prep_cnt == 2 || !is_gimple_assign (g))
1276 return 0;
1277
1278 tree lhs = gimple_assign_lhs (g);
1279 tree rhs1 = gimple_assign_rhs1 (g);
1280 use_operand_p use_p;
1281 gimple *use_stmt;
1282 if (TREE_CODE (lhs) != SSA_NAME
1283 || TREE_CODE (rhs1) != SSA_NAME
1284 || !INTEGRAL_TYPE_P (TREE_TYPE (lhs))
1285 || !INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
1286 || !single_imm_use (lhs, &use_p, &use_stmt)
1287 || use_stmt != (prep_cnt ? prep_stmt[prep_cnt - 1] : assign))
1288 return 0;
1289 switch (gimple_assign_rhs_code (g))
1290 {
1291 CASE_CONVERT:
1292 break;
1293 case PLUS_EXPR:
1294 case BIT_AND_EXPR:
1295 case BIT_IOR_EXPR:
1296 case BIT_XOR_EXPR:
1297 if (TREE_CODE (gimple_assign_rhs2 (g)) != INTEGER_CST)
1298 return 0;
1299 break;
1300 default:
1301 return 0;
1302 }
1303 prep_stmt[prep_cnt] = g;
1304 }
1305
1306 /* Only transform if it removes the condition. */
1307 if (!single_non_singleton_phi_for_edges (phi_nodes (gimple_bb (phi)), e0, e1))
1308 return 0;
1309
1310 /* Size-wise, this is always profitable. */
1311 if (optimize_bb_for_speed_p (cond_bb)
1312 /* The special case is useless if it has a low probability. */
1313 && profile_status_for_fn (cfun) != PROFILE_ABSENT
1314 && EDGE_PRED (middle_bb, 0)->probability < profile_probability::even ()
1315 /* If assign is cheap, there is no point avoiding it. */
1316 && estimate_num_insns_seq (bb_seq (middle_bb), &eni_time_weights)
1317 >= 3 * estimate_num_insns (cond, &eni_time_weights))
1318 return 0;
1319
1320 tree lhs = gimple_assign_lhs (assign);
1321 tree rhs1 = gimple_assign_rhs1 (assign);
1322 tree rhs2 = gimple_assign_rhs2 (assign);
1323 enum tree_code code_def = gimple_assign_rhs_code (assign);
1324 tree cond_lhs = gimple_cond_lhs (cond);
1325 tree cond_rhs = gimple_cond_rhs (cond);
1326
1327 /* Propagate the cond_rhs constant through preparation stmts,
1328 make sure UB isn't invoked while doing that. */
1329 for (int i = prep_cnt - 1; i >= 0; --i)
1330 {
1331 gimple *g = prep_stmt[i];
1332 tree grhs1 = gimple_assign_rhs1 (g);
1333 if (!operand_equal_for_phi_arg_p (cond_lhs, grhs1))
1334 return 0;
1335 cond_lhs = gimple_assign_lhs (g);
1336 cond_rhs = fold_convert (TREE_TYPE (grhs1), cond_rhs);
1337 if (TREE_CODE (cond_rhs) != INTEGER_CST
1338 || TREE_OVERFLOW (cond_rhs))
1339 return 0;
1340 if (gimple_assign_rhs_class (g) == GIMPLE_BINARY_RHS)
1341 {
1342 cond_rhs = int_const_binop (gimple_assign_rhs_code (g), cond_rhs,
1343 gimple_assign_rhs2 (g));
1344 if (TREE_OVERFLOW (cond_rhs))
1345 return 0;
1346 }
1347 cond_rhs = fold_convert (TREE_TYPE (cond_lhs), cond_rhs);
1348 if (TREE_CODE (cond_rhs) != INTEGER_CST
1349 || TREE_OVERFLOW (cond_rhs))
1350 return 0;
1351 }
1352
1353 if (((code == NE_EXPR && e1 == false_edge)
1354 || (code == EQ_EXPR && e1 == true_edge))
1355 && arg0 == lhs
1356 && ((arg1 == rhs1
1357 && operand_equal_for_phi_arg_p (rhs2, cond_lhs)
1358 && neutral_element_p (code_def, cond_rhs, true))
1359 || (arg1 == rhs2
1360 && operand_equal_for_phi_arg_p (rhs1, cond_lhs)
1361 && neutral_element_p (code_def, cond_rhs, false))
1362 || (operand_equal_for_phi_arg_p (arg1, cond_rhs)
1363 && ((operand_equal_for_phi_arg_p (rhs2, cond_lhs)
1364 && absorbing_element_p (code_def, cond_rhs, true, rhs2))
1365 || (operand_equal_for_phi_arg_p (rhs1, cond_lhs)
1366 && absorbing_element_p (code_def,
1367 cond_rhs, false, rhs2))))))
1368 {
1369 gsi = gsi_for_stmt (cond);
1370 /* Moving ASSIGN might change VR of lhs, e.g. when moving u_6
1371 def-stmt in:
1372 if (n_5 != 0)
1373 goto <bb 3>;
1374 else
1375 goto <bb 4>;
1376
1377 <bb 3>:
1378 # RANGE [0, 4294967294]
1379 u_6 = n_5 + 4294967295;
1380
1381 <bb 4>:
1382 # u_3 = PHI <u_6(3), 4294967295(2)> */
1383 reset_flow_sensitive_info (lhs);
1384 if (INTEGRAL_TYPE_P (TREE_TYPE (lhs)))
1385 {
1386 /* If available, we can use VR of phi result at least. */
1387 tree phires = gimple_phi_result (phi);
1388 struct range_info_def *phires_range_info
1389 = SSA_NAME_RANGE_INFO (phires);
1390 if (phires_range_info)
1391 duplicate_ssa_name_range_info (lhs, SSA_NAME_RANGE_TYPE (phires),
1392 phires_range_info);
1393 }
1394 gimple_stmt_iterator gsi_from;
1395 for (int i = prep_cnt - 1; i >= 0; --i)
1396 {
1397 tree plhs = gimple_assign_lhs (prep_stmt[i]);
1398 reset_flow_sensitive_info (plhs);
1399 gsi_from = gsi_for_stmt (prep_stmt[i]);
1400 gsi_move_before (&gsi_from, &gsi);
1401 }
1402 gsi_from = gsi_for_stmt (assign);
1403 gsi_move_before (&gsi_from, &gsi);
1404 replace_phi_edge_with_variable (cond_bb, e1, phi, lhs);
1405 return 2;
1406 }
1407
1408 return 0;
1409 }
1410
1411 /* The function minmax_replacement does the main work of doing the minmax
1412 replacement. Return true if the replacement is done. Otherwise return
1413 false.
1414    BB is the basic block where the replacement is going to be done.  ARG0
1415 is argument 0 from the PHI. Likewise for ARG1. */
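/* Editor-added sketch of the empty-middle-block case:

     r = (a < b) ? a : b;   becomes   r = MIN_EXPR <a, b>;
     r = (a < b) ? b : a;   becomes   r = MAX_EXPR <a, b>;

   subject to the NaN/signed-zero checks below.  */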
1416
1417 static bool
1418 minmax_replacement (basic_block cond_bb, basic_block middle_bb,
1419 edge e0, edge e1, gimple *phi,
1420 tree arg0, tree arg1)
1421 {
1422 tree result;
1423 edge true_edge, false_edge;
1424 enum tree_code minmax, ass_code;
1425 tree smaller, larger, arg_true, arg_false;
1426 gimple_stmt_iterator gsi, gsi_from;
1427
1428 tree type = TREE_TYPE (PHI_RESULT (phi));
1429
1430 /* The optimization may be unsafe due to NaNs. */
1431 if (HONOR_NANS (type) || HONOR_SIGNED_ZEROS (type))
1432 return false;
1433
1434 gcond *cond = as_a <gcond *> (last_stmt (cond_bb));
1435 enum tree_code cmp = gimple_cond_code (cond);
1436 tree rhs = gimple_cond_rhs (cond);
1437
1438 /* Turn EQ/NE of extreme values to order comparisons. */
1439 if ((cmp == NE_EXPR || cmp == EQ_EXPR)
1440 && TREE_CODE (rhs) == INTEGER_CST
1441 && INTEGRAL_TYPE_P (TREE_TYPE (rhs)))
1442 {
1443 if (wi::eq_p (wi::to_wide (rhs), wi::min_value (TREE_TYPE (rhs))))
1444 {
1445 cmp = (cmp == EQ_EXPR) ? LT_EXPR : GE_EXPR;
1446 rhs = wide_int_to_tree (TREE_TYPE (rhs),
1447 wi::min_value (TREE_TYPE (rhs)) + 1);
1448 }
1449 else if (wi::eq_p (wi::to_wide (rhs), wi::max_value (TREE_TYPE (rhs))))
1450 {
1451 cmp = (cmp == EQ_EXPR) ? GT_EXPR : LE_EXPR;
1452 rhs = wide_int_to_tree (TREE_TYPE (rhs),
1453 wi::max_value (TREE_TYPE (rhs)) - 1);
1454 }
1455 }
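  /* Editor-added instance: for 32-bit int, x == INT_MIN is rewritten
     above as x < INT_MIN + 1 and x != INT_MAX as x <= INT_MAX - 1, so
     the order-comparison matching below also covers these cases.  */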
1456
1457 /* This transformation is only valid for order comparisons. Record which
1458 operand is smaller/larger if the result of the comparison is true. */
1459 tree alt_smaller = NULL_TREE;
1460 tree alt_larger = NULL_TREE;
1461 if (cmp == LT_EXPR || cmp == LE_EXPR)
1462 {
1463 smaller = gimple_cond_lhs (cond);
1464 larger = rhs;
1465 /* If we have smaller < CST it is equivalent to smaller <= CST-1.
1466 Likewise smaller <= CST is equivalent to smaller < CST+1. */
1467 if (TREE_CODE (larger) == INTEGER_CST
1468 && INTEGRAL_TYPE_P (TREE_TYPE (larger)))
1469 {
1470 if (cmp == LT_EXPR)
1471 {
1472 wi::overflow_type overflow;
1473 wide_int alt = wi::sub (wi::to_wide (larger), 1,
1474 TYPE_SIGN (TREE_TYPE (larger)),
1475 &overflow);
1476 if (! overflow)
1477 alt_larger = wide_int_to_tree (TREE_TYPE (larger), alt);
1478 }
1479 else
1480 {
1481 wi::overflow_type overflow;
1482 wide_int alt = wi::add (wi::to_wide (larger), 1,
1483 TYPE_SIGN (TREE_TYPE (larger)),
1484 &overflow);
1485 if (! overflow)
1486 alt_larger = wide_int_to_tree (TREE_TYPE (larger), alt);
1487 }
1488 }
1489 }
1490 else if (cmp == GT_EXPR || cmp == GE_EXPR)
1491 {
1492 smaller = rhs;
1493 larger = gimple_cond_lhs (cond);
1494 /* If we have larger > CST it is equivalent to larger >= CST+1.
1495 Likewise larger >= CST is equivalent to larger > CST-1. */
1496 if (TREE_CODE (smaller) == INTEGER_CST
1497 && INTEGRAL_TYPE_P (TREE_TYPE (smaller)))
1498 {
1499 wi::overflow_type overflow;
1500 if (cmp == GT_EXPR)
1501 {
1502 wide_int alt = wi::add (wi::to_wide (smaller), 1,
1503 TYPE_SIGN (TREE_TYPE (smaller)),
1504 &overflow);
1505 if (! overflow)
1506 alt_smaller = wide_int_to_tree (TREE_TYPE (smaller), alt);
1507 }
1508 else
1509 {
1510 wide_int alt = wi::sub (wi::to_wide (smaller), 1,
1511 TYPE_SIGN (TREE_TYPE (smaller)),
1512 &overflow);
1513 if (! overflow)
1514 alt_smaller = wide_int_to_tree (TREE_TYPE (smaller), alt);
1515 }
1516 }
1517 }
1518 else
1519 return false;
1520
1521 /* Handle the special case of (signed_type)x < 0 being equivalent
1522 to x > MAX_VAL(signed_type) and (signed_type)x >= 0 equivalent
1523 to x <= MAX_VAL(signed_type). */
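  /* Editor-added instance: for unsigned int u and i = (int) u,

       i < 0    is equivalent to   u > INT_MAX
       i >= 0   is equivalent to   u <= INT_MAX

     which the code below expresses through the w1 = MAX_VAL and
     w2 = MAX_VAL + 1 bounds.  */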
1524 if ((cmp == GE_EXPR || cmp == LT_EXPR)
1525 && INTEGRAL_TYPE_P (type)
1526 && TYPE_UNSIGNED (type)
1527 && integer_zerop (rhs))
1528 {
1529 tree op = gimple_cond_lhs (cond);
1530 if (TREE_CODE (op) == SSA_NAME
1531 && INTEGRAL_TYPE_P (TREE_TYPE (op))
1532 && !TYPE_UNSIGNED (TREE_TYPE (op)))
1533 {
1534 gimple *def_stmt = SSA_NAME_DEF_STMT (op);
1535 if (gimple_assign_cast_p (def_stmt))
1536 {
1537 tree op1 = gimple_assign_rhs1 (def_stmt);
1538 if (INTEGRAL_TYPE_P (TREE_TYPE (op1))
1539 && TYPE_UNSIGNED (TREE_TYPE (op1))
1540 && (TYPE_PRECISION (TREE_TYPE (op))
1541 == TYPE_PRECISION (TREE_TYPE (op1)))
1542 && useless_type_conversion_p (type, TREE_TYPE (op1)))
1543 {
1544 wide_int w1 = wi::max_value (TREE_TYPE (op));
1545 wide_int w2 = wi::add (w1, 1);
1546 if (cmp == LT_EXPR)
1547 {
1548 larger = op1;
1549 smaller = wide_int_to_tree (TREE_TYPE (op1), w1);
1550 alt_smaller = wide_int_to_tree (TREE_TYPE (op1), w2);
1551 alt_larger = NULL_TREE;
1552 }
1553 else
1554 {
1555 smaller = op1;
1556 larger = wide_int_to_tree (TREE_TYPE (op1), w1);
1557 alt_larger = wide_int_to_tree (TREE_TYPE (op1), w2);
1558 alt_smaller = NULL_TREE;
1559 }
1560 }
1561 }
1562 }
1563 }
1564
1565   /* We need to know which is the true edge and which is the false
1566      edge so that we know which PHI argument is taken on each path.  */
1567 extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
1568
1569 /* Forward the edges over the middle basic block. */
1570 if (true_edge->dest == middle_bb)
1571 true_edge = EDGE_SUCC (true_edge->dest, 0);
1572 if (false_edge->dest == middle_bb)
1573 false_edge = EDGE_SUCC (false_edge->dest, 0);
1574
1575 if (true_edge == e0)
1576 {
1577 gcc_assert (false_edge == e1);
1578 arg_true = arg0;
1579 arg_false = arg1;
1580 }
1581 else
1582 {
1583 gcc_assert (false_edge == e0);
1584 gcc_assert (true_edge == e1);
1585 arg_true = arg1;
1586 arg_false = arg0;
1587 }
1588
1589 if (empty_block_p (middle_bb))
1590 {
1591 if ((operand_equal_for_phi_arg_p (arg_true, smaller)
1592 || (alt_smaller
1593 && operand_equal_for_phi_arg_p (arg_true, alt_smaller)))
1594 && (operand_equal_for_phi_arg_p (arg_false, larger)
1595 || (alt_larger
1596 	      && operand_equal_for_phi_arg_p (arg_false, alt_larger))))
1597 {
1598 /* Case
1599
1600 if (smaller < larger)
1601 rslt = smaller;
1602 else
1603 rslt = larger; */
1604 minmax = MIN_EXPR;
1605 }
1606 else if ((operand_equal_for_phi_arg_p (arg_false, smaller)
1607 || (alt_smaller
1608 && operand_equal_for_phi_arg_p (arg_false, alt_smaller)))
1609 && (operand_equal_for_phi_arg_p (arg_true, larger)
1610 || (alt_larger
1611 && operand_equal_for_phi_arg_p (arg_true, alt_larger))))
1612 minmax = MAX_EXPR;
1613 else
1614 return false;
1615 }
1616 else
1617 {
1618 /* Recognize the following case, assuming d <= u:
1619
1620 if (a <= u)
1621 b = MAX (a, d);
1622 x = PHI <b, u>
1623
1624 This is equivalent to
1625
1626 b = MAX (a, d);
1627 x = MIN (b, u); */
1628
1629 gimple *assign = last_and_only_stmt (middle_bb);
1630 tree lhs, op0, op1, bound;
1631
1632 if (!assign
1633 || gimple_code (assign) != GIMPLE_ASSIGN)
1634 return false;
1635
1636 lhs = gimple_assign_lhs (assign);
1637 ass_code = gimple_assign_rhs_code (assign);
1638 if (ass_code != MAX_EXPR && ass_code != MIN_EXPR)
1639 return false;
1640 op0 = gimple_assign_rhs1 (assign);
1641 op1 = gimple_assign_rhs2 (assign);
1642
1643 if (true_edge->src == middle_bb)
1644 {
1645 /* We got here if the condition is true, i.e., SMALLER < LARGER. */
1646 if (!operand_equal_for_phi_arg_p (lhs, arg_true))
1647 return false;
1648
1649 if (operand_equal_for_phi_arg_p (arg_false, larger)
1650 || (alt_larger
1651 && operand_equal_for_phi_arg_p (arg_false, alt_larger)))
1652 {
1653 /* Case
1654
1655 if (smaller < larger)
1656 {
1657 r' = MAX_EXPR (smaller, bound)
1658 }
1659 r = PHI <r', larger> --> to be turned to MIN_EXPR. */
1660 if (ass_code != MAX_EXPR)
1661 return false;
1662
1663 minmax = MIN_EXPR;
1664 if (operand_equal_for_phi_arg_p (op0, smaller)
1665 || (alt_smaller
1666 && operand_equal_for_phi_arg_p (op0, alt_smaller)))
1667 bound = op1;
1668 else if (operand_equal_for_phi_arg_p (op1, smaller)
1669 || (alt_smaller
1670 && operand_equal_for_phi_arg_p (op1, alt_smaller)))
1671 bound = op0;
1672 else
1673 return false;
1674
1675 /* We need BOUND <= LARGER. */
1676 if (!integer_nonzerop (fold_build2 (LE_EXPR, boolean_type_node,
1677 bound, larger)))
1678 return false;
1679 }
1680 else if (operand_equal_for_phi_arg_p (arg_false, smaller)
1681 || (alt_smaller
1682 && operand_equal_for_phi_arg_p (arg_false, alt_smaller)))
1683 {
1684 /* Case
1685
1686 if (smaller < larger)
1687 {
1688 r' = MIN_EXPR (larger, bound)
1689 }
1690 r = PHI <r', smaller> --> to be turned to MAX_EXPR. */
1691 if (ass_code != MIN_EXPR)
1692 return false;
1693
1694 minmax = MAX_EXPR;
1695 if (operand_equal_for_phi_arg_p (op0, larger)
1696 || (alt_larger
1697 && operand_equal_for_phi_arg_p (op0, alt_larger)))
1698 bound = op1;
1699 else if (operand_equal_for_phi_arg_p (op1, larger)
1700 || (alt_larger
1701 && operand_equal_for_phi_arg_p (op1, alt_larger)))
1702 bound = op0;
1703 else
1704 return false;
1705
1706 /* We need BOUND >= SMALLER. */
1707 if (!integer_nonzerop (fold_build2 (GE_EXPR, boolean_type_node,
1708 bound, smaller)))
1709 return false;
1710 }
1711 else
1712 return false;
1713 }
1714 else
1715 {
1716 /* We got here if the condition is false, i.e., SMALLER > LARGER. */
1717 if (!operand_equal_for_phi_arg_p (lhs, arg_false))
1718 return false;
1719
1720 if (operand_equal_for_phi_arg_p (arg_true, larger)
1721 || (alt_larger
1722 && operand_equal_for_phi_arg_p (arg_true, alt_larger)))
1723 {
1724 /* Case
1725
1726 if (smaller > larger)
1727 {
1728 r' = MIN_EXPR (smaller, bound)
1729 }
1730 r = PHI <r', larger> --> to be turned to MAX_EXPR. */
1731 if (ass_code != MIN_EXPR)
1732 return false;
1733
1734 minmax = MAX_EXPR;
1735 if (operand_equal_for_phi_arg_p (op0, smaller)
1736 || (alt_smaller
1737 && operand_equal_for_phi_arg_p (op0, alt_smaller)))
1738 bound = op1;
1739 else if (operand_equal_for_phi_arg_p (op1, smaller)
1740 || (alt_smaller
1741 && operand_equal_for_phi_arg_p (op1, alt_smaller)))
1742 bound = op0;
1743 else
1744 return false;
1745
1746 /* We need BOUND >= LARGER. */
1747 if (!integer_nonzerop (fold_build2 (GE_EXPR, boolean_type_node,
1748 bound, larger)))
1749 return false;
1750 }
1751 else if (operand_equal_for_phi_arg_p (arg_true, smaller)
1752 || (alt_smaller
1753 && operand_equal_for_phi_arg_p (arg_true, alt_smaller)))
1754 {
1755 /* Case
1756
1757 if (smaller > larger)
1758 {
1759 r' = MAX_EXPR (larger, bound)
1760 }
1761 r = PHI <r', smaller> --> to be turned to MIN_EXPR. */
1762 if (ass_code != MAX_EXPR)
1763 return false;
1764
1765 minmax = MIN_EXPR;
1766 if (operand_equal_for_phi_arg_p (op0, larger))
1767 bound = op1;
1768 else if (operand_equal_for_phi_arg_p (op1, larger))
1769 bound = op0;
1770 else
1771 return false;
1772
1773 /* We need BOUND <= SMALLER. */
1774 if (!integer_nonzerop (fold_build2 (LE_EXPR, boolean_type_node,
1775 bound, smaller)))
1776 return false;
1777 }
1778 else
1779 return false;
1780 }
1781
1782 /* Move the statement from the middle block. */
1783 gsi = gsi_last_bb (cond_bb);
1784 gsi_from = gsi_last_nondebug_bb (middle_bb);
1785 reset_flow_sensitive_info (SINGLE_SSA_TREE_OPERAND (gsi_stmt (gsi_from),
1786 SSA_OP_DEF));
1787 gsi_move_before (&gsi_from, &gsi);
1788 }
1789
1790 /* Emit the statement to compute min/max. */
1791 gimple_seq stmts = NULL;
1792 tree phi_result = PHI_RESULT (phi);
1793 result = gimple_build (&stmts, minmax, TREE_TYPE (phi_result), arg0, arg1);
1794 /* Duplicate range info if we're the only thing setting the target PHI. */
1795 if (!gimple_seq_empty_p (stmts)
1796 && EDGE_COUNT (gimple_bb (phi)->preds) == 2
1797 && !POINTER_TYPE_P (TREE_TYPE (phi_result))
1798 && SSA_NAME_RANGE_INFO (phi_result))
1799 duplicate_ssa_name_range_info (result, SSA_NAME_RANGE_TYPE (phi_result),
1800 SSA_NAME_RANGE_INFO (phi_result));
1801
1802 gsi = gsi_last_bb (cond_bb);
1803 gsi_insert_seq_before (&gsi, stmts, GSI_NEW_STMT);
1804
1805 replace_phi_edge_with_variable (cond_bb, e1, phi, result);
1806
1807 return true;
1808 }
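
/* For illustration only (a hand-written sketch, not from a testcase in
   this file): assuming signed ints and that BOUND >= SMALLER can be
   proven, e.g. b >= c below, the bounded case above corresponds to

     r = c;
     if (c < d)
       r = d < b ? d : b;     r' = MIN_EXPR <d, b>

     r = PHI <r', c>          becomes  r = MAX_EXPR <c, MIN_EXPR <d, b>>

   The MIN_EXPR statement is moved from the middle block up into the
   condition block, and the PHI is replaced by the MAX_EXPR result.  */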
1809
1810 /* Convert
1811
1812 <bb 2>
1813 if (b_4(D) != 0)
1814 goto <bb 3>
1815 else
1816 goto <bb 4>
1817
1818 <bb 3>
1819 _2 = (unsigned long) b_4(D);
1820 _9 = __builtin_popcountl (_2);
1821 OR
1822 _9 = __builtin_popcountl (b_4(D));
1823
1824 <bb 4>
1825 c_12 = PHI <0(2), _9(3)>
1826
1827 Into
1828 <bb 2>
1829 _2 = (unsigned long) b_4(D);
1830 _9 = __builtin_popcountl (_2);
1831 OR
1832 _9 = __builtin_popcountl (b_4(D));
1833
1834 <bb 4>
1835 c_12 = PHI <_9(2)>
1836
1837 Similarly for __builtin_clz or __builtin_ctz if
1838 C?Z_DEFINED_VALUE_AT_ZERO is 2, optab is present and
1839 instead of 0 above it uses the value from that macro. */
1840
1841 static bool
1842 cond_removal_in_popcount_clz_ctz_pattern (basic_block cond_bb,
1843 basic_block middle_bb,
1844 edge e1, edge e2, gimple *phi,
1845 tree arg0, tree arg1)
1846 {
1847 gimple *cond;
1848 gimple_stmt_iterator gsi, gsi_from;
1849 gimple *call;
1850 gimple *cast = NULL;
1851 tree lhs, arg;
1852
1853 /* Check that
1854 _2 = (unsigned long) b_4(D);
1855 _9 = __builtin_popcountl (_2);
1856 OR
1857 _9 = __builtin_popcountl (b_4(D));
1858 are the only stmts in the middle_bb. */
1859
1860 gsi = gsi_start_nondebug_after_labels_bb (middle_bb);
1861 if (gsi_end_p (gsi))
1862 return false;
1863 cast = gsi_stmt (gsi);
1864 gsi_next_nondebug (&gsi);
1865 if (!gsi_end_p (gsi))
1866 {
1867 call = gsi_stmt (gsi);
1868 gsi_next_nondebug (&gsi);
1869 if (!gsi_end_p (gsi))
1870 return false;
1871 }
1872 else
1873 {
1874 call = cast;
1875 cast = NULL;
1876 }
1877
1878 /* Check that we have a popcount/clz/ctz builtin. */
1879 if (!is_gimple_call (call) || gimple_call_num_args (call) != 1)
1880 return false;
1881
1882 arg = gimple_call_arg (call, 0);
1883 lhs = gimple_get_lhs (call);
1884
1885 if (lhs == NULL_TREE)
1886 return false;
1887
1888 combined_fn cfn = gimple_call_combined_fn (call);
1889 internal_fn ifn = IFN_LAST;
1890 int val = 0;
1891 switch (cfn)
1892 {
1893 CASE_CFN_POPCOUNT:
1894 break;
1895 CASE_CFN_CLZ:
1896 if (INTEGRAL_TYPE_P (TREE_TYPE (arg)))
1897 {
1898 tree type = TREE_TYPE (arg);
1899 if (direct_internal_fn_supported_p (IFN_CLZ, type, OPTIMIZE_FOR_BOTH)
1900 && CLZ_DEFINED_VALUE_AT_ZERO (SCALAR_INT_TYPE_MODE (type),
1901 val) == 2)
1902 {
1903 ifn = IFN_CLZ;
1904 break;
1905 }
1906 }
1907 return false;
1908 CASE_CFN_CTZ:
1909 if (INTEGRAL_TYPE_P (TREE_TYPE (arg)))
1910 {
1911 tree type = TREE_TYPE (arg);
1912 if (direct_internal_fn_supported_p (IFN_CTZ, type, OPTIMIZE_FOR_BOTH)
1913 && CTZ_DEFINED_VALUE_AT_ZERO (SCALAR_INT_TYPE_MODE (type),
1914 val) == 2)
1915 {
1916 ifn = IFN_CTZ;
1917 break;
1918 }
1919 }
1920 return false;
1921 default:
1922 return false;
1923 }
1924
1925 if (cast)
1926 {
1927 /* We have a stmt feeding the popcount/clz/ctz builtin;
1928 check that it really is a cast. */
1929 if (gimple_code (cast) != GIMPLE_ASSIGN
1930 || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (cast)))
1931 return false;
1932 /* Result of the cast stmt is the argument to the builtin. */
1933 if (arg != gimple_assign_lhs (cast))
1934 return false;
1935 arg = gimple_assign_rhs1 (cast);
1936 }
1937
1938 cond = last_stmt (cond_bb);
1939
1940 /* Cond_bb has a check for b_4 [!=|==] 0 before calling the popcount/clz/ctz
1941 builtin. */
1942 if (gimple_code (cond) != GIMPLE_COND
1943 || (gimple_cond_code (cond) != NE_EXPR
1944 && gimple_cond_code (cond) != EQ_EXPR)
1945 || !integer_zerop (gimple_cond_rhs (cond))
1946 || arg != gimple_cond_lhs (cond))
1947 return false;
1948
1949 /* Canonicalize. */
1950 if ((e2->flags & EDGE_TRUE_VALUE
1951 && gimple_cond_code (cond) == NE_EXPR)
1952 || (e1->flags & EDGE_TRUE_VALUE
1953 && gimple_cond_code (cond) == EQ_EXPR))
1954 {
1955 std::swap (arg0, arg1);
1956 std::swap (e1, e2);
1957 }
1958
1959 /* Check PHI arguments. */
1960 if (lhs != arg0
1961 || TREE_CODE (arg1) != INTEGER_CST
1962 || wi::to_wide (arg1) != val)
1963 return false;
1964
1965 /* And insert the popcount/clz/ctz builtin and cast stmt before the
1966 condition in cond_bb. */
1967 gsi = gsi_last_bb (cond_bb);
1968 if (cast)
1969 {
1970 gsi_from = gsi_for_stmt (cast);
1971 gsi_move_before (&gsi_from, &gsi);
1972 reset_flow_sensitive_info (gimple_get_lhs (cast));
1973 }
1974 gsi_from = gsi_for_stmt (call);
1975 if (ifn == IFN_LAST || gimple_call_internal_p (call))
1976 gsi_move_before (&gsi_from, &gsi);
1977 else
1978 {
1979 /* For __builtin_c[lt]z* force .C[LT]Z ifn, because only
1980 the latter is well defined at zero. */
1981 call = gimple_build_call_internal (ifn, 1, gimple_call_arg (call, 0));
1982 gimple_call_set_lhs (call, lhs);
1983 gsi_insert_before (&gsi, call, GSI_SAME_STMT);
1984 gsi_remove (&gsi_from, true);
1985 }
1986 reset_flow_sensitive_info (lhs);
1987
1988 /* Now update the PHI and remove unneeded bbs. */
1989 replace_phi_edge_with_variable (cond_bb, e2, phi, lhs);
1990 return true;
1991 }
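
/* For illustration only (a sketch, not from a testcase): at the source
   level the transformation above turns

     int f (unsigned long b)
     {
       return b != 0 ? __builtin_popcountl (b) : 0;
     }

   into an unconditional popcount, since __builtin_popcountl (0) is 0
   anyway.  For clz/ctz the branch is only removed when
   C?Z_DEFINED_VALUE_AT_ZERO is 2 and the defined value matches the
   PHI constant, in which case the .CLZ/.CTZ internal function is used.  */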
1992
1993 /* The function abs_replacement does the main work of replacing a
1994 conditional negation with an absolute value. Return true if the
1995 replacement is done. Otherwise return false.
1996 BB is the basic block where the replacement is going to be done.
1997 ARG0 is argument 0 from the phi. Likewise for ARG1. */
1998
1999 static bool
2000 abs_replacement (basic_block cond_bb, basic_block middle_bb,
2001 edge e0 ATTRIBUTE_UNUSED, edge e1,
2002 gimple *phi, tree arg0, tree arg1)
2003 {
2004 tree result;
2005 gassign *new_stmt;
2006 gimple *cond;
2007 gimple_stmt_iterator gsi;
2008 edge true_edge, false_edge;
2009 gimple *assign;
2010 edge e;
2011 tree rhs, lhs;
2012 bool negate;
2013 enum tree_code cond_code;
2014
2015 /* If the type says honor signed zeros we cannot do this
2016 optimization. */
2017 if (HONOR_SIGNED_ZEROS (arg1))
2018 return false;
2019
2020 /* MIDDLE_BB must have only one executable statement, which must have the
2021 form arg0 = -arg1 or arg1 = -arg0. */
2022
2023 assign = last_and_only_stmt (middle_bb);
2024 /* If we did not find the proper negation assignment, then we cannot
2025 optimize. */
2026 if (assign == NULL)
2027 return false;
2028
2029 /* If we got here, then we have found the only executable statement
2030 in MIDDLE_BB. If it is anything other than arg0 = -arg1 or
2031 arg1 = -arg0, then we cannot optimize. */
2032 if (gimple_code (assign) != GIMPLE_ASSIGN)
2033 return false;
2034
2035 lhs = gimple_assign_lhs (assign);
2036
2037 if (gimple_assign_rhs_code (assign) != NEGATE_EXPR)
2038 return false;
2039
2040 rhs = gimple_assign_rhs1 (assign);
2041
2042 /* The assignment has to be arg0 = -arg1 or arg1 = -arg0. */
2043 if (!(lhs == arg0 && rhs == arg1)
2044 && !(lhs == arg1 && rhs == arg0))
2045 return false;
2046
2047 cond = last_stmt (cond_bb);
2048 result = PHI_RESULT (phi);
2049
2050 /* Only relationals comparing arg[01] against zero are interesting. */
2051 cond_code = gimple_cond_code (cond);
2052 if (cond_code != GT_EXPR && cond_code != GE_EXPR
2053 && cond_code != LT_EXPR && cond_code != LE_EXPR)
2054 return false;
2055
2056 /* Make sure the conditional is arg[01] OP y. */
2057 if (gimple_cond_lhs (cond) != rhs)
2058 return false;
2059
2060 if (FLOAT_TYPE_P (TREE_TYPE (gimple_cond_rhs (cond)))
2061 ? real_zerop (gimple_cond_rhs (cond))
2062 : integer_zerop (gimple_cond_rhs (cond)))
2063 ;
2064 else
2065 return false;
2066
2067 /* We need to know which is the true edge and which is the false
2068 edge so that we know whether we have abs or negative abs. */
2069 extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
2070
2071 /* For GT_EXPR/GE_EXPR, if the true edge goes to MIDDLE_BB, then we
2072 will need to negate the result. Similarly for LT_EXPR/LE_EXPR if
2073 the false edge goes to MIDDLE_BB. */
2074 if (cond_code == GT_EXPR || cond_code == GE_EXPR)
2075 e = true_edge;
2076 else
2077 e = false_edge;
2078
2079 if (e->dest == middle_bb)
2080 negate = true;
2081 else
2082 negate = false;
2083
2084 /* If the code negates only when the operand is positive, make sure not
2085 to introduce undefined behavior when negating or computing the absolute.
2086 ??? We could use range info if present to check for arg1 == INT_MIN. */
2087 if (negate
2088 && (ANY_INTEGRAL_TYPE_P (TREE_TYPE (arg1))
2089 && ! TYPE_OVERFLOW_WRAPS (TREE_TYPE (arg1))))
2090 return false;
2091
2092 result = duplicate_ssa_name (result, NULL);
2093
2094 if (negate)
2095 lhs = make_ssa_name (TREE_TYPE (result));
2096 else
2097 lhs = result;
2098
2099 /* Build the new assignment with an ABS_EXPR on its RHS. */
2100 new_stmt = gimple_build_assign (lhs, ABS_EXPR, rhs);
2101
2102 gsi = gsi_last_bb (cond_bb);
2103 gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);
2104
2105 if (negate)
2106 {
2107 /* Get the right GSI. We want to insert after the recently
2108 added ABS_EXPR statement (which we know is the first statement
2109 in the block). */
2110 new_stmt = gimple_build_assign (result, NEGATE_EXPR, lhs);
2111
2112 gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
2113 }
2114
2115 replace_phi_edge_with_variable (cond_bb, e1, phi, result);
2116
2117 /* Note that we optimized this PHI. */
2118 return true;
2119 }
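
/* For illustration only (a sketch assuming a signed int whose negation
   cannot overflow, e.g. with -fwrapv):

     x = a < 0 ? -a : a;     becomes  x = ABS_EXPR <a>;
     x = a > 0 ? -a : a;     becomes  x = -ABS_EXPR <a>;

   The second form is the negate case: the true edge of the GT_EXPR
   leads to the middle block, so a NEGATE_EXPR of the ABS_EXPR result
   is emitted after it.  */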
2120
2121 /* Optimize x < 0 ? ~y : y into (x >> (prec-1)) ^ y. */
2122
2123 static bool
2124 xor_replacement (basic_block cond_bb, basic_block middle_bb,
2125 edge e0 ATTRIBUTE_UNUSED, edge e1,
2126 gimple *phi, tree arg0, tree arg1)
2127 {
2128 if (!INTEGRAL_TYPE_P (TREE_TYPE (arg1)))
2129 return false;
2130
2131 /* MIDDLE_BB must have only one executable statement, which must have the
2132 form arg0 = ~arg1 or arg1 = ~arg0. */
2133
2134 gimple *assign = last_and_only_stmt (middle_bb);
2135 /* If we did not find the proper one's complement assignment, then we cannot
2136 optimize. */
2137 if (assign == NULL)
2138 return false;
2139
2140 /* If we got here, then we have found the only executable statement
2141 in MIDDLE_BB. If it is anything other than arg0 = ~arg1 or
2142 arg1 = ~arg0, then we cannot optimize. */
2143 if (!is_gimple_assign (assign))
2144 return false;
2145
2146 if (gimple_assign_rhs_code (assign) != BIT_NOT_EXPR)
2147 return false;
2148
2149 tree lhs = gimple_assign_lhs (assign);
2150 tree rhs = gimple_assign_rhs1 (assign);
2151
2152 /* The assignment has to be arg0 = ~arg1 or arg1 = ~arg0. */
2153 if (!(lhs == arg0 && rhs == arg1) && !(lhs == arg1 && rhs == arg0))
2154 return false;
2155
2156 gimple *cond = last_stmt (cond_bb);
2157 tree result = PHI_RESULT (phi);
2158
2159 /* Only relationals comparing arg[01] against zero are interesting. */
2160 enum tree_code cond_code = gimple_cond_code (cond);
2161 if (cond_code != LT_EXPR && cond_code != GE_EXPR)
2162 return false;
2163
2164 /* Make sure the conditional is x OP 0. */
2165 tree clhs = gimple_cond_lhs (cond);
2166 if (TREE_CODE (clhs) != SSA_NAME
2167 || !INTEGRAL_TYPE_P (TREE_TYPE (clhs))
2168 || TYPE_UNSIGNED (TREE_TYPE (clhs))
2169 || TYPE_PRECISION (TREE_TYPE (clhs)) != TYPE_PRECISION (TREE_TYPE (arg1))
2170 || !integer_zerop (gimple_cond_rhs (cond)))
2171 return false;
2172
2173 /* We need to know which is the true edge and which is the false
2174 edge so that we know whether we have xor or inverted xor. */
2175 edge true_edge, false_edge;
2176 extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
2177
2178 /* For GE_EXPR, if the true edge goes to MIDDLE_BB, then we
2179 will need to invert the result. Similarly for LT_EXPR if
2180 the false edge goes to MIDDLE_BB. */
2181 edge e;
2182 if (cond_code == GE_EXPR)
2183 e = true_edge;
2184 else
2185 e = false_edge;
2186
2187 bool invert = e->dest == middle_bb;
2188
2189 result = duplicate_ssa_name (result, NULL);
2190
2191 gimple_stmt_iterator gsi = gsi_last_bb (cond_bb);
2192
2193 int prec = TYPE_PRECISION (TREE_TYPE (clhs));
2194 gimple *new_stmt
2195 = gimple_build_assign (make_ssa_name (TREE_TYPE (clhs)), RSHIFT_EXPR, clhs,
2196 build_int_cst (integer_type_node, prec - 1));
2197 gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
2198
2199 if (!useless_type_conversion_p (TREE_TYPE (result), TREE_TYPE (clhs)))
2200 {
2201 new_stmt = gimple_build_assign (make_ssa_name (TREE_TYPE (result)),
2202 NOP_EXPR, gimple_assign_lhs (new_stmt));
2203 gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
2204 }
2205 lhs = gimple_assign_lhs (new_stmt);
2206
2207 if (invert)
2208 {
2209 new_stmt = gimple_build_assign (make_ssa_name (TREE_TYPE (result)),
2210 BIT_NOT_EXPR, rhs);
2211 gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
2212 rhs = gimple_assign_lhs (new_stmt);
2213 }
2214
2215 new_stmt = gimple_build_assign (result, BIT_XOR_EXPR, lhs, rhs);
2216 gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);
2217
2218 replace_phi_edge_with_variable (cond_bb, e1, phi, result);
2219
2220 /* Note that we optimized this PHI. */
2221 return true;
2222 }
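
/* For illustration only (a sketch assuming a 32-bit signed int x):

     r = x < 0 ? ~y : y;     becomes  r = (x >> 31) ^ y;

   The arithmetic shift yields -1 (all ones) when x is negative and 0
   otherwise, so the XOR either complements y or leaves it unchanged.
   For the inverted condition (x >= 0 selecting ~y in the middle block)
   a BIT_NOT_EXPR of y is emitted before the XOR instead.  */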
2223
2224 /* Auxiliary functions to determine the set of memory accesses which
2225 can't trap because they are preceded by accesses to the same memory
2226 portion. We do that for MEM_REFs, so we only need to track
2227 the SSA_NAME of the pointer indirectly referenced. The algorithm
2228 is simply a walk over all instructions in dominator order. When
2229 we see a MEM_REF we determine if we've already seen the same
2230 ref anywhere up to the root of the dominator tree. If we have,
2231 the current access can't trap. If we don't see any dominating access,
2232 the current access might trap, but might also make later accesses
2233 non-trapping, so we remember it. We need to be careful with loads
2234 and stores: for instance, a load might not trap while a store would,
2235 so if we see a dominating read access this doesn't mean that a later
2236 write access would not trap. Hence we also need to differentiate the
2237 type of access(es) seen.
2238
2239 ??? We currently are very conservative and assume that a load might
2240 trap even if a store doesn't (write-only memory). This probably is
2241 overly conservative.
2242
2243 We currently support a special case: for !TREE_ADDRESSABLE automatic
2244 variables, we ignore whether an access is a load or a store, because
2245 the local stack is always writable. */
2246
2247 /* A hash-table of references (MEM_REF/ARRAY_REF/COMPONENT_REF) together
2248 with the basic block in which each was last seen, which would constitute
2249 a no-trap region for the same accesses.
2250
2251 Size is needed to support 2 MEM_REFs of different types, like
2252 MEM<double>(s_1) and MEM<long>(s_1), which would compare equal with
2253 OEP_ADDRESS_OF. */
2254 struct ref_to_bb
2255 {
2256 tree exp;
2257 HOST_WIDE_INT size;
2258 unsigned int phase;
2259 basic_block bb;
2260 };
2261
2262 /* Hashtable helpers. */
2263
2264 struct refs_hasher : free_ptr_hash<ref_to_bb>
2265 {
2266 static inline hashval_t hash (const ref_to_bb *);
2267 static inline bool equal (const ref_to_bb *, const ref_to_bb *);
2268 };
2269
2270 /* Used for quick clearing of the hash-table when we see calls.
2271 Hash entries with phase < nt_call_phase are invalid. */
2272 static unsigned int nt_call_phase;
2273
2274 /* The hash function. */
2275
2276 inline hashval_t
2277 refs_hasher::hash (const ref_to_bb *n)
2278 {
2279 inchash::hash hstate;
2280 inchash::add_expr (n->exp, hstate, OEP_ADDRESS_OF);
2281 hstate.add_hwi (n->size);
2282 return hstate.end ();
2283 }
2284
2285 /* The equality function of *P1 and *P2. */
2286
2287 inline bool
2288 refs_hasher::equal (const ref_to_bb *n1, const ref_to_bb *n2)
2289 {
2290 return operand_equal_p (n1->exp, n2->exp, OEP_ADDRESS_OF)
2291 && n1->size == n2->size;
2292 }
2293
2294 class nontrapping_dom_walker : public dom_walker
2295 {
2296 public:
2297 nontrapping_dom_walker (cdi_direction direction, hash_set<tree> *ps)
2298 : dom_walker (direction), m_nontrapping (ps), m_seen_refs (128)
2299 {}
2300
2301 virtual edge before_dom_children (basic_block);
2302 virtual void after_dom_children (basic_block);
2303
2304 private:
2305
2306 /* We see the expression EXP in basic block BB. If it's an interesting
2307 expression (a MEM_REF, ARRAY_REF or COMPONENT_REF), possibly insert the
2308 expression into the set NONTRAP or the hash table of seen expressions.
2309 STORE is true if this expression is on the LHS, otherwise it's on
2310 the RHS. */
2311 void add_or_mark_expr (basic_block, tree, bool);
2312
2313 hash_set<tree> *m_nontrapping;
2314
2315 /* The hash table for remembering what we've seen. */
2316 hash_table<refs_hasher> m_seen_refs;
2317 };
2318
2319 /* Called by walk_dominator_tree, when entering the block BB. */
2320 edge
2321 nontrapping_dom_walker::before_dom_children (basic_block bb)
2322 {
2323 edge e;
2324 edge_iterator ei;
2325 gimple_stmt_iterator gsi;
2326
2327 /* If we haven't seen all our predecessors, clear the hash-table. */
2328 FOR_EACH_EDGE (e, ei, bb->preds)
2329 if ((((size_t)e->src->aux) & 2) == 0)
2330 {
2331 nt_call_phase++;
2332 break;
2333 }
2334
2335 /* Mark this BB as being on the path to dominator root and as visited. */
2336 bb->aux = (void*)(1 | 2);
2337
2338 /* And walk the statements in order. */
2339 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2340 {
2341 gimple *stmt = gsi_stmt (gsi);
2342
2343 if ((gimple_code (stmt) == GIMPLE_ASM && gimple_vdef (stmt))
2344 || (is_gimple_call (stmt)
2345 && (!nonfreeing_call_p (stmt) || !nonbarrier_call_p (stmt))))
2346 nt_call_phase++;
2347 else if (gimple_assign_single_p (stmt) && !gimple_has_volatile_ops (stmt))
2348 {
2349 add_or_mark_expr (bb, gimple_assign_lhs (stmt), true);
2350 add_or_mark_expr (bb, gimple_assign_rhs1 (stmt), false);
2351 }
2352 }
2353 return NULL;
2354 }
2355
2356 /* Called by walk_dominator_tree, when basic block BB is exited. */
2357 void
2358 nontrapping_dom_walker::after_dom_children (basic_block bb)
2359 {
2360 /* This BB isn't on the path to dominator root anymore. */
2361 bb->aux = (void*)2;
2362 }
2363
2364 /* We see the expression EXP in basic block BB. If it's an interesting
2365 expression, one of:
2366 1) MEM_REF
2367 2) ARRAY_REF
2368 3) COMPONENT_REF
2369 then possibly insert the expression into the set NONTRAP or the hash table
2370 of seen expressions. STORE is true if this expression is on the LHS,
2371 otherwise it's on the RHS. */
2372 void
2373 nontrapping_dom_walker::add_or_mark_expr (basic_block bb, tree exp, bool store)
2374 {
2375 HOST_WIDE_INT size;
2376
2377 if ((TREE_CODE (exp) == MEM_REF || TREE_CODE (exp) == ARRAY_REF
2378 || TREE_CODE (exp) == COMPONENT_REF)
2379 && (size = int_size_in_bytes (TREE_TYPE (exp))) > 0)
2380 {
2381 struct ref_to_bb map;
2382 ref_to_bb **slot;
2383 struct ref_to_bb *r2bb;
2384 basic_block found_bb = 0;
2385
2386 if (!store)
2387 {
2388 tree base = get_base_address (exp);
2389 /* Only record a LOAD of a local variable without address-taken, as
2390 the local stack is always writable. This allows cselim on a STORE
2391 with a dominating LOAD. */
2392 if (!auto_var_p (base) || TREE_ADDRESSABLE (base))
2393 return;
2394 }
2395
2396 /* Try to find the last seen *_REF, which can trap. */
2397 map.exp = exp;
2398 map.size = size;
2399 slot = m_seen_refs.find_slot (&map, INSERT);
2400 r2bb = *slot;
2401 if (r2bb && r2bb->phase >= nt_call_phase)
2402 found_bb = r2bb->bb;
2403
2404 /* If we've found a trapping *_REF, _and_ it dominates EXP
2405 (it's in a basic block on the path from us to the dominator root)
2406 then the current access can't trap. */
2407 if (found_bb && (((size_t)found_bb->aux) & 1) == 1)
2408 {
2409 m_nontrapping->add (exp);
2410 }
2411 else
2412 {
2413 /* EXP might trap, so insert it into the hash table. */
2414 if (r2bb)
2415 {
2416 r2bb->phase = nt_call_phase;
2417 r2bb->bb = bb;
2418 }
2419 else
2420 {
2421 r2bb = XNEW (struct ref_to_bb);
2422 r2bb->phase = nt_call_phase;
2423 r2bb->bb = bb;
2424 r2bb->exp = exp;
2425 r2bb->size = size;
2426 *slot = r2bb;
2427 }
2428 }
2429 }
2430 }
2431
2432 /* This is the entry point for gathering non-trapping memory accesses.
2433 It will do a dominator walk over the whole function, and it will
2434 make use of the bb->aux pointers. It returns a set of trees
2435 (the *_REFs themselves) which can't trap. */
2436 static hash_set<tree> *
2437 get_non_trapping (void)
2438 {
2439 nt_call_phase = 0;
2440 hash_set<tree> *nontrap = new hash_set<tree>;
2441 /* We're going to do a dominator walk, so ensure that we have
2442 dominance information. */
2443 calculate_dominance_info (CDI_DOMINATORS);
2444
2445 nontrapping_dom_walker (CDI_DOMINATORS, nontrap)
2446 .walk (cfun->cfg->x_entry_block_ptr);
2447
2448 clear_aux_for_blocks ();
2449 return nontrap;
2450 }
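
/* To make the dominance rule concrete (an illustrative sketch):

     *p = 0;            may trap, and is remembered in the hash table
     if (cond)
       *p = v;          the same ref is dominated by the store above,
                        so it can't trap and is added to the NONTRAP set

   Loads, by contrast, are only recorded for !TREE_ADDRESSABLE automatic
   variables, where a dominating read already proves a later store safe.  */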
2451
2452 /* Do the main work of conditional store replacement. We already know
2453 that the recognized pattern looks like so:
2454
2455 split:
2456 if (cond) goto MIDDLE_BB; else goto JOIN_BB (edge E1)
2457 MIDDLE_BB:
2458 something
2459 fallthrough (edge E0)
2460 JOIN_BB:
2461 some more
2462
2463 We check that MIDDLE_BB contains only one store, that that store
2464 doesn't trap (not via NOTRAP, but via checking if an access to the same
2465 memory location dominates us, or the store is to a local automatic
2466 object known not to trap) and that the store has a "simple" RHS. */
2467
2468 static bool
2469 cond_store_replacement (basic_block middle_bb, basic_block join_bb,
2470 edge e0, edge e1, hash_set<tree> *nontrap)
2471 {
2472 gimple *assign = last_and_only_stmt (middle_bb);
2473 tree lhs, rhs, name, name2;
2474 gphi *newphi;
2475 gassign *new_stmt;
2476 gimple_stmt_iterator gsi;
2477 location_t locus;
2478
2479 /* Check that middle_bb contains only one store. */
2480 if (!assign
2481 || !gimple_assign_single_p (assign)
2482 || gimple_has_volatile_ops (assign))
2483 return false;
2484
2485 /* And no PHI nodes, so all uses in the single stmt are also
2486 available at the point where we insert. */
2487 if (!gimple_seq_empty_p (phi_nodes (middle_bb)))
2488 return false;
2489
2490 locus = gimple_location (assign);
2491 lhs = gimple_assign_lhs (assign);
2492 rhs = gimple_assign_rhs1 (assign);
2493 if ((TREE_CODE (lhs) != MEM_REF
2494 && TREE_CODE (lhs) != ARRAY_REF
2495 && TREE_CODE (lhs) != COMPONENT_REF)
2496 || !is_gimple_reg_type (TREE_TYPE (lhs)))
2497 return false;
2498
2499 /* Prove that we can move the store down. We could also check
2500 TREE_THIS_NOTRAP here, but in that case we could also move stores
2501 whose value is not readily available, which we want to avoid. */
2502 if (!nontrap->contains (lhs))
2503 {
2504 /* If LHS is an access to a local variable without address-taken
2505 (or when we allow data races) and known not to trap, we could
2506 always safely move down the store. */
2507 tree base = get_base_address (lhs);
2508 if (!auto_var_p (base)
2509 || (TREE_ADDRESSABLE (base) && !flag_store_data_races)
2510 || tree_could_trap_p (lhs))
2511 return false;
2512 }
2513
2514 /* Now we've checked the constraints, so do the transformation:
2515 1) Remove the single store. */
2516 gsi = gsi_for_stmt (assign);
2517 unlink_stmt_vdef (assign);
2518 gsi_remove (&gsi, true);
2519 release_defs (assign);
2520
2521 /* Make both store and load use alias-set zero as we have to
2522 deal with the case of the store being a conditional change
2523 of the dynamic type. */
2524 lhs = unshare_expr (lhs);
2525 tree *basep = &lhs;
2526 while (handled_component_p (*basep))
2527 basep = &TREE_OPERAND (*basep, 0);
2528 if (TREE_CODE (*basep) == MEM_REF
2529 || TREE_CODE (*basep) == TARGET_MEM_REF)
2530 TREE_OPERAND (*basep, 1)
2531 = fold_convert (ptr_type_node, TREE_OPERAND (*basep, 1));
2532 else
2533 *basep = build2 (MEM_REF, TREE_TYPE (*basep),
2534 build_fold_addr_expr (*basep),
2535 build_zero_cst (ptr_type_node));
2536
2537 /* 2) Insert a load from the memory of the store to the temporary
2538 on the edge which did not contain the store. */
2539 name = make_temp_ssa_name (TREE_TYPE (lhs), NULL, "cstore");
2540 new_stmt = gimple_build_assign (name, lhs);
2541 gimple_set_location (new_stmt, locus);
2542 lhs = unshare_expr (lhs);
2543 /* Set TREE_NO_WARNING on the rhs of the load to avoid uninit
2544 warnings. */
2545 TREE_NO_WARNING (gimple_assign_rhs1 (new_stmt)) = 1;
2546 gsi_insert_on_edge (e1, new_stmt);
2547
2548 /* 3) Create a PHI node at the join block, with one argument
2549 holding the old RHS, and the other holding the temporary
2550 where we stored the old memory contents. */
2551 name2 = make_temp_ssa_name (TREE_TYPE (lhs), NULL, "cstore");
2552 newphi = create_phi_node (name2, join_bb);
2553 add_phi_arg (newphi, rhs, e0, locus);
2554 add_phi_arg (newphi, name, e1, locus);
2555
2556 new_stmt = gimple_build_assign (lhs, PHI_RESULT (newphi));
2557
2558 /* 4) Insert the new store of the PHI result. */
2559 gsi = gsi_after_labels (join_bb);
2560 if (gsi_end_p (gsi))
2561 {
2562 gsi = gsi_last_bb (join_bb);
2563 gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
2564 }
2565 else
2566 gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);
2567
2568 if (dump_file && (dump_flags & TDF_DETAILS))
2569 {
2570 fprintf (dump_file, "\nConditional store replacement happened!");
2571 fprintf (dump_file, "\nReplaced the store with a load.");
2572 fprintf (dump_file, "\nInserted a new PHI statement in join block:\n");
2573 print_gimple_stmt (dump_file, new_stmt, 0, TDF_VOPS|TDF_MEMSYMS);
2574 }
2575
2576 return true;
2577 }
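
/* Roughly, at the source level (a sketch; the pass works on GIMPLE):

     if (cond)                      cstore_tmp = cond ? v : *p;
       *p = v;            ==>       *p = cstore_tmp;

   The conditional store becomes an unconditional store of a PHI result,
   which later passes can turn into a conditional move.  This is valid
   only because the checks above prove the unconditional access to *p
   cannot trap.  */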
2578
2579 /* Do the main work of conditional store replacement for the if-then-else (diamond) form. */
2580
2581 static bool
2582 cond_if_else_store_replacement_1 (basic_block then_bb, basic_block else_bb,
2583 basic_block join_bb, gimple *then_assign,
2584 gimple *else_assign)
2585 {
2586 tree lhs_base, lhs, then_rhs, else_rhs, name;
2587 location_t then_locus, else_locus;
2588 gimple_stmt_iterator gsi;
2589 gphi *newphi;
2590 gassign *new_stmt;
2591
2592 if (then_assign == NULL
2593 || !gimple_assign_single_p (then_assign)
2594 || gimple_clobber_p (then_assign)
2595 || gimple_has_volatile_ops (then_assign)
2596 || else_assign == NULL
2597 || !gimple_assign_single_p (else_assign)
2598 || gimple_clobber_p (else_assign)
2599 || gimple_has_volatile_ops (else_assign))
2600 return false;
2601
2602 lhs = gimple_assign_lhs (then_assign);
2603 if (!is_gimple_reg_type (TREE_TYPE (lhs))
2604 || !operand_equal_p (lhs, gimple_assign_lhs (else_assign), 0))
2605 return false;
2606
2607 lhs_base = get_base_address (lhs);
2608 if (lhs_base == NULL_TREE
2609 || (!DECL_P (lhs_base) && TREE_CODE (lhs_base) != MEM_REF))
2610 return false;
2611
2612 then_rhs = gimple_assign_rhs1 (then_assign);
2613 else_rhs = gimple_assign_rhs1 (else_assign);
2614 then_locus = gimple_location (then_assign);
2615 else_locus = gimple_location (else_assign);
2616
2617 /* Now we've checked the constraints, so do the transformation:
2618 1) Remove the stores. */
2619 gsi = gsi_for_stmt (then_assign);
2620 unlink_stmt_vdef (then_assign);
2621 gsi_remove (&gsi, true);
2622 release_defs (then_assign);
2623
2624 gsi = gsi_for_stmt (else_assign);
2625 unlink_stmt_vdef (else_assign);
2626 gsi_remove (&gsi, true);
2627 release_defs (else_assign);
2628
2629 /* 2) Create a PHI node at the join block, with one argument
2630 holding the RHS of the then-store and the other holding the
2631 RHS of the else-store. */
2632 name = make_temp_ssa_name (TREE_TYPE (lhs), NULL, "cstore");
2633 newphi = create_phi_node (name, join_bb);
2634 add_phi_arg (newphi, then_rhs, EDGE_SUCC (then_bb, 0), then_locus);
2635 add_phi_arg (newphi, else_rhs, EDGE_SUCC (else_bb, 0), else_locus);
2636
2637 new_stmt = gimple_build_assign (lhs, PHI_RESULT (newphi));
2638
2639 /* 3) Insert the new store of the PHI result. */
2640 gsi = gsi_after_labels (join_bb);
2641 if (gsi_end_p (gsi))
2642 {
2643 gsi = gsi_last_bb (join_bb);
2644 gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
2645 }
2646 else
2647 gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);
2648
2649 return true;
2650 }
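
/* Roughly, at the source level (a sketch; the pass works on GIMPLE):

     if (cond)                      cstore_tmp = cond ? y : z;
       *p = y;            ==>       *p = cstore_tmp;
     else
       *p = z;

   Both stores are removed and a single store of the new PHI result is
   placed in the join block.  No trap proof is needed here, since every
   path through the diamond already stored to *p.  */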
2651
2652 /* Return the single store in BB defining VDEF, or NULL if there are
2653 other stores in the BB or loads following the store. */
2654
2655 static gimple *
2656 single_trailing_store_in_bb (basic_block bb, tree vdef)
2657 {
2658 if (SSA_NAME_IS_DEFAULT_DEF (vdef))
2659 return NULL;
2660 gimple *store = SSA_NAME_DEF_STMT (vdef);
2661 if (gimple_bb (store) != bb
2662 || gimple_code (store) == GIMPLE_PHI)
2663 return NULL;
2664
2665 /* Verify there is no other store in this BB. */
2666 if (!SSA_NAME_IS_DEFAULT_DEF (gimple_vuse (store))
2667 && gimple_bb (SSA_NAME_DEF_STMT (gimple_vuse (store))) == bb
2668 && gimple_code (SSA_NAME_DEF_STMT (gimple_vuse (store))) != GIMPLE_PHI)
2669 return NULL;
2670
2671 /* Verify there is no load or store after the store. */
2672 use_operand_p use_p;
2673 imm_use_iterator imm_iter;
2674 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, gimple_vdef (store))
2675 if (USE_STMT (use_p) != store
2676 && gimple_bb (USE_STMT (use_p)) == bb)
2677 return NULL;
2678
2679 return store;
2680 }
2681
2682 /* Conditional store replacement. We already know
2683 that the recognized pattern looks like so:
2684
2685 split:
2686 if (cond) goto THEN_BB; else goto ELSE_BB (edge E1)
2687 THEN_BB:
2688 ...
2689 X = Y;
2690 ...
2691 goto JOIN_BB;
2692 ELSE_BB:
2693 ...
2694 X = Z;
2695 ...
2696 fallthrough (edge E0)
2697 JOIN_BB:
2698 some more
2699
2700 We check that it is safe to sink the store to JOIN_BB by verifying that
2701 there are no read-after-write or write-after-write dependencies in
2702 THEN_BB and ELSE_BB. */
2703
2704 static bool
2705 cond_if_else_store_replacement (basic_block then_bb, basic_block else_bb,
2706 basic_block join_bb)
2707 {
2708 vec<data_reference_p> then_datarefs, else_datarefs;
2709 vec<ddr_p> then_ddrs, else_ddrs;
2710 gimple *then_store, *else_store;
2711 bool found, ok = false, res;
2712 struct data_dependence_relation *ddr;
2713 data_reference_p then_dr, else_dr;
2714 int i, j;
2715 tree then_lhs, else_lhs;
2716 basic_block blocks[3];
2717
2718 /* Handle the case with a single store in THEN_BB and ELSE_BB. That is
2719 cheap enough to always handle as it allows us to elide dependence
2720 checking. */
2721 gphi *vphi = NULL;
2722 for (gphi_iterator si = gsi_start_phis (join_bb); !gsi_end_p (si);
2723 gsi_next (&si))
2724 if (virtual_operand_p (gimple_phi_result (si.phi ())))
2725 {
2726 vphi = si.phi ();
2727 break;
2728 }
2729 if (!vphi)
2730 return false;
2731 tree then_vdef = PHI_ARG_DEF_FROM_EDGE (vphi, single_succ_edge (then_bb));
2732 tree else_vdef = PHI_ARG_DEF_FROM_EDGE (vphi, single_succ_edge (else_bb));
2733 gimple *then_assign = single_trailing_store_in_bb (then_bb, then_vdef);
2734 if (then_assign)
2735 {
2736 gimple *else_assign = single_trailing_store_in_bb (else_bb, else_vdef);
2737 if (else_assign)
2738 return cond_if_else_store_replacement_1 (then_bb, else_bb, join_bb,
2739 then_assign, else_assign);
2740 }
2741
2742 /* If either vectorization or if-conversion is disabled then do
2743 not sink any stores. */
2744 if (param_max_stores_to_sink == 0
2745 || (!flag_tree_loop_vectorize && !flag_tree_slp_vectorize)
2746 || !flag_tree_loop_if_convert)
2747 return false;
2748
2749 /* Find data references. */
2750 then_datarefs.create (1);
2751 else_datarefs.create (1);
2752 if ((find_data_references_in_bb (NULL, then_bb, &then_datarefs)
2753 == chrec_dont_know)
2754 || !then_datarefs.length ()
2755 || (find_data_references_in_bb (NULL, else_bb, &else_datarefs)
2756 == chrec_dont_know)
2757 || !else_datarefs.length ())
2758 {
2759 free_data_refs (then_datarefs);
2760 free_data_refs (else_datarefs);
2761 return false;
2762 }
2763
2764 /* Find pairs of stores with equal LHS. */
2765 auto_vec<gimple *, 1> then_stores, else_stores;
2766 FOR_EACH_VEC_ELT (then_datarefs, i, then_dr)
2767 {
2768 if (DR_IS_READ (then_dr))
2769 continue;
2770
2771 then_store = DR_STMT (then_dr);
2772 then_lhs = gimple_get_lhs (then_store);
2773 if (then_lhs == NULL_TREE)
2774 continue;
2775 found = false;
2776
2777 FOR_EACH_VEC_ELT (else_datarefs, j, else_dr)
2778 {
2779 if (DR_IS_READ (else_dr))
2780 continue;
2781
2782 else_store = DR_STMT (else_dr);
2783 else_lhs = gimple_get_lhs (else_store);
2784 if (else_lhs == NULL_TREE)
2785 continue;
2786
2787 if (operand_equal_p (then_lhs, else_lhs, 0))
2788 {
2789 found = true;
2790 break;
2791 }
2792 }
2793
2794 if (!found)
2795 continue;
2796
2797 then_stores.safe_push (then_store);
2798 else_stores.safe_push (else_store);
2799 }
2800
2801 /* No pairs of stores found. */
2802 if (!then_stores.length ()
2803 || then_stores.length () > (unsigned) param_max_stores_to_sink)
2804 {
2805 free_data_refs (then_datarefs);
2806 free_data_refs (else_datarefs);
2807 return false;
2808 }
2809
2810 /* Compute and check data dependencies in both basic blocks. */
2811 then_ddrs.create (1);
2812 else_ddrs.create (1);
2813 if (!compute_all_dependences (then_datarefs, &then_ddrs,
2814 vNULL, false)
2815 || !compute_all_dependences (else_datarefs, &else_ddrs,
2816 vNULL, false))
2817 {
2818 free_dependence_relations (then_ddrs);
2819 free_dependence_relations (else_ddrs);
2820 free_data_refs (then_datarefs);
2821 free_data_refs (else_datarefs);
2822 return false;
2823 }
2824 blocks[0] = then_bb;
2825 blocks[1] = else_bb;
2826 blocks[2] = join_bb;
2827 renumber_gimple_stmt_uids_in_blocks (blocks, 3);
2828
2829 /* Check that there are no read-after-write or write-after-write dependencies
2830 in THEN_BB. */
2831 FOR_EACH_VEC_ELT (then_ddrs, i, ddr)
2832 {
2833 struct data_reference *dra = DDR_A (ddr);
2834 struct data_reference *drb = DDR_B (ddr);
2835
2836 if (DDR_ARE_DEPENDENT (ddr) != chrec_known
2837 && ((DR_IS_READ (dra) && DR_IS_WRITE (drb)
2838 && gimple_uid (DR_STMT (dra)) > gimple_uid (DR_STMT (drb)))
2839 || (DR_IS_READ (drb) && DR_IS_WRITE (dra)
2840 && gimple_uid (DR_STMT (drb)) > gimple_uid (DR_STMT (dra)))
2841 || (DR_IS_WRITE (dra) && DR_IS_WRITE (drb))))
2842 {
2843 free_dependence_relations (then_ddrs);
2844 free_dependence_relations (else_ddrs);
2845 free_data_refs (then_datarefs);
2846 free_data_refs (else_datarefs);
2847 return false;
2848 }
2849 }
2850
2851 /* Check that there are no read-after-write or write-after-write dependencies
2852 in ELSE_BB. */
2853 FOR_EACH_VEC_ELT (else_ddrs, i, ddr)
2854 {
2855 struct data_reference *dra = DDR_A (ddr);
2856 struct data_reference *drb = DDR_B (ddr);
2857
2858 if (DDR_ARE_DEPENDENT (ddr) != chrec_known
2859 && ((DR_IS_READ (dra) && DR_IS_WRITE (drb)
2860 && gimple_uid (DR_STMT (dra)) > gimple_uid (DR_STMT (drb)))
2861 || (DR_IS_READ (drb) && DR_IS_WRITE (dra)
2862 && gimple_uid (DR_STMT (drb)) > gimple_uid (DR_STMT (dra)))
2863 || (DR_IS_WRITE (dra) && DR_IS_WRITE (drb))))
2864 {
2865 free_dependence_relations (then_ddrs);
2866 free_dependence_relations (else_ddrs);
2867 free_data_refs (then_datarefs);
2868 free_data_refs (else_datarefs);
2869 return false;
2870 }
2871 }
2872
2873 /* Sink stores with the same LHS. */
2874 FOR_EACH_VEC_ELT (then_stores, i, then_store)
2875 {
2876 else_store = else_stores[i];
2877 res = cond_if_else_store_replacement_1 (then_bb, else_bb, join_bb,
2878 then_store, else_store);
2879 ok = ok || res;
2880 }
2881
2882 free_dependence_relations (then_ddrs);
2883 free_dependence_relations (else_ddrs);
2884 free_data_refs (then_datarefs);
2885 free_data_refs (else_datarefs);
2886
2887 return ok;
2888 }
2889
2890 /* Return TRUE if STMT has a VUSE whose corresponding VDEF is in BB. */
2891
2892 static bool
2893 local_mem_dependence (gimple *stmt, basic_block bb)
2894 {
2895 tree vuse = gimple_vuse (stmt);
2896 gimple *def;
2897
2898 if (!vuse)
2899 return false;
2900
2901 def = SSA_NAME_DEF_STMT (vuse);
2902 return (def && gimple_bb (def) == bb);
2903 }
2904
2905 /* Given a "diamond" control-flow pattern where BB0 tests a condition,
2906 BB1 and BB2 are "then" and "else" blocks dependent on this test,
2907 and BB3 rejoins control flow following BB1 and BB2, look for
2908 opportunities to hoist loads as follows. If BB3 contains a PHI of
2909 two loads, one each occurring in BB1 and BB2, and the loads are
2910 provably of adjacent fields in the same structure, then move both
2911 loads into BB0. Of course this can only be done if there are no
2912 dependencies preventing such motion.
2913
2914 One of the hoisted loads will always be speculative, so the
2915 transformation is currently conservative:
2916
2917 - The fields must be strictly adjacent.
2918 - The two fields must occupy a single memory block that is
2919 guaranteed to not cross a page boundary.
2920
2921 The latter is difficult to prove, as such memory blocks should be
2922 aligned on the minimum of the stack alignment boundary and the
2923 alignment guaranteed by heap allocation interfaces. Thus we rely
2924 on a parameter for the alignment value.
2925
2926 Provided a good value is used for the latter, the first
2927 restriction could possibly be relaxed. */
2928
2929 static void
2930 hoist_adjacent_loads (basic_block bb0, basic_block bb1,
2931 basic_block bb2, basic_block bb3)
2932 {
2933 int param_align = param_l1_cache_line_size;
2934 unsigned param_align_bits = (unsigned) (param_align * BITS_PER_UNIT);
2935 gphi_iterator gsi;
2936
2937 /* Walk the phis in bb3 looking for an opportunity. We are looking
2938 for phis of two SSA names, one defined in bb1 and the other
2939 defined in bb2. */
2940 for (gsi = gsi_start_phis (bb3); !gsi_end_p (gsi); gsi_next (&gsi))
2941 {
2942 gphi *phi_stmt = gsi.phi ();
2943 gimple *def1, *def2;
2944 tree arg1, arg2, ref1, ref2, field1, field2;
2945 tree tree_offset1, tree_offset2, tree_size2, next;
2946 int offset1, offset2, size2;
2947 unsigned align1;
2948 gimple_stmt_iterator gsi2;
2949 basic_block bb_for_def1, bb_for_def2;
2950
2951 if (gimple_phi_num_args (phi_stmt) != 2
2952 || virtual_operand_p (gimple_phi_result (phi_stmt)))
2953 continue;
2954
2955 arg1 = gimple_phi_arg_def (phi_stmt, 0);
2956 arg2 = gimple_phi_arg_def (phi_stmt, 1);
2957
2958 if (TREE_CODE (arg1) != SSA_NAME
2959 || TREE_CODE (arg2) != SSA_NAME
2960 || SSA_NAME_IS_DEFAULT_DEF (arg1)
2961 || SSA_NAME_IS_DEFAULT_DEF (arg2))
2962 continue;
2963
2964 def1 = SSA_NAME_DEF_STMT (arg1);
2965 def2 = SSA_NAME_DEF_STMT (arg2);
2966
2967 if ((gimple_bb (def1) != bb1 || gimple_bb (def2) != bb2)
2968 && (gimple_bb (def2) != bb1 || gimple_bb (def1) != bb2))
2969 continue;
2970
2971 /* Check the mode of the arguments to be sure a conditional move
2972 can be generated for them. */
2973 if (optab_handler (movcc_optab, TYPE_MODE (TREE_TYPE (arg1)))
2974 == CODE_FOR_nothing)
2975 continue;
2976
2977 /* Both statements must be assignments whose RHS is a COMPONENT_REF. */
2978 if (!gimple_assign_single_p (def1)
2979 || !gimple_assign_single_p (def2)
2980 || gimple_has_volatile_ops (def1)
2981 || gimple_has_volatile_ops (def2))
2982 continue;
2983
2984 ref1 = gimple_assign_rhs1 (def1);
2985 ref2 = gimple_assign_rhs1 (def2);
2986
2987 if (TREE_CODE (ref1) != COMPONENT_REF
2988 || TREE_CODE (ref2) != COMPONENT_REF)
2989 continue;
2990
2991 /* The zeroth operand of the two component references must be
2992 identical. It is not sufficient to compare get_base_address of
2993 the two references, because this could allow for different
2994 elements of the same array in the two trees. It is not safe to
2995 assume that the existence of one array element implies the
2996 existence of a different one. */
2997 if (!operand_equal_p (TREE_OPERAND (ref1, 0), TREE_OPERAND (ref2, 0), 0))
2998 continue;
2999
3000 field1 = TREE_OPERAND (ref1, 1);
3001 field2 = TREE_OPERAND (ref2, 1);
3002
3003 /* Check for field adjacency, and ensure field1 comes first. */
3004 for (next = DECL_CHAIN (field1);
3005 next && TREE_CODE (next) != FIELD_DECL;
3006 next = DECL_CHAIN (next))
3007 ;
3008
3009 if (next != field2)
3010 {
3011 for (next = DECL_CHAIN (field2);
3012 next && TREE_CODE (next) != FIELD_DECL;
3013 next = DECL_CHAIN (next))
3014 ;
3015
3016 if (next != field1)
3017 continue;
3018
3019 std::swap (field1, field2);
3020 std::swap (def1, def2);
3021 }
3022
3023 bb_for_def1 = gimple_bb (def1);
3024 bb_for_def2 = gimple_bb (def2);
3025
3026 /* Check for proper alignment of the first field. */
3027 tree_offset1 = bit_position (field1);
3028 tree_offset2 = bit_position (field2);
3029 tree_size2 = DECL_SIZE (field2);
3030
3031 if (!tree_fits_uhwi_p (tree_offset1)
3032 || !tree_fits_uhwi_p (tree_offset2)
3033 || !tree_fits_uhwi_p (tree_size2))
3034 continue;
3035
3036 offset1 = tree_to_uhwi (tree_offset1);
3037 offset2 = tree_to_uhwi (tree_offset2);
3038 size2 = tree_to_uhwi (tree_size2);
3039 align1 = DECL_ALIGN (field1) % param_align_bits;
3040
3041 if (offset1 % BITS_PER_UNIT != 0)
3042 continue;
3043
3044 /* For profitability, the two field references should fit within
3045 a single cache line. */
3046 if (align1 + offset2 - offset1 + size2 > param_align_bits)
3047 continue;
3048
3049 /* The two expressions cannot be dependent upon vdefs defined
3050 in bb1/bb2. */
3051 if (local_mem_dependence (def1, bb_for_def1)
3052 || local_mem_dependence (def2, bb_for_def2))
3053 continue;
3054
3055 /* The conditions are satisfied; hoist the loads from bb1 and bb2 into
3056 bb0. We hoist the first one first so that a cache miss is handled
3057 efficiently regardless of hardware cache-fill policy. */
3058 gsi2 = gsi_for_stmt (def1);
3059 gsi_move_to_bb_end (&gsi2, bb0);
3060 gsi2 = gsi_for_stmt (def2);
3061 gsi_move_to_bb_end (&gsi2, bb0);
3062
3063 if (dump_file && (dump_flags & TDF_DETAILS))
3064 {
3065 fprintf (dump_file,
3066 "\nHoisting adjacent loads from %d and %d into %d: \n",
3067 bb_for_def1->index, bb_for_def2->index, bb0->index);
3068 print_gimple_stmt (dump_file, def1, 0, TDF_VOPS|TDF_MEMSYMS);
3069 print_gimple_stmt (dump_file, def2, 0, TDF_VOPS|TDF_MEMSYMS);
3070 }
3071 }
3072 }
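
/* The idiom being targeted, as a sketch (names are illustrative):

     struct node { struct node *left, *right; };

     x = cond ? y->left : y->right;

   Both loads are hoisted into the block ending in the condition, after
   which the PHI can become a conditional move.  Hoisting one of the
   loads is speculative, which is why the adjacency and cache-line
   checks above are required.  */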
3073
3074 /* Determine whether we should attempt to hoist adjacent loads out of
3075 diamond patterns in pass_phiopt. Always hoist loads if
3076 -fhoist-adjacent-loads is specified and the target machine has
3077 both a conditional move instruction and a defined cache line size. */
3078
3079 static bool
3080 gate_hoist_loads (void)
3081 {
3082 return (flag_hoist_adjacent_loads == 1
3083 && param_l1_cache_line_size
3084 && HAVE_conditional_move);
3085 }
3086
3087 /* This pass tries to replace an if-then-else block with an
3088 assignment. We have four kinds of transformations. Some of these
3089 transformations are also performed by the ifcvt RTL optimizer.
3090
3091 Conditional Replacement
3092 -----------------------
3093
3094 This transformation, implemented in conditional_replacement,
3095 replaces
3096
3097 bb0:
3098 if (cond) goto bb2; else goto bb1;
3099 bb1:
3100 bb2:
3101 x = PHI <0 (bb1), 1 (bb0), ...>;
3102
3103 with
3104
3105 bb0:
3106 x' = cond;
3107 goto bb2;
3108 bb2:
3109 x = PHI <x' (bb0), ...>;
3110
3111 We remove bb1 as it becomes unreachable. This occurs often due to
3112 gimplification of conditionals.
3113
3114 Value Replacement
3115 -----------------
3116
3117 This transformation, implemented in value_replacement, replaces
3118
3119 bb0:
3120 if (a != b) goto bb2; else goto bb1;
3121 bb1:
3122 bb2:
3123 x = PHI <a (bb1), b (bb0), ...>;
3124
3125 with
3126
3127 bb0:
3128 bb2:
3129 x = PHI <b (bb0), ...>;
3130
3131 This opportunity can sometimes occur as a result of other
3132 optimizations.
3133
3134
3135 Another case caught by value replacement looks like this:
3136
3137 bb0:
3138 t1 = a == CONST;
3139 t2 = b > c;
3140 t3 = t1 & t2;
3141 if (t3 != 0) goto bb1; else goto bb2;
3142 bb1:
3143 bb2:
3144 x = PHI <CONST, a>
3145
3146 Gets replaced with:
3147 bb0:
3148 bb2:
3149 t1 = a == CONST;
3150 t2 = b > c;
3151 t3 = t1 & t2;
3152 x = a;
3153
3154 ABS Replacement
3155 ---------------
3156
3157 This transformation, implemented in abs_replacement, replaces
3158
3159 bb0:
3160 if (a >= 0) goto bb2; else goto bb1;
3161 bb1:
3162 x = -a;
3163 bb2:
3164 x = PHI <x (bb1), a (bb0), ...>;
3165
3166 with
3167
3168 bb0:
3169 x' = ABS_EXPR< a >;
3170 bb2:
3171 x = PHI <x' (bb0), ...>;
3172
3173 MIN/MAX Replacement
3174 -------------------
3175
3176 This transformation, implemented in minmax_replacement, replaces
3177
3178 bb0:
3179 if (a <= b) goto bb2; else goto bb1;
3180 bb1:
3181 bb2:
3182 x = PHI <b (bb1), a (bb0), ...>;
3183
3184 with
3185
3186 bb0:
3187 x' = MIN_EXPR (a, b)
3188 bb2:
3189 x = PHI <x' (bb0), ...>;
3190
3191 A similar transformation is done for MAX_EXPR.
3192
3193
3194 This pass also performs a fifth transformation of a slightly different
3195 flavor.
3196
3197 Factor conversion in COND_EXPR
3198 ------------------------------
3199
3200 This transformation factors the conversion out of COND_EXPR with
3201 factor_out_conditional_conversion.
3202
3203 For example:
3204 if (a <= CST) goto <bb 3>; else goto <bb 4>;
3205 <bb 3>:
3206 tmp = (int) a;
3207 <bb 4>:
3208 tmp = PHI <tmp, CST>
3209
3210 Into:
3211 if (a <= CST) goto <bb 3>; else goto <bb 4>;
3212 <bb 3>:
3213 <bb 4>:
3214 a = PHI <a, CST>
3215 tmp = (int) a;
3216
3217 Adjacent Load Hoisting
3218 ----------------------
3219
3220 This transformation replaces
3221
3222 bb0:
3223 if (...) goto bb2; else goto bb1;
3224 bb1:
3225 x1 = (<expr>).field1;
3226 goto bb3;
3227 bb2:
3228 x2 = (<expr>).field2;
3229 bb3:
3230 # x = PHI <x1, x2>;
3231
3232 with
3233
3234 bb0:
3235 x1 = (<expr>).field1;
3236 x2 = (<expr>).field2;
3237 if (...) goto bb2; else goto bb1;
3238 bb1:
3239 goto bb3;
3240 bb2:
3241 bb3:
3242 # x = PHI <x1, x2>;
3243
3244 The purpose of this transformation is to enable generation of conditional
3245 move instructions such as Intel CMOVE or PowerPC ISEL. Because one of
3246 the loads is speculative, the transformation is restricted to very
3247 specific cases to avoid introducing a page fault. We are looking for
3248 the common idiom:
3249
3250 if (...)
3251 x = y->left;
3252 else
3253 x = y->right;
3254
3255 where left and right are typically adjacent pointers in a tree structure. */
3256
3257 namespace {
3258
3259 const pass_data pass_data_phiopt =
3260 {
3261 GIMPLE_PASS, /* type */
3262 "phiopt", /* name */
3263 OPTGROUP_NONE, /* optinfo_flags */
3264 TV_TREE_PHIOPT, /* tv_id */
3265 ( PROP_cfg | PROP_ssa ), /* properties_required */
3266 0, /* properties_provided */
3267 0, /* properties_destroyed */
3268 0, /* todo_flags_start */
3269 0, /* todo_flags_finish */
3270 };
3271
3272 class pass_phiopt : public gimple_opt_pass
3273 {
3274 public:
3275 pass_phiopt (gcc::context *ctxt)
3276 : gimple_opt_pass (pass_data_phiopt, ctxt), early_p (false)
3277 {}
3278
3279 /* opt_pass methods: */
3280 opt_pass * clone () { return new pass_phiopt (m_ctxt); }
3281 void set_pass_param (unsigned n, bool param)
3282 {
3283 gcc_assert (n == 0);
3284 early_p = param;
3285 }
3286 virtual bool gate (function *) { return flag_ssa_phiopt; }
3287 virtual unsigned int execute (function *)
3288 {
3289 return tree_ssa_phiopt_worker (false,
3290 !early_p ? gate_hoist_loads () : false,
3291 early_p);
3292 }
3293
3294 private:
3295 bool early_p;
3296 }; // class pass_phiopt
3297
3298 } // anon namespace
3299
3300 gimple_opt_pass *
3301 make_pass_phiopt (gcc::context *ctxt)
3302 {
3303 return new pass_phiopt (ctxt);
3304 }
3305
3306 namespace {
3307
3308 const pass_data pass_data_cselim =
3309 {
3310 GIMPLE_PASS, /* type */
3311 "cselim", /* name */
3312 OPTGROUP_NONE, /* optinfo_flags */
3313 TV_TREE_PHIOPT, /* tv_id */
3314 ( PROP_cfg | PROP_ssa ), /* properties_required */
3315 0, /* properties_provided */
3316 0, /* properties_destroyed */
3317 0, /* todo_flags_start */
3318 0, /* todo_flags_finish */
3319 };
3320
3321 class pass_cselim : public gimple_opt_pass
3322 {
3323 public:
3324 pass_cselim (gcc::context *ctxt)
3325 : gimple_opt_pass (pass_data_cselim, ctxt)
3326 {}
3327
3328 /* opt_pass methods: */
3329 virtual bool gate (function *) { return flag_tree_cselim; }
3330 virtual unsigned int execute (function *) { return tree_ssa_cs_elim (); }
3331
3332 }; // class pass_cselim
3333
3334 } // anon namespace
3335
3336 gimple_opt_pass *
3337 make_pass_cselim (gcc::context *ctxt)
3338 {
3339 return new pass_cselim (ctxt);
3340 }