gcc/tree-ssa-phiopt.c
1 /* Optimization of PHI nodes by converting them into straightline code.
2 Copyright (C) 2004-2021 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the
8 Free Software Foundation; either version 3, or (at your option) any
9 later version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "backend.h"
24 #include "insn-codes.h"
25 #include "rtl.h"
26 #include "tree.h"
27 #include "gimple.h"
28 #include "cfghooks.h"
29 #include "tree-pass.h"
30 #include "ssa.h"
31 #include "optabs-tree.h"
32 #include "insn-config.h"
33 #include "gimple-pretty-print.h"
34 #include "fold-const.h"
35 #include "stor-layout.h"
36 #include "cfganal.h"
37 #include "gimplify.h"
38 #include "gimple-iterator.h"
39 #include "gimplify-me.h"
40 #include "tree-cfg.h"
41 #include "tree-dfa.h"
42 #include "domwalk.h"
43 #include "cfgloop.h"
44 #include "tree-data-ref.h"
45 #include "tree-scalar-evolution.h"
46 #include "tree-inline.h"
47 #include "case-cfn-macros.h"
48 #include "tree-eh.h"
49 #include "gimple-fold.h"
50 #include "internal-fn.h"
51
52 static unsigned int tree_ssa_phiopt_worker (bool, bool, bool);
53 static bool two_value_replacement (basic_block, basic_block, edge, gphi *,
54 tree, tree);
55 static bool conditional_replacement (basic_block, basic_block,
56 edge, edge, gphi *, tree, tree);
57 static gphi *factor_out_conditional_conversion (edge, edge, gphi *, tree, tree,
58 gimple *);
59 static int value_replacement (basic_block, basic_block,
60 edge, edge, gimple *, tree, tree);
61 static bool minmax_replacement (basic_block, basic_block,
62 edge, edge, gimple *, tree, tree);
63 static bool abs_replacement (basic_block, basic_block,
64 edge, edge, gimple *, tree, tree);
65 static bool xor_replacement (basic_block, basic_block,
66 edge, edge, gimple *, tree, tree);
67 static bool cond_removal_in_popcount_clz_ctz_pattern (basic_block, basic_block,
68 edge, edge, gimple *,
69 tree, tree);
70 static bool cond_store_replacement (basic_block, basic_block, edge, edge,
71 hash_set<tree> *);
72 static bool cond_if_else_store_replacement (basic_block, basic_block, basic_block);
73 static hash_set<tree> * get_non_trapping ();
74 static void replace_phi_edge_with_variable (basic_block, edge, gimple *, tree);
75 static void hoist_adjacent_loads (basic_block, basic_block,
76 basic_block, basic_block);
77 static bool gate_hoist_loads (void);
78
79 /* This pass tries to transform conditional stores into unconditional
80 ones, enabling further simplifications with the simpler then and else
81 blocks. In particular it replaces this:
82
83 bb0:
84 if (cond) goto bb2; else goto bb1;
85 bb1:
86 *p = RHS;
87 bb2:
88
89 with
90
91 bb0:
92 if (cond) goto bb1; else goto bb2;
93 bb1:
94 condtmp' = *p;
95 bb2:
96 condtmp = PHI <RHS, condtmp'>
97 *p = condtmp;
98
99 This transformation can only be done under several constraints,
100 documented below. It also replaces:
101
102 bb0:
103 if (cond) goto bb2; else goto bb1;
104 bb1:
105 *p = RHS1;
106 goto bb3;
107 bb2:
108 *p = RHS2;
109 bb3:
110
111 with
112
113 bb0:
114 if (cond) goto bb3; else goto bb1;
115 bb1:
116 bb3:
117 condtmp = PHI <RHS1, RHS2>
118 *p = condtmp; */
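
/* As an illustrative source-level sketch (names are made up and not part
   of the pass documentation above), a guarded store such as

     void
     f (int *p, int cond, int val)
     {
       if (cond)
         *p = val;
     }

   is, when *p is known not to trap and the unconditional store is
   otherwise safe to introduce, rewritten so that only the stored value
   is selected:

     void
     f (int *p, int cond, int val)
     {
       int tmp = cond ? val : *p;
       *p = tmp;
     }  */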
119
120 static unsigned int
121 tree_ssa_cs_elim (void)
122 {
123 unsigned todo;
124 /* ??? We are not interested in loop related info, but the following
125 will create it, ICEing as we didn't init loops with pre-headers.
126 An interfacing issue of find_data_references_in_bb. */
127 loop_optimizer_init (LOOPS_NORMAL);
128 scev_initialize ();
129 todo = tree_ssa_phiopt_worker (true, false, false);
130 scev_finalize ();
131 loop_optimizer_finalize ();
132 return todo;
133 }
134
135 /* Return the singleton PHI in the SEQ of PHIs for edges E0 and E1. */
136
137 static gphi *
138 single_non_singleton_phi_for_edges (gimple_seq seq, edge e0, edge e1)
139 {
140 gimple_stmt_iterator i;
141 gphi *phi = NULL;
142 if (gimple_seq_singleton_p (seq))
143 return as_a <gphi *> (gsi_stmt (gsi_start (seq)));
144 for (i = gsi_start (seq); !gsi_end_p (i); gsi_next (&i))
145 {
146 gphi *p = as_a <gphi *> (gsi_stmt (i));
147 /* If the PHI arguments are equal then we can skip this PHI. */
148 if (operand_equal_for_phi_arg_p (gimple_phi_arg_def (p, e0->dest_idx),
149 gimple_phi_arg_def (p, e1->dest_idx)))
150 continue;
151
152 /* If we already have a PHI whose arguments for the two edges
153 differ, then there is no single such PHI; return NULL. */
154 if (phi)
155 return NULL;
156
157 phi = p;
158 }
159 return phi;
160 }
161
162 /* The core routine of conditional store replacement and normal
163 phi optimizations. Both share much of the infrastructure in how
164 to match applicable basic block patterns. DO_STORE_ELIM is true
165 when we want to do conditional store replacement, false otherwise.
166 DO_HOIST_LOADS is true when we want to hoist adjacent loads out
167 of diamond control flow patterns, false otherwise. EARLY_P is
    true when called from the early phiopt pass, in which case only
    a restricted subset of the replacements is attempted. */
168 static unsigned int
169 tree_ssa_phiopt_worker (bool do_store_elim, bool do_hoist_loads, bool early_p)
170 {
171 basic_block bb;
172 basic_block *bb_order;
173 unsigned n, i;
174 bool cfgchanged = false;
175 hash_set<tree> *nontrap = 0;
176
177 if (do_store_elim)
178 /* Calculate the set of non-trapping memory accesses. */
179 nontrap = get_non_trapping ();
180
181 /* Search every basic block for COND_EXPRs we may be able to optimize.
182 
183 We walk the blocks in an order that guarantees that a block with
184 a single predecessor is processed before the predecessor.
185 This ensures that we collapse inner ifs before visiting the
186 outer ones, and also that we do not try to visit a removed
187 block. */
188 bb_order = single_pred_before_succ_order ();
189 n = n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS;
190
191 for (i = 0; i < n; i++)
192 {
193 gimple *cond_stmt;
194 gphi *phi;
195 basic_block bb1, bb2;
196 edge e1, e2;
197 tree arg0, arg1;
198
199 bb = bb_order[i];
200
201 cond_stmt = last_stmt (bb);
202 /* Check to see if the last statement is a GIMPLE_COND. */
203 if (!cond_stmt
204 || gimple_code (cond_stmt) != GIMPLE_COND)
205 continue;
206
207 e1 = EDGE_SUCC (bb, 0);
208 bb1 = e1->dest;
209 e2 = EDGE_SUCC (bb, 1);
210 bb2 = e2->dest;
211
212 /* We cannot do the optimization on abnormal edges. */
213 if ((e1->flags & EDGE_ABNORMAL) != 0
214 || (e2->flags & EDGE_ABNORMAL) != 0)
215 continue;
216
217 /* Give up if bb1 has no successors, bb2 is NULL, or bb2 has no successors. */
218 if (EDGE_COUNT (bb1->succs) == 0
219 || bb2 == NULL
220 || EDGE_COUNT (bb2->succs) == 0)
221 continue;
222
223 /* Find the bb which is the fall through to the other. */
224 if (EDGE_SUCC (bb1, 0)->dest == bb2)
225 ;
226 else if (EDGE_SUCC (bb2, 0)->dest == bb1)
227 {
228 std::swap (bb1, bb2);
229 std::swap (e1, e2);
230 }
231 else if (do_store_elim
232 && EDGE_SUCC (bb1, 0)->dest == EDGE_SUCC (bb2, 0)->dest)
233 {
234 basic_block bb3 = EDGE_SUCC (bb1, 0)->dest;
235
236 if (!single_succ_p (bb1)
237 || (EDGE_SUCC (bb1, 0)->flags & EDGE_FALLTHRU) == 0
238 || !single_succ_p (bb2)
239 || (EDGE_SUCC (bb2, 0)->flags & EDGE_FALLTHRU) == 0
240 || EDGE_COUNT (bb3->preds) != 2)
241 continue;
242 if (cond_if_else_store_replacement (bb1, bb2, bb3))
243 cfgchanged = true;
244 continue;
245 }
246 else if (do_hoist_loads
247 && EDGE_SUCC (bb1, 0)->dest == EDGE_SUCC (bb2, 0)->dest)
248 {
249 basic_block bb3 = EDGE_SUCC (bb1, 0)->dest;
250
251 if (!FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (cond_stmt)))
252 && single_succ_p (bb1)
253 && single_succ_p (bb2)
254 && single_pred_p (bb1)
255 && single_pred_p (bb2)
256 && EDGE_COUNT (bb->succs) == 2
257 && EDGE_COUNT (bb3->preds) == 2
258 /* If one edge or the other is dominant, a conditional move
259 is likely to perform worse than the well-predicted branch. */
260 && !predictable_edge_p (EDGE_SUCC (bb, 0))
261 && !predictable_edge_p (EDGE_SUCC (bb, 1)))
262 hoist_adjacent_loads (bb, bb1, bb2, bb3);
263 continue;
264 }
265 else
266 continue;
267
268 e1 = EDGE_SUCC (bb1, 0);
269
270 /* Make sure that bb1 is just a fall through. */
271 if (!single_succ_p (bb1)
272 || (e1->flags & EDGE_FALLTHRU) == 0)
273 continue;
274
275 /* Also make sure that bb1 only has one predecessor and that it
276 is bb. */
277 if (!single_pred_p (bb1)
278 || single_pred (bb1) != bb)
279 continue;
280
281 if (do_store_elim)
282 {
283 /* bb1 is the middle block, bb2 the join block, bb the split block,
284 e1 the fallthrough edge from bb1 to bb2. We can't do the
285 optimization if the join block has more than two predecessors. */
286 if (EDGE_COUNT (bb2->preds) > 2)
287 continue;
288 if (cond_store_replacement (bb1, bb2, e1, e2, nontrap))
289 cfgchanged = true;
290 }
291 else
292 {
293 gimple_seq phis = phi_nodes (bb2);
294 gimple_stmt_iterator gsi;
295 bool candorest = true;
296
297 /* Value replacement can work with more than one PHI
298 so try that first. */
299 if (!early_p)
300 for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
301 {
302 phi = as_a <gphi *> (gsi_stmt (gsi));
303 arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
304 arg1 = gimple_phi_arg_def (phi, e2->dest_idx);
305 if (value_replacement (bb, bb1, e1, e2, phi, arg0, arg1) == 2)
306 {
307 candorest = false;
308 cfgchanged = true;
309 break;
310 }
311 }
312
313 if (!candorest)
314 continue;
315
316 phi = single_non_singleton_phi_for_edges (phis, e1, e2);
317 if (!phi)
318 continue;
319
320 arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
321 arg1 = gimple_phi_arg_def (phi, e2->dest_idx);
322
323 /* Something is wrong if we cannot find the arguments in the PHI
324 node. */
325 gcc_assert (arg0 != NULL_TREE && arg1 != NULL_TREE);
326
327 gphi *newphi = factor_out_conditional_conversion (e1, e2, phi,
328 arg0, arg1,
329 cond_stmt);
330 if (newphi != NULL)
331 {
332 phi = newphi;
333 /* factor_out_conditional_conversion may create a new PHI in
334 BB2 and eliminate an existing PHI in BB2. Recompute values
335 that may be affected by that change. */
336 arg0 = gimple_phi_arg_def (phi, e1->dest_idx);
337 arg1 = gimple_phi_arg_def (phi, e2->dest_idx);
338 gcc_assert (arg0 != NULL_TREE && arg1 != NULL_TREE);
339 }
340
341 /* Do the replacement of conditional if it can be done. */
342 if (!early_p && two_value_replacement (bb, bb1, e2, phi, arg0, arg1))
343 cfgchanged = true;
344 else if (!early_p
345 && conditional_replacement (bb, bb1, e1, e2, phi,
346 arg0, arg1))
347 cfgchanged = true;
348 else if (abs_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
349 cfgchanged = true;
350 else if (!early_p
351 && xor_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
352 cfgchanged = true;
353 else if (!early_p
354 && cond_removal_in_popcount_clz_ctz_pattern (bb, bb1, e1,
355 e2, phi, arg0,
356 arg1))
357 cfgchanged = true;
358 else if (minmax_replacement (bb, bb1, e1, e2, phi, arg0, arg1))
359 cfgchanged = true;
360 }
361 }
362
363 free (bb_order);
364
365 if (do_store_elim)
366 delete nontrap;
367 /* If the CFG has changed, we should clean up the CFG. */
368 if (cfgchanged && do_store_elim)
369 {
370 /* In cond-store replacement we have added some loads on edges
371 and new VOPS (as we moved the store, and created a load). */
372 gsi_commit_edge_inserts ();
373 return TODO_cleanup_cfg | TODO_update_ssa_only_virtuals;
374 }
375 else if (cfgchanged)
376 return TODO_cleanup_cfg;
377 return 0;
378 }
379
380 /* Replace PHI node element whose edge is E in block BB with variable NEW.
381 Remove the edge from COND_BLOCK which does not lead to BB (COND_BLOCK
382 is known to have two edges, one of which must reach BB). */
383
384 static void
385 replace_phi_edge_with_variable (basic_block cond_block,
386 edge e, gimple *phi, tree new_tree)
387 {
388 basic_block bb = gimple_bb (phi);
389 basic_block block_to_remove;
390 gimple_stmt_iterator gsi;
391
392 /* Change the PHI argument to new. */
393 SET_USE (PHI_ARG_DEF_PTR (phi, e->dest_idx), new_tree);
394
395 /* Remove the empty basic block. */
396 if (EDGE_SUCC (cond_block, 0)->dest == bb)
397 {
398 EDGE_SUCC (cond_block, 0)->flags |= EDGE_FALLTHRU;
399 EDGE_SUCC (cond_block, 0)->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
400 EDGE_SUCC (cond_block, 0)->probability = profile_probability::always ();
401
402 block_to_remove = EDGE_SUCC (cond_block, 1)->dest;
403 }
404 else
405 {
406 EDGE_SUCC (cond_block, 1)->flags |= EDGE_FALLTHRU;
407 EDGE_SUCC (cond_block, 1)->flags
408 &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
409 EDGE_SUCC (cond_block, 1)->probability = profile_probability::always ();
410
411 block_to_remove = EDGE_SUCC (cond_block, 0)->dest;
412 }
413 delete_basic_block (block_to_remove);
414
415 /* Eliminate the COND_EXPR at the end of COND_BLOCK. */
416 gsi = gsi_last_bb (cond_block);
417 gsi_remove (&gsi, true);
418
419 if (dump_file && (dump_flags & TDF_DETAILS))
420 fprintf (dump_file,
421 "COND_EXPR in block %d and PHI in block %d converted to straightline code.\n",
422 cond_block->index,
423 bb->index);
424 }
425
426 /* PR66726: Factor conversion out of COND_EXPR. If the arguments of the PHI
427 stmt are conversion statements, factor out the conversion and apply it
428 to the result of the PHI stmt. COND_STMT is the controlling predicate.
429 Return the newly-created PHI, if any. */
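
/* Illustrative sketch (hypothetical source): for

     long
     f (int a, int b, int c)
     {
       return c ? (long) a : (long) b;
     }

   the two widening casts feeding the PHI are factored out, leaving a
   PHI over the int values followed by a single conversion of its
   result, roughly

     tmp = c ? a : b;
     return (long) tmp;

   which can in turn enable minmax_replacement etc. on the narrower
   PHI.  */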
430
431 static gphi *
432 factor_out_conditional_conversion (edge e0, edge e1, gphi *phi,
433 tree arg0, tree arg1, gimple *cond_stmt)
434 {
435 gimple *arg0_def_stmt = NULL, *arg1_def_stmt = NULL, *new_stmt;
436 tree new_arg0 = NULL_TREE, new_arg1 = NULL_TREE;
437 tree temp, result;
438 gphi *newphi;
439 gimple_stmt_iterator gsi, gsi_for_def;
440 location_t locus = gimple_location (phi);
441 enum tree_code convert_code;
442
443 /* Handle only PHI statements with two arguments. TODO: If all
444 other arguments to PHI are INTEGER_CST or if their defining
445 statements have the same unary operation, we can handle more
446 than two arguments too. */
447 if (gimple_phi_num_args (phi) != 2)
448 return NULL;
449
450 /* First canonicalize to simplify tests. */
451 if (TREE_CODE (arg0) != SSA_NAME)
452 {
453 std::swap (arg0, arg1);
454 std::swap (e0, e1);
455 }
456
457 if (TREE_CODE (arg0) != SSA_NAME
458 || (TREE_CODE (arg1) != SSA_NAME
459 && TREE_CODE (arg1) != INTEGER_CST))
460 return NULL;
461
462 /* Check if arg0 is an SSA_NAME and the stmt which defines arg0 is
463 a conversion. */
464 arg0_def_stmt = SSA_NAME_DEF_STMT (arg0);
465 if (!gimple_assign_cast_p (arg0_def_stmt))
466 return NULL;
467
468 /* Use the RHS as new_arg0. */
469 convert_code = gimple_assign_rhs_code (arg0_def_stmt);
470 new_arg0 = gimple_assign_rhs1 (arg0_def_stmt);
471 if (convert_code == VIEW_CONVERT_EXPR)
472 {
473 new_arg0 = TREE_OPERAND (new_arg0, 0);
474 if (!is_gimple_reg_type (TREE_TYPE (new_arg0)))
475 return NULL;
476 }
477
478 if (TREE_CODE (arg1) == SSA_NAME)
479 {
480 /* Check if arg1 is an SSA_NAME and the stmt which defines arg1
481 is a conversion. */
482 arg1_def_stmt = SSA_NAME_DEF_STMT (arg1);
483 if (!is_gimple_assign (arg1_def_stmt)
484 || gimple_assign_rhs_code (arg1_def_stmt) != convert_code)
485 return NULL;
486
487 /* Use the RHS as new_arg1. */
488 new_arg1 = gimple_assign_rhs1 (arg1_def_stmt);
489 if (convert_code == VIEW_CONVERT_EXPR)
490 new_arg1 = TREE_OPERAND (new_arg1, 0);
491 }
492 else
493 {
494 /* If arg1 is an INTEGER_CST, fold it to new type. */
495 if (INTEGRAL_TYPE_P (TREE_TYPE (new_arg0))
496 && int_fits_type_p (arg1, TREE_TYPE (new_arg0)))
497 {
498 if (gimple_assign_cast_p (arg0_def_stmt))
499 {
500 /* For the INTEGER_CST case, we are just moving the
501 conversion from one place to another, which can often
502 hurt as the conversion moves further away from the
503 statement that computes the value. So, perform this
504 only if new_arg0 is an operand of COND_STMT, or
505 if arg0_def_stmt is the only non-debug stmt in
506 its basic block, because then it is possible this
507 could enable further optimizations (minmax replacement
508 etc.). See PR71016. */
509 if (new_arg0 != gimple_cond_lhs (cond_stmt)
510 && new_arg0 != gimple_cond_rhs (cond_stmt)
511 && gimple_bb (arg0_def_stmt) == e0->src)
512 {
513 gsi = gsi_for_stmt (arg0_def_stmt);
514 gsi_prev_nondebug (&gsi);
515 if (!gsi_end_p (gsi))
516 {
517 if (gassign *assign
518 = dyn_cast <gassign *> (gsi_stmt (gsi)))
519 {
520 tree lhs = gimple_assign_lhs (assign);
521 enum tree_code ass_code
522 = gimple_assign_rhs_code (assign);
523 if (ass_code != MAX_EXPR && ass_code != MIN_EXPR)
524 return NULL;
525 if (lhs != gimple_assign_rhs1 (arg0_def_stmt))
526 return NULL;
527 gsi_prev_nondebug (&gsi);
528 if (!gsi_end_p (gsi))
529 return NULL;
530 }
531 else
532 return NULL;
533 }
534 gsi = gsi_for_stmt (arg0_def_stmt);
535 gsi_next_nondebug (&gsi);
536 if (!gsi_end_p (gsi))
537 return NULL;
538 }
539 new_arg1 = fold_convert (TREE_TYPE (new_arg0), arg1);
540 }
541 else
542 return NULL;
543 }
544 else
545 return NULL;
546 }
547
548 /* If arg0/arg1 have > 1 use, then this transformation actually increases
549 the number of expressions evaluated at runtime. */
550 if (!has_single_use (arg0)
551 || (arg1_def_stmt && !has_single_use (arg1)))
552 return NULL;
553
554 /* If the types of new_arg0 and new_arg1 are different, bail out. */
555 if (!types_compatible_p (TREE_TYPE (new_arg0), TREE_TYPE (new_arg1)))
556 return NULL;
557
558 /* Create a new PHI stmt. */
559 result = PHI_RESULT (phi);
560 temp = make_ssa_name (TREE_TYPE (new_arg0), NULL);
561 newphi = create_phi_node (temp, gimple_bb (phi));
562
563 if (dump_file && (dump_flags & TDF_DETAILS))
564 {
565 fprintf (dump_file, "PHI ");
566 print_generic_expr (dump_file, gimple_phi_result (phi));
567 fprintf (dump_file,
568 " changed to factor conversion out from COND_EXPR.\n");
569 fprintf (dump_file, "New stmt with CAST that defines ");
570 print_generic_expr (dump_file, result);
571 fprintf (dump_file, ".\n");
572 }
573
574 /* Remove the old cast(s) that have a single use. */
575 gsi_for_def = gsi_for_stmt (arg0_def_stmt);
576 gsi_remove (&gsi_for_def, true);
577 release_defs (arg0_def_stmt);
578
579 if (arg1_def_stmt)
580 {
581 gsi_for_def = gsi_for_stmt (arg1_def_stmt);
582 gsi_remove (&gsi_for_def, true);
583 release_defs (arg1_def_stmt);
584 }
585
586 add_phi_arg (newphi, new_arg0, e0, locus);
587 add_phi_arg (newphi, new_arg1, e1, locus);
588
589 /* Create the conversion stmt and insert it. */
590 if (convert_code == VIEW_CONVERT_EXPR)
591 {
592 temp = fold_build1 (VIEW_CONVERT_EXPR, TREE_TYPE (result), temp);
593 new_stmt = gimple_build_assign (result, temp);
594 }
595 else
596 new_stmt = gimple_build_assign (result, convert_code, temp);
597 gsi = gsi_after_labels (gimple_bb (phi));
598 gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
599
600 /* Remove the original PHI stmt. */
601 gsi = gsi_for_stmt (phi);
602 gsi_remove (&gsi, true);
603 return newphi;
604 }
605
606 /* Optimize
607 # x_5 in range [cst1, cst2] where cst2 = cst1 + 1
608 if (x_5 op cstN) # where op is == or != and N is 1 or 2
609 goto bb3;
610 else
611 goto bb4;
612 bb3:
613 bb4:
614 # r_6 = PHI<cst3(2), cst4(3)> # where cst3 == cst4 + 1 or cst4 == cst3 + 1
615
616 to r_6 = x_5 + (min (cst3, cst4) - cst1) or
617 r_6 = (max (cst3, cst4) + cst1) - x_5 depending on op, N and which
618 of cst3 and cst4 is smaller. */
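
/* A concrete illustrative instance: with x known (e.g. from range info)
   to be in [0, 1],

     r = x == 0 ? 3 : 4;

   becomes r = x + 3, while

     r = x == 0 ? 4 : 3;

   becomes r = 4 - x (computed in an unsigned type when needed to avoid
   introducing signed overflow).  */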
619
620 static bool
621 two_value_replacement (basic_block cond_bb, basic_block middle_bb,
622 edge e1, gphi *phi, tree arg0, tree arg1)
623 {
624 /* Only look for adjacent integer constants. */
625 if (!INTEGRAL_TYPE_P (TREE_TYPE (arg0))
626 || !INTEGRAL_TYPE_P (TREE_TYPE (arg1))
627 || TREE_CODE (arg0) != INTEGER_CST
628 || TREE_CODE (arg1) != INTEGER_CST
629 || (tree_int_cst_lt (arg0, arg1)
630 ? wi::to_widest (arg0) + 1 != wi::to_widest (arg1)
631 : wi::to_widest (arg1) + 1 != wi::to_widest (arg0)))
632 return false;
633
634 if (!empty_block_p (middle_bb))
635 return false;
636
637 gimple *stmt = last_stmt (cond_bb);
638 tree lhs = gimple_cond_lhs (stmt);
639 tree rhs = gimple_cond_rhs (stmt);
640
641 if (TREE_CODE (lhs) != SSA_NAME
642 || !INTEGRAL_TYPE_P (TREE_TYPE (lhs))
643 || TREE_CODE (rhs) != INTEGER_CST)
644 return false;
645
646 switch (gimple_cond_code (stmt))
647 {
648 case EQ_EXPR:
649 case NE_EXPR:
650 break;
651 default:
652 return false;
653 }
654
655 /* Defer boolean x ? 0 : {1,-1} or x ? {1,-1} : 0 to
656 conditional_replacement. */
657 if (TREE_CODE (TREE_TYPE (lhs)) == BOOLEAN_TYPE
658 && (integer_zerop (arg0)
659 || integer_zerop (arg1)
660 || TREE_CODE (TREE_TYPE (arg0)) == BOOLEAN_TYPE
661 || (TYPE_PRECISION (TREE_TYPE (arg0))
662 <= TYPE_PRECISION (TREE_TYPE (lhs)))))
663 return false;
664
665 wide_int min, max;
666 if (get_range_info (lhs, &min, &max) != VR_RANGE)
667 {
668 int prec = TYPE_PRECISION (TREE_TYPE (lhs));
669 signop sgn = TYPE_SIGN (TREE_TYPE (lhs));
670 min = wi::min_value (prec, sgn);
671 max = wi::max_value (prec, sgn);
672 }
673 if (min + 1 != max
674 || (wi::to_wide (rhs) != min
675 && wi::to_wide (rhs) != max))
676 return false;
677
678 /* We need to know which is the true edge and which is the false
679 edge so that we know when to invert the condition below. */
680 edge true_edge, false_edge;
681 extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
682 if ((gimple_cond_code (stmt) == EQ_EXPR)
683 ^ (wi::to_wide (rhs) == max)
684 ^ (e1 == false_edge))
685 std::swap (arg0, arg1);
686
687 tree type;
688 if (TYPE_PRECISION (TREE_TYPE (lhs)) == TYPE_PRECISION (TREE_TYPE (arg0)))
689 {
690 /* Avoid performing the arithmetic in bool type, which has different
691 semantics; otherwise, of the two equal-precision types, prefer
692 the unsigned one. */
693 if (TREE_CODE (TREE_TYPE (arg0)) == BOOLEAN_TYPE
694 || !TYPE_UNSIGNED (TREE_TYPE (arg0)))
695 type = TREE_TYPE (lhs);
696 else
697 type = TREE_TYPE (arg0);
698 }
699 else if (TYPE_PRECISION (TREE_TYPE (lhs)) > TYPE_PRECISION (TREE_TYPE (arg0)))
700 type = TREE_TYPE (lhs);
701 else
702 type = TREE_TYPE (arg0);
703
704 min = wide_int::from (min, TYPE_PRECISION (type),
705 TYPE_SIGN (TREE_TYPE (lhs)));
706 wide_int a = wide_int::from (wi::to_wide (arg0), TYPE_PRECISION (type),
707 TYPE_SIGN (TREE_TYPE (arg0)));
708 enum tree_code code;
709 wi::overflow_type ovf;
710 if (tree_int_cst_lt (arg0, arg1))
711 {
712 code = PLUS_EXPR;
713 a -= min;
714 if (!TYPE_UNSIGNED (type))
715 {
716 /* lhs is known to be in range [min, min+1] and we want to add a
717 to it. Check if that operation can overflow for those 2 values
718 and if yes, force unsigned type. */
719 wi::add (min + (wi::neg_p (a) ? 0 : 1), a, SIGNED, &ovf);
720 if (ovf)
721 type = unsigned_type_for (type);
722 }
723 }
724 else
725 {
726 code = MINUS_EXPR;
727 a += min;
728 if (!TYPE_UNSIGNED (type))
729 {
730 /* lhs is known to be in range [min, min+1] and we want to subtract
731 it from a. Check if that operation can overflow for those 2
732 values and if yes, force unsigned type. */
733 wi::sub (a, min + (wi::neg_p (min) ? 0 : 1), SIGNED, &ovf);
734 if (ovf)
735 type = unsigned_type_for (type);
736 }
737 }
738
739 tree arg = wide_int_to_tree (type, a);
740 gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
741 if (!useless_type_conversion_p (type, TREE_TYPE (lhs)))
742 lhs = gimplify_build1 (&gsi, NOP_EXPR, type, lhs);
743 tree new_rhs;
744 if (code == PLUS_EXPR)
745 new_rhs = gimplify_build2 (&gsi, PLUS_EXPR, type, lhs, arg);
746 else
747 new_rhs = gimplify_build2 (&gsi, MINUS_EXPR, type, arg, lhs);
748 if (!useless_type_conversion_p (TREE_TYPE (arg0), type))
749 new_rhs = gimplify_build1 (&gsi, NOP_EXPR, TREE_TYPE (arg0), new_rhs);
750
751 replace_phi_edge_with_variable (cond_bb, e1, phi, new_rhs);
752
753 /* Note that we optimized this PHI. */
754 return true;
755 }
756
757 /* The function conditional_replacement does the main work of doing the
758 conditional replacement. Return true if the replacement is done.
759 Otherwise return false.
760 BB is the basic block where the replacement is going to be done. ARG0
761 is argument 0 from PHI. Likewise for ARG1. */
762
763 static bool
764 conditional_replacement (basic_block cond_bb, basic_block middle_bb,
765 edge e0, edge e1, gphi *phi,
766 tree arg0, tree arg1)
767 {
768 tree result;
769 gimple *stmt;
770 gassign *new_stmt;
771 tree cond;
772 gimple_stmt_iterator gsi;
773 edge true_edge, false_edge;
774 tree new_var, new_var2;
775 bool neg = false;
776 int shift = 0;
777 tree nonzero_arg;
778
779 /* FIXME: Gimplification of complex type is too hard for now. */
780 /* We aren't prepared to handle vectors either (and it is a question
781 if it would be worthwhile anyway). */
782 if (!(INTEGRAL_TYPE_P (TREE_TYPE (arg0))
783 || POINTER_TYPE_P (TREE_TYPE (arg0)))
784 || !(INTEGRAL_TYPE_P (TREE_TYPE (arg1))
785 || POINTER_TYPE_P (TREE_TYPE (arg1))))
786 return false;
787
788 /* If the PHI arguments are the constants 0 and 1, or 0 and -1, or
789 0 and (1 << cst), convert the PHI to a conditional expression. */
790 if (integer_zerop (arg0))
791 nonzero_arg = arg1;
792 else if (integer_zerop (arg1))
793 nonzero_arg = arg0;
794 else
795 return false;
796 if (integer_all_onesp (nonzero_arg))
797 neg = true;
798 else if (integer_pow2p (nonzero_arg))
799 {
800 shift = tree_log2 (nonzero_arg);
801 if (shift && POINTER_TYPE_P (TREE_TYPE (nonzero_arg)))
802 return false;
803 }
804 else
805 return false;
806
807 if (!empty_block_p (middle_bb))
808 return false;
809
810 /* At this point we know we have a GIMPLE_COND with two successors.
811 One successor is BB, the other successor is an empty block which
812 falls through into BB.
813
814 There is a single PHI node at the join point (BB) and its arguments
815 are constants (0, 1) or (0, -1) or (0, (1 << shift)).
816
817 So, given the condition COND, and the two PHI arguments, we can
818 rewrite this PHI into non-branching code:
819
820 dest = (COND) or dest = COND' or dest = (COND) << shift
821
822 We use the condition as-is if the argument associated with the
823 true edge has the value one or the argument associated with the
824 false edge has the value zero. Note that those conditions are not
825 the same since only one of the outgoing edges from the GIMPLE_COND
826 will directly reach BB and thus be associated with an argument. */
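
  /* As an illustrative source-level sketch,

       return a < b ? 1 : 0;

     becomes a direct use of the (converted) comparison result,

       return a < b ? 4 : 0;

     becomes ((int) (a < b)) << 2, and a 0/-1 argument pair uses a
     negation of the comparison result instead of a shift.  */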
827
828 stmt = last_stmt (cond_bb);
829 result = PHI_RESULT (phi);
830
831 /* To handle special cases like floating point comparison, it is easier and
832 less error-prone to build a tree and gimplify it on the fly though it is
833 less efficient. */
834 cond = fold_build2_loc (gimple_location (stmt),
835 gimple_cond_code (stmt), boolean_type_node,
836 gimple_cond_lhs (stmt), gimple_cond_rhs (stmt));
837
838 /* We need to know which is the true edge and which is the false
839 edge so that we know when to invert the condition below. */
840 extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
841 if ((e0 == true_edge && integer_zerop (arg0))
842 || (e0 == false_edge && !integer_zerop (arg0))
843 || (e1 == true_edge && integer_zerop (arg1))
844 || (e1 == false_edge && !integer_zerop (arg1)))
845 cond = fold_build1_loc (gimple_location (stmt),
846 TRUTH_NOT_EXPR, TREE_TYPE (cond), cond);
847
848 if (neg)
849 {
850 cond = fold_convert_loc (gimple_location (stmt),
851 TREE_TYPE (result), cond);
852 cond = fold_build1_loc (gimple_location (stmt),
853 NEGATE_EXPR, TREE_TYPE (cond), cond);
854 }
855 else if (shift)
856 {
857 cond = fold_convert_loc (gimple_location (stmt),
858 TREE_TYPE (result), cond);
859 cond = fold_build2_loc (gimple_location (stmt),
860 LSHIFT_EXPR, TREE_TYPE (cond), cond,
861 build_int_cst (integer_type_node, shift));
862 }
863
864 /* Insert our new statements at the end of the conditional block before the
865 COND_STMT. */
866 gsi = gsi_for_stmt (stmt);
867 new_var = force_gimple_operand_gsi (&gsi, cond, true, NULL, true,
868 GSI_SAME_STMT);
869
870 if (!useless_type_conversion_p (TREE_TYPE (result), TREE_TYPE (new_var)))
871 {
872 location_t locus_0, locus_1;
873
874 new_var2 = make_ssa_name (TREE_TYPE (result));
875 new_stmt = gimple_build_assign (new_var2, CONVERT_EXPR, new_var);
876 gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
877 new_var = new_var2;
878
879 /* Set the locus to the first argument, unless it doesn't have one. */
880 locus_0 = gimple_phi_arg_location (phi, 0);
881 locus_1 = gimple_phi_arg_location (phi, 1);
882 if (locus_0 == UNKNOWN_LOCATION)
883 locus_0 = locus_1;
884 gimple_set_location (new_stmt, locus_0);
885 }
886
887 replace_phi_edge_with_variable (cond_bb, e1, phi, new_var);
888
889 /* Note that we optimized this PHI. */
890 return true;
891 }
892
893 /* Update *ARG which is defined in STMT so that it contains the
894 computed value if that seems profitable. Return true if the
895 statement is made dead by that rewriting. */
896
897 static bool
898 jump_function_from_stmt (tree *arg, gimple *stmt)
899 {
900 enum tree_code code = gimple_assign_rhs_code (stmt);
901 if (code == ADDR_EXPR)
902 {
903 /* For arg = &p->i transform it to p, if possible. */
904 tree rhs1 = gimple_assign_rhs1 (stmt);
905 poly_int64 offset;
906 tree tem = get_addr_base_and_unit_offset (TREE_OPERAND (rhs1, 0),
907 &offset);
908 if (tem
909 && TREE_CODE (tem) == MEM_REF
910 && known_eq (mem_ref_offset (tem) + offset, 0))
911 {
912 *arg = TREE_OPERAND (tem, 0);
913 return true;
914 }
915 }
916 /* TODO: Much like IPA-CP jump-functions we want to handle constant
917 additions symbolically here, and we'd need to update the comparison
918 code that compares the arg + cst tuples in our caller. For now the
919 code above exactly handles the VEC_BASE pattern from vec.h. */
920 return false;
921 }
922
923 /* RHS is a source argument in a BIT_AND_EXPR which feeds a conditional
924 of the form SSA_NAME NE 0.
925
926 If RHS is fed by a simple EQ_EXPR comparison of two values, see if
927 the two input values of the EQ_EXPR match arg0 and arg1.
928
929 If so update *code and return TRUE. Otherwise return FALSE. */
930
931 static bool
932 rhs_is_fed_for_value_replacement (const_tree arg0, const_tree arg1,
933 enum tree_code *code, const_tree rhs)
934 {
935 /* Obviously if RHS is not an SSA_NAME, we can't look at the defining
936 statement. */
937 if (TREE_CODE (rhs) == SSA_NAME)
938 {
939 gimple *def1 = SSA_NAME_DEF_STMT (rhs);
940
941 /* Verify the defining statement has an EQ_EXPR on the RHS. */
942 if (is_gimple_assign (def1) && gimple_assign_rhs_code (def1) == EQ_EXPR)
943 {
944 /* Finally verify the source operands of the EQ_EXPR are equal
945 to arg0 and arg1. */
946 tree op0 = gimple_assign_rhs1 (def1);
947 tree op1 = gimple_assign_rhs2 (def1);
948 if ((operand_equal_for_phi_arg_p (arg0, op0)
949 && operand_equal_for_phi_arg_p (arg1, op1))
950 || (operand_equal_for_phi_arg_p (arg0, op1)
951 && operand_equal_for_phi_arg_p (arg1, op0)))
952 {
953 /* We will perform the optimization. */
954 *code = gimple_assign_rhs_code (def1);
955 return true;
956 }
957 }
958 }
959 return false;
960 }
961
962 /* Return TRUE if arg0/arg1 are equal to the rhs/lhs or lhs/rhs of COND.
963
964 Also return TRUE if arg0/arg1 are equal to the source arguments of
965 an EQ comparison feeding a BIT_AND_EXPR which feeds COND.
966
967 Return FALSE otherwise. */
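
/* Illustrative GIMPLE shape for the BIT_AND_EXPR case (names invented):

     _1 = a_2 == b_3;
     _4 = _1 & c_5;
     if (_4 != 0)
       goto <bb 3>;
     else
       goto <bb 4>;
     <bb 3>:
     <bb 4>:
     # x_6 = PHI <a_2(3), b_3(2)>

   Whenever the true edge is taken a_2 == b_3 must hold, so the PHI can
   be collapsed just as if the condition had been a_2 == b_3 itself.  */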
968
969 static bool
970 operand_equal_for_value_replacement (const_tree arg0, const_tree arg1,
971 enum tree_code *code, gimple *cond)
972 {
973 gimple *def;
974 tree lhs = gimple_cond_lhs (cond);
975 tree rhs = gimple_cond_rhs (cond);
976
977 if ((operand_equal_for_phi_arg_p (arg0, lhs)
978 && operand_equal_for_phi_arg_p (arg1, rhs))
979 || (operand_equal_for_phi_arg_p (arg1, lhs)
980 && operand_equal_for_phi_arg_p (arg0, rhs)))
981 return true;
982
983 /* Now handle more complex case where we have an EQ comparison
984 which feeds a BIT_AND_EXPR which feeds COND.
985
986 First verify that COND is of the form SSA_NAME NE 0. */
987 if (*code != NE_EXPR || !integer_zerop (rhs)
988 || TREE_CODE (lhs) != SSA_NAME)
989 return false;
990
991 /* Now ensure that SSA_NAME is set by a BIT_AND_EXPR. */
992 def = SSA_NAME_DEF_STMT (lhs);
993 if (!is_gimple_assign (def) || gimple_assign_rhs_code (def) != BIT_AND_EXPR)
994 return false;
995
996 /* Now verify arg0/arg1 correspond to the source arguments of an
997 EQ comparison feeding the BIT_AND_EXPR. */
998
999 tree tmp = gimple_assign_rhs1 (def);
1000 if (rhs_is_fed_for_value_replacement (arg0, arg1, code, tmp))
1001 return true;
1002
1003 tmp = gimple_assign_rhs2 (def);
1004 if (rhs_is_fed_for_value_replacement (arg0, arg1, code, tmp))
1005 return true;
1006
1007 return false;
1008 }
1009
1010 /* Returns true if ARG is a neutral element for operation CODE
1011 on the RIGHT side. */
1012
1013 static bool
1014 neutral_element_p (tree_code code, tree arg, bool right)
1015 {
1016 switch (code)
1017 {
1018 case PLUS_EXPR:
1019 case BIT_IOR_EXPR:
1020 case BIT_XOR_EXPR:
1021 return integer_zerop (arg);
1022
1023 case LROTATE_EXPR:
1024 case RROTATE_EXPR:
1025 case LSHIFT_EXPR:
1026 case RSHIFT_EXPR:
1027 case MINUS_EXPR:
1028 case POINTER_PLUS_EXPR:
1029 return right && integer_zerop (arg);
1030
1031 case MULT_EXPR:
1032 return integer_onep (arg);
1033
1034 case TRUNC_DIV_EXPR:
1035 case CEIL_DIV_EXPR:
1036 case FLOOR_DIV_EXPR:
1037 case ROUND_DIV_EXPR:
1038 case EXACT_DIV_EXPR:
1039 return right && integer_onep (arg);
1040
1041 case BIT_AND_EXPR:
1042 return integer_all_onesp (arg);
1043
1044 default:
1045 return false;
1046 }
1047 }
1048
1049 /* Returns true if ARG is an absorbing element for operation CODE. */
1050
1051 static bool
1052 absorbing_element_p (tree_code code, tree arg, bool right, tree rval)
1053 {
1054 switch (code)
1055 {
1056 case BIT_IOR_EXPR:
1057 return integer_all_onesp (arg);
1058
1059 case MULT_EXPR:
1060 case BIT_AND_EXPR:
1061 return integer_zerop (arg);
1062
1063 case LSHIFT_EXPR:
1064 case RSHIFT_EXPR:
1065 case LROTATE_EXPR:
1066 case RROTATE_EXPR:
1067 return !right && integer_zerop (arg);
1068
1069 case TRUNC_DIV_EXPR:
1070 case CEIL_DIV_EXPR:
1071 case FLOOR_DIV_EXPR:
1072 case ROUND_DIV_EXPR:
1073 case EXACT_DIV_EXPR:
1074 case TRUNC_MOD_EXPR:
1075 case CEIL_MOD_EXPR:
1076 case FLOOR_MOD_EXPR:
1077 case ROUND_MOD_EXPR:
1078 return (!right
1079 && integer_zerop (arg)
1080 && tree_single_nonzero_warnv_p (rval, NULL));
1081
1082 default:
1083 return false;
1084 }
1085 }
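
/* Together, neutral_element_p and absorbing_element_p let
   value_replacement remove the branch around a single binary statement.
   Illustrative (hypothetical) sources:

     r = x != 0 ? x + y : y;    0 is a neutral element of +
     r = x != 0 ? y * x : 0;    0 is an absorbing element of *

   both become the unconditional x + y and y * x, respectively.  */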
1086
1087 /* The function value_replacement does the main work of doing the value
1088 replacement. Return non-zero if the replacement is done. Otherwise return
1089 0. If we remove the middle basic block, return 2.
1090 BB is the basic block where the replacement is going to be done. ARG0
1091 is argument 0 from the PHI. Likewise for ARG1. */
1092
1093 static int
1094 value_replacement (basic_block cond_bb, basic_block middle_bb,
1095 edge e0, edge e1, gimple *phi,
1096 tree arg0, tree arg1)
1097 {
1098 gimple_stmt_iterator gsi;
1099 gimple *cond;
1100 edge true_edge, false_edge;
1101 enum tree_code code;
1102 bool empty_or_with_defined_p = true;
1103
1104 /* If the type says honor signed zeros we cannot do this
1105 optimization. */
1106 if (HONOR_SIGNED_ZEROS (arg1))
1107 return 0;
1108
1109 /* If there is a statement in MIDDLE_BB that defines one of the PHI
1110 arguments, then adjust arg0 or arg1. */
1111 gsi = gsi_start_nondebug_after_labels_bb (middle_bb);
1112 while (!gsi_end_p (gsi))
1113 {
1114 gimple *stmt = gsi_stmt (gsi);
1115 tree lhs;
1116 gsi_next_nondebug (&gsi);
1117 if (!is_gimple_assign (stmt))
1118 {
1119 if (gimple_code (stmt) != GIMPLE_PREDICT
1120 && gimple_code (stmt) != GIMPLE_NOP)
1121 empty_or_with_defined_p = false;
1122 continue;
1123 }
1124 /* Now try to adjust arg0 or arg1 according to the computation
1125 in the statement. */
1126 lhs = gimple_assign_lhs (stmt);
1127 if (!(lhs == arg0
1128 && jump_function_from_stmt (&arg0, stmt))
1129 || (lhs == arg1
1130 && jump_function_from_stmt (&arg1, stmt)))
1131 empty_or_with_defined_p = false;
1132 }
1133
1134 cond = last_stmt (cond_bb);
1135 code = gimple_cond_code (cond);
1136
1137 /* This transformation is only valid for equality comparisons. */
1138 if (code != NE_EXPR && code != EQ_EXPR)
1139 return 0;
1140
1141 /* We need to know which is the true edge and which is the false
1142 edge so that we know which PHI argument goes with which outcome of the comparison. */
1143 extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
1144
1145 /* At this point we know we have a COND_EXPR with two successors.
1146 One successor is BB, the other successor is an empty block which
1147 falls through into BB.
1148
1149 The condition for the COND_EXPR is known to be NE_EXPR or EQ_EXPR.
1150
1151 There is a single PHI node at the join point (BB) with two arguments.
1152
1153 We now need to verify that the two arguments in the PHI node match
1154 the two arguments to the equality comparison. */
1155
1156 if (operand_equal_for_value_replacement (arg0, arg1, &code, cond))
1157 {
1158 edge e;
1159 tree arg;
1160
1161 /* For NE_EXPR, we want to build an assignment result = arg where
1162 arg is the PHI argument associated with the true edge. For
1163 EQ_EXPR we want the PHI argument associated with the false edge. */
1164 e = (code == NE_EXPR ? true_edge : false_edge);
1165
1166 /* Unfortunately, E may not reach BB (it may instead have gone to
1167 OTHER_BLOCK). If that is the case, then we want the single outgoing
1168 edge from OTHER_BLOCK which reaches BB and represents the desired
1169 path from COND_BLOCK. */
1170 if (e->dest == middle_bb)
1171 e = single_succ_edge (e->dest);
1172
1173 /* Now we know the incoming edge to BB that has the argument for the
1174 RHS of our new assignment statement. */
1175 if (e0 == e)
1176 arg = arg0;
1177 else
1178 arg = arg1;
1179
1180 /* If the middle basic block was empty or only defines the
1181 PHI arguments, and this is the single PHI whose args differ
1182 for the edges e0 and e1, then we can remove the middle basic block. */
1183 if (empty_or_with_defined_p
1184 && single_non_singleton_phi_for_edges (phi_nodes (gimple_bb (phi)),
1185 e0, e1) == phi)
1186 {
1187 replace_phi_edge_with_variable (cond_bb, e1, phi, arg);
1188 /* Note that we optimized this PHI. */
1189 return 2;
1190 }
1191 else
1192 {
1193 /* Replace the PHI arguments with arg. */
1194 SET_PHI_ARG_DEF (phi, e0->dest_idx, arg);
1195 SET_PHI_ARG_DEF (phi, e1->dest_idx, arg);
1196 if (dump_file && (dump_flags & TDF_DETAILS))
1197 {
1198 fprintf (dump_file, "PHI ");
1199 print_generic_expr (dump_file, gimple_phi_result (phi));
1200 fprintf (dump_file, " reduced for COND_EXPR in block %d to ",
1201 cond_bb->index);
1202 print_generic_expr (dump_file, arg);
1203 fprintf (dump_file, ".\n");
1204 }
1205 return 1;
1206 }
1207
1208 }
1209
1210 /* Now optimize (x != 0) ? x + y : y to just x + y. */
1211 gsi = gsi_last_nondebug_bb (middle_bb);
1212 if (gsi_end_p (gsi))
1213 return 0;
1214
1215 gimple *assign = gsi_stmt (gsi);
1216 if (!is_gimple_assign (assign)
1217 || gimple_assign_rhs_class (assign) != GIMPLE_BINARY_RHS
1218 || (!INTEGRAL_TYPE_P (TREE_TYPE (arg0))
1219 && !POINTER_TYPE_P (TREE_TYPE (arg0))))
1220 return 0;
1221
1222 /* Punt if there are (degenerate) PHIs in middle_bb; there should not be. */
1223 if (!gimple_seq_empty_p (phi_nodes (middle_bb)))
1224 return 0;
1225
1226 /* Allow up to 2 cheap preparation statements that prepare the argument
1227 for assign, e.g.:
1228 if (y_4 != 0)
1229 goto <bb 3>;
1230 else
1231 goto <bb 4>;
1232 <bb 3>:
1233 _1 = (int) y_4;
1234 iftmp.0_6 = x_5(D) r<< _1;
1235 <bb 4>:
1236 # iftmp.0_2 = PHI <iftmp.0_6(3), x_5(D)(2)>
1237 or:
1238 if (y_3(D) == 0)
1239 goto <bb 4>;
1240 else
1241 goto <bb 3>;
1242 <bb 3>:
1243 y_4 = y_3(D) & 31;
1244 _1 = (int) y_4;
1245 _6 = x_5(D) r<< _1;
1246 <bb 4>:
1247 # _2 = PHI <x_5(D)(2), _6(3)> */
1248 gimple *prep_stmt[2] = { NULL, NULL };
1249 int prep_cnt;
1250 for (prep_cnt = 0; ; prep_cnt++)
1251 {
1252 gsi_prev_nondebug (&gsi);
1253 if (gsi_end_p (gsi))
1254 break;
1255
1256 gimple *g = gsi_stmt (gsi);
1257 if (gimple_code (g) == GIMPLE_LABEL)
1258 break;
1259
1260 if (prep_cnt == 2 || !is_gimple_assign (g))
1261 return 0;
1262
1263 tree lhs = gimple_assign_lhs (g);
1264 tree rhs1 = gimple_assign_rhs1 (g);
1265 use_operand_p use_p;
1266 gimple *use_stmt;
1267 if (TREE_CODE (lhs) != SSA_NAME
1268 || TREE_CODE (rhs1) != SSA_NAME
1269 || !INTEGRAL_TYPE_P (TREE_TYPE (lhs))
1270 || !INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
1271 || !single_imm_use (lhs, &use_p, &use_stmt)
1272 || use_stmt != (prep_cnt ? prep_stmt[prep_cnt - 1] : assign))
1273 return 0;
1274 switch (gimple_assign_rhs_code (g))
1275 {
1276 CASE_CONVERT:
1277 break;
1278 case PLUS_EXPR:
1279 case BIT_AND_EXPR:
1280 case BIT_IOR_EXPR:
1281 case BIT_XOR_EXPR:
1282 if (TREE_CODE (gimple_assign_rhs2 (g)) != INTEGER_CST)
1283 return 0;
1284 break;
1285 default:
1286 return 0;
1287 }
1288 prep_stmt[prep_cnt] = g;
1289 }
1290
1291 /* Only transform if it removes the condition. */
1292 if (!single_non_singleton_phi_for_edges (phi_nodes (gimple_bb (phi)), e0, e1))
1293 return 0;
1294
1295 /* Size-wise, this is always profitable. */
1296 if (optimize_bb_for_speed_p (cond_bb)
1297 /* The special case is useless if it has a low probability. */
1298 && profile_status_for_fn (cfun) != PROFILE_ABSENT
1299 && EDGE_PRED (middle_bb, 0)->probability < profile_probability::even ()
1300 /* If assign is cheap, there is no point avoiding it. */
1301 && estimate_num_insns_seq (bb_seq (middle_bb), &eni_time_weights)
1302 >= 3 * estimate_num_insns (cond, &eni_time_weights))
1303 return 0;
1304
1305 tree lhs = gimple_assign_lhs (assign);
1306 tree rhs1 = gimple_assign_rhs1 (assign);
1307 tree rhs2 = gimple_assign_rhs2 (assign);
1308 enum tree_code code_def = gimple_assign_rhs_code (assign);
1309 tree cond_lhs = gimple_cond_lhs (cond);
1310 tree cond_rhs = gimple_cond_rhs (cond);
1311
1312 /* Propagate the cond_rhs constant through preparation stmts;
1313 make sure UB isn't invoked while doing that. */
1314 for (int i = prep_cnt - 1; i >= 0; --i)
1315 {
1316 gimple *g = prep_stmt[i];
1317 tree grhs1 = gimple_assign_rhs1 (g);
1318 if (!operand_equal_for_phi_arg_p (cond_lhs, grhs1))
1319 return 0;
1320 cond_lhs = gimple_assign_lhs (g);
1321 cond_rhs = fold_convert (TREE_TYPE (grhs1), cond_rhs);
1322 if (TREE_CODE (cond_rhs) != INTEGER_CST
1323 || TREE_OVERFLOW (cond_rhs))
1324 return 0;
1325 if (gimple_assign_rhs_class (g) == GIMPLE_BINARY_RHS)
1326 {
1327 cond_rhs = int_const_binop (gimple_assign_rhs_code (g), cond_rhs,
1328 gimple_assign_rhs2 (g));
1329 if (TREE_OVERFLOW (cond_rhs))
1330 return 0;
1331 }
1332 cond_rhs = fold_convert (TREE_TYPE (cond_lhs), cond_rhs);
1333 if (TREE_CODE (cond_rhs) != INTEGER_CST
1334 || TREE_OVERFLOW (cond_rhs))
1335 return 0;
1336 }
1337
1338 if (((code == NE_EXPR && e1 == false_edge)
1339 || (code == EQ_EXPR && e1 == true_edge))
1340 && arg0 == lhs
1341 && ((arg1 == rhs1
1342 && operand_equal_for_phi_arg_p (rhs2, cond_lhs)
1343 && neutral_element_p (code_def, cond_rhs, true))
1344 || (arg1 == rhs2
1345 && operand_equal_for_phi_arg_p (rhs1, cond_lhs)
1346 && neutral_element_p (code_def, cond_rhs, false))
1347 || (operand_equal_for_phi_arg_p (arg1, cond_rhs)
1348 && ((operand_equal_for_phi_arg_p (rhs2, cond_lhs)
1349 && absorbing_element_p (code_def, cond_rhs, true, rhs2))
1350 || (operand_equal_for_phi_arg_p (rhs1, cond_lhs)
1351 && absorbing_element_p (code_def,
1352 cond_rhs, false, rhs2))))))
1353 {
1354 gsi = gsi_for_stmt (cond);
1355 /* Moving ASSIGN might change VR of lhs, e.g. when moving u_6
1356 def-stmt in:
1357 if (n_5 != 0)
1358 goto <bb 3>;
1359 else
1360 goto <bb 4>;
1361
1362 <bb 3>:
1363 # RANGE [0, 4294967294]
1364 u_6 = n_5 + 4294967295;
1365
1366 <bb 4>:
1367 # u_3 = PHI <u_6(3), 4294967295(2)> */
1368 reset_flow_sensitive_info (lhs);
1369 if (INTEGRAL_TYPE_P (TREE_TYPE (lhs)))
1370 {
1371 /* If available, we can use VR of phi result at least. */
1372 tree phires = gimple_phi_result (phi);
1373 struct range_info_def *phires_range_info
1374 = SSA_NAME_RANGE_INFO (phires);
1375 if (phires_range_info)
1376 duplicate_ssa_name_range_info (lhs, SSA_NAME_RANGE_TYPE (phires),
1377 phires_range_info);
1378 }
1379 gimple_stmt_iterator gsi_from;
1380 for (int i = prep_cnt - 1; i >= 0; --i)
1381 {
1382 tree plhs = gimple_assign_lhs (prep_stmt[i]);
1383 reset_flow_sensitive_info (plhs);
1384 gsi_from = gsi_for_stmt (prep_stmt[i]);
1385 gsi_move_before (&gsi_from, &gsi);
1386 }
1387 gsi_from = gsi_for_stmt (assign);
1388 gsi_move_before (&gsi_from, &gsi);
1389 replace_phi_edge_with_variable (cond_bb, e1, phi, lhs);
1390 return 2;
1391 }
1392
1393 return 0;
1394 }
1395
1396 /* The function minmax_replacement does the main work of doing the minmax
1397 replacement. Return true if the replacement is done. Otherwise return
1398 false.
1399 BB is the basic block where the replacement is going to be done. ARG0
1400 is argument 0 from the PHI. Likewise for ARG1. */
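
/* Illustrative sketches (hypothetical source):

     r = a < b ? a : b;   becomes MIN_EXPR <a, b>
     r = a < 5 ? 4 : a;   becomes MAX_EXPR <a, 4>, because a < 5 is
                          equivalent to a <= 4

   The second form is what the alt_smaller/alt_larger handling below
   is for.  */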
1401
1402 static bool
1403 minmax_replacement (basic_block cond_bb, basic_block middle_bb,
1404 edge e0, edge e1, gimple *phi,
1405 tree arg0, tree arg1)
1406 {
1407 tree result;
1408 edge true_edge, false_edge;
1409 enum tree_code minmax, ass_code;
1410 tree smaller, larger, arg_true, arg_false;
1411 gimple_stmt_iterator gsi, gsi_from;
1412
1413 tree type = TREE_TYPE (PHI_RESULT (phi));
1414
1415 /* The optimization may be unsafe due to NaNs. */
1416 if (HONOR_NANS (type) || HONOR_SIGNED_ZEROS (type))
1417 return false;
1418
1419 gcond *cond = as_a <gcond *> (last_stmt (cond_bb));
1420 enum tree_code cmp = gimple_cond_code (cond);
1421 tree rhs = gimple_cond_rhs (cond);
1422
1423 /* Turn EQ/NE of extreme values to order comparisons. */
1424 if ((cmp == NE_EXPR || cmp == EQ_EXPR)
1425 && TREE_CODE (rhs) == INTEGER_CST
1426 && INTEGRAL_TYPE_P (TREE_TYPE (rhs)))
1427 {
1428 if (wi::eq_p (wi::to_wide (rhs), wi::min_value (TREE_TYPE (rhs))))
1429 {
1430 cmp = (cmp == EQ_EXPR) ? LT_EXPR : GE_EXPR;
1431 rhs = wide_int_to_tree (TREE_TYPE (rhs),
1432 wi::min_value (TREE_TYPE (rhs)) + 1);
1433 }
1434 else if (wi::eq_p (wi::to_wide (rhs), wi::max_value (TREE_TYPE (rhs))))
1435 {
1436 cmp = (cmp == EQ_EXPR) ? GT_EXPR : LE_EXPR;
1437 rhs = wide_int_to_tree (TREE_TYPE (rhs),
1438 wi::max_value (TREE_TYPE (rhs)) - 1);
1439 }
1440 }
1441
1442 /* This transformation is only valid for order comparisons. Record which
1443 operand is smaller/larger if the result of the comparison is true. */
1444 tree alt_smaller = NULL_TREE;
1445 tree alt_larger = NULL_TREE;
1446 if (cmp == LT_EXPR || cmp == LE_EXPR)
1447 {
1448 smaller = gimple_cond_lhs (cond);
1449 larger = rhs;
1450 /* If we have smaller < CST it is equivalent to smaller <= CST-1.
1451 Likewise smaller <= CST is equivalent to smaller < CST+1. */
1452 if (TREE_CODE (larger) == INTEGER_CST
1453 && INTEGRAL_TYPE_P (TREE_TYPE (larger)))
1454 {
1455 if (cmp == LT_EXPR)
1456 {
1457 wi::overflow_type overflow;
1458 wide_int alt = wi::sub (wi::to_wide (larger), 1,
1459 TYPE_SIGN (TREE_TYPE (larger)),
1460 &overflow);
1461 if (! overflow)
1462 alt_larger = wide_int_to_tree (TREE_TYPE (larger), alt);
1463 }
1464 else
1465 {
1466 wi::overflow_type overflow;
1467 wide_int alt = wi::add (wi::to_wide (larger), 1,
1468 TYPE_SIGN (TREE_TYPE (larger)),
1469 &overflow);
1470 if (! overflow)
1471 alt_larger = wide_int_to_tree (TREE_TYPE (larger), alt);
1472 }
1473 }
1474 }
1475 else if (cmp == GT_EXPR || cmp == GE_EXPR)
1476 {
1477 smaller = rhs;
1478 larger = gimple_cond_lhs (cond);
1479 /* If we have larger > CST it is equivalent to larger >= CST+1.
1480 Likewise larger >= CST is equivalent to larger > CST-1. */
1481 if (TREE_CODE (smaller) == INTEGER_CST
1482 && INTEGRAL_TYPE_P (TREE_TYPE (smaller)))
1483 {
1484 wi::overflow_type overflow;
1485 if (cmp == GT_EXPR)
1486 {
1487 wide_int alt = wi::add (wi::to_wide (smaller), 1,
1488 TYPE_SIGN (TREE_TYPE (smaller)),
1489 &overflow);
1490 if (! overflow)
1491 alt_smaller = wide_int_to_tree (TREE_TYPE (smaller), alt);
1492 }
1493 else
1494 {
1495 wide_int alt = wi::sub (wi::to_wide (smaller), 1,
1496 TYPE_SIGN (TREE_TYPE (smaller)),
1497 &overflow);
1498 if (! overflow)
1499 alt_smaller = wide_int_to_tree (TREE_TYPE (smaller), alt);
1500 }
1501 }
1502 }
1503 else
1504 return false;
1505
1506 /* Handle the special case of (signed_type)x < 0 being equivalent
1507 to x > MAX_VAL(signed_type) and (signed_type)x >= 0 equivalent
1508 to x <= MAX_VAL(signed_type). */
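  /* E.g. (illustrative), for unsigned int x,

       r = (int) x < 0 ? 0x7fffffffu : x;

     is recognized as MIN_EXPR <x, 0x7fffffff>, since (int) x < 0 is
     the same as x > 0x7fffffff.  */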
1509 if ((cmp == GE_EXPR || cmp == LT_EXPR)
1510 && INTEGRAL_TYPE_P (type)
1511 && TYPE_UNSIGNED (type)
1512 && integer_zerop (rhs))
1513 {
1514 tree op = gimple_cond_lhs (cond);
1515 if (TREE_CODE (op) == SSA_NAME
1516 && INTEGRAL_TYPE_P (TREE_TYPE (op))
1517 && !TYPE_UNSIGNED (TREE_TYPE (op)))
1518 {
1519 gimple *def_stmt = SSA_NAME_DEF_STMT (op);
1520 if (gimple_assign_cast_p (def_stmt))
1521 {
1522 tree op1 = gimple_assign_rhs1 (def_stmt);
1523 if (INTEGRAL_TYPE_P (TREE_TYPE (op1))
1524 && TYPE_UNSIGNED (TREE_TYPE (op1))
1525 && (TYPE_PRECISION (TREE_TYPE (op))
1526 == TYPE_PRECISION (TREE_TYPE (op1)))
1527 && useless_type_conversion_p (type, TREE_TYPE (op1)))
1528 {
1529 wide_int w1 = wi::max_value (TREE_TYPE (op));
1530 wide_int w2 = wi::add (w1, 1);
1531 if (cmp == LT_EXPR)
1532 {
1533 larger = op1;
1534 smaller = wide_int_to_tree (TREE_TYPE (op1), w1);
1535 alt_smaller = wide_int_to_tree (TREE_TYPE (op1), w2);
1536 alt_larger = NULL_TREE;
1537 }
1538 else
1539 {
1540 smaller = op1;
1541 larger = wide_int_to_tree (TREE_TYPE (op1), w1);
1542 alt_larger = wide_int_to_tree (TREE_TYPE (op1), w2);
1543 alt_smaller = NULL_TREE;
1544 }
1545 }
1546 }
1547 }
1548 }
1549
1550 /* We need to know which is the true edge and which is the false
1551 edge so that we can tell which PHI argument corresponds to the true and which to the false edge. */
1552 extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
1553
1554 /* Forward the edges over the middle basic block. */
1555 if (true_edge->dest == middle_bb)
1556 true_edge = EDGE_SUCC (true_edge->dest, 0);
1557 if (false_edge->dest == middle_bb)
1558 false_edge = EDGE_SUCC (false_edge->dest, 0);
1559
1560 if (true_edge == e0)
1561 {
1562 gcc_assert (false_edge == e1);
1563 arg_true = arg0;
1564 arg_false = arg1;
1565 }
1566 else
1567 {
1568 gcc_assert (false_edge == e0);
1569 gcc_assert (true_edge == e1);
1570 arg_true = arg1;
1571 arg_false = arg0;
1572 }
1573
1574 if (empty_block_p (middle_bb))
1575 {
1576 if ((operand_equal_for_phi_arg_p (arg_true, smaller)
1577 || (alt_smaller
1578 && operand_equal_for_phi_arg_p (arg_true, alt_smaller)))
1579 && (operand_equal_for_phi_arg_p (arg_false, larger)
1580 || (alt_larger
1581 && operand_equal_for_phi_arg_p (arg_false, alt_larger))))
1582 {
1583 /* Case
1584
1585 if (smaller < larger)
1586 rslt = smaller;
1587 else
1588 rslt = larger; */
1589 minmax = MIN_EXPR;
1590 }
1591 else if ((operand_equal_for_phi_arg_p (arg_false, smaller)
1592 || (alt_smaller
1593 && operand_equal_for_phi_arg_p (arg_false, alt_smaller)))
1594 && (operand_equal_for_phi_arg_p (arg_true, larger)
1595 || (alt_larger
1596 && operand_equal_for_phi_arg_p (arg_true, alt_larger))))
1597 minmax = MAX_EXPR;
1598 else
1599 return false;
1600 }
1601 else
1602 {
1603 /* Recognize the following case, assuming d <= u:
1604
1605 if (a <= u)
1606 b = MAX (a, d);
1607 x = PHI <b, u>
1608
1609 This is equivalent to
1610
1611 b = MAX (a, d);
1612 x = MIN (b, u); */
1613
1614 gimple *assign = last_and_only_stmt (middle_bb);
1615 tree lhs, op0, op1, bound;
1616
1617 if (!assign
1618 || gimple_code (assign) != GIMPLE_ASSIGN)
1619 return false;
1620
1621 lhs = gimple_assign_lhs (assign);
1622 ass_code = gimple_assign_rhs_code (assign);
1623 if (ass_code != MAX_EXPR && ass_code != MIN_EXPR)
1624 return false;
1625 op0 = gimple_assign_rhs1 (assign);
1626 op1 = gimple_assign_rhs2 (assign);
1627
1628 if (true_edge->src == middle_bb)
1629 {
1630 /* We got here if the condition is true, i.e., SMALLER < LARGER. */
1631 if (!operand_equal_for_phi_arg_p (lhs, arg_true))
1632 return false;
1633
1634 if (operand_equal_for_phi_arg_p (arg_false, larger)
1635 || (alt_larger
1636 && operand_equal_for_phi_arg_p (arg_false, alt_larger)))
1637 {
1638 /* Case
1639
1640 if (smaller < larger)
1641 {
1642 r' = MAX_EXPR (smaller, bound)
1643 }
1644 r = PHI <r', larger> --> to be turned to MIN_EXPR. */
1645 if (ass_code != MAX_EXPR)
1646 return false;
1647
1648 minmax = MIN_EXPR;
1649 if (operand_equal_for_phi_arg_p (op0, smaller)
1650 || (alt_smaller
1651 && operand_equal_for_phi_arg_p (op0, alt_smaller)))
1652 bound = op1;
1653 else if (operand_equal_for_phi_arg_p (op1, smaller)
1654 || (alt_smaller
1655 && operand_equal_for_phi_arg_p (op1, alt_smaller)))
1656 bound = op0;
1657 else
1658 return false;
1659
1660 /* We need BOUND <= LARGER. */
1661 if (!integer_nonzerop (fold_build2 (LE_EXPR, boolean_type_node,
1662 bound, larger)))
1663 return false;
1664 }
1665 else if (operand_equal_for_phi_arg_p (arg_false, smaller)
1666 || (alt_smaller
1667 && operand_equal_for_phi_arg_p (arg_false, alt_smaller)))
1668 {
1669 /* Case
1670
1671 if (smaller < larger)
1672 {
1673 r' = MIN_EXPR (larger, bound)
1674 }
1675 r = PHI <r', smaller> --> to be turned to MAX_EXPR. */
1676 if (ass_code != MIN_EXPR)
1677 return false;
1678
1679 minmax = MAX_EXPR;
1680 if (operand_equal_for_phi_arg_p (op0, larger)
1681 || (alt_larger
1682 && operand_equal_for_phi_arg_p (op0, alt_larger)))
1683 bound = op1;
1684 else if (operand_equal_for_phi_arg_p (op1, larger)
1685 || (alt_larger
1686 && operand_equal_for_phi_arg_p (op1, alt_larger)))
1687 bound = op0;
1688 else
1689 return false;
1690
1691 /* We need BOUND >= SMALLER. */
1692 if (!integer_nonzerop (fold_build2 (GE_EXPR, boolean_type_node,
1693 bound, smaller)))
1694 return false;
1695 }
1696 else
1697 return false;
1698 }
1699 else
1700 {
1701 /* We got here if the condition is false, i.e., SMALLER > LARGER. */
1702 if (!operand_equal_for_phi_arg_p (lhs, arg_false))
1703 return false;
1704
1705 if (operand_equal_for_phi_arg_p (arg_true, larger)
1706 || (alt_larger
1707 && operand_equal_for_phi_arg_p (arg_true, alt_larger)))
1708 {
1709 /* Case
1710
1711 if (smaller > larger)
1712 {
1713 r' = MIN_EXPR (smaller, bound)
1714 }
1715 r = PHI <r', larger> --> to be turned to MAX_EXPR. */
1716 if (ass_code != MIN_EXPR)
1717 return false;
1718
1719 minmax = MAX_EXPR;
1720 if (operand_equal_for_phi_arg_p (op0, smaller)
1721 || (alt_smaller
1722 && operand_equal_for_phi_arg_p (op0, alt_smaller)))
1723 bound = op1;
1724 else if (operand_equal_for_phi_arg_p (op1, smaller)
1725 || (alt_smaller
1726 && operand_equal_for_phi_arg_p (op1, alt_smaller)))
1727 bound = op0;
1728 else
1729 return false;
1730
1731 /* We need BOUND >= LARGER. */
1732 if (!integer_nonzerop (fold_build2 (GE_EXPR, boolean_type_node,
1733 bound, larger)))
1734 return false;
1735 }
1736 else if (operand_equal_for_phi_arg_p (arg_true, smaller)
1737 || (alt_smaller
1738 && operand_equal_for_phi_arg_p (arg_true, alt_smaller)))
1739 {
1740 /* Case
1741
1742 if (smaller > larger)
1743 {
1744 r' = MAX_EXPR (larger, bound)
1745 }
1746 r = PHI <r', smaller> --> to be turned to MIN_EXPR. */
1747 if (ass_code != MAX_EXPR)
1748 return false;
1749
1750 minmax = MIN_EXPR;
1751 if (operand_equal_for_phi_arg_p (op0, larger))
1752 bound = op1;
1753 else if (operand_equal_for_phi_arg_p (op1, larger))
1754 bound = op0;
1755 else
1756 return false;
1757
1758 /* We need BOUND <= SMALLER. */
1759 if (!integer_nonzerop (fold_build2 (LE_EXPR, boolean_type_node,
1760 bound, smaller)))
1761 return false;
1762 }
1763 else
1764 return false;
1765 }
1766
1767 /* Move the statement from the middle block. */
1768 gsi = gsi_last_bb (cond_bb);
1769 gsi_from = gsi_last_nondebug_bb (middle_bb);
1770 reset_flow_sensitive_info (SINGLE_SSA_TREE_OPERAND (gsi_stmt (gsi_from),
1771 SSA_OP_DEF));
1772 gsi_move_before (&gsi_from, &gsi);
1773 }
1774
1775 /* Emit the statement to compute min/max. */
1776 gimple_seq stmts = NULL;
1777 tree phi_result = PHI_RESULT (phi);
1778 result = gimple_build (&stmts, minmax, TREE_TYPE (phi_result), arg0, arg1);
1779 /* Duplicate range info if we're the only thing setting the target PHI. */
1780 if (!gimple_seq_empty_p (stmts)
1781 && EDGE_COUNT (gimple_bb (phi)->preds) == 2
1782 && !POINTER_TYPE_P (TREE_TYPE (phi_result))
1783 && SSA_NAME_RANGE_INFO (phi_result))
1784 duplicate_ssa_name_range_info (result, SSA_NAME_RANGE_TYPE (phi_result),
1785 SSA_NAME_RANGE_INFO (phi_result));
1786
1787 gsi = gsi_last_bb (cond_bb);
1788 gsi_insert_seq_before (&gsi, stmts, GSI_NEW_STMT);
1789
1790 replace_phi_edge_with_variable (cond_bb, e1, phi, result);
1791
1792 return true;
1793 }
1794
1795 /* Convert
1796
1797 <bb 2>
1798 if (b_4(D) != 0)
1799 goto <bb 3>
1800 else
1801 goto <bb 4>
1802
1803 <bb 3>
1804 _2 = (unsigned long) b_4(D);
1805 _9 = __builtin_popcountl (_2);
1806 OR
1807 _9 = __builtin_popcountl (b_4(D));
1808
1809 <bb 4>
1810 c_12 = PHI <0(2), _9(3)>
1811
1812 Into
1813 <bb 2>
1814 _2 = (unsigned long) b_4(D);
1815 _9 = __builtin_popcountl (_2);
1816 OR
1817 _9 = __builtin_popcountl (b_4(D));
1818
1819 <bb 4>
1820 c_12 = PHI <_9(2)>
1821
1822 Similarly for __builtin_clz or __builtin_ctz if
1823 C?Z_DEFINED_VALUE_AT_ZERO is 2, optab is present and
1824 instead of 0 above it uses the value from that macro. */
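/* As a source-level sketch (illustrative only, not taken from the
   testsuite), this turns

     c = b != 0 ? __builtin_popcountl (b) : 0;

   into the unconditional

     c = __builtin_popcountl (b);

   which is valid because __builtin_popcountl (0) is 0 anyway.  */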
1825
1826 static bool
1827 cond_removal_in_popcount_clz_ctz_pattern (basic_block cond_bb,
1828 basic_block middle_bb,
1829 edge e1, edge e2, gimple *phi,
1830 tree arg0, tree arg1)
1831 {
1832 gimple *cond;
1833 gimple_stmt_iterator gsi, gsi_from;
1834 gimple *call;
1835 gimple *cast = NULL;
1836 tree lhs, arg;
1837
1838 /* Check that
1839 _2 = (unsigned long) b_4(D);
1840 _9 = __builtin_popcountl (_2);
1841 OR
1842 _9 = __builtin_popcountl (b_4(D));
1843 are the only stmts in the middle_bb. */
1844
1845 gsi = gsi_start_nondebug_after_labels_bb (middle_bb);
1846 if (gsi_end_p (gsi))
1847 return false;
1848 cast = gsi_stmt (gsi);
1849 gsi_next_nondebug (&gsi);
1850 if (!gsi_end_p (gsi))
1851 {
1852 call = gsi_stmt (gsi);
1853 gsi_next_nondebug (&gsi);
1854 if (!gsi_end_p (gsi))
1855 return false;
1856 }
1857 else
1858 {
1859 call = cast;
1860 cast = NULL;
1861 }
1862
1863 /* Check that we have a popcount/clz/ctz builtin. */
1864 if (!is_gimple_call (call) || gimple_call_num_args (call) != 1)
1865 return false;
1866
1867 arg = gimple_call_arg (call, 0);
1868 lhs = gimple_get_lhs (call);
1869
1870 if (lhs == NULL_TREE)
1871 return false;
1872
1873 combined_fn cfn = gimple_call_combined_fn (call);
1874 internal_fn ifn = IFN_LAST;
1875 int val = 0;
1876 switch (cfn)
1877 {
1878 CASE_CFN_POPCOUNT:
1879 break;
1880 CASE_CFN_CLZ:
1881 if (INTEGRAL_TYPE_P (TREE_TYPE (arg)))
1882 {
1883 tree type = TREE_TYPE (arg);
1884 if (direct_internal_fn_supported_p (IFN_CLZ, type, OPTIMIZE_FOR_BOTH)
1885 && CLZ_DEFINED_VALUE_AT_ZERO (SCALAR_INT_TYPE_MODE (type),
1886 val) == 2)
1887 {
1888 ifn = IFN_CLZ;
1889 break;
1890 }
1891 }
1892 return false;
1893 CASE_CFN_CTZ:
1894 if (INTEGRAL_TYPE_P (TREE_TYPE (arg)))
1895 {
1896 tree type = TREE_TYPE (arg);
1897 if (direct_internal_fn_supported_p (IFN_CTZ, type, OPTIMIZE_FOR_BOTH)
1898 && CTZ_DEFINED_VALUE_AT_ZERO (SCALAR_INT_TYPE_MODE (type),
1899 val) == 2)
1900 {
1901 ifn = IFN_CTZ;
1902 break;
1903 }
1904 }
1905 return false;
1906 default:
1907 return false;
1908 }
1909
1910 if (cast)
1911 {
1912 /* We have a cast stmt feeding popcount/clz/ctz builtin. */
1913 /* Check that we have a cast prior to that. */
1914 if (gimple_code (cast) != GIMPLE_ASSIGN
1915 || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (cast)))
1916 return false;
1917 /* Result of the cast stmt is the argument to the builtin. */
1918 if (arg != gimple_assign_lhs (cast))
1919 return false;
1920 arg = gimple_assign_rhs1 (cast);
1921 }
1922
1923 cond = last_stmt (cond_bb);
1924
1925 /* Cond_bb has a check for b_4 [!=|==] 0 before calling the popcount/clz/ctz
1926 builtin. */
1927 if (gimple_code (cond) != GIMPLE_COND
1928 || (gimple_cond_code (cond) != NE_EXPR
1929 && gimple_cond_code (cond) != EQ_EXPR)
1930 || !integer_zerop (gimple_cond_rhs (cond))
1931 || arg != gimple_cond_lhs (cond))
1932 return false;
1933
1934 /* Canonicalize. */
1935 if ((e2->flags & EDGE_TRUE_VALUE
1936 && gimple_cond_code (cond) == NE_EXPR)
1937 || (e1->flags & EDGE_TRUE_VALUE
1938 && gimple_cond_code (cond) == EQ_EXPR))
1939 {
1940 std::swap (arg0, arg1);
1941 std::swap (e1, e2);
1942 }
1943
1944 /* Check PHI arguments. */
1945 if (lhs != arg0
1946 || TREE_CODE (arg1) != INTEGER_CST
1947 || wi::to_wide (arg1) != val)
1948 return false;
1949
1950 /* And insert the popcount/clz/ctz builtin and cast stmt into cond_bb,
1951 before the condition. */
1952 gsi = gsi_last_bb (cond_bb);
1953 if (cast)
1954 {
1955 gsi_from = gsi_for_stmt (cast);
1956 gsi_move_before (&gsi_from, &gsi);
1957 reset_flow_sensitive_info (gimple_get_lhs (cast));
1958 }
1959 gsi_from = gsi_for_stmt (call);
1960 if (ifn == IFN_LAST || gimple_call_internal_p (call))
1961 gsi_move_before (&gsi_from, &gsi);
1962 else
1963 {
1964 /* For __builtin_c[lt]z* force .C[LT]Z ifn, because only
1965 the latter is well defined at zero. */
1966 call = gimple_build_call_internal (ifn, 1, gimple_call_arg (call, 0));
1967 gimple_call_set_lhs (call, lhs);
1968 gsi_insert_before (&gsi, call, GSI_SAME_STMT);
1969 gsi_remove (&gsi_from, true);
1970 }
1971 reset_flow_sensitive_info (lhs);
1972
1973 /* Now update the PHI and remove unneeded bbs. */
1974 replace_phi_edge_with_variable (cond_bb, e2, phi, lhs);
1975 return true;
1976 }
1977
1978 /* The function abs_replacement does the main work of performing the
1979 absolute value replacement. Return true if the replacement is done,
1980 otherwise return false.
1981 COND_BB is the basic block where the replacement is inserted. ARG0
1982 is argument 0 from the PHI; likewise for ARG1. */
1983
1984 static bool
1985 abs_replacement (basic_block cond_bb, basic_block middle_bb,
1986 edge e0 ATTRIBUTE_UNUSED, edge e1,
1987 gimple *phi, tree arg0, tree arg1)
1988 {
1989 tree result;
1990 gassign *new_stmt;
1991 gimple *cond;
1992 gimple_stmt_iterator gsi;
1993 edge true_edge, false_edge;
1994 gimple *assign;
1995 edge e;
1996 tree rhs, lhs;
1997 bool negate;
1998 enum tree_code cond_code;
1999
2000 /* If the type says honor signed zeros we cannot do this
2001 optimization. */
2002 if (HONOR_SIGNED_ZEROS (arg1))
2003 return false;
2004
2005 /* MIDDLE_BB must have only one executable statement which must have the
2006 form arg0 = -arg1 or arg1 = -arg0. */
2007
2008 assign = last_and_only_stmt (middle_bb);
2009 /* If we did not find the proper negation assignment, then we cannot
2010 optimize. */
2011 if (assign == NULL)
2012 return false;
2013
2014 /* If we got here, then we have found the only executable statement
2015 in MIDDLE_BB. If it is anything other than arg0 = -arg1 or
2016 arg1 = -arg0, then we cannot optimize. */
2017 if (gimple_code (assign) != GIMPLE_ASSIGN)
2018 return false;
2019
2020 lhs = gimple_assign_lhs (assign);
2021
2022 if (gimple_assign_rhs_code (assign) != NEGATE_EXPR)
2023 return false;
2024
2025 rhs = gimple_assign_rhs1 (assign);
2026
2027 /* The assignment has to be arg0 = -arg1 or arg1 = -arg0. */
2028 if (!(lhs == arg0 && rhs == arg1)
2029 && !(lhs == arg1 && rhs == arg0))
2030 return false;
2031
2032 cond = last_stmt (cond_bb);
2033 result = PHI_RESULT (phi);
2034
2035 /* Only relationals comparing arg[01] against zero are interesting. */
2036 cond_code = gimple_cond_code (cond);
2037 if (cond_code != GT_EXPR && cond_code != GE_EXPR
2038 && cond_code != LT_EXPR && cond_code != LE_EXPR)
2039 return false;
2040
2041 /* Make sure the conditional is arg[01] OP y. */
2042 if (gimple_cond_lhs (cond) != rhs)
2043 return false;
2044
2045 if (FLOAT_TYPE_P (TREE_TYPE (gimple_cond_rhs (cond)))
2046 ? real_zerop (gimple_cond_rhs (cond))
2047 : integer_zerop (gimple_cond_rhs (cond)))
2048 ;
2049 else
2050 return false;
2051
2052 /* We need to know which is the true edge and which is the false
2053 edge so that we know if we have abs or negative abs. */
2054 extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
2055
2056 /* For GT_EXPR/GE_EXPR, if the true edge goes to MIDDLE_BB, then we
2057 will need to negate the result. Similarly for LT_EXPR/LE_EXPR if
2058 the false edge goes to MIDDLE_BB. */
2059 if (cond_code == GT_EXPR || cond_code == GE_EXPR)
2060 e = true_edge;
2061 else
2062 e = false_edge;
2063
2064 if (e->dest == middle_bb)
2065 negate = true;
2066 else
2067 negate = false;
2068
2069 /* If the original code negates only when the operand is positive, make
2070 sure not to introduce undefined behavior when negating or computing the absolute.
2071 ??? We could use range info if present to check for arg1 == INT_MIN. */
2072 if (negate
2073 && (ANY_INTEGRAL_TYPE_P (TREE_TYPE (arg1))
2074 && ! TYPE_OVERFLOW_WRAPS (TREE_TYPE (arg1))))
2075 return false;
2076
2077 result = duplicate_ssa_name (result, NULL);
2078
2079 if (negate)
2080 lhs = make_ssa_name (TREE_TYPE (result));
2081 else
2082 lhs = result;
2083
2084 /* Build the modify expression with abs expression. */
2085 new_stmt = gimple_build_assign (lhs, ABS_EXPR, rhs);
2086
2087 gsi = gsi_last_bb (cond_bb);
2088 gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);
2089
2090 if (negate)
2091 {
2092 /* Get the right GSI. We want to insert after the recently
2093 added ABS_EXPR statement (which we know the iterator points to,
2094 as it was inserted with GSI_NEW_STMT). */
2095 new_stmt = gimple_build_assign (result, NEGATE_EXPR, lhs);
2096
2097 gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
2098 }
2099
2100 replace_phi_edge_with_variable (cond_bb, e1, phi, result);
2101
2102 /* Note that we optimized this PHI. */
2103 return true;
2104 }
2105
2106 /* Optimize x < 0 ? ~y : y into (x >> (prec-1)) ^ y. */
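/* For instance (an illustrative sketch, assuming 32-bit int):

     r = x < 0 ? ~y : y;

   becomes

     t = x >> 31;   /* arithmetic shift: all ones if x < 0, else zero */
     r = t ^ y;     /* y ^ -1 == ~y, y ^ 0 == y */

   so no branch or conditional move is needed.  */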
2107
2108 static bool
2109 xor_replacement (basic_block cond_bb, basic_block middle_bb,
2110 edge e0 ATTRIBUTE_UNUSED, edge e1,
2111 gimple *phi, tree arg0, tree arg1)
2112 {
2113 if (!INTEGRAL_TYPE_P (TREE_TYPE (arg1)))
2114 return false;
2115
2116 /* MIDDLE_BB must have only one executable statement which must have the
2117 form arg0 = ~arg1 or arg1 = ~arg0. */
2118
2119 gimple *assign = last_and_only_stmt (middle_bb);
2120 /* If we did not find the proper one's complement assignment, then we cannot
2121 optimize. */
2122 if (assign == NULL)
2123 return false;
2124
2125 /* If we got here, then we have found the only executable statement
2126 in MIDDLE_BB. If it is anything other than arg0 = ~arg1 or
2127 arg1 = ~arg0, then we cannot optimize. */
2128 if (!is_gimple_assign (assign))
2129 return false;
2130
2131 if (gimple_assign_rhs_code (assign) != BIT_NOT_EXPR)
2132 return false;
2133
2134 tree lhs = gimple_assign_lhs (assign);
2135 tree rhs = gimple_assign_rhs1 (assign);
2136
2137 /* The assignment has to be arg0 = ~arg1 or arg1 = ~arg0. */
2138 if (!(lhs == arg0 && rhs == arg1) && !(lhs == arg1 && rhs == arg0))
2139 return false;
2140
2141 gimple *cond = last_stmt (cond_bb);
2142 tree result = PHI_RESULT (phi);
2143
2144 /* Only relationals comparing x against zero are interesting. */
2145 enum tree_code cond_code = gimple_cond_code (cond);
2146 if (cond_code != LT_EXPR && cond_code != GE_EXPR)
2147 return false;
2148
2149 /* Make sure the conditional is x OP 0. */
2150 tree clhs = gimple_cond_lhs (cond);
2151 if (TREE_CODE (clhs) != SSA_NAME
2152 || !INTEGRAL_TYPE_P (TREE_TYPE (clhs))
2153 || TYPE_UNSIGNED (TREE_TYPE (clhs))
2154 || TYPE_PRECISION (TREE_TYPE (clhs)) != TYPE_PRECISION (TREE_TYPE (arg1))
2155 || !integer_zerop (gimple_cond_rhs (cond)))
2156 return false;
2157
2158 /* We need to know which is the true edge and which is the false
2159 edge so that we know if we have xor or inverted xor. */
2160 edge true_edge, false_edge;
2161 extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge);
2162
2163 /* For GE_EXPR, if the true edge goes to MIDDLE_BB, then we
2164 will need to invert the result. Similarly for LT_EXPR if
2165 the false edge goes to MIDDLE_BB. */
2166 edge e;
2167 if (cond_code == GE_EXPR)
2168 e = true_edge;
2169 else
2170 e = false_edge;
2171
2172 bool invert = e->dest == middle_bb;
2173
2174 result = duplicate_ssa_name (result, NULL);
2175
2176 gimple_stmt_iterator gsi = gsi_last_bb (cond_bb);
2177
2178 int prec = TYPE_PRECISION (TREE_TYPE (clhs));
2179 gimple *new_stmt
2180 = gimple_build_assign (make_ssa_name (TREE_TYPE (clhs)), RSHIFT_EXPR, clhs,
2181 build_int_cst (integer_type_node, prec - 1));
2182 gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
2183
2184 if (!useless_type_conversion_p (TREE_TYPE (result), TREE_TYPE (clhs)))
2185 {
2186 new_stmt = gimple_build_assign (make_ssa_name (TREE_TYPE (result)),
2187 NOP_EXPR, gimple_assign_lhs (new_stmt));
2188 gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
2189 }
2190 lhs = gimple_assign_lhs (new_stmt);
2191
2192 if (invert)
2193 {
2194 new_stmt = gimple_build_assign (make_ssa_name (TREE_TYPE (result)),
2195 BIT_NOT_EXPR, rhs);
2196 gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
2197 rhs = gimple_assign_lhs (new_stmt);
2198 }
2199
2200 new_stmt = gimple_build_assign (result, BIT_XOR_EXPR, lhs, rhs);
2201 gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);
2202
2203 replace_phi_edge_with_variable (cond_bb, e1, phi, result);
2204
2205 /* Note that we optimized this PHI. */
2206 return true;
2207 }
2208
2209 /* Auxiliary functions to determine the set of memory accesses which
2210 can't trap because they are preceded by accesses to the same memory
2211 portion. We do that for MEM_REFs, so we only need to track
2212 the SSA_NAME of the pointer indirectly referenced. The algorithm
2213 simply is a walk over all instructions in dominator order. When
2214 we see an MEM_REF we determine if we've already seen the same
2215 ref anywhere up to the root of the dominator tree. If we have, the
2216 current access can't trap. If we don't see any dominating access,
2217 the current access might trap, but might also make later accesses
2218 non-trapping, so we remember it. We need to be careful with loads
2219 or stores, for instance a load might not trap, while a store would,
2220 so if we see a dominating read access this doesn't mean that a later
2221 write access would not trap. Hence we also need to differentiate the
2222 type of access(es) seen.
2223
2224 ??? We currently are very conservative and assume that a load might
2225 trap even if a store doesn't (write-only memory). This is probably
2226 more cautious than necessary.
2227
2228 We currently support a special case: for !TREE_ADDRESSABLE automatic
2229 variables we can ignore whether an access is a load or a store, because
2230 the local stack is always writable. */
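/* For example (an illustrative sketch, not from the sources):

     *p_1 = 0;        /* may trap; remembered in the hash table below */
     if (cond_2)
       *p_1 = 1;      /* same ref dominated by the store above: cannot trap */

   so the second, conditional store is a candidate for cselim.  */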
2231
2232 /* A hash-table of references (MEM_REF/ARRAY_REF/COMPONENT_REF), and in which
2233 basic block an *_REF through it was seen, which would constitute a
2234 no-trap region for the same accesses.
2235
2236 Size is needed to support 2 MEM_REFs of different types, like
2237 MEM<double>(s_1) and MEM<long>(s_1), which would compare equal with
2238 OEP_ADDRESS_OF. */
2239 struct ref_to_bb
2240 {
2241 tree exp;
2242 HOST_WIDE_INT size;
2243 unsigned int phase;
2244 basic_block bb;
2245 };
2246
2247 /* Hashtable helpers. */
2248
2249 struct refs_hasher : free_ptr_hash<ref_to_bb>
2250 {
2251 static inline hashval_t hash (const ref_to_bb *);
2252 static inline bool equal (const ref_to_bb *, const ref_to_bb *);
2253 };
2254
2255 /* Used for quick clearing of the hash-table when we see calls.
2256 Hash entries with phase < nt_call_phase are invalid. */
2257 static unsigned int nt_call_phase;
2258
2259 /* The hash function. */
2260
2261 inline hashval_t
2262 refs_hasher::hash (const ref_to_bb *n)
2263 {
2264 inchash::hash hstate;
2265 inchash::add_expr (n->exp, hstate, OEP_ADDRESS_OF);
2266 hstate.add_hwi (n->size);
2267 return hstate.end ();
2268 }
2269
2270 /* The equality function of *P1 and *P2. */
2271
2272 inline bool
2273 refs_hasher::equal (const ref_to_bb *n1, const ref_to_bb *n2)
2274 {
2275 return operand_equal_p (n1->exp, n2->exp, OEP_ADDRESS_OF)
2276 && n1->size == n2->size;
2277 }
2278
2279 class nontrapping_dom_walker : public dom_walker
2280 {
2281 public:
2282 nontrapping_dom_walker (cdi_direction direction, hash_set<tree> *ps)
2283 : dom_walker (direction), m_nontrapping (ps), m_seen_refs (128)
2284 {}
2285
2286 virtual edge before_dom_children (basic_block);
2287 virtual void after_dom_children (basic_block);
2288
2289 private:
2290
2291 /* We see the expression EXP in basic block BB. If it's an interesting
2292 expression (an MEM_REF through an SSA_NAME) possibly insert the
2293 expression into the set NONTRAP or the hash table of seen expressions.
2294 STORE is true if this expression is on the LHS, otherwise it's on
2295 the RHS. */
2296 void add_or_mark_expr (basic_block, tree, bool);
2297
2298 hash_set<tree> *m_nontrapping;
2299
2300 /* The hash table for remembering what we've seen. */
2301 hash_table<refs_hasher> m_seen_refs;
2302 };
2303
2304 /* Called by walk_dominator_tree, when entering the block BB. */
2305 edge
2306 nontrapping_dom_walker::before_dom_children (basic_block bb)
2307 {
2308 edge e;
2309 edge_iterator ei;
2310 gimple_stmt_iterator gsi;
2311
2312 /* If we haven't seen all our predecessors, clear the hash-table. */
2313 FOR_EACH_EDGE (e, ei, bb->preds)
2314 if ((((size_t)e->src->aux) & 2) == 0)
2315 {
2316 nt_call_phase++;
2317 break;
2318 }
2319
2320 /* Mark this BB as being on the path to dominator root and as visited. */
2321 bb->aux = (void*)(1 | 2);
2322
2323 /* And walk the statements in order. */
2324 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2325 {
2326 gimple *stmt = gsi_stmt (gsi);
2327
2328 if ((gimple_code (stmt) == GIMPLE_ASM && gimple_vdef (stmt))
2329 || (is_gimple_call (stmt)
2330 && (!nonfreeing_call_p (stmt) || !nonbarrier_call_p (stmt))))
2331 nt_call_phase++;
2332 else if (gimple_assign_single_p (stmt) && !gimple_has_volatile_ops (stmt))
2333 {
2334 add_or_mark_expr (bb, gimple_assign_lhs (stmt), true);
2335 add_or_mark_expr (bb, gimple_assign_rhs1 (stmt), false);
2336 }
2337 }
2338 return NULL;
2339 }
2340
2341 /* Called by walk_dominator_tree, when basic block BB is exited. */
2342 void
2343 nontrapping_dom_walker::after_dom_children (basic_block bb)
2344 {
2345 /* This BB isn't on the path to dominator root anymore. */
2346 bb->aux = (void*)2;
2347 }
2348
2349 /* We see the expression EXP in basic block BB. If it's an interesting
2350 expression of:
2351 1) MEM_REF
2352 2) ARRAY_REF
2353 3) COMPONENT_REF
2354 possibly insert the expression into the set NONTRAP or the hash table
2355 of seen expressions. STORE is true if this expression is on the LHS,
2356 otherwise it's on the RHS. */
2357 void
2358 nontrapping_dom_walker::add_or_mark_expr (basic_block bb, tree exp, bool store)
2359 {
2360 HOST_WIDE_INT size;
2361
2362 if ((TREE_CODE (exp) == MEM_REF || TREE_CODE (exp) == ARRAY_REF
2363 || TREE_CODE (exp) == COMPONENT_REF)
2364 && (size = int_size_in_bytes (TREE_TYPE (exp))) > 0)
2365 {
2366 struct ref_to_bb map;
2367 ref_to_bb **slot;
2368 struct ref_to_bb *r2bb;
2369 basic_block found_bb = 0;
2370
2371 if (!store)
2372 {
2373 tree base = get_base_address (exp);
2374 /* Only record a LOAD of a local variable whose address is not taken,
2375 as the local stack is always writable. This allows cselim on a STORE
2376 with a dominating LOAD. */
2377 if (!auto_var_p (base) || TREE_ADDRESSABLE (base))
2378 return;
2379 }
2380
2381 /* Try to find the last seen *_REF, which can trap. */
2382 map.exp = exp;
2383 map.size = size;
2384 slot = m_seen_refs.find_slot (&map, INSERT);
2385 r2bb = *slot;
2386 if (r2bb && r2bb->phase >= nt_call_phase)
2387 found_bb = r2bb->bb;
2388
2389 /* If we've found a trapping *_REF, _and_ it dominates EXP
2390 (it's in a basic block on the path from us to the dominator root)
2391 then we can't trap. */
2392 if (found_bb && (((size_t)found_bb->aux) & 1) == 1)
2393 {
2394 m_nontrapping->add (exp);
2395 }
2396 else
2397 {
2398 /* EXP might trap, so insert it into the hash table. */
2399 if (r2bb)
2400 {
2401 r2bb->phase = nt_call_phase;
2402 r2bb->bb = bb;
2403 }
2404 else
2405 {
2406 r2bb = XNEW (struct ref_to_bb);
2407 r2bb->phase = nt_call_phase;
2408 r2bb->bb = bb;
2409 r2bb->exp = exp;
2410 r2bb->size = size;
2411 *slot = r2bb;
2412 }
2413 }
2414 }
2415 }
2416
2417 /* This is the entry point for gathering non-trapping memory accesses.
2418 It will do a dominator walk over the whole function, and it will
2419 make use of the bb->aux pointers. It returns a set of trees
2420 (the MEM_REFs themselves) which can't trap. */
2421 static hash_set<tree> *
2422 get_non_trapping (void)
2423 {
2424 nt_call_phase = 0;
2425 hash_set<tree> *nontrap = new hash_set<tree>;
2426 /* We're going to do a dominator walk, so ensure that we have
2427 dominance information. */
2428 calculate_dominance_info (CDI_DOMINATORS);
2429
2430 nontrapping_dom_walker (CDI_DOMINATORS, nontrap)
2431 .walk (cfun->cfg->x_entry_block_ptr);
2432
2433 clear_aux_for_blocks ();
2434 return nontrap;
2435 }
2436
2437 /* Do the main work of conditional store replacement. We already know
2438 that the recognized pattern looks like so:
2439
2440 split:
2441 if (cond) goto MIDDLE_BB; else goto JOIN_BB (edge E1)
2442 MIDDLE_BB:
2443 something
2444 fallthrough (edge E0)
2445 JOIN_BB:
2446 some more
2447
2448 We check that MIDDLE_BB contains only one store, that that store
2449 doesn't trap (not via NOTRAP, but via checking if an access to the same
2450 memory location dominates us, or the store is to a local addressable
2451 object) and that the store has a "simple" RHS. */
2452
2453 static bool
2454 cond_store_replacement (basic_block middle_bb, basic_block join_bb,
2455 edge e0, edge e1, hash_set<tree> *nontrap)
2456 {
2457 gimple *assign = last_and_only_stmt (middle_bb);
2458 tree lhs, rhs, name, name2;
2459 gphi *newphi;
2460 gassign *new_stmt;
2461 gimple_stmt_iterator gsi;
2462 location_t locus;
2463
2464 /* Check if middle_bb contains only one store. */
2465 if (!assign
2466 || !gimple_assign_single_p (assign)
2467 || gimple_has_volatile_ops (assign))
2468 return false;
2469
2470 /* And no PHI nodes so all uses in the single stmt are also
2471 available where we insert to. */
2472 if (!gimple_seq_empty_p (phi_nodes (middle_bb)))
2473 return false;
2474
2475 locus = gimple_location (assign);
2476 lhs = gimple_assign_lhs (assign);
2477 rhs = gimple_assign_rhs1 (assign);
2478 if ((TREE_CODE (lhs) != MEM_REF
2479 && TREE_CODE (lhs) != ARRAY_REF
2480 && TREE_CODE (lhs) != COMPONENT_REF)
2481 || !is_gimple_reg_type (TREE_TYPE (lhs)))
2482 return false;
2483
2484 /* Prove that we can move the store down. We could also check
2485 TREE_THIS_NOTRAP here, but in that case we could also move stores
2486 whose value is not readily available, which we want to avoid. */
2487 if (!nontrap->contains (lhs))
2488 {
2489 /* If LHS is an access to a local variable whose address is not taken
2490 (or when we allow data races) and known not to trap, we can
2491 always safely move the store down. */
2492 tree base = get_base_address (lhs);
2493 if (!auto_var_p (base)
2494 || (TREE_ADDRESSABLE (base) && !flag_store_data_races)
2495 || tree_could_trap_p (lhs))
2496 return false;
2497 }
2498
2499 /* Now we've checked the constraints, so do the transformation:
2500 1) Remove the single store. */
2501 gsi = gsi_for_stmt (assign);
2502 unlink_stmt_vdef (assign);
2503 gsi_remove (&gsi, true);
2504 release_defs (assign);
2505
2506 /* Make both store and load use alias-set zero as we have to
2507 deal with the case of the store being a conditional change
2508 of the dynamic type. */
2509 lhs = unshare_expr (lhs);
2510 tree *basep = &lhs;
2511 while (handled_component_p (*basep))
2512 basep = &TREE_OPERAND (*basep, 0);
2513 if (TREE_CODE (*basep) == MEM_REF
2514 || TREE_CODE (*basep) == TARGET_MEM_REF)
2515 TREE_OPERAND (*basep, 1)
2516 = fold_convert (ptr_type_node, TREE_OPERAND (*basep, 1));
2517 else
2518 *basep = build2 (MEM_REF, TREE_TYPE (*basep),
2519 build_fold_addr_expr (*basep),
2520 build_zero_cst (ptr_type_node));
2521
2522 /* 2) Insert a load from the memory of the store to the temporary
2523 on the edge which did not contain the store. */
2524 name = make_temp_ssa_name (TREE_TYPE (lhs), NULL, "cstore");
2525 new_stmt = gimple_build_assign (name, lhs);
2526 gimple_set_location (new_stmt, locus);
2527 lhs = unshare_expr (lhs);
2528 /* Set TREE_NO_WARNING on the rhs of the load to avoid uninit
2529 warnings. */
2530 TREE_NO_WARNING (gimple_assign_rhs1 (new_stmt)) = 1;
2531 gsi_insert_on_edge (e1, new_stmt);
2532
2533 /* 3) Create a PHI node at the join block, with one argument
2534 holding the old RHS, and the other holding the temporary
2535 where we stored the old memory contents. */
2536 name2 = make_temp_ssa_name (TREE_TYPE (lhs), NULL, "cstore");
2537 newphi = create_phi_node (name2, join_bb);
2538 add_phi_arg (newphi, rhs, e0, locus);
2539 add_phi_arg (newphi, name, e1, locus);
2540
2541 new_stmt = gimple_build_assign (lhs, PHI_RESULT (newphi));
2542
2543 /* 4) Insert that PHI node. */
2544 gsi = gsi_after_labels (join_bb);
2545 if (gsi_end_p (gsi))
2546 {
2547 gsi = gsi_last_bb (join_bb);
2548 gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
2549 }
2550 else
2551 gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);
2552
2553 if (dump_file && (dump_flags & TDF_DETAILS))
2554 {
2555 fprintf (dump_file, "\nConditional store replacement happened!");
2556 fprintf (dump_file, "\nReplaced the store with a load.");
2557 fprintf (dump_file, "\nInserted a new PHI statement in join block:\n");
2558 print_gimple_stmt (dump_file, new_stmt, 0, TDF_VOPS|TDF_MEMSYMS);
2559 }
2560
2561 return true;
2562 }
2563
2564 /* Do the main work of conditional store replacement for one pair of stores. */
2565
2566 static bool
2567 cond_if_else_store_replacement_1 (basic_block then_bb, basic_block else_bb,
2568 basic_block join_bb, gimple *then_assign,
2569 gimple *else_assign)
2570 {
2571 tree lhs_base, lhs, then_rhs, else_rhs, name;
2572 location_t then_locus, else_locus;
2573 gimple_stmt_iterator gsi;
2574 gphi *newphi;
2575 gassign *new_stmt;
2576
2577 if (then_assign == NULL
2578 || !gimple_assign_single_p (then_assign)
2579 || gimple_clobber_p (then_assign)
2580 || gimple_has_volatile_ops (then_assign)
2581 || else_assign == NULL
2582 || !gimple_assign_single_p (else_assign)
2583 || gimple_clobber_p (else_assign)
2584 || gimple_has_volatile_ops (else_assign))
2585 return false;
2586
2587 lhs = gimple_assign_lhs (then_assign);
2588 if (!is_gimple_reg_type (TREE_TYPE (lhs))
2589 || !operand_equal_p (lhs, gimple_assign_lhs (else_assign), 0))
2590 return false;
2591
2592 lhs_base = get_base_address (lhs);
2593 if (lhs_base == NULL_TREE
2594 || (!DECL_P (lhs_base) && TREE_CODE (lhs_base) != MEM_REF))
2595 return false;
2596
2597 then_rhs = gimple_assign_rhs1 (then_assign);
2598 else_rhs = gimple_assign_rhs1 (else_assign);
2599 then_locus = gimple_location (then_assign);
2600 else_locus = gimple_location (else_assign);
2601
2602 /* Now we've checked the constraints, so do the transformation:
2603 1) Remove the stores. */
2604 gsi = gsi_for_stmt (then_assign);
2605 unlink_stmt_vdef (then_assign);
2606 gsi_remove (&gsi, true);
2607 release_defs (then_assign);
2608
2609 gsi = gsi_for_stmt (else_assign);
2610 unlink_stmt_vdef (else_assign);
2611 gsi_remove (&gsi, true);
2612 release_defs (else_assign);
2613
2614 /* 2) Create a PHI node at the join block, with one argument
2615 holding the RHS stored in THEN_BB and the other holding the
2616 RHS stored in ELSE_BB. */
2617 name = make_temp_ssa_name (TREE_TYPE (lhs), NULL, "cstore");
2618 newphi = create_phi_node (name, join_bb);
2619 add_phi_arg (newphi, then_rhs, EDGE_SUCC (then_bb, 0), then_locus);
2620 add_phi_arg (newphi, else_rhs, EDGE_SUCC (else_bb, 0), else_locus);
2621
2622 new_stmt = gimple_build_assign (lhs, PHI_RESULT (newphi));
2623
2624 /* 3) Insert that PHI node. */
2625 gsi = gsi_after_labels (join_bb);
2626 if (gsi_end_p (gsi))
2627 {
2628 gsi = gsi_last_bb (join_bb);
2629 gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
2630 }
2631 else
2632 gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);
2633
2634 return true;
2635 }
2636
2637 /* Return the single store in BB with VDEF or NULL if there are
2638 other stores in the BB or loads following the store. */
2639
2640 static gimple *
2641 single_trailing_store_in_bb (basic_block bb, tree vdef)
2642 {
2643 if (SSA_NAME_IS_DEFAULT_DEF (vdef))
2644 return NULL;
2645 gimple *store = SSA_NAME_DEF_STMT (vdef);
2646 if (gimple_bb (store) != bb
2647 || gimple_code (store) == GIMPLE_PHI)
2648 return NULL;
2649
2650 /* Verify there is no other store in this BB. */
2651 if (!SSA_NAME_IS_DEFAULT_DEF (gimple_vuse (store))
2652 && gimple_bb (SSA_NAME_DEF_STMT (gimple_vuse (store))) == bb
2653 && gimple_code (SSA_NAME_DEF_STMT (gimple_vuse (store))) != GIMPLE_PHI)
2654 return NULL;
2655
2656 /* Verify there is no load or store after the store. */
2657 use_operand_p use_p;
2658 imm_use_iterator imm_iter;
2659 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, gimple_vdef (store))
2660 if (USE_STMT (use_p) != store
2661 && gimple_bb (USE_STMT (use_p)) == bb)
2662 return NULL;
2663
2664 return store;
2665 }
2666
2667 /* Conditional store replacement. We already know
2668 that the recognized pattern looks like so:
2669
2670 split:
2671 if (cond) goto THEN_BB; else goto ELSE_BB (edge E1)
2672 THEN_BB:
2673 ...
2674 X = Y;
2675 ...
2676 goto JOIN_BB;
2677 ELSE_BB:
2678 ...
2679 X = Z;
2680 ...
2681 fallthrough (edge E0)
2682 JOIN_BB:
2683 some more
2684
2685 We check that it is safe to sink the store to JOIN_BB by verifying that
2686 there are no read-after-write or write-after-write dependencies in
2687 THEN_BB and ELSE_BB. */
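/* As a source-level sketch (illustrative only), this sinks the stores in

     if (cond)
       x->f = a;
     else
       x->f = b;

   so that, conceptually,

     tmp = cond ? a : b;
     x->f = tmp;

   where TMP is realized as a PHI node in JOIN_BB.  */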
2688
2689 static bool
2690 cond_if_else_store_replacement (basic_block then_bb, basic_block else_bb,
2691 basic_block join_bb)
2692 {
2693 vec<data_reference_p> then_datarefs, else_datarefs;
2694 vec<ddr_p> then_ddrs, else_ddrs;
2695 gimple *then_store, *else_store;
2696 bool found, ok = false, res;
2697 struct data_dependence_relation *ddr;
2698 data_reference_p then_dr, else_dr;
2699 int i, j;
2700 tree then_lhs, else_lhs;
2701 basic_block blocks[3];
2702
2703 /* Handle the case of a single store in each of THEN_BB and ELSE_BB. That
2704 is cheap enough to always handle as it allows us to elide dependence
2705 checking. */
2706 gphi *vphi = NULL;
2707 for (gphi_iterator si = gsi_start_phis (join_bb); !gsi_end_p (si);
2708 gsi_next (&si))
2709 if (virtual_operand_p (gimple_phi_result (si.phi ())))
2710 {
2711 vphi = si.phi ();
2712 break;
2713 }
2714 if (!vphi)
2715 return false;
2716 tree then_vdef = PHI_ARG_DEF_FROM_EDGE (vphi, single_succ_edge (then_bb));
2717 tree else_vdef = PHI_ARG_DEF_FROM_EDGE (vphi, single_succ_edge (else_bb));
2718 gimple *then_assign = single_trailing_store_in_bb (then_bb, then_vdef);
2719 if (then_assign)
2720 {
2721 gimple *else_assign = single_trailing_store_in_bb (else_bb, else_vdef);
2722 if (else_assign)
2723 return cond_if_else_store_replacement_1 (then_bb, else_bb, join_bb,
2724 then_assign, else_assign);
2725 }
2726
2727 /* If either vectorization or if-conversion is disabled then do
2728 not sink any stores. */
2729 if (param_max_stores_to_sink == 0
2730 || (!flag_tree_loop_vectorize && !flag_tree_slp_vectorize)
2731 || !flag_tree_loop_if_convert)
2732 return false;
2733
2734 /* Find data references. */
2735 then_datarefs.create (1);
2736 else_datarefs.create (1);
2737 if ((find_data_references_in_bb (NULL, then_bb, &then_datarefs)
2738 == chrec_dont_know)
2739 || !then_datarefs.length ()
2740 || (find_data_references_in_bb (NULL, else_bb, &else_datarefs)
2741 == chrec_dont_know)
2742 || !else_datarefs.length ())
2743 {
2744 free_data_refs (then_datarefs);
2745 free_data_refs (else_datarefs);
2746 return false;
2747 }
2748
2749 /* Find pairs of stores with equal LHS. */
2750 auto_vec<gimple *, 1> then_stores, else_stores;
2751 FOR_EACH_VEC_ELT (then_datarefs, i, then_dr)
2752 {
2753 if (DR_IS_READ (then_dr))
2754 continue;
2755
2756 then_store = DR_STMT (then_dr);
2757 then_lhs = gimple_get_lhs (then_store);
2758 if (then_lhs == NULL_TREE)
2759 continue;
2760 found = false;
2761
2762 FOR_EACH_VEC_ELT (else_datarefs, j, else_dr)
2763 {
2764 if (DR_IS_READ (else_dr))
2765 continue;
2766
2767 else_store = DR_STMT (else_dr);
2768 else_lhs = gimple_get_lhs (else_store);
2769 if (else_lhs == NULL_TREE)
2770 continue;
2771
2772 if (operand_equal_p (then_lhs, else_lhs, 0))
2773 {
2774 found = true;
2775 break;
2776 }
2777 }
2778
2779 if (!found)
2780 continue;
2781
2782 then_stores.safe_push (then_store);
2783 else_stores.safe_push (else_store);
2784 }
2785
2786 /* No pairs of stores found, or too many stores to sink. */
2787 if (!then_stores.length ()
2788 || then_stores.length () > (unsigned) param_max_stores_to_sink)
2789 {
2790 free_data_refs (then_datarefs);
2791 free_data_refs (else_datarefs);
2792 return false;
2793 }
2794
2795 /* Compute and check data dependencies in both basic blocks. */
2796 then_ddrs.create (1);
2797 else_ddrs.create (1);
2798 if (!compute_all_dependences (then_datarefs, &then_ddrs,
2799 vNULL, false)
2800 || !compute_all_dependences (else_datarefs, &else_ddrs,
2801 vNULL, false))
2802 {
2803 free_dependence_relations (then_ddrs);
2804 free_dependence_relations (else_ddrs);
2805 free_data_refs (then_datarefs);
2806 free_data_refs (else_datarefs);
2807 return false;
2808 }
2809 blocks[0] = then_bb;
2810 blocks[1] = else_bb;
2811 blocks[2] = join_bb;
2812 renumber_gimple_stmt_uids_in_blocks (blocks, 3);
2813
2814 /* Check that there are no read-after-write or write-after-write dependencies
2815 in THEN_BB. */
2816 FOR_EACH_VEC_ELT (then_ddrs, i, ddr)
2817 {
2818 struct data_reference *dra = DDR_A (ddr);
2819 struct data_reference *drb = DDR_B (ddr);
2820
2821 if (DDR_ARE_DEPENDENT (ddr) != chrec_known
2822 && ((DR_IS_READ (dra) && DR_IS_WRITE (drb)
2823 && gimple_uid (DR_STMT (dra)) > gimple_uid (DR_STMT (drb)))
2824 || (DR_IS_READ (drb) && DR_IS_WRITE (dra)
2825 && gimple_uid (DR_STMT (drb)) > gimple_uid (DR_STMT (dra)))
2826 || (DR_IS_WRITE (dra) && DR_IS_WRITE (drb))))
2827 {
2828 free_dependence_relations (then_ddrs);
2829 free_dependence_relations (else_ddrs);
2830 free_data_refs (then_datarefs);
2831 free_data_refs (else_datarefs);
2832 return false;
2833 }
2834 }
2835
2836 /* Check that there are no read-after-write or write-after-write dependencies
2837 in ELSE_BB. */
2838 FOR_EACH_VEC_ELT (else_ddrs, i, ddr)
2839 {
2840 struct data_reference *dra = DDR_A (ddr);
2841 struct data_reference *drb = DDR_B (ddr);
2842
2843 if (DDR_ARE_DEPENDENT (ddr) != chrec_known
2844 && ((DR_IS_READ (dra) && DR_IS_WRITE (drb)
2845 && gimple_uid (DR_STMT (dra)) > gimple_uid (DR_STMT (drb)))
2846 || (DR_IS_READ (drb) && DR_IS_WRITE (dra)
2847 && gimple_uid (DR_STMT (drb)) > gimple_uid (DR_STMT (dra)))
2848 || (DR_IS_WRITE (dra) && DR_IS_WRITE (drb))))
2849 {
2850 free_dependence_relations (then_ddrs);
2851 free_dependence_relations (else_ddrs);
2852 free_data_refs (then_datarefs);
2853 free_data_refs (else_datarefs);
2854 return false;
2855 }
2856 }
2857
2858 /* Sink stores with same LHS. */
2859 FOR_EACH_VEC_ELT (then_stores, i, then_store)
2860 {
2861 else_store = else_stores[i];
2862 res = cond_if_else_store_replacement_1 (then_bb, else_bb, join_bb,
2863 then_store, else_store);
2864 ok = ok || res;
2865 }
2866
2867 free_dependence_relations (then_ddrs);
2868 free_dependence_relations (else_ddrs);
2869 free_data_refs (then_datarefs);
2870 free_data_refs (else_datarefs);
2871
2872 return ok;
2873 }
2874
2875 /* Return TRUE if STMT has a VUSE whose corresponding VDEF is in BB. */
2876
2877 static bool
2878 local_mem_dependence (gimple *stmt, basic_block bb)
2879 {
2880 tree vuse = gimple_vuse (stmt);
2881 gimple *def;
2882
2883 if (!vuse)
2884 return false;
2885
2886 def = SSA_NAME_DEF_STMT (vuse);
2887 return (def && gimple_bb (def) == bb);
2888 }
2889
2890 /* Given a "diamond" control-flow pattern where BB0 tests a condition,
2891 BB1 and BB2 are "then" and "else" blocks dependent on this test,
2892 and BB3 rejoins control flow following BB1 and BB2, look for
2893 opportunities to hoist loads as follows. If BB3 contains a PHI of
2894 two loads, one each occurring in BB1 and BB2, and the loads are
2895 provably of adjacent fields in the same structure, then move both
2896 loads into BB0. Of course this can only be done if there are no
2897 dependencies preventing such motion.
2898
2899 One of the hoisted loads will always be speculative, so the
2900 transformation is currently conservative:
2901
2902 - The fields must be strictly adjacent.
2903 - The two fields must occupy a single memory block that is
2904 guaranteed to not cross a page boundary.
2905
2906 The last is difficult to prove, as such memory blocks should be
2907 aligned on the minimum of the stack alignment boundary and the
2908 alignment guaranteed by heap allocation interfaces. Thus we rely
2909 on a parameter for the alignment value.
2910
2911 Provided a good value is used for the last case, the first
2912 restriction could possibly be relaxed. */
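/* A hypothetical example of the idiom this targets:

     struct node { struct node *left, *right; };  /* adjacent fields */
     ...
     x = cond ? n->left : n->right;

   Both loads are moved into BB0 when the two fields are adjacent and
   together fit within one L1 cache line, so the expansion can later use
   a conditional move instead of a branch.  */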
2913
2914 static void
2915 hoist_adjacent_loads (basic_block bb0, basic_block bb1,
2916 basic_block bb2, basic_block bb3)
2917 {
2918 int param_align = param_l1_cache_line_size;
2919 unsigned param_align_bits = (unsigned) (param_align * BITS_PER_UNIT);
2920 gphi_iterator gsi;
2921
2922 /* Walk the phis in bb3 looking for an opportunity. We are looking
2923 for phis of two SSA names, one each of which is defined in bb1 and
2924 bb2. */
2925 for (gsi = gsi_start_phis (bb3); !gsi_end_p (gsi); gsi_next (&gsi))
2926 {
2927 gphi *phi_stmt = gsi.phi ();
2928 gimple *def1, *def2;
2929 tree arg1, arg2, ref1, ref2, field1, field2;
2930 tree tree_offset1, tree_offset2, tree_size2, next;
2931 int offset1, offset2, size2;
2932 unsigned align1;
2933 gimple_stmt_iterator gsi2;
2934 basic_block bb_for_def1, bb_for_def2;
2935
2936 if (gimple_phi_num_args (phi_stmt) != 2
2937 || virtual_operand_p (gimple_phi_result (phi_stmt)))
2938 continue;
2939
2940 arg1 = gimple_phi_arg_def (phi_stmt, 0);
2941 arg2 = gimple_phi_arg_def (phi_stmt, 1);
2942
2943 if (TREE_CODE (arg1) != SSA_NAME
2944 || TREE_CODE (arg2) != SSA_NAME
2945 || SSA_NAME_IS_DEFAULT_DEF (arg1)
2946 || SSA_NAME_IS_DEFAULT_DEF (arg2))
2947 continue;
2948
2949 def1 = SSA_NAME_DEF_STMT (arg1);
2950 def2 = SSA_NAME_DEF_STMT (arg2);
2951
2952 if ((gimple_bb (def1) != bb1 || gimple_bb (def2) != bb2)
2953 && (gimple_bb (def2) != bb1 || gimple_bb (def1) != bb2))
2954 continue;
2955
2956 /* Check the mode of the arguments to be sure a conditional move
2957 can be generated for it. */
2958 if (optab_handler (movcc_optab, TYPE_MODE (TREE_TYPE (arg1)))
2959 == CODE_FOR_nothing)
2960 continue;
2961
2962 /* Both statements must be assignments whose RHS is a COMPONENT_REF. */
2963 if (!gimple_assign_single_p (def1)
2964 || !gimple_assign_single_p (def2)
2965 || gimple_has_volatile_ops (def1)
2966 || gimple_has_volatile_ops (def2))
2967 continue;
2968
2969 ref1 = gimple_assign_rhs1 (def1);
2970 ref2 = gimple_assign_rhs1 (def2);
2971
2972 if (TREE_CODE (ref1) != COMPONENT_REF
2973 || TREE_CODE (ref2) != COMPONENT_REF)
2974 continue;
2975
2976 /* The zeroth operand of the two component references must be
2977 identical. It is not sufficient to compare get_base_address of
2978 the two references, because this could allow for different
2979 elements of the same array in the two trees. It is not safe to
2980 assume that the existence of one array element implies the
2981 existence of a different one. */
2982 if (!operand_equal_p (TREE_OPERAND (ref1, 0), TREE_OPERAND (ref2, 0), 0))
2983 continue;
2984
2985 field1 = TREE_OPERAND (ref1, 1);
2986 field2 = TREE_OPERAND (ref2, 1);
2987
2988 /* Check for field adjacency, and ensure field1 comes first. */
2989 for (next = DECL_CHAIN (field1);
2990 next && TREE_CODE (next) != FIELD_DECL;
2991 next = DECL_CHAIN (next))
2992 ;
2993
2994 if (next != field2)
2995 {
2996 for (next = DECL_CHAIN (field2);
2997 next && TREE_CODE (next) != FIELD_DECL;
2998 next = DECL_CHAIN (next))
2999 ;
3000
3001 if (next != field1)
3002 continue;
3003
3004 std::swap (field1, field2);
3005 std::swap (def1, def2);
3006 }
3007
3008 bb_for_def1 = gimple_bb (def1);
3009 bb_for_def2 = gimple_bb (def2);
3010
3011 /* Check for proper alignment of the first field. */
3012 tree_offset1 = bit_position (field1);
3013 tree_offset2 = bit_position (field2);
3014 tree_size2 = DECL_SIZE (field2);
3015
3016 if (!tree_fits_uhwi_p (tree_offset1)
3017 || !tree_fits_uhwi_p (tree_offset2)
3018 || !tree_fits_uhwi_p (tree_size2))
3019 continue;
3020
3021 offset1 = tree_to_uhwi (tree_offset1);
3022 offset2 = tree_to_uhwi (tree_offset2);
3023 size2 = tree_to_uhwi (tree_size2);
3024 align1 = DECL_ALIGN (field1) % param_align_bits;
3025
3026 if (offset1 % BITS_PER_UNIT != 0)
3027 continue;
3028
3029 /* For profitability, the two field references should fit within
3030 a single cache line. */
3031 if (align1 + offset2 - offset1 + size2 > param_align_bits)
3032 continue;
3033
3034 /* The two expressions cannot be dependent upon vdefs defined
3035 in bb1/bb2. */
3036 if (local_mem_dependence (def1, bb_for_def1)
3037 || local_mem_dependence (def2, bb_for_def2))
3038 continue;
3039
3040 /* The conditions are satisfied; hoist the loads from bb1 and bb2 into
3041 bb0. We hoist the first one first so that a cache miss is handled
3042 efficiently regardless of hardware cache-fill policy. */
3043 gsi2 = gsi_for_stmt (def1);
3044 gsi_move_to_bb_end (&gsi2, bb0);
3045 gsi2 = gsi_for_stmt (def2);
3046 gsi_move_to_bb_end (&gsi2, bb0);
3047
3048 if (dump_file && (dump_flags & TDF_DETAILS))
3049 {
3050 fprintf (dump_file,
3051 "\nHoisting adjacent loads from %d and %d into %d: \n",
3052 bb_for_def1->index, bb_for_def2->index, bb0->index);
3053 print_gimple_stmt (dump_file, def1, 0, TDF_VOPS|TDF_MEMSYMS);
3054 print_gimple_stmt (dump_file, def2, 0, TDF_VOPS|TDF_MEMSYMS);
3055 }
3056 }
3057 }
3058
3059 /* Determine whether we should attempt to hoist adjacent loads out of
3060 diamond patterns in pass_phiopt. Always hoist loads if
3061 -fhoist-adjacent-loads is specified and the target machine has
3062 both a conditional move instruction and a defined cache line size. */
3063
3064 static bool
3065 gate_hoist_loads (void)
3066 {
3067 return (flag_hoist_adjacent_loads == 1
3068 && param_l1_cache_line_size
3069 && HAVE_conditional_move);
3070 }
3071
3072 /* This pass tries to replace an if-then-else block with an
3073 assignment. We have four kinds of transformations. Some of these
3074 transformations are also performed by the ifcvt RTL optimizer.
3075
3076 Conditional Replacement
3077 -----------------------
3078
3079 This transformation, implemented in conditional_replacement,
3080 replaces
3081
3082 bb0:
3083 if (cond) goto bb2; else goto bb1;
3084 bb1:
3085 bb2:
3086 x = PHI <0 (bb1), 1 (bb0), ...>;
3087
3088 with
3089
3090 bb0:
3091 x' = cond;
3092 goto bb2;
3093 bb2:
3094 x = PHI <x' (bb0), ...>;
3095
3096 We remove bb1 as it becomes unreachable. This occurs often due to
3097 gimplification of conditionals.
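
 For instance (an illustrative source-level form), after gimplification

 x = (a < b) ? 1 : 0;

 produces exactly this kind of PHI of 0 and 1 and is rewritten to

 x = a < b;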
3098
3099 Value Replacement
3100 -----------------
3101
3102 This transformation, implemented in value_replacement, replaces
3103
3104 bb0:
3105 if (a != b) goto bb2; else goto bb1;
3106 bb1:
3107 bb2:
3108 x = PHI <a (bb1), b (bb0), ...>;
3109
3110 with
3111
3112 bb0:
3113 bb2:
3114 x = PHI <b (bb0), ...>;
3115
3116 This opportunity can sometimes occur as a result of other
3117 optimizations.
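
 For instance (illustrative), at the source level

 x = (a == b) ? a : b;

 simplifies to x = b, because on the path where a == b the two
 values are interchangeable.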
3118
3119
3120 Another case caught by value replacement looks like this:
3121
3122 bb0:
3123 t1 = a == CONST;
3124 t2 = b > c;
3125 t3 = t1 & t2;
3126 if (t3 != 0) goto bb1; else goto bb2;
3127 bb1:
3128 bb2:
3129 x = PHI (CONST, a)
3130
3131 Gets replaced with:
3132 bb0:
3133 bb2:
3134 t1 = a == CONST;
3135 t2 = b > c;
3136 t3 = t1 & t2;
3137 x = a;
3138
3139 ABS Replacement
3140 ---------------
3141
3142 This transformation, implemented in abs_replacement, replaces
3143
3144 bb0:
3145 if (a >= 0) goto bb2; else goto bb1;
3146 bb1:
3147 x = -a;
3148 bb2:
3149 x = PHI <x (bb1), a (bb0), ...>;
3150
3151 with
3152
3153 bb0:
3154 x' = ABS_EXPR< a >;
3155 bb2:
3156 x = PHI <x' (bb0), ...>;
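
 The mirrored form is handled as well (an illustrative sketch):

 x = (a >= 0) ? -a : a;

 becomes x' = ABS_EXPR< a >; x = -x'; provided the negation cannot
 introduce signed overflow that the original code did not have.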
3157
3158 MIN/MAX Replacement
3159 -------------------
3160
3161 This transformation, implemented in minmax_replacement, replaces
3162
3163 bb0:
3164 if (a <= b) goto bb2; else goto bb1;
3165 bb1:
3166 bb2:
3167 x = PHI <b (bb1), a (bb0), ...>;
3168
3169 with
3170
3171 bb0:
3172 x' = MIN_EXPR (a, b)
3173 bb2:
3174 x = PHI <x' (bb0), ...>;
3175
3176 A similar transformation is done for MAX_EXPR.
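
 The middle block may also contain a MIN_EXPR/MAX_EXPR against a bound,
 e.g. (an illustrative case, with c known to satisfy c >= b)

 x = (a > b) ? MIN_EXPR (a, c) : b;

 which minmax_replacement turns into

 x = MAX_EXPR (MIN_EXPR (a, c), b);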
3177
3178
3179 This pass also performs a fifth transformation of a slightly different
3180 flavor.
3181
3182 Factor conversion in COND_EXPR
3183 ------------------------------
3184
3185 This transformation factors the conversion out of COND_EXPR with
3186 factor_out_conditional_conversion.
3187
3188 For example:
3189 if (a <= CST) goto <bb 3>; else goto <bb 4>;
3190 <bb 3>:
3191 tmp = (int) a;
3192 <bb 4>:
3193 tmp = PHI <tmp, CST>
3194
3195 Into:
3196 if (a <= CST) goto <bb 3>; else goto <bb 4>;
3197 <bb 3>:
3198 <bb 4>:
3199 a = PHI <a, CST>
3200 tmp = (int) a;
3201
3202 Adjacent Load Hoisting
3203 ----------------------
3204
3205 This transformation replaces
3206
3207 bb0:
3208 if (...) goto bb2; else goto bb1;
3209 bb1:
3210 x1 = (<expr>).field1;
3211 goto bb3;
3212 bb2:
3213 x2 = (<expr>).field2;
3214 bb3:
3215 # x = PHI <x1, x2>;
3216
3217 with
3218
3219 bb0:
3220 x1 = (<expr>).field1;
3221 x2 = (<expr>).field2;
3222 if (...) goto bb2; else goto bb1;
3223 bb1:
3224 goto bb3;
3225 bb2:
3226 bb3:
3227 # x = PHI <x1, x2>;
3228
3229 The purpose of this transformation is to enable generation of conditional
3230 move instructions such as Intel CMOVE or PowerPC ISEL. Because one of
3231 the loads is speculative, the transformation is restricted to very
3232 specific cases to avoid introducing a page fault. We are looking for
3233 the common idiom:
3234
3235 if (...)
3236 x = y->left;
3237 else
3238 x = y->right;
3239
3240 where left and right are typically adjacent pointers in a tree structure. */
3241
3242 namespace {
3243
3244 const pass_data pass_data_phiopt =
3245 {
3246 GIMPLE_PASS, /* type */
3247 "phiopt", /* name */
3248 OPTGROUP_NONE, /* optinfo_flags */
3249 TV_TREE_PHIOPT, /* tv_id */
3250 ( PROP_cfg | PROP_ssa ), /* properties_required */
3251 0, /* properties_provided */
3252 0, /* properties_destroyed */
3253 0, /* todo_flags_start */
3254 0, /* todo_flags_finish */
3255 };
3256
3257 class pass_phiopt : public gimple_opt_pass
3258 {
3259 public:
3260 pass_phiopt (gcc::context *ctxt)
3261 : gimple_opt_pass (pass_data_phiopt, ctxt), early_p (false)
3262 {}
3263
3264 /* opt_pass methods: */
3265 opt_pass * clone () { return new pass_phiopt (m_ctxt); }
3266 void set_pass_param (unsigned n, bool param)
3267 {
3268 gcc_assert (n == 0);
3269 early_p = param;
3270 }
3271 virtual bool gate (function *) { return flag_ssa_phiopt; }
3272 virtual unsigned int execute (function *)
3273 {
3274 return tree_ssa_phiopt_worker (false,
3275 !early_p ? gate_hoist_loads () : false,
3276 early_p);
3277 }
3278
3279 private:
3280 bool early_p;
3281 }; // class pass_phiopt
3282
3283 } // anon namespace
3284
3285 gimple_opt_pass *
3286 make_pass_phiopt (gcc::context *ctxt)
3287 {
3288 return new pass_phiopt (ctxt);
3289 }
3290
3291 namespace {
3292
3293 const pass_data pass_data_cselim =
3294 {
3295 GIMPLE_PASS, /* type */
3296 "cselim", /* name */
3297 OPTGROUP_NONE, /* optinfo_flags */
3298 TV_TREE_PHIOPT, /* tv_id */
3299 ( PROP_cfg | PROP_ssa ), /* properties_required */
3300 0, /* properties_provided */
3301 0, /* properties_destroyed */
3302 0, /* todo_flags_start */
3303 0, /* todo_flags_finish */
3304 };
3305
3306 class pass_cselim : public gimple_opt_pass
3307 {
3308 public:
3309 pass_cselim (gcc::context *ctxt)
3310 : gimple_opt_pass (pass_data_cselim, ctxt)
3311 {}
3312
3313 /* opt_pass methods: */
3314 virtual bool gate (function *) { return flag_tree_cselim; }
3315 virtual unsigned int execute (function *) { return tree_ssa_cs_elim (); }
3316
3317 }; // class pass_cselim
3318
3319 } // anon namespace
3320
3321 gimple_opt_pass *
3322 make_pass_cselim (gcc::context *ctxt)
3323 {
3324 return new pass_cselim (ctxt);
3325 }