/* Code sinking for trees
   Copyright (C) 2001-2021 Free Software Foundation, Inc.
   Contributed by Daniel Berlin <dan@dberlin.org>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "tree.h"
#include "gimple.h"
#include "cfghooks.h"
#include "tree-pass.h"
#include "ssa.h"
#include "gimple-pretty-print.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "cfganal.h"
#include "gimple-iterator.h"
#include "tree-cfg.h"
#include "cfgloop.h"
#include "tree-eh.h"

/* TODO:
   1. Sinking store only using scalar promotion (IE without moving the RHS):

   *q = p;
   p = p + 1;
   if (something)
     *q = <not p>;
   else
     y = *q;


   should become
   sinktemp = p;
   p = p + 1;
   if (something)
     *q = <not p>;
   else
     {
       *q = sinktemp;
       y = *q;
     }
   Store copy propagation will take care of the store elimination above.


   2. Sinking using Partial Dead Code Elimination.  */
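
/* A sketch of item 2 on hypothetical source code (an illustration, not
   code from this file): the first computation of x below is partially
   dead, since the else branch overwrites it without using it.

     x = a + b;
     if (something)
       use (x);
     else
       x = c;

   Partial dead code elimination would sink "x = a + b" into the
   then-branch so that the else path no longer computes it.  */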


static struct
{
  /* The number of statements sunk down the flowgraph by code sinking.  */
  int sunk;

  /* The number of stores commoned and sunk down by store commoning.  */
  int commoned;
} sink_stats;


/* Given a PHI, and one of its arguments (DEF), find the edge for
   that argument and return it.  If the argument occurs twice in the PHI node,
   we return NULL.  */

static basic_block
find_bb_for_arg (gphi *phi, tree def)
{
  size_t i;
  bool foundone = false;
  basic_block result = NULL;
  for (i = 0; i < gimple_phi_num_args (phi); i++)
    if (PHI_ARG_DEF (phi, i) == def)
      {
        if (foundone)
          return NULL;
        foundone = true;
        result = gimple_phi_arg_edge (phi, i)->src;
      }
  return result;
}

/* Return TRUE if all the immediate uses of the definition in DEF_P occur
   in the same statement.
   We could also handle the case where the first immediate use is in a phi
   node, and all the other uses are in phis in the same basic block, but this
   requires some expensive checking later (you have to make sure no def/vdef
   in the statement occurs for multiple edges in the various phi nodes it's
   used in, so that you only have one place you can sink it to).  */
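
/* For example (a hypothetical GIMPLE sketch), with uses split across
   two PHIs in the same block:

     <bb 4>:
     x_3 = PHI <a_1 (2), b_2 (3)>
     y_4 = PHI <c_5 (2), a_1 (3)>

   a_1 arrives via the edge from bb 2 in one PHI but via the edge from
   bb 3 in the other, so there would be no single block to sink a_1's
   defining statement to.  */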

static bool
all_immediate_uses_same_place (def_operand_p def_p)
{
  tree var = DEF_FROM_PTR (def_p);
  imm_use_iterator imm_iter;
  use_operand_p use_p;

  gimple *firstuse = NULL;
  FOR_EACH_IMM_USE_FAST (use_p, imm_iter, var)
    {
      if (is_gimple_debug (USE_STMT (use_p)))
        continue;
      if (firstuse == NULL)
        firstuse = USE_STMT (use_p);
      else if (firstuse != USE_STMT (use_p))
        return false;
    }

  return true;
}

/* Find the nearest common dominator of all the immediate uses of the
   definition in DEF_P; set *DEBUG_STMTS if any of those uses is a
   debug stmt.  */

static basic_block
nearest_common_dominator_of_uses (def_operand_p def_p, bool *debug_stmts)
{
  tree var = DEF_FROM_PTR (def_p);
  auto_bitmap blocks;
  basic_block commondom;
  unsigned int j;
  bitmap_iterator bi;
  imm_use_iterator imm_iter;
  use_operand_p use_p;

  FOR_EACH_IMM_USE_FAST (use_p, imm_iter, var)
    {
      gimple *usestmt = USE_STMT (use_p);
      basic_block useblock;

      if (gphi *phi = dyn_cast <gphi *> (usestmt))
        {
          int idx = PHI_ARG_INDEX_FROM_USE (use_p);

          useblock = gimple_phi_arg_edge (phi, idx)->src;
        }
      else if (is_gimple_debug (usestmt))
        {
          *debug_stmts = true;
          continue;
        }
      else
        {
          useblock = gimple_bb (usestmt);
        }

      /* Short circuit.  Nothing dominates the entry block.  */
      if (useblock == ENTRY_BLOCK_PTR_FOR_FN (cfun))
        return NULL;

      bitmap_set_bit (blocks, useblock->index);
    }
  commondom = BASIC_BLOCK_FOR_FN (cfun, bitmap_first_set_bit (blocks));
  EXECUTE_IF_SET_IN_BITMAP (blocks, 0, j, bi)
    commondom = nearest_common_dominator (CDI_DOMINATORS, commondom,
                                          BASIC_BLOCK_FOR_FN (cfun, j));
  return commondom;
}

/* Given EARLY_BB and LATE_BB, two blocks in a path through the dominator
   tree, return the best basic block between them (inclusive) to place
   statements.

   We want the most control dependent block in the shallowest loop nest.

   If the resulting block is in a shallower loop nest, then use it.  Else
   only use the resulting block if it has significantly lower execution
   frequency than EARLY_BB to avoid gratuitous statement movement.  We
   consider statements with VOPS more desirable to move.

   This pass would obviously benefit from profile-driven optimization (PDO)
   as it utilizes block frequencies.  It would also benefit from recomputing
   frequencies if profile data is not available since frequencies often get
   out of sync with reality.  */

static basic_block
select_best_block (basic_block early_bb,
                   basic_block late_bb,
                   gimple *stmt)
{
  basic_block best_bb = late_bb;
  basic_block temp_bb = late_bb;
  int threshold;

  while (temp_bb != early_bb)
    {
      /* If we've moved into a lower loop nest, then that becomes
         our best block.  */
      if (bb_loop_depth (temp_bb) < bb_loop_depth (best_bb))
        best_bb = temp_bb;

      /* Walk up the dominator tree, hopefully we'll find a shallower
         loop nest.  */
      temp_bb = get_immediate_dominator (CDI_DOMINATORS, temp_bb);
    }

  /* If we found a shallower loop nest, then we always consider that
     a win.  This will always give us the most control dependent block
     within that loop nest.  */
  if (bb_loop_depth (best_bb) < bb_loop_depth (early_bb))
    return best_bb;

  /* Get the sinking threshold.  If the statement to be moved has memory
     operands, then increase the threshold by 7% as those are even more
     profitable to avoid, clamping at 100%.  */
  threshold = param_sink_frequency_threshold;
  if (gimple_vuse (stmt) || gimple_vdef (stmt))
    {
      threshold += 7;
      if (threshold > 100)
        threshold = 100;
    }
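
  /* A worked example (an illustration assuming the default value of 75
     for param_sink_frequency_threshold): for a statement with memory
     operands the threshold becomes 82, so when EARLY_BB executes 1000
     times, BEST_BB below is only used if it executes fewer than 820
     times.  */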

  /* If BEST_BB is at the same nesting level, then require it to have
     significantly lower execution frequency to avoid gratuitous movement.  */
  if (bb_loop_depth (best_bb) == bb_loop_depth (early_bb)
      /* If the result of the comparison is unknown, prefer EARLY_BB.
         Thus use !(...>=..) rather than (...<...)  */
      && !(best_bb->count.apply_scale (100, 1)
           >= early_bb->count.apply_scale (threshold, 1)))
    return best_bb;

  /* No better block found, so return EARLY_BB, which happens to be the
     statement's original block.  */
  return early_bb;
}

/* Given a statement (STMT) and the basic block it is currently in (FROMBB),
   determine the location to sink the statement to, if any.
   Return true if there is such a location; in that case, TOGSI points to
   the statement before which STMT should be moved.  */

static bool
statement_sink_location (gimple *stmt, basic_block frombb,
                         gimple_stmt_iterator *togsi, bool *zero_uses_p)
{
  gimple *use;
  use_operand_p one_use = NULL_USE_OPERAND_P;
  basic_block sinkbb;
  use_operand_p use_p;
  def_operand_p def_p;
  ssa_op_iter iter;
  imm_use_iterator imm_iter;

  *zero_uses_p = false;

  /* We can only sink assignments and non-looping const/pure calls.  */
  int cf;
  if (!is_gimple_assign (stmt)
      && (!is_gimple_call (stmt)
          || !((cf = gimple_call_flags (stmt)) & (ECF_CONST|ECF_PURE))
          || (cf & ECF_LOOPING_CONST_OR_PURE)))
    return false;

  /* We can only sink stmts with a single definition.  */
  def_p = single_ssa_def_operand (stmt, SSA_OP_ALL_DEFS);
  if (def_p == NULL_DEF_OPERAND_P)
    return false;

  /* There are a few classes of things we can't or don't move, some because we
     don't have code to handle it, some because it's not profitable and some
     because it's not legal.

     We can't sink things that may be global stores, at least not without
     calculating a lot more information, because we may cause it to no longer
     be seen by an external routine that needs it depending on where it gets
     moved to.

     We can't sink statements that end basic blocks without splitting the
     incoming edge for the sink location to place it there.

     We can't sink statements that have volatile operands.

     We don't want to sink dead code, so anything with 0 immediate uses is not
     sunk.

     Don't sink BLKmode assignments if current function has any local explicit
     register variables, as BLKmode assignments may involve memcpy or memset
     calls or, on some targets, inline expansion thereof that sometimes need
     to use specific hard registers.  */
  if (stmt_ends_bb_p (stmt)
      || gimple_has_side_effects (stmt)
      || (cfun->has_local_explicit_reg_vars
          && TYPE_MODE (TREE_TYPE (gimple_get_lhs (stmt))) == BLKmode))
    return false;

  /* Return if there are no immediate uses of this stmt.  */
  if (has_zero_uses (DEF_FROM_PTR (def_p)))
    {
      *zero_uses_p = true;
      return false;
    }

  if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (DEF_FROM_PTR (def_p)))
    return false;

  FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_ALL_USES)
    {
      tree use = USE_FROM_PTR (use_p);
      if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (use))
        return false;
    }

  use = NULL;

  /* If stmt is a store the one and only use needs to be the VOP
     merging PHI node.  */
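  /* E.g. (a hypothetical GIMPLE sketch) for a store whose virtual
     definition reaches the merge block only through the virtual PHI:

       <bb 3>:
       # .MEM_4 = VDEF <.MEM_2>
       *p_1 = x_2;

       <bb 5>:
       # .MEM_6 = PHI <.MEM_4 (3), .MEM_5 (4)>

     the PHI in bb 5 is the only use of .MEM_4, so the store is a
     candidate for sinking towards the merge point.  */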
  if (virtual_operand_p (DEF_FROM_PTR (def_p)))
    {
      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, DEF_FROM_PTR (def_p))
        {
          gimple *use_stmt = USE_STMT (use_p);

          /* A killing definition is not a use.  */
          if ((gimple_has_lhs (use_stmt)
               && operand_equal_p (gimple_get_lhs (stmt),
                                   gimple_get_lhs (use_stmt), 0))
              || stmt_kills_ref_p (use_stmt, gimple_get_lhs (stmt)))
            {
              /* If use_stmt is or might be a nop assignment then USE_STMT
                 acts as a use as well as definition.  */
              if (stmt != use_stmt
                  && ref_maybe_used_by_stmt_p (use_stmt,
                                               gimple_get_lhs (stmt)))
                return false;
              continue;
            }

          if (gimple_code (use_stmt) != GIMPLE_PHI)
            return false;

          if (use
              && use != use_stmt)
            return false;

          use = use_stmt;
        }
      if (!use)
        return false;
    }
  /* If all the immediate uses are not in the same place, find the nearest
     common dominator of all the immediate uses.  For PHI nodes, we have to
     find the nearest common dominator of all of the predecessor blocks, since
     that is where insertion would have to take place.  */
  else if (gimple_vuse (stmt)
           || !all_immediate_uses_same_place (def_p))
    {
      bool debug_stmts = false;
      basic_block commondom = nearest_common_dominator_of_uses (def_p,
                                                                &debug_stmts);

      if (commondom == frombb)
        return false;

      /* If this is a load then do not sink past any stores.
         Look for virtual definitions in the path from frombb to the sink
         location computed from the real uses, and if found, adjust the
         sink location so that it is a common dominator of them as well.  */
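
      /* For instance (a hypothetical source-level example), given

           tmp = *p;
           *p = 0;
           if (cond)
             use (tmp);

         sinking the load "tmp = *p" past the store "*p = 0" into the
         conditional block would read the wrong value, so virtual
         definitions found on the path pull the sink location back up.  */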
      if (gimple_vuse (stmt))
        {
          /* Do not sink loads from hard registers.  */
          if (gimple_assign_single_p (stmt)
              && TREE_CODE (gimple_assign_rhs1 (stmt)) == VAR_DECL
              && DECL_HARD_REGISTER (gimple_assign_rhs1 (stmt)))
            return false;

          imm_use_iterator imm_iter;
          use_operand_p use_p;
          FOR_EACH_IMM_USE_FAST (use_p, imm_iter, gimple_vuse (stmt))
            {
              gimple *use_stmt = USE_STMT (use_p);
              basic_block bb = gimple_bb (use_stmt);
              /* For PHI nodes the block we know something about is the
                 incoming block with the use.  */
              if (gimple_code (use_stmt) == GIMPLE_PHI)
                {
                  /* In case the PHI node post-dominates the current insert
                     location we can disregard it.  But make sure it is not
                     dominating it as well as can happen in a CFG cycle.  */
                  if (commondom != bb
                      && !dominated_by_p (CDI_DOMINATORS, commondom, bb)
                      && dominated_by_p (CDI_POST_DOMINATORS, commondom, bb)
                      /* If the blocks are possibly within the same irreducible
                         cycle the above check breaks down.  */
                      && !(bb->flags & commondom->flags & BB_IRREDUCIBLE_LOOP))
                    continue;
                  bb = EDGE_PRED (bb, PHI_ARG_INDEX_FROM_USE (use_p))->src;
                }
              else if (!gimple_vdef (use_stmt))
                continue;
              /* If the use is not dominated by the path entry it is not on
                 the path.  */
              if (!dominated_by_p (CDI_DOMINATORS, bb, frombb))
                continue;
              /* There is no easy way to disregard defs not on the path from
                 frombb to commondom so just consider them all.  */
              commondom = nearest_common_dominator (CDI_DOMINATORS,
                                                    bb, commondom);
              if (commondom == frombb)
                return false;
            }
        }

      /* Our common dominator has to be dominated by frombb in order to be a
         trivially safe place to put this statement, since it has multiple
         uses.  */
      if (!dominated_by_p (CDI_DOMINATORS, commondom, frombb))
        return false;

      commondom = select_best_block (frombb, commondom, stmt);

      if (commondom == frombb)
        return false;

      *togsi = gsi_after_labels (commondom);

      return true;
    }
  else
    {
      FOR_EACH_IMM_USE_FAST (one_use, imm_iter, DEF_FROM_PTR (def_p))
        {
          if (is_gimple_debug (USE_STMT (one_use)))
            continue;
          break;
        }
      use = USE_STMT (one_use);

      if (gimple_code (use) != GIMPLE_PHI)
        {
          sinkbb = select_best_block (frombb, gimple_bb (use), stmt);

          if (sinkbb == frombb)
            return false;

          if (sinkbb == gimple_bb (use))
            *togsi = gsi_for_stmt (use);
          else
            *togsi = gsi_after_labels (sinkbb);

          return true;
        }
    }

  sinkbb = find_bb_for_arg (as_a <gphi *> (use), DEF_FROM_PTR (def_p));

  /* This can happen if there are multiple uses in a PHI.  */
  if (!sinkbb)
    return false;

  sinkbb = select_best_block (frombb, sinkbb, stmt);
  if (!sinkbb || sinkbb == frombb)
    return false;

  /* If the latch block is empty, don't make it non-empty by sinking
     something into it.  */
  if (sinkbb == frombb->loop_father->latch
      && empty_block_p (sinkbb))
    return false;

  *togsi = gsi_after_labels (sinkbb);

  return true;
}

/* Very simplistic code to sink common stores from the predecessors through
   our virtual PHI.  We do this before sinking stmts from BB as it might
   expose sinking opportunities of the merged stores.
   Once we have partial dead code elimination through something like SSU-PRE
   this should be moved there.  */
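
/* For illustration, a hypothetical source-level example (not code from
   this file):

     if (cond)
       *p = x;
     else
       *p = y;

   Both stores hit the same location, so they are commoned to a single
   store at the merge point, with a PHI merging the stored values:

     if (cond)
       tem = x;
     else
       tem = y;
     *p = tem;
   */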

static unsigned
sink_common_stores_to_bb (basic_block bb)
{
  unsigned todo = 0;
  gphi *phi;

  if (EDGE_COUNT (bb->preds) > 1
      && (phi = get_virtual_phi (bb)))
    {
      /* Repeat until no more common stores are found.  */
      while (1)
        {
          gimple *first_store = NULL;
          auto_vec <tree, 5> vdefs;
          gimple_stmt_iterator gsi;

          /* Search for common stores defined by all virtual PHI args.
             ??? Common stores not present in all predecessors could
             be handled by inserting a forwarder to sink to.  Generally
             this involves deciding which stores to do this for if
             multiple common stores are present for different sets of
             predecessors.  See PR11832 for an interesting case.  */
          for (unsigned i = 0; i < gimple_phi_num_args (phi); ++i)
            {
              tree arg = gimple_phi_arg_def (phi, i);
              gimple *def = SSA_NAME_DEF_STMT (arg);
              if (! is_gimple_assign (def)
                  || stmt_can_throw_internal (cfun, def)
                  || (gimple_phi_arg_edge (phi, i)->flags & EDGE_ABNORMAL))
                {
                  /* ??? We could handle some cascading with the def being
                     another PHI.  We'd have to insert multiple PHIs for
                     the rhs then though (if they are not all equal).  */
                  first_store = NULL;
                  break;
                }
              /* ??? Do not try to do anything fancy with aliasing, thus
                 do not sink across non-aliased loads (or even stores,
                 so different store order will make the sinking fail).  */
              bool all_uses_on_phi = true;
              imm_use_iterator iter;
              use_operand_p use_p;
              FOR_EACH_IMM_USE_FAST (use_p, iter, arg)
                if (USE_STMT (use_p) != phi)
                  {
                    all_uses_on_phi = false;
                    break;
                  }
              if (! all_uses_on_phi)
                {
                  first_store = NULL;
                  break;
                }
              /* Check all stores are to the same LHS.  */
              if (! first_store)
                first_store = def;
              /* ??? We could handle differing SSA uses in the LHS by inserting
                 PHIs for them.  */
              else if (! operand_equal_p (gimple_assign_lhs (first_store),
                                          gimple_assign_lhs (def), 0)
                       || (gimple_clobber_p (first_store)
                           != gimple_clobber_p (def)))
                {
                  first_store = NULL;
                  break;
                }
              vdefs.safe_push (arg);
            }
          if (! first_store)
            break;

          /* Check if we need a PHI node to merge the stored values.  */
          bool allsame = true;
          if (!gimple_clobber_p (first_store))
            for (unsigned i = 1; i < vdefs.length (); ++i)
              {
                gimple *def = SSA_NAME_DEF_STMT (vdefs[i]);
                if (! operand_equal_p (gimple_assign_rhs1 (first_store),
                                       gimple_assign_rhs1 (def), 0))
                  {
                    allsame = false;
                    break;
                  }
              }

          /* We cannot handle aggregate values if we need to merge them.  */
          tree type = TREE_TYPE (gimple_assign_lhs (first_store));
          if (! allsame
              && ! is_gimple_reg_type (type))
            break;

          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_OPTIMIZED_LOCATIONS,
                               first_store,
                               "sinking common stores %sto ",
                               allsame ? "with same value " : "");
              dump_generic_expr (MSG_OPTIMIZED_LOCATIONS, TDF_SLIM,
                                 gimple_assign_lhs (first_store));
              dump_printf (MSG_OPTIMIZED_LOCATIONS, "\n");
            }

          /* Insert a PHI to merge differing stored values if necessary.
             Note that in general inserting PHIs isn't a very good idea as
             it makes the job of coalescing and register allocation harder.
             Even common SSA uses on the rhs/lhs might extend their lifetime
             across multiple edges by this code motion which makes
             register allocation harder.  */
          tree from;
          if (! allsame)
            {
              from = make_ssa_name (type);
              gphi *newphi = create_phi_node (from, bb);
              for (unsigned i = 0; i < vdefs.length (); ++i)
                {
                  gimple *def = SSA_NAME_DEF_STMT (vdefs[i]);
                  add_phi_arg (newphi, gimple_assign_rhs1 (def),
                               EDGE_PRED (bb, i), UNKNOWN_LOCATION);
                }
            }
          else
            from = gimple_assign_rhs1 (first_store);

          /* Remove all stores.  */
          for (unsigned i = 0; i < vdefs.length (); ++i)
            TREE_VISITED (vdefs[i]) = 1;
          for (unsigned i = 0; i < vdefs.length (); ++i)
            /* If we have more than one use of a VDEF on the PHI make sure
               we remove the defining stmt only once.  */
            if (TREE_VISITED (vdefs[i]))
              {
                TREE_VISITED (vdefs[i]) = 0;
                gimple *def = SSA_NAME_DEF_STMT (vdefs[i]);
                gsi = gsi_for_stmt (def);
                unlink_stmt_vdef (def);
                gsi_remove (&gsi, true);
                release_defs (def);
              }

          /* Insert the first store at the beginning of the merge BB.  */
          gimple_set_vdef (first_store, gimple_phi_result (phi));
          SSA_NAME_DEF_STMT (gimple_vdef (first_store)) = first_store;
          gimple_phi_set_result (phi, make_ssa_name (gimple_vop (cfun)));
          gimple_set_vuse (first_store, gimple_phi_result (phi));
          gimple_assign_set_rhs1 (first_store, from);
          /* ??? Should we reset first_store's location?  */
          gsi = gsi_after_labels (bb);
          gsi_insert_before (&gsi, first_store, GSI_SAME_STMT);
          sink_stats.commoned++;

          todo |= TODO_cleanup_cfg;
        }

      /* We could now have empty predecessors that we could remove,
         forming a proper CFG for further sinking.  Note that even
         CFG cleanup doesn't do this fully at the moment and it
         doesn't preserve post-dominators in the process either.
         The mergephi pass might do it though.  gcc.dg/tree-ssa/ssa-sink-13.c
         shows this nicely if you disable tail merging or (same effect)
         make the stored values unequal.  */
    }

  return todo;
}

/* Perform code sinking on BB.  */

static unsigned
sink_code_in_bb (basic_block bb)
{
  basic_block son;
  gimple_stmt_iterator gsi;
  edge_iterator ei;
  edge e;
  bool last = true;
  unsigned todo = 0;

  /* Sink common stores from the predecessors through our virtual PHI.  */
  todo |= sink_common_stores_to_bb (bb);

  /* If this block doesn't dominate anything, there can't be any place to sink
     the statements to.  */
  if (first_dom_son (CDI_DOMINATORS, bb) == NULL)
    goto earlyout;

  /* We can't move things across abnormal edges, so don't try.  */
  FOR_EACH_EDGE (e, ei, bb->succs)
    if (e->flags & EDGE_ABNORMAL)
      goto earlyout;

  for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi);)
    {
      gimple *stmt = gsi_stmt (gsi);
      gimple_stmt_iterator togsi;
      bool zero_uses_p;

      if (!statement_sink_location (stmt, bb, &togsi, &zero_uses_p))
        {
          gimple_stmt_iterator saved = gsi;
          if (!gsi_end_p (gsi))
            gsi_prev (&gsi);
          /* If we face a dead stmt remove it as it possibly blocks
             sinking of uses.  */
          if (zero_uses_p
              && ! gimple_vdef (stmt))
            {
              gsi_remove (&saved, true);
              release_defs (stmt);
            }
          else
            last = false;
          continue;
        }
      if (dump_file)
        {
          fprintf (dump_file, "Sinking ");
          print_gimple_stmt (dump_file, stmt, 0, TDF_VOPS);
          fprintf (dump_file, " from bb %d to bb %d\n",
                   bb->index, (gsi_bb (togsi))->index);
        }

      /* Update virtual operands of statements in the path we
         do not sink to.  */
      if (gimple_vdef (stmt))
        {
          imm_use_iterator iter;
          use_operand_p use_p;
          gimple *vuse_stmt;

          FOR_EACH_IMM_USE_STMT (vuse_stmt, iter, gimple_vdef (stmt))
            if (gimple_code (vuse_stmt) != GIMPLE_PHI)
              FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
                SET_USE (use_p, gimple_vuse (stmt));
        }

      /* If this is the end of the basic block, we need to insert at the end
         of the basic block.  */
      if (gsi_end_p (togsi))
        gsi_move_to_bb_end (&gsi, gsi_bb (togsi));
      else
        gsi_move_before (&gsi, &togsi);

      sink_stats.sunk++;

      /* If we've just removed the last statement of the BB, the
         gsi_end_p() test below would fail, but gsi_prev() would have
         succeeded, and we want it to succeed.  So we keep track of
         whether we're at the last statement and pick up the new last
         statement.  */
      if (last)
        {
          gsi = gsi_last_bb (bb);
          continue;
        }

      last = false;
      if (!gsi_end_p (gsi))
        gsi_prev (&gsi);
    }
 earlyout:
  for (son = first_dom_son (CDI_POST_DOMINATORS, bb);
       son;
       son = next_dom_son (CDI_POST_DOMINATORS, son))
    {
      todo |= sink_code_in_bb (son);
    }

  return todo;
}

/* Perform code sinking.
   This moves code down the flowgraph when we know it would be
   profitable to do so, or it wouldn't increase the number of
   executions of the statement.

   IE given

   a_1 = b + c;
   if (<something>)
     {
     }
   else
     {
       foo (&b, &c);
       a_5 = b + c;
     }
   a_6 = PHI (a_5, a_1);
   USE a_6.

   we'll transform this into:

   if (<something>)
     {
       a_1 = b + c;
     }
   else
     {
       foo (&b, &c);
       a_5 = b + c;
     }
   a_6 = PHI (a_5, a_1);
   USE a_6.

   Note that this reduces the number of computations of a = b + c to 1
   when we take the else edge, instead of 2.
*/
namespace {

const pass_data pass_data_sink_code =
{
  GIMPLE_PASS, /* type */
  "sink", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_SINK, /* tv_id */
  /* PROP_no_crit_edges is ensured by running split_edges_for_insertion in
     pass_sink_code::execute ().  */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_update_ssa, /* todo_flags_finish */
};

class pass_sink_code : public gimple_opt_pass
{
public:
  pass_sink_code (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_sink_code, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return flag_tree_sink != 0; }
  virtual unsigned int execute (function *);

}; // class pass_sink_code

unsigned int
pass_sink_code::execute (function *fun)
{
  loop_optimizer_init (LOOPS_NORMAL);
  split_edges_for_insertion ();
  connect_infinite_loops_to_exit ();
  memset (&sink_stats, 0, sizeof (sink_stats));
  calculate_dominance_info (CDI_DOMINATORS);
  calculate_dominance_info (CDI_POST_DOMINATORS);
  unsigned todo = sink_code_in_bb (EXIT_BLOCK_PTR_FOR_FN (fun));
  statistics_counter_event (fun, "Sunk statements", sink_stats.sunk);
  statistics_counter_event (fun, "Commoned stores", sink_stats.commoned);
  free_dominance_info (CDI_POST_DOMINATORS);
  remove_fake_exit_edges ();
  loop_optimizer_finalize ();

  return todo;
}

} // anon namespace

gimple_opt_pass *
make_pass_sink_code (gcc::context *ctxt)
{
  return new pass_sink_code (ctxt);
}