gcc/tree-eh.c
1 /* Exception handling semantics and decomposition for trees.
2 Copyright (C) 2003-2021 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3, or (at your option)
9 any later version.
10
11 GCC is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "backend.h"
24 #include "rtl.h"
25 #include "tree.h"
26 #include "gimple.h"
27 #include "cfghooks.h"
28 #include "tree-pass.h"
29 #include "ssa.h"
30 #include "cgraph.h"
31 #include "diagnostic-core.h"
32 #include "fold-const.h"
33 #include "calls.h"
34 #include "except.h"
35 #include "cfganal.h"
36 #include "cfgcleanup.h"
37 #include "tree-eh.h"
38 #include "gimple-iterator.h"
39 #include "tree-cfg.h"
40 #include "tree-into-ssa.h"
41 #include "tree-ssa.h"
42 #include "tree-inline.h"
43 #include "langhooks.h"
44 #include "cfgloop.h"
45 #include "gimple-low.h"
46 #include "stringpool.h"
47 #include "attribs.h"
48 #include "asan.h"
49 #include "gimplify.h"
50
51 /* In some instances a tree and a gimple need to be stored in the same table,
52 i.e. in hash tables. This structure makes that possible. */
53 typedef union {tree *tp; tree t; gimple *g;} treemple;
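/* For example: a goto queue entry below stores a gimple statement via
   "treemple t; t.g = stmt;", while the label slot of a GIMPLE_COND is
   stored through a tree pointer via "t.tp = gimple_op_ptr (cond, 2);"
   (see maybe_record_in_goto_queue).  */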
54
55 /* Misc functions used in this file. */
56
57 /* Remember and lookup EH landing pad data for arbitrary statements.
58 Really this means any statement that could_throw_p. We could
59 stuff this information into the stmt_ann data structure, but:
60
61 (1) We absolutely rely on this information being kept until
62 we get to rtl. Once we're done with lowering here, if we lose
63 the information there's no way to recover it!
64
65 (2) There are many more statements that *cannot* throw as
66 compared to those that can. We save space by only
67 allocating memory for those that can throw. */
68
69 /* Add statement T in function IFUN to landing pad NUM. */
70
71 static void
72 add_stmt_to_eh_lp_fn (struct function *ifun, gimple *t, int num)
73 {
74 gcc_assert (num != 0);
75
76 if (!get_eh_throw_stmt_table (ifun))
77 set_eh_throw_stmt_table (ifun, hash_map<gimple *, int>::create_ggc (31));
78
79 gcc_assert (!get_eh_throw_stmt_table (ifun)->put (t, num));
80 }
81
82 /* Add statement T in the current function (cfun) to EH landing pad NUM. */
83
84 void
85 add_stmt_to_eh_lp (gimple *t, int num)
86 {
87 add_stmt_to_eh_lp_fn (cfun, t, num);
88 }
89
90 /* Add statement T to the single EH landing pad in REGION. */
91
92 static void
93 record_stmt_eh_region (eh_region region, gimple *t)
94 {
95 if (region == NULL)
96 return;
97 if (region->type == ERT_MUST_NOT_THROW)
98 add_stmt_to_eh_lp_fn (cfun, t, -region->index);
99 else
100 {
101 eh_landing_pad lp = region->landing_pads;
102 if (lp == NULL)
103 lp = gen_eh_landing_pad (region);
104 else
105 gcc_assert (lp->next_lp == NULL);
106 add_stmt_to_eh_lp_fn (cfun, t, lp->index);
107 }
108 }
109
110
111 /* Remove statement T in function IFUN from its EH landing pad. */
112
113 bool
114 remove_stmt_from_eh_lp_fn (struct function *ifun, gimple *t)
115 {
116 if (!get_eh_throw_stmt_table (ifun))
117 return false;
118
119 if (!get_eh_throw_stmt_table (ifun)->get (t))
120 return false;
121
122 get_eh_throw_stmt_table (ifun)->remove (t);
123 return true;
124 }
125
126
127 /* Remove statement T in the current function (cfun) from its
128 EH landing pad. */
129
130 bool
131 remove_stmt_from_eh_lp (gimple *t)
132 {
133 return remove_stmt_from_eh_lp_fn (cfun, t);
134 }
135
136 /* Determine if statement T is inside an EH region in function IFUN.
137 Positive numbers indicate a landing pad index; negative numbers
138 indicate a MUST_NOT_THROW region index; zero indicates that the
139 statement is not recorded in the region table. */
140
141 int
142 lookup_stmt_eh_lp_fn (struct function *ifun, const gimple *t)
143 {
144 if (ifun->eh->throw_stmt_table == NULL)
145 return 0;
146
147 int *lp_nr = ifun->eh->throw_stmt_table->get (const_cast <gimple *> (t));
148 return lp_nr ? *lp_nr : 0;
149 }
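/* For illustration (a sketch, not part of the original file), a caller
   typically interprets the result as:

     int lp_nr = lookup_stmt_eh_lp_fn (cfun, stmt);
     lp_nr > 0   => stmt may throw, to landing pad number lp_nr
     lp_nr < 0   => stmt is inside MUST_NOT_THROW region -lp_nr
     lp_nr == 0  => stmt is not recorded in the region table  */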
150
151 /* Likewise, but always use the current function. */
152
153 int
154 lookup_stmt_eh_lp (const gimple *t)
155 {
156 /* We can get called from initialized data when -fnon-call-exceptions
157 is on; prevent crash. */
158 if (!cfun)
159 return 0;
160 return lookup_stmt_eh_lp_fn (cfun, t);
161 }
162
163 /* First pass of EH node decomposition. Build up a tree of GIMPLE_TRY_FINALLY
164 nodes and LABEL_DECL nodes. We will use this during the second phase to
165 determine if a goto leaves the body of a TRY_FINALLY_EXPR node. */
166
167 struct finally_tree_node
168 {
169 /* When storing a GIMPLE_TRY, we have to record a gimple. However
170 when deciding whether a GOTO to a certain LABEL_DECL (which is a
171 tree) leaves the TRY block, it's necessary to record a tree in
172 this field. Thus a treemple is used. */
173 treemple child;
174 gtry *parent;
175 };
176
177 /* Hashtable helpers. */
178
179 struct finally_tree_hasher : free_ptr_hash <finally_tree_node>
180 {
181 static inline hashval_t hash (const finally_tree_node *);
182 static inline bool equal (const finally_tree_node *,
183 const finally_tree_node *);
184 };
185
186 inline hashval_t
187 finally_tree_hasher::hash (const finally_tree_node *v)
188 {
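  /* The low bits of the pointer carry no information, presumably
     because of alignment, so shift them away before hashing.  */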
189 return (intptr_t)v->child.t >> 4;
190 }
191
192 inline bool
193 finally_tree_hasher::equal (const finally_tree_node *v,
194 const finally_tree_node *c)
195 {
196 return v->child.t == c->child.t;
197 }
198
199 /* Note that this table is *not* marked GTY. It is short-lived. */
200 static hash_table<finally_tree_hasher> *finally_tree;
201
202 static void
203 record_in_finally_tree (treemple child, gtry *parent)
204 {
205 struct finally_tree_node *n;
206 finally_tree_node **slot;
207
208 n = XNEW (struct finally_tree_node);
209 n->child = child;
210 n->parent = parent;
211
212 slot = finally_tree->find_slot (n, INSERT);
213 gcc_assert (!*slot);
214 *slot = n;
215 }
216
217 static void
218 collect_finally_tree (gimple *stmt, gtry *region);
219
220 /* Go through the gimple sequence. Works with collect_finally_tree to
221 record all GIMPLE_LABEL and GIMPLE_TRY statements. */
222
223 static void
224 collect_finally_tree_1 (gimple_seq seq, gtry *region)
225 {
226 gimple_stmt_iterator gsi;
227
228 for (gsi = gsi_start (seq); !gsi_end_p (gsi); gsi_next (&gsi))
229 collect_finally_tree (gsi_stmt (gsi), region);
230 }
231
232 static void
233 collect_finally_tree (gimple *stmt, gtry *region)
234 {
235 treemple temp;
236
237 switch (gimple_code (stmt))
238 {
239 case GIMPLE_LABEL:
240 temp.t = gimple_label_label (as_a <glabel *> (stmt));
241 record_in_finally_tree (temp, region);
242 break;
243
244 case GIMPLE_TRY:
245 if (gimple_try_kind (stmt) == GIMPLE_TRY_FINALLY)
246 {
247 temp.g = stmt;
248 record_in_finally_tree (temp, region);
249 collect_finally_tree_1 (gimple_try_eval (stmt),
250 as_a <gtry *> (stmt));
251 collect_finally_tree_1 (gimple_try_cleanup (stmt), region);
252 }
253 else if (gimple_try_kind (stmt) == GIMPLE_TRY_CATCH)
254 {
255 collect_finally_tree_1 (gimple_try_eval (stmt), region);
256 collect_finally_tree_1 (gimple_try_cleanup (stmt), region);
257 }
258 break;
259
260 case GIMPLE_CATCH:
261 collect_finally_tree_1 (gimple_catch_handler (
262 as_a <gcatch *> (stmt)),
263 region);
264 break;
265
266 case GIMPLE_EH_FILTER:
267 collect_finally_tree_1 (gimple_eh_filter_failure (stmt), region);
268 break;
269
270 case GIMPLE_EH_ELSE:
271 {
272 geh_else *eh_else_stmt = as_a <geh_else *> (stmt);
273 collect_finally_tree_1 (gimple_eh_else_n_body (eh_else_stmt), region);
274 collect_finally_tree_1 (gimple_eh_else_e_body (eh_else_stmt), region);
275 }
276 break;
277
278 default:
279 /* A type, a decl, or some kind of statement that we're not
280 interested in. Don't walk them. */
281 break;
282 }
283 }
284
285
286 /* Use the finally tree to determine if a jump from START to TARGET
287 would leave the try_finally node that START lives in. */
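/* That is, walk the child->parent links upward from START; reaching
   TARGET means START is contained within it (return false), while
   running off the top of the tree means the jump leaves TARGET's
   try_finally body (return true).  */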
288
289 static bool
290 outside_finally_tree (treemple start, gimple *target)
291 {
292 struct finally_tree_node n, *p;
293
294 do
295 {
296 n.child = start;
297 p = finally_tree->find (&n);
298 if (!p)
299 return true;
300 start.g = p->parent;
301 }
302 while (start.g != target);
303
304 return false;
305 }
306
307 /* Second pass of EH node decomposition. Actually transform the GIMPLE_TRY
308 nodes into a set of gotos, magic labels, and eh regions.
309 The eh region creation is straightforward, but frobbing all the gotos
310 and such into shape isn't. */
311
312 /* The sequence into which we record all EH stuff. This will be
313 placed at the end of the function when we're all done. */
314 static gimple_seq eh_seq;
315
316 /* Record whether an EH region contains something that can throw,
317 indexed by EH region number. */
318 static bitmap eh_region_may_contain_throw_map;
319
320 /* The GOTO_QUEUE is an array of GIMPLE_GOTO and GIMPLE_RETURN
321 statements that are seen to escape this GIMPLE_TRY_FINALLY node.
322 The idea is to record a gimple statement for everything except for
323 the conditionals, which get their labels recorded. Since labels are
324 of type 'tree', we need this node to store both gimple and tree
325 objects. REPL_STMT is the sequence used to replace the goto/return
326 statement. CONT_STMT is used to store the statement that allows
327 the return/goto to jump to the original destination. */
328
329 struct goto_queue_node
330 {
331 treemple stmt;
332 location_t location;
333 gimple_seq repl_stmt;
334 gimple *cont_stmt;
335 int index;
336 /* This is used when index >= 0 to indicate that stmt is a label (as
337 opposed to a goto stmt). */
338 int is_label;
339 };
340
341 /* State of the world while lowering. */
342
343 struct leh_state
344 {
345 /* What's "current" while constructing the eh region tree. These
346 correspond to variables of the same name in cfun->eh, which we
347 don't have easy access to. */
348 eh_region cur_region;
349
350 /* What's "current" for the purposes of __builtin_eh_pointer. For
351 a CATCH, this is the associated TRY. For an EH_FILTER, this is
352 the associated ALLOWED_EXCEPTIONS, etc. */
353 eh_region ehp_region;
354
355 /* Processing of TRY_FINALLY requires a bit more state. This is
356 split out into a separate structure so that we don't have to
357 copy so much when processing other nodes. */
358 struct leh_tf_state *tf;
359
360 /* Outer non-cleanup region. */
361 eh_region outer_non_cleanup;
362 };
363
364 struct leh_tf_state
365 {
366 /* Pointer to the GIMPLE_TRY_FINALLY node under discussion. The
367 try_finally_expr is the original GIMPLE_TRY_FINALLY. We need to retain
368 this so that outside_finally_tree can reliably reference the tree used
369 in the collect_finally_tree data structures. */
370 gtry *try_finally_expr;
371 gtry *top_p;
372
373 /* While lowering, a top_p is usually expanded into multiple statements;
374 the following field stores them. */
375 gimple_seq top_p_seq;
376
377 /* The state outside this try_finally node. */
378 struct leh_state *outer;
379
380 /* The exception region created for it. */
381 eh_region region;
382
383 /* The goto queue. */
384 struct goto_queue_node *goto_queue;
385 size_t goto_queue_size;
386 size_t goto_queue_active;
387
388 /* Pointer map to help in searching goto_queue when it is large. */
389 hash_map<gimple *, goto_queue_node *> *goto_queue_map;
390
391 /* The set of unique labels seen as entries in the goto queue. */
392 vec<tree> dest_array;
393
394 /* A label to be added at the end of the completed transformed
395 sequence. It will be set if may_fallthru was true *at one time*,
396 though subsequent transformations may have cleared that flag. */
397 tree fallthru_label;
398
399 /* True if it is possible to fall out the bottom of the try block.
400 Cleared if the fallthru is converted to a goto. */
401 bool may_fallthru;
402
403 /* True if any entry in goto_queue is a GIMPLE_RETURN. */
404 bool may_return;
405
406 /* True if the finally block can receive an exception edge.
407 Cleared if the exception case is handled by code duplication. */
408 bool may_throw;
409 };
410
411 static gimple_seq lower_eh_must_not_throw (struct leh_state *, gtry *);
412
413 /* Search for STMT in the goto queue. Return the replacement,
414 or null if the statement isn't in the queue. */
415
416 #define LARGE_GOTO_QUEUE 20
417
418 static void lower_eh_constructs_1 (struct leh_state *state, gimple_seq *seq);
419
420 static gimple_seq
421 find_goto_replacement (struct leh_tf_state *tf, treemple stmt)
422 {
423 unsigned int i;
424
425 if (tf->goto_queue_active < LARGE_GOTO_QUEUE)
426 {
427 for (i = 0; i < tf->goto_queue_active; i++)
428 if (tf->goto_queue[i].stmt.g == stmt.g)
429 return tf->goto_queue[i].repl_stmt;
430 return NULL;
431 }
432
433 /* If we have a large number of entries in the goto_queue, create a
434 pointer map and use that for searching. */
435
436 if (!tf->goto_queue_map)
437 {
438 tf->goto_queue_map = new hash_map<gimple *, goto_queue_node *>;
439 for (i = 0; i < tf->goto_queue_active; i++)
440 {
441 bool existed = tf->goto_queue_map->put (tf->goto_queue[i].stmt.g,
442 &tf->goto_queue[i]);
443 gcc_assert (!existed);
444 }
445 }
446
447 goto_queue_node **slot = tf->goto_queue_map->get (stmt.g);
448 if (slot != NULL)
449 return ((*slot)->repl_stmt);
450
451 return NULL;
452 }
453
454 /* A subroutine of replace_goto_queue_1. Handles the sub-clauses of a
455 lowered GIMPLE_COND. If, by chance, the replacement is a simple goto,
456 then we can just splat it in, otherwise we add the new stmts immediately
457 after the GIMPLE_COND and redirect. */
458
459 static void
460 replace_goto_queue_cond_clause (tree *tp, struct leh_tf_state *tf,
461 gimple_stmt_iterator *gsi)
462 {
463 tree label;
464 gimple_seq new_seq;
465 treemple temp;
466 location_t loc = gimple_location (gsi_stmt (*gsi));
467
468 temp.tp = tp;
469 new_seq = find_goto_replacement (tf, temp);
470 if (!new_seq)
471 return;
472
473 if (gimple_seq_singleton_p (new_seq)
474 && gimple_code (gimple_seq_first_stmt (new_seq)) == GIMPLE_GOTO)
475 {
476 *tp = gimple_goto_dest (gimple_seq_first_stmt (new_seq));
477 return;
478 }
479
480 label = create_artificial_label (loc);
481 /* Set the new label for the GIMPLE_COND. */
482 *tp = label;
483
484 gsi_insert_after (gsi, gimple_build_label (label), GSI_CONTINUE_LINKING);
485 gsi_insert_seq_after (gsi, gimple_seq_copy (new_seq), GSI_CONTINUE_LINKING);
486 }
487
488 /* The real work of replace_goto_queue. Returns with GSI updated to
489 point to the next statement. */
490
491 static void replace_goto_queue_stmt_list (gimple_seq *, struct leh_tf_state *);
492
493 static void
494 replace_goto_queue_1 (gimple *stmt, struct leh_tf_state *tf,
495 gimple_stmt_iterator *gsi)
496 {
497 gimple_seq seq;
498 treemple temp;
499 temp.g = NULL;
500
501 switch (gimple_code (stmt))
502 {
503 case GIMPLE_GOTO:
504 case GIMPLE_RETURN:
505 temp.g = stmt;
506 seq = find_goto_replacement (tf, temp);
507 if (seq)
508 {
509 gimple_stmt_iterator i;
510 seq = gimple_seq_copy (seq);
511 for (i = gsi_start (seq); !gsi_end_p (i); gsi_next (&i))
512 gimple_set_location (gsi_stmt (i), gimple_location (stmt));
513 gsi_insert_seq_before (gsi, seq, GSI_SAME_STMT);
514 gsi_remove (gsi, false);
515 return;
516 }
517 break;
518
519 case GIMPLE_COND:
520 replace_goto_queue_cond_clause (gimple_op_ptr (stmt, 2), tf, gsi);
521 replace_goto_queue_cond_clause (gimple_op_ptr (stmt, 3), tf, gsi);
522 break;
523
524 case GIMPLE_TRY:
525 replace_goto_queue_stmt_list (gimple_try_eval_ptr (stmt), tf);
526 replace_goto_queue_stmt_list (gimple_try_cleanup_ptr (stmt), tf);
527 break;
528 case GIMPLE_CATCH:
529 replace_goto_queue_stmt_list (gimple_catch_handler_ptr (
530 as_a <gcatch *> (stmt)),
531 tf);
532 break;
533 case GIMPLE_EH_FILTER:
534 replace_goto_queue_stmt_list (gimple_eh_filter_failure_ptr (stmt), tf);
535 break;
536 case GIMPLE_EH_ELSE:
537 {
538 geh_else *eh_else_stmt = as_a <geh_else *> (stmt);
539 replace_goto_queue_stmt_list (gimple_eh_else_n_body_ptr (eh_else_stmt),
540 tf);
541 replace_goto_queue_stmt_list (gimple_eh_else_e_body_ptr (eh_else_stmt),
542 tf);
543 }
544 break;
545
546 default:
547 /* These won't have gotos in them. */
548 break;
549 }
550
551 gsi_next (gsi);
552 }
553
554 /* A subroutine of replace_goto_queue. Handles GIMPLE_SEQ. */
555
556 static void
557 replace_goto_queue_stmt_list (gimple_seq *seq, struct leh_tf_state *tf)
558 {
559 gimple_stmt_iterator gsi = gsi_start (*seq);
560
561 while (!gsi_end_p (gsi))
562 replace_goto_queue_1 (gsi_stmt (gsi), tf, &gsi);
563 }
564
565 /* Replace all goto queue members. */
566
567 static void
568 replace_goto_queue (struct leh_tf_state *tf)
569 {
570 if (tf->goto_queue_active == 0)
571 return;
572 replace_goto_queue_stmt_list (&tf->top_p_seq, tf);
573 replace_goto_queue_stmt_list (&eh_seq, tf);
574 }
575
576 /* Add a new record to the goto queue contained in TF. NEW_STMT is the
577 data to be added, IS_LABEL indicates whether NEW_STMT is a label or
578 a gimple return. */
579
580 static void
581 record_in_goto_queue (struct leh_tf_state *tf,
582 treemple new_stmt,
583 int index,
584 bool is_label,
585 location_t location)
586 {
587 size_t active, size;
588 struct goto_queue_node *q;
589
590 gcc_assert (!tf->goto_queue_map);
591
592 active = tf->goto_queue_active;
593 size = tf->goto_queue_size;
594 if (active >= size)
595 {
596 size = (size ? size * 2 : 32);
597 tf->goto_queue_size = size;
598 tf->goto_queue
599 = XRESIZEVEC (struct goto_queue_node, tf->goto_queue, size);
600 }
601
602 q = &tf->goto_queue[active];
603 tf->goto_queue_active = active + 1;
604
605 memset (q, 0, sizeof (*q));
606 q->stmt = new_stmt;
607 q->index = index;
608 q->location = location;
609 q->is_label = is_label;
610 }
611
612 /* Record label LABEL in the goto queue contained in TF.
613 TF is not null. */
614
615 static void
616 record_in_goto_queue_label (struct leh_tf_state *tf, treemple stmt, tree label,
617 location_t location)
618 {
619 int index;
620 treemple temp, new_stmt;
621
622 if (!label)
623 return;
624
625 /* Computed and non-local gotos do not get processed. Given
626 their nature we can neither tell whether we've escaped the
627 finally block nor redirect them even if we knew. */
628 if (TREE_CODE (label) != LABEL_DECL)
629 return;
630
631 /* No need to record gotos that don't leave the try block. */
632 temp.t = label;
633 if (!outside_finally_tree (temp, tf->try_finally_expr))
634 return;
635
636 if (! tf->dest_array.exists ())
637 {
638 tf->dest_array.create (10);
639 tf->dest_array.quick_push (label);
640 index = 0;
641 }
642 else
643 {
644 int n = tf->dest_array.length ();
645 for (index = 0; index < n; ++index)
646 if (tf->dest_array[index] == label)
647 break;
648 if (index == n)
649 tf->dest_array.safe_push (label);
650 }
651
652 /* In the case of a GOTO we want to record the destination label,
653 since with a GIMPLE_COND we have easy access to the then/else
654 labels. */
655 new_stmt = stmt;
656 record_in_goto_queue (tf, new_stmt, index, true, location);
657 }
658
659 /* For any GIMPLE_GOTO or GIMPLE_RETURN, decide whether it leaves a try_finally
660 node, and if so record that fact in the goto queue associated with that
661 try_finally node. */
662
663 static void
664 maybe_record_in_goto_queue (struct leh_state *state, gimple *stmt)
665 {
666 struct leh_tf_state *tf = state->tf;
667 treemple new_stmt;
668
669 if (!tf)
670 return;
671
672 switch (gimple_code (stmt))
673 {
674 case GIMPLE_COND:
675 {
676 gcond *cond_stmt = as_a <gcond *> (stmt);
677 new_stmt.tp = gimple_op_ptr (cond_stmt, 2);
678 record_in_goto_queue_label (tf, new_stmt,
679 gimple_cond_true_label (cond_stmt),
680 EXPR_LOCATION (*new_stmt.tp));
681 new_stmt.tp = gimple_op_ptr (cond_stmt, 3);
682 record_in_goto_queue_label (tf, new_stmt,
683 gimple_cond_false_label (cond_stmt),
684 EXPR_LOCATION (*new_stmt.tp));
685 }
686 break;
687 case GIMPLE_GOTO:
688 new_stmt.g = stmt;
689 record_in_goto_queue_label (tf, new_stmt, gimple_goto_dest (stmt),
690 gimple_location (stmt));
691 break;
692
693 case GIMPLE_RETURN:
694 tf->may_return = true;
695 new_stmt.g = stmt;
696 record_in_goto_queue (tf, new_stmt, -1, false, gimple_location (stmt));
697 break;
698
699 default:
700 gcc_unreachable ();
701 }
702 }
703
704
705 #if CHECKING_P
706 /* We do not process GIMPLE_SWITCHes for now. As long as the original source
707 was in fact structured and we've not yet done jump threading, none
708 of the labels will leave outer GIMPLE_TRY_FINALLY nodes. Verify this. */
709
710 static void
711 verify_norecord_switch_expr (struct leh_state *state,
712 gswitch *switch_expr)
713 {
714 struct leh_tf_state *tf = state->tf;
715 size_t i, n;
716
717 if (!tf)
718 return;
719
720 n = gimple_switch_num_labels (switch_expr);
721
722 for (i = 0; i < n; ++i)
723 {
724 treemple temp;
725 tree lab = CASE_LABEL (gimple_switch_label (switch_expr, i));
726 temp.t = lab;
727 gcc_assert (!outside_finally_tree (temp, tf->try_finally_expr));
728 }
729 }
730 #else
731 #define verify_norecord_switch_expr(state, switch_expr)
732 #endif
733
734 /* Redirect a RETURN_EXPR pointed to by Q to FINLAB. If MOD is
735 non-null, insert it before the new branch. */
736
737 static void
738 do_return_redirection (struct goto_queue_node *q, tree finlab, gimple_seq mod)
739 {
740 gimple *x;
741
742 /* In the case of a return, the queue node must be a gimple statement. */
743 gcc_assert (!q->is_label);
744
745 /* Note that the return value may have already been computed, e.g.,
746
747 int x;
748 int foo (void)
749 {
750 x = 0;
751 try {
752 return x;
753 } finally {
754 x++;
755 }
756 }
757
758 should return 0, not 1. We don't have to do anything to make
759 this happen because the return value has been placed in the
760 RESULT_DECL already. */
761
762 q->cont_stmt = q->stmt.g;
763
764 if (mod)
765 gimple_seq_add_seq (&q->repl_stmt, mod);
766
767 x = gimple_build_goto (finlab);
768 gimple_set_location (x, q->location);
769 gimple_seq_add_stmt (&q->repl_stmt, x);
770 }
771
772 /* Similar, but easier, for GIMPLE_GOTO. */
773
774 static void
775 do_goto_redirection (struct goto_queue_node *q, tree finlab, gimple_seq mod,
776 struct leh_tf_state *tf)
777 {
778 ggoto *x;
779
780 gcc_assert (q->is_label);
781
782 q->cont_stmt = gimple_build_goto (tf->dest_array[q->index]);
783
784 if (mod)
785 gimple_seq_add_seq (&q->repl_stmt, mod);
786
787 x = gimple_build_goto (finlab);
788 gimple_set_location (x, q->location);
789 gimple_seq_add_stmt (&q->repl_stmt, x);
790 }
791
792 /* Emit a standard landing pad sequence into SEQ for REGION. */
793
794 static void
795 emit_post_landing_pad (gimple_seq *seq, eh_region region)
796 {
797 eh_landing_pad lp = region->landing_pads;
798 glabel *x;
799
800 if (lp == NULL)
801 lp = gen_eh_landing_pad (region);
802
803 lp->post_landing_pad = create_artificial_label (UNKNOWN_LOCATION);
804 EH_LANDING_PAD_NR (lp->post_landing_pad) = lp->index;
805
806 x = gimple_build_label (lp->post_landing_pad);
807 gimple_seq_add_stmt (seq, x);
808 }
809
810 /* Emit a RESX statement into SEQ for REGION. */
811
812 static void
813 emit_resx (gimple_seq *seq, eh_region region)
814 {
815 gresx *x = gimple_build_resx (region->index);
816 gimple_seq_add_stmt (seq, x);
817 if (region->outer)
818 record_stmt_eh_region (region->outer, x);
819 }
820
821 /* Note that the current EH region may contain a throw, or a
822 call to a function which itself may contain a throw. */
823
824 static void
825 note_eh_region_may_contain_throw (eh_region region)
826 {
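  /* bitmap_set_bit returns true iff the bit was previously clear, so
     this loop walks up the region tree and stops at the first region
     that has already been marked.  */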
827 while (bitmap_set_bit (eh_region_may_contain_throw_map, region->index))
828 {
829 if (region->type == ERT_MUST_NOT_THROW)
830 break;
831 region = region->outer;
832 if (region == NULL)
833 break;
834 }
835 }
836
837 /* Check if REGION has been marked as containing a throw. If REGION is
838 NULL, this predicate is false. */
839
840 static inline bool
841 eh_region_may_contain_throw (eh_region r)
842 {
843 return r && bitmap_bit_p (eh_region_may_contain_throw_map, r->index);
844 }
845
846 /* We want to transform
847 try { body; } catch { stuff; }
848 to
849 normal_sequence:
850 body;
851 over:
852 eh_sequence:
853 landing_pad:
854 stuff;
855 goto over;
856
857 TP is a GIMPLE_TRY node. REGION is the region whose post_landing_pad
858 should be placed before the second operand, or NULL. OVER is
859 an existing label that should be put at the exit, or NULL. */
860
861 static gimple_seq
862 frob_into_branch_around (gtry *tp, eh_region region, tree over)
863 {
864 gimple *x;
865 gimple_seq cleanup, result;
866 location_t loc = gimple_location (tp);
867
868 cleanup = gimple_try_cleanup (tp);
869 result = gimple_try_eval (tp);
870
871 if (region)
872 emit_post_landing_pad (&eh_seq, region);
873
874 if (gimple_seq_may_fallthru (cleanup))
875 {
876 if (!over)
877 over = create_artificial_label (loc);
878 x = gimple_build_goto (over);
879 gimple_set_location (x, loc);
880 gimple_seq_add_stmt (&cleanup, x);
881 }
882 gimple_seq_add_seq (&eh_seq, cleanup);
883
884 if (over)
885 {
886 x = gimple_build_label (over);
887 gimple_seq_add_stmt (&result, x);
888 }
889 return result;
890 }
891
892 /* A subroutine of lower_try_finally. Duplicate the sequence SEQ.
893 Make sure to record all new labels found. */
894
895 static gimple_seq
896 lower_try_finally_dup_block (gimple_seq seq, struct leh_state *outer_state,
897 location_t loc)
898 {
899 gtry *region = NULL;
900 gimple_seq new_seq;
901 gimple_stmt_iterator gsi;
902
903 new_seq = copy_gimple_seq_and_replace_locals (seq);
904
905 for (gsi = gsi_start (new_seq); !gsi_end_p (gsi); gsi_next (&gsi))
906 {
907 gimple *stmt = gsi_stmt (gsi);
908 if (LOCATION_LOCUS (gimple_location (stmt)) == UNKNOWN_LOCATION)
909 {
910 tree block = gimple_block (stmt);
911 gimple_set_location (stmt, loc);
912 gimple_set_block (stmt, block);
913 }
914 }
915
916 if (outer_state->tf)
917 region = outer_state->tf->try_finally_expr;
918 collect_finally_tree_1 (new_seq, region);
919
920 return new_seq;
921 }
922
923 /* A subroutine of lower_try_finally. Create a fallthru label for
924 the given try_finally state. The only tricky bit here is that
925 we have to make sure to record the label in our outer context. */
926
927 static tree
928 lower_try_finally_fallthru_label (struct leh_tf_state *tf)
929 {
930 tree label = tf->fallthru_label;
931 treemple temp;
932
933 if (!label)
934 {
935 label = create_artificial_label (gimple_location (tf->try_finally_expr));
936 tf->fallthru_label = label;
937 if (tf->outer->tf)
938 {
939 temp.t = label;
940 record_in_finally_tree (temp, tf->outer->tf->try_finally_expr);
941 }
942 }
943 return label;
944 }
945
946 /* A subroutine of lower_try_finally. If FINALLY consists of a
947 GIMPLE_EH_ELSE node, return it. */
948
949 static inline geh_else *
950 get_eh_else (gimple_seq finally)
951 {
952 gimple *x = gimple_seq_first_stmt (finally);
953 if (gimple_code (x) == GIMPLE_EH_ELSE)
954 {
955 gcc_assert (gimple_seq_singleton_p (finally));
956 return as_a <geh_else *> (x);
957 }
958 return NULL;
959 }
960
961 /* A subroutine of lower_try_finally. If the eh_protect_cleanup_actions
962 langhook returns non-null, then the language requires that the exception
963 path out of a try_finally be treated specially. To wit: the code within
964 the finally block may not itself throw an exception. We have two choices
965 here. First we can duplicate the finally block and wrap it in a
966 must_not_throw region. Second, we can generate code like
967
968 try {
969 finally_block;
970 } catch {
971 if (fintmp == eh_edge)
972 protect_cleanup_actions;
973 }
974
975 where "fintmp" is the temporary used in the switch statement generation
976 alternative considered below. For the nonce, we always choose the first
977 option.
978
979 THIS_STATE may be null if this is a try-cleanup, not a try-finally. */
980
981 static void
982 honor_protect_cleanup_actions (struct leh_state *outer_state,
983 struct leh_state *this_state,
984 struct leh_tf_state *tf)
985 {
986 gimple_seq finally = gimple_try_cleanup (tf->top_p);
987
988 /* EH_ELSE doesn't come from user code; only compiler generated stuff.
989 It does need to be handled here, so as to separate the (different)
990 EH path from the normal path. But we should not attempt to wrap
991 it with a must-not-throw node (which indeed gets in the way). */
992 if (geh_else *eh_else = get_eh_else (finally))
993 {
994 gimple_try_set_cleanup (tf->top_p, gimple_eh_else_n_body (eh_else));
995 finally = gimple_eh_else_e_body (eh_else);
996
997 /* Let the ELSE see the exception that's being processed, but
998 since the cleanup is outside the try block, process it with
999 outer_state, otherwise it may be used as a cleanup for
1000 itself, and Bad Things (TM) ensue. */
1001 eh_region save_ehp = outer_state->ehp_region;
1002 outer_state->ehp_region = this_state->cur_region;
1003 lower_eh_constructs_1 (outer_state, &finally);
1004 outer_state->ehp_region = save_ehp;
1005 }
1006 else
1007 {
1008 /* First check for nothing to do. */
1009 if (lang_hooks.eh_protect_cleanup_actions == NULL)
1010 return;
1011 tree actions = lang_hooks.eh_protect_cleanup_actions ();
1012 if (actions == NULL)
1013 return;
1014
1015 if (this_state)
1016 finally = lower_try_finally_dup_block (finally, outer_state,
1017 gimple_location (tf->try_finally_expr));
1018
1019 /* If this cleanup consists of a TRY_CATCH_EXPR with TRY_CATCH_IS_CLEANUP
1020 set, the handler of the TRY_CATCH_EXPR is another cleanup which ought
1021 to be in an enclosing scope, but needs to be implemented at this level
1022 to avoid a nesting violation (see wrap_temporary_cleanups in
1023 cp/decl.c). Since it's logically at an outer level, we should call
1024 terminate before we get to it, so strip it away before adding the
1025 MUST_NOT_THROW filter. */
1026 gimple_stmt_iterator gsi = gsi_start (finally);
1027 gimple *x = gsi_stmt (gsi);
1028 if (gimple_code (x) == GIMPLE_TRY
1029 && gimple_try_kind (x) == GIMPLE_TRY_CATCH
1030 && gimple_try_catch_is_cleanup (x))
1031 {
1032 gsi_insert_seq_before (&gsi, gimple_try_eval (x), GSI_SAME_STMT);
1033 gsi_remove (&gsi, false);
1034 }
1035
1036 /* Wrap the block with protect_cleanup_actions as the action. */
1037 geh_mnt *eh_mnt = gimple_build_eh_must_not_throw (actions);
1038 gtry *try_stmt = gimple_build_try (finally,
1039 gimple_seq_alloc_with_stmt (eh_mnt),
1040 GIMPLE_TRY_CATCH);
1041 finally = lower_eh_must_not_throw (outer_state, try_stmt);
1042 }
1043
1044 /* Drop all of this into the exception sequence. */
1045 emit_post_landing_pad (&eh_seq, tf->region);
1046 gimple_seq_add_seq (&eh_seq, finally);
1047 if (gimple_seq_may_fallthru (finally))
1048 emit_resx (&eh_seq, tf->region);
1049
1050 /* Having now been handled, EH isn't to be considered with
1051 the rest of the outgoing edges. */
1052 tf->may_throw = false;
1053 }
1054
1055 /* A subroutine of lower_try_finally. We have determined that there is
1056 no fallthru edge out of the finally block. This means that there is
1057 no outgoing edge corresponding to any incoming edge. Restructure the
1058 try_finally node for this special case. */
1059
1060 static void
1061 lower_try_finally_nofallthru (struct leh_state *state,
1062 struct leh_tf_state *tf)
1063 {
1064 tree lab;
1065 gimple *x;
1066 geh_else *eh_else;
1067 gimple_seq finally;
1068 struct goto_queue_node *q, *qe;
1069
1070 lab = create_artificial_label (gimple_location (tf->try_finally_expr));
1071
1072 /* We expect that tf->top_p is a GIMPLE_TRY. */
1073 finally = gimple_try_cleanup (tf->top_p);
1074 tf->top_p_seq = gimple_try_eval (tf->top_p);
1075
1076 x = gimple_build_label (lab);
1077 gimple_seq_add_stmt (&tf->top_p_seq, x);
1078
1079 q = tf->goto_queue;
1080 qe = q + tf->goto_queue_active;
1081 for (; q < qe; ++q)
1082 if (q->index < 0)
1083 do_return_redirection (q, lab, NULL);
1084 else
1085 do_goto_redirection (q, lab, NULL, tf);
1086
1087 replace_goto_queue (tf);
1088
1089 /* Emit the finally block into the stream. Lower EH_ELSE at this time. */
1090 eh_else = get_eh_else (finally);
1091 if (eh_else)
1092 {
1093 finally = gimple_eh_else_n_body (eh_else);
1094 lower_eh_constructs_1 (state, &finally);
1095 gimple_seq_add_seq (&tf->top_p_seq, finally);
1096
1097 if (tf->may_throw)
1098 {
1099 finally = gimple_eh_else_e_body (eh_else);
1100 lower_eh_constructs_1 (state, &finally);
1101
1102 emit_post_landing_pad (&eh_seq, tf->region);
1103 gimple_seq_add_seq (&eh_seq, finally);
1104 }
1105 }
1106 else
1107 {
1108 lower_eh_constructs_1 (state, &finally);
1109 gimple_seq_add_seq (&tf->top_p_seq, finally);
1110
1111 if (tf->may_throw)
1112 {
1113 emit_post_landing_pad (&eh_seq, tf->region);
1114
1115 x = gimple_build_goto (lab);
1116 gimple_set_location (x, gimple_location (tf->try_finally_expr));
1117 gimple_seq_add_stmt (&eh_seq, x);
1118 }
1119 }
1120 }
1121
1122 /* A subroutine of lower_try_finally. We have determined that there is
1123 exactly one destination of the finally block. Restructure the
1124 try_finally node for this special case. */
1125
1126 static void
1127 lower_try_finally_onedest (struct leh_state *state, struct leh_tf_state *tf)
1128 {
1129 struct goto_queue_node *q, *qe;
1130 geh_else *eh_else;
1131 glabel *label_stmt;
1132 gimple *x;
1133 gimple_seq finally;
1134 gimple_stmt_iterator gsi;
1135 tree finally_label;
1136 location_t loc = gimple_location (tf->try_finally_expr);
1137
1138 finally = gimple_try_cleanup (tf->top_p);
1139 tf->top_p_seq = gimple_try_eval (tf->top_p);
1140
1141 /* Since there's only one destination, and the destination edge can be
1142 either EH or non-EH, all of our incoming edges are of the same type.
1143 Therefore we can lower EH_ELSE immediately. */
1144 eh_else = get_eh_else (finally);
1145 if (eh_else)
1146 {
1147 if (tf->may_throw)
1148 finally = gimple_eh_else_e_body (eh_else);
1149 else
1150 finally = gimple_eh_else_n_body (eh_else);
1151 }
1152
1153 lower_eh_constructs_1 (state, &finally);
1154
1155 for (gsi = gsi_start (finally); !gsi_end_p (gsi); gsi_next (&gsi))
1156 {
1157 gimple *stmt = gsi_stmt (gsi);
1158 if (LOCATION_LOCUS (gimple_location (stmt)) == UNKNOWN_LOCATION)
1159 {
1160 tree block = gimple_block (stmt);
1161 gimple_set_location (stmt, gimple_location (tf->try_finally_expr));
1162 gimple_set_block (stmt, block);
1163 }
1164 }
1165
1166 if (tf->may_throw)
1167 {
1168 /* Only reachable via the exception edge. Add the given label to
1169 the head of the FINALLY block. Append a RESX at the end. */
1170 emit_post_landing_pad (&eh_seq, tf->region);
1171 gimple_seq_add_seq (&eh_seq, finally);
1172 emit_resx (&eh_seq, tf->region);
1173 return;
1174 }
1175
1176 if (tf->may_fallthru)
1177 {
1178 /* Only reachable via the fallthru edge. Do nothing but let
1179 the two blocks run together; we'll fall out the bottom. */
1180 gimple_seq_add_seq (&tf->top_p_seq, finally);
1181 return;
1182 }
1183
1184 finally_label = create_artificial_label (loc);
1185 label_stmt = gimple_build_label (finally_label);
1186 gimple_seq_add_stmt (&tf->top_p_seq, label_stmt);
1187
1188 gimple_seq_add_seq (&tf->top_p_seq, finally);
1189
1190 q = tf->goto_queue;
1191 qe = q + tf->goto_queue_active;
1192
1193 if (tf->may_return)
1194 {
1195 /* Reachable by return expressions only. Redirect them. */
1196 for (; q < qe; ++q)
1197 do_return_redirection (q, finally_label, NULL);
1198 replace_goto_queue (tf);
1199 }
1200 else
1201 {
1202 /* Reachable by goto expressions only. Redirect them. */
1203 for (; q < qe; ++q)
1204 do_goto_redirection (q, finally_label, NULL, tf);
1205 replace_goto_queue (tf);
1206
1207 if (tf->dest_array[0] == tf->fallthru_label)
1208 {
1209 /* Reachable by goto to fallthru label only. Redirect it
1210 to the new label (already created, sadly), and do not
1211 emit the final branch out, or the fallthru label. */
1212 tf->fallthru_label = NULL;
1213 return;
1214 }
1215 }
1216
1217 /* Place the original return/goto to the original destination
1218 immediately after the finally block. */
1219 x = tf->goto_queue[0].cont_stmt;
1220 gimple_seq_add_stmt (&tf->top_p_seq, x);
1221 maybe_record_in_goto_queue (state, x);
1222 }
1223
1224 /* A subroutine of lower_try_finally. There are multiple edges incoming
1225 and outgoing from the finally block. Implement this by duplicating the
1226 finally block for every destination. */
1227
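/* For illustration (a sketch, not part of the original file): with a
   return and a "goto L" escaping the try block of

       try { ... } finally { F; }

   this strategy emits, roughly,

       lab1: F'; return;          <- duplicate for the return path
       lab2: F''; goto L;         <- duplicate for the goto path

   in the normal sequence, plus "landing-pad: F'''; resx;" in eh_seq
   for the exception path.  */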
1228 static void
1229 lower_try_finally_copy (struct leh_state *state, struct leh_tf_state *tf)
1230 {
1231 gimple_seq finally;
1232 gimple_seq new_stmt;
1233 gimple_seq seq;
1234 gimple *x;
1235 geh_else *eh_else;
1236 tree tmp;
1237 location_t tf_loc = gimple_location (tf->try_finally_expr);
1238
1239 finally = gimple_try_cleanup (tf->top_p);
1240
1241 /* Notice EH_ELSE, and simplify some of the remaining code
1242 by considering FINALLY to be the normal return path only. */
1243 eh_else = get_eh_else (finally);
1244 if (eh_else)
1245 finally = gimple_eh_else_n_body (eh_else);
1246
1247 tf->top_p_seq = gimple_try_eval (tf->top_p);
1248 new_stmt = NULL;
1249
1250 if (tf->may_fallthru)
1251 {
1252 seq = lower_try_finally_dup_block (finally, state, tf_loc);
1253 lower_eh_constructs_1 (state, &seq);
1254 gimple_seq_add_seq (&new_stmt, seq);
1255
1256 tmp = lower_try_finally_fallthru_label (tf);
1257 x = gimple_build_goto (tmp);
1258 gimple_set_location (x, tf_loc);
1259 gimple_seq_add_stmt (&new_stmt, x);
1260 }
1261
1262 if (tf->may_throw)
1263 {
1264 /* We don't need to copy the EH path of EH_ELSE,
1265 since it is only emitted once. */
1266 if (eh_else)
1267 seq = gimple_eh_else_e_body (eh_else);
1268 else
1269 seq = lower_try_finally_dup_block (finally, state, tf_loc);
1270 lower_eh_constructs_1 (state, &seq);
1271
1272 emit_post_landing_pad (&eh_seq, tf->region);
1273 gimple_seq_add_seq (&eh_seq, seq);
1274 emit_resx (&eh_seq, tf->region);
1275 }
1276
1277 if (tf->goto_queue)
1278 {
1279 struct goto_queue_node *q, *qe;
1280 int return_index, index;
1281 struct labels_s
1282 {
1283 struct goto_queue_node *q;
1284 tree label;
1285 } *labels;
1286
1287 return_index = tf->dest_array.length ();
1288 labels = XCNEWVEC (struct labels_s, return_index + 1);
1289
1290 q = tf->goto_queue;
1291 qe = q + tf->goto_queue_active;
1292 for (; q < qe; q++)
1293 {
1294 index = q->index < 0 ? return_index : q->index;
1295
1296 if (!labels[index].q)
1297 labels[index].q = q;
1298 }
1299
1300 for (index = 0; index < return_index + 1; index++)
1301 {
1302 tree lab;
1303
1304 q = labels[index].q;
1305 if (! q)
1306 continue;
1307
1308 lab = labels[index].label
1309 = create_artificial_label (tf_loc);
1310
1311 if (index == return_index)
1312 do_return_redirection (q, lab, NULL);
1313 else
1314 do_goto_redirection (q, lab, NULL, tf);
1315
1316 x = gimple_build_label (lab);
1317 gimple_seq_add_stmt (&new_stmt, x);
1318
1319 seq = lower_try_finally_dup_block (finally, state, q->location);
1320 lower_eh_constructs_1 (state, &seq);
1321 gimple_seq_add_seq (&new_stmt, seq);
1322
1323 gimple_seq_add_stmt (&new_stmt, q->cont_stmt);
1324 maybe_record_in_goto_queue (state, q->cont_stmt);
1325 }
1326
1327 for (q = tf->goto_queue; q < qe; q++)
1328 {
1329 tree lab;
1330
1331 index = q->index < 0 ? return_index : q->index;
1332
1333 if (labels[index].q == q)
1334 continue;
1335
1336 lab = labels[index].label;
1337
1338 if (index == return_index)
1339 do_return_redirection (q, lab, NULL);
1340 else
1341 do_goto_redirection (q, lab, NULL, tf);
1342 }
1343
1344 replace_goto_queue (tf);
1345 free (labels);
1346 }
1347
1348 /* The new stmts must be linked after running replace_goto_queue,
1349 so that the same goto stmts are not processed twice. */
1350 gimple_seq_add_seq (&tf->top_p_seq, new_stmt);
1351 }
1352
1353 /* A subroutine of lower_try_finally. There are multiple edges incoming
1354 and outgoing from the finally block. Implement this by instrumenting
1355 each incoming edge and creating a switch statement at the end of the
1356 finally block that branches to the appropriate destination. */
1357
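/* For illustration (a sketch, not part of the original file): with a
   return and a "goto L" escaping the try block of

       try { ... } finally { F; }

   this strategy rewrites each escaping edge to set a selector, then
   dispatches on it after a single copy of F, roughly:

       finally_tmp = <return_index>; goto finally_label;  <- at return
       finally_tmp = <index of L>;   goto finally_label;  <- at goto L
     finally_label:
       F;
       switch (finally_tmp)
         {
         case <return_index>: <original return>;
         case <index of L>: goto L;
         }  */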
1358 static void
1359 lower_try_finally_switch (struct leh_state *state, struct leh_tf_state *tf)
1360 {
1361 struct goto_queue_node *q, *qe;
1362 tree finally_tmp, finally_label;
1363 int return_index, eh_index, fallthru_index;
1364 int nlabels, ndests, j, last_case_index;
1365 tree last_case;
1366 auto_vec<tree> case_label_vec;
1367 gimple_seq switch_body = NULL;
1368 gimple *x;
1369 geh_else *eh_else;
1370 tree tmp;
1371 gimple *switch_stmt;
1372 gimple_seq finally;
1373 hash_map<tree, gimple *> *cont_map = NULL;
1374 /* The location of the TRY_FINALLY stmt. */
1375 location_t tf_loc = gimple_location (tf->try_finally_expr);
1376 /* The location of the finally block. */
1377 location_t finally_loc;
1378
1379 finally = gimple_try_cleanup (tf->top_p);
1380 eh_else = get_eh_else (finally);
1381
1382 /* Mash the TRY block to the head of the chain. */
1383 tf->top_p_seq = gimple_try_eval (tf->top_p);
1384
1385 /* The location of the finally is either the last stmt in the finally
1386 block or the location of the TRY_FINALLY itself. */
1387 x = gimple_seq_last_stmt (finally);
1388 finally_loc = x ? gimple_location (x) : tf_loc;
1389
1390 /* Prepare for switch statement generation. */
1391 nlabels = tf->dest_array.length ();
1392 return_index = nlabels;
1393 eh_index = return_index + tf->may_return;
1394 fallthru_index = eh_index + (tf->may_throw && !eh_else);
1395 ndests = fallthru_index + tf->may_fallthru;
1396
1397 finally_tmp = create_tmp_var (integer_type_node, "finally_tmp");
1398 finally_label = create_artificial_label (finally_loc);
1399
1400 /* We use vec::quick_push on case_label_vec throughout this function,
1401 since we know the size in advance and allocate precisely as much
1402 space as needed. */
1403 case_label_vec.create (ndests);
1404 last_case = NULL;
1405 last_case_index = 0;
1406
1407 /* Begin inserting code for getting to the finally block. Things
1408 are done in this order to correspond to the sequence in which the
1409 code is laid out. */
1410
1411 if (tf->may_fallthru)
1412 {
1413 x = gimple_build_assign (finally_tmp,
1414 build_int_cst (integer_type_node,
1415 fallthru_index));
1416 gimple_set_location (x, finally_loc);
1417 gimple_seq_add_stmt (&tf->top_p_seq, x);
1418
1419 tmp = build_int_cst (integer_type_node, fallthru_index);
1420 last_case = build_case_label (tmp, NULL,
1421 create_artificial_label (finally_loc));
1422 case_label_vec.quick_push (last_case);
1423 last_case_index++;
1424
1425 x = gimple_build_label (CASE_LABEL (last_case));
1426 gimple_seq_add_stmt (&switch_body, x);
1427
1428 tmp = lower_try_finally_fallthru_label (tf);
1429 x = gimple_build_goto (tmp);
1430 gimple_set_location (x, finally_loc);
1431 gimple_seq_add_stmt (&switch_body, x);
1432 }
1433
1434 /* For EH_ELSE, emit the exception path (plus resx) now, then
1435 subsequently we only need consider the normal path. */
1436 if (eh_else)
1437 {
1438 if (tf->may_throw)
1439 {
1440 finally = gimple_eh_else_e_body (eh_else);
1441 lower_eh_constructs_1 (state, &finally);
1442
1443 emit_post_landing_pad (&eh_seq, tf->region);
1444 gimple_seq_add_seq (&eh_seq, finally);
1445 emit_resx (&eh_seq, tf->region);
1446 }
1447
1448 finally = gimple_eh_else_n_body (eh_else);
1449 }
1450 else if (tf->may_throw)
1451 {
1452 emit_post_landing_pad (&eh_seq, tf->region);
1453
1454 x = gimple_build_assign (finally_tmp,
1455 build_int_cst (integer_type_node, eh_index));
1456 gimple_seq_add_stmt (&eh_seq, x);
1457
1458 x = gimple_build_goto (finally_label);
1459 gimple_set_location (x, tf_loc);
1460 gimple_seq_add_stmt (&eh_seq, x);
1461
1462 tmp = build_int_cst (integer_type_node, eh_index);
1463 last_case = build_case_label (tmp, NULL,
1464 create_artificial_label (tf_loc));
1465 case_label_vec.quick_push (last_case);
1466 last_case_index++;
1467
1468 x = gimple_build_label (CASE_LABEL (last_case));
1469 gimple_seq_add_stmt (&eh_seq, x);
1470 emit_resx (&eh_seq, tf->region);
1471 }
1472
1473 x = gimple_build_label (finally_label);
1474 gimple_seq_add_stmt (&tf->top_p_seq, x);
1475
1476 lower_eh_constructs_1 (state, &finally);
1477 gimple_seq_add_seq (&tf->top_p_seq, finally);
1478
1479 /* Redirect each incoming goto edge. */
1480 q = tf->goto_queue;
1481 qe = q + tf->goto_queue_active;
1482 j = last_case_index + tf->may_return;
1483 /* Prepare the assignments to finally_tmp that are executed upon the
1484 entrance through a particular edge. */
1485 for (; q < qe; ++q)
1486 {
1487 gimple_seq mod = NULL;
1488 int switch_id;
1489 unsigned int case_index;
1490
1491 if (q->index < 0)
1492 {
1493 x = gimple_build_assign (finally_tmp,
1494 build_int_cst (integer_type_node,
1495 return_index));
1496 gimple_seq_add_stmt (&mod, x);
1497 do_return_redirection (q, finally_label, mod);
1498 switch_id = return_index;
1499 }
1500 else
1501 {
1502 x = gimple_build_assign (finally_tmp,
1503 build_int_cst (integer_type_node, q->index));
1504 gimple_seq_add_stmt (&mod, x);
1505 do_goto_redirection (q, finally_label, mod, tf);
1506 switch_id = q->index;
1507 }
1508
1509 case_index = j + q->index;
1510 if (case_label_vec.length () <= case_index || !case_label_vec[case_index])
1511 {
1512 tree case_lab;
1513 tmp = build_int_cst (integer_type_node, switch_id);
1514 case_lab = build_case_label (tmp, NULL,
1515 create_artificial_label (tf_loc));
1516 /* We store the cont_stmt in the pointer map, so that we can recover
1517 it in the loop below. */
1518 if (!cont_map)
1519 cont_map = new hash_map<tree, gimple *>;
1520 cont_map->put (case_lab, q->cont_stmt);
1521 case_label_vec.quick_push (case_lab);
1522 }
1523 }
1524 for (j = last_case_index; j < last_case_index + nlabels; j++)
1525 {
1526 gimple *cont_stmt;
1527
1528 last_case = case_label_vec[j];
1529
1530 gcc_assert (last_case);
1531 gcc_assert (cont_map);
1532
1533 cont_stmt = *cont_map->get (last_case);
1534
1535 x = gimple_build_label (CASE_LABEL (last_case));
1536 gimple_seq_add_stmt (&switch_body, x);
1537 gimple_seq_add_stmt (&switch_body, cont_stmt);
1538 maybe_record_in_goto_queue (state, cont_stmt);
1539 }
1540 if (cont_map)
1541 delete cont_map;
1542
1543 replace_goto_queue (tf);
1544
1545 /* Make sure that the last case is the default label, as one is required.
1546 Then sort the labels, which is also required in GIMPLE. */
1547 CASE_LOW (last_case) = NULL;
1548 tree tem = case_label_vec.pop ();
1549 gcc_assert (tem == last_case);
1550 sort_case_labels (case_label_vec);
1551
1552 /* Build the switch statement, setting last_case to be the default
1553 label. */
1554 switch_stmt = gimple_build_switch (finally_tmp, last_case,
1555 case_label_vec);
1556 gimple_set_location (switch_stmt, finally_loc);
1557
1558 /* SWITCH_STMT must be linked after running replace_goto_queue,
1559 so that the same goto stmts are not processed twice. */
1560 gimple_seq_add_stmt (&tf->top_p_seq, switch_stmt);
1561 gimple_seq_add_seq (&tf->top_p_seq, switch_body);
1562 }
1563
1564 /* Decide whether or not we are going to duplicate the finally block.
1565 There are several considerations.
1566
1567 In particular, we'd like to prevent egregious code growth. One way to
1568 do this is to estimate the size of the finally block, multiply
1569 that by the number of copies we'd need to make, and compare against
1570 the estimate of the size of the switch machinery we'd have to add. */
1571
1572 static bool
1573 decide_copy_try_finally (int ndests, bool may_throw, gimple_seq finally)
1574 {
1575 int f_estimate, sw_estimate;
1576 geh_else *eh_else;
1577
1578 /* If there's an EH_ELSE involved, the exception path is separate
1579 and really doesn't come into play for this computation. */
1580 eh_else = get_eh_else (finally);
1581 if (eh_else)
1582 {
1583 ndests -= may_throw;
1584 finally = gimple_eh_else_n_body (eh_else);
1585 }
1586
1587 if (!optimize)
1588 {
1589 gimple_stmt_iterator gsi;
1590
1591 if (ndests == 1)
1592 return true;
1593
1594 for (gsi = gsi_start (finally); !gsi_end_p (gsi); gsi_next (&gsi))
1595 {
1596 /* Duplicate __builtin_stack_restore in the hope of eliminating it
1597 on the EH paths and, consequently, useless cleanups. */
1598 gimple *stmt = gsi_stmt (gsi);
1599 if (!is_gimple_debug (stmt)
1600 && !gimple_clobber_p (stmt)
1601 && !gimple_call_builtin_p (stmt, BUILT_IN_STACK_RESTORE))
1602 return false;
1603 }
1604 return true;
1605 }
1606
1607 /* Duplicating emits the finally estimate N times, plus N gotos. */
1608 f_estimate = estimate_num_insns_seq (finally, &eni_size_weights);
1609 f_estimate = (f_estimate + 1) * ndests;
1610
1611 /* Switch statement (cost 10), N variable assignments, N gotos. */
1612 sw_estimate = 10 + 2 * ndests;
1613
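  /* Worked example with illustrative numbers: for ndests == 3 and a
     finally block estimated at 9 insns, f_estimate == (9 + 1) * 3 == 30
     while sw_estimate == 10 + 2 * 3 == 16, so when optimizing for size
     the switch wins below; at -O2 the "f_estimate < 100" test still
     chooses duplication.  */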
1614 /* Optimize for size clearly wants our best guess. */
1615 if (optimize_function_for_size_p (cfun))
1616 return f_estimate < sw_estimate;
1617
1618 /* ??? These numbers are completely made up so far. */
1619 if (optimize > 1)
1620 return f_estimate < 100 || f_estimate < sw_estimate * 2;
1621 else
1622 return f_estimate < 40 || f_estimate * 2 < sw_estimate * 3;
1623 }
1624
1625 /* STATE's current region is the enclosing region for a possible
1626 cleanup region, or the region itself. Returns TRUE if such a
1627 region would be unreachable.
1628
1629 Cleanup regions within a must-not-throw region aren't actually reachable
1630 even if there are throwing stmts within them, because the personality
1631 routine will call terminate before unwinding. */
1632
1633 static bool
1634 cleanup_is_dead_in (leh_state *state)
1635 {
1636 if (flag_checking)
1637 {
1638 eh_region reg = state->cur_region;
1639 while (reg && reg->type == ERT_CLEANUP)
1640 reg = reg->outer;
1641
1642 gcc_assert (reg == state->outer_non_cleanup);
1643 }
1644
1645 eh_region reg = state->outer_non_cleanup;
1646 return (reg && reg->type == ERT_MUST_NOT_THROW);
1647 }
1648
1649 /* A subroutine of lower_eh_constructs_1. Lower a GIMPLE_TRY_FINALLY node
1650 to a sequence of labels and blocks, plus the exception region trees
1651 that record all the magic. This is complicated by the need to
1652 arrange for the FINALLY block to be executed on all exits. */
1653
1654 static gimple_seq
1655 lower_try_finally (struct leh_state *state, gtry *tp)
1656 {
1657 struct leh_tf_state this_tf;
1658 struct leh_state this_state;
1659 int ndests;
1660 gimple_seq old_eh_seq;
1661
1662 /* Process the try block. */
1663
1664 memset (&this_tf, 0, sizeof (this_tf));
1665 this_tf.try_finally_expr = tp;
1666 this_tf.top_p = tp;
1667 this_tf.outer = state;
1668 if (using_eh_for_cleanups_p () && !cleanup_is_dead_in (state))
1669 {
1670 this_tf.region = gen_eh_region_cleanup (state->cur_region);
1671 this_state.cur_region = this_tf.region;
1672 }
1673 else
1674 {
1675 this_tf.region = NULL;
1676 this_state.cur_region = state->cur_region;
1677 }
1678
1679 this_state.outer_non_cleanup = state->outer_non_cleanup;
1680 this_state.ehp_region = state->ehp_region;
1681 this_state.tf = &this_tf;
1682
1683 old_eh_seq = eh_seq;
1684 eh_seq = NULL;
1685
1686 lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));
1687
1688 /* Determine if the try block is escaped through the bottom. */
1689 this_tf.may_fallthru = gimple_seq_may_fallthru (gimple_try_eval (tp));
1690
1691 /* Determine if any exceptions are possible within the try block. */
1692 if (this_tf.region)
1693 this_tf.may_throw = eh_region_may_contain_throw (this_tf.region);
1694 if (this_tf.may_throw)
1695 honor_protect_cleanup_actions (state, &this_state, &this_tf);
1696
1697 /* Determine how many edges (still) reach the finally block. Or rather,
1698 how many destinations are reached by the finally block. Use this to
1699 determine how we process the finally block itself. */
1700
1701 ndests = this_tf.dest_array.length ();
1702 ndests += this_tf.may_fallthru;
1703 ndests += this_tf.may_return;
1704 ndests += this_tf.may_throw;
1705
1706 /* If the FINALLY block is not reachable, dike it out. */
1707 if (ndests == 0)
1708 {
1709 gimple_seq_add_seq (&this_tf.top_p_seq, gimple_try_eval (tp));
1710 gimple_try_set_cleanup (tp, NULL);
1711 }
1712 /* If the finally block doesn't fall through, then any destination
1713 we might try to impose there isn't reached either. There may be
1714 some minor amount of cleanup and redirection still needed. */
1715 else if (!gimple_seq_may_fallthru (gimple_try_cleanup (tp)))
1716 lower_try_finally_nofallthru (state, &this_tf);
1717
1718 /* We can easily special-case redirection to a single destination. */
1719 else if (ndests == 1)
1720 lower_try_finally_onedest (state, &this_tf);
1721 else if (decide_copy_try_finally (ndests, this_tf.may_throw,
1722 gimple_try_cleanup (tp)))
1723 lower_try_finally_copy (state, &this_tf);
1724 else
1725 lower_try_finally_switch (state, &this_tf);
1726
1727 /* If someone requested we add a label at the end of the transformed
1728 block, do so. */
1729 if (this_tf.fallthru_label)
1730 {
1731 /* This must be reached only if ndests == 0. */
1732 gimple *x = gimple_build_label (this_tf.fallthru_label);
1733 gimple_seq_add_stmt (&this_tf.top_p_seq, x);
1734 }
1735
1736 this_tf.dest_array.release ();
1737 free (this_tf.goto_queue);
1738 if (this_tf.goto_queue_map)
1739 delete this_tf.goto_queue_map;
1740
1741 /* If there was an old (aka outer) eh_seq, append the current eh_seq.
1742 If there was no old eh_seq, then the append is trivially already done. */
1743 if (old_eh_seq)
1744 {
1745 if (eh_seq == NULL)
1746 eh_seq = old_eh_seq;
1747 else
1748 {
1749 gimple_seq new_eh_seq = eh_seq;
1750 eh_seq = old_eh_seq;
1751 gimple_seq_add_seq (&eh_seq, new_eh_seq);
1752 }
1753 }
1754
1755 return this_tf.top_p_seq;
1756 }
1757
1758 /* A subroutine of lower_eh_constructs_1. Lower a GIMPLE_TRY_CATCH with a
1759 list of GIMPLE_CATCH to a sequence of labels and blocks, plus the
1760 exception region trees that record all the magic. */
1761
1762 static gimple_seq
1763 lower_catch (struct leh_state *state, gtry *tp)
1764 {
1765 eh_region try_region = NULL;
1766 struct leh_state this_state = *state;
1767 gimple_stmt_iterator gsi;
1768 tree out_label;
1769 gimple_seq new_seq, cleanup;
1770 gimple *x;
1771 geh_dispatch *eh_dispatch;
1772 location_t try_catch_loc = gimple_location (tp);
1773 location_t catch_loc = UNKNOWN_LOCATION;
1774
1775 if (flag_exceptions)
1776 {
1777 try_region = gen_eh_region_try (state->cur_region);
1778 this_state.cur_region = try_region;
1779 this_state.outer_non_cleanup = this_state.cur_region;
1780 }
1781
1782 lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));
1783
1784 if (!eh_region_may_contain_throw (try_region))
1785 return gimple_try_eval (tp);
1786
1787 new_seq = NULL;
1788 eh_dispatch = gimple_build_eh_dispatch (try_region->index);
1789 gimple_seq_add_stmt (&new_seq, eh_dispatch);
1790 emit_resx (&new_seq, try_region);
1791
1792 this_state.cur_region = state->cur_region;
1793 this_state.outer_non_cleanup = state->outer_non_cleanup;
1794 this_state.ehp_region = try_region;
1795
1796 /* Add eh_seq from lowering EH in the cleanup sequence after the cleanup
1797 itself, so that e.g. for coverage purposes the nested cleanups don't
1798 appear before the cleanup body. See PR64634 for details. */
1799 gimple_seq old_eh_seq = eh_seq;
1800 eh_seq = NULL;
1801
1802 out_label = NULL;
1803 cleanup = gimple_try_cleanup (tp);
1804 for (gsi = gsi_start (cleanup);
1805 !gsi_end_p (gsi);
1806 gsi_next (&gsi))
1807 {
1808 eh_catch c;
1809 gcatch *catch_stmt;
1810 gimple_seq handler;
1811
1812 catch_stmt = as_a <gcatch *> (gsi_stmt (gsi));
1813 if (catch_loc == UNKNOWN_LOCATION)
1814 catch_loc = gimple_location (catch_stmt);
1815 c = gen_eh_region_catch (try_region, gimple_catch_types (catch_stmt));
1816
1817 handler = gimple_catch_handler (catch_stmt);
1818 lower_eh_constructs_1 (&this_state, &handler);
1819
1820 c->label = create_artificial_label (UNKNOWN_LOCATION);
1821 x = gimple_build_label (c->label);
1822 gimple_seq_add_stmt (&new_seq, x);
1823
1824 gimple_seq_add_seq (&new_seq, handler);
1825
1826 if (gimple_seq_may_fallthru (new_seq))
1827 {
1828 if (!out_label)
1829 out_label = create_artificial_label (try_catch_loc);
1830
1831 x = gimple_build_goto (out_label);
1832 gimple_seq_add_stmt (&new_seq, x);
1833 }
1834 if (!c->type_list)
1835 break;
1836 }
1837
1838 /* Try to set a location on the dispatching construct to avoid inheriting
1839 the location of the previous statement. */
1840 gimple_set_location (eh_dispatch, catch_loc);
1841
1842 gimple_try_set_cleanup (tp, new_seq);
1843
1844 gimple_seq new_eh_seq = eh_seq;
1845 eh_seq = old_eh_seq;
1846 gimple_seq ret_seq = frob_into_branch_around (tp, try_region, out_label);
1847 gimple_seq_add_seq (&eh_seq, new_eh_seq);
1848 return ret_seq;
1849 }
1850
1851 /* A subroutine of lower_eh_constructs_1. Lower a GIMPLE_TRY with a
1852 GIMPLE_EH_FILTER to a sequence of labels and blocks, plus the exception
1853 region trees that record all the magic. */
1854
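/* A rough sketch of the shape produced (illustrative only): for

       try { body } eh_filter (T...) { failure }

   we build approximately

       body
       goto <over>;
     <dispatch>:
       eh_dispatch <region>;
       resx <region>;
     <allowed>:
       failure
     <over>:
 */
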
1855 static gimple_seq
1856 lower_eh_filter (struct leh_state *state, gtry *tp)
1857 {
1858 struct leh_state this_state = *state;
1859 eh_region this_region = NULL;
1860 gimple *inner, *x;
1861 gimple_seq new_seq;
1862
1863 inner = gimple_seq_first_stmt (gimple_try_cleanup (tp));
1864
1865 if (flag_exceptions)
1866 {
1867 this_region = gen_eh_region_allowed (state->cur_region,
1868 gimple_eh_filter_types (inner));
1869 this_state.cur_region = this_region;
1870 this_state.outer_non_cleanup = this_state.cur_region;
1871 }
1872
1873 lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));
1874
1875 if (!eh_region_may_contain_throw (this_region))
1876 return gimple_try_eval (tp);
1877
1878 this_state.cur_region = state->cur_region;
1879 this_state.ehp_region = this_region;
1880
1881 new_seq = NULL;
1882 x = gimple_build_eh_dispatch (this_region->index);
1883 gimple_set_location (x, gimple_location (tp));
1884 gimple_seq_add_stmt (&new_seq, x);
1885 emit_resx (&new_seq, this_region);
1886
1887 this_region->u.allowed.label = create_artificial_label (UNKNOWN_LOCATION);
1888 x = gimple_build_label (this_region->u.allowed.label);
1889 gimple_seq_add_stmt (&new_seq, x);
1890
1891 lower_eh_constructs_1 (&this_state, gimple_eh_filter_failure_ptr (inner));
1892 gimple_seq_add_seq (&new_seq, gimple_eh_filter_failure (inner));
1893
1894 gimple_try_set_cleanup (tp, new_seq);
1895
1896 return frob_into_branch_around (tp, this_region, NULL);
1897 }
1898
1899 /* A subroutine of lower_eh_constructs_1. Lower a GIMPLE_TRY with
1900 a GIMPLE_EH_MUST_NOT_THROW to a sequence of labels and blocks,
1901 plus the exception region trees that record all the magic. */
1902
1903 static gimple_seq
1904 lower_eh_must_not_throw (struct leh_state *state, gtry *tp)
1905 {
1906 struct leh_state this_state = *state;
1907
1908 if (flag_exceptions)
1909 {
1910 gimple *inner = gimple_seq_first_stmt (gimple_try_cleanup (tp));
1911 eh_region this_region;
1912
1913 this_region = gen_eh_region_must_not_throw (state->cur_region);
1914 this_region->u.must_not_throw.failure_decl
1915 = gimple_eh_must_not_throw_fndecl (
1916 as_a <geh_mnt *> (inner));
1917 this_region->u.must_not_throw.failure_loc
1918 = LOCATION_LOCUS (gimple_location (tp));
1919
1920 /* In order to get mangling applied to this decl, we must mark it
1921 used now. Otherwise, pass_ipa_free_lang_data won't think it
1922 needs to happen. */
1923 TREE_USED (this_region->u.must_not_throw.failure_decl) = 1;
1924
1925 this_state.cur_region = this_region;
1926 this_state.outer_non_cleanup = this_state.cur_region;
1927 }
1928
1929 lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));
1930
1931 return gimple_try_eval (tp);
1932 }
1933
1934 /* Implement a cleanup expression. This is similar to try-finally,
1935 except that we only execute the cleanup block for exception edges. */
1936
1937 static gimple_seq
1938 lower_cleanup (struct leh_state *state, gtry *tp)
1939 {
1940 struct leh_state this_state = *state;
1941 eh_region this_region = NULL;
1942 struct leh_tf_state fake_tf;
1943 gimple_seq result;
1944 bool cleanup_dead = cleanup_is_dead_in (state);
1945
1946 if (flag_exceptions && !cleanup_dead)
1947 {
1948 this_region = gen_eh_region_cleanup (state->cur_region);
1949 this_state.cur_region = this_region;
1950 this_state.outer_non_cleanup = state->outer_non_cleanup;
1951 }
1952
1953 lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));
1954
1955 if (cleanup_dead || !eh_region_may_contain_throw (this_region))
1956 return gimple_try_eval (tp);
1957
1958 /* Build enough of a try-finally state so that we can reuse
1959 honor_protect_cleanup_actions. */
1960 memset (&fake_tf, 0, sizeof (fake_tf));
1961 fake_tf.top_p = fake_tf.try_finally_expr = tp;
1962 fake_tf.outer = state;
1963 fake_tf.region = this_region;
1964 fake_tf.may_fallthru = gimple_seq_may_fallthru (gimple_try_eval (tp));
1965 fake_tf.may_throw = true;
1966
1967 honor_protect_cleanup_actions (state, NULL, &fake_tf);
1968
1969 if (fake_tf.may_throw)
1970 {
1971 /* In this case honor_protect_cleanup_actions had nothing to do,
1972 and we should process this normally. */
1973 lower_eh_constructs_1 (state, gimple_try_cleanup_ptr (tp));
1974 result = frob_into_branch_around (tp, this_region,
1975 fake_tf.fallthru_label);
1976 }
1977 else
1978 {
1979 /* In this case honor_protect_cleanup_actions did nearly all of
1980 the work. All we have left is to append the fallthru_label. */
1981
1982 result = gimple_try_eval (tp);
1983 if (fake_tf.fallthru_label)
1984 {
1985 gimple *x = gimple_build_label (fake_tf.fallthru_label);
1986 gimple_seq_add_stmt (&result, x);
1987 }
1988 }
1989 return result;
1990 }
1991
1992 /* Main loop for lowering eh constructs. Also moves gsi to the next
1993 statement. */
1994
1995 static void
1996 lower_eh_constructs_2 (struct leh_state *state, gimple_stmt_iterator *gsi)
1997 {
1998 gimple_seq replace;
1999 gimple *x;
2000 gimple *stmt = gsi_stmt (*gsi);
2001
2002 switch (gimple_code (stmt))
2003 {
2004 case GIMPLE_CALL:
2005 {
2006 tree fndecl = gimple_call_fndecl (stmt);
2007 tree rhs, lhs;
2008
2009 if (fndecl && fndecl_built_in_p (fndecl, BUILT_IN_NORMAL))
2010 switch (DECL_FUNCTION_CODE (fndecl))
2011 {
2012 case BUILT_IN_EH_POINTER:
2013 /* The front end may have generated a call to
2014 __builtin_eh_pointer (0) within a catch region. Replace
2015 this zero argument with the current catch region number. */
2016 if (state->ehp_region)
2017 {
2018 tree nr = build_int_cst (integer_type_node,
2019 state->ehp_region->index);
2020 gimple_call_set_arg (stmt, 0, nr);
2021 }
2022 else
2023 {
2024 /* The user has done something silly. Remove it. */
2025 rhs = null_pointer_node;
2026 goto do_replace;
2027 }
2028 break;
2029
2030 case BUILT_IN_EH_FILTER:
2031 /* ??? This should never appear, but since it's a builtin it
2032 is open to abuse by users. Just remove it and
2033 replace the use with the arbitrary value zero. */
2034 rhs = build_int_cst (TREE_TYPE (TREE_TYPE (fndecl)), 0);
2035 do_replace:
2036 lhs = gimple_call_lhs (stmt);
2037 x = gimple_build_assign (lhs, rhs);
2038 gsi_insert_before (gsi, x, GSI_SAME_STMT);
2039 /* FALLTHRU */
2040
2041 case BUILT_IN_EH_COPY_VALUES:
2042 /* Likewise this should not appear. Remove it. */
2043 gsi_remove (gsi, true);
2044 return;
2045
2046 default:
2047 break;
2048 }
2049 }
2050 /* FALLTHRU */
2051
2052 case GIMPLE_ASSIGN:
2053 /* If the stmt can throw, use a new temporary for the assignment
2054 to a LHS. This makes sure the old value of the LHS is
2055 available on the EH edge. Only do so for statements that
2056 potentially fall through (e.g. no noreturn calls), otherwise
2057 this new assignment might create fake fallthru regions. */
2058 if (stmt_could_throw_p (cfun, stmt)
2059 && gimple_has_lhs (stmt)
2060 && gimple_stmt_may_fallthru (stmt)
2061 && !tree_could_throw_p (gimple_get_lhs (stmt))
2062 && is_gimple_reg_type (TREE_TYPE (gimple_get_lhs (stmt))))
2063 {
2064 tree lhs = gimple_get_lhs (stmt);
2065 tree tmp = create_tmp_var (TREE_TYPE (lhs));
2066 gimple *s = gimple_build_assign (lhs, tmp);
2067 gimple_set_location (s, gimple_location (stmt));
2068 gimple_set_block (s, gimple_block (stmt));
2069 gimple_set_lhs (stmt, tmp);
2070 gsi_insert_after (gsi, s, GSI_SAME_STMT);
2071 }
2072 /* Look for things that can throw exceptions, and record them. */
2073 if (state->cur_region && stmt_could_throw_p (cfun, stmt))
2074 {
2075 record_stmt_eh_region (state->cur_region, stmt);
2076 note_eh_region_may_contain_throw (state->cur_region);
2077 }
2078 break;
2079
2080 case GIMPLE_COND:
2081 case GIMPLE_GOTO:
2082 case GIMPLE_RETURN:
2083 maybe_record_in_goto_queue (state, stmt);
2084 break;
2085
2086 case GIMPLE_SWITCH:
2087 verify_norecord_switch_expr (state, as_a <gswitch *> (stmt));
2088 break;
2089
2090 case GIMPLE_TRY:
2091 {
2092 gtry *try_stmt = as_a <gtry *> (stmt);
2093 if (gimple_try_kind (try_stmt) == GIMPLE_TRY_FINALLY)
2094 replace = lower_try_finally (state, try_stmt);
2095 else
2096 {
2097 x = gimple_seq_first_stmt (gimple_try_cleanup (try_stmt));
2098 if (!x)
2099 {
2100 replace = gimple_try_eval (try_stmt);
2101 lower_eh_constructs_1 (state, &replace);
2102 }
2103 else
2104 switch (gimple_code (x))
2105 {
2106 case GIMPLE_CATCH:
2107 replace = lower_catch (state, try_stmt);
2108 break;
2109 case GIMPLE_EH_FILTER:
2110 replace = lower_eh_filter (state, try_stmt);
2111 break;
2112 case GIMPLE_EH_MUST_NOT_THROW:
2113 replace = lower_eh_must_not_throw (state, try_stmt);
2114 break;
2115 case GIMPLE_EH_ELSE:
2116 /* This code is only valid with GIMPLE_TRY_FINALLY. */
2117 gcc_unreachable ();
2118 default:
2119 replace = lower_cleanup (state, try_stmt);
2120 break;
2121 }
2122 }
2123 }
2124
2125 /* Remove the old stmt and insert the transformed sequence
2126 instead. */
2127 gsi_insert_seq_before (gsi, replace, GSI_SAME_STMT);
2128 gsi_remove (gsi, true);
2129
2130 /* Return since we don't want gsi_next (). */
2131 return;
2132
2133 case GIMPLE_EH_ELSE:
2134 /* We should be eliminating this in lower_try_finally et al. */
2135 gcc_unreachable ();
2136
2137 default:
2138 /* A type, a decl, or some kind of statement that we're not
2139 interested in. Don't walk them. */
2140 break;
2141 }
2142
2143 gsi_next (gsi);
2144 }
2145
2146 /* A helper to unwrap a gimple_seq and feed stmts to lower_eh_constructs_2. */
2147
2148 static void
2149 lower_eh_constructs_1 (struct leh_state *state, gimple_seq *pseq)
2150 {
2151 gimple_stmt_iterator gsi;
2152 for (gsi = gsi_start (*pseq); !gsi_end_p (gsi);)
2153 lower_eh_constructs_2 (state, &gsi);
2154 }
2155
2156 namespace {
2157
2158 const pass_data pass_data_lower_eh =
2159 {
2160 GIMPLE_PASS, /* type */
2161 "eh", /* name */
2162 OPTGROUP_NONE, /* optinfo_flags */
2163 TV_TREE_EH, /* tv_id */
2164 PROP_gimple_lcf, /* properties_required */
2165 PROP_gimple_leh, /* properties_provided */
2166 0, /* properties_destroyed */
2167 0, /* todo_flags_start */
2168 0, /* todo_flags_finish */
2169 };
2170
2171 class pass_lower_eh : public gimple_opt_pass
2172 {
2173 public:
2174 pass_lower_eh (gcc::context *ctxt)
2175 : gimple_opt_pass (pass_data_lower_eh, ctxt)
2176 {}
2177
2178 /* opt_pass methods: */
2179 virtual unsigned int execute (function *);
2180
2181 }; // class pass_lower_eh
2182
2183 unsigned int
2184 pass_lower_eh::execute (function *fun)
2185 {
2186 struct leh_state null_state;
2187 gimple_seq bodyp;
2188
2189 bodyp = gimple_body (current_function_decl);
2190 if (bodyp == NULL)
2191 return 0;
2192
2193 finally_tree = new hash_table<finally_tree_hasher> (31);
2194 eh_region_may_contain_throw_map = BITMAP_ALLOC (NULL);
2195 memset (&null_state, 0, sizeof (null_state));
2196
2197 collect_finally_tree_1 (bodyp, NULL);
2198 lower_eh_constructs_1 (&null_state, &bodyp);
2199 gimple_set_body (current_function_decl, bodyp);
2200
2201 /* We assume there's a return statement, or something, at the end of
2202 the function, and thus plopping the EH sequence afterward won't
2203 change anything. */
2204 gcc_assert (!gimple_seq_may_fallthru (bodyp));
2205 gimple_seq_add_seq (&bodyp, eh_seq);
2206
2207 /* We assume that since BODYP already existed, adding EH_SEQ to it
2208 didn't change its value, and we don't have to re-set the function. */
2209 gcc_assert (bodyp == gimple_body (current_function_decl));
2210
2211 delete finally_tree;
2212 finally_tree = NULL;
2213 BITMAP_FREE (eh_region_may_contain_throw_map);
2214 eh_seq = NULL;
2215
2216 /* If this function needs a language specific EH personality routine
2217 and the frontend didn't already set one do so now. */
2218 if (function_needs_eh_personality (fun) == eh_personality_lang
2219 && !DECL_FUNCTION_PERSONALITY (current_function_decl))
2220 DECL_FUNCTION_PERSONALITY (current_function_decl)
2221 = lang_hooks.eh_personality ();
2222
2223 return 0;
2224 }
2225
2226 } // anon namespace
2227
2228 gimple_opt_pass *
2229 make_pass_lower_eh (gcc::context *ctxt)
2230 {
2231 return new pass_lower_eh (ctxt);
2232 }
2233 \f
2234 /* Create the multiple edges from an EH_DISPATCH statement to all of
2235 the possible handlers for its EH region. Return true if there's
2236 no fallthru edge; false if there is. */
2237
2238 bool
2239 make_eh_dispatch_edges (geh_dispatch *stmt)
2240 {
2241 eh_region r;
2242 eh_catch c;
2243 basic_block src, dst;
2244
2245 r = get_eh_region_from_number (gimple_eh_dispatch_region (stmt));
2246 src = gimple_bb (stmt);
2247
2248 switch (r->type)
2249 {
2250 case ERT_TRY:
2251 for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
2252 {
2253 dst = label_to_block (cfun, c->label);
2254 make_edge (src, dst, 0);
2255
2256 /* A catch-all handler doesn't have a fallthru. */
2257 if (c->type_list == NULL)
2258 return false;
2259 }
2260 break;
2261
2262 case ERT_ALLOWED_EXCEPTIONS:
2263 dst = label_to_block (cfun, r->u.allowed.label);
2264 make_edge (src, dst, 0);
2265 break;
2266
2267 default:
2268 gcc_unreachable ();
2269 }
2270
2271 return true;
2272 }
2273
2274 /* Create the single EH edge from STMT to its nearest landing pad,
2275 if there is such a landing pad within the current function. */
2276
2277 void
2278 make_eh_edges (gimple *stmt)
2279 {
2280 basic_block src, dst;
2281 eh_landing_pad lp;
2282 int lp_nr;
2283
2284 lp_nr = lookup_stmt_eh_lp (stmt);
2285 if (lp_nr <= 0)
2286 return;
2287
2288 lp = get_eh_landing_pad_from_number (lp_nr);
2289 gcc_assert (lp != NULL);
2290
2291 src = gimple_bb (stmt);
2292 dst = label_to_block (cfun, lp->post_landing_pad);
2293 make_edge (src, dst, EDGE_EH);
2294 }
2295
2296 /* Do the work in redirecting EDGE_IN to NEW_BB within the EH region tree;
2297 do not actually perform the final edge redirection.
2298
2299 CHANGE_REGION is true when we're being called from cleanup_empty_eh and
2300 we intend to change the destination EH region as well; this means
2301 EH_LANDING_PAD_NR must already be set on the destination block label.
2302 If false, we're being called from generic cfg manipulation code and we
2303 should preserve our place within the region tree. */
2304
2305 static void
2306 redirect_eh_edge_1 (edge edge_in, basic_block new_bb, bool change_region)
2307 {
2308 eh_landing_pad old_lp, new_lp;
2309 basic_block old_bb;
2310 gimple *throw_stmt;
2311 int old_lp_nr, new_lp_nr;
2312 tree old_label, new_label;
2313 edge_iterator ei;
2314 edge e;
2315
2316 old_bb = edge_in->dest;
2317 old_label = gimple_block_label (old_bb);
2318 old_lp_nr = EH_LANDING_PAD_NR (old_label);
2319 gcc_assert (old_lp_nr > 0);
2320 old_lp = get_eh_landing_pad_from_number (old_lp_nr);
2321
2322 throw_stmt = last_stmt (edge_in->src);
2323 gcc_checking_assert (lookup_stmt_eh_lp (throw_stmt) == old_lp_nr);
2324
2325 new_label = gimple_block_label (new_bb);
2326
2327 /* Look for an existing region that might be using NEW_BB already. */
2328 new_lp_nr = EH_LANDING_PAD_NR (new_label);
2329 if (new_lp_nr)
2330 {
2331 new_lp = get_eh_landing_pad_from_number (new_lp_nr);
2332 gcc_assert (new_lp);
2333
2334 /* Unless CHANGE_REGION is true, the new and old landing pad
2335 had better be associated with the same EH region. */
2336 gcc_assert (change_region || new_lp->region == old_lp->region);
2337 }
2338 else
2339 {
2340 new_lp = NULL;
2341 gcc_assert (!change_region);
2342 }
2343
2344 /* Notice when we redirect the last EH edge away from OLD_BB. */
2345 FOR_EACH_EDGE (e, ei, old_bb->preds)
2346 if (e != edge_in && (e->flags & EDGE_EH))
2347 break;
2348
2349 if (new_lp)
2350 {
2351 /* NEW_LP already exists. If there are still edges into OLD_LP,
2352 there's nothing to do with the EH tree. If there are no more
2353 edges into OLD_LP, then we want to remove OLD_LP as it is unused.
2354 If CHANGE_REGION is true, then our caller is expecting to remove
2355 the landing pad. */
2356 if (e == NULL && !change_region)
2357 remove_eh_landing_pad (old_lp);
2358 }
2359 else
2360 {
2361 /* No correct landing pad exists. If there are no more edges
2362 into OLD_LP, then we can simply re-use the existing landing pad.
2363 Otherwise, we have to create a new landing pad. */
2364 if (e == NULL)
2365 {
2366 EH_LANDING_PAD_NR (old_lp->post_landing_pad) = 0;
2367 new_lp = old_lp;
2368 }
2369 else
2370 new_lp = gen_eh_landing_pad (old_lp->region);
2371 new_lp->post_landing_pad = new_label;
2372 EH_LANDING_PAD_NR (new_label) = new_lp->index;
2373 }
2374
2375 /* Maybe move the throwing statement to the new region. */
2376 if (old_lp != new_lp)
2377 {
2378 remove_stmt_from_eh_lp (throw_stmt);
2379 add_stmt_to_eh_lp (throw_stmt, new_lp->index);
2380 }
2381 }
2382
2383 /* Redirect EH edge EDGE_IN to NEW_BB. */
2384
2385 edge
2386 redirect_eh_edge (edge edge_in, basic_block new_bb)
2387 {
2388 redirect_eh_edge_1 (edge_in, new_bb, false);
2389 return ssa_redirect_edge (edge_in, new_bb);
2390 }
2391
2392 /* This is a subroutine of gimple_redirect_edge_and_branch. Update the
2393 labels for redirecting a non-fallthru EH_DISPATCH edge E to NEW_BB.
2394 The actual edge update will happen in the caller. */
2395
2396 void
2397 redirect_eh_dispatch_edge (geh_dispatch *stmt, edge e, basic_block new_bb)
2398 {
2399 tree new_lab = gimple_block_label (new_bb);
2400 bool any_changed = false;
2401 basic_block old_bb;
2402 eh_region r;
2403 eh_catch c;
2404
2405 r = get_eh_region_from_number (gimple_eh_dispatch_region (stmt));
2406 switch (r->type)
2407 {
2408 case ERT_TRY:
2409 for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
2410 {
2411 old_bb = label_to_block (cfun, c->label);
2412 if (old_bb == e->dest)
2413 {
2414 c->label = new_lab;
2415 any_changed = true;
2416 }
2417 }
2418 break;
2419
2420 case ERT_ALLOWED_EXCEPTIONS:
2421 old_bb = label_to_block (cfun, r->u.allowed.label);
2422 gcc_assert (old_bb == e->dest);
2423 r->u.allowed.label = new_lab;
2424 any_changed = true;
2425 break;
2426
2427 default:
2428 gcc_unreachable ();
2429 }
2430
2431 gcc_assert (any_changed);
2432 }
2433 \f
2434 /* Helper function for operation_could_trap_p and stmt_could_throw_p. */
2435
2436 bool
2437 operation_could_trap_helper_p (enum tree_code op,
2438 bool fp_operation,
2439 bool honor_trapv,
2440 bool honor_nans,
2441 bool honor_snans,
2442 tree divisor,
2443 bool *handled)
2444 {
2445 *handled = true;
2446 switch (op)
2447 {
2448 case TRUNC_DIV_EXPR:
2449 case CEIL_DIV_EXPR:
2450 case FLOOR_DIV_EXPR:
2451 case ROUND_DIV_EXPR:
2452 case EXACT_DIV_EXPR:
2453 case CEIL_MOD_EXPR:
2454 case FLOOR_MOD_EXPR:
2455 case ROUND_MOD_EXPR:
2456 case TRUNC_MOD_EXPR:
2457 case RDIV_EXPR:
2458 if (honor_snans)
2459 return true;
2460 if (fp_operation)
2461 return flag_trapping_math;
2462 if (!TREE_CONSTANT (divisor) || integer_zerop (divisor))
2463 return true;
2464 return false;
2465
2466 case LT_EXPR:
2467 case LE_EXPR:
2468 case GT_EXPR:
2469 case GE_EXPR:
2470 case LTGT_EXPR:
2471 /* Some floating point comparisons may trap. */
2472 return honor_nans;
2473
2474 case EQ_EXPR:
2475 case NE_EXPR:
2476 case UNORDERED_EXPR:
2477 case ORDERED_EXPR:
2478 case UNLT_EXPR:
2479 case UNLE_EXPR:
2480 case UNGT_EXPR:
2481 case UNGE_EXPR:
2482 case UNEQ_EXPR:
2483 return honor_snans;
2484
2485 case NEGATE_EXPR:
2486 case ABS_EXPR:
2487 case CONJ_EXPR:
2488 /* These operations don't trap with floating point. */
2489 if (honor_trapv)
2490 return true;
2491 return false;
2492
2493 case ABSU_EXPR:
2494 /* ABSU_EXPR never traps. */
2495 return false;
2496
2497 case PLUS_EXPR:
2498 case MINUS_EXPR:
2499 case MULT_EXPR:
2500 /* Any floating arithmetic may trap. */
2501 if (fp_operation && flag_trapping_math)
2502 return true;
2503 if (honor_trapv)
2504 return true;
2505 return false;
2506
2507 case COMPLEX_EXPR:
2508 case CONSTRUCTOR:
2509 /* Constructing an object cannot trap. */
2510 return false;
2511
2512 case COND_EXPR:
2513 case VEC_COND_EXPR:
2514 /* Whether *COND_EXPR can trap depends on whether the
2515 first argument can trap, so signal it as not handled.
2516 Whether lhs is floating or not doesn't matter. */
2517 *handled = false;
2518 return false;
2519
2520 default:
2521 /* Any floating arithmetic may trap. */
2522 if (fp_operation && flag_trapping_math)
2523 return true;
2524
2525 *handled = false;
2526 return false;
2527 }
2528 }
2529
2530 /* Return true if operation OP may trap. FP_OPERATION is true if OP is applied
2531 to floating-point values. HONOR_TRAPV is true if OP is applied to integer
2532 type operands that may trap. If OP is a division operator, DIVISOR contains
2533 the value of the divisor. */
2534
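/* For example, following the helper above: an integer TRUNC_DIV_EXPR
   may trap whenever the divisor is not a non-zero constant, so
   operation_could_trap_p (TRUNC_DIV_EXPR, false, false, op1) is true
   in that case, while PLUS_EXPR on an ordinary (non -ftrapv) integer
   type never traps and yields false. */
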
2535 bool
2536 operation_could_trap_p (enum tree_code op, bool fp_operation, bool honor_trapv,
2537 tree divisor)
2538 {
2539 bool honor_nans = (fp_operation && flag_trapping_math
2540 && !flag_finite_math_only);
2541 bool honor_snans = fp_operation && flag_signaling_nans != 0;
2542 bool handled;
2543
2544 /* This function cannot tell whether or not COND_EXPR and VEC_COND_EXPR could
2545 trap, because that depends on the respective condition op. */
2546 gcc_assert (op != COND_EXPR && op != VEC_COND_EXPR);
2547
2548 if (TREE_CODE_CLASS (op) != tcc_comparison
2549 && TREE_CODE_CLASS (op) != tcc_unary
2550 && TREE_CODE_CLASS (op) != tcc_binary)
2551 return false;
2552
2553 return operation_could_trap_helper_p (op, fp_operation, honor_trapv,
2554 honor_nans, honor_snans, divisor,
2555 &handled);
2556 }
2557
2558
2559 /* Returns true if it is possible to prove that the index of
2560 an array access REF (an ARRAY_REF expression) falls into the
2561 array bounds. */
2562
2563 static bool
2564 in_array_bounds_p (tree ref)
2565 {
2566 tree idx = TREE_OPERAND (ref, 1);
2567 tree min, max;
2568
2569 if (TREE_CODE (idx) != INTEGER_CST)
2570 return false;
2571
2572 min = array_ref_low_bound (ref);
2573 max = array_ref_up_bound (ref);
2574 if (!min
2575 || !max
2576 || TREE_CODE (min) != INTEGER_CST
2577 || TREE_CODE (max) != INTEGER_CST)
2578 return false;
2579
2580 if (tree_int_cst_lt (idx, min)
2581 || tree_int_cst_lt (max, idx))
2582 return false;
2583
2584 return true;
2585 }
2586
2587 /* Returns true if it is possible to prove that the range of
2588 an array access REF (an ARRAY_RANGE_REF expression) falls
2589 into the array bounds. */
2590
2591 static bool
2592 range_in_array_bounds_p (tree ref)
2593 {
2594 tree domain_type = TYPE_DOMAIN (TREE_TYPE (ref));
2595 tree range_min, range_max, min, max;
2596
2597 range_min = TYPE_MIN_VALUE (domain_type);
2598 range_max = TYPE_MAX_VALUE (domain_type);
2599 if (!range_min
2600 || !range_max
2601 || TREE_CODE (range_min) != INTEGER_CST
2602 || TREE_CODE (range_max) != INTEGER_CST)
2603 return false;
2604
2605 min = array_ref_low_bound (ref);
2606 max = array_ref_up_bound (ref);
2607 if (!min
2608 || !max
2609 || TREE_CODE (min) != INTEGER_CST
2610 || TREE_CODE (max) != INTEGER_CST)
2611 return false;
2612
2613 if (tree_int_cst_lt (range_min, min)
2614 || tree_int_cst_lt (max, range_max))
2615 return false;
2616
2617 return true;
2618 }
2619
2620 /* Return true if EXPR can trap, as in dereferencing an invalid pointer
2621 location or floating-point arithmetic. Cf. the RTL version, may_trap_p.
2622 This routine expects only GIMPLE lhs or rhs input. */
2623
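/* For example: an ARRAY_REF with a variable index that cannot be
   proven in bounds could trap, as could a MEM_REF through an arbitrary
   pointer, while a COMPONENT_REF is only as trappy as the object it
   wraps (the handled-component cases below simply strip the wrapper
   and look at the base). */
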
2624 bool
2625 tree_could_trap_p (tree expr)
2626 {
2627 enum tree_code code;
2628 bool fp_operation = false;
2629 bool honor_trapv = false;
2630 tree t, base, div = NULL_TREE;
2631
2632 if (!expr)
2633 return false;
2634
2635 /* In COND_EXPR and VEC_COND_EXPR only the condition may trap, but
2636 they won't appear as operands in GIMPLE form, so this is just for the
2637 GENERIC uses where it needs to recurse on the operands and so
2638 *COND_EXPR itself doesn't trap. */
2639 if (TREE_CODE (expr) == COND_EXPR || TREE_CODE (expr) == VEC_COND_EXPR)
2640 return false;
2641
2642 code = TREE_CODE (expr);
2643 t = TREE_TYPE (expr);
2644
2645 if (t)
2646 {
2647 if (COMPARISON_CLASS_P (expr))
2648 fp_operation = FLOAT_TYPE_P (TREE_TYPE (TREE_OPERAND (expr, 0)));
2649 else
2650 fp_operation = FLOAT_TYPE_P (t);
2651 honor_trapv = INTEGRAL_TYPE_P (t) && TYPE_OVERFLOW_TRAPS (t);
2652 }
2653
2654 if (TREE_CODE_CLASS (code) == tcc_binary)
2655 div = TREE_OPERAND (expr, 1);
2656 if (operation_could_trap_p (code, fp_operation, honor_trapv, div))
2657 return true;
2658
2659 restart:
2660 switch (code)
2661 {
2662 case COMPONENT_REF:
2663 case REALPART_EXPR:
2664 case IMAGPART_EXPR:
2665 case BIT_FIELD_REF:
2666 case VIEW_CONVERT_EXPR:
2667 case WITH_SIZE_EXPR:
2668 expr = TREE_OPERAND (expr, 0);
2669 code = TREE_CODE (expr);
2670 goto restart;
2671
2672 case ARRAY_RANGE_REF:
2673 base = TREE_OPERAND (expr, 0);
2674 if (tree_could_trap_p (base))
2675 return true;
2676 if (TREE_THIS_NOTRAP (expr))
2677 return false;
2678 return !range_in_array_bounds_p (expr);
2679
2680 case ARRAY_REF:
2681 base = TREE_OPERAND (expr, 0);
2682 if (tree_could_trap_p (base))
2683 return true;
2684 if (TREE_THIS_NOTRAP (expr))
2685 return false;
2686 return !in_array_bounds_p (expr);
2687
2688 case TARGET_MEM_REF:
2689 case MEM_REF:
2690 if (TREE_CODE (TREE_OPERAND (expr, 0)) == ADDR_EXPR
2691 && tree_could_trap_p (TREE_OPERAND (TREE_OPERAND (expr, 0), 0)))
2692 return true;
2693 if (TREE_THIS_NOTRAP (expr))
2694 return false;
2695 /* We cannot prove that the access is in-bounds when we have
2696 variable-index TARGET_MEM_REFs. */
2697 if (code == TARGET_MEM_REF
2698 && (TMR_INDEX (expr) || TMR_INDEX2 (expr)))
2699 return true;
2700 if (TREE_CODE (TREE_OPERAND (expr, 0)) == ADDR_EXPR)
2701 {
2702 tree base = TREE_OPERAND (TREE_OPERAND (expr, 0), 0);
2703 poly_offset_int off = mem_ref_offset (expr);
2704 if (maybe_lt (off, 0))
2705 return true;
2706 if (TREE_CODE (base) == STRING_CST)
2707 return maybe_le (TREE_STRING_LENGTH (base), off);
2708 tree size = DECL_SIZE_UNIT (base);
2709 if (size == NULL_TREE
2710 || !poly_int_tree_p (size)
2711 || maybe_le (wi::to_poly_offset (size), off))
2712 return true;
2713 /* Now we are sure the first byte of the access is inside
2714 the object. */
2715 return false;
2716 }
2717 return true;
2718
2719 case INDIRECT_REF:
2720 return !TREE_THIS_NOTRAP (expr);
2721
2722 case ASM_EXPR:
2723 return TREE_THIS_VOLATILE (expr);
2724
2725 case CALL_EXPR:
2726 t = get_callee_fndecl (expr);
2727 /* Assume that calls to weak functions may trap. */
2728 if (!t || !DECL_P (t))
2729 return true;
2730 if (DECL_WEAK (t))
2731 return tree_could_trap_p (t);
2732 return false;
2733
2734 case FUNCTION_DECL:
2735 /* Assume that accesses to weak functions may trap, unless we know
2736 they are certainly defined in current TU or in some other
2737 LTO partition. */
2738 if (DECL_WEAK (expr) && !DECL_COMDAT (expr) && DECL_EXTERNAL (expr))
2739 {
2740 cgraph_node *node = cgraph_node::get (expr);
2741 if (node)
2742 node = node->function_symbol ();
2743 return !(node && node->in_other_partition);
2744 }
2745 return false;
2746
2747 case VAR_DECL:
2748 /* Assume that accesses to weak vars may trap, unless we know
2749 they are certainly defined in current TU or in some other
2750 LTO partition. */
2751 if (DECL_WEAK (expr) && !DECL_COMDAT (expr) && DECL_EXTERNAL (expr))
2752 {
2753 varpool_node *node = varpool_node::get (expr);
2754 if (node)
2755 node = node->ultimate_alias_target ();
2756 return !(node && node->in_other_partition);
2757 }
2758 return false;
2759
2760 default:
2761 return false;
2762 }
2763 }
2764
2765 /* Return non-NULL if there is an integer operation with trapping overflow
2766 we can rewrite into non-trapping. Called via walk_tree from
2767 rewrite_to_non_trapping_overflow. */
2768
2769 static tree
2770 find_trapping_overflow (tree *tp, int *walk_subtrees, void *data)
2771 {
2772 if (EXPR_P (*tp)
2773 && ANY_INTEGRAL_TYPE_P (TREE_TYPE (*tp))
2774 && !operation_no_trapping_overflow (TREE_TYPE (*tp), TREE_CODE (*tp)))
2775 return *tp;
2776 if (IS_TYPE_OR_DECL_P (*tp)
2777 || (TREE_CODE (*tp) == SAVE_EXPR && data == NULL))
2778 *walk_subtrees = 0;
2779 return NULL_TREE;
2780 }
2781
2782 /* Rewrite selected operations into unsigned arithmetic, so that they
2783 don't trap on overflow. */
2784
2785 static tree
2786 replace_trapping_overflow (tree *tp, int *walk_subtrees, void *data)
2787 {
2788 if (find_trapping_overflow (tp, walk_subtrees, data))
2789 {
2790 tree type = TREE_TYPE (*tp);
2791 tree utype = unsigned_type_for (type);
2792 *walk_subtrees = 0;
2793 int len = TREE_OPERAND_LENGTH (*tp);
2794 for (int i = 0; i < len; ++i)
2795 walk_tree (&TREE_OPERAND (*tp, i), replace_trapping_overflow,
2796 data, (hash_set<tree> *) data);
2797
2798 if (TREE_CODE (*tp) == ABS_EXPR)
2799 {
2800 TREE_SET_CODE (*tp, ABSU_EXPR);
2801 TREE_TYPE (*tp) = utype;
2802 *tp = fold_convert (type, *tp);
2803 }
2804 else
2805 {
2806 TREE_TYPE (*tp) = utype;
2807 len = TREE_OPERAND_LENGTH (*tp);
2808 for (int i = 0; i < len; ++i)
2809 TREE_OPERAND (*tp, i)
2810 = fold_convert (utype, TREE_OPERAND (*tp, i));
2811 *tp = fold_convert (type, *tp);
2812 }
2813 }
2814 return NULL_TREE;
2815 }
2816
2817 /* If any subexpression of EXPR can trap due to -ftrapv, rewrite it
2818 using unsigned arithmetic to avoid traps in it. */
2819
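/* For instance (a sketch; the exact trees depend on the types
   involved): with -ftrapv, an int-typed a + b is rewritten by the
   walkers above into (int) ((unsigned int) a + (unsigned int) b), and
   an ABS_EXPR becomes an ABSU_EXPR followed by a conversion back to
   the signed type, so no trapping overflow remains. */
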
2820 tree
2821 rewrite_to_non_trapping_overflow (tree expr)
2822 {
2823 if (!flag_trapv)
2824 return expr;
2825 hash_set<tree> pset;
2826 if (!walk_tree (&expr, find_trapping_overflow, &pset, &pset))
2827 return expr;
2828 expr = unshare_expr (expr);
2829 pset.empty ();
2830 walk_tree (&expr, replace_trapping_overflow, &pset, &pset);
2831 return expr;
2832 }
2833
2834 /* Helper for stmt_could_throw_p. Return true if STMT (an assignment)
2835 may throw. */
2836
2837 static bool
2838 stmt_could_throw_1_p (gassign *stmt)
2839 {
2840 enum tree_code code = gimple_assign_rhs_code (stmt);
2841 bool honor_nans = false;
2842 bool honor_snans = false;
2843 bool fp_operation = false;
2844 bool honor_trapv = false;
2845 tree t;
2846 size_t i;
2847 bool handled, ret;
2848
2849 if (TREE_CODE_CLASS (code) == tcc_comparison
2850 || TREE_CODE_CLASS (code) == tcc_unary
2851 || TREE_CODE_CLASS (code) == tcc_binary)
2852 {
2853 if (TREE_CODE_CLASS (code) == tcc_comparison)
2854 t = TREE_TYPE (gimple_assign_rhs1 (stmt));
2855 else
2856 t = gimple_expr_type (stmt);
2857 fp_operation = FLOAT_TYPE_P (t);
2858 if (fp_operation)
2859 {
2860 honor_nans = flag_trapping_math && !flag_finite_math_only;
2861 honor_snans = flag_signaling_nans != 0;
2862 }
2863 else if (INTEGRAL_TYPE_P (t) && TYPE_OVERFLOW_TRAPS (t))
2864 honor_trapv = true;
2865 }
2866
2867 /* First check the LHS. */
2868 if (tree_could_trap_p (gimple_assign_lhs (stmt)))
2869 return true;
2870
2871 /* Check if the main expression may trap. */
2872 ret = operation_could_trap_helper_p (code, fp_operation, honor_trapv,
2873 honor_nans, honor_snans,
2874 gimple_assign_rhs2 (stmt),
2875 &handled);
2876 if (handled)
2877 return ret;
2878
2879 /* If the expression does not trap, see if any of the individual operands may
2880 trap. */
2881 for (i = 1; i < gimple_num_ops (stmt); i++)
2882 if (tree_could_trap_p (gimple_op (stmt, i)))
2883 return true;
2884
2885 return false;
2886 }
2887
2888
2889 /* Return true if statement STMT within FUN could throw an exception. */
2890
2891 bool
2892 stmt_could_throw_p (function *fun, gimple *stmt)
2893 {
2894 if (!flag_exceptions)
2895 return false;
2896
2897 /* The only statements that can throw an exception are assignments,
2898 conditionals, calls, resx, and asms. */
2899 switch (gimple_code (stmt))
2900 {
2901 case GIMPLE_RESX:
2902 return true;
2903
2904 case GIMPLE_CALL:
2905 return !gimple_call_nothrow_p (as_a <gcall *> (stmt));
2906
2907 case GIMPLE_COND:
2908 {
2909 if (fun && !fun->can_throw_non_call_exceptions)
2910 return false;
2911 gcond *cond = as_a <gcond *> (stmt);
2912 tree lhs = gimple_cond_lhs (cond);
2913 return operation_could_trap_p (gimple_cond_code (cond),
2914 FLOAT_TYPE_P (TREE_TYPE (lhs)),
2915 false, NULL_TREE);
2916 }
2917
2918 case GIMPLE_ASSIGN:
2919 if ((fun && !fun->can_throw_non_call_exceptions)
2920 || gimple_clobber_p (stmt))
2921 return false;
2922 return stmt_could_throw_1_p (as_a <gassign *> (stmt));
2923
2924 case GIMPLE_ASM:
2925 if (fun && !fun->can_throw_non_call_exceptions)
2926 return false;
2927 return gimple_asm_volatile_p (as_a <gasm *> (stmt));
2928
2929 default:
2930 return false;
2931 }
2932 }
2933
2934 /* Return true if STMT in function FUN must be assumed necessary because of
2935 non-call exceptions. */
2936
2937 bool
2938 stmt_unremovable_because_of_non_call_eh_p (function *fun, gimple *stmt)
2939 {
2940 return (fun->can_throw_non_call_exceptions
2941 && !fun->can_delete_dead_exceptions
2942 && stmt_could_throw_p (fun, stmt));
2943 }
2944
2945 /* Return true if expression T could throw an exception. */
2946
2947 bool
2948 tree_could_throw_p (tree t)
2949 {
2950 if (!flag_exceptions)
2951 return false;
2952 if (TREE_CODE (t) == MODIFY_EXPR)
2953 {
2954 if (cfun->can_throw_non_call_exceptions
2955 && tree_could_trap_p (TREE_OPERAND (t, 0)))
2956 return true;
2957 t = TREE_OPERAND (t, 1);
2958 }
2959
2960 if (TREE_CODE (t) == WITH_SIZE_EXPR)
2961 t = TREE_OPERAND (t, 0);
2962 if (TREE_CODE (t) == CALL_EXPR)
2963 return (call_expr_flags (t) & ECF_NOTHROW) == 0;
2964 if (cfun->can_throw_non_call_exceptions)
2965 return tree_could_trap_p (t);
2966 return false;
2967 }
2968
2969 /* Return true if STMT can throw an exception that is not caught within its
2970 function FUN. FUN may be NULL, in which case the answer is the
2971 conservative one (true) whenever STMT could throw at all. */
2972
2973 bool
2974 stmt_can_throw_external (function *fun, gimple *stmt)
2975 {
2976 int lp_nr;
2977
2978 if (!stmt_could_throw_p (fun, stmt))
2979 return false;
2980 if (!fun)
2981 return true;
2982
2983 lp_nr = lookup_stmt_eh_lp_fn (fun, stmt);
2984 return lp_nr == 0;
2985 }
2986
2987 /* Return true if STMT can throw an exception that is caught within its
2988 function FUN. */
2989
2990 bool
2991 stmt_can_throw_internal (function *fun, gimple *stmt)
2992 {
2993 int lp_nr;
2994
2995 gcc_checking_assert (fun);
2996 if (!stmt_could_throw_p (fun, stmt))
2997 return false;
2998
2999 lp_nr = lookup_stmt_eh_lp_fn (fun, stmt);
3000 return lp_nr > 0;
3001 }
3002
3003 /* Given a statement STMT in IFUN, if STMT can no longer throw, then
3004 remove any entry it might have from the EH table. Return true if
3005 any change was made. */
3006
3007 bool
3008 maybe_clean_eh_stmt_fn (struct function *ifun, gimple *stmt)
3009 {
3010 if (stmt_could_throw_p (ifun, stmt))
3011 return false;
3012 return remove_stmt_from_eh_lp_fn (ifun, stmt);
3013 }
3014
3015 /* Likewise, but always use the current function. */
3016
3017 bool
3018 maybe_clean_eh_stmt (gimple *stmt)
3019 {
3020 return maybe_clean_eh_stmt_fn (cfun, stmt);
3021 }
3022
3023 /* Given a statement OLD_STMT and a new statement NEW_STMT that has replaced
3024 OLD_STMT in the function, remove OLD_STMT from the EH table and put NEW_STMT
3025 in the table if it should be in there. Return TRUE if a replacement was
3026 done that may require an EH edge purge. */
3027
3028 bool
3029 maybe_clean_or_replace_eh_stmt (gimple *old_stmt, gimple *new_stmt)
3030 {
3031 int lp_nr = lookup_stmt_eh_lp (old_stmt);
3032
3033 if (lp_nr != 0)
3034 {
3035 bool new_stmt_could_throw = stmt_could_throw_p (cfun, new_stmt);
3036
3037 if (new_stmt == old_stmt && new_stmt_could_throw)
3038 return false;
3039
3040 remove_stmt_from_eh_lp (old_stmt);
3041 if (new_stmt_could_throw)
3042 {
3043 add_stmt_to_eh_lp (new_stmt, lp_nr);
3044 return false;
3045 }
3046 else
3047 return true;
3048 }
3049
3050 return false;
3051 }
3052
3053 /* Given a statement OLD_STMT in OLD_FUN and a duplicate statement NEW_STMT
3054 in NEW_FUN, copy the EH table data from OLD_STMT to NEW_STMT. The MAP
3055 operand is the return value of duplicate_eh_regions. */
3056
3057 bool
3058 maybe_duplicate_eh_stmt_fn (struct function *new_fun, gimple *new_stmt,
3059 struct function *old_fun, gimple *old_stmt,
3060 hash_map<void *, void *> *map,
3061 int default_lp_nr)
3062 {
3063 int old_lp_nr, new_lp_nr;
3064
3065 if (!stmt_could_throw_p (new_fun, new_stmt))
3066 return false;
3067
3068 old_lp_nr = lookup_stmt_eh_lp_fn (old_fun, old_stmt);
3069 if (old_lp_nr == 0)
3070 {
3071 if (default_lp_nr == 0)
3072 return false;
3073 new_lp_nr = default_lp_nr;
3074 }
3075 else if (old_lp_nr > 0)
3076 {
3077 eh_landing_pad old_lp, new_lp;
3078
3079 old_lp = (*old_fun->eh->lp_array)[old_lp_nr];
3080 new_lp = static_cast<eh_landing_pad> (*map->get (old_lp));
3081 new_lp_nr = new_lp->index;
3082 }
3083 else
3084 {
3085 eh_region old_r, new_r;
3086
3087 old_r = (*old_fun->eh->region_array)[-old_lp_nr];
3088 new_r = static_cast<eh_region> (*map->get (old_r));
3089 new_lp_nr = -new_r->index;
3090 }
3091
3092 add_stmt_to_eh_lp_fn (new_fun, new_stmt, new_lp_nr);
3093 return true;
3094 }
3095
3096 /* Similar, but both OLD_STMT and NEW_STMT are within the current function,
3097 and thus no remapping is required. */
3098
3099 bool
3100 maybe_duplicate_eh_stmt (gimple *new_stmt, gimple *old_stmt)
3101 {
3102 int lp_nr;
3103
3104 if (!stmt_could_throw_p (cfun, new_stmt))
3105 return false;
3106
3107 lp_nr = lookup_stmt_eh_lp (old_stmt);
3108 if (lp_nr == 0)
3109 return false;
3110
3111 add_stmt_to_eh_lp (new_stmt, lp_nr);
3112 return true;
3113 }
3114 \f
3115 /* Returns TRUE if oneh and twoh are exception handlers (gimple_try_cleanup of
3116 GIMPLE_TRY) that are similar enough to be considered the same. Currently
3117 this only handles handlers consisting of a single call, as that's the
3118 important case for C++: a destructor call for a particular object showing
3119 up in multiple handlers. */
3120
3121 static bool
3122 same_handler_p (gimple_seq oneh, gimple_seq twoh)
3123 {
3124 gimple_stmt_iterator gsi;
3125 gimple *ones, *twos;
3126 unsigned int ai;
3127
3128 gsi = gsi_start (oneh);
3129 if (!gsi_one_before_end_p (gsi))
3130 return false;
3131 ones = gsi_stmt (gsi);
3132
3133 gsi = gsi_start (twoh);
3134 if (!gsi_one_before_end_p (gsi))
3135 return false;
3136 twos = gsi_stmt (gsi);
3137
3138 if (!is_gimple_call (ones)
3139 || !is_gimple_call (twos)
3140 || gimple_call_lhs (ones)
3141 || gimple_call_lhs (twos)
3142 || gimple_call_chain (ones)
3143 || gimple_call_chain (twos)
3144 || !gimple_call_same_target_p (ones, twos)
3145 || gimple_call_num_args (ones) != gimple_call_num_args (twos))
3146 return false;
3147
3148 for (ai = 0; ai < gimple_call_num_args (ones); ++ai)
3149 if (!operand_equal_p (gimple_call_arg (ones, ai),
3150 gimple_call_arg (twos, ai), 0))
3151 return false;
3152
3153 return true;
3154 }
3155
3156 /* Optimize
3157 try { A() } finally { try { ~B() } catch { ~A() } }
3158 try { ... } finally { ~A() }
3159 into
3160 try { A() } catch { ~B() }
3161 try { ~B() ... } finally { ~A() }
3162
3163 This occurs frequently in C++, where A is a local variable and B is a
3164 temporary used in the initializer for A. */
3165
3166 static void
3167 optimize_double_finally (gtry *one, gtry *two)
3168 {
3169 gimple *oneh;
3170 gimple_stmt_iterator gsi;
3171 gimple_seq cleanup;
3172
3173 cleanup = gimple_try_cleanup (one);
3174 gsi = gsi_start (cleanup);
3175 if (!gsi_one_before_end_p (gsi))
3176 return;
3177
3178 oneh = gsi_stmt (gsi);
3179 if (gimple_code (oneh) != GIMPLE_TRY
3180 || gimple_try_kind (oneh) != GIMPLE_TRY_CATCH)
3181 return;
3182
3183 if (same_handler_p (gimple_try_cleanup (oneh), gimple_try_cleanup (two)))
3184 {
3185 gimple_seq seq = gimple_try_eval (oneh);
3186
3187 gimple_try_set_cleanup (one, seq);
3188 gimple_try_set_kind (one, GIMPLE_TRY_CATCH);
3189 seq = copy_gimple_seq_and_replace_locals (seq);
3190 gimple_seq_add_seq (&seq, gimple_try_eval (two));
3191 gimple_try_set_eval (two, seq);
3192 }
3193 }
3194
3195 /* Perform EH refactoring optimizations that are simpler to do when code
3196 flow has been lowered but EH structures haven't. */
3197
3198 static void
3199 refactor_eh_r (gimple_seq seq)
3200 {
3201 gimple_stmt_iterator gsi;
3202 gimple *one, *two;
3203
3204 one = NULL;
3205 two = NULL;
3206 gsi = gsi_start (seq);
3207 while (1)
3208 {
3209 one = two;
3210 if (gsi_end_p (gsi))
3211 two = NULL;
3212 else
3213 two = gsi_stmt (gsi);
3214 if (one && two)
3215 if (gtry *try_one = dyn_cast <gtry *> (one))
3216 if (gtry *try_two = dyn_cast <gtry *> (two))
3217 if (gimple_try_kind (try_one) == GIMPLE_TRY_FINALLY
3218 && gimple_try_kind (try_two) == GIMPLE_TRY_FINALLY)
3219 optimize_double_finally (try_one, try_two);
3220 if (one)
3221 switch (gimple_code (one))
3222 {
3223 case GIMPLE_TRY:
3224 refactor_eh_r (gimple_try_eval (one));
3225 refactor_eh_r (gimple_try_cleanup (one));
3226 break;
3227 case GIMPLE_CATCH:
3228 refactor_eh_r (gimple_catch_handler (as_a <gcatch *> (one)));
3229 break;
3230 case GIMPLE_EH_FILTER:
3231 refactor_eh_r (gimple_eh_filter_failure (one));
3232 break;
3233 case GIMPLE_EH_ELSE:
3234 {
3235 geh_else *eh_else_stmt = as_a <geh_else *> (one);
3236 refactor_eh_r (gimple_eh_else_n_body (eh_else_stmt));
3237 refactor_eh_r (gimple_eh_else_e_body (eh_else_stmt));
3238 }
3239 break;
3240 default:
3241 break;
3242 }
3243 if (two)
3244 gsi_next (&gsi);
3245 else
3246 break;
3247 }
3248 }
3249
3250 namespace {
3251
3252 const pass_data pass_data_refactor_eh =
3253 {
3254 GIMPLE_PASS, /* type */
3255 "ehopt", /* name */
3256 OPTGROUP_NONE, /* optinfo_flags */
3257 TV_TREE_EH, /* tv_id */
3258 PROP_gimple_lcf, /* properties_required */
3259 0, /* properties_provided */
3260 0, /* properties_destroyed */
3261 0, /* todo_flags_start */
3262 0, /* todo_flags_finish */
3263 };
3264
3265 class pass_refactor_eh : public gimple_opt_pass
3266 {
3267 public:
3268 pass_refactor_eh (gcc::context *ctxt)
3269 : gimple_opt_pass (pass_data_refactor_eh, ctxt)
3270 {}
3271
3272 /* opt_pass methods: */
3273 virtual bool gate (function *) { return flag_exceptions != 0; }
3274 virtual unsigned int execute (function *)
3275 {
3276 refactor_eh_r (gimple_body (current_function_decl));
3277 return 0;
3278 }
3279
3280 }; // class pass_refactor_eh
3281
3282 } // anon namespace
3283
3284 gimple_opt_pass *
3285 make_pass_refactor_eh (gcc::context *ctxt)
3286 {
3287 return new pass_refactor_eh (ctxt);
3288 }
3289 \f
3290 /* At the end of gimple optimization, we can lower RESX. */
3291
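/* There are three cases, handled below:
   - no source region: the resx is effectively unreachable, so it is
     expanded into a call to __builtin_trap;
   - a destination region within this function: the exception pointer
     and filter values are transferred with __builtin_eh_copy_values
     (or, for a MUST_NOT_THROW destination, the failure routine is
     called), and the EH edge is turned into a fallthru edge;
   - no destination region: the exception escapes the function, so we
     call _Unwind_Resume (or __cxa_end_cleanup on ARM EABI) with the
     pointer obtained from __builtin_eh_pointer. */
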
3292 static bool
3293 lower_resx (basic_block bb, gresx *stmt,
3294 hash_map<eh_region, tree> *mnt_map)
3295 {
3296 int lp_nr;
3297 eh_region src_r, dst_r;
3298 gimple_stmt_iterator gsi;
3299 gimple *x;
3300 tree fn, src_nr;
3301 bool ret = false;
3302
3303 lp_nr = lookup_stmt_eh_lp (stmt);
3304 if (lp_nr != 0)
3305 dst_r = get_eh_region_from_lp_number (lp_nr);
3306 else
3307 dst_r = NULL;
3308
3309 src_r = get_eh_region_from_number (gimple_resx_region (stmt));
3310 gsi = gsi_last_bb (bb);
3311
3312 if (src_r == NULL)
3313 {
3314 /* We can wind up with no source region when pass_cleanup_eh shows
3315 that there are no entries into an eh region and deletes it, but
3316 then the block that contains the resx isn't removed. This can
3317 happen without optimization when the switch statement created by
3318 lower_try_finally_switch isn't simplified to remove the eh case.
3319
3320 Resolve this by expanding the resx node to an abort. */
3321
3322 fn = builtin_decl_implicit (BUILT_IN_TRAP);
3323 x = gimple_build_call (fn, 0);
3324 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3325
3326 while (EDGE_COUNT (bb->succs) > 0)
3327 remove_edge (EDGE_SUCC (bb, 0));
3328 }
3329 else if (dst_r)
3330 {
3331 /* When we have a destination region, we resolve this by copying
3332 the exception pointer and filter values into place, and changing the edge
3333 to immediately after the landing pad. */
3334 edge e;
3335
3336 if (lp_nr < 0)
3337 {
3338 basic_block new_bb;
3339 tree lab;
3340
3341 /* We are resuming into a MUST_NOT_THROW region. Expand a call to
3342 the failure decl into a new block, if needed. */
3343 gcc_assert (dst_r->type == ERT_MUST_NOT_THROW);
3344
3345 tree *slot = mnt_map->get (dst_r);
3346 if (slot == NULL)
3347 {
3348 gimple_stmt_iterator gsi2;
3349
3350 new_bb = create_empty_bb (bb);
3351 new_bb->count = bb->count;
3352 add_bb_to_loop (new_bb, bb->loop_father);
3353 lab = gimple_block_label (new_bb);
3354 gsi2 = gsi_start_bb (new_bb);
3355
3356 fn = dst_r->u.must_not_throw.failure_decl;
3357 x = gimple_build_call (fn, 0);
3358 gimple_set_location (x, dst_r->u.must_not_throw.failure_loc);
3359 gsi_insert_after (&gsi2, x, GSI_CONTINUE_LINKING);
3360
3361 mnt_map->put (dst_r, lab);
3362 }
3363 else
3364 {
3365 lab = *slot;
3366 new_bb = label_to_block (cfun, lab);
3367 }
3368
3369 gcc_assert (EDGE_COUNT (bb->succs) == 0);
3370 e = make_single_succ_edge (bb, new_bb, EDGE_FALLTHRU);
3371 }
3372 else
3373 {
3374 edge_iterator ei;
3375 tree dst_nr = build_int_cst (integer_type_node, dst_r->index);
3376
3377 fn = builtin_decl_implicit (BUILT_IN_EH_COPY_VALUES);
3378 src_nr = build_int_cst (integer_type_node, src_r->index);
3379 x = gimple_build_call (fn, 2, dst_nr, src_nr);
3380 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3381
3382 /* Update the flags for the outgoing edge. */
3383 e = single_succ_edge (bb);
3384 gcc_assert (e->flags & EDGE_EH);
3385 e->flags = (e->flags & ~EDGE_EH) | EDGE_FALLTHRU;
3386 e->probability = profile_probability::always ();
3387
3388 /* If there are no more EH users of the landing pad, delete it. */
3389 FOR_EACH_EDGE (e, ei, e->dest->preds)
3390 if (e->flags & EDGE_EH)
3391 break;
3392 if (e == NULL)
3393 {
3394 eh_landing_pad lp = get_eh_landing_pad_from_number (lp_nr);
3395 remove_eh_landing_pad (lp);
3396 }
3397 }
3398
3399 ret = true;
3400 }
3401 else
3402 {
3403 tree var;
3404
3405 /* When we don't have a destination region, this exception escapes
3406 up the call chain. We resolve this by generating a call to the
3407 _Unwind_Resume library function. */
3408
3409 /* The ARM EABI redefines _Unwind_Resume as __cxa_end_cleanup
3410 with no arguments for C++. Check for that. */
3411 if (src_r->use_cxa_end_cleanup)
3412 {
3413 fn = builtin_decl_implicit (BUILT_IN_CXA_END_CLEANUP);
3414 x = gimple_build_call (fn, 0);
3415 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3416 }
3417 else
3418 {
3419 fn = builtin_decl_implicit (BUILT_IN_EH_POINTER);
3420 src_nr = build_int_cst (integer_type_node, src_r->index);
3421 x = gimple_build_call (fn, 1, src_nr);
3422 var = create_tmp_var (ptr_type_node);
3423 var = make_ssa_name (var, x);
3424 gimple_call_set_lhs (x, var);
3425 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3426
3427 /* When exception handling is delegated to a caller function, we
3428 have to guarantee that shadow memory variables living on the stack
3429 will be cleaned up before control is given to a parent function. */
3430 if (sanitize_flags_p (SANITIZE_ADDRESS))
3431 {
3432 tree decl
3433 = builtin_decl_implicit (BUILT_IN_ASAN_HANDLE_NO_RETURN);
3434 gimple *g = gimple_build_call (decl, 0);
3435 gimple_set_location (g, gimple_location (stmt));
3436 gsi_insert_before (&gsi, g, GSI_SAME_STMT);
3437 }
3438
3439 fn = builtin_decl_implicit (BUILT_IN_UNWIND_RESUME);
3440 x = gimple_build_call (fn, 1, var);
3441 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3442 }
3443
3444 gcc_assert (EDGE_COUNT (bb->succs) == 0);
3445 }
3446
3447 gsi_remove (&gsi, true);
3448
3449 return ret;
3450 }
3451
3452 namespace {
3453
3454 const pass_data pass_data_lower_resx =
3455 {
3456 GIMPLE_PASS, /* type */
3457 "resx", /* name */
3458 OPTGROUP_NONE, /* optinfo_flags */
3459 TV_TREE_EH, /* tv_id */
3460 PROP_gimple_lcf, /* properties_required */
3461 0, /* properties_provided */
3462 0, /* properties_destroyed */
3463 0, /* todo_flags_start */
3464 0, /* todo_flags_finish */
3465 };
3466
3467 class pass_lower_resx : public gimple_opt_pass
3468 {
3469 public:
3470 pass_lower_resx (gcc::context *ctxt)
3471 : gimple_opt_pass (pass_data_lower_resx, ctxt)
3472 {}
3473
3474 /* opt_pass methods: */
3475 virtual bool gate (function *) { return flag_exceptions != 0; }
3476 virtual unsigned int execute (function *);
3477
3478 }; // class pass_lower_resx
3479
3480 unsigned
3481 pass_lower_resx::execute (function *fun)
3482 {
3483 basic_block bb;
3484 bool dominance_invalidated = false;
3485 bool any_rewritten = false;
3486
3487 hash_map<eh_region, tree> mnt_map;
3488
3489 FOR_EACH_BB_FN (bb, fun)
3490 {
3491 gimple *last = last_stmt (bb);
3492 if (last && is_gimple_resx (last))
3493 {
3494 dominance_invalidated |=
3495 lower_resx (bb, as_a <gresx *> (last), &mnt_map);
3496 any_rewritten = true;
3497 }
3498 }
3499
3500 if (dominance_invalidated)
3501 {
3502 free_dominance_info (CDI_DOMINATORS);
3503 free_dominance_info (CDI_POST_DOMINATORS);
3504 }
3505
3506 return any_rewritten ? TODO_update_ssa_only_virtuals : 0;
3507 }
3508
3509 } // anon namespace
3510
3511 gimple_opt_pass *
3512 make_pass_lower_resx (gcc::context *ctxt)
3513 {
3514 return new pass_lower_resx (ctxt);
3515 }
3516
3517 /* Try to optimize var = {v} {CLOBBER} stmts followed just by
3518 external throw. */
3519
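/* E.g. (a sketch): in a block of the form

     <lp>: x ={v} {CLOBBER}; y ={v} {CLOBBER}; resx N;

   where the resx leaves the function, the clobbers serve no purpose,
   so we simply delete them. */
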
3520 static void
3521 optimize_clobbers (basic_block bb)
3522 {
3523 gimple_stmt_iterator gsi = gsi_last_bb (bb);
3524 bool any_clobbers = false;
3525 bool seen_stack_restore = false;
3526 edge_iterator ei;
3527 edge e;
3528
3529 /* Only optimize anything if the bb contains at least one clobber,
3530 ends with resx (checked by caller), optionally contains some
3531 debug stmts or labels and at most one __builtin_stack_restore
3532 call, and has an incoming EH edge. */
3533 for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
3534 {
3535 gimple *stmt = gsi_stmt (gsi);
3536 if (is_gimple_debug (stmt))
3537 continue;
3538 if (gimple_clobber_p (stmt))
3539 {
3540 any_clobbers = true;
3541 continue;
3542 }
3543 if (!seen_stack_restore
3544 && gimple_call_builtin_p (stmt, BUILT_IN_STACK_RESTORE))
3545 {
3546 seen_stack_restore = true;
3547 continue;
3548 }
3549 if (gimple_code (stmt) == GIMPLE_LABEL)
3550 break;
3551 return;
3552 }
3553 if (!any_clobbers)
3554 return;
3555 FOR_EACH_EDGE (e, ei, bb->preds)
3556 if (e->flags & EDGE_EH)
3557 break;
3558 if (e == NULL)
3559 return;
3560 gsi = gsi_last_bb (bb);
3561 for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
3562 {
3563 gimple *stmt = gsi_stmt (gsi);
3564 if (!gimple_clobber_p (stmt))
3565 continue;
3566 unlink_stmt_vdef (stmt);
3567 gsi_remove (&gsi, true);
3568 release_defs (stmt);
3569 }
3570 }
3571
3572 /* Try to sink var = {v} {CLOBBER} stmts followed just by
3573 internal throw to successor BB.
3574 SUNK, if not NULL, is an array of sequences, indexed by basic-block
3575 index, to sink into and to pick up sinking opportunities from.
3576 If FOUND_OPPORTUNITY is not NULL then do not perform the optimization
3577 but set *FOUND_OPPORTUNITY to true. */
3578
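/* A sketch: given

     <bb A, EH pad>: x ={v} {CLOBBER}; resx N;   (single EH successor <bb B>)

   the clobber is moved to the start of <bb B>, with virtual operands
   fixed up across any virtual PHI node in <bb B>. */
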
3579 static int
3580 sink_clobbers (basic_block bb,
3581 gimple_seq *sunk = NULL, bool *found_opportunity = NULL)
3582 {
3583 edge e;
3584 edge_iterator ei;
3585 gimple_stmt_iterator gsi, dgsi;
3586 basic_block succbb;
3587 bool any_clobbers = false;
3588 unsigned todo = 0;
3589
3590 /* Only optimize if BB has a single EH successor and
3591 all predecessor edges are EH too. */
3592 if (!single_succ_p (bb)
3593 || (single_succ_edge (bb)->flags & EDGE_EH) == 0)
3594 return 0;
3595
3596 FOR_EACH_EDGE (e, ei, bb->preds)
3597 {
3598 if ((e->flags & EDGE_EH) == 0)
3599 return 0;
3600 }
3601
3602 /* And BB contains only CLOBBER stmts before the final
3603 RESX. */
3604 gsi = gsi_last_bb (bb);
3605 for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
3606 {
3607 gimple *stmt = gsi_stmt (gsi);
3608 if (is_gimple_debug (stmt))
3609 continue;
3610 if (gimple_code (stmt) == GIMPLE_LABEL)
3611 break;
3612 if (!gimple_clobber_p (stmt))
3613 return 0;
3614 any_clobbers = true;
3615 }
3616 if (!any_clobbers && (!sunk || gimple_seq_empty_p (sunk[bb->index])))
3617 return 0;
3618
3619 /* If this was a dry run, tell the caller we found clobbers to sink. */
3620 if (found_opportunity)
3621 {
3622 *found_opportunity = true;
3623 return 0;
3624 }
3625
3626 edge succe = single_succ_edge (bb);
3627 succbb = succe->dest;
3628
3629 /* See if there is a virtual PHI node to take an updated virtual
3630 operand from. */
3631 gphi *vphi = NULL;
3632 for (gphi_iterator gpi = gsi_start_phis (succbb);
3633 !gsi_end_p (gpi); gsi_next (&gpi))
3634 {
3635 tree res = gimple_phi_result (gpi.phi ());
3636 if (virtual_operand_p (res))
3637 {
3638 vphi = gpi.phi ();
3639 break;
3640 }
3641 }
3642
3643 gimple *first_sunk = NULL;
3644 gimple *last_sunk = NULL;
3645 if (sunk && !(succbb->flags & BB_VISITED))
3646 dgsi = gsi_start (sunk[succbb->index]);
3647 else
3648 dgsi = gsi_after_labels (succbb);
3649 gsi = gsi_last_bb (bb);
3650 for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
3651 {
3652 gimple *stmt = gsi_stmt (gsi);
3653 tree lhs;
3654 if (is_gimple_debug (stmt))
3655 continue;
3656 if (gimple_code (stmt) == GIMPLE_LABEL)
3657 break;
3658 lhs = gimple_assign_lhs (stmt);
3659 /* Unfortunately we don't have dominance info updated at this
3660 point, so checking if
3661 dominated_by_p (CDI_DOMINATORS, succbb,
3662 gimple_bb (SSA_NAME_DEF_STMT (TREE_OPERAND (lhs, 0)))
3663 would be too costly. Thus, avoid sinking any clobbers that
3664 refer to non-(D) SSA_NAMEs. */
3665 if (TREE_CODE (lhs) == MEM_REF
3666 && TREE_CODE (TREE_OPERAND (lhs, 0)) == SSA_NAME
3667 && !SSA_NAME_IS_DEFAULT_DEF (TREE_OPERAND (lhs, 0)))
3668 {
3669 unlink_stmt_vdef (stmt);
3670 gsi_remove (&gsi, true);
3671 release_defs (stmt);
3672 continue;
3673 }
3674
3675 /* As we do not change stmt order when sinking across a
3676 forwarder edge we can keep virtual operands in place. */
3677 gsi_remove (&gsi, false);
3678 gsi_insert_before (&dgsi, stmt, GSI_NEW_STMT);
3679 if (!first_sunk)
3680 first_sunk = stmt;
3681 last_sunk = stmt;
3682 }
3683 if (sunk && !gimple_seq_empty_p (sunk[bb->index]))
3684 {
3685 if (!first_sunk)
3686 first_sunk = gsi_stmt (gsi_last (sunk[bb->index]));
3687 last_sunk = gsi_stmt (gsi_start (sunk[bb->index]));
3688 gsi_insert_seq_before_without_update (&dgsi,
3689 sunk[bb->index], GSI_NEW_STMT);
3690 sunk[bb->index] = NULL;
3691 }
3692 if (first_sunk)
3693 {
3694 /* Adjust virtual operands if we sunk across a virtual PHI. */
3695 if (vphi)
3696 {
3697 imm_use_iterator iter;
3698 use_operand_p use_p;
3699 gimple *use_stmt;
3700 tree phi_def = gimple_phi_result (vphi);
3701 FOR_EACH_IMM_USE_STMT (use_stmt, iter, phi_def)
3702 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
3703 SET_USE (use_p, gimple_vdef (first_sunk));
3704 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (phi_def))
3705 {
3706 SSA_NAME_OCCURS_IN_ABNORMAL_PHI (gimple_vdef (first_sunk)) = 1;
3707 SSA_NAME_OCCURS_IN_ABNORMAL_PHI (phi_def) = 0;
3708 }
3709 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (vphi, succe),
3710 gimple_vuse (last_sunk));
3711 SET_USE (gimple_vuse_op (last_sunk), phi_def);
3712 }
3713 /* If SUCCBB has multiple predecessors but no virtual PHI node,
3714 arrange for virtual operands to be renamed. */
3715 else if (!single_pred_p (succbb)
3716 && TREE_CODE (gimple_vuse (last_sunk)) == SSA_NAME)
3717 {
3718 mark_virtual_operand_for_renaming (gimple_vuse (last_sunk));
3719 todo |= TODO_update_ssa_only_virtuals;
3720 }
3721 }
3722
3723 return todo;
3724 }
3725
3726 /* At the end of inlining, we can lower EH_DISPATCH. Return true when
3727 we have found some duplicate labels and removed some edges. */
3728
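/* A rough sketch of the lowering (illustrative only): for an ERT_TRY
   region,

     eh_dispatch <N>;

   becomes

     filter = __builtin_eh_filter (<N>);
     switch (filter) { case <flt_1>: goto <lab_1>; ... default: goto <def>; }

   while for ERT_ALLOWED_EXCEPTIONS it becomes a single comparison:

     filter = __builtin_eh_filter (<N>);
     if (filter == <allowed filter>) goto <failure>; else goto <fallthru>;
 */
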
3729 static bool
3730 lower_eh_dispatch (basic_block src, geh_dispatch *stmt)
3731 {
3732 gimple_stmt_iterator gsi;
3733 int region_nr;
3734 eh_region r;
3735 tree filter, fn;
3736 gimple *x;
3737 bool redirected = false;
3738
3739 region_nr = gimple_eh_dispatch_region (stmt);
3740 r = get_eh_region_from_number (region_nr);
3741
3742 gsi = gsi_last_bb (src);
3743
3744 switch (r->type)
3745 {
3746 case ERT_TRY:
3747 {
3748 auto_vec<tree> labels;
3749 tree default_label = NULL;
3750 eh_catch c;
3751 edge_iterator ei;
3752 edge e;
3753 hash_set<tree> seen_values;
3754
3755 /* Collect the labels for a switch. Zero each catch label
3756 field because we'll no longer have anything keeping these labels
3757 in existence and the optimizer will be free to merge these
3758 blocks at will. */
3759 for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
3760 {
3761 tree tp_node, flt_node, lab = c->label;
3762 bool have_label = false;
3763
3764 c->label = NULL;
3765 tp_node = c->type_list;
3766 flt_node = c->filter_list;
3767
3768 if (tp_node == NULL)
3769 {
3770 default_label = lab;
3771 break;
3772 }
3773 do
3774 {
3775 /* Filter out duplicate labels that arise when this handler
3776 is shadowed by an earlier one. When no labels are
3777 attached to the handler anymore, we remove
3778 the corresponding edge and then we delete unreachable
3779 blocks at the end of this pass. */
3780 if (! seen_values.contains (TREE_VALUE (flt_node)))
3781 {
3782 tree t = build_case_label (TREE_VALUE (flt_node),
3783 NULL, lab);
3784 labels.safe_push (t);
3785 seen_values.add (TREE_VALUE (flt_node));
3786 have_label = true;
3787 }
3788
3789 tp_node = TREE_CHAIN (tp_node);
3790 flt_node = TREE_CHAIN (flt_node);
3791 }
3792 while (tp_node);
3793 if (! have_label)
3794 {
3795 remove_edge (find_edge (src, label_to_block (cfun, lab)));
3796 redirected = true;
3797 }
3798 }
3799
3800 /* Clean up the edge flags. */
3801 FOR_EACH_EDGE (e, ei, src->succs)
3802 {
3803 if (e->flags & EDGE_FALLTHRU)
3804 {
3805 /* If there was no catch-all, use the fallthru edge. */
3806 if (default_label == NULL)
3807 default_label = gimple_block_label (e->dest);
3808 e->flags &= ~EDGE_FALLTHRU;
3809 }
3810 }
3811 gcc_assert (default_label != NULL);
3812
3813 /* Don't generate a switch if there's only a default case.
3814 This is common in the form of try { A; } catch (...) { B; }. */
3815 if (!labels.exists ())
3816 {
3817 e = single_succ_edge (src);
3818 e->flags |= EDGE_FALLTHRU;
3819 }
3820 else
3821 {
3822 fn = builtin_decl_implicit (BUILT_IN_EH_FILTER);
3823 x = gimple_build_call (fn, 1, build_int_cst (integer_type_node,
3824 region_nr));
3825 filter = create_tmp_var (TREE_TYPE (TREE_TYPE (fn)));
3826 filter = make_ssa_name (filter, x);
3827 gimple_call_set_lhs (x, filter);
3828 gimple_set_location (x, gimple_location (stmt));
3829 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3830
3831 /* Turn the default label into a default case. */
3832 default_label = build_case_label (NULL, NULL, default_label);
3833 sort_case_labels (labels);
3834
3835 x = gimple_build_switch (filter, default_label, labels);
3836 gimple_set_location (x, gimple_location (stmt));
3837 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3838 }
3839 }
3840 break;
3841
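/* An ERT_ALLOWED_EXCEPTIONS region typically stems from a dynamic
   exception specification such as "void f () throw (int)"; the
   dispatch lowers, schematically, to

     filter = __builtin_eh_filter (<region-nr>);
     if (filter == <allowed-filter>) goto <handler>; else goto <fallthru>;  */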
3842 case ERT_ALLOWED_EXCEPTIONS:
3843 {
3844 edge b_e = BRANCH_EDGE (src);
3845 edge f_e = FALLTHRU_EDGE (src);
3846
3847 fn = builtin_decl_implicit (BUILT_IN_EH_FILTER);
3848 x = gimple_build_call (fn, 1, build_int_cst (integer_type_node,
3849 region_nr));
3850 filter = create_tmp_var (TREE_TYPE (TREE_TYPE (fn)));
3851 filter = make_ssa_name (filter, x);
3852 gimple_call_set_lhs (x, filter);
3853 gimple_set_location (x, gimple_location (stmt));
3854 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3855
3856 r->u.allowed.label = NULL;
3857 x = gimple_build_cond (EQ_EXPR, filter,
3858 build_int_cst (TREE_TYPE (filter),
3859 r->u.allowed.filter),
3860 NULL_TREE, NULL_TREE);
3861 gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3862
3863 b_e->flags = b_e->flags | EDGE_TRUE_VALUE;
3864 f_e->flags = (f_e->flags & ~EDGE_FALLTHRU) | EDGE_FALSE_VALUE;
3865 }
3866 break;
3867
3868 default:
3869 gcc_unreachable ();
3870 }
3871
3872 /* Replace the EH_DISPATCH with the SWITCH or COND generated above. */
3873 gsi_remove (&gsi, true);
3874 return redirected;
3875 }
3876
3877 namespace {
3878
3879 const pass_data pass_data_lower_eh_dispatch =
3880 {
3881 GIMPLE_PASS, /* type */
3882 "ehdisp", /* name */
3883 OPTGROUP_NONE, /* optinfo_flags */
3884 TV_TREE_EH, /* tv_id */
3885 PROP_gimple_lcf, /* properties_required */
3886 0, /* properties_provided */
3887 0, /* properties_destroyed */
3888 0, /* todo_flags_start */
3889 0, /* todo_flags_finish */
3890 };
3891
3892 class pass_lower_eh_dispatch : public gimple_opt_pass
3893 {
3894 public:
3895 pass_lower_eh_dispatch (gcc::context *ctxt)
3896 : gimple_opt_pass (pass_data_lower_eh_dispatch, ctxt)
3897 {}
3898
3899 /* opt_pass methods: */
3900 virtual bool gate (function *fun) { return fun->eh->region_tree != NULL; }
3901 virtual unsigned int execute (function *);
3902
3903 }; // class pass_lower_eh_dispatch
3904
3905 unsigned
3906 pass_lower_eh_dispatch::execute (function *fun)
3907 {
3908 basic_block bb;
3909 int flags = 0;
3910 bool redirected = false;
3911 bool any_resx_to_process = false;
3912
3913 assign_filter_values ();
3914
3915 FOR_EACH_BB_FN (bb, fun)
3916 {
3917 gimple *last = last_stmt (bb);
3918 if (last == NULL)
3919 continue;
3920 if (gimple_code (last) == GIMPLE_EH_DISPATCH)
3921 {
3922 redirected |= lower_eh_dispatch (bb,
3923 as_a <geh_dispatch *> (last));
3924 flags |= TODO_update_ssa_only_virtuals;
3925 }
3926 else if (gimple_code (last) == GIMPLE_RESX)
3927 {
3928 if (stmt_can_throw_external (fun, last))
3929 optimize_clobbers (bb);
3930 else if (!any_resx_to_process)
3931 sink_clobbers (bb, NULL, &any_resx_to_process);
3932 }
3933 bb->flags &= ~BB_VISITED;
3934 }
3935 if (redirected)
3936 {
3937 free_dominance_info (CDI_DOMINATORS);
3938 delete_unreachable_blocks ();
3939 }
3940
3941 if (any_resx_to_process)
3942 {
3943 /* Make sure to catch all secondary sinking opportunities by processing
3944 blocks in RPO order and after all CFG modifications from lowering
3945 and unreachable block removal. */
3946 int *rpo = XNEWVEC (int, n_basic_blocks_for_fn (fun));
3947 int rpo_n = pre_and_rev_post_order_compute_fn (fun, NULL, rpo, false);
3948 gimple_seq *sunk = XCNEWVEC (gimple_seq, last_basic_block_for_fn (fun));
3949 for (int i = 0; i < rpo_n; ++i)
3950 {
3951 bb = BASIC_BLOCK_FOR_FN (fun, rpo[i]);
3952 gimple *last = last_stmt (bb);
3953 if (last
3954 && gimple_code (last) == GIMPLE_RESX
3955 && !stmt_can_throw_external (fun, last))
3956 flags |= sink_clobbers (bb, sunk);
3957 /* If there were any clobbers sunk into this BB, insert them now. */
3958 if (!gimple_seq_empty_p (sunk[bb->index]))
3959 {
3960 gimple_stmt_iterator gsi = gsi_after_labels (bb);
3961 gsi_insert_seq_before (&gsi, sunk[bb->index], GSI_NEW_STMT);
3962 sunk[bb->index] = NULL;
3963 }
3964 bb->flags |= BB_VISITED;
3965 }
3966 free (rpo);
3967 free (sunk);
3968 }
3969
3970 return flags;
3971 }
3972
3973 } // anon namespace
3974
3975 gimple_opt_pass *
3976 make_pass_lower_eh_dispatch (gcc::context *ctxt)
3977 {
3978 return new pass_lower_eh_dispatch (ctxt);
3979 }
3980 \f
3981 /* Walk statements, see what regions and, optionally, landing pads
3982 are really referenced.
3983
3984 Returns in R_REACHABLEP an sbitmap with bits set for reachable regions,
3985 and in LP_REACHABLEP an sbitmap with bits set for reachable landing pads.
3986
3987 Passing NULL for LP_REACHABLEP is valid; in that case only reachable
3988 regions are marked.
3989
3990 The caller is responsible for freeing the returned sbitmaps. */
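/* Typical use, as in remove_unreachable_handlers below:

     sbitmap r_reachable, lp_reachable;
     mark_reachable_handlers (&r_reachable, &lp_reachable);
     if (bitmap_bit_p (r_reachable, region->index))
       ...
     sbitmap_free (r_reachable);
     sbitmap_free (lp_reachable);  */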
3991
3992 static void
3993 mark_reachable_handlers (sbitmap *r_reachablep, sbitmap *lp_reachablep)
3994 {
3995 sbitmap r_reachable, lp_reachable;
3996 basic_block bb;
3997 bool mark_landing_pads = (lp_reachablep != NULL);
3998 gcc_checking_assert (r_reachablep != NULL);
3999
4000 r_reachable = sbitmap_alloc (cfun->eh->region_array->length ());
4001 bitmap_clear (r_reachable);
4002 *r_reachablep = r_reachable;
4003
4004 if (mark_landing_pads)
4005 {
4006 lp_reachable = sbitmap_alloc (cfun->eh->lp_array->length ());
4007 bitmap_clear (lp_reachable);
4008 *lp_reachablep = lp_reachable;
4009 }
4010 else
4011 lp_reachable = NULL;
4012
4013 FOR_EACH_BB_FN (bb, cfun)
4014 {
4015 gimple_stmt_iterator gsi;
4016
4017 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
4018 {
4019 gimple *stmt = gsi_stmt (gsi);
4020
4021 if (mark_landing_pads)
4022 {
4023 int lp_nr = lookup_stmt_eh_lp (stmt);
4024
4025 /* Negative LP numbers are MUST_NOT_THROW regions which
4026 are not considered BB enders. */
4027 if (lp_nr < 0)
4028 bitmap_set_bit (r_reachable, -lp_nr);
4029
4030 /* Positive LP numbers are real landing pads, and BB enders. */
4031 else if (lp_nr > 0)
4032 {
4033 gcc_assert (gsi_one_before_end_p (gsi));
4034 eh_region region = get_eh_region_from_lp_number (lp_nr);
4035 bitmap_set_bit (r_reachable, region->index);
4036 bitmap_set_bit (lp_reachable, lp_nr);
4037 }
4038 }
4039
4040 /* Avoid removing regions referenced from RESX/EH_DISPATCH. */
4041 switch (gimple_code (stmt))
4042 {
4043 case GIMPLE_RESX:
4044 bitmap_set_bit (r_reachable,
4045 gimple_resx_region (as_a <gresx *> (stmt)));
4046 break;
4047 case GIMPLE_EH_DISPATCH:
4048 bitmap_set_bit (r_reachable,
4049 gimple_eh_dispatch_region (
4050 as_a <geh_dispatch *> (stmt)));
4051 break;
4052 case GIMPLE_CALL:
4053 if (gimple_call_builtin_p (stmt, BUILT_IN_EH_COPY_VALUES))
4054 for (int i = 0; i < 2; ++i)
4055 {
4056 tree rt = gimple_call_arg (stmt, i);
4057 HOST_WIDE_INT ri = tree_to_shwi (rt);
4058
4059 gcc_assert (ri == (int)ri);
4060 bitmap_set_bit (r_reachable, ri);
4061 }
4062 break;
4063 default:
4064 break;
4065 }
4066 }
4067 }
4068 }
4069
4070 /* Remove unreachable handlers and unreachable landing pads. */
4071
4072 static void
4073 remove_unreachable_handlers (void)
4074 {
4075 sbitmap r_reachable, lp_reachable;
4076 eh_region region;
4077 eh_landing_pad lp;
4078 unsigned i;
4079
4080 mark_reachable_handlers (&r_reachable, &lp_reachable);
4081
4082 if (dump_file)
4083 {
4084 fprintf (dump_file, "Before removal of unreachable regions:\n");
4085 dump_eh_tree (dump_file, cfun);
4086 fprintf (dump_file, "Reachable regions: ");
4087 dump_bitmap_file (dump_file, r_reachable);
4088 fprintf (dump_file, "Reachable landing pads: ");
4089 dump_bitmap_file (dump_file, lp_reachable);
4090 }
4091
4092 if (dump_file)
4093 {
4094 FOR_EACH_VEC_SAFE_ELT (cfun->eh->region_array, i, region)
4095 if (region && !bitmap_bit_p (r_reachable, region->index))
4096 fprintf (dump_file,
4097 "Removing unreachable region %d\n",
4098 region->index);
4099 }
4100
4101 remove_unreachable_eh_regions (r_reachable);
4102
4103 FOR_EACH_VEC_SAFE_ELT (cfun->eh->lp_array, i, lp)
4104 if (lp && !bitmap_bit_p (lp_reachable, lp->index))
4105 {
4106 if (dump_file)
4107 fprintf (dump_file,
4108 "Removing unreachable landing pad %d\n",
4109 lp->index);
4110 remove_eh_landing_pad (lp);
4111 }
4112
4113 if (dump_file)
4114 {
4115 fprintf (dump_file, "\n\nAfter removal of unreachable regions:\n");
4116 dump_eh_tree (dump_file, cfun);
4117 fprintf (dump_file, "\n\n");
4118 }
4119
4120 sbitmap_free (r_reachable);
4121 sbitmap_free (lp_reachable);
4122
4123 if (flag_checking)
4124 verify_eh_tree (cfun);
4125 }
4126
4127 /* Remove unreachable handlers if any landing pads have been removed after
4128 the last ehcleanup pass (due to gimple_purge_dead_eh_edges). */
4129
4130 void
4131 maybe_remove_unreachable_handlers (void)
4132 {
4133 eh_landing_pad lp;
4134 unsigned i;
4135
4136 if (cfun->eh == NULL)
4137 return;
4138
4139 FOR_EACH_VEC_SAFE_ELT (cfun->eh->lp_array, i, lp)
4140 if (lp
4141 && (lp->post_landing_pad == NULL_TREE
4142 || label_to_block (cfun, lp->post_landing_pad) == NULL))
4143 {
4144 remove_unreachable_handlers ();
4145 return;
4146 }
4147 }
4148
4149 /* Remove regions that do not have landing pads. This assumes
4150 that remove_unreachable_handlers has already been run, and
4151 that we've just manipulated the landing pads since then.
4152
4153 Preserve regions with landing pads and regions that prevent
4154 exceptions from propagating further, even if these regions
4155 are not reachable. */
4156
4157 static void
4158 remove_unreachable_handlers_no_lp (void)
4159 {
4160 eh_region region;
4161 sbitmap r_reachable;
4162 unsigned i;
4163
4164 mark_reachable_handlers (&r_reachable, /*lp_reachablep=*/NULL);
4165
4166 FOR_EACH_VEC_SAFE_ELT (cfun->eh->region_array, i, region)
4167 {
4168 if (! region)
4169 continue;
4170
4171 if (region->landing_pads != NULL
4172 || region->type == ERT_MUST_NOT_THROW)
4173 bitmap_set_bit (r_reachable, region->index);
4174
4175 if (dump_file
4176 && !bitmap_bit_p (r_reachable, region->index))
4177 fprintf (dump_file,
4178 "Removing unreachable region %d\n",
4179 region->index);
4180 }
4181
4182 remove_unreachable_eh_regions (r_reachable);
4183
4184 sbitmap_free (r_reachable);
4185 }
4186
4187 /* Undo critical edge splitting on an EH landing pad. Earlier, we
4188 optimistically split all sorts of edges, including EH edges. The
4189 optimization passes in between may not have needed them; if not,
4190 we should undo the split.
4191
4192 Recognize this case by having one EH edge incoming to the BB and
4193 one normal edge outgoing; BB should be empty apart from the
4194 post_landing_pad label.
4195
4196 Note that this is slightly different from the empty handler case
4197 handled by cleanup_empty_eh, in that the handler may still contain
4198 real code but the landing pad has been separated from the
4199 handler. As such, cleanup_empty_eh relies on this transformation
4200 having been done first. */
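/* Schematically:

     <throwing stmt>                       <throwing stmt>
          | EH                                  | EH
     <empty post-landing-pad bb>    ==>    <handler bb>
          | normal
     <handler bb>

   i.e. the empty forwarder block disappears.  */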
4201
4202 static bool
4203 unsplit_eh (eh_landing_pad lp)
4204 {
4205 basic_block bb = label_to_block (cfun, lp->post_landing_pad);
4206 gimple_stmt_iterator gsi;
4207 edge e_in, e_out;
4208
4209 /* Quickly check the edge counts on BB for singularity. */
4210 if (!single_pred_p (bb) || !single_succ_p (bb))
4211 return false;
4212 e_in = single_pred_edge (bb);
4213 e_out = single_succ_edge (bb);
4214
4215 /* Input edge must be EH and output edge must be normal. */
4216 if ((e_in->flags & EDGE_EH) == 0 || (e_out->flags & EDGE_EH) != 0)
4217 return false;
4218
4219 /* The block must be empty except for the labels and debug insns. */
4220 gsi = gsi_after_labels (bb);
4221 if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
4222 gsi_next_nondebug (&gsi);
4223 if (!gsi_end_p (gsi))
4224 return false;
4225
4226 /* The destination block must not already have a landing pad
4227 for a different region. */
4228 for (gsi = gsi_start_bb (e_out->dest); !gsi_end_p (gsi); gsi_next (&gsi))
4229 {
4230 glabel *label_stmt = dyn_cast <glabel *> (gsi_stmt (gsi));
4231 tree lab;
4232 int lp_nr;
4233
4234 if (!label_stmt)
4235 break;
4236 lab = gimple_label_label (label_stmt);
4237 lp_nr = EH_LANDING_PAD_NR (lab);
4238 if (lp_nr && get_eh_region_from_lp_number (lp_nr) != lp->region)
4239 return false;
4240 }
4241
4242 /* The new destination block must not already be a destination of
4243 the source block, lest we merge fallthru and eh edges and get
4244 all sorts of confused. */
4245 if (find_edge (e_in->src, e_out->dest))
4246 return false;
4247
4248 /* ??? We can get degenerate phis due to cfg cleanups. I would have
4249 thought this should have been cleaned up by a phicprop pass, but
4250 that doesn't appear to handle virtuals. Propagate by hand. */
4251 if (!gimple_seq_empty_p (phi_nodes (bb)))
4252 {
4253 for (gphi_iterator gpi = gsi_start_phis (bb); !gsi_end_p (gpi); )
4254 {
4255 gimple *use_stmt;
4256 gphi *phi = gpi.phi ();
4257 tree lhs = gimple_phi_result (phi);
4258 tree rhs = gimple_phi_arg_def (phi, 0);
4259 use_operand_p use_p;
4260 imm_use_iterator iter;
4261
4262 FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
4263 {
4264 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
4265 SET_USE (use_p, rhs);
4266 }
4267
4268 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs))
4269 SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs) = 1;
4270
4271 remove_phi_node (&gpi, true);
4272 }
4273 }
4274
4275 if (dump_file && (dump_flags & TDF_DETAILS))
4276 fprintf (dump_file, "Unsplit EH landing pad %d to block %i.\n",
4277 lp->index, e_out->dest->index);
4278
4279 /* Redirect the edge. Since redirect_eh_edge_1 expects to be moving
4280 a successor edge, humor it. But do the real CFG change with the
4281 predecessor of E_OUT in order to preserve the ordering of arguments
4282 to the PHI nodes in E_OUT->DEST. */
4283 redirect_eh_edge_1 (e_in, e_out->dest, false);
4284 redirect_edge_pred (e_out, e_in->src);
4285 e_out->flags = e_in->flags;
4286 e_out->probability = e_in->probability;
4287 remove_edge (e_in);
4288
4289 return true;
4290 }
4291
4292 /* Examine each landing pad block and see if it matches unsplit_eh. */
4293
4294 static bool
4295 unsplit_all_eh (void)
4296 {
4297 bool changed = false;
4298 eh_landing_pad lp;
4299 int i;
4300
4301 for (i = 1; vec_safe_iterate (cfun->eh->lp_array, i, &lp); ++i)
4302 if (lp)
4303 changed |= unsplit_eh (lp);
4304
4305 return changed;
4306 }
4307
4308 /* Wrapper around unsplit_all_eh that makes it usable everywhere. */
4309
4310 void
4311 unsplit_eh_edges (void)
4312 {
4313 bool changed;
4314
4315 /* unsplit_all_eh can die looking up unreachable landing pads. */
4316 maybe_remove_unreachable_handlers ();
4317
4318 changed = unsplit_all_eh ();
4319
4320 /* If EH edges have been unsplit, delete unreachable forwarder blocks. */
4321 if (changed)
4322 {
4323 free_dominance_info (CDI_DOMINATORS);
4324 free_dominance_info (CDI_POST_DOMINATORS);
4325 delete_unreachable_blocks ();
4326 }
4327 }
4328
4329 /* A subroutine of cleanup_empty_eh. Redirect all EH edges incoming
4330 to OLD_BB to NEW_BB; return true on success, false on failure.
4331
4332 OLD_BB_OUT is the edge into NEW_BB from OLD_BB, so if we miss any
4333 PHI variables from OLD_BB we can pick them up from OLD_BB_OUT.
4334 Virtual PHIs may be deleted and marked for renaming. */
4335
4336 static bool
4337 cleanup_empty_eh_merge_phis (basic_block new_bb, basic_block old_bb,
4338 edge old_bb_out, bool change_region)
4339 {
4340 gphi_iterator ngsi, ogsi;
4341 edge_iterator ei;
4342 edge e;
4343 bitmap ophi_handled;
4344
4345 /* The destination block must not be a regular successor for any
4346 of the preds of the landing pad. Thus, avoid turning
4347 <..>
4348 | \ EH
4349 | <..>
4350 | /
4351 <..>
4352 into
4353 <..>
4354 | | EH
4355 <..>
4356 which CFG verification would choke on. See PR45172 and PR51089. */
4357 if (!single_pred_p (new_bb))
4358 FOR_EACH_EDGE (e, ei, old_bb->preds)
4359 if (find_edge (e->src, new_bb))
4360 return false;
4361
4362 FOR_EACH_EDGE (e, ei, old_bb->preds)
4363 redirect_edge_var_map_clear (e);
4364
4365 ophi_handled = BITMAP_ALLOC (NULL);
4366
4367 /* First, iterate through the PHIs on NEW_BB and set up the edge_var_map
4368 for the edges we're going to move. */
4369 for (ngsi = gsi_start_phis (new_bb); !gsi_end_p (ngsi); gsi_next (&ngsi))
4370 {
4371 gphi *ophi, *nphi = ngsi.phi ();
4372 tree nresult, nop;
4373
4374 nresult = gimple_phi_result (nphi);
4375 nop = gimple_phi_arg_def (nphi, old_bb_out->dest_idx);
4376
4377 /* Find the corresponding PHI in OLD_BB so we can forward-propagate
4378 the source ssa_name. */
4379 ophi = NULL;
4380 for (ogsi = gsi_start_phis (old_bb); !gsi_end_p (ogsi); gsi_next (&ogsi))
4381 {
4382 ophi = ogsi.phi ();
4383 if (gimple_phi_result (ophi) == nop)
4384 break;
4385 ophi = NULL;
4386 }
4387
4388 /* If we did find the corresponding PHI, copy those inputs. */
4389 if (ophi)
4390 {
4391 /* If NOP is used somewhere else beyond phis in new_bb, give up. */
4392 if (!has_single_use (nop))
4393 {
4394 imm_use_iterator imm_iter;
4395 use_operand_p use_p;
4396
4397 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, nop)
4398 {
4399 if (!gimple_debug_bind_p (USE_STMT (use_p))
4400 && (gimple_code (USE_STMT (use_p)) != GIMPLE_PHI
4401 || gimple_bb (USE_STMT (use_p)) != new_bb))
4402 goto fail;
4403 }
4404 }
4405 bitmap_set_bit (ophi_handled, SSA_NAME_VERSION (nop));
4406 FOR_EACH_EDGE (e, ei, old_bb->preds)
4407 {
4408 location_t oloc;
4409 tree oop;
4410
4411 if ((e->flags & EDGE_EH) == 0)
4412 continue;
4413 oop = gimple_phi_arg_def (ophi, e->dest_idx);
4414 oloc = gimple_phi_arg_location (ophi, e->dest_idx);
4415 redirect_edge_var_map_add (e, nresult, oop, oloc);
4416 }
4417 }
4418 /* If we didn't find the PHI, then whether NOP is a real variable or a VOP,
4419 we know from the fact that OLD_BB is tree_empty_eh_handler_p that the
4420 variable is unchanged from input to the block and we can simply
4421 re-use the input to NEW_BB from the OLD_BB_OUT edge. */
4422 else
4423 {
4424 location_t nloc
4425 = gimple_phi_arg_location (nphi, old_bb_out->dest_idx);
4426 FOR_EACH_EDGE (e, ei, old_bb->preds)
4427 redirect_edge_var_map_add (e, nresult, nop, nloc);
4428 }
4429 }
4430
4431 /* Second, verify that all PHIs from OLD_BB have been handled. If not,
4432 we don't know what values from the other edges into NEW_BB to use. */
4433 for (ogsi = gsi_start_phis (old_bb); !gsi_end_p (ogsi); gsi_next (&ogsi))
4434 {
4435 gphi *ophi = ogsi.phi ();
4436 tree oresult = gimple_phi_result (ophi);
4437 if (!bitmap_bit_p (ophi_handled, SSA_NAME_VERSION (oresult)))
4438 goto fail;
4439 }
4440
4441 /* Finally, move the edges and update the PHIs. */
4442 for (ei = ei_start (old_bb->preds); (e = ei_safe_edge (ei)); )
4443 if (e->flags & EDGE_EH)
4444 {
4445 /* ??? CFG manipulation routines do not try to update loop
4446 form on edge redirection. Do so manually here for now. */
4447 /* Redirecting a loop entry or latch edge will either create
4448 a multiple-entry loop or rotate the loop. If the loops merge
4449 we may have created a loop with multiple latches.
4450 All of this isn't easily fixed thus cancel the affected loop
4451 and mark the other loop as possibly having multiple latches. */
4452 if (e->dest == e->dest->loop_father->header)
4453 {
4454 mark_loop_for_removal (e->dest->loop_father);
4455 new_bb->loop_father->latch = NULL;
4456 loops_state_set (LOOPS_MAY_HAVE_MULTIPLE_LATCHES);
4457 }
4458 redirect_eh_edge_1 (e, new_bb, change_region);
4459 redirect_edge_succ (e, new_bb);
4460 flush_pending_stmts (e);
4461 }
4462 else
4463 ei_next (&ei);
4464
4465 BITMAP_FREE (ophi_handled);
4466 return true;
4467
4468 fail:
4469 FOR_EACH_EDGE (e, ei, old_bb->preds)
4470 redirect_edge_var_map_clear (e);
4471 BITMAP_FREE (ophi_handled);
4472 return false;
4473 }
4474
4475 /* A subroutine of cleanup_empty_eh. Move a landing pad LP from its
4476 old region to NEW_REGION at BB. */
4477
4478 static void
4479 cleanup_empty_eh_move_lp (basic_block bb, edge e_out,
4480 eh_landing_pad lp, eh_region new_region)
4481 {
4482 gimple_stmt_iterator gsi;
4483 eh_landing_pad *pp;
4484
4485 for (pp = &lp->region->landing_pads; *pp != lp; pp = &(*pp)->next_lp)
4486 continue;
4487 *pp = lp->next_lp;
4488
4489 lp->region = new_region;
4490 lp->next_lp = new_region->landing_pads;
4491 new_region->landing_pads = lp;
4492
4493 /* Delete the RESX that was matched within the empty handler block. */
4494 gsi = gsi_last_bb (bb);
4495 unlink_stmt_vdef (gsi_stmt (gsi));
4496 gsi_remove (&gsi, true);
4497
4498 /* Clean up E_OUT for the fallthru. */
4499 e_out->flags = (e_out->flags & ~EDGE_EH) | EDGE_FALLTHRU;
4500 e_out->probability = profile_probability::always ();
4501 }
4502
4503 /* A subroutine of cleanup_empty_eh. Handle more complex cases of
4504 unsplitting than unsplit_eh was prepared to handle, e.g. when
4505 multiple incoming edges and phis are involved. */
4506
4507 static bool
4508 cleanup_empty_eh_unsplit (basic_block bb, edge e_out, eh_landing_pad lp)
4509 {
4510 gimple_stmt_iterator gsi;
4511 tree lab;
4512
4513 /* We really ought not have totally lost everything following
4514 a landing pad label. Given that BB is empty, there had better
4515 be a successor. */
4516 gcc_assert (e_out != NULL);
4517
4518 /* The destination block must not already have a landing pad
4519 for a different region. */
4520 lab = NULL;
4521 for (gsi = gsi_start_bb (e_out->dest); !gsi_end_p (gsi); gsi_next (&gsi))
4522 {
4523 glabel *stmt = dyn_cast <glabel *> (gsi_stmt (gsi));
4524 int lp_nr;
4525
4526 if (!stmt)
4527 break;
4528 lab = gimple_label_label (stmt);
4529 lp_nr = EH_LANDING_PAD_NR (lab);
4530 if (lp_nr && get_eh_region_from_lp_number (lp_nr) != lp->region)
4531 return false;
4532 }
4533
4534 /* Attempt to move the PHIs into the successor block. */
4535 if (cleanup_empty_eh_merge_phis (e_out->dest, bb, e_out, false))
4536 {
4537 if (dump_file && (dump_flags & TDF_DETAILS))
4538 fprintf (dump_file,
4539 "Unsplit EH landing pad %d to block %i "
4540 "(via cleanup_empty_eh).\n",
4541 lp->index, e_out->dest->index);
4542 return true;
4543 }
4544
4545 return false;
4546 }
4547
4548 /* Return true if edge E_FIRST is part of an empty infinite loop
4549 or leads to such a loop through a series of single-successor
4550 empty bbs. */
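/* For example, with E_FIRST entering <L1>:

     <L1:> goto <L2>;	(otherwise empty)
     <L2:> goto <L1>;	(otherwise empty)

   is such an infinite empty loop.  */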
4551
4552 static bool
4553 infinite_empty_loop_p (edge e_first)
4554 {
4555 bool inf_loop = false;
4556 edge e;
4557
4558 if (e_first->dest == e_first->src)
4559 return true;
4560
4561 e_first->src->aux = (void *) 1;
4562 for (e = e_first; single_succ_p (e->dest); e = single_succ_edge (e->dest))
4563 {
4564 gimple_stmt_iterator gsi;
4565 if (e->dest->aux)
4566 {
4567 inf_loop = true;
4568 break;
4569 }
4570 e->dest->aux = (void *) 1;
4571 gsi = gsi_after_labels (e->dest);
4572 if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
4573 gsi_next_nondebug (&gsi);
4574 if (!gsi_end_p (gsi))
4575 break;
4576 }
4577 e_first->src->aux = NULL;
4578 for (e = e_first; e->dest->aux; e = single_succ_edge (e->dest))
4579 e->dest->aux = NULL;
4580
4581 return inf_loop;
4582 }
4583
4584 /* Examine the block associated with LP to determine if it's an empty
4585 handler for its EH region. If so, attempt to redirect EH edges to
4586 an outer region. Return true if the CFG was updated in any way. This
4587 is similar to jump forwarding, just across EH edges. */
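/* The common shape handled here is a landing-pad block that merely
   re-throws, e.g.

     <L1:>		(post landing pad)
       resx <region-nr>;

   whose incoming EH edges can be redirected to the outer region's
   landing pad, or removed entirely if the exception leaves the
   function, after which <L1> goes unreachable.  */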
4588
4589 static bool
4590 cleanup_empty_eh (eh_landing_pad lp)
4591 {
4592 basic_block bb = label_to_block (cfun, lp->post_landing_pad);
4593 gimple_stmt_iterator gsi;
4594 gimple *resx;
4595 eh_region new_region;
4596 edge_iterator ei;
4597 edge e, e_out;
4598 bool has_non_eh_pred;
4599 bool ret = false;
4600 int new_lp_nr;
4601
4602 /* There can be zero or one edge out of BB. This is the quickest test. */
4603 switch (EDGE_COUNT (bb->succs))
4604 {
4605 case 0:
4606 e_out = NULL;
4607 break;
4608 case 1:
4609 e_out = single_succ_edge (bb);
4610 break;
4611 default:
4612 return false;
4613 }
4614
4615 gsi = gsi_last_nondebug_bb (bb);
4616 resx = gsi_stmt (gsi);
4617 if (resx && is_gimple_resx (resx))
4618 {
4619 if (stmt_can_throw_external (cfun, resx))
4620 optimize_clobbers (bb);
4621 else if (sink_clobbers (bb))
4622 ret = true;
4623 }
4624
4625 gsi = gsi_after_labels (bb);
4626
4627 /* Make sure to skip debug statements. */
4628 if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
4629 gsi_next_nondebug (&gsi);
4630
4631 /* If the block is totally empty, look for more unsplitting cases. */
4632 if (gsi_end_p (gsi))
4633 {
4634 /* For the degenerate case of an infinite loop bail out.
4635 If bb has no successors and is totally empty, which can happen e.g.
4636 because of an incorrect noreturn attribute, bail out too. */
4637 if (e_out == NULL
4638 || infinite_empty_loop_p (e_out))
4639 return ret;
4640
4641 return ret | cleanup_empty_eh_unsplit (bb, e_out, lp);
4642 }
4643
4644 /* The block should consist only of a single RESX statement, modulo a
4645 preceding call to __builtin_stack_restore if there is no outgoing
4646 edge, since the call can be eliminated in this case. */
4647 resx = gsi_stmt (gsi);
4648 if (!e_out && gimple_call_builtin_p (resx, BUILT_IN_STACK_RESTORE))
4649 {
4650 gsi_next_nondebug (&gsi);
4651 resx = gsi_stmt (gsi);
4652 }
4653 if (!is_gimple_resx (resx))
4654 return ret;
4655 gcc_assert (gsi_one_nondebug_before_end_p (gsi));
4656
4657 /* Determine if there are non-EH edges, or resx edges into the handler. */
4658 has_non_eh_pred = false;
4659 FOR_EACH_EDGE (e, ei, bb->preds)
4660 if (!(e->flags & EDGE_EH))
4661 has_non_eh_pred = true;
4662
4663 /* Find the handler that's outside of the empty handler by looking at
4664 where the RESX instruction was vectored. */
4665 new_lp_nr = lookup_stmt_eh_lp (resx);
4666 new_region = get_eh_region_from_lp_number (new_lp_nr);
4667
4668 /* If there's no destination region within the current function,
4669 redirection is trivial via removing the throwing statements from
4670 the EH region, removing the EH edges, and allowing the block
4671 to go unreachable. */
4672 if (new_region == NULL)
4673 {
4674 gcc_assert (e_out == NULL);
4675 for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
4676 if (e->flags & EDGE_EH)
4677 {
4678 gimple *stmt = last_stmt (e->src);
4679 remove_stmt_from_eh_lp (stmt);
4680 remove_edge (e);
4681 }
4682 else
4683 ei_next (&ei);
4684 goto succeed;
4685 }
4686
4687 /* If the destination region is a MUST_NOT_THROW, allow the runtime
4688 to handle the abort and allow the blocks to go unreachable. */
4689 if (new_region->type == ERT_MUST_NOT_THROW)
4690 {
4691 for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
4692 if (e->flags & EDGE_EH)
4693 {
4694 gimple *stmt = last_stmt (e->src);
4695 remove_stmt_from_eh_lp (stmt);
4696 add_stmt_to_eh_lp (stmt, new_lp_nr);
4697 remove_edge (e);
4698 }
4699 else
4700 ei_next (&ei);
4701 goto succeed;
4702 }
4703
4704 /* Try to redirect the EH edges and merge the PHIs into the destination
4705 landing pad block. If the merge succeeds, we'll already have redirected
4706 all the EH edges. The handler itself will go unreachable if there were
4707 no normal edges. */
4708 if (cleanup_empty_eh_merge_phis (e_out->dest, bb, e_out, true))
4709 goto succeed;
4710
4711 /* Finally, if all input edges are EH edges, then we can (potentially)
4712 reduce the number of transfers from the runtime by moving the landing
4713 pad from the original region to the new region. This is a win when
4714 we remove the last CLEANUP region along a particular exception
4715 propagation path. Since nothing changes except for the region with
4716 which the landing pad is associated, the PHI nodes do not need to be
4717 adjusted at all. */
4718 if (!has_non_eh_pred)
4719 {
4720 cleanup_empty_eh_move_lp (bb, e_out, lp, new_region);
4721 if (dump_file && (dump_flags & TDF_DETAILS))
4722 fprintf (dump_file, "Empty EH handler %i moved to EH region %i.\n",
4723 lp->index, new_region->index);
4724
4725 /* ??? The CFG didn't change, but we may have rendered the
4726 old EH region unreachable. Trigger a cleanup there. */
4727 return true;
4728 }
4729
4730 return ret;
4731
4732 succeed:
4733 if (dump_file && (dump_flags & TDF_DETAILS))
4734 fprintf (dump_file, "Empty EH handler %i removed.\n", lp->index);
4735 remove_eh_landing_pad (lp);
4736 return true;
4737 }
4738
4739 /* Do a post-order traversal of the EH region tree. Examine each
4740 post_landing_pad block and see if we can eliminate it as empty. */
4741
4742 static bool
4743 cleanup_all_empty_eh (void)
4744 {
4745 bool changed = false;
4746 eh_landing_pad lp;
4747 int i;
4748
4749 /* The post-order traversal may lead to quadratic behavior in the redirection
4750 of incoming EH edges from inner LPs, so first try to walk the region
4751 tree from inner to outer LPs in order to eliminate these edges. */
4752 for (i = vec_safe_length (cfun->eh->lp_array) - 1; i >= 1; --i)
4753 {
4754 lp = (*cfun->eh->lp_array)[i];
4755 if (lp)
4756 changed |= cleanup_empty_eh (lp);
4757 }
4758
4759 /* Now do the post-order traversal to eliminate outer empty LPs. */
4760 for (i = 1; vec_safe_iterate (cfun->eh->lp_array, i, &lp); ++i)
4761 if (lp)
4762 changed |= cleanup_empty_eh (lp);
4763
4764 return changed;
4765 }
4766
4767 /* Perform cleanups and lowering of exception handling:
4768 1) Cleanup regions with handlers doing nothing are optimized out.
4769 2) MUST_NOT_THROW regions that became dead because of 1) are optimized out.
4770 3) Info about regions containing instructions, and regions
4771 reachable via local EH edges, is collected.
4772 4) The EH tree is pruned for regions that are no longer necessary.
4773 
4774 TODO: Push MUST_NOT_THROW regions to the root of the EH tree.
4775 Unify those that have the same failure decl and locus.
4776 */
4777
4778 static unsigned int
4779 execute_cleanup_eh_1 (void)
4780 {
4781 /* Do this first: unsplit_all_eh and cleanup_all_empty_eh can die
4782 looking up unreachable landing pads. */
4783 remove_unreachable_handlers ();
4784
4785 /* Watch out for the region tree vanishing due to all regions being unreachable. */
4786 if (cfun->eh->region_tree)
4787 {
4788 bool changed = false;
4789
4790 if (optimize)
4791 changed |= unsplit_all_eh ();
4792 changed |= cleanup_all_empty_eh ();
4793
4794 if (changed)
4795 {
4796 free_dominance_info (CDI_DOMINATORS);
4797 free_dominance_info (CDI_POST_DOMINATORS);
4798
4799 /* We delayed all basic block deletion, as we may have performed
4800 cleanups on EH edges while non-EH edges were still present. */
4801 delete_unreachable_blocks ();
4802
4803 /* We manipulated the landing pads. Remove any region that no
4804 longer has a landing pad. */
4805 remove_unreachable_handlers_no_lp ();
4806
4807 return TODO_cleanup_cfg | TODO_update_ssa_only_virtuals;
4808 }
4809 }
4810
4811 return 0;
4812 }
4813
4814 namespace {
4815
4816 const pass_data pass_data_cleanup_eh =
4817 {
4818 GIMPLE_PASS, /* type */
4819 "ehcleanup", /* name */
4820 OPTGROUP_NONE, /* optinfo_flags */
4821 TV_TREE_EH, /* tv_id */
4822 PROP_gimple_lcf, /* properties_required */
4823 0, /* properties_provided */
4824 0, /* properties_destroyed */
4825 0, /* todo_flags_start */
4826 0, /* todo_flags_finish */
4827 };
4828
4829 class pass_cleanup_eh : public gimple_opt_pass
4830 {
4831 public:
4832 pass_cleanup_eh (gcc::context *ctxt)
4833 : gimple_opt_pass (pass_data_cleanup_eh, ctxt)
4834 {}
4835
4836 /* opt_pass methods: */
4837 opt_pass * clone () { return new pass_cleanup_eh (m_ctxt); }
4838 virtual bool gate (function *fun)
4839 {
4840 return fun->eh != NULL && fun->eh->region_tree != NULL;
4841 }
4842
4843 virtual unsigned int execute (function *);
4844
4845 }; // class pass_cleanup_eh
4846
4847 unsigned int
4848 pass_cleanup_eh::execute (function *fun)
4849 {
4850 int ret = execute_cleanup_eh_1 ();
4851
4852 /* If the function no longer needs an EH personality routine
4853 clear it. This exposes cross-language inlining opportunities
4854 and avoids references to a never defined personality routine. */
4855 if (DECL_FUNCTION_PERSONALITY (current_function_decl)
4856 && function_needs_eh_personality (fun) != eh_personality_lang)
4857 DECL_FUNCTION_PERSONALITY (current_function_decl) = NULL_TREE;
4858
4859 return ret;
4860 }
4861
4862 } // anon namespace
4863
4864 gimple_opt_pass *
4865 make_pass_cleanup_eh (gcc::context *ctxt)
4866 {
4867 return new pass_cleanup_eh (ctxt);
4868 }
4869 \f
4870 /* Disable warnings about missing quoting in GCC diagnostics for
4871 the verification errors. Their format strings don't follow GCC
4872 diagnostic conventions but are only used for debugging. */
4873 #if __GNUC__ >= 10
4874 # pragma GCC diagnostic push
4875 # pragma GCC diagnostic ignored "-Wformat-diag"
4876 #endif
4877
4878 /* Verify that BB, which contains STMT as its last statement, has
4879 precisely the edges that make_eh_edges would create. */
4880
4881 DEBUG_FUNCTION bool
4882 verify_eh_edges (gimple *stmt)
4883 {
4884 basic_block bb = gimple_bb (stmt);
4885 eh_landing_pad lp = NULL;
4886 int lp_nr;
4887 edge_iterator ei;
4888 edge e, eh_edge;
4889
4890 lp_nr = lookup_stmt_eh_lp (stmt);
4891 if (lp_nr > 0)
4892 lp = get_eh_landing_pad_from_number (lp_nr);
4893
4894 eh_edge = NULL;
4895 FOR_EACH_EDGE (e, ei, bb->succs)
4896 {
4897 if (e->flags & EDGE_EH)
4898 {
4899 if (eh_edge)
4900 {
4901 error ("BB %i has multiple EH edges", bb->index);
4902 return true;
4903 }
4904 else
4905 eh_edge = e;
4906 }
4907 }
4908
4909 if (lp == NULL)
4910 {
4911 if (eh_edge)
4912 {
4913 error ("BB %i cannot throw but has an EH edge", bb->index);
4914 return true;
4915 }
4916 return false;
4917 }
4918
4919 if (!stmt_could_throw_p (cfun, stmt))
4920 {
4921 error ("BB %i last statement has incorrectly set lp", bb->index);
4922 return true;
4923 }
4924
4925 if (eh_edge == NULL)
4926 {
4927 error ("BB %i is missing an EH edge", bb->index);
4928 return true;
4929 }
4930
4931 if (eh_edge->dest != label_to_block (cfun, lp->post_landing_pad))
4932 {
4933 error ("Incorrect EH edge %i->%i", bb->index, eh_edge->dest->index);
4934 return true;
4935 }
4936
4937 return false;
4938 }
4939
4940 /* Similarly, but handle GIMPLE_EH_DISPATCH specifically. */
4941
4942 DEBUG_FUNCTION bool
4943 verify_eh_dispatch_edge (geh_dispatch *stmt)
4944 {
4945 eh_region r;
4946 eh_catch c;
4947 basic_block src, dst;
4948 bool want_fallthru = true;
4949 edge_iterator ei;
4950 edge e, fall_edge;
4951
4952 r = get_eh_region_from_number (gimple_eh_dispatch_region (stmt));
4953 src = gimple_bb (stmt);
4954
4955 FOR_EACH_EDGE (e, ei, src->succs)
4956 gcc_assert (e->aux == NULL);
4957
4958 switch (r->type)
4959 {
4960 case ERT_TRY:
4961 for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
4962 {
4963 dst = label_to_block (cfun, c->label);
4964 e = find_edge (src, dst);
4965 if (e == NULL)
4966 {
4967 error ("BB %i is missing an edge", src->index);
4968 return true;
4969 }
4970 e->aux = (void *)e;
4971
4972 /* A catch-all handler doesn't have a fallthru. */
4973 if (c->type_list == NULL)
4974 {
4975 want_fallthru = false;
4976 break;
4977 }
4978 }
4979 break;
4980
4981 case ERT_ALLOWED_EXCEPTIONS:
4982 dst = label_to_block (cfun, r->u.allowed.label);
4983 e = find_edge (src, dst);
4984 if (e == NULL)
4985 {
4986 error ("BB %i is missing an edge", src->index);
4987 return true;
4988 }
4989 e->aux = (void *)e;
4990 break;
4991
4992 default:
4993 gcc_unreachable ();
4994 }
4995
4996 fall_edge = NULL;
4997 FOR_EACH_EDGE (e, ei, src->succs)
4998 {
4999 if (e->flags & EDGE_FALLTHRU)
5000 {
5001 if (fall_edge != NULL)
5002 {
5003 error ("BB %i has too many fallthru edges", src->index);
5004 return true;
5005 }
5006 fall_edge = e;
5007 }
5008 else if (e->aux)
5009 e->aux = NULL;
5010 else
5011 {
5012 error ("BB %i has incorrect edge", src->index);
5013 return true;
5014 }
5015 }
5016 if ((fall_edge != NULL) ^ want_fallthru)
5017 {
5018 error ("BB %i has incorrect fallthru edge", src->index);
5019 return true;
5020 }
5021
5022 return false;
5023 }
5024
5025 #if __GNUC__ >= 10
5026 # pragma GCC diagnostic pop
5027 #endif