Thread options & clone events (core + remote)
[binutils-gdb.git] / gdb / infrun.c
1 /* Target-struct-independent code to start (run) and stop an inferior
2 process.
3
4 Copyright (C) 1986-2023 Free Software Foundation, Inc.
5
6 This file is part of GDB.
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20
21 #include "defs.h"
22 #include "displaced-stepping.h"
23 #include "infrun.h"
24 #include <ctype.h>
25 #include "symtab.h"
26 #include "frame.h"
27 #include "inferior.h"
28 #include "breakpoint.h"
29 #include "gdbcore.h"
30 #include "gdbcmd.h"
31 #include "target.h"
32 #include "target-connection.h"
33 #include "gdbthread.h"
34 #include "annotate.h"
35 #include "symfile.h"
36 #include "top.h"
37 #include "ui.h"
38 #include "inf-loop.h"
39 #include "regcache.h"
40 #include "value.h"
41 #include "observable.h"
42 #include "language.h"
43 #include "solib.h"
44 #include "main.h"
45 #include "block.h"
46 #include "mi/mi-common.h"
47 #include "event-top.h"
48 #include "record.h"
49 #include "record-full.h"
50 #include "inline-frame.h"
51 #include "jit.h"
52 #include "tracepoint.h"
53 #include "skip.h"
54 #include "probe.h"
55 #include "objfiles.h"
56 #include "completer.h"
57 #include "target-descriptions.h"
58 #include "target-dcache.h"
59 #include "terminal.h"
60 #include "solist.h"
61 #include "gdbsupport/event-loop.h"
62 #include "thread-fsm.h"
63 #include "gdbsupport/enum-flags.h"
64 #include "progspace-and-thread.h"
65 #include "gdbsupport/gdb_optional.h"
66 #include "arch-utils.h"
67 #include "gdbsupport/scope-exit.h"
68 #include "gdbsupport/forward-scope-exit.h"
69 #include "gdbsupport/gdb_select.h"
70 #include <unordered_map>
71 #include "async-event.h"
72 #include "gdbsupport/selftest.h"
73 #include "scoped-mock-context.h"
74 #include "test-target.h"
75 #include "gdbsupport/common-debug.h"
76 #include "gdbsupport/buildargv.h"
77 #include "extension.h"
78 #include "disasm.h"
79 #include "interps.h"
80
81 /* Prototypes for local functions */
82
83 static void sig_print_info (enum gdb_signal);
84
85 static void sig_print_header (void);
86
87 static void follow_inferior_reset_breakpoints (void);
88
89 static bool currently_stepping (struct thread_info *tp);
90
91 static void insert_hp_step_resume_breakpoint_at_frame (frame_info_ptr);
92
93 static void insert_step_resume_breakpoint_at_caller (frame_info_ptr);
94
95 static void insert_longjmp_resume_breakpoint (struct gdbarch *, CORE_ADDR);
96
97 static bool maybe_software_singlestep (struct gdbarch *gdbarch);
98
99 static void resume (gdb_signal sig);
100
101 static void wait_for_inferior (inferior *inf);
102
103 static void restart_threads (struct thread_info *event_thread,
104 inferior *inf = nullptr);
105
106 static bool start_step_over (void);
107
108 static bool step_over_info_valid_p (void);
109
110 /* Asynchronous signal handler registered as event loop source for
111 when we have pending events ready to be passed to the core. */
112 static struct async_event_handler *infrun_async_inferior_event_token;
113
114 /* Stores whether infrun_async was previously enabled or disabled.
115 Starts off as -1, indicating "never enabled/disabled". */
116 static int infrun_is_async = -1;
117
118 /* See infrun.h. */
119
120 void
121 infrun_async (int enable)
122 {
123 if (infrun_is_async != enable)
124 {
125 infrun_is_async = enable;
126
127 infrun_debug_printf ("enable=%d", enable);
128
129 if (enable)
130 mark_async_event_handler (infrun_async_inferior_event_token);
131 else
132 clear_async_event_handler (infrun_async_inferior_event_token);
133 }
134 }
135
/* See infrun.h.  */

void
mark_infrun_async_event_handler (void)
{
  /* Flag the infrun event source so the event loop will call back
     into infrun to process pending inferior events.  */
  mark_async_event_handler (infrun_async_inferior_event_token);
}
143
/* When set, stop the 'step' command if we enter a function which has
   no line number information.  The normal behavior is that we step
   over such function.  */
bool step_stop_if_no_debug = false;

/* 'show' callback for the setting above: print VALUE (the
   pre-formatted current value) to FILE.  */
static void
show_step_stop_if_no_debug (struct ui_file *file, int from_tty,
			    struct cmd_list_element *c, const char *value)
{
  gdb_printf (file, _("Mode of the step operation is %s.\n"), value);
}
154
155 /* proceed and normal_stop use this to notify the user when the
156 inferior stopped in a different thread than it had been running in.
157 It can also be used to find for which thread normal_stop last
158 reported a stop. */
159 static thread_info_ref previous_thread;
160
161 /* See infrun.h. */
162
163 void
164 update_previous_thread ()
165 {
166 if (inferior_ptid == null_ptid)
167 previous_thread = nullptr;
168 else
169 previous_thread = thread_info_ref::new_reference (inferior_thread ());
170 }
171
/* See infrun.h.  */

thread_info *
get_previous_thread ()
{
  /* May be nullptr if no thread has been recorded yet, or if
     update_previous_thread last ran with no current thread.  */
  return previous_thread.get ();
}
179
/* If set (default for legacy reasons), when following a fork, GDB
   will detach from one of the fork branches, child or parent.
   Exactly which branch is detached depends on 'set follow-fork-mode'
   setting.  */

static bool detach_fork = true;

/* True when "set debug infrun" is on; presumably gates the
   infrun_debug_printf calls throughout this file (declared in
   infrun.h).  */
bool debug_infrun = false;

/* 'show debug infrun' callback.  */
static void
show_debug_infrun (struct ui_file *file, int from_tty,
		   struct cmd_list_element *c, const char *value)
{
  gdb_printf (file, _("Inferior debugging is %s.\n"), value);
}
194
195 /* Support for disabling address space randomization. */
196
197 bool disable_randomization = true;
198
199 static void
200 show_disable_randomization (struct ui_file *file, int from_tty,
201 struct cmd_list_element *c, const char *value)
202 {
203 if (target_supports_disable_randomization ())
204 gdb_printf (file,
205 _("Disabling randomization of debuggee's "
206 "virtual address space is %s.\n"),
207 value);
208 else
209 gdb_puts (_("Disabling randomization of debuggee's "
210 "virtual address space is unsupported on\n"
211 "this platform.\n"), file);
212 }
213
/* 'set disable-randomization' callback: reject the change when the
   target cannot honor the setting.  */

static void
set_disable_randomization (const char *args, int from_tty,
			   struct cmd_list_element *c)
{
  if (!target_supports_disable_randomization ())
    error (_("Disabling randomization of debuggee's "
	     "virtual address space is unsupported on\n"
	     "this platform."));
}
223
/* User interface for non-stop mode.  */

/* The value currently in effect.  */
bool non_stop = false;
/* Staged value written by the "set non-stop" command machinery; only
   committed to NON_STOP by set_non_stop after validation.  */
static bool non_stop_1 = false;

/* 'set non-stop' callback: refuse to change the mode while the
   inferior is running; otherwise commit the staged value.  */

static void
set_non_stop (const char *args, int from_tty,
	      struct cmd_list_element *c)
{
  if (target_has_execution ())
    {
      /* Roll the staged value back so a later "show non-stop" stays
	 consistent with the unchanged effective setting.  */
      non_stop_1 = non_stop;
      error (_("Cannot change this setting while the inferior is running."));
    }

  non_stop = non_stop_1;
}
241
/* 'show non-stop' callback.  */

static void
show_non_stop (struct ui_file *file, int from_tty,
	       struct cmd_list_element *c, const char *value)
{
  gdb_printf (file,
	      _("Controlling the inferior in non-stop mode is %s.\n"),
	      value);
}
250
/* "Observer mode" is somewhat like a more extreme version of
   non-stop, in which all GDB operations that might affect the
   target's execution have been disabled.  */

/* The value currently in effect.  */
static bool observer_mode = false;
/* Staged value written by the corresponding "set" command machinery;
   committed to OBSERVER_MODE by set_observer_mode after
   validation.  */
static bool observer_mode_1 = false;

/* 'set' callback for observer mode: commit the staged value and
   propagate the implied permission changes to the target.  */

static void
set_observer_mode (const char *args, int from_tty,
		   struct cmd_list_element *c)
{
  if (target_has_execution ())
    {
      /* Roll the staged value back; the mode cannot change now.  */
      observer_mode_1 = observer_mode;
      error (_("Cannot change this setting while the inferior is running."));
    }

  observer_mode = observer_mode_1;

  /* In observer mode, forbid every operation that could perturb the
     target's execution.  */
  may_write_registers = !observer_mode;
  may_write_memory = !observer_mode;
  may_insert_breakpoints = !observer_mode;
  may_insert_tracepoints = !observer_mode;
  /* We can insert fast tracepoints in or out of observer mode,
     but enable them if we're going into this mode.  */
  if (observer_mode)
    may_insert_fast_tracepoints = true;
  may_stop = !observer_mode;
  update_target_permissions ();

  /* Going *into* observer mode we must force non-stop, then
     going out we leave it that way.  */
  if (observer_mode)
    {
      pagination_enabled = false;
      non_stop = non_stop_1 = true;
    }

  if (from_tty)
    gdb_printf (_("Observer mode is now %s.\n"),
		(observer_mode ? "on" : "off"));
}
293
/* 'show' callback for observer mode.  */

static void
show_observer_mode (struct ui_file *file, int from_tty,
		    struct cmd_list_element *c, const char *value)
{
  gdb_printf (file, _("Observer mode is %s.\n"), value);
}
300
301 /* This updates the value of observer mode based on changes in
302 permissions. Note that we are deliberately ignoring the values of
303 may-write-registers and may-write-memory, since the user may have
304 reason to enable these during a session, for instance to turn on a
305 debugging-related global. */
306
307 void
308 update_observer_mode (void)
309 {
310 bool newval = (!may_insert_breakpoints
311 && !may_insert_tracepoints
312 && may_insert_fast_tracepoints
313 && !may_stop
314 && non_stop);
315
316 /* Let the user know if things change. */
317 if (newval != observer_mode)
318 gdb_printf (_("Observer mode is now %s.\n"),
319 (newval ? "on" : "off"));
320
321 observer_mode = observer_mode_1 = newval;
322 }
323
/* Tables of how to react to signals; the user sets them.  */

/* Per-signal flag: nonzero means GDB stops when the inferior gets
   this signal.  */
static unsigned char signal_stop[GDB_SIGNAL_LAST];
/* Per-signal flag: nonzero means GDB prints a message when the
   inferior gets this signal.  */
static unsigned char signal_print[GDB_SIGNAL_LAST];
/* Per-signal flag: nonzero means the signal is passed to the program
   (forwarded to the target via update_signals_program_target).  */
static unsigned char signal_program[GDB_SIGNAL_LAST];

/* Table of signals that are registered with "catch signal".  A
   non-zero entry indicates that the signal is caught by some "catch
   signal" command.  */
static unsigned char signal_catch[GDB_SIGNAL_LAST];

/* Table of signals that the target may silently handle.
   This is automatically determined from the flags above,
   and simply cached here.  */
static unsigned char signal_pass[GDB_SIGNAL_LAST];

/* For each of the first NSIGS signal numbers whose entry in SIGS is
   nonzero, set the corresponding entry of FLAGS to 1.  */
#define SET_SIGS(nsigs,sigs,flags) \
  do { \
    int signum = (nsigs); \
    while (signum-- > 0) \
      if ((sigs)[signum]) \
	(flags)[signum] = 1; \
  } while (0)

/* For each of the first NSIGS signal numbers whose entry in SIGS is
   nonzero, clear the corresponding entry of FLAGS.  */
#define UNSET_SIGS(nsigs,sigs,flags) \
  do { \
    int signum = (nsigs); \
    while (signum-- > 0) \
      if ((sigs)[signum]) \
	(flags)[signum] = 0; \
  } while (0)
355
/* Update the target's copy of SIGNAL_PROGRAM.  The sole purpose of
   this function is to avoid exporting `signal_program'.  */

void
update_signals_program_target (void)
{
  /* Push the whole per-signal table down to the target in one call.  */
  target_program_signals (signal_program);
}
364
365 /* Value to pass to target_resume() to cause all threads to resume. */
366
367 #define RESUME_ALL minus_one_ptid
368
369 /* Command list pointer for the "stop" placeholder. */
370
371 static struct cmd_list_element *stop_command;
372
373 /* Nonzero if we want to give control to the user when we're notified
374 of shared library events by the dynamic linker. */
375 int stop_on_solib_events;
376
/* Enable or disable optional shared library event breakpoints
   as appropriate when the above flag is changed.  */

static void
set_stop_on_solib_events (const char *args,
			  int from_tty, struct cmd_list_element *c)
{
  /* Let the breakpoint module bring the solib event breakpoints in
     line with the new setting.  */
  update_solib_breakpoints ();
}

/* 'show stop-on-solib-events' callback.  */
static void
show_stop_on_solib_events (struct ui_file *file, int from_tty,
			   struct cmd_list_element *c, const char *value)
{
  gdb_printf (file, _("Stopping for shared library events is %s.\n"),
	      value);
}
394
395 /* True after stop if current stack frame should be printed. */
396
397 static bool stop_print_frame;
398
399 /* This is a cached copy of the target/ptid/waitstatus of the last
400 event returned by target_wait().
401 This information is returned by get_last_target_status(). */
402 static process_stratum_target *target_last_proc_target;
403 static ptid_t target_last_wait_ptid;
404 static struct target_waitstatus target_last_waitstatus;
405
406 void init_thread_stepping_state (struct thread_info *tss);
407
408 static const char follow_fork_mode_child[] = "child";
409 static const char follow_fork_mode_parent[] = "parent";
410
411 static const char *const follow_fork_mode_kind_names[] = {
412 follow_fork_mode_child,
413 follow_fork_mode_parent,
414 nullptr
415 };
416
/* The current "set follow-fork-mode" value; always points at one of
   the entries of follow_fork_mode_kind_names (compared by pointer in
   follow_fork).  */
static const char *follow_fork_mode_string = follow_fork_mode_parent;

/* 'show follow-fork-mode' callback.  */
static void
show_follow_fork_mode_string (struct ui_file *file, int from_tty,
			      struct cmd_list_element *c, const char *value)
{
  gdb_printf (file,
	      _("Debugger response to a program "
		"call of fork or vfork is \"%s\".\n"),
	      value);
}
427 \f
428
/* Handle changes to the inferior list based on the type of fork,
   which process is being followed, and whether the other process
   should be detached.  On entry inferior_ptid must be the ptid of
   the fork parent.  At return inferior_ptid is the ptid of the
   followed inferior.

   Returns true if the caller must not resume (currently only when
   resuming a foreground vfork parent while holding the child stopped
   would hang the session); false otherwise.  */

static bool
follow_fork_inferior (bool follow_child, bool detach_fork)
{
  INFRUN_SCOPED_DEBUG_ENTER_EXIT;

  infrun_debug_printf ("follow_child = %d, detach_fork = %d",
		       follow_child, detach_fork);

  target_waitkind fork_kind = inferior_thread ()->pending_follow.kind ();
  gdb_assert (fork_kind == TARGET_WAITKIND_FORKED
	      || fork_kind == TARGET_WAITKIND_VFORKED);
  bool has_vforked = fork_kind == TARGET_WAITKIND_VFORKED;
  ptid_t parent_ptid = inferior_ptid;
  ptid_t child_ptid = inferior_thread ()->pending_follow.child_ptid ();

  if (has_vforked
      && !non_stop /* Non-stop always resumes both branches.  */
      && current_ui->prompt_state == PROMPT_BLOCKED
      && !(follow_child || detach_fork || sched_multi))
    {
      /* The parent stays blocked inside the vfork syscall until the
	 child execs or exits.  If we don't let the child run, then
	 the parent stays blocked.  If we're telling the parent to run
	 in the foreground, the user will not be able to ctrl-c to get
	 back the terminal, effectively hanging the debug session.  */
      gdb_printf (gdb_stderr, _("\
Can not resume the parent process over vfork in the foreground while\n\
holding the child stopped.  Try \"set detach-on-fork\" or \
\"set schedule-multiple\".\n"));
      return true;
    }

  inferior *parent_inf = current_inferior ();
  inferior *child_inf = nullptr;

  gdb_assert (parent_inf->thread_waiting_for_vfork_done == nullptr);

  if (!follow_child)
    {
      /* Detach new forked process?  */
      if (detach_fork)
	{
	  /* Before detaching from the child, remove all breakpoints
	     from it.  If we forked, then this has already been taken
	     care of by infrun.c.  If we vforked however, any
	     breakpoint inserted in the parent is visible in the
	     child, even those added while stopped in a vfork
	     catchpoint.  This will remove the breakpoints from the
	     parent also, but they'll be reinserted below.  */
	  if (has_vforked)
	    {
	      /* Keep breakpoints list in sync.  */
	      remove_breakpoints_inf (current_inferior ());
	    }

	  if (print_inferior_events)
	    {
	      /* Ensure that we have a process ptid.  */
	      ptid_t process_ptid = ptid_t (child_ptid.pid ());

	      target_terminal::ours_for_output ();
	      gdb_printf (_("[Detaching after %s from child %s]\n"),
			  has_vforked ? "vfork" : "fork",
			  target_pid_to_str (process_ptid).c_str ());
	    }
	}
      else
	{
	  /* Add process to GDB's tables.  */
	  child_inf = add_inferior (child_ptid.pid ());

	  child_inf->attach_flag = parent_inf->attach_flag;
	  copy_terminal_info (child_inf, parent_inf);
	  child_inf->set_arch (parent_inf->arch ());
	  child_inf->tdesc_info = parent_inf->tdesc_info;

	  child_inf->symfile_flags = SYMFILE_NO_READ;

	  /* If this is a vfork child, then the address-space is
	     shared with the parent.  */
	  if (has_vforked)
	    {
	      child_inf->pspace = parent_inf->pspace;
	      child_inf->aspace = parent_inf->aspace;

	      exec_on_vfork (child_inf);

	      /* The parent will be frozen until the child is done
		 with the shared region.  Keep track of the
		 parent.  */
	      child_inf->vfork_parent = parent_inf;
	      child_inf->pending_detach = false;
	      parent_inf->vfork_child = child_inf;
	      parent_inf->pending_detach = false;
	    }
	  else
	    {
	      /* Plain fork: give the child brand new program and
		 address spaces, copied from the parent's.  */
	      child_inf->aspace = new address_space ();
	      child_inf->pspace = new program_space (child_inf->aspace);
	      child_inf->removable = true;
	      clone_program_space (child_inf->pspace, parent_inf->pspace);
	    }
	}

      if (has_vforked)
	{
	  /* If we detached from the child, then we have to be careful
	     to not insert breakpoints in the parent until the child
	     is done with the shared memory region.  However, if we're
	     staying attached to the child, then we can and should
	     insert breakpoints, so that we can debug it.  A
	     subsequent child exec or exit is enough to know when the
	     child stops using the parent's address space.  */
	  parent_inf->thread_waiting_for_vfork_done
	    = detach_fork ? inferior_thread () : nullptr;
	  parent_inf->pspace->breakpoints_not_allowed = detach_fork;

	  infrun_debug_printf
	    ("parent_inf->thread_waiting_for_vfork_done == %s",
	     (parent_inf->thread_waiting_for_vfork_done == nullptr
	      ? "nullptr"
	      : (parent_inf->thread_waiting_for_vfork_done
		 ->ptid.to_string ().c_str ())));
	}
    }
  else
    {
      /* Follow the child.  */

      if (print_inferior_events)
	{
	  std::string parent_pid = target_pid_to_str (parent_ptid);
	  std::string child_pid = target_pid_to_str (child_ptid);

	  target_terminal::ours_for_output ();
	  gdb_printf (_("[Attaching after %s %s to child %s]\n"),
		      parent_pid.c_str (),
		      has_vforked ? "vfork" : "fork",
		      child_pid.c_str ());
	}

      /* Add the new inferior first, so that the target_detach below
	 doesn't unpush the target.  */

      child_inf = add_inferior (child_ptid.pid ());

      child_inf->attach_flag = parent_inf->attach_flag;
      copy_terminal_info (child_inf, parent_inf);
      child_inf->set_arch (parent_inf->arch ());
      child_inf->tdesc_info = parent_inf->tdesc_info;

      if (has_vforked)
	{
	  /* If this is a vfork child, then the address-space is shared
	     with the parent.  */
	  child_inf->aspace = parent_inf->aspace;
	  child_inf->pspace = parent_inf->pspace;

	  exec_on_vfork (child_inf);
	}
      else if (detach_fork)
	{
	  /* We follow the child and detach from the parent: move the parent's
	     program space to the child.  This simplifies some things, like
	     doing "next" over fork() and landing on the expected line in the
	     child (note, that is broken with "set detach-on-fork off").

	     Before assigning brand new spaces for the parent, remove
	     breakpoints from it: because the new pspace won't match
	     currently inserted locations, the normal detach procedure
	     wouldn't remove them, and we would leave them inserted when
	     detaching.  */
	  remove_breakpoints_inf (parent_inf);

	  child_inf->aspace = parent_inf->aspace;
	  child_inf->pspace = parent_inf->pspace;
	  parent_inf->aspace = new address_space ();
	  parent_inf->pspace = new program_space (parent_inf->aspace);
	  clone_program_space (parent_inf->pspace, child_inf->pspace);

	  /* The parent inferior is still the current one, so keep things
	     in sync.  */
	  set_current_program_space (parent_inf->pspace);
	}
      else
	{
	  /* Keep both: the child gets brand new spaces, copied from
	     the parent's.  */
	  child_inf->aspace = new address_space ();
	  child_inf->pspace = new program_space (child_inf->aspace);
	  child_inf->removable = true;
	  child_inf->symfile_flags = SYMFILE_NO_READ;
	  clone_program_space (child_inf->pspace, parent_inf->pspace);
	}
    }

  gdb_assert (current_inferior () == parent_inf);

  /* If we are setting up an inferior for the child, target_follow_fork is
     responsible for pushing the appropriate targets on the new inferior's
     target stack and adding the initial thread (with ptid CHILD_PTID).

     If we are not setting up an inferior for the child (because following
     the parent and detach_fork is true), it is responsible for detaching
     from CHILD_PTID.  */
  target_follow_fork (child_inf, child_ptid, fork_kind, follow_child,
		      detach_fork);

  gdb::observers::inferior_forked.notify (parent_inf, child_inf, fork_kind);

  /* target_follow_fork must leave the parent as the current inferior.  If we
     want to follow the child, we make it the current one below.  */
  gdb_assert (current_inferior () == parent_inf);

  /* If there is a child inferior, target_follow_fork must have created a thread
     for it.  */
  if (child_inf != nullptr)
    gdb_assert (!child_inf->thread_list.empty ());

  /* Clear the parent thread's pending follow field.  Do this before calling
     target_detach, so that the target can differentiate the two following
     cases:

     - We continue past a fork with "follow-fork-mode == child" &&
       "detach-on-fork on", and therefore detach the parent.  In that
       case the target should not detach the fork child.
     - We run to a fork catchpoint and the user types "detach".  In that
       case, the target should detach the fork child in addition to the
       parent.

     The former case will have pending_follow cleared, the latter will have
     pending_follow set.  */
  thread_info *parent_thread = parent_inf->find_thread (parent_ptid);
  gdb_assert (parent_thread != nullptr);
  parent_thread->pending_follow.set_spurious ();

  /* Detach the parent if needed.  */
  if (follow_child)
    {
      /* If we're vforking, we want to hold on to the parent until
	 the child exits or execs.  At child exec or exit time we
	 can remove the old breakpoints from the parent and detach
	 or resume debugging it.  Otherwise, detach the parent now;
	 we'll want to reuse its program/address spaces, but we
	 can't set them to the child before removing breakpoints
	 from the parent, otherwise, the breakpoints module could
	 decide to remove breakpoints from the wrong process (since
	 they'd be assigned to the same address space).  */

      if (has_vforked)
	{
	  gdb_assert (child_inf->vfork_parent == nullptr);
	  gdb_assert (parent_inf->vfork_child == nullptr);
	  child_inf->vfork_parent = parent_inf;
	  child_inf->pending_detach = false;
	  parent_inf->vfork_child = child_inf;
	  parent_inf->pending_detach = detach_fork;
	}
      else if (detach_fork)
	{
	  if (print_inferior_events)
	    {
	      /* Ensure that we have a process ptid.  */
	      ptid_t process_ptid = ptid_t (parent_ptid.pid ());

	      target_terminal::ours_for_output ();
	      gdb_printf (_("[Detaching after fork from "
			    "parent %s]\n"),
			  target_pid_to_str (process_ptid).c_str ());
	    }

	  target_detach (parent_inf, 0);
	}
    }

  /* If we ended up creating a new inferior, call post_create_inferior to inform
     the various subcomponents.  */
  if (child_inf != nullptr)
    {
      /* If FOLLOW_CHILD, we leave CHILD_INF as the current inferior
	 (do not restore the parent as the current inferior).  */
      gdb::optional<scoped_restore_current_thread> maybe_restore;

      if (!follow_child && !sched_multi)
	maybe_restore.emplace ();

      switch_to_thread (*child_inf->threads ().begin ());
      post_create_inferior (0);
    }

  return false;
}
725
/* Set the last target status as TP having stopped.  */

static void
set_last_target_status_stopped (thread_info *tp)
{
  /* Record a "stopped, no signal" status for TP's target/ptid; later
     retrievable via get_last_target_status.  */
  set_last_target_status (tp->inf->process_target (), tp->ptid,
			  target_waitstatus {}.set_stopped (GDB_SIGNAL_0));
}
734
735 /* Tell the target to follow the fork we're stopped at. Returns true
736 if the inferior should be resumed; false, if the target for some
737 reason decided it's best not to resume. */
738
739 static bool
740 follow_fork ()
741 {
742 INFRUN_SCOPED_DEBUG_ENTER_EXIT;
743
744 bool follow_child = (follow_fork_mode_string == follow_fork_mode_child);
745 bool should_resume = true;
746
747 /* Copy user stepping state to the new inferior thread. FIXME: the
748 followed fork child thread should have a copy of most of the
749 parent thread structure's run control related fields, not just these.
750 Initialized to avoid "may be used uninitialized" warnings from gcc. */
751 struct breakpoint *step_resume_breakpoint = nullptr;
752 struct breakpoint *exception_resume_breakpoint = nullptr;
753 CORE_ADDR step_range_start = 0;
754 CORE_ADDR step_range_end = 0;
755 int current_line = 0;
756 symtab *current_symtab = nullptr;
757 struct frame_id step_frame_id = { 0 };
758
759 if (!non_stop)
760 {
761 thread_info *cur_thr = inferior_thread ();
762
763 ptid_t resume_ptid
764 = user_visible_resume_ptid (cur_thr->control.stepping_command);
765 process_stratum_target *resume_target
766 = user_visible_resume_target (resume_ptid);
767
768 /* Check if there's a thread that we're about to resume, other
769 than the current, with an unfollowed fork/vfork. If so,
770 switch back to it, to tell the target to follow it (in either
771 direction). We'll afterwards refuse to resume, and inform
772 the user what happened. */
773 for (thread_info *tp : all_non_exited_threads (resume_target,
774 resume_ptid))
775 {
776 if (tp == cur_thr)
777 continue;
778
779 /* follow_fork_inferior clears tp->pending_follow, and below
780 we'll need the value after the follow_fork_inferior
781 call. */
782 target_waitkind kind = tp->pending_follow.kind ();
783
784 if (kind != TARGET_WAITKIND_SPURIOUS)
785 {
786 infrun_debug_printf ("need to follow-fork [%s] first",
787 tp->ptid.to_string ().c_str ());
788
789 switch_to_thread (tp);
790
791 /* Set up inferior(s) as specified by the caller, and
792 tell the target to do whatever is necessary to follow
793 either parent or child. */
794 if (follow_child)
795 {
796 /* The thread that started the execution command
797 won't exist in the child. Abort the command and
798 immediately stop in this thread, in the child,
799 inside fork. */
800 should_resume = false;
801 }
802 else
803 {
804 /* Following the parent, so let the thread fork its
805 child freely, it won't influence the current
806 execution command. */
807 if (follow_fork_inferior (follow_child, detach_fork))
808 {
809 /* Target refused to follow, or there's some
810 other reason we shouldn't resume. */
811 switch_to_thread (cur_thr);
812 set_last_target_status_stopped (cur_thr);
813 return false;
814 }
815
816 /* If we're following a vfork, when we need to leave
817 the just-forked thread as selected, as we need to
818 solo-resume it to collect the VFORK_DONE event.
819 If we're following a fork, however, switch back
820 to the original thread that we continue stepping
821 it, etc. */
822 if (kind != TARGET_WAITKIND_VFORKED)
823 {
824 gdb_assert (kind == TARGET_WAITKIND_FORKED);
825 switch_to_thread (cur_thr);
826 }
827 }
828
829 break;
830 }
831 }
832 }
833
834 thread_info *tp = inferior_thread ();
835
836 /* If there were any forks/vforks that were caught and are now to be
837 followed, then do so now. */
838 switch (tp->pending_follow.kind ())
839 {
840 case TARGET_WAITKIND_FORKED:
841 case TARGET_WAITKIND_VFORKED:
842 {
843 ptid_t parent, child;
844 std::unique_ptr<struct thread_fsm> thread_fsm;
845
846 /* If the user did a next/step, etc, over a fork call,
847 preserve the stepping state in the fork child. */
848 if (follow_child && should_resume)
849 {
850 step_resume_breakpoint = clone_momentary_breakpoint
851 (tp->control.step_resume_breakpoint);
852 step_range_start = tp->control.step_range_start;
853 step_range_end = tp->control.step_range_end;
854 current_line = tp->current_line;
855 current_symtab = tp->current_symtab;
856 step_frame_id = tp->control.step_frame_id;
857 exception_resume_breakpoint
858 = clone_momentary_breakpoint (tp->control.exception_resume_breakpoint);
859 thread_fsm = tp->release_thread_fsm ();
860
861 /* For now, delete the parent's sr breakpoint, otherwise,
862 parent/child sr breakpoints are considered duplicates,
863 and the child version will not be installed. Remove
864 this when the breakpoints module becomes aware of
865 inferiors and address spaces. */
866 delete_step_resume_breakpoint (tp);
867 tp->control.step_range_start = 0;
868 tp->control.step_range_end = 0;
869 tp->control.step_frame_id = null_frame_id;
870 delete_exception_resume_breakpoint (tp);
871 }
872
873 parent = inferior_ptid;
874 child = tp->pending_follow.child_ptid ();
875
876 /* If handling a vfork, stop all the inferior's threads, they will be
877 restarted when the vfork shared region is complete. */
878 if (tp->pending_follow.kind () == TARGET_WAITKIND_VFORKED
879 && target_is_non_stop_p ())
880 stop_all_threads ("handling vfork", tp->inf);
881
882 process_stratum_target *parent_targ = tp->inf->process_target ();
883 /* Set up inferior(s) as specified by the caller, and tell the
884 target to do whatever is necessary to follow either parent
885 or child. */
886 if (follow_fork_inferior (follow_child, detach_fork))
887 {
888 /* Target refused to follow, or there's some other reason
889 we shouldn't resume. */
890 should_resume = 0;
891 }
892 else
893 {
894 /* If we followed the child, switch to it... */
895 if (follow_child)
896 {
897 tp = parent_targ->find_thread (child);
898 switch_to_thread (tp);
899
900 /* ... and preserve the stepping state, in case the
901 user was stepping over the fork call. */
902 if (should_resume)
903 {
904 tp->control.step_resume_breakpoint
905 = step_resume_breakpoint;
906 tp->control.step_range_start = step_range_start;
907 tp->control.step_range_end = step_range_end;
908 tp->current_line = current_line;
909 tp->current_symtab = current_symtab;
910 tp->control.step_frame_id = step_frame_id;
911 tp->control.exception_resume_breakpoint
912 = exception_resume_breakpoint;
913 tp->set_thread_fsm (std::move (thread_fsm));
914 }
915 else
916 {
917 /* If we get here, it was because we're trying to
918 resume from a fork catchpoint, but, the user
919 has switched threads away from the thread that
920 forked. In that case, the resume command
921 issued is most likely not applicable to the
922 child, so just warn, and refuse to resume. */
923 warning (_("Not resuming: switched threads "
924 "before following fork child."));
925 }
926
927 /* Reset breakpoints in the child as appropriate. */
928 follow_inferior_reset_breakpoints ();
929 }
930 }
931 }
932 break;
933 case TARGET_WAITKIND_SPURIOUS:
934 /* Nothing to follow. */
935 break;
936 default:
937 internal_error ("Unexpected pending_follow.kind %d\n",
938 tp->pending_follow.kind ());
939 break;
940 }
941
942 if (!should_resume)
943 set_last_target_status_stopped (tp);
944 return should_resume;
945 }
946
/* After following a fork/vfork into a new inferior, fix up breakpoint
   state in the (now current) followed thread: re-home the cloned
   step-resume and exception-resume breakpoints to it, then reinsert
   all breakpoints.  */

static void
follow_inferior_reset_breakpoints (void)
{
  struct thread_info *tp = inferior_thread ();

  /* Was there a step_resume breakpoint?  (There was if the user
     did a "next" at the fork() call.)  If so, explicitly reset its
     thread number.  Cloned step_resume breakpoints are disabled on
     creation, so enable it here now that it is associated with the
     correct thread.

     step_resumes are a form of bp that are made to be per-thread.
     Since we created the step_resume bp when the parent process
     was being debugged, and now are switching to the child process,
     from the breakpoint package's viewpoint, that's a switch of
     "threads".  We must update the bp's notion of which thread
     it is for, or it'll be ignored when it triggers.  */

  if (tp->control.step_resume_breakpoint)
    {
      breakpoint_re_set_thread (tp->control.step_resume_breakpoint);
      tp->control.step_resume_breakpoint->first_loc ().enabled = 1;
    }

  /* Treat exception_resume breakpoints like step_resume breakpoints.  */
  if (tp->control.exception_resume_breakpoint)
    {
      breakpoint_re_set_thread (tp->control.exception_resume_breakpoint);
      tp->control.exception_resume_breakpoint->first_loc ().enabled = 1;
    }

  /* Reinsert all breakpoints in the child.  The user may have set
     breakpoints after catching the fork, in which case those
     were never set in the child, but only in the parent.  This makes
     sure the inserted breakpoints match the breakpoint list.  */

  breakpoint_re_set ();
  insert_breakpoints ();
}
986
987 /* The child has exited or execed: resume THREAD, a thread of the parent,
988 if it was meant to be executing. */
989
990 static void
991 proceed_after_vfork_done (thread_info *thread)
992 {
993 if (thread->state == THREAD_RUNNING
994 && !thread->executing ()
995 && !thread->stop_requested
996 && thread->stop_signal () == GDB_SIGNAL_0)
997 {
998 infrun_debug_printf ("resuming vfork parent thread %s",
999 thread->ptid.to_string ().c_str ());
1000
1001 switch_to_thread (thread);
1002 clear_proceed_status (0);
1003 proceed ((CORE_ADDR) -1, GDB_SIGNAL_DEFAULT);
1004 }
1005 }
1006
/* Called whenever we notice an exec or exit event, to handle
   detaching or resuming a vfork parent.  EXEC is non-zero for an exec
   event, zero for an exit event.  */

static void
handle_vfork_child_exec_or_exit (int exec)
{
  INFRUN_SCOPED_DEBUG_ENTER_EXIT;

  struct inferior *inf = current_inferior ();

  if (inf->vfork_parent)
    {
      /* Set below to the vfork parent, if the user wanted it left
	 running and it should be resumed once the bond is broken.  */
      inferior *resume_parent = nullptr;

      /* This exec or exit marks the end of the shared memory region
	 between the parent and the child.  Break the bonds.  */
      inferior *vfork_parent = inf->vfork_parent;
      inf->vfork_parent->vfork_child = nullptr;
      inf->vfork_parent = nullptr;

      /* If the user wanted to detach from the parent, now is the
	 time.  */
      if (vfork_parent->pending_detach)
	{
	  struct program_space *pspace;
	  struct address_space *aspace;

	  /* follow-fork child, detach-on-fork on.  */

	  vfork_parent->pending_detach = false;

	  scoped_restore_current_pspace_and_thread restore_thread;

	  /* We're letting loose of the parent.  */
	  thread_info *tp = any_live_thread_of_inferior (vfork_parent);
	  switch_to_thread (tp);

	  /* We're about to detach from the parent, which implicitly
	     removes breakpoints from its address space.  There's a
	     catch here: we want to reuse the spaces for the child,
	     but, parent/child are still sharing the pspace at this
	     point, although the exec in reality makes the kernel give
	     the child a fresh set of new pages.  The problem here is
	     that the breakpoints module being unaware of this, would
	     likely chose the child process to write to the parent
	     address space.  Swapping the child temporarily away from
	     the spaces has the desired effect.  Yes, this is "sort
	     of" a hack.  */

	  pspace = inf->pspace;
	  aspace = inf->aspace;
	  inf->aspace = nullptr;
	  inf->pspace = nullptr;

	  if (print_inferior_events)
	    {
	      std::string pidstr
		= target_pid_to_str (ptid_t (vfork_parent->pid));

	      target_terminal::ours_for_output ();

	      if (exec)
		{
		  gdb_printf (_("[Detaching vfork parent %s "
				"after child exec]\n"), pidstr.c_str ());
		}
	      else
		{
		  gdb_printf (_("[Detaching vfork parent %s "
				"after child exit]\n"), pidstr.c_str ());
		}
	    }

	  target_detach (vfork_parent, 0);

	  /* Put it back.  */
	  inf->pspace = pspace;
	  inf->aspace = aspace;
	}
      else if (exec)
	{
	  /* We're staying attached to the parent, so, really give the
	     child a new address space.  */
	  inf->pspace = new program_space (maybe_new_address_space ());
	  inf->aspace = inf->pspace->aspace;
	  inf->removable = true;
	  set_current_program_space (inf->pspace);

	  resume_parent = vfork_parent;
	}
      else
	{
	  /* If this is a vfork child exiting, then the pspace and
	     aspaces were shared with the parent.  Since we're
	     reporting the process exit, we'll be mourning all that is
	     found in the address space, and switching to null_ptid,
	     preparing to start a new inferior.  But, since we don't
	     want to clobber the parent's address/program spaces, we
	     go ahead and create a new one for this exiting
	     inferior.  */

	  /* Switch to no-thread while running clone_program_space, so
	     that clone_program_space doesn't want to read the
	     selected frame of a dead process.  */
	  scoped_restore_current_thread restore_thread;
	  switch_to_no_thread ();

	  inf->pspace = new program_space (maybe_new_address_space ());
	  inf->aspace = inf->pspace->aspace;
	  set_current_program_space (inf->pspace);
	  inf->removable = true;
	  inf->symfile_flags = SYMFILE_NO_READ;
	  clone_program_space (inf->pspace, vfork_parent->pspace);

	  resume_parent = vfork_parent;
	}

      gdb_assert (current_program_space == inf->pspace);

      if (non_stop && resume_parent != nullptr)
	{
	  /* If the user wanted the parent to be running, let it go
	     free now.  */
	  scoped_restore_current_thread restore_thread;

	  infrun_debug_printf ("resuming vfork parent process %d",
			       resume_parent->pid);

	  for (thread_info *thread : resume_parent->threads ())
	    proceed_after_vfork_done (thread);
	}
    }
}
1140
/* Handle TARGET_WAITKIND_VFORK_DONE, reported by EVENT_THREAD.  */

static void
handle_vfork_done (thread_info *event_thread)
{
  INFRUN_SCOPED_DEBUG_ENTER_EXIT;

  /* We only care about this event if inferior::thread_waiting_for_vfork_done is
     set, that is if we are waiting for a vfork child not under our control
     (because we detached it) to exec or exit.

     If an inferior has vforked and we are debugging the child, we don't use
     the vfork-done event to get notified about the end of the shared address
     space window.  We rely instead on the child's exec or exit event, and the
     inferior::vfork_{parent,child} fields are used instead.  See
     handle_vfork_child_exec_or_exit for that.  */
  if (event_thread->inf->thread_waiting_for_vfork_done == nullptr)
    {
      infrun_debug_printf ("not waiting for a vfork-done event");
      return;
    }

  /* We stopped all threads (other than the vforking thread) of the inferior in
     follow_fork and kept them stopped until now.  It should therefore not be
     possible for another thread to have reported a vfork during that window.
     If THREAD_WAITING_FOR_VFORK_DONE is set, it has to be the same thread whose
     vfork-done we are handling right now.  */
  gdb_assert (event_thread->inf->thread_waiting_for_vfork_done == event_thread);

  event_thread->inf->thread_waiting_for_vfork_done = nullptr;
  /* Breakpoints may be inserted in this program space again, now that
     the shared vfork address-space window is over.  */
  event_thread->inf->pspace->breakpoints_not_allowed = 0;

  /* On non-stop targets, we stopped all the inferior's threads in follow_fork,
     resume them now.  On all-stop targets, everything that needs to be resumed
     will be when we resume the event thread.  */
  if (target_is_non_stop_p ())
    {
      /* restart_threads and start_step_over may change the current thread, make
	 sure we leave the event thread as the current thread.  */
      scoped_restore_current_thread restore_thread;

      insert_breakpoints ();
      start_step_over ();

      if (!step_over_info_valid_p ())
	restart_threads (event_thread, event_thread->inf);
    }
}
1189
/* Enum strings for "set|show follow-exec-mode".  */

static const char follow_exec_mode_new[] = "new";
static const char follow_exec_mode_same[] = "same";
static const char *const follow_exec_mode_names[] =
{
  follow_exec_mode_new,
  follow_exec_mode_same,
  nullptr,
};

/* The current setting; points at one of the strings above
   (follow_exec_mode_same by default), so it can be compared by
   pointer identity (see follow_exec).  */
static const char *follow_exec_mode_string = follow_exec_mode_same;
/* Show-function for the "follow-exec-mode" setting; prints VALUE,
   the current mode string, to FILE.  */
static void
show_follow_exec_mode_string (struct ui_file *file, int from_tty,
			      struct cmd_list_element *c, const char *value)
{
  gdb_printf (file, _("Follow exec mode is \"%s\".\n"), value);
}
1208
/* Follow an exec event reported for PTID: switch GDB over to the
   newly exec'd program.  EXEC_FILE_TARGET is the new executable's
   pathname, in the target's filesystem view (it is mapped to a host
   pathname via exec_file_find below), and is assumed to be
   non-NULL.  */

static void
follow_exec (ptid_t ptid, const char *exec_file_target)
{
  int pid = ptid.pid ();
  ptid_t process_ptid;

  /* Switch terminal for any messages produced e.g. by
     breakpoint_re_set.  */
  target_terminal::ours_for_output ();

  /* This is an exec event that we actually wish to pay attention to.
     Refresh our symbol table to the newly exec'd program, remove any
     momentary bp's, etc.

     If there are breakpoints, they aren't really inserted now,
     since the exec() transformed our inferior into a fresh set
     of instructions.

     We want to preserve symbolic breakpoints on the list, since
     we have hopes that they can be reset after the new a.out's
     symbol table is read.

     However, any "raw" breakpoints must be removed from the list
     (e.g., the solib bp's), since their address is probably invalid
     now.

     And, we DON'T want to call delete_breakpoints() here, since
     that may write the bp's "shadow contents" (the instruction
     value that was overwritten with a TRAP instruction).  Since
     we now have a new a.out, those shadow contents aren't valid.  */

  mark_breakpoints_out ();

  /* The target reports the exec event to the main thread, even if
     some other thread does the exec, and even if the main thread was
     stopped or already gone.  We may still have non-leader threads of
     the process on our list.  E.g., on targets that don't have thread
     exit events (like remote) and nothing forces an update of the
     thread list up to here.  When debugging remotely, it's best to
     avoid extra traffic, when possible, so avoid syncing the thread
     list with the target, and instead go ahead and delete all threads
     of the process but the one that reported the event.  Note this must
     be done before calling update_breakpoints_after_exec, as
     otherwise clearing the threads' resources would reference stale
     thread breakpoints -- it may have been one of these threads that
     stepped across the exec.  We could just clear their stepping
     states, but as long as we're iterating, might as well delete
     them.  Deleting them now rather than at the next user-visible
     stop provides a nicer sequence of events for user and MI
     notifications.  */
  for (thread_info *th : all_threads_safe ())
    if (th->ptid.pid () == pid && th->ptid != ptid)
      delete_thread (th);

  /* We also need to clear any left over stale state for the
     leader/event thread.  E.g., if there was any step-resume
     breakpoint or similar, it's gone now.  We cannot truly
     step-to-next statement through an exec().  */
  thread_info *th = inferior_thread ();
  th->control.step_resume_breakpoint = nullptr;
  th->control.exception_resume_breakpoint = nullptr;
  th->control.single_step_breakpoints = nullptr;
  th->control.step_range_start = 0;
  th->control.step_range_end = 0;

  /* The user may have had the main thread held stopped in the
     previous image (e.g., schedlock on, or non-stop).  Release
     it now.  */
  th->stop_requested = 0;

  update_breakpoints_after_exec ();

  /* What is this a.out's name?  */
  process_ptid = ptid_t (pid);
  gdb_printf (_("%s is executing new program: %s\n"),
	      target_pid_to_str (process_ptid).c_str (),
	      exec_file_target);

  /* We've followed the inferior through an exec.  Therefore, the
     inferior has essentially been killed & reborn.  */

  breakpoint_init_inferior (inf_execd);

  gdb::unique_xmalloc_ptr<char> exec_file_host
    = exec_file_find (exec_file_target, nullptr);

  /* If we were unable to map the executable target pathname onto a host
     pathname, tell the user that.  Otherwise GDB's subsequent behavior
     is confusing.  Maybe it would even be better to stop at this point
     so that the user can specify a file manually before continuing.  */
  if (exec_file_host == nullptr)
    warning (_("Could not load symbols for executable %s.\n"
	       "Do you need \"set sysroot\"?"),
	     exec_file_target);

  /* Reset the shared library package.  This ensures that we get a
     shlib event when the child reaches "_start", at which point the
     dld will have had a chance to initialize the child.  */
  /* Also, loading a symbol file below may trigger symbol lookups, and
     we don't want those to be satisfied by the libraries of the
     previous incarnation of this process.  */
  no_shared_libraries (nullptr, 0);

  inferior *execing_inferior = current_inferior ();
  inferior *following_inferior;

  /* Pointer-identity comparison against the mode string is
     intentional; see follow_exec_mode_names above.  */
  if (follow_exec_mode_string == follow_exec_mode_new)
    {
      /* The user wants to keep the old inferior and program spaces
	 around.  Create a new fresh one, and switch to it.  */

      /* Do exit processing for the original inferior before setting the new
	 inferior's pid.  Having two inferiors with the same pid would confuse
	 find_inferior_p(t)id.  Transfer the terminal state and info from the
	 old to the new inferior.  */
      following_inferior = add_inferior_with_spaces ();

      swap_terminal_info (following_inferior, execing_inferior);
      exit_inferior (execing_inferior);

      following_inferior->pid = pid;
    }
  else
    {
      /* follow-exec-mode is "same", we continue execution in the execing
	 inferior.  */
      following_inferior = execing_inferior;

      /* The old description may no longer be fit for the new image.
	 E.g, a 64-bit process exec'ed a 32-bit process.  Clear the
	 old description; we'll read a new one below.  No need to do
	 this on "follow-exec-mode new", as the old inferior stays
	 around (its description is later cleared/refetched on
	 restart).  */
      target_clear_description ();
    }

  target_follow_exec (following_inferior, ptid, exec_file_target);

  gdb_assert (current_inferior () == following_inferior);
  gdb_assert (current_program_space == following_inferior->pspace);

  /* Attempt to open the exec file.  SYMFILE_DEFER_BP_RESET is used
     because the proper displacement for a PIE (Position Independent
     Executable) main symbol file will only be computed by
     solib_create_inferior_hook below.  breakpoint_re_set would fail
     to insert the breakpoints with the zero displacement.  */
  try_open_exec_file (exec_file_host.get (), following_inferior,
		      SYMFILE_DEFER_BP_RESET);

  /* If the target can specify a description, read it.  Must do this
     after flipping to the new executable (because the target supplied
     description must be compatible with the executable's
     architecture, and the old executable may e.g., be 32-bit, while
     the new one 64-bit), and before anything involving memory or
     registers.  */
  target_find_description ();

  gdb::observers::inferior_execd.notify (execing_inferior, following_inferior);

  breakpoint_re_set ();

  /* Reinsert all breakpoints.  (Those which were symbolic have
     been reset to the proper address in the new a.out, thanks
     to symbol_file_command...).  */
  insert_breakpoints ();

  /* The next resume of this inferior should bring it to the shlib
     startup breakpoints.  (If the user had also set bp's on
     "main" from the old (parent) process, then they'll auto-
     matically get reset there in the new process.).  */
}
1383
/* The chain of threads that need to do a step-over operation to get
   past e.g., a breakpoint.  What technique is used to step over the
   breakpoint/watchpoint does not matter -- all threads end up in the
   same queue, to maintain rough temporal order of execution, in order
   to avoid starvation, otherwise, we could e.g., find ourselves
   constantly stepping the same couple threads past their breakpoints
   over and over, if the single-step finishes fast enough.  */
thread_step_over_list global_thread_step_over_list;
1392
/* Bit flags indicating what the thread needs to step over.  */

enum step_over_what_flag
  {
    /* Step over a breakpoint.  */
    STEP_OVER_BREAKPOINT = 1,

    /* Step past a non-continuable watchpoint, in order to let the
       instruction execute so we can evaluate the watchpoint
       expression.  */
    STEP_OVER_WATCHPOINT = 2
  };
/* step_over_what is a flag-set type combining step_over_what_flag
   values.  */
DEF_ENUM_FLAGS_TYPE (enum step_over_what_flag, step_over_what);
1406
/* Info about an instruction that is being stepped over.  */

struct step_over_info
{
  /* If we're stepping past a breakpoint, this is the address space
     and address of the instruction the breakpoint is set at.  We'll
     skip inserting all breakpoints here.  Valid iff ASPACE is
     non-NULL.  */
  const address_space *aspace = nullptr;
  CORE_ADDR address = 0;

  /* The instruction being stepped over triggers a nonsteppable
     watchpoint.  If true, we'll skip inserting watchpoints.  */
  int nonsteppable_watchpoint_p = 0;

  /* The stepping thread's global number, or -1 when no step-over is
     in progress (see clear_step_over_info).  */
  int thread = -1;
};
1425
/* The step-over info of the location that is being stepped over.

   Note that with async/breakpoint always-inserted mode, a user might
   set a new breakpoint/watchpoint/etc. exactly while a breakpoint is
   being stepped over.  As setting a new breakpoint inserts all
   breakpoints, we need to make sure the breakpoint being stepped over
   isn't inserted then.  We do that by only clearing the step-over
   info when the step-over is actually finished (or aborted).

   Presently GDB can only step over one breakpoint at any given time.
   Given that threads that can't run code in the same address space as
   the breakpoint's can't really miss the breakpoint, GDB could be
   taught to step-over at most one breakpoint per address space (so
   this info could move to the address space object if/when GDB is
   extended).  The set of breakpoints being stepped over will normally
   be much smaller than the set of all breakpoints, so a flag in the
   breakpoint location structure would be wasteful.  A separate list
   also saves complexity and run-time, as otherwise we'd have to go
   through all breakpoint locations clearing their flag whenever we
   start a new sequence.  Similar considerations weigh against storing
   this info in the thread object.  Plus, not all step overs actually
   have breakpoint locations -- e.g., stepping past a single-step
   breakpoint, or stepping to complete a non-continuable
   watchpoint.  */
static struct step_over_info step_over_info;
1451
1452 /* Record the address of the breakpoint/instruction we're currently
1453 stepping over.
1454 N.B. We record the aspace and address now, instead of say just the thread,
1455 because when we need the info later the thread may be running. */
1456
1457 static void
1458 set_step_over_info (const address_space *aspace, CORE_ADDR address,
1459 int nonsteppable_watchpoint_p,
1460 int thread)
1461 {
1462 step_over_info.aspace = aspace;
1463 step_over_info.address = address;
1464 step_over_info.nonsteppable_watchpoint_p = nonsteppable_watchpoint_p;
1465 step_over_info.thread = thread;
1466 }
1467
1468 /* Called when we're not longer stepping over a breakpoint / an
1469 instruction, so all breakpoints are free to be (re)inserted. */
1470
1471 static void
1472 clear_step_over_info (void)
1473 {
1474 infrun_debug_printf ("clearing step over info");
1475 step_over_info.aspace = nullptr;
1476 step_over_info.address = 0;
1477 step_over_info.nonsteppable_watchpoint_p = 0;
1478 step_over_info.thread = -1;
1479 }
1480
1481 /* See infrun.h. */
1482
1483 int
1484 stepping_past_instruction_at (struct address_space *aspace,
1485 CORE_ADDR address)
1486 {
1487 return (step_over_info.aspace != nullptr
1488 && breakpoint_address_match (aspace, address,
1489 step_over_info.aspace,
1490 step_over_info.address));
1491 }
1492
1493 /* See infrun.h. */
1494
1495 int
1496 thread_is_stepping_over_breakpoint (int thread)
1497 {
1498 return (step_over_info.thread != -1
1499 && thread == step_over_info.thread);
1500 }
1501
/* See infrun.h.  */

int
stepping_past_nonsteppable_watchpoint (void)
{
  /* Non-zero while the current step-over is for an instruction that
     triggers a nonsteppable watchpoint (see set_step_over_info).  */
  return step_over_info.nonsteppable_watchpoint_p;
}
1509
1510 /* Returns true if step-over info is valid. */
1511
1512 static bool
1513 step_over_info_valid_p (void)
1514 {
1515 return (step_over_info.aspace != nullptr
1516 || stepping_past_nonsteppable_watchpoint ());
1517 }
1518
1519 \f
1520 /* Displaced stepping. */
1521
1522 /* In non-stop debugging mode, we must take special care to manage
1523 breakpoints properly; in particular, the traditional strategy for
1524 stepping a thread past a breakpoint it has hit is unsuitable.
1525 'Displaced stepping' is a tactic for stepping one thread past a
1526 breakpoint it has hit while ensuring that other threads running
1527 concurrently will hit the breakpoint as they should.
1528
1529 The traditional way to step a thread T off a breakpoint in a
1530 multi-threaded program in all-stop mode is as follows:
1531
1532 a0) Initially, all threads are stopped, and breakpoints are not
1533 inserted.
1534 a1) We single-step T, leaving breakpoints uninserted.
1535 a2) We insert breakpoints, and resume all threads.
1536
1537 In non-stop debugging, however, this strategy is unsuitable: we
1538 don't want to have to stop all threads in the system in order to
1539 continue or step T past a breakpoint. Instead, we use displaced
1540 stepping:
1541
1542 n0) Initially, T is stopped, other threads are running, and
1543 breakpoints are inserted.
1544 n1) We copy the instruction "under" the breakpoint to a separate
1545 location, outside the main code stream, making any adjustments
1546 to the instruction, register, and memory state as directed by
1547 T's architecture.
1548 n2) We single-step T over the instruction at its new location.
1549 n3) We adjust the resulting register and memory state as directed
1550 by T's architecture. This includes resetting T's PC to point
1551 back into the main instruction stream.
1552 n4) We resume T.
1553
1554 This approach depends on the following gdbarch methods:
1555
1556 - gdbarch_max_insn_length and gdbarch_displaced_step_location
1557 indicate where to copy the instruction, and how much space must
1558 be reserved there. We use these in step n1.
1559
   - gdbarch_displaced_step_copy_insn copies an instruction to a new
1561 address, and makes any necessary adjustments to the instruction,
1562 register contents, and memory. We use this in step n1.
1563
1564 - gdbarch_displaced_step_fixup adjusts registers and memory after
1565 we have successfully single-stepped the instruction, to yield the
1566 same effect the instruction would have had if we had executed it
1567 at its original address. We use this in step n3.
1568
1569 The gdbarch_displaced_step_copy_insn and
1570 gdbarch_displaced_step_fixup functions must be written so that
1571 copying an instruction with gdbarch_displaced_step_copy_insn,
1572 single-stepping across the copied instruction, and then applying
   gdbarch_displaced_step_fixup should have the same effects on the
1574 thread's memory and registers as stepping the instruction in place
1575 would have. Exactly which responsibilities fall to the copy and
1576 which fall to the fixup is up to the author of those functions.
1577
1578 See the comments in gdbarch.sh for details.
1579
1580 Note that displaced stepping and software single-step cannot
1581 currently be used in combination, although with some care I think
1582 they could be made to. Software single-step works by placing
1583 breakpoints on all possible subsequent instructions; if the
1584 displaced instruction is a PC-relative jump, those breakpoints
1585 could fall in very strange places --- on pages that aren't
1586 executable, or at addresses that are not proper instruction
1587 boundaries. (We do generally let other threads run while we wait
1588 to hit the software single-step breakpoint, and they might
1589 encounter such a corrupted instruction.) One way to work around
1590 this would be to have gdbarch_displaced_step_copy_insn fully
1591 simulate the effect of PC-relative instructions (and return NULL)
1592 on architectures that use software single-stepping.
1593
1594 In non-stop mode, we can have independent and simultaneous step
1595 requests, so more than one thread may need to simultaneously step
1596 over a breakpoint. The current implementation assumes there is
1597 only one scratch space per process. In this case, we have to
1598 serialize access to the scratch space. If thread A wants to step
1599 over a breakpoint, but we are currently waiting for some other
1600 thread to complete a displaced step, we leave thread A stopped and
1601 place it in the displaced_step_request_queue. Whenever a displaced
1602 step finishes, we pick the next thread in the queue and start a new
1603 displaced step operation on it. See displaced_step_prepare and
1604 displaced_step_finish for details. */
1605
1606 /* Return true if THREAD is doing a displaced step. */
1607
1608 static bool
1609 displaced_step_in_progress_thread (thread_info *thread)
1610 {
1611 gdb_assert (thread != nullptr);
1612
1613 return thread->displaced_step_state.in_progress ();
1614 }
1615
/* Return true if INF has a thread doing a displaced step.  */

static bool
displaced_step_in_progress (inferior *inf)
{
  /* in_progress_count is maintained per-inferior; non-zero means at
     least one of INF's threads is mid displaced-step.  */
  return inf->displaced_step_state.in_progress_count > 0;
}
1623
1624 /* Return true if any thread is doing a displaced step. */
1625
1626 static bool
1627 displaced_step_in_progress_any_thread ()
1628 {
1629 for (inferior *inf : all_non_exited_inferiors ())
1630 {
1631 if (displaced_step_in_progress (inf))
1632 return true;
1633 }
1634
1635 return false;
1636 }
1637
/* Handle inferior INF going away: discard its displaced-stepping
   state and forget any thread that was waiting for a vfork-done
   event.  */
static void
infrun_inferior_exit (struct inferior *inf)
{
  inf->displaced_step_state.reset ();
  inf->thread_waiting_for_vfork_done = nullptr;
}
1644
/* Handle an exec: EXEC_INF is the inferior that execed, FOLLOW_INF
   the inferior GDB follows afterwards (they may be the same,
   depending on follow-exec-mode).  Discard step-over state that
   cannot survive the exec.  */
static void
infrun_inferior_execd (inferior *exec_inf, inferior *follow_inf)
{
  /* If some threads were doing a displaced step in this inferior at the
     moment of the exec, they no longer exist.  Even if the exec'ing thread
     was doing a displaced step, we don't want to do any fixup nor restore
     displaced stepping buffer bytes.  */
  follow_inf->displaced_step_state.reset ();

  for (thread_info *thread : follow_inf->threads ())
    thread->displaced_step_state.reset ();

  /* Since an in-line step is done with everything else stopped, if there was
     one in progress at the time of the exec, it must have been the exec'ing
     thread.  */
  clear_step_over_info ();

  follow_inf->thread_waiting_for_vfork_done = nullptr;
}
1664
/* If ON, and the architecture supports it, GDB will use displaced
   stepping to step over breakpoints.  If OFF, or if the architecture
   doesn't support it, GDB will instead use the traditional
   hold-and-step approach.  If AUTO (which is the default), GDB will
   decide which technique to use to step over breakpoints depending on
   whether the target works in a non-stop way (see use_displaced_stepping).  */

static enum auto_boolean can_use_displaced_stepping = AUTO_BOOLEAN_AUTO;
1673
/* Show-function for the can_use_displaced_stepping setting.  When the
   setting is "auto", also report the effective value, which depends
   on whether the target operates in a non-stop way.  */
static void
show_can_use_displaced_stepping (struct ui_file *file, int from_tty,
				 struct cmd_list_element *c,
				 const char *value)
{
  if (can_use_displaced_stepping == AUTO_BOOLEAN_AUTO)
    gdb_printf (file,
		_("Debugger's willingness to use displaced stepping "
		  "to step over breakpoints is %s (currently %s).\n"),
		value, target_is_non_stop_p () ? "on" : "off");
  else
    gdb_printf (file,
		_("Debugger's willingness to use displaced stepping "
		  "to step over breakpoints is %s.\n"), value);
}
1689
1690 /* Return true if the gdbarch implements the required methods to use
1691 displaced stepping. */
1692
1693 static bool
1694 gdbarch_supports_displaced_stepping (gdbarch *arch)
1695 {
1696 /* Only check for the presence of `prepare`. The gdbarch verification ensures
1697 that if `prepare` is provided, so is `finish`. */
1698 return gdbarch_displaced_step_prepare_p (arch);
1699 }
1700
1701 /* Return non-zero if displaced stepping can/should be used to step
1702 over breakpoints of thread TP. */
1703
1704 static bool
1705 use_displaced_stepping (thread_info *tp)
1706 {
1707 /* If the user disabled it explicitly, don't use displaced stepping. */
1708 if (can_use_displaced_stepping == AUTO_BOOLEAN_FALSE)
1709 return false;
1710
1711 /* If "auto", only use displaced stepping if the target operates in a non-stop
1712 way. */
1713 if (can_use_displaced_stepping == AUTO_BOOLEAN_AUTO
1714 && !target_is_non_stop_p ())
1715 return false;
1716
1717 gdbarch *gdbarch = get_thread_regcache (tp)->arch ();
1718
1719 /* If the architecture doesn't implement displaced stepping, don't use
1720 it. */
1721 if (!gdbarch_supports_displaced_stepping (gdbarch))
1722 return false;
1723
1724 /* If recording, don't use displaced stepping. */
1725 if (find_record_target () != nullptr)
1726 return false;
1727
1728 /* If displaced stepping failed before for this inferior, don't bother trying
1729 again. */
1730 if (tp->inf->displaced_step_state.failed_before)
1731 return false;
1732
1733 return true;
1734 }
1735
/* Simple function wrapper around displaced_step_thread_state::reset.
   A free function is needed so it can be wrapped with
   FORWARD_SCOPE_EXIT below.  */

static void
displaced_step_reset (displaced_step_thread_state *displaced)
{
  displaced->reset ();
}

/* A cleanup that wraps displaced_step_reset.  We use this instead of, say,
   SCOPE_EXIT, because it needs to be discardable with "cleanup.release ()".  */

using displaced_step_reset_cleanup = FORWARD_SCOPE_EXIT (displaced_step_reset);
1748
1749 /* Prepare to single-step, using displaced stepping.
1750
1751 Note that we cannot use displaced stepping when we have a signal to
1752 deliver. If we have a signal to deliver and an instruction to step
1753 over, then after the step, there will be no indication from the
1754 target whether the thread entered a signal handler or ignored the
1755 signal and stepped over the instruction successfully --- both cases
1756 result in a simple SIGTRAP. In the first case we mustn't do a
1757 fixup, and in the second case we must --- but we can't tell which.
1758 Comments in the code for 'random signals' in handle_inferior_event
1759 explain how we handle this case instead.
1760
1761 Returns DISPLACED_STEP_PREPARE_STATUS_OK if preparing was successful -- this
1762 thread is going to be stepped now; DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE
1763 if displaced stepping this thread got queued; or
1764 DISPLACED_STEP_PREPARE_STATUS_CANT if this instruction can't be displaced
1765 stepped. */
1766
static displaced_step_prepare_status
displaced_step_prepare_throw (thread_info *tp)
{
  regcache *regcache = get_thread_regcache (tp);
  struct gdbarch *gdbarch = regcache->arch ();
  displaced_step_thread_state &disp_step_thread_state
    = tp->displaced_step_state;

  /* We should never reach this function if the architecture does not
     support displaced stepping.  */
  gdb_assert (gdbarch_supports_displaced_stepping (gdbarch));

  /* Nor if the thread isn't meant to step over a breakpoint.  */
  gdb_assert (tp->control.trap_expected);

  /* Disable range stepping while executing in the scratch pad.  We
     want a single-step even if executing the displaced instruction in
     the scratch buffer lands within the stepping range (e.g., a
     jump/branch).  */
  tp->control.may_range_step = 0;

  /* We are about to start a displaced step for this thread.  If one is already
     in progress, something's wrong.  */
  gdb_assert (!disp_step_thread_state.in_progress ());

  if (tp->inf->displaced_step_state.unavailable)
    {
      /* The gdbarch tells us it's not worth asking to try a prepare because
	 it is likely that it will return unavailable, so don't bother asking.
	 Queue the thread so the step-over is retried later.  */

      displaced_debug_printf ("deferring step of %s",
			      tp->ptid.to_string ().c_str ());

      global_thread_step_over_chain_enqueue (tp);
      return DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE;
    }

  displaced_debug_printf ("displaced-stepping %s now",
			  tp->ptid.to_string ().c_str ());

  /* Switch to TP so the register/memory accesses below apply to the
     right thread; the previous thread is restored on scope exit.  */
  scoped_restore_current_thread restore_thread;

  switch_to_thread (tp);

  CORE_ADDR original_pc = regcache_read_pc (regcache);
  CORE_ADDR displaced_pc;

  /* Display the instruction we are going to displaced step.  */
  if (debug_displaced)
    {
      string_file tmp_stream;
      int dislen = gdb_print_insn (gdbarch, original_pc, &tmp_stream,
				   nullptr);

      if (dislen > 0)
	{
	  gdb::byte_vector insn_buf (dislen);
	  read_memory (original_pc, insn_buf.data (), insn_buf.size ());

	  std::string insn_bytes = bytes_to_string (insn_buf);

	  displaced_debug_printf ("original insn %s: %s \t %s",
				  paddress (gdbarch, original_pc),
				  insn_bytes.c_str (),
				  tmp_stream.string ().c_str ());
	}
      else
	displaced_debug_printf ("original insn %s: invalid length: %d",
				paddress (gdbarch, original_pc), dislen);
    }

  /* Ask the architecture to copy the instruction to a scratch buffer;
     DISPLACED_PC receives the address the copy was placed at.  */
  displaced_step_prepare_status status
    = gdbarch_displaced_step_prepare (gdbarch, tp, displaced_pc);

  if (status == DISPLACED_STEP_PREPARE_STATUS_CANT)
    {
      displaced_debug_printf ("failed to prepare (%s)",
			      tp->ptid.to_string ().c_str ());

      return DISPLACED_STEP_PREPARE_STATUS_CANT;
    }
  else if (status == DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE)
    {
      /* Not enough displaced stepping resources available, defer this
	 request by placing it in the queue.  */

      displaced_debug_printf ("not enough resources available, "
			      "deferring step of %s",
			      tp->ptid.to_string ().c_str ());

      global_thread_step_over_chain_enqueue (tp);

      return DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE;
    }

  gdb_assert (status == DISPLACED_STEP_PREPARE_STATUS_OK);

  /* Save the information we need to fix things up if the step
     succeeds.  */
  disp_step_thread_state.set (gdbarch);

  tp->inf->displaced_step_state.in_progress_count++;

  displaced_debug_printf ("prepared successfully thread=%s, "
			  "original_pc=%s, displaced_pc=%s",
			  tp->ptid.to_string ().c_str (),
			  paddress (gdbarch, original_pc),
			  paddress (gdbarch, displaced_pc));

  /* Display the new displaced instruction(s).  */
  if (debug_displaced)
    {
      string_file tmp_stream;
      CORE_ADDR addr = displaced_pc;

      /* If displaced stepping is going to use h/w single step then we know
	 that the replacement instruction can only be a single instruction,
	 in that case set the end address at the next byte.

	 Otherwise the displaced stepping copy instruction routine could
	 have generated multiple instructions, and all we know is that they
	 must fit within the LEN bytes of the buffer.  */
      CORE_ADDR end
	= addr + (gdbarch_displaced_step_hw_singlestep (gdbarch)
		  ? 1 : gdbarch_displaced_step_buffer_length (gdbarch));

      while (addr < end)
	{
	  int dislen = gdb_print_insn (gdbarch, addr, &tmp_stream, nullptr);
	  if (dislen <= 0)
	    {
	      displaced_debug_printf
		("replacement insn %s: invalid length: %d",
		 paddress (gdbarch, addr), dislen);
	      break;
	    }

	  gdb::byte_vector insn_buf (dislen);
	  read_memory (addr, insn_buf.data (), insn_buf.size ());

	  std::string insn_bytes = bytes_to_string (insn_buf);
	  std::string insn_str = tmp_stream.release ();
	  displaced_debug_printf ("replacement insn %s: %s \t %s",
				  paddress (gdbarch, addr),
				  insn_bytes.c_str (),
				  insn_str.c_str ());
	  addr += dislen;
	}
    }

  return DISPLACED_STEP_PREPARE_STATUS_OK;
}
1919
/* Wrapper for displaced_step_prepare_throw that disables further
   attempts at displaced stepping if we get a memory error.  */
1922
1923 static displaced_step_prepare_status
1924 displaced_step_prepare (thread_info *thread)
1925 {
1926 displaced_step_prepare_status status
1927 = DISPLACED_STEP_PREPARE_STATUS_CANT;
1928
1929 try
1930 {
1931 status = displaced_step_prepare_throw (thread);
1932 }
1933 catch (const gdb_exception_error &ex)
1934 {
1935 if (ex.error != MEMORY_ERROR
1936 && ex.error != NOT_SUPPORTED_ERROR)
1937 throw;
1938
1939 infrun_debug_printf ("caught exception, disabling displaced stepping: %s",
1940 ex.what ());
1941
1942 /* Be verbose if "set displaced-stepping" is "on", silent if
1943 "auto". */
1944 if (can_use_displaced_stepping == AUTO_BOOLEAN_TRUE)
1945 {
1946 warning (_("disabling displaced stepping: %s"),
1947 ex.what ());
1948 }
1949
1950 /* Disable further displaced stepping attempts. */
1951 thread->inf->displaced_step_state.failed_before = 1;
1952 }
1953
1954 return status;
1955 }
1956
1957 /* Maybe disable thread-{cloned,created,exited} event reporting after
1958 a step-over (either in-line or displaced) finishes. */
1959
1960 static void
1961 update_thread_events_after_step_over (thread_info *event_thread)
1962 {
1963 if (target_supports_set_thread_options (0))
1964 {
1965 /* We can control per-thread options. Disable events for the
1966 event thread. */
1967 event_thread->set_thread_options (0);
1968 }
1969 else
1970 {
1971 /* We can only control the target-wide target_thread_events
1972 setting. Disable it, but only if other threads don't need it
1973 enabled. */
1974 if (!displaced_step_in_progress_any_thread ())
1975 target_thread_events (false);
1976 }
1977 }
1978
1979 /* If we displaced stepped an instruction successfully, adjust registers and
1980 memory to yield the same effect the instruction would have had if we had
1981 executed it at its original address, and return
1982 DISPLACED_STEP_FINISH_STATUS_OK. If the instruction didn't complete,
1983 relocate the PC and return DISPLACED_STEP_FINISH_STATUS_NOT_EXECUTED.
1984
1985 If the thread wasn't displaced stepping, return
1986 DISPLACED_STEP_FINISH_STATUS_OK as well. */
1987
static displaced_step_finish_status
displaced_step_finish (thread_info *event_thread,
		       const target_waitstatus &event_status)
{
  /* Check whether the parent is displaced stepping.  */
  struct regcache *regcache = get_thread_regcache (event_thread);
  struct gdbarch *gdbarch = regcache->arch ();
  inferior *parent_inf = event_thread->inf;

  /* If this was a fork/vfork/clone, this event indicates that the
     displaced stepping of the syscall instruction has been done, so
     we perform cleanup for parent here.  Also note that this
     operation also cleans up the child for vfork, because their pages
     are shared.  */

  /* If this is a fork (child gets its own address space copy) and
     some displaced step buffers were in use at the time of the fork,
     restore the displaced step buffer bytes in the child process.

     Architectures which support displaced stepping and fork events
     must supply an implementation of
     gdbarch_displaced_step_restore_all_in_ptid.  This is not enforced
     during gdbarch validation to support architectures which support
     displaced stepping but not forks.  */
  if (event_status.kind () == TARGET_WAITKIND_FORKED
      && gdbarch_supports_displaced_stepping (gdbarch))
    gdbarch_displaced_step_restore_all_in_ptid
      (gdbarch, parent_inf, event_status.child_ptid ());

  displaced_step_thread_state *displaced = &event_thread->displaced_step_state;

  /* Was this thread performing a displaced step?  */
  if (!displaced->in_progress ())
    return DISPLACED_STEP_FINISH_STATUS_OK;

  /* The step-over is done; stop asking for thread events if nothing
     else needs them (see update_thread_events_after_step_over).  */
  update_thread_events_after_step_over (event_thread);

  gdb_assert (event_thread->inf->displaced_step_state.in_progress_count > 0);
  event_thread->inf->displaced_step_state.in_progress_count--;

  /* Fixup may need to read memory/registers.  Switch to the thread
     that we're fixing up.  Also, target_stopped_by_watchpoint checks
     the current thread, and displaced_step_restore performs ptid-dependent
     memory accesses using current_inferior().  */
  switch_to_thread (event_thread);

  /* Reset the per-thread displaced-step state on scope exit, whether
     the fixup below returns normally or throws.  */
  displaced_step_reset_cleanup cleanup (displaced);

  /* Do the fixup, and release the resources acquired to do the displaced
     step.  */
  displaced_step_finish_status status
    = gdbarch_displaced_step_finish (displaced->get_original_gdbarch (),
				     event_thread, event_status);

  if (event_status.kind () == TARGET_WAITKIND_FORKED
      || event_status.kind () == TARGET_WAITKIND_VFORKED
      || event_status.kind () == TARGET_WAITKIND_THREAD_CLONED)
    {
      /* Since the vfork/fork/clone syscall instruction was executed
	 in the scratchpad, the child's PC is also within the
	 scratchpad.  Set the child's PC to the parent's PC value,
	 which has already been fixed up.  Note: we use the parent's
	 aspace here, although we're touching the child, because the
	 child hasn't been added to the inferior list yet at this
	 point.  */

      struct regcache *child_regcache
	= get_thread_arch_aspace_regcache (parent_inf,
					   event_status.child_ptid (),
					   gdbarch,
					   parent_inf->aspace);
      /* Read PC value of parent.  */
      CORE_ADDR parent_pc = regcache_read_pc (regcache);

      displaced_debug_printf ("write child pc from %s to %s",
			      paddress (gdbarch,
					regcache_read_pc (child_regcache)),
			      paddress (gdbarch, parent_pc));

      regcache_write_pc (child_regcache, parent_pc);
    }

  return status;
}
2072
2073 /* Data to be passed around while handling an event. This data is
2074 discarded between events. */
struct execution_control_state
{
  /* Build a state for THR's event; with no thread, PTID starts as
     null_ptid and EVENT_THREAD as nullptr.  */
  explicit execution_control_state (thread_info *thr = nullptr)
    : ptid (thr == nullptr ? null_ptid : thr->ptid),
      event_thread (thr)
  {
  }

  /* The target the event came from; left null by the constructor, to
     be filled in by the event-handling code.  */
  process_stratum_target *target = nullptr;

  /* Ptid of the event thread, or null_ptid if none.  */
  ptid_t ptid;
  /* The thread that got the event, if this was a thread event; NULL
     otherwise.  */
  struct thread_info *event_thread;

  /* The wait status reported along with the event.  */
  struct target_waitstatus ws;

  /* Nonzero once the stop_func_* fields below have been filled in;
     they describe the function the inferior stopped in.  */
  int stop_func_filled_in = 0;
  CORE_ADDR stop_func_alt_start = 0;
  CORE_ADDR stop_func_start = 0;
  CORE_ADDR stop_func_end = 0;
  const char *stop_func_name = nullptr;

  /* Nonzero if handling should continue waiting for further events
     instead of stopping.  */
  int wait_some_more = 0;

  /* True if the event thread hit the single-step breakpoint of
     another thread.  Thus the event doesn't cause a stop, the thread
     needs to be single-stepped past the single-step breakpoint before
     we can switch back to the original stepping thread.  */
  int hit_singlestep_breakpoint = 0;
};
2103
2104 static void keep_going_pass_signal (struct execution_control_state *ecs);
2105 static void prepare_to_wait (struct execution_control_state *ecs);
2106 static bool keep_going_stepped_thread (struct thread_info *tp);
2107 static step_over_what thread_still_needs_step_over (struct thread_info *tp);
2108
2109 /* Are there any pending step-over requests? If so, run all we can
2110 now and return true. Otherwise, return false. */
2111
static bool
start_step_over (void)
{
  INFRUN_SCOPED_DEBUG_ENTER_EXIT;

  /* Don't start a new step-over if we already have an in-line
     step-over operation ongoing.  */
  if (step_over_info_valid_p ())
    return false;

  /* Steal the global thread step over chain.  As we try to initiate displaced
     steps, threads will be enqueued in the global chain if no buffers are
     available.  If we iterated on the global chain directly, we might iterate
     indefinitely.  */
  thread_step_over_list threads_to_step
    = std::move (global_thread_step_over_list);

  infrun_debug_printf ("stealing global queue of threads to step, length = %d",
		       thread_step_over_chain_length (threads_to_step));

  bool started = false;

  /* On scope exit (whatever the reason, return or exception), if there are
     threads left in the THREADS_TO_STEP chain, put back these threads in the
     global list.  */
  SCOPE_EXIT
    {
      if (threads_to_step.empty ())
	infrun_debug_printf ("step-over queue now empty");
      else
	{
	  infrun_debug_printf ("putting back %d threads to step in global queue",
			       thread_step_over_chain_length (threads_to_step));

	  global_thread_step_over_chain_enqueue_chain
	    (std::move (threads_to_step));
	}
    };

  /* Use a safe range so erasing the current element while iterating
     (below) is well-defined.  */
  thread_step_over_list_safe_range range
    = make_thread_step_over_list_safe_range (threads_to_step);

  for (thread_info *tp : range)
    {
      step_over_what step_what;
      int must_be_in_line;

      gdb_assert (!tp->stop_requested);

      if (tp->inf->displaced_step_state.unavailable)
	{
	  /* The arch told us to not even try preparing another displaced step
	     for this inferior.  Just leave the thread in THREADS_TO_STEP, it
	     will get moved to the global chain on scope exit.  */
	  continue;
	}

      if (tp->inf->thread_waiting_for_vfork_done != nullptr)
	{
	  /* When we stop all threads, handling a vfork, any thread in the step
	     over chain remains there.  A user could also try to continue a
	     thread stopped at a breakpoint while another thread is waiting for
	     a vfork-done event.  In any case, we don't want to start a step
	     over right now.  */
	  continue;
	}

      /* Remove thread from the THREADS_TO_STEP chain.  If anything goes wrong
	 while we try to prepare the displaced step, we don't add it back to
	 the global step over chain.  This is to avoid a thread staying in the
	 step over chain indefinitely if something goes wrong when resuming it.
	 If the error is intermittent and it still needs a step over, it will
	 get enqueued again when we try to resume it normally.  */
      threads_to_step.erase (threads_to_step.iterator_to (*tp));

      step_what = thread_still_needs_step_over (tp);
      must_be_in_line = ((step_what & STEP_OVER_WATCHPOINT)
			 || ((step_what & STEP_OVER_BREAKPOINT)
			     && !use_displaced_stepping (tp)));

      /* We currently stop all threads of all processes to step-over
	 in-line.  If we need to start a new in-line step-over, let
	 any pending displaced steps finish first.  */
      if (must_be_in_line && displaced_step_in_progress_any_thread ())
	{
	  global_thread_step_over_chain_enqueue (tp);
	  continue;
	}

      if (tp->control.trap_expected
	  || tp->resumed ()
	  || tp->executing ())
	{
	  internal_error ("[%s] has inconsistent state: "
			  "trap_expected=%d, resumed=%d, executing=%d\n",
			  tp->ptid.to_string ().c_str (),
			  tp->control.trap_expected,
			  tp->resumed (),
			  tp->executing ());
	}

      infrun_debug_printf ("resuming [%s] for step-over",
			   tp->ptid.to_string ().c_str ());

      /* keep_going_pass_signal skips the step-over if the breakpoint
	 is no longer inserted.  In all-stop, we want to keep looking
	 for a thread that needs a step-over instead of resuming TP,
	 because we wouldn't be able to resume anything else until the
	 target stops again.  In non-stop, the resume always resumes
	 only TP, so it's OK to let the thread resume freely.  */
      if (!target_is_non_stop_p () && !step_what)
	continue;

      switch_to_thread (tp);
      execution_control_state ecs (tp);
      keep_going_pass_signal (&ecs);

      if (!ecs.wait_some_more)
	error (_("Command aborted."));

      /* If the thread's step over could not be initiated because no buffers
	 were available, it was re-added to the global step over chain.  */
      if (tp->resumed ())
	{
	  infrun_debug_printf ("[%s] was resumed.",
			       tp->ptid.to_string ().c_str ());
	  gdb_assert (!thread_is_in_step_over_chain (tp));
	}
      else
	{
	  infrun_debug_printf ("[%s] was NOT resumed.",
			       tp->ptid.to_string ().c_str ());
	  gdb_assert (thread_is_in_step_over_chain (tp));
	}

      /* If we started a new in-line step-over, we're done.  */
      if (step_over_info_valid_p ())
	{
	  gdb_assert (tp->control.trap_expected);
	  started = true;
	  break;
	}

      if (!target_is_non_stop_p ())
	{
	  /* On all-stop, shouldn't have resumed unless we needed a
	     step over.  */
	  gdb_assert (tp->control.trap_expected
		      || tp->step_after_step_resume_breakpoint);

	  /* With remote targets (at least), in all-stop, we can't
	     issue any further remote commands until the program stops
	     again.  */
	  started = true;
	  break;
	}

      /* Either the thread no longer needed a step-over, or a new
	 displaced stepping sequence started.  Even in the latter
	 case, continue looking.  Maybe we can also start another
	 displaced step on a thread of other process.  */
    }

  return started;
}
2277
2278 /* Update global variables holding ptids to hold NEW_PTID if they were
2279 holding OLD_PTID. */
2280 static void
2281 infrun_thread_ptid_changed (process_stratum_target *target,
2282 ptid_t old_ptid, ptid_t new_ptid)
2283 {
2284 if (inferior_ptid == old_ptid
2285 && current_inferior ()->process_target () == target)
2286 inferior_ptid = new_ptid;
2287 }
2288
2289 \f
2290
/* Possible values of the "scheduler-locking" setting.  */
static const char schedlock_off[] = "off";
static const char schedlock_on[] = "on";
static const char schedlock_step[] = "step";
static const char schedlock_replay[] = "replay";

/* Enumeration of the values above, for the setting's command.  */
static const char *const scheduler_enums[] = {
  schedlock_off,
  schedlock_on,
  schedlock_step,
  schedlock_replay,
  nullptr
};

/* The current scheduler-locking mode; defaults to "replay".  */
static const char *scheduler_mode = schedlock_replay;
2303 static void
2304 show_scheduler_mode (struct ui_file *file, int from_tty,
2305 struct cmd_list_element *c, const char *value)
2306 {
2307 gdb_printf (file,
2308 _("Mode for locking scheduler "
2309 "during execution is \"%s\".\n"),
2310 value);
2311 }
2312
2313 static void
2314 set_schedlock_func (const char *args, int from_tty, struct cmd_list_element *c)
2315 {
2316 if (!target_can_lock_scheduler ())
2317 {
2318 scheduler_mode = schedlock_off;
2319 error (_("Target '%s' cannot support this command."),
2320 target_shortname ());
2321 }
2322 }
2323
/* True if execution commands resume all threads of all processes by
   default; otherwise, resume only threads of the current inferior
   process.  Controlled by the "set schedule-multiple" command.  */
bool sched_multi = false;
2328
2329 /* Try to setup for software single stepping. Return true if target_resume()
2330 should use hardware single step.
2331
2332 GDBARCH the current gdbarch. */
2333
2334 static bool
2335 maybe_software_singlestep (struct gdbarch *gdbarch)
2336 {
2337 bool hw_step = true;
2338
2339 if (execution_direction == EXEC_FORWARD
2340 && gdbarch_software_single_step_p (gdbarch))
2341 hw_step = !insert_single_step_breakpoints (gdbarch);
2342
2343 return hw_step;
2344 }
2345
2346 /* See infrun.h. */
2347
2348 ptid_t
2349 user_visible_resume_ptid (int step)
2350 {
2351 ptid_t resume_ptid;
2352
2353 if (non_stop)
2354 {
2355 /* With non-stop mode on, threads are always handled
2356 individually. */
2357 resume_ptid = inferior_ptid;
2358 }
2359 else if ((scheduler_mode == schedlock_on)
2360 || (scheduler_mode == schedlock_step && step))
2361 {
2362 /* User-settable 'scheduler' mode requires solo thread
2363 resume. */
2364 resume_ptid = inferior_ptid;
2365 }
2366 else if ((scheduler_mode == schedlock_replay)
2367 && target_record_will_replay (minus_one_ptid, execution_direction))
2368 {
2369 /* User-settable 'scheduler' mode requires solo thread resume in replay
2370 mode. */
2371 resume_ptid = inferior_ptid;
2372 }
2373 else if (!sched_multi && target_supports_multi_process ())
2374 {
2375 /* Resume all threads of the current process (and none of other
2376 processes). */
2377 resume_ptid = ptid_t (inferior_ptid.pid ());
2378 }
2379 else
2380 {
2381 /* Resume all threads of all processes. */
2382 resume_ptid = RESUME_ALL;
2383 }
2384
2385 return resume_ptid;
2386 }
2387
2388 /* See infrun.h. */
2389
2390 process_stratum_target *
2391 user_visible_resume_target (ptid_t resume_ptid)
2392 {
2393 return (resume_ptid == minus_one_ptid && sched_multi
2394 ? nullptr
2395 : current_inferior ()->process_target ());
2396 }
2397
2398 /* Find a thread from the inferiors that we'll resume that is waiting
2399 for a vfork-done event. */
2400
2401 static thread_info *
2402 find_thread_waiting_for_vfork_done ()
2403 {
2404 gdb_assert (!target_is_non_stop_p ());
2405
2406 if (sched_multi)
2407 {
2408 for (inferior *inf : all_non_exited_inferiors ())
2409 if (inf->thread_waiting_for_vfork_done != nullptr)
2410 return inf->thread_waiting_for_vfork_done;
2411 }
2412 else
2413 {
2414 inferior *cur_inf = current_inferior ();
2415 if (cur_inf->thread_waiting_for_vfork_done != nullptr)
2416 return cur_inf->thread_waiting_for_vfork_done;
2417 }
2418 return nullptr;
2419 }
2420
2421 /* Return a ptid representing the set of threads that we will resume,
2422 in the perspective of the target, assuming run control handling
2423 does not require leaving some threads stopped (e.g., stepping past
2424 breakpoint). USER_STEP indicates whether we're about to start the
2425 target for a stepping command. */
2426
2427 static ptid_t
2428 internal_resume_ptid (int user_step)
2429 {
2430 /* In non-stop, we always control threads individually. Note that
2431 the target may always work in non-stop mode even with "set
2432 non-stop off", in which case user_visible_resume_ptid could
2433 return a wildcard ptid. */
2434 if (target_is_non_stop_p ())
2435 return inferior_ptid;
2436
2437 /* The rest of the function assumes non-stop==off and
2438 target-non-stop==off.
2439
2440 If a thread is waiting for a vfork-done event, it means breakpoints are out
2441 for this inferior (well, program space in fact). We don't want to resume
2442 any thread other than the one waiting for vfork done, otherwise these other
2443 threads could miss breakpoints. So if a thread in the resumption set is
2444 waiting for a vfork-done event, resume only that thread.
2445
2446 The resumption set width depends on whether schedule-multiple is on or off.
2447
2448 Note that if the target_resume interface was more flexible, we could be
2449 smarter here when schedule-multiple is on. For example, imagine 3
2450 inferiors with 2 threads each (1.1, 1.2, 2.1, 2.2, 3.1 and 3.2). Threads
2451 2.1 and 3.2 are both waiting for a vfork-done event. Then we could ask the
2452 target(s) to resume:
2453
2454 - All threads of inferior 1
2455 - Thread 2.1
2456 - Thread 3.2
2457
2458 Since we don't have that flexibility (we can only pass one ptid), just
2459 resume the first thread waiting for a vfork-done event we find (e.g. thread
2460 2.1). */
2461 thread_info *thr = find_thread_waiting_for_vfork_done ();
2462 if (thr != nullptr)
2463 {
2464 /* If we have a thread that is waiting for a vfork-done event,
2465 then we should have switched to it earlier. Calling
2466 target_resume with thread scope is only possible when the
2467 current thread matches the thread scope. */
2468 gdb_assert (thr->ptid == inferior_ptid);
2469 gdb_assert (thr->inf->process_target ()
2470 == inferior_thread ()->inf->process_target ());
2471 return thr->ptid;
2472 }
2473
2474 return user_visible_resume_ptid (user_step);
2475 }
2476
2477 /* Wrapper for target_resume, that handles infrun-specific
2478 bookkeeping. */
2479
static void
do_target_resume (ptid_t resume_ptid, bool step, enum gdb_signal sig)
{
  struct thread_info *tp = inferior_thread ();

  /* Callers must not resume a thread that has a pending stop
     request.  */
  gdb_assert (!tp->stop_requested);

  /* Install inferior's terminal modes.  */
  target_terminal::inferior ();

  /* Avoid confusing the next resume, if the next stop/resume
     happens to apply to another thread.  */
  tp->set_stop_signal (GDB_SIGNAL_0);

  /* Advise target which signals may be handled silently.

     If we have removed breakpoints because we are stepping over one
     in-line (in any thread), we need to receive all signals to avoid
     accidentally skipping a breakpoint during execution of a signal
     handler.

     Likewise if we're displaced stepping, otherwise a trap for a
     breakpoint in a signal handler might be confused with the
     displaced step finishing.  We don't make the displaced_step_finish
     step distinguish the cases instead, because:

     - a backtrace while stopped in the signal handler would show the
       scratch pad as frame older than the signal handler, instead of
       the real mainline code.

     - when the thread is later resumed, the signal handler would
       return to the scratch pad area, which would no longer be
       valid.  */
  if (step_over_info_valid_p ()
      || displaced_step_in_progress (tp->inf))
    target_pass_signals ({});
  else
    target_pass_signals (signal_pass);

  /* Request that the target report thread-{created,cloned} events in
     the following situations:

     - If we are performing an in-line step-over-breakpoint, then we
       will remove a breakpoint from the target and only run the
       current thread.  We don't want any new thread (spawned by the
       step) to start running, as it might miss the breakpoint.

     - If we are stepping over a breakpoint out of line (displaced
       stepping) then we won't remove a breakpoint from the target,
       but, if the step spawns a new clone thread, then we will need
       to fixup the $pc address in the clone child too, so we need it
       to start stopped.
  */
  if (step_over_info_valid_p ()
      || displaced_step_in_progress_thread (tp))
    {
      gdb_thread_options options = GDB_THREAD_OPTION_CLONE;
      /* Prefer the per-thread option when available; fall back to the
	 target-wide setting otherwise.  */
      if (target_supports_set_thread_options (options))
	tp->set_thread_options (options);
      else
	target_thread_events (true);
    }

  /* If we're resuming more than one thread simultaneously, then any
     thread other than the leader is being set to run free.  Clear any
     previous thread option for those threads.  */
  if (resume_ptid != inferior_ptid && target_supports_set_thread_options (0))
    {
      process_stratum_target *resume_target = tp->inf->process_target ();
      for (thread_info *thr_iter : all_non_exited_threads (resume_target,
							   resume_ptid))
	if (thr_iter != tp)
	  thr_iter->set_thread_options (0);
    }

  infrun_debug_printf ("resume_ptid=%s, step=%d, sig=%s",
		       resume_ptid.to_string ().c_str (),
		       step, gdb_signal_to_symbol_string (sig));

  /* Finally, hand the resume request to the target.  */
  target_resume (resume_ptid, step, sig);
}
2561
2562 /* Resume the inferior. SIG is the signal to give the inferior
2563 (GDB_SIGNAL_0 for none). Note: don't call this directly; instead
2564 call 'resume', which handles exceptions. */
2565
2566 static void
2567 resume_1 (enum gdb_signal sig)
2568 {
2569 struct regcache *regcache = get_current_regcache ();
2570 struct gdbarch *gdbarch = regcache->arch ();
2571 struct thread_info *tp = inferior_thread ();
2572 const address_space *aspace = regcache->aspace ();
2573 ptid_t resume_ptid;
2574 /* This represents the user's step vs continue request. When
2575 deciding whether "set scheduler-locking step" applies, it's the
2576 user's intention that counts. */
2577 const int user_step = tp->control.stepping_command;
2578 /* This represents what we'll actually request the target to do.
2579 This can decay from a step to a continue, if e.g., we need to
2580 implement single-stepping with breakpoints (software
2581 single-step). */
2582 bool step;
2583
2584 gdb_assert (!tp->stop_requested);
2585 gdb_assert (!thread_is_in_step_over_chain (tp));
2586
2587 if (tp->has_pending_waitstatus ())
2588 {
2589 infrun_debug_printf
2590 ("thread %s has pending wait "
2591 "status %s (currently_stepping=%d).",
2592 tp->ptid.to_string ().c_str (),
2593 tp->pending_waitstatus ().to_string ().c_str (),
2594 currently_stepping (tp));
2595
2596 tp->inf->process_target ()->threads_executing = true;
2597 tp->set_resumed (true);
2598
2599 /* FIXME: What should we do if we are supposed to resume this
2600 thread with a signal? Maybe we should maintain a queue of
2601 pending signals to deliver. */
2602 if (sig != GDB_SIGNAL_0)
2603 {
2604 warning (_("Couldn't deliver signal %s to %s."),
2605 gdb_signal_to_name (sig),
2606 tp->ptid.to_string ().c_str ());
2607 }
2608
2609 tp->set_stop_signal (GDB_SIGNAL_0);
2610
2611 if (target_can_async_p ())
2612 {
2613 target_async (true);
2614 /* Tell the event loop we have an event to process. */
2615 mark_async_event_handler (infrun_async_inferior_event_token);
2616 }
2617 return;
2618 }
2619
2620 tp->stepped_breakpoint = 0;
2621
2622 /* Depends on stepped_breakpoint. */
2623 step = currently_stepping (tp);
2624
2625 if (current_inferior ()->thread_waiting_for_vfork_done != nullptr)
2626 {
2627 /* Don't try to single-step a vfork parent that is waiting for
2628 the child to get out of the shared memory region (by exec'ing
2629 or exiting). This is particularly important on software
2630 single-step archs, as the child process would trip on the
2631 software single step breakpoint inserted for the parent
2632 process. Since the parent will not actually execute any
2633 instruction until the child is out of the shared region (such
2634 are vfork's semantics), it is safe to simply continue it.
2635 Eventually, we'll see a TARGET_WAITKIND_VFORK_DONE event for
2636 the parent, and tell it to `keep_going', which automatically
2637 re-sets it stepping. */
2638 infrun_debug_printf ("resume : clear step");
2639 step = false;
2640 }
2641
2642 CORE_ADDR pc = regcache_read_pc (regcache);
2643
2644 infrun_debug_printf ("step=%d, signal=%s, trap_expected=%d, "
2645 "current thread [%s] at %s",
2646 step, gdb_signal_to_symbol_string (sig),
2647 tp->control.trap_expected,
2648 inferior_ptid.to_string ().c_str (),
2649 paddress (gdbarch, pc));
2650
2651 /* Normally, by the time we reach `resume', the breakpoints are either
2652 removed or inserted, as appropriate. The exception is if we're sitting
2653 at a permanent breakpoint; we need to step over it, but permanent
2654 breakpoints can't be removed. So we have to test for it here. */
2655 if (breakpoint_here_p (aspace, pc) == permanent_breakpoint_here)
2656 {
2657 if (sig != GDB_SIGNAL_0)
2658 {
2659 /* We have a signal to pass to the inferior. The resume
2660 may, or may not take us to the signal handler. If this
2661 is a step, we'll need to stop in the signal handler, if
2662 there's one, (if the target supports stepping into
2663 handlers), or in the next mainline instruction, if
2664 there's no handler. If this is a continue, we need to be
2665 sure to run the handler with all breakpoints inserted.
2666 In all cases, set a breakpoint at the current address
2667 (where the handler returns to), and once that breakpoint
2668 is hit, resume skipping the permanent breakpoint. If
2669 that breakpoint isn't hit, then we've stepped into the
2670 signal handler (or hit some other event). We'll delete
2671 the step-resume breakpoint then. */
2672
2673 infrun_debug_printf ("resume: skipping permanent breakpoint, "
2674 "deliver signal first");
2675
2676 clear_step_over_info ();
2677 tp->control.trap_expected = 0;
2678
2679 if (tp->control.step_resume_breakpoint == nullptr)
2680 {
2681 /* Set a "high-priority" step-resume, as we don't want
2682 user breakpoints at PC to trigger (again) when this
2683 hits. */
2684 insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
2685 gdb_assert (tp->control.step_resume_breakpoint->first_loc ()
2686 .permanent);
2687
2688 tp->step_after_step_resume_breakpoint = step;
2689 }
2690
2691 insert_breakpoints ();
2692 }
2693 else
2694 {
2695 /* There's no signal to pass, we can go ahead and skip the
2696 permanent breakpoint manually. */
2697 infrun_debug_printf ("skipping permanent breakpoint");
2698 gdbarch_skip_permanent_breakpoint (gdbarch, regcache);
2699 /* Update pc to reflect the new address from which we will
2700 execute instructions. */
2701 pc = regcache_read_pc (regcache);
2702
2703 if (step)
2704 {
2705 /* We've already advanced the PC, so the stepping part
2706 is done. Now we need to arrange for a trap to be
2707 reported to handle_inferior_event. Set a breakpoint
2708 at the current PC, and run to it. Don't update
2709 prev_pc, because if we end in
2710 switch_back_to_stepped_thread, we want the "expected
2711 thread advanced also" branch to be taken. IOW, we
2712 don't want this thread to step further from PC
2713 (overstep). */
2714 gdb_assert (!step_over_info_valid_p ());
2715 insert_single_step_breakpoint (gdbarch, aspace, pc);
2716 insert_breakpoints ();
2717
2718 resume_ptid = internal_resume_ptid (user_step);
2719 do_target_resume (resume_ptid, false, GDB_SIGNAL_0);
2720 tp->set_resumed (true);
2721 return;
2722 }
2723 }
2724 }
2725
2726 /* If we have a breakpoint to step over, make sure to do a single
2727 step only. Same if we have software watchpoints. */
2728 if (tp->control.trap_expected || bpstat_should_step ())
2729 tp->control.may_range_step = 0;
2730
2731 /* If displaced stepping is enabled, step over breakpoints by executing a
2732 copy of the instruction at a different address.
2733
2734 We can't use displaced stepping when we have a signal to deliver;
2735 the comments for displaced_step_prepare explain why. The
2736 comments in the handle_inferior event for dealing with 'random
2737 signals' explain what we do instead.
2738
2739 We can't use displaced stepping when we are waiting for vfork_done
2740 event, displaced stepping breaks the vfork child similarly as single
2741 step software breakpoint. */
2742 if (tp->control.trap_expected
2743 && use_displaced_stepping (tp)
2744 && !step_over_info_valid_p ()
2745 && sig == GDB_SIGNAL_0
2746 && current_inferior ()->thread_waiting_for_vfork_done == nullptr)
2747 {
2748 displaced_step_prepare_status prepare_status
2749 = displaced_step_prepare (tp);
2750
2751 if (prepare_status == DISPLACED_STEP_PREPARE_STATUS_UNAVAILABLE)
2752 {
2753 infrun_debug_printf ("Got placed in step-over queue");
2754
2755 tp->control.trap_expected = 0;
2756 return;
2757 }
2758 else if (prepare_status == DISPLACED_STEP_PREPARE_STATUS_CANT)
2759 {
2760 /* Fallback to stepping over the breakpoint in-line. */
2761
2762 if (target_is_non_stop_p ())
2763 stop_all_threads ("displaced stepping falling back on inline stepping");
2764
2765 set_step_over_info (regcache->aspace (),
2766 regcache_read_pc (regcache), 0, tp->global_num);
2767
2768 step = maybe_software_singlestep (gdbarch);
2769
2770 insert_breakpoints ();
2771 }
2772 else if (prepare_status == DISPLACED_STEP_PREPARE_STATUS_OK)
2773 {
2774 /* Update pc to reflect the new address from which we will
2775 execute instructions due to displaced stepping. */
2776 pc = regcache_read_pc (get_thread_regcache (tp));
2777
2778 step = gdbarch_displaced_step_hw_singlestep (gdbarch);
2779 }
2780 else
2781 gdb_assert_not_reached ("Invalid displaced_step_prepare_status "
2782 "value.");
2783 }
2784
2785 /* Do we need to do it the hard way, w/temp breakpoints? */
2786 else if (step)
2787 step = maybe_software_singlestep (gdbarch);
2788
2789 /* Currently, our software single-step implementation leads to different
2790 results than hardware single-stepping in one situation: when stepping
2791 into delivering a signal which has an associated signal handler,
2792 hardware single-step will stop at the first instruction of the handler,
2793 while software single-step will simply skip execution of the handler.
2794
2795 For now, this difference in behavior is accepted since there is no
2796 easy way to actually implement single-stepping into a signal handler
2797 without kernel support.
2798
2799 However, there is one scenario where this difference leads to follow-on
2800 problems: if we're stepping off a breakpoint by removing all breakpoints
2801 and then single-stepping. In this case, the software single-step
2802 behavior means that even if there is a *breakpoint* in the signal
2803 handler, GDB still would not stop.
2804
2805 Fortunately, we can at least fix this particular issue. We detect
2806 here the case where we are about to deliver a signal while software
2807 single-stepping with breakpoints removed. In this situation, we
2808 revert the decisions to remove all breakpoints and insert single-
2809 step breakpoints, and instead we install a step-resume breakpoint
2810 at the current address, deliver the signal without stepping, and
2811 once we arrive back at the step-resume breakpoint, actually step
2812 over the breakpoint we originally wanted to step over. */
2813 if (thread_has_single_step_breakpoints_set (tp)
2814 && sig != GDB_SIGNAL_0
2815 && step_over_info_valid_p ())
2816 {
2817 /* If we have nested signals or a pending signal is delivered
2818 immediately after a handler returns, might already have
2819 a step-resume breakpoint set on the earlier handler. We cannot
2820 set another step-resume breakpoint; just continue on until the
2821 original breakpoint is hit. */
2822 if (tp->control.step_resume_breakpoint == nullptr)
2823 {
2824 insert_hp_step_resume_breakpoint_at_frame (get_current_frame ());
2825 tp->step_after_step_resume_breakpoint = 1;
2826 }
2827
2828 delete_single_step_breakpoints (tp);
2829
2830 clear_step_over_info ();
2831 tp->control.trap_expected = 0;
2832
2833 insert_breakpoints ();
2834 }
2835
2836 /* If STEP is set, it's a request to use hardware stepping
2837 facilities. But in that case, we should never
2838 use singlestep breakpoint. */
2839 gdb_assert (!(thread_has_single_step_breakpoints_set (tp) && step));
2840
2841 /* Decide the set of threads to ask the target to resume. */
2842 if (tp->control.trap_expected)
2843 {
2844 /* We're allowing a thread to run past a breakpoint it has
2845 hit, either by single-stepping the thread with the breakpoint
2846 removed, or by displaced stepping, with the breakpoint inserted.
2847 In the former case, we need to single-step only this thread,
2848 and keep others stopped, as they can miss this breakpoint if
2849 allowed to run. That's not really a problem for displaced
2850 stepping, but, we still keep other threads stopped, in case
2851 another thread is also stopped for a breakpoint waiting for
2852 its turn in the displaced stepping queue. */
2853 resume_ptid = inferior_ptid;
2854 }
2855 else
2856 resume_ptid = internal_resume_ptid (user_step);
2857
2858 if (execution_direction != EXEC_REVERSE
2859 && step && breakpoint_inserted_here_p (aspace, pc))
2860 {
2861 /* There are two cases where we currently need to step a
2862 breakpoint instruction when we have a signal to deliver:
2863
2864 - See handle_signal_stop where we handle random signals that
2865 could take out us out of the stepping range. Normally, in
2866 that case we end up continuing (instead of stepping) over the
2867 signal handler with a breakpoint at PC, but there are cases
2868 where we should _always_ single-step, even if we have a
2869 step-resume breakpoint, like when a software watchpoint is
2870 set. Assuming single-stepping and delivering a signal at the
2871 same time would takes us to the signal handler, then we could
2872 have removed the breakpoint at PC to step over it. However,
2873 some hardware step targets (like e.g., Mac OS) can't step
2874 into signal handlers, and for those, we need to leave the
2875 breakpoint at PC inserted, as otherwise if the handler
2876 recurses and executes PC again, it'll miss the breakpoint.
2877 So we leave the breakpoint inserted anyway, but we need to
2878 record that we tried to step a breakpoint instruction, so
2879 that adjust_pc_after_break doesn't end up confused.
2880
2881 - In non-stop if we insert a breakpoint (e.g., a step-resume)
2882 in one thread after another thread that was stepping had been
2883 momentarily paused for a step-over. When we re-resume the
2884 stepping thread, it may be resumed from that address with a
2885 breakpoint that hasn't trapped yet. Seen with
2886 gdb.threads/non-stop-fair-events.exp, on targets that don't
2887 do displaced stepping. */
2888
2889 infrun_debug_printf ("resume: [%s] stepped breakpoint",
2890 tp->ptid.to_string ().c_str ());
2891
2892 tp->stepped_breakpoint = 1;
2893
2894 /* Most targets can step a breakpoint instruction, thus
2895 executing it normally. But if this one cannot, just
2896 continue and we will hit it anyway. */
2897 if (gdbarch_cannot_step_breakpoint (gdbarch))
2898 step = false;
2899 }
2900
2901 if (tp->control.may_range_step)
2902 {
2903 /* If we're resuming a thread with the PC out of the step
2904 range, then we're doing some nested/finer run control
2905 operation, like stepping the thread out of the dynamic
2906 linker or the displaced stepping scratch pad. We
2907 shouldn't have allowed a range step then. */
2908 gdb_assert (pc_in_thread_step_range (pc, tp));
2909 }
2910
2911 do_target_resume (resume_ptid, step, sig);
2912 tp->set_resumed (true);
2913 }
2914
/* Resume the inferior.  SIG is the signal to give the inferior
   (GDB_SIGNAL_0 for none).  This is a wrapper around 'resume_1' that
   rolls back state on error: if 'resume_1' throws, any single-step
   breakpoints it created are deleted before the exception is
   re-thrown to the caller.  */

static void
resume (gdb_signal sig)
{
  try
    {
      resume_1 (sig);
    }
  catch (const gdb_exception &ex)
    {
      /* If resuming is being aborted for any reason, delete any
	 single-step breakpoint resume_1 may have created, to avoid
	 confusing the following resumption, and to avoid leaving
	 single-step breakpoints perturbing other threads, in case
	 we're running in non-stop mode.  */
      if (inferior_ptid != null_ptid)
	delete_single_step_breakpoints (inferior_thread ());
      throw;
    }
}
2938
2939 \f
2940 /* Proceeding. */
2941
2942 /* See infrun.h. */
2943
2944 /* Counter that tracks number of user visible stops. This can be used
2945 to tell whether a command has proceeded the inferior past the
2946 current location. This allows e.g., inferior function calls in
2947 breakpoint commands to not interrupt the command list. When the
2948 call finishes successfully, the inferior is standing at the same
2949 breakpoint as if nothing happened (and so we don't call
2950 normal_stop). */
2951 static ULONGEST current_stop_id;
2952
/* See infrun.h.  Returns the current value of the user-visible stop
   counter; callers can compare two samples to tell whether a command
   proceeded the inferior in between.  */

ULONGEST
get_stop_id (void)
{
  return current_stop_id;
}
2960
/* Called when we report a user visible stop.  Bumps the stop-id
   counter so that code holding an older stop-id (see get_stop_id) can
   detect that the inferior has been resumed and stopped since.  */

static void
new_stop_id (void)
{
  current_stop_id++;
}
2968
/* Clear out all variables saying what to do when inferior is continued.
   First do this, then set the ones you want, then call `proceed'.

   Resets TP's run-control state: pending finished-step events,
   to-be-suppressed stop signals, any thread FSM, the stepping ranges
   and frame ids, and the stop bpstat.  */

static void
clear_proceed_status_thread (struct thread_info *tp)
{
  infrun_debug_printf ("%s", tp->ptid.to_string ().c_str ());

  /* If we're starting a new sequence, then the previous finished
     single-step is no longer relevant.  */
  if (tp->has_pending_waitstatus ())
    {
      if (tp->stop_reason () == TARGET_STOPPED_BY_SINGLE_STEP)
	{
	  infrun_debug_printf ("pending event of %s was a finished step. "
			       "Discarding.",
			       tp->ptid.to_string ().c_str ());

	  tp->clear_pending_waitstatus ();
	  tp->set_stop_reason (TARGET_STOPPED_BY_NO_REASON);
	}
      else
	{
	  /* Any other pending event is kept; just log it for
	     debugging purposes.  */
	  infrun_debug_printf
	    ("thread %s has pending wait status %s (currently_stepping=%d).",
	     tp->ptid.to_string ().c_str (),
	     tp->pending_waitstatus ().to_string ().c_str (),
	     currently_stepping (tp));
	}
    }

  /* If this signal should not be seen by program, give it zero.
     Used for debugging signals.  */
  if (!signal_pass_state (tp->stop_signal ()))
    tp->set_stop_signal (GDB_SIGNAL_0);

  /* Drop any thread FSM left over from the previous command.  */
  tp->release_thread_fsm ();

  /* Reset all per-thread run-control state to its defaults.  */
  tp->control.trap_expected = 0;
  tp->control.step_range_start = 0;
  tp->control.step_range_end = 0;
  tp->control.may_range_step = 0;
  tp->control.step_frame_id = null_frame_id;
  tp->control.step_stack_frame_id = null_frame_id;
  tp->control.step_over_calls = STEP_OVER_UNDEBUGGABLE;
  tp->control.step_start_function = nullptr;
  tp->stop_requested = 0;

  tp->control.stop_step = 0;

  tp->control.proceed_to_finish = 0;

  tp->control.stepping_command = 0;

  /* Discard any remaining commands or status from previous stop.  */
  bpstat_clear (&tp->control.stop_bpstat);
}
3026
/* Notify the current interpreter and observers that the target is about to
   proceed.  The top level interpreter is told first, then the
   about_to_proceed observers.  */

static void
notify_about_to_proceed ()
{
  top_level_interpreter ()->on_about_to_proceed ();
  gdb::observers::about_to_proceed.notify ();
}
3036
/* Clear the per-thread proceed status of the threads we're about to
   resume, then notify that we're about to proceed.  STEP is non-zero
   for a step/next-style command; it is only used here to compute the
   user-visible resume ptid.  */

void
clear_proceed_status (int step)
{
  /* With scheduler-locking replay, stop replaying other threads if we're
     not replaying the user-visible resume ptid.

     This is a convenience feature to not require the user to explicitly
     stop replaying the other threads.  We're assuming that the user's
     intent is to resume tracing the recorded process.  */
  if (!non_stop && scheduler_mode == schedlock_replay
      && target_record_is_replaying (minus_one_ptid)
      && !target_record_will_replay (user_visible_resume_ptid (step),
				     execution_direction))
    target_record_stop_replaying ();

  if (!non_stop && inferior_ptid != null_ptid)
    {
      ptid_t resume_ptid = user_visible_resume_ptid (step);
      process_stratum_target *resume_target
	= user_visible_resume_target (resume_ptid);

      /* In all-stop mode, delete the per-thread status of all threads
	 we're about to resume, implicitly and explicitly.  */
      for (thread_info *tp : all_non_exited_threads (resume_target, resume_ptid))
	clear_proceed_status_thread (tp);
    }

  if (inferior_ptid != null_ptid)
    {
      struct inferior *inferior;

      if (non_stop)
	{
	  /* If in non-stop mode, only delete the per-thread status of
	     the current thread.  */
	  clear_proceed_status_thread (inferior_thread ());
	}

      /* Clear any quiet-stop request on the current inferior.  */
      inferior = current_inferior ();
      inferior->control.stop_soon = NO_STOP_QUIETLY;
    }

  notify_about_to_proceed ();
}
3081
3082 /* Returns true if TP is still stopped at a breakpoint that needs
3083 stepping-over in order to make progress. If the breakpoint is gone
3084 meanwhile, we can skip the whole step-over dance. */
3085
3086 static bool
3087 thread_still_needs_step_over_bp (struct thread_info *tp)
3088 {
3089 if (tp->stepping_over_breakpoint)
3090 {
3091 struct regcache *regcache = get_thread_regcache (tp);
3092
3093 if (breakpoint_here_p (regcache->aspace (),
3094 regcache_read_pc (regcache))
3095 == ordinary_breakpoint_here)
3096 return true;
3097
3098 tp->stepping_over_breakpoint = 0;
3099 }
3100
3101 return false;
3102 }
3103
3104 /* Check whether thread TP still needs to start a step-over in order
3105 to make progress when resumed. Returns an bitwise or of enum
3106 step_over_what bits, indicating what needs to be stepped over. */
3107
3108 static step_over_what
3109 thread_still_needs_step_over (struct thread_info *tp)
3110 {
3111 step_over_what what = 0;
3112
3113 if (thread_still_needs_step_over_bp (tp))
3114 what |= STEP_OVER_BREAKPOINT;
3115
3116 if (tp->stepping_over_watchpoint
3117 && !target_have_steppable_watchpoint ())
3118 what |= STEP_OVER_WATCHPOINT;
3119
3120 return what;
3121 }
3122
3123 /* Returns true if scheduler locking applies. STEP indicates whether
3124 we're about to do a step/next-like command to a thread. */
3125
3126 static bool
3127 schedlock_applies (struct thread_info *tp)
3128 {
3129 return (scheduler_mode == schedlock_on
3130 || (scheduler_mode == schedlock_step
3131 && tp->control.stepping_command)
3132 || (scheduler_mode == schedlock_replay
3133 && target_record_will_replay (minus_one_ptid,
3134 execution_direction)));
3135 }
3136
/* Set process_stratum_target::COMMIT_RESUMED_STATE in all target
   stacks that have threads executing and don't have threads with
   pending events.  Targets that fail any of the guard checks below
   are skipped (with a debug log explaining why).  */

static void
maybe_set_commit_resumed_all_targets ()
{
  scoped_restore_current_thread restore_thread;

  for (inferior *inf : all_non_exited_inferiors ())
    {
      process_stratum_target *proc_target = inf->process_target ();

      if (proc_target->commit_resumed_state)
	{
	  /* We already set this in a previous iteration, via another
	     inferior sharing the process_stratum target.  */
	  continue;
	}

      /* If the target has no resumed threads, it would be useless to
	 ask it to commit the resumed threads.  */
      if (!proc_target->threads_executing)
	{
	  infrun_debug_printf ("not requesting commit-resumed for target "
			       "%s, no resumed threads",
			       proc_target->shortname ());
	  continue;
	}

      /* As an optimization, if a thread from this target has some
	 status to report, handle it before requiring the target to
	 commit its resumed threads: handling the status might lead to
	 resuming more threads.  */
      if (proc_target->has_resumed_with_pending_wait_status ())
	{
	  infrun_debug_printf ("not requesting commit-resumed for target %s, a"
			       " thread has a pending waitstatus",
			       proc_target->shortname ());
	  continue;
	}

      /* target_has_pending_events is inferior-relative, so switch
	 before asking.  */
      switch_to_inferior_no_thread (inf);

      if (target_has_pending_events ())
	{
	  infrun_debug_printf ("not requesting commit-resumed for target %s, "
			       "target has pending events",
			       proc_target->shortname ());
	  continue;
	}

      infrun_debug_printf ("enabling commit-resumed for target %s",
			   proc_target->shortname ());

      proc_target->commit_resumed_state = true;
    }
}
3195
3196 /* See infrun.h. */
3197
3198 void
3199 maybe_call_commit_resumed_all_targets ()
3200 {
3201 scoped_restore_current_thread restore_thread;
3202
3203 for (inferior *inf : all_non_exited_inferiors ())
3204 {
3205 process_stratum_target *proc_target = inf->process_target ();
3206
3207 if (!proc_target->commit_resumed_state)
3208 continue;
3209
3210 switch_to_inferior_no_thread (inf);
3211
3212 infrun_debug_printf ("calling commit_resumed for target %s",
3213 proc_target->shortname());
3214
3215 target_commit_resumed ();
3216 }
3217 }
3218
3219 /* To track nesting of scoped_disable_commit_resumed objects, ensuring
3220 that only the outermost one attempts to re-enable
3221 commit-resumed. */
3222 static bool enable_commit_resumed = true;
3223
3224 /* See infrun.h. */
3225
3226 scoped_disable_commit_resumed::scoped_disable_commit_resumed
3227 (const char *reason)
3228 : m_reason (reason),
3229 m_prev_enable_commit_resumed (enable_commit_resumed)
3230 {
3231 infrun_debug_printf ("reason=%s", m_reason);
3232
3233 enable_commit_resumed = false;
3234
3235 for (inferior *inf : all_non_exited_inferiors ())
3236 {
3237 process_stratum_target *proc_target = inf->process_target ();
3238
3239 if (m_prev_enable_commit_resumed)
3240 {
3241 /* This is the outermost instance: force all
3242 COMMIT_RESUMED_STATE to false. */
3243 proc_target->commit_resumed_state = false;
3244 }
3245 else
3246 {
3247 /* This is not the outermost instance, we expect
3248 COMMIT_RESUMED_STATE to have been cleared by the
3249 outermost instance. */
3250 gdb_assert (!proc_target->commit_resumed_state);
3251 }
3252 }
3253 }
3254
3255 /* See infrun.h. */
3256
3257 void
3258 scoped_disable_commit_resumed::reset ()
3259 {
3260 if (m_reset)
3261 return;
3262 m_reset = true;
3263
3264 infrun_debug_printf ("reason=%s", m_reason);
3265
3266 gdb_assert (!enable_commit_resumed);
3267
3268 enable_commit_resumed = m_prev_enable_commit_resumed;
3269
3270 if (m_prev_enable_commit_resumed)
3271 {
3272 /* This is the outermost instance, re-enable
3273 COMMIT_RESUMED_STATE on the targets where it's possible. */
3274 maybe_set_commit_resumed_all_targets ();
3275 }
3276 else
3277 {
3278 /* This is not the outermost instance, we expect
3279 COMMIT_RESUMED_STATE to still be false. */
3280 for (inferior *inf : all_non_exited_inferiors ())
3281 {
3282 process_stratum_target *proc_target = inf->process_target ();
3283 gdb_assert (!proc_target->commit_resumed_state);
3284 }
3285 }
3286 }
3287
/* See infrun.h.  Undoes the effects of construction, unless reset was
   already called explicitly (reset is idempotent).  */

scoped_disable_commit_resumed::~scoped_disable_commit_resumed ()
{
  reset ();
}
3294
/* See infrun.h.  Resets this instance (re-enabling commit-resumed
   when outermost), then asks the targets to commit their resumed
   threads.  */

void
scoped_disable_commit_resumed::reset_and_commit ()
{
  reset ();
  maybe_call_commit_resumed_all_targets ();
}
3303
3304 /* See infrun.h. */
3305
3306 scoped_enable_commit_resumed::scoped_enable_commit_resumed
3307 (const char *reason)
3308 : m_reason (reason),
3309 m_prev_enable_commit_resumed (enable_commit_resumed)
3310 {
3311 infrun_debug_printf ("reason=%s", m_reason);
3312
3313 if (!enable_commit_resumed)
3314 {
3315 enable_commit_resumed = true;
3316
3317 /* Re-enable COMMIT_RESUMED_STATE on the targets where it's
3318 possible. */
3319 maybe_set_commit_resumed_all_targets ();
3320
3321 maybe_call_commit_resumed_all_targets ();
3322 }
3323 }
3324
3325 /* See infrun.h. */
3326
3327 scoped_enable_commit_resumed::~scoped_enable_commit_resumed ()
3328 {
3329 infrun_debug_printf ("reason=%s", m_reason);
3330
3331 gdb_assert (enable_commit_resumed);
3332
3333 enable_commit_resumed = m_prev_enable_commit_resumed;
3334
3335 if (!enable_commit_resumed)
3336 {
3337 /* Force all COMMIT_RESUMED_STATE back to false. */
3338 for (inferior *inf : all_non_exited_inferiors ())
3339 {
3340 process_stratum_target *proc_target = inf->process_target ();
3341 proc_target->commit_resumed_state = false;
3342 }
3343 }
3344 }
3345
/* Check that all the targets we're about to resume are in non-stop
   mode.  Ideally, we'd only care whether all targets support
   target-async, but we're not there yet.  E.g., stop_all_threads
   doesn't know how to handle all-stop targets.  Also, the remote
   protocol in all-stop mode is synchronous, irrespective of
   target-async, which means that things like a breakpoint re-set
   triggered by one target would try to read memory from all targets
   and fail.

   Throws an error if more than one connection would be resumed and at
   least one of them does not work in always-non-stop mode.  A NULL
   RESUME_TARGET means "all targets" (see user_visible_resume_target),
   which is the only case that needs checking.  */

static void
check_multi_target_resumption (process_stratum_target *resume_target)
{
  if (!non_stop && resume_target == nullptr)
    {
      scoped_restore_current_thread restore_thread;

      /* This is used to track whether we're resuming more than one
	 target.  */
      process_stratum_target *first_connection = nullptr;

      /* The first inferior we see with a target that does not work in
	 always-non-stop mode.  */
      inferior *first_not_non_stop = nullptr;

      for (inferior *inf : all_non_exited_inferiors ())
	{
	  switch_to_inferior_no_thread (inf);

	  if (!target_has_execution ())
	    continue;

	  process_stratum_target *proc_target
	    = current_inferior ()->process_target();

	  if (!target_is_non_stop_p ())
	    first_not_non_stop = inf;

	  if (first_connection == nullptr)
	    first_connection = proc_target;
	  else if (first_connection != proc_target
		   && first_not_non_stop != nullptr)
	    {
	      /* We found a second connection, and some connection seen
		 so far is not always-non-stop: refuse the resumption.
		 Switch to the offending inferior so the error message
		 names its connection.  */
	      switch_to_inferior_no_thread (first_not_non_stop);

	      proc_target = current_inferior ()->process_target();

	      error (_("Connection %d (%s) does not support "
		       "multi-target resumption."),
		     proc_target->connection_number,
		     make_target_connection_string (proc_target).c_str ());
	    }
	}
    }
}
3400
/* Helper function for `proceed`.  Check if thread TP is suitable for
   resuming, and, if it is, switch to the thread and call
   `keep_going_pass_signal`.  If TP is not suitable for resuming then this
   function will just return without switching threads.

   Throws "Command aborted." if keep_going_pass_signal decided not to
   wait for more events.  */

static void
proceed_resume_thread_checked (thread_info *tp)
{
  /* Guard: the thread's inferior must be live.  */
  if (!tp->inf->has_execution ())
    {
      infrun_debug_printf ("[%s] target has no execution",
			   tp->ptid.to_string ().c_str ());
      return;
    }

  /* Guard: a thread that is already resumed must not be resumed
     again.  */
  if (tp->resumed ())
    {
      infrun_debug_printf ("[%s] resumed",
			   tp->ptid.to_string ().c_str ());
      gdb_assert (tp->executing () || tp->has_pending_waitstatus ());
      return;
    }

  /* Guard: a thread queued for a step-over will be resumed by the
     step-over machinery instead.  */
  if (thread_is_in_step_over_chain (tp))
    {
      infrun_debug_printf ("[%s] needs step-over",
			   tp->ptid.to_string ().c_str ());
      return;
    }

  /* When handling a vfork GDB removes all breakpoints from the program
     space in which the vfork is being handled.  If we are following the
     parent then GDB will set the thread_waiting_for_vfork_done member of
     the parent inferior.  In this case we should take care to only resume
     the vfork parent thread, the kernel will hold this thread suspended
     until the vfork child has exited or execd, at which point the parent
     will be resumed and a VFORK_DONE event sent to GDB.  */
  if (tp->inf->thread_waiting_for_vfork_done != nullptr)
    {
      if (target_is_non_stop_p ())
	{
	  /* For non-stop targets, regardless of whether GDB is using
	     all-stop or non-stop mode, threads are controlled
	     individually.

	     When a thread is handling a vfork, breakpoints are removed
	     from the inferior (well, program space in fact), so it is
	     critical that we don't try to resume any thread other than the
	     vfork parent.  */
	  if (tp != tp->inf->thread_waiting_for_vfork_done)
	    {
	      infrun_debug_printf ("[%s] thread %s of this inferior is "
				   "waiting for vfork-done",
				   tp->ptid.to_string ().c_str (),
				   tp->inf->thread_waiting_for_vfork_done
				     ->ptid.to_string ().c_str ());
	      return;
	    }
	}
      else
	{
	  /* For all-stop targets, when we attempt to resume the inferior,
	     we will only resume the vfork parent thread, this is handled
	     in internal_resume_ptid.

	     Additionally, we will always be called with the vfork parent
	     thread as the current thread (TP) thanks to follow_fork, as
	     such the following assertion should hold.

	     Beyond this there is nothing more that needs to be done
	     here.  */
	  gdb_assert (tp == tp->inf->thread_waiting_for_vfork_done);
	}
    }

  /* When handling a vfork GDB removes all breakpoints from the program
     space in which the vfork is being handled.  If we are following the
     child then GDB will set vfork_child member of the vfork parent
     inferior.  Once the child has either exited or execd then GDB will
     detach from the parent process.  Until that point GDB should not
     resume any thread in the parent process.  */
  if (tp->inf->vfork_child != nullptr)
    {
      infrun_debug_printf ("[%s] thread is part of a vfork parent, child is %d",
			   tp->ptid.to_string ().c_str (),
			   tp->inf->vfork_child->pid);
      return;
    }

  /* All checks passed: switch to TP and resume it.  */
  infrun_debug_printf ("resuming %s",
		       tp->ptid.to_string ().c_str ());

  execution_control_state ecs (tp);
  switch_to_thread (tp);
  keep_going_pass_signal (&ecs);
  if (!ecs.wait_some_more)
    error (_("Command aborted."));
}
3499
3500 /* Basic routine for continuing the program in various fashions.
3501
3502 ADDR is the address to resume at, or -1 for resume where stopped.
3503 SIGGNAL is the signal to give it, or GDB_SIGNAL_0 for none,
3504 or GDB_SIGNAL_DEFAULT for act according to how it stopped.
3505
3506 You should call clear_proceed_status before calling proceed. */
3507
3508 void
3509 proceed (CORE_ADDR addr, enum gdb_signal siggnal)
3510 {
3511 INFRUN_SCOPED_DEBUG_ENTER_EXIT;
3512
3513 struct regcache *regcache;
3514 struct gdbarch *gdbarch;
3515 CORE_ADDR pc;
3516
3517 /* If we're stopped at a fork/vfork, switch to either the parent or child
3518 thread as defined by the "set follow-fork-mode" command, or, if both
3519 the parent and child are controlled by GDB, and schedule-multiple is
3520 on, follow the child. If none of the above apply then we just proceed
3521 resuming the current thread. */
3522 if (!follow_fork ())
3523 {
3524 /* The target for some reason decided not to resume. */
3525 normal_stop ();
3526 if (target_can_async_p ())
3527 inferior_event_handler (INF_EXEC_COMPLETE);
3528 return;
3529 }
3530
3531 /* We'll update this if & when we switch to a new thread. */
3532 update_previous_thread ();
3533
3534 regcache = get_current_regcache ();
3535 gdbarch = regcache->arch ();
3536 const address_space *aspace = regcache->aspace ();
3537
3538 pc = regcache_read_pc_protected (regcache);
3539
3540 thread_info *cur_thr = inferior_thread ();
3541
3542 infrun_debug_printf ("cur_thr = %s", cur_thr->ptid.to_string ().c_str ());
3543
3544 /* Fill in with reasonable starting values. */
3545 init_thread_stepping_state (cur_thr);
3546
3547 gdb_assert (!thread_is_in_step_over_chain (cur_thr));
3548
3549 ptid_t resume_ptid
3550 = user_visible_resume_ptid (cur_thr->control.stepping_command);
3551 process_stratum_target *resume_target
3552 = user_visible_resume_target (resume_ptid);
3553
3554 check_multi_target_resumption (resume_target);
3555
3556 if (addr == (CORE_ADDR) -1)
3557 {
3558 if (cur_thr->stop_pc_p ()
3559 && pc == cur_thr->stop_pc ()
3560 && breakpoint_here_p (aspace, pc) == ordinary_breakpoint_here
3561 && execution_direction != EXEC_REVERSE)
3562 /* There is a breakpoint at the address we will resume at,
3563 step one instruction before inserting breakpoints so that
3564 we do not stop right away (and report a second hit at this
3565 breakpoint).
3566
3567 Note, we don't do this in reverse, because we won't
3568 actually be executing the breakpoint insn anyway.
3569 We'll be (un-)executing the previous instruction. */
3570 cur_thr->stepping_over_breakpoint = 1;
3571 else if (gdbarch_single_step_through_delay_p (gdbarch)
3572 && gdbarch_single_step_through_delay (gdbarch,
3573 get_current_frame ()))
3574 /* We stepped onto an instruction that needs to be stepped
3575 again before re-inserting the breakpoint, do so. */
3576 cur_thr->stepping_over_breakpoint = 1;
3577 }
3578 else
3579 {
3580 regcache_write_pc (regcache, addr);
3581 }
3582
3583 if (siggnal != GDB_SIGNAL_DEFAULT)
3584 cur_thr->set_stop_signal (siggnal);
3585
3586 /* If an exception is thrown from this point on, make sure to
3587 propagate GDB's knowledge of the executing state to the
3588 frontend/user running state. */
3589 scoped_finish_thread_state finish_state (resume_target, resume_ptid);
3590
3591 /* Even if RESUME_PTID is a wildcard, and we end up resuming fewer
3592 threads (e.g., we might need to set threads stepping over
3593 breakpoints first), from the user/frontend's point of view, all
3594 threads in RESUME_PTID are now running. Unless we're calling an
3595 inferior function, as in that case we pretend the inferior
3596 doesn't run at all. */
3597 if (!cur_thr->control.in_infcall)
3598 set_running (resume_target, resume_ptid, true);
3599
3600 infrun_debug_printf ("addr=%s, signal=%s, resume_ptid=%s",
3601 paddress (gdbarch, addr),
3602 gdb_signal_to_symbol_string (siggnal),
3603 resume_ptid.to_string ().c_str ());
3604
3605 annotate_starting ();
3606
3607 /* Make sure that output from GDB appears before output from the
3608 inferior. */
3609 gdb_flush (gdb_stdout);
3610
3611 /* Since we've marked the inferior running, give it the terminal. A
3612 QUIT/Ctrl-C from here on is forwarded to the target (which can
3613 still detect attempts to unblock a stuck connection with repeated
3614 Ctrl-C from within target_pass_ctrlc). */
3615 target_terminal::inferior ();
3616
3617 /* In a multi-threaded task we may select another thread and
3618 then continue or step.
3619
3620 But if a thread that we're resuming had stopped at a breakpoint,
3621 it will immediately cause another breakpoint stop without any
3622 execution (i.e. it will report a breakpoint hit incorrectly). So
3623 we must step over it first.
3624
3625 Look for threads other than the current (TP) that reported a
3626 breakpoint hit and haven't been resumed yet since. */
3627
3628 /* If scheduler locking applies, we can avoid iterating over all
3629 threads. */
3630 if (!non_stop && !schedlock_applies (cur_thr))
3631 {
3632 for (thread_info *tp : all_non_exited_threads (resume_target,
3633 resume_ptid))
3634 {
3635 switch_to_thread_no_regs (tp);
3636
3637 /* Ignore the current thread here. It's handled
3638 afterwards. */
3639 if (tp == cur_thr)
3640 continue;
3641
3642 if (!thread_still_needs_step_over (tp))
3643 continue;
3644
3645 gdb_assert (!thread_is_in_step_over_chain (tp));
3646
3647 infrun_debug_printf ("need to step-over [%s] first",
3648 tp->ptid.to_string ().c_str ());
3649
3650 global_thread_step_over_chain_enqueue (tp);
3651 }
3652
3653 switch_to_thread (cur_thr);
3654 }
3655
3656 /* Enqueue the current thread last, so that we move all other
3657 threads over their breakpoints first. */
3658 if (cur_thr->stepping_over_breakpoint)
3659 global_thread_step_over_chain_enqueue (cur_thr);
3660
3661 /* If the thread isn't started, we'll still need to set its prev_pc,
3662 so that switch_back_to_stepped_thread knows the thread hasn't
3663 advanced. Must do this before resuming any thread, as in
3664 all-stop/remote, once we resume we can't send any other packet
3665 until the target stops again. */
3666 cur_thr->prev_pc = regcache_read_pc_protected (regcache);
3667
3668 {
3669 scoped_disable_commit_resumed disable_commit_resumed ("proceeding");
3670 bool step_over_started = start_step_over ();
3671
3672 if (step_over_info_valid_p ())
3673 {
3674 /* Either this thread started a new in-line step over, or some
3675 other thread was already doing one. In either case, don't
3676 resume anything else until the step-over is finished. */
3677 }
3678 else if (step_over_started && !target_is_non_stop_p ())
3679 {
3680 /* A new displaced stepping sequence was started. In all-stop,
3681 we can't talk to the target anymore until it next stops. */
3682 }
3683 else if (!non_stop && target_is_non_stop_p ())
3684 {
3685 INFRUN_SCOPED_DEBUG_START_END
3686 ("resuming threads, all-stop-on-top-of-non-stop");
3687
3688 /* In all-stop, but the target is always in non-stop mode.
3689 Start all other threads that are implicitly resumed too. */
3690 for (thread_info *tp : all_non_exited_threads (resume_target,
3691 resume_ptid))
3692 {
3693 switch_to_thread_no_regs (tp);
3694 proceed_resume_thread_checked (tp);
3695 }
3696 }
3697 else
3698 proceed_resume_thread_checked (cur_thr);
3699
3700 disable_commit_resumed.reset_and_commit ();
3701 }
3702
3703 finish_state.release ();
3704
3705 /* If we've switched threads above, switch back to the previously
3706 current thread. We don't want the user to see a different
3707 selected thread. */
3708 switch_to_thread (cur_thr);
3709
3710 /* Tell the event loop to wait for it to stop. If the target
3711 supports asynchronous execution, it'll do this from within
3712 target_resume. */
3713 if (!target_can_async_p ())
3714 mark_async_event_handler (infrun_async_inferior_event_token);
3715 }
3716 \f
3717
/* Start remote-debugging of a machine over a serial link.  */

void
start_remote (int from_tty)
{
  inferior *inf = current_inferior ();
  /* The target was just opened and is assumed stopped; suppress the
     usual stop announcement for this initial stop.  */
  inf->control.stop_soon = STOP_QUIETLY_REMOTE;

  /* Always go on waiting for the target, regardless of the mode. */
  /* FIXME: cagney/1999-09-23: At present it isn't possible to
     indicate to wait_for_inferior that a target should timeout if
     nothing is returned (instead of just blocking).  Because of this,
     targets expecting an immediate response need to, internally, set
     things up so that the target_wait() is forced to eventually
     timeout. */
  /* FIXME: cagney/1999-09-24: It isn't possible for target_open() to
     differentiate to its caller what the state of the target is after
     the initial open has been performed.  Here we're assuming that
     the target has stopped.  It should be possible to eventually have
     target_open() return to the caller an indication that the target
     is currently running and GDB state should be set to the same as
     for an async run. */
  wait_for_inferior (inf);

  /* Now that the inferior has stopped, do any bookkeeping like
     loading shared libraries.  We want to do this before normal_stop,
     so that the displayed frame is up to date. */
  post_create_inferior (from_tty);

  /* Present the stop to the user.  */
  normal_stop ();
}
3749
/* Initialize static vars when a new inferior begins.  */

void
init_wait_for_inferior (void)
{
  /* These are meaningless until the first time through wait_for_inferior.  */

  /* Reset breakpoint bookkeeping to the "inferior is starting" state.  */
  breakpoint_init_inferior (inf_starting);

  /* Clear any stale proceed/stop state left over from a previous run.  */
  clear_proceed_status (0);

  /* Forget which ptid the last target_wait returned.  */
  nullify_last_target_wait_ptid ();

  update_previous_thread ();
}
3765
3766 \f
3767
3768 static void handle_inferior_event (struct execution_control_state *ecs);
3769
3770 static void handle_step_into_function (struct gdbarch *gdbarch,
3771 struct execution_control_state *ecs);
3772 static void handle_step_into_function_backward (struct gdbarch *gdbarch,
3773 struct execution_control_state *ecs);
3774 static void handle_signal_stop (struct execution_control_state *ecs);
3775 static void check_exception_resume (struct execution_control_state *,
3776 frame_info_ptr);
3777
3778 static void end_stepping_range (struct execution_control_state *ecs);
3779 static void stop_waiting (struct execution_control_state *ecs);
3780 static void keep_going (struct execution_control_state *ecs);
3781 static void process_event_stop_test (struct execution_control_state *ecs);
3782 static bool switch_back_to_stepped_thread (struct execution_control_state *ecs);
3783
/* This function is attached as a "thread_stop_requested" observer.
   Cleanup local state that assumed the PTID was to be resumed, and
   report the stop to the frontend.  */

static void
infrun_thread_stop_requested (ptid_t ptid)
{
  /* Only threads of the current inferior's process target are
     considered; PTID may be a wildcard matching several of them.  */
  process_stratum_target *curr_target = current_inferior ()->process_target ();

  /* PTID was requested to stop.  If the thread was already stopped,
     but the user/frontend doesn't know about that yet (e.g., the
     thread had been temporarily paused for some step-over), set up
     for reporting the stop now.  */
  for (thread_info *tp : all_threads (curr_target, ptid))
    {
      /* Skip threads the frontend already sees as stopped, and
	 threads that are really executing on the target -- the latter
	 will report their stop through the normal event path.  */
      if (tp->state != THREAD_RUNNING)
	continue;
      if (tp->executing ())
	continue;

      /* Remove matching threads from the step-over queue, so
	 start_step_over doesn't try to resume them
	 automatically.  */
      if (thread_is_in_step_over_chain (tp))
	global_thread_step_over_chain_remove (tp);

      /* If the thread is stopped, but the user/frontend doesn't
	 know about that yet, queue a pending event, as if the
	 thread had just stopped now.  Unless the thread already had
	 a pending event.  */
      if (!tp->has_pending_waitstatus ())
	{
	  target_waitstatus ws;
	  ws.set_stopped (GDB_SIGNAL_0);
	  tp->set_pending_waitstatus (ws);
	}

      /* Clear the inline-frame state, since we're re-processing the
	 stop.  */
      clear_inline_frame_state (tp);

      /* If this thread was paused because some other thread was
	 doing an inline-step over, let that finish first.  Once
	 that happens, we'll restart all threads and consume pending
	 stop events then.  */
      if (step_over_info_valid_p ())
	continue;

      /* Otherwise we can process the (new) pending event now.  Set
	 it so this pending event is considered by
	 do_target_wait.  */
      tp->set_resumed (true);
    }
}
3838
/* Delete the step resume, single-step and longjmp/exception resume
   breakpoints of TP.  These are internal breakpoints infrun plants on
   behalf of a thread while it runs; none should survive a stop.  */

static void
delete_thread_infrun_breakpoints (struct thread_info *tp)
{
  delete_step_resume_breakpoint (tp);
  delete_exception_resume_breakpoint (tp);
  delete_single_step_breakpoints (tp);
}
3849
3850 /* If the target still has execution, call FUNC for each thread that
3851 just stopped. In all-stop, that's all the non-exited threads; in
3852 non-stop, that's the current thread, only. */
3853
3854 typedef void (*for_each_just_stopped_thread_callback_func)
3855 (struct thread_info *tp);
3856
3857 static void
3858 for_each_just_stopped_thread (for_each_just_stopped_thread_callback_func func)
3859 {
3860 if (!target_has_execution () || inferior_ptid == null_ptid)
3861 return;
3862
3863 if (target_is_non_stop_p ())
3864 {
3865 /* If in non-stop mode, only the current thread stopped. */
3866 func (inferior_thread ());
3867 }
3868 else
3869 {
3870 /* In all-stop mode, all threads have stopped. */
3871 for (thread_info *tp : all_non_exited_threads ())
3872 func (tp);
3873 }
3874 }
3875
/* Delete the step resume and longjmp/exception resume breakpoints of
   the threads that just stopped.  Convenience wrapper around
   for_each_just_stopped_thread.  */

static void
delete_just_stopped_threads_infrun_breakpoints (void)
{
  for_each_just_stopped_thread (delete_thread_infrun_breakpoints);
}
3884
/* Delete the single-step breakpoints of the threads that just
   stopped.  Unlike the function above, this leaves the step resume
   and exception resume breakpoints alone.  */

static void
delete_just_stopped_threads_single_step_breakpoints (void)
{
  for_each_just_stopped_thread (delete_single_step_breakpoints);
}
3893
/* See infrun.h.  */

void
print_target_wait_results (ptid_t waiton_ptid, ptid_t result_ptid,
			   const struct target_waitstatus &ws)
{
  /* The output is split across three debug calls: the wait request,
     the resulting ptid, and the waitstatus, each on its own line.  */
  infrun_debug_printf ("target_wait (%s [%s], status) =",
		       waiton_ptid.to_string ().c_str (),
		       target_pid_to_str (waiton_ptid).c_str ());
  infrun_debug_printf ("  %s [%s],",
		       result_ptid.to_string ().c_str (),
		       target_pid_to_str (result_ptid).c_str ());
  infrun_debug_printf ("  %s", ws.to_string ().c_str ());
}
3908
3909 /* Select a thread at random, out of those which are resumed and have
3910 had events. */
3911
3912 static struct thread_info *
3913 random_pending_event_thread (inferior *inf, ptid_t waiton_ptid)
3914 {
3915 process_stratum_target *proc_target = inf->process_target ();
3916 thread_info *thread
3917 = proc_target->random_resumed_with_pending_wait_status (inf, waiton_ptid);
3918
3919 if (thread == nullptr)
3920 {
3921 infrun_debug_printf ("None found.");
3922 return nullptr;
3923 }
3924
3925 infrun_debug_printf ("Found %s.", thread->ptid.to_string ().c_str ());
3926 gdb_assert (thread->resumed ());
3927 gdb_assert (thread->has_pending_waitstatus ());
3928
3929 return thread;
3930 }
3931
/* Wrapper for target_wait that first checks whether threads have
   pending statuses to report before actually asking the target for
   more events.  INF is the inferior we're using to call target_wait
   on.  Returns the ptid the reported event is for; the event itself
   is stored in *STATUS.  */

static ptid_t
do_target_wait_1 (inferior *inf, ptid_t ptid,
		  target_waitstatus *status, target_wait_flags options)
{
  struct thread_info *tp;

  /* We know that we are looking for an event in the target of inferior
     INF, but we don't know which thread the event might come from.  As
     such we want to make sure that INFERIOR_PTID is reset so that none of
     the wait code relies on it - doing so is always a mistake.  */
  switch_to_inferior_no_thread (inf);

  /* First check if there is a resumed thread with a wait status
     pending.  */
  if (ptid == minus_one_ptid || ptid.is_pid ())
    {
      /* Wildcard or whole-process wait: pick among all candidates at
	 random, for fairness.  */
      tp = random_pending_event_thread (inf, ptid);
    }
  else
    {
      infrun_debug_printf ("Waiting for specific thread %s.",
			   ptid.to_string ().c_str ());

      /* We have a specific thread to check.  */
      tp = inf->find_thread (ptid);
      gdb_assert (tp != nullptr);
      if (!tp->has_pending_waitstatus ())
	tp = nullptr;
    }

  /* If the pending event is a breakpoint stop, re-validate it: the
     thread's PC may have changed since, or the breakpoint may have
     been removed, in which case the event is stale.  */
  if (tp != nullptr
      && (tp->stop_reason () == TARGET_STOPPED_BY_SW_BREAKPOINT
	  || tp->stop_reason () == TARGET_STOPPED_BY_HW_BREAKPOINT))
    {
      struct regcache *regcache = get_thread_regcache (tp);
      struct gdbarch *gdbarch = regcache->arch ();
      CORE_ADDR pc;
      int discard = 0;

      pc = regcache_read_pc (regcache);

      if (pc != tp->stop_pc ())
	{
	  infrun_debug_printf ("PC of %s changed.  was=%s, now=%s",
			       tp->ptid.to_string ().c_str (),
			       paddress (gdbarch, tp->stop_pc ()),
			       paddress (gdbarch, pc));
	  discard = 1;
	}
      else if (!breakpoint_inserted_here_p (regcache->aspace (), pc))
	{
	  infrun_debug_printf ("previous breakpoint of %s, at %s gone",
			       tp->ptid.to_string ().c_str (),
			       paddress (gdbarch, pc));

	  discard = 1;
	}

      if (discard)
	{
	  infrun_debug_printf ("pending event of %s cancelled.",
			       tp->ptid.to_string ().c_str ());

	  /* Replace the stale breakpoint event with a spurious stop,
	     so the thread gets re-resumed when the event is
	     processed.  */
	  tp->clear_pending_waitstatus ();
	  target_waitstatus ws;
	  ws.set_spurious ();
	  tp->set_pending_waitstatus (ws);
	  tp->set_stop_reason (TARGET_STOPPED_BY_NO_REASON);
	}
    }

  if (tp != nullptr)
    {
      infrun_debug_printf ("Using pending wait status %s for %s.",
			   tp->pending_waitstatus ().to_string ().c_str (),
			   tp->ptid.to_string ().c_str ());

      /* Now that we've selected our final event LWP, un-adjust its PC
	 if it was a software breakpoint (and the target doesn't
	 always adjust the PC itself).  */
      if (tp->stop_reason () == TARGET_STOPPED_BY_SW_BREAKPOINT
	  && !target_supports_stopped_by_sw_breakpoint ())
	{
	  struct regcache *regcache;
	  struct gdbarch *gdbarch;
	  int decr_pc;

	  regcache = get_thread_regcache (tp);
	  gdbarch = regcache->arch ();

	  decr_pc = gdbarch_decr_pc_after_break (gdbarch);
	  if (decr_pc != 0)
	    {
	      CORE_ADDR pc;

	      pc = regcache_read_pc (regcache);
	      regcache_write_pc (regcache, pc + decr_pc);
	    }
	}

      /* Consume the pending status: hand it to the caller and clear
	 it from the thread.  */
      tp->set_stop_reason (TARGET_STOPPED_BY_NO_REASON);
      *status = tp->pending_waitstatus ();
      tp->clear_pending_waitstatus ();

      /* Wake up the event loop again, until all pending events are
	 processed.  */
      if (target_is_async_p ())
	mark_async_event_handler (infrun_async_inferior_event_token);
      return tp->ptid;
    }

  /* But if we don't find one, we'll have to wait.  */

  /* We can't ask a non-async target to do a non-blocking wait, so this will be
     a blocking wait.  */
  if (!target_can_async_p ())
    options &= ~TARGET_WNOHANG;

  return target_wait (ptid, status, options);
}
4057
/* Wrapper for target_wait that first checks whether threads have
   pending statuses to report before actually asking the target for
   more events.  Polls for events from all inferiors/targets.  Returns
   true and fills ECS if some event was found; returns false (with
   ECS->ws set to "ignore") otherwise.  */

static bool
do_target_wait (execution_control_state *ecs, target_wait_flags options)
{
  int num_inferiors = 0;
  int random_selector;

  /* For fairness, we pick the first inferior/target to poll at random
     out of all inferiors that may report events, and then continue
     polling the rest of the inferior list starting from that one in a
     circular fashion until the whole list is polled once.  */

  /* An inferior can report events only if it is bound to a process
     target.  */
  auto inferior_matches = [] (inferior *inf)
  {
    return inf->process_target () != nullptr;
  };

  /* First see how many matching inferiors we have.  */
  for (inferior *inf : all_inferiors ())
    if (inferior_matches (inf))
      num_inferiors++;

  if (num_inferiors == 0)
    {
      ecs->ws.set_ignore ();
      return false;
    }

  /* Now randomly pick an inferior out of those that matched.  */
  random_selector = (int)
    ((num_inferiors * (double) rand ()) / (RAND_MAX + 1.0));

  if (num_inferiors > 1)
    infrun_debug_printf ("Found %d inferiors, starting at #%d",
			 num_inferiors, random_selector);

  /* Select the Nth inferior that matched.  */

  inferior *selected = nullptr;

  for (inferior *inf : all_inferiors ())
    if (inferior_matches (inf))
      if (random_selector-- == 0)
	{
	  selected = inf;
	  break;
	}

  /* Now poll for events out of each of the matching inferior's
     targets, starting from the selected one.  */

  auto do_wait = [&] (inferior *inf)
  {
    ecs->ptid = do_target_wait_1 (inf, minus_one_ptid, &ecs->ws, options);
    ecs->target = inf->process_target ();
    return (ecs->ws.kind () != TARGET_WAITKIND_IGNORE);
  };

  /* Needed in 'all-stop + target-non-stop' mode, because we end up
     here spuriously after the target is all stopped and we've already
     reported the stop to the user, polling for events.  */
  scoped_restore_current_thread restore_thread;

  intrusive_list_iterator<inferior> start
    = inferior_list.iterator_to (*selected);

  /* Poll [SELECTED, end) first...  */
  for (intrusive_list_iterator<inferior> it = start;
       it != inferior_list.end ();
       ++it)
    {
      inferior *inf = &*it;

      if (inferior_matches (inf) && do_wait (inf))
	return true;
    }

  /* ...then wrap around and poll [begin, SELECTED), so every matching
     inferior is visited exactly once.  */
  for (intrusive_list_iterator<inferior> it = inferior_list.begin ();
       it != start;
       ++it)
    {
      inferior *inf = &*it;

      if (inferior_matches (inf) && do_wait (inf))
	return true;
    }

  /* No target had any event to report.  */
  ecs->ws.set_ignore ();
  return false;
}
4150
/* An event reported by wait_one.  Bundles the three values a target
   wait produces so they can be passed around as a single object.  */

struct wait_one_event
{
  /* The target the event came out of.  */
  process_stratum_target *target;

  /* The PTID the event was for.  */
  ptid_t ptid;

  /* The waitstatus.  */
  target_waitstatus ws;
};
4164
4165 static bool handle_one (const wait_one_event &event);
4166
/* Prepare and stabilize the inferior for detaching it.  E.g.,
   detaching while a thread is displaced stepping is a recipe for
   crashing it, as nothing would readjust the PC out of the scratch
   pad.  */

void
prepare_for_detach (void)
{
  struct inferior *inf = current_inferior ();
  ptid_t pid_ptid = ptid_t (inf->pid);
  scoped_restore_current_thread restore_thread;

  /* Flag the inferior as detaching for the duration of this function;
     restored automatically on exit.  */
  scoped_restore restore_detaching = make_scoped_restore (&inf->detaching, true);

  /* Remove all threads of INF from the global step-over chain.  We
     want to stop any ongoing step-over, not start any new one.  */
  thread_step_over_list_safe_range range
    = make_thread_step_over_list_safe_range (global_thread_step_over_list);

  for (thread_info *tp : range)
    if (tp->inf == inf)
      {
	infrun_debug_printf ("removing thread %s from global step over chain",
			     tp->ptid.to_string ().c_str ());
	global_thread_step_over_chain_remove (tp);
      }

  /* If we were already in the middle of an inline step-over, and the
     thread stepping belongs to the inferior we're detaching, we need
     to restart the threads of other inferiors.  */
  if (step_over_info.thread != -1)
    {
      infrun_debug_printf ("inline step-over in-process while detaching");

      thread_info *thr = find_thread_global_id (step_over_info.thread);
      if (thr->inf == inf)
	{
	  /* Since we removed threads of INF from the step-over chain,
	     we know this won't start a step-over for INF.  */
	  clear_step_over_info ();

	  if (target_is_non_stop_p ())
	    {
	      /* Start a new step-over in another thread if there's
		 one that needs it.  */
	      start_step_over ();

	      /* Restart all other threads (except the
		 previously-stepping thread, since that one is still
		 running).  */
	      if (!step_over_info_valid_p ())
		restart_threads (thr);
	    }
	}
    }

  if (displaced_step_in_progress (inf))
    {
      infrun_debug_printf ("displaced-stepping in-process while detaching");

      /* Stop threads currently displaced stepping, aborting it.  */

      for (thread_info *thr : inf->non_exited_threads ())
	{
	  if (thr->displaced_step_state.in_progress ())
	    {
	      if (thr->executing ())
		{
		  /* Ask the target to stop the thread, once, and
		     remember we did so.  */
		  if (!thr->stop_requested)
		    {
		      target_stop (thr->ptid);
		      thr->stop_requested = true;
		    }
		}
	      else
		thr->set_resumed (false);
	    }
	}

      /* Drain events until no thread of INF is displaced stepping
	 anymore; handle_one finishes each in-flight step.  */
      while (displaced_step_in_progress (inf))
	{
	  wait_one_event event;

	  event.target = inf->process_target ();
	  event.ptid = do_target_wait_1 (inf, pid_ptid, &event.ws, 0);

	  if (debug_infrun)
	    print_target_wait_results (pid_ptid, event.ptid, event.ws);

	  handle_one (event);
	}

      /* It's OK to leave some of the threads of INF stopped, since
	 they'll be detached shortly.  */
    }
}
4263
/* If all-stop, but there exists a non-stop target, stop all threads
   now that we're presenting the stop to the user.  In pure all-stop
   (no non-stop target) the target already stopped everything.  */

static void
stop_all_threads_if_all_stop_mode ()
{
  if (!non_stop && exists_non_stop_target ())
    stop_all_threads ("presenting stop to user in all-stop");
}
4273
4274 /* Wait for control to return from inferior to debugger.
4275
4276 If inferior gets a signal, we may decide to start it up again
4277 instead of returning. That is why there is a loop in this function.
4278 When this function actually returns it means the inferior
4279 should be left stopped and GDB should read more commands. */
4280
4281 static void
4282 wait_for_inferior (inferior *inf)
4283 {
4284 infrun_debug_printf ("wait_for_inferior ()");
4285
4286 SCOPE_EXIT { delete_just_stopped_threads_infrun_breakpoints (); };
4287
4288 /* If an error happens while handling the event, propagate GDB's
4289 knowledge of the executing state to the frontend/user running
4290 state. */
4291 scoped_finish_thread_state finish_state
4292 (inf->process_target (), minus_one_ptid);
4293
4294 while (1)
4295 {
4296 execution_control_state ecs;
4297
4298 overlay_cache_invalid = 1;
4299
4300 /* Flush target cache before starting to handle each event.
4301 Target was running and cache could be stale. This is just a
4302 heuristic. Running threads may modify target memory, but we
4303 don't get any event. */
4304 target_dcache_invalidate ();
4305
4306 ecs.ptid = do_target_wait_1 (inf, minus_one_ptid, &ecs.ws, 0);
4307 ecs.target = inf->process_target ();
4308
4309 if (debug_infrun)
4310 print_target_wait_results (minus_one_ptid, ecs.ptid, ecs.ws);
4311
4312 /* Now figure out what to do with the result of the result. */
4313 handle_inferior_event (&ecs);
4314
4315 if (!ecs.wait_some_more)
4316 break;
4317 }
4318
4319 stop_all_threads_if_all_stop_mode ();
4320
4321 /* No error, don't finish the state yet. */
4322 finish_state.release ();
4323 }
4324
4325 /* Cleanup that reinstalls the readline callback handler, if the
4326 target is running in the background. If while handling the target
4327 event something triggered a secondary prompt, like e.g., a
4328 pagination prompt, we'll have removed the callback handler (see
4329 gdb_readline_wrapper_line). Need to do this as we go back to the
4330 event loop, ready to process further input. Note this has no
4331 effect if the handler hasn't actually been removed, because calling
4332 rl_callback_handler_install resets the line buffer, thus losing
4333 input. */
4334
4335 static void
4336 reinstall_readline_callback_handler_cleanup ()
4337 {
4338 struct ui *ui = current_ui;
4339
4340 if (!ui->async)
4341 {
4342 /* We're not going back to the top level event loop yet. Don't
4343 install the readline callback, as it'd prep the terminal,
4344 readline-style (raw, noecho) (e.g., --batch). We'll install
4345 it the next time the prompt is displayed, when we're ready
4346 for input. */
4347 return;
4348 }
4349
4350 if (ui->command_editing && ui->prompt_state != PROMPT_BLOCKED)
4351 gdb_rl_callback_handler_reinstall ();
4352 }
4353
/* Clean up the FSMs of threads that are now stopped.  In non-stop,
   that's just the event thread.  In all-stop, that's all threads.  */

static void
clean_up_just_stopped_threads_fsms (struct execution_control_state *ecs)
{
  /* The first clean_up call below assumes the event thread is the current
     one.  */
  if (ecs->event_thread != nullptr)
    gdb_assert (ecs->event_thread == inferior_thread ());

  if (ecs->event_thread != nullptr
      && ecs->event_thread->thread_fsm () != nullptr)
    ecs->event_thread->thread_fsm ()->clean_up (ecs->event_thread);

  if (!non_stop)
    {
      /* All-stop: clean up the FSM of every other stopped thread too.
	 Each clean_up runs with its own thread selected; the original
	 selection is restored on scope exit.  */
      scoped_restore_current_thread restore_thread;

      for (thread_info *thr : all_non_exited_threads ())
	{
	  if (thr->thread_fsm () == nullptr)
	    continue;
	  /* The event thread was already handled above.  */
	  if (thr == ecs->event_thread)
	    continue;

	  switch_to_thread (thr);
	  thr->thread_fsm ()->clean_up (thr);
	}
    }
}
4385
4386 /* Helper for all_uis_check_sync_execution_done that works on the
4387 current UI. */
4388
4389 static void
4390 check_curr_ui_sync_execution_done (void)
4391 {
4392 struct ui *ui = current_ui;
4393
4394 if (ui->prompt_state == PROMPT_NEEDED
4395 && ui->async
4396 && !gdb_in_secondary_prompt_p (ui))
4397 {
4398 target_terminal::ours ();
4399 top_level_interpreter ()->on_sync_execution_done ();
4400 ui->register_file_handler ();
4401 }
4402 }
4403
/* See infrun.h.  */

void
all_uis_check_sync_execution_done (void)
{
  /* SWITCH_THRU_ALL_UIS makes each UI current in turn, so the helper
     can operate on current_ui.  */
  SWITCH_THRU_ALL_UIS ()
    {
      check_curr_ui_sync_execution_done ();
    }
}
4414
/* See infrun.h.  */

void
all_uis_on_sync_execution_starting (void)
{
  /* For every UI that was waiting at a prompt, stop reading stdin
     while the synchronous execution is in progress.  */
  SWITCH_THRU_ALL_UIS ()
    {
      if (current_ui->prompt_state == PROMPT_NEEDED)
	async_disable_stdin ();
    }
}
4426
4427 /* A quit_handler callback installed while we're handling inferior
4428 events. */
4429
4430 static void
4431 infrun_quit_handler ()
4432 {
4433 if (target_terminal::is_ours ())
4434 {
4435 /* Do nothing.
4436
4437 default_quit_handler would throw a quit in this case, but if
4438 we're handling an event while we have the terminal, it means
4439 the target is running a background execution command, and
4440 thus when users press Ctrl-C, they're wanting to interrupt
4441 whatever command they were executing in the command line.
4442 E.g.:
4443
4444 (gdb) c&
4445 (gdb) foo bar whatever<ctrl-c>
4446
4447 That Ctrl-C should clear the input line, not interrupt event
4448 handling if it happens that the user types Ctrl-C at just the
4449 "wrong" time!
4450
4451 It's as-if background event handling was handled by a
4452 separate background thread.
4453
4454 To be clear, the Ctrl-C is not lost -- it will be processed
4455 by the next QUIT call once we're out of fetch_inferior_event
4456 again. */
4457 }
4458 else
4459 {
4460 if (check_quit_flag ())
4461 target_pass_ctrlc ();
4462 }
4463 }
4464
4465 /* Asynchronous version of wait_for_inferior. It is called by the
4466 event loop whenever a change of state is detected on the file
4467 descriptor corresponding to the target. It can be called more than
4468 once to complete a single execution command. In such cases we need
4469 to keep the state in a global variable ECSS. If it is the last time
4470 that this function is called for a single execution command, then
4471 report to the user that the inferior has stopped, and do the
4472 necessary cleanups. */
4473
4474 void
4475 fetch_inferior_event ()
4476 {
4477 INFRUN_SCOPED_DEBUG_ENTER_EXIT;
4478
4479 execution_control_state ecs;
4480 int cmd_done = 0;
4481
4482 /* Events are always processed with the main UI as current UI. This
4483 way, warnings, debug output, etc. are always consistently sent to
4484 the main console. */
4485 scoped_restore save_ui = make_scoped_restore (&current_ui, main_ui);
4486
4487 /* Temporarily disable pagination. Otherwise, the user would be
4488 given an option to press 'q' to quit, which would cause an early
4489 exit and could leave GDB in a half-baked state. */
4490 scoped_restore save_pagination
4491 = make_scoped_restore (&pagination_enabled, false);
4492
4493 /* Install a quit handler that does nothing if we have the terminal
4494 (meaning the target is running a background execution command),
4495 so that Ctrl-C never interrupts GDB before the event is fully
4496 handled. */
4497 scoped_restore restore_quit_handler
4498 = make_scoped_restore (&quit_handler, infrun_quit_handler);
4499
4500 /* Make sure a SIGINT does not interrupt an extension language while
4501 we're handling an event. That could interrupt a Python unwinder
4502 or a Python observer or some such. A Ctrl-C should either be
4503 forwarded to the inferior if the inferior has the terminal, or,
4504 if GDB has the terminal, should interrupt the command the user is
4505 typing in the CLI. */
4506 scoped_disable_cooperative_sigint_handling restore_coop_sigint;
4507
4508 /* End up with readline processing input, if necessary. */
4509 {
4510 SCOPE_EXIT { reinstall_readline_callback_handler_cleanup (); };
4511
4512 /* We're handling a live event, so make sure we're doing live
4513 debugging. If we're looking at traceframes while the target is
4514 running, we're going to need to get back to that mode after
4515 handling the event. */
4516 gdb::optional<scoped_restore_current_traceframe> maybe_restore_traceframe;
4517 if (non_stop)
4518 {
4519 maybe_restore_traceframe.emplace ();
4520 set_current_traceframe (-1);
4521 }
4522
4523 /* The user/frontend should not notice a thread switch due to
4524 internal events. Make sure we revert to the user selected
4525 thread and frame after handling the event and running any
4526 breakpoint commands. */
4527 scoped_restore_current_thread restore_thread;
4528
4529 overlay_cache_invalid = 1;
4530 /* Flush target cache before starting to handle each event. Target
4531 was running and cache could be stale. This is just a heuristic.
4532 Running threads may modify target memory, but we don't get any
4533 event. */
4534 target_dcache_invalidate ();
4535
4536 scoped_restore save_exec_dir
4537 = make_scoped_restore (&execution_direction,
4538 target_execution_direction ());
4539
4540 /* Allow targets to pause their resumed threads while we handle
4541 the event. */
4542 scoped_disable_commit_resumed disable_commit_resumed ("handling event");
4543
4544 if (!do_target_wait (&ecs, TARGET_WNOHANG))
4545 {
4546 infrun_debug_printf ("do_target_wait returned no event");
4547 disable_commit_resumed.reset_and_commit ();
4548 return;
4549 }
4550
4551 gdb_assert (ecs.ws.kind () != TARGET_WAITKIND_IGNORE);
4552
4553 /* Switch to the inferior that generated the event, so we can do
4554 target calls. If the event was not associated to a ptid, */
4555 if (ecs.ptid != null_ptid
4556 && ecs.ptid != minus_one_ptid)
4557 switch_to_inferior_no_thread (find_inferior_ptid (ecs.target, ecs.ptid));
4558 else
4559 switch_to_target_no_thread (ecs.target);
4560
4561 if (debug_infrun)
4562 print_target_wait_results (minus_one_ptid, ecs.ptid, ecs.ws);
4563
4564 /* If an error happens while handling the event, propagate GDB's
4565 knowledge of the executing state to the frontend/user running
4566 state. */
4567 ptid_t finish_ptid = !target_is_non_stop_p () ? minus_one_ptid : ecs.ptid;
4568 scoped_finish_thread_state finish_state (ecs.target, finish_ptid);
4569
4570 /* Get executed before scoped_restore_current_thread above to apply
4571 still for the thread which has thrown the exception. */
4572 auto defer_bpstat_clear
4573 = make_scope_exit (bpstat_clear_actions);
4574 auto defer_delete_threads
4575 = make_scope_exit (delete_just_stopped_threads_infrun_breakpoints);
4576
4577 int stop_id = get_stop_id ();
4578
4579 /* Now figure out what to do with the result of the result. */
4580 handle_inferior_event (&ecs);
4581
4582 if (!ecs.wait_some_more)
4583 {
4584 struct inferior *inf = find_inferior_ptid (ecs.target, ecs.ptid);
4585 bool should_stop = true;
4586 struct thread_info *thr = ecs.event_thread;
4587
4588 delete_just_stopped_threads_infrun_breakpoints ();
4589
4590 if (thr != nullptr && thr->thread_fsm () != nullptr)
4591 should_stop = thr->thread_fsm ()->should_stop (thr);
4592
4593 if (!should_stop)
4594 {
4595 keep_going (&ecs);
4596 }
4597 else
4598 {
4599 bool should_notify_stop = true;
4600 bool proceeded = false;
4601
4602 stop_all_threads_if_all_stop_mode ();
4603
4604 clean_up_just_stopped_threads_fsms (&ecs);
4605
4606 if (stop_id != get_stop_id ())
4607 {
4608 /* If the stop-id has changed then a stop has already been
4609 presented to the user in handle_inferior_event, this is
4610 likely a failed inferior call. As the stop has already
4611 been announced then we should not notify again.
4612
4613 Also, if the prompt state is not PROMPT_NEEDED then GDB
4614 will not be ready for user input after this function. */
4615 should_notify_stop = false;
4616 gdb_assert (current_ui->prompt_state == PROMPT_NEEDED);
4617 }
4618 else if (thr != nullptr && thr->thread_fsm () != nullptr)
4619 should_notify_stop
4620 = thr->thread_fsm ()->should_notify_stop ();
4621
4622 if (should_notify_stop)
4623 {
4624 /* We may not find an inferior if this was a process exit. */
4625 if (inf == nullptr || inf->control.stop_soon == NO_STOP_QUIETLY)
4626 proceeded = normal_stop ();
4627 }
4628
4629 if (!proceeded)
4630 {
4631 inferior_event_handler (INF_EXEC_COMPLETE);
4632 cmd_done = 1;
4633 }
4634
4635 /* If we got a TARGET_WAITKIND_NO_RESUMED event, then the
4636 previously selected thread is gone. We have two
4637 choices - switch to no thread selected, or restore the
4638 previously selected thread (now exited). We chose the
4639 later, just because that's what GDB used to do. After
4640 this, "info threads" says "The current thread <Thread
4641 ID 2> has terminated." instead of "No thread
4642 selected.". */
4643 if (!non_stop
4644 && cmd_done
4645 && ecs.ws.kind () != TARGET_WAITKIND_NO_RESUMED)
4646 restore_thread.dont_restore ();
4647 }
4648 }
4649
4650 defer_delete_threads.release ();
4651 defer_bpstat_clear.release ();
4652
4653 /* No error, don't finish the thread states yet. */
4654 finish_state.release ();
4655
4656 disable_commit_resumed.reset_and_commit ();
4657
4658 /* This scope is used to ensure that readline callbacks are
4659 reinstalled here. */
4660 }
4661
4662 /* Handling this event might have caused some inferiors to become prunable.
4663 For example, the exit of an inferior that was automatically added. Try
4664 to get rid of them. Keeping those around slows down things linearly.
4665
4666 Note that this never removes the current inferior. Therefore, call this
4667 after RESTORE_THREAD went out of scope, in case the event inferior (which was
4668 temporarily made the current inferior) is meant to be deleted.
4669
4670 Call this before all_uis_check_sync_execution_done, so that notifications about
4671 removed inferiors appear before the prompt. */
4672 prune_inferiors ();
4673
4674 /* If a UI was in sync execution mode, and now isn't, restore its
4675 prompt (a synchronous execution command has finished, and we're
4676 ready for input). */
4677 all_uis_check_sync_execution_done ();
4678
4679 if (cmd_done
4680 && exec_done_display_p
4681 && (inferior_ptid == null_ptid
4682 || inferior_thread ()->state != THREAD_RUNNING))
4683 gdb_printf (_("completed.\n"));
4684 }
4685
4686 /* See infrun.h. */
4687
4688 void
4689 set_step_info (thread_info *tp, frame_info_ptr frame,
4690 struct symtab_and_line sal)
4691 {
4692 /* This can be removed once this function no longer implicitly relies on the
4693 inferior_ptid value. */
4694 gdb_assert (inferior_ptid == tp->ptid);
4695
4696 tp->control.step_frame_id = get_frame_id (frame);
4697 tp->control.step_stack_frame_id = get_stack_frame_id (frame);
4698
4699 tp->current_symtab = sal.symtab;
4700 tp->current_line = sal.line;
4701
4702 infrun_debug_printf
4703 ("symtab = %s, line = %d, step_frame_id = %s, step_stack_frame_id = %s",
4704 tp->current_symtab != nullptr ? tp->current_symtab->filename : "<null>",
4705 tp->current_line,
4706 tp->control.step_frame_id.to_string ().c_str (),
4707 tp->control.step_stack_frame_id.to_string ().c_str ());
4708 }
4709
4710 /* Clear context switchable stepping state. */
4711
4712 void
4713 init_thread_stepping_state (struct thread_info *tss)
4714 {
4715 tss->stepped_breakpoint = 0;
4716 tss->stepping_over_breakpoint = 0;
4717 tss->stepping_over_watchpoint = 0;
4718 tss->step_after_step_resume_breakpoint = 0;
4719 }
4720
4721 /* See infrun.h. */
4722
4723 void
4724 set_last_target_status (process_stratum_target *target, ptid_t ptid,
4725 const target_waitstatus &status)
4726 {
4727 target_last_proc_target = target;
4728 target_last_wait_ptid = ptid;
4729 target_last_waitstatus = status;
4730 }
4731
4732 /* See infrun.h. */
4733
4734 void
4735 get_last_target_status (process_stratum_target **target, ptid_t *ptid,
4736 target_waitstatus *status)
4737 {
4738 if (target != nullptr)
4739 *target = target_last_proc_target;
4740 if (ptid != nullptr)
4741 *ptid = target_last_wait_ptid;
4742 if (status != nullptr)
4743 *status = target_last_waitstatus;
4744 }
4745
4746 /* See infrun.h. */
4747
4748 void
4749 nullify_last_target_wait_ptid (void)
4750 {
4751 target_last_proc_target = nullptr;
4752 target_last_wait_ptid = minus_one_ptid;
4753 target_last_waitstatus = {};
4754 }
4755
4756 /* Switch thread contexts. */
4757
4758 static void
4759 context_switch (execution_control_state *ecs)
4760 {
4761 if (ecs->ptid != inferior_ptid
4762 && (inferior_ptid == null_ptid
4763 || ecs->event_thread != inferior_thread ()))
4764 {
4765 infrun_debug_printf ("Switching context from %s to %s",
4766 inferior_ptid.to_string ().c_str (),
4767 ecs->ptid.to_string ().c_str ());
4768 }
4769
4770 switch_to_thread (ecs->event_thread);
4771 }
4772
4773 /* If the target can't tell whether we've hit breakpoints
4774 (target_supports_stopped_by_sw_breakpoint), and we got a SIGTRAP,
4775 check whether that could have been caused by a breakpoint. If so,
4776 adjust the PC, per gdbarch_decr_pc_after_break. */
4777
static void
adjust_pc_after_break (struct thread_info *thread,
		       const target_waitstatus &ws)
{
  struct regcache *regcache;
  struct gdbarch *gdbarch;
  CORE_ADDR breakpoint_pc, decr_pc;

  /* If we've hit a breakpoint, we'll normally be stopped with SIGTRAP.  If
     we aren't, just return.

     We assume that waitkinds other than TARGET_WAITKIND_STOPPED are not
     affected by gdbarch_decr_pc_after_break.  Other waitkinds which are
     implemented by software breakpoints should be handled through the normal
     breakpoint layer.

     NOTE drow/2004-01-31: On some targets, breakpoints may generate
     different signals (SIGILL or SIGEMT for instance), but it is less
     clear where the PC is pointing afterwards.  It may not match
     gdbarch_decr_pc_after_break.  I don't know any specific target that
     generates these signals at breakpoints (the code has been in GDB since at
     least 1992) so I can not guess how to handle them here.

     In earlier versions of GDB, a target with
     gdbarch_have_nonsteppable_watchpoint would have the PC after hitting a
     watchpoint affected by gdbarch_decr_pc_after_break.  I haven't found any
     target with both of these set in GDB history, and it seems unlikely to be
     correct, so gdbarch_have_nonsteppable_watchpoint is not checked here.  */

  if (ws.kind () != TARGET_WAITKIND_STOPPED)
    return;

  if (ws.sig () != GDB_SIGNAL_TRAP)
    return;

  /* In reverse execution, when a breakpoint is hit, the instruction
     under it has already been de-executed.  The reported PC always
     points at the breakpoint address, so adjusting it further would
     be wrong.  E.g., consider this case on a decr_pc_after_break == 1
     architecture:

       B1         0x08000000 :   INSN1
       B2         0x08000001 :   INSN2
		  0x08000002 :   INSN3
	    PC -> 0x08000003 :   INSN4

     Say you're stopped at 0x08000003 as above.  Reverse continuing
     from that point should hit B2 as below.  Reading the PC when the
     SIGTRAP is reported should read 0x08000001 and INSN2 should have
     been de-executed already.

       B1         0x08000000 :   INSN1
       B2   PC -> 0x08000001 :   INSN2
		  0x08000002 :   INSN3
		  0x08000003 :   INSN4

     We can't apply the same logic as for forward execution, because
     we would wrongly adjust the PC to 0x08000000, since there's a
     breakpoint at PC - 1.  We'd then report a hit on B1, although
     INSN1 hadn't been de-executed yet.  Doing nothing is the correct
     behaviour.  */
  if (execution_direction == EXEC_REVERSE)
    return;

  /* If the target can tell whether the thread hit a SW breakpoint,
     trust it.  Targets that can tell also adjust the PC
     themselves.  */
  if (target_supports_stopped_by_sw_breakpoint ())
    return;

  /* Note that relying on whether a breakpoint is planted in memory to
     determine this can fail.  E.g,. the breakpoint could have been
     removed since.  Or the thread could have been told to step an
     instruction the size of a breakpoint instruction, and only
     _after_ was a breakpoint inserted at its address.  */

  /* If this target does not decrement the PC after breakpoints, then
     we have nothing to do.  */
  regcache = get_thread_regcache (thread);
  gdbarch = regcache->arch ();

  decr_pc = gdbarch_decr_pc_after_break (gdbarch);
  if (decr_pc == 0)
    return;

  const address_space *aspace = regcache->aspace ();

  /* Find the location where (if we've hit a breakpoint) the
     breakpoint would be.  */
  breakpoint_pc = regcache_read_pc (regcache) - decr_pc;

  /* If the target can't tell whether a software breakpoint triggered,
     fallback to figuring it out based on breakpoints we think were
     inserted in the target, and on whether the thread was stepped or
     continued.  */

  /* Check whether there actually is a software breakpoint inserted at
     that location.

     If in non-stop mode, a race condition is possible where we've
     removed a breakpoint, but stop events for that breakpoint were
     already queued and arrive later.  To suppress those spurious
     SIGTRAPs, we keep a list of such breakpoint locations for a bit,
     and retire them after a number of stop events are reported.  Note
     this is an heuristic and can thus get confused.  The real fix is
     to get the "stopped by SW BP and needs adjustment" info out of
     the target/kernel (and thus never reach here; see above).  */
  if (software_breakpoint_inserted_here_p (aspace, breakpoint_pc)
      || (target_is_non_stop_p ()
	  && moribund_breakpoint_here_p (aspace, breakpoint_pc)))
    {
      /* Keep the PC write below out of the "record full" target's
	 log, so it is not treated as inferior execution.  */
      gdb::optional<scoped_restore_tmpl<int>> restore_operation_disable;

      if (record_full_is_used ())
	restore_operation_disable.emplace
	  (record_full_gdb_operation_disable_set ());

      /* When using hardware single-step, a SIGTRAP is reported for both
	 a completed single-step and a software breakpoint.  Need to
	 differentiate between the two, as the latter needs adjusting
	 but the former does not.

	 The SIGTRAP can be due to a completed hardware single-step only if
	  - we didn't insert software single-step breakpoints
	  - this thread is currently being stepped

	 If any of these events did not occur, we must have stopped due
	 to hitting a software breakpoint, and have to back up to the
	 breakpoint address.

	 As a special case, we could have hardware single-stepped a
	 software breakpoint.  In this case (prev_pc == breakpoint_pc),
	 we also need to back up to the breakpoint address.  */

      if (thread_has_single_step_breakpoints_set (thread)
	  || !currently_stepping (thread)
	  || (thread->stepped_breakpoint
	      && thread->prev_pc == breakpoint_pc))
	regcache_write_pc (regcache, breakpoint_pc);
    }
}
4919
4920 static bool
4921 stepped_in_from (frame_info_ptr frame, struct frame_id step_frame_id)
4922 {
4923 for (frame = get_prev_frame (frame);
4924 frame != nullptr;
4925 frame = get_prev_frame (frame))
4926 {
4927 if (get_frame_id (frame) == step_frame_id)
4928 return true;
4929
4930 if (get_frame_type (frame) != INLINE_FRAME)
4931 break;
4932 }
4933
4934 return false;
4935 }
4936
4937 /* Look for an inline frame that is marked for skip.
4938 If PREV_FRAME is TRUE start at the previous frame,
4939 otherwise start at the current frame. Stop at the
4940 first non-inline frame, or at the frame where the
4941 step started. */
4942
4943 static bool
4944 inline_frame_is_marked_for_skip (bool prev_frame, struct thread_info *tp)
4945 {
4946 frame_info_ptr frame = get_current_frame ();
4947
4948 if (prev_frame)
4949 frame = get_prev_frame (frame);
4950
4951 for (; frame != nullptr; frame = get_prev_frame (frame))
4952 {
4953 const char *fn = nullptr;
4954 symtab_and_line sal;
4955 struct symbol *sym;
4956
4957 if (get_frame_id (frame) == tp->control.step_frame_id)
4958 break;
4959 if (get_frame_type (frame) != INLINE_FRAME)
4960 break;
4961
4962 sal = find_frame_sal (frame);
4963 sym = get_frame_function (frame);
4964
4965 if (sym != nullptr)
4966 fn = sym->print_name ();
4967
4968 if (sal.line != 0
4969 && function_name_is_marked_for_skip (fn, sal))
4970 return true;
4971 }
4972
4973 return false;
4974 }
4975
4976 /* If the event thread has the stop requested flag set, pretend it
4977 stopped for a GDB_SIGNAL_0 (i.e., as if it stopped due to
4978 target_stop). */
4979
4980 static bool
4981 handle_stop_requested (struct execution_control_state *ecs)
4982 {
4983 if (ecs->event_thread->stop_requested)
4984 {
4985 ecs->ws.set_stopped (GDB_SIGNAL_0);
4986 handle_signal_stop (ecs);
4987 return true;
4988 }
4989 return false;
4990 }
4991
4992 /* Auxiliary function that handles syscall entry/return events.
4993 It returns true if the inferior should keep going (and GDB
4994 should ignore the event), or false if the event deserves to be
4995 processed. */
4996
static bool
handle_syscall_event (struct execution_control_state *ecs)
{
  struct regcache *regcache;
  int syscall_number;

  /* Make the event thread current so the target calls below operate
     on it.  */
  context_switch (ecs);

  regcache = get_thread_regcache (ecs->event_thread);
  syscall_number = ecs->ws.syscall_number ();
  ecs->event_thread->set_stop_pc (regcache_read_pc (regcache));

  if (catch_syscall_enabled () > 0
      && catching_syscall_number (syscall_number))
    {
      infrun_debug_printf ("syscall number=%d", syscall_number);

      /* Compute the stop bpstat so that any syscall catchpoint
	 matching this event is reflected in the thread's status.  */
      ecs->event_thread->control.stop_bpstat
	= bpstat_stop_status_nowatch (regcache->aspace (),
				      ecs->event_thread->stop_pc (),
				      ecs->event_thread, ecs->ws);

      /* An explicit stop request takes precedence over the
	 catchpoint.  */
      if (handle_stop_requested (ecs))
	return false;

      if (bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
	{
	  /* Catchpoint hit.  */
	  return false;
	}
    }

  /* Also honor a stop request when no syscall catchpoint applied at
     all (the branch above was not taken).  */
  if (handle_stop_requested (ecs))
    return false;

  /* If no catchpoint triggered for this, then keep going.  */
  keep_going (ecs);

  return true;
}
5037
5038 /* Lazily fill in the execution_control_state's stop_func_* fields. */
5039
5040 static void
5041 fill_in_stop_func (struct gdbarch *gdbarch,
5042 struct execution_control_state *ecs)
5043 {
5044 if (!ecs->stop_func_filled_in)
5045 {
5046 const block *block;
5047 const general_symbol_info *gsi;
5048
5049 /* Don't care about return value; stop_func_start and stop_func_name
5050 will both be 0 if it doesn't work. */
5051 find_pc_partial_function_sym (ecs->event_thread->stop_pc (),
5052 &gsi,
5053 &ecs->stop_func_start,
5054 &ecs->stop_func_end,
5055 &block);
5056 ecs->stop_func_name = gsi == nullptr ? nullptr : gsi->print_name ();
5057
5058 /* The call to find_pc_partial_function, above, will set
5059 stop_func_start and stop_func_end to the start and end
5060 of the range containing the stop pc. If this range
5061 contains the entry pc for the block (which is always the
5062 case for contiguous blocks), advance stop_func_start past
5063 the function's start offset and entrypoint. Note that
5064 stop_func_start is NOT advanced when in a range of a
5065 non-contiguous block that does not contain the entry pc. */
5066 if (block != nullptr
5067 && ecs->stop_func_start <= block->entry_pc ()
5068 && block->entry_pc () < ecs->stop_func_end)
5069 {
5070 ecs->stop_func_start
5071 += gdbarch_deprecated_function_start_offset (gdbarch);
5072
5073 /* PowerPC functions have a Local Entry Point (LEP) and a Global
5074 Entry Point (GEP). There is only one Entry Point (GEP = LEP) for
5075 other architectures. */
5076 ecs->stop_func_alt_start = ecs->stop_func_start;
5077
5078 if (gdbarch_skip_entrypoint_p (gdbarch))
5079 ecs->stop_func_start
5080 = gdbarch_skip_entrypoint (gdbarch, ecs->stop_func_start);
5081 }
5082
5083 ecs->stop_func_filled_in = 1;
5084 }
5085 }
5086
5087
5088 /* Return the STOP_SOON field of the inferior pointed at by ECS. */
5089
5090 static enum stop_kind
5091 get_inferior_stop_soon (execution_control_state *ecs)
5092 {
5093 struct inferior *inf = find_inferior_ptid (ecs->target, ecs->ptid);
5094
5095 gdb_assert (inf != nullptr);
5096 return inf->control.stop_soon;
5097 }
5098
5099 /* Poll for one event out of the current target. Store the resulting
5100 waitstatus in WS, and return the event ptid. Does not block. */
5101
5102 static ptid_t
5103 poll_one_curr_target (struct target_waitstatus *ws)
5104 {
5105 ptid_t event_ptid;
5106
5107 overlay_cache_invalid = 1;
5108
5109 /* Flush target cache before starting to handle each event.
5110 Target was running and cache could be stale. This is just a
5111 heuristic. Running threads may modify target memory, but we
5112 don't get any event. */
5113 target_dcache_invalidate ();
5114
5115 event_ptid = target_wait (minus_one_ptid, ws, TARGET_WNOHANG);
5116
5117 if (debug_infrun)
5118 print_target_wait_results (minus_one_ptid, event_ptid, *ws);
5119
5120 return event_ptid;
5121 }
5122
5123 /* Wait for one event out of any target. */
5124
static wait_one_event
wait_one ()
{
  while (1)
    {
      /* First, poll every async target that still has executing
	 threads, without blocking.  */
      for (inferior *inf : all_inferiors ())
	{
	  process_stratum_target *target = inf->process_target ();
	  if (target == nullptr
	      || !target->is_async_p ()
	      || !target->threads_executing)
	    continue;

	  switch_to_inferior_no_thread (inf);

	  wait_one_event event;
	  event.target = target;
	  event.ptid = poll_one_curr_target (&event.ws);

	  if (event.ws.kind () == TARGET_WAITKIND_NO_RESUMED)
	    {
	      /* If nothing is resumed, remove the target from the
		 event loop.  */
	      target_async (false);
	    }
	  else if (event.ws.kind () != TARGET_WAITKIND_IGNORE)
	    return event;
	}

      /* Block waiting for some event.  */

      fd_set readfds;
      int nfds = 0;

      FD_ZERO (&readfds);

      /* Collect the wait file descriptors of all pollable targets.  */
      for (inferior *inf : all_inferiors ())
	{
	  process_stratum_target *target = inf->process_target ();
	  if (target == nullptr
	      || !target->is_async_p ()
	      || !target->threads_executing)
	    continue;

	  int fd = target->async_wait_fd ();
	  FD_SET (fd, &readfds);
	  if (nfds <= fd)
	    nfds = fd + 1;
	}

      if (nfds == 0)
	{
	  /* No waitable targets left.  All must be stopped.  */
	  target_waitstatus ws;
	  ws.set_no_resumed ();
	  return {nullptr, minus_one_ptid, std::move (ws)};
	}

      QUIT;

      int numfds = interruptible_select (nfds, &readfds, 0, nullptr, 0);
      if (numfds < 0)
	{
	  /* EINTR just means the wait was interrupted; go around and
	     poll the targets again.  Any other error is fatal.  */
	  if (errno == EINTR)
	    continue;
	  else
	    perror_with_name ("interruptible_select");
	}
    }
}
5195
5196 /* Save the thread's event and stop reason to process it later. */
5197
static void
save_waitstatus (struct thread_info *tp, const target_waitstatus &ws)
{
  infrun_debug_printf ("saving status %s for %s",
		       ws.to_string ().c_str (),
		       tp->ptid.to_string ().c_str ());

  /* Record for later.  */
  tp->set_pending_waitstatus (ws);

  /* A SIGTRAP stop is ambiguous (breakpoint, watchpoint, or
     single-step); work out and record the precise stop reason.  */
  if (ws.kind () == TARGET_WAITKIND_STOPPED
      && ws.sig () == GDB_SIGNAL_TRAP)
    {
      struct regcache *regcache = get_thread_regcache (tp);
      const address_space *aspace = regcache->aspace ();
      /* NOTE(review): PC is read before adjust_pc_after_break below,
	 which may rewrite the thread's PC; the inserted_here_p checks
	 further down therefore use the unadjusted PC — confirm this
	 is intended.  */
      CORE_ADDR pc = regcache_read_pc (regcache);

      adjust_pc_after_break (tp, tp->pending_waitstatus ());

      /* The target queries below operate on the current thread.  */
      scoped_restore_current_thread restore_thread;
      switch_to_thread (tp);

      /* Ask the target first; when it cannot report the stop reason
	 itself, fall back to checking GDB's own breakpoint tables.  */
      if (target_stopped_by_watchpoint ())
	tp->set_stop_reason (TARGET_STOPPED_BY_WATCHPOINT);
      else if (target_supports_stopped_by_sw_breakpoint ()
	       && target_stopped_by_sw_breakpoint ())
	tp->set_stop_reason (TARGET_STOPPED_BY_SW_BREAKPOINT);
      else if (target_supports_stopped_by_hw_breakpoint ()
	       && target_stopped_by_hw_breakpoint ())
	tp->set_stop_reason (TARGET_STOPPED_BY_HW_BREAKPOINT);
      else if (!target_supports_stopped_by_hw_breakpoint ()
	       && hardware_breakpoint_inserted_here_p (aspace, pc))
	tp->set_stop_reason (TARGET_STOPPED_BY_HW_BREAKPOINT);
      else if (!target_supports_stopped_by_sw_breakpoint ()
	       && software_breakpoint_inserted_here_p (aspace, pc))
	tp->set_stop_reason (TARGET_STOPPED_BY_SW_BREAKPOINT);
      else if (!thread_has_single_step_breakpoints_set (tp)
	       && currently_stepping (tp))
	tp->set_stop_reason (TARGET_STOPPED_BY_SINGLE_STEP);
    }
}
5239
5240 /* Mark the non-executing threads accordingly. In all-stop, all
5241 threads of all processes are stopped when we get any event
5242 reported. In non-stop mode, only the event thread stops. */
5243
5244 static void
5245 mark_non_executing_threads (process_stratum_target *target,
5246 ptid_t event_ptid,
5247 const target_waitstatus &ws)
5248 {
5249 ptid_t mark_ptid;
5250
5251 if (!target_is_non_stop_p ())
5252 mark_ptid = minus_one_ptid;
5253 else if (ws.kind () == TARGET_WAITKIND_SIGNALLED
5254 || ws.kind () == TARGET_WAITKIND_EXITED)
5255 {
5256 /* If we're handling a process exit in non-stop mode, even
5257 though threads haven't been deleted yet, one would think
5258 that there is nothing to do, as threads of the dead process
5259 will be soon deleted, and threads of any other process were
5260 left running. However, on some targets, threads survive a
5261 process exit event. E.g., for the "checkpoint" command,
5262 when the current checkpoint/fork exits, linux-fork.c
5263 automatically switches to another fork from within
5264 target_mourn_inferior, by associating the same
5265 inferior/thread to another fork. We haven't mourned yet at
5266 this point, but we must mark any threads left in the
5267 process as not-executing so that finish_thread_state marks
5268 them stopped (in the user's perspective) if/when we present
5269 the stop to the user. */
5270 mark_ptid = ptid_t (event_ptid.pid ());
5271 }
5272 else
5273 mark_ptid = event_ptid;
5274
5275 set_executing (target, mark_ptid, false);
5276
5277 /* Likewise the resumed flag. */
5278 set_resumed (target, mark_ptid, false);
5279 }
5280
5281 /* Handle one event after stopping threads. If the eventing thread
5282 reports back any interesting event, we leave it pending. If the
5283 eventing thread was in the middle of a displaced step, we
5284 cancel/finish it, and unless the thread's inferior is being
5285 detached, put the thread back in the step-over chain. Returns true
5286 if there are no resumed threads left in the target (thus there's no
5287 point in waiting further), false otherwise. */
5288
static bool
handle_one (const wait_one_event &event)
{
  infrun_debug_printf
    ("%s %s", event.ws.to_string ().c_str (),
     event.ptid.to_string ().c_str ());

  if (event.ws.kind () == TARGET_WAITKIND_NO_RESUMED)
    {
      /* All resumed threads exited.  */
      return true;
    }
  else if (event.ws.kind () == TARGET_WAITKIND_THREAD_EXITED
	   || event.ws.kind () == TARGET_WAITKIND_EXITED
	   || event.ws.kind () == TARGET_WAITKIND_SIGNALLED)
    {
      /* One thread/process exited/signalled.  */

      thread_info *t = nullptr;

      /* The target may have reported just a pid.  If so, try
	 the first non-exited thread.  */
      if (event.ptid.is_pid ())
	{
	  int pid = event.ptid.pid ();
	  inferior *inf = find_inferior_pid (event.target, pid);
	  /* Grab the first non-exited thread, if any.  */
	  for (thread_info *tp : inf->non_exited_threads ())
	    {
	      t = tp;
	      break;
	    }

	  /* If there is no available thread, the event would
	     have to be appended to a per-inferior event list,
	     which does not exist (and if it did, we'd have
	     to adjust run control command to be able to
	     resume such an inferior).  We assert here instead
	     of going into an infinite loop.  */
	  gdb_assert (t != nullptr);

	  infrun_debug_printf
	    ("using %s", t->ptid.to_string ().c_str ());
	}
      else
	{
	  t = event.target->find_thread (event.ptid);
	  /* Check if this is the first time we see this thread.
	     Don't bother adding if it individually exited.  */
	  if (t == nullptr
	      && event.ws.kind () != TARGET_WAITKIND_THREAD_EXITED)
	    t = add_thread (event.target, event.ptid);
	}

      if (t != nullptr)
	{
	  /* Set the threads as non-executing to avoid
	     another stop attempt on them.  */
	  switch_to_thread_no_regs (t);
	  mark_non_executing_threads (event.target, event.ptid,
				      event.ws);
	  save_waitstatus (t, event.ws);
	  t->stop_requested = false;
	}
    }
  else
    {
      /* A regular stop event, for a thread we may or may not have
	 heard of before.  */
      thread_info *t = event.target->find_thread (event.ptid);
      if (t == nullptr)
	t = add_thread (event.target, event.ptid);

      t->stop_requested = 0;
      t->set_executing (false);
      t->set_resumed (false);
      t->control.may_range_step = 0;

      /* This may be the first time we see the inferior report
	 a stop.  */
      if (t->inf->needs_setup)
	{
	  switch_to_thread_no_regs (t);
	  setup_inferior (0);
	}

      if (event.ws.kind () == TARGET_WAITKIND_STOPPED
	  && event.ws.sig () == GDB_SIGNAL_0)
	{
	  /* We caught the event that we intended to catch, so
	     there's no event to save as pending.  */

	  if (displaced_step_finish (t, event.ws)
	      == DISPLACED_STEP_FINISH_STATUS_NOT_EXECUTED)
	    {
	      /* Add it back to the step-over queue.  */
	      infrun_debug_printf
		("displaced-step of %s canceled",
		 t->ptid.to_string ().c_str ());

	      t->control.trap_expected = 0;
	      if (!t->inf->detaching)
		global_thread_step_over_chain_enqueue (t);
	    }
	}
      else
	{
	  struct regcache *regcache;

	  infrun_debug_printf
	    ("target_wait %s, saving status for %s",
	     event.ws.to_string ().c_str (),
	     t->ptid.to_string ().c_str ());

	  /* Record for later.  */
	  save_waitstatus (t, event.ws);

	  if (displaced_step_finish (t, event.ws)
	      == DISPLACED_STEP_FINISH_STATUS_NOT_EXECUTED)
	    {
	      /* Add it back to the step-over queue.  */
	      t->control.trap_expected = 0;
	      if (!t->inf->detaching)
		global_thread_step_over_chain_enqueue (t);
	    }

	  regcache = get_thread_regcache (t);
	  t->set_stop_pc (regcache_read_pc (regcache));

	  infrun_debug_printf ("saved stop_pc=%s for %s "
			       "(currently_stepping=%d)",
			       paddress (current_inferior ()->arch (),
					 t->stop_pc ()),
			       t->ptid.to_string ().c_str (),
			       currently_stepping (t));
	}
    }

  /* There may still be resumed threads; keep waiting.  */
  return false;
}
5426
5427 /* See infrun.h. */
5428
void
stop_all_threads (const char *reason, inferior *inf)
{
  /* We may need multiple passes to discover all threads.  */
  int pass;
  int iterations = 0;

  gdb_assert (exists_non_stop_target ());

  INFRUN_SCOPED_DEBUG_START_END ("reason=%s, inf=%d", reason,
				 inf != nullptr ? inf->num : -1);

  infrun_debug_show_threads ("non-exited threads",
			     all_non_exited_threads ());

  scoped_restore_current_thread restore_thread;

  /* Enable thread events on relevant targets.  */
  for (auto *target : all_non_exited_process_targets ())
    {
      /* When INF is non-NULL, restrict to that inferior's target.  */
      if (inf != nullptr && inf->process_target () != target)
	continue;

      switch_to_target_no_thread (target);
      target_thread_events (true);
    }

  SCOPE_EXIT
    {
      /* Disable thread events on relevant targets.  */
      for (auto *target : all_non_exited_process_targets ())
	{
	  if (inf != nullptr && inf->process_target () != target)
	    continue;

	  switch_to_target_no_thread (target);
	  target_thread_events (false);
	}

      /* Use debug_prefixed_printf directly to get a meaningful function
	 name.  */
      if (debug_infrun)
	debug_prefixed_printf ("infrun", "stop_all_threads", "done");
    };

  /* Request threads to stop, and then wait for the stops.  Because
     threads we already know about can spawn more threads while we're
     trying to stop them, and we only learn about new threads when we
     update the thread list, do this in a loop, and keep iterating
     until two passes find no threads that need to be stopped.  */
  for (pass = 0; pass < 2; pass++, iterations++)
    {
      infrun_debug_printf ("pass=%d, iterations=%d", pass, iterations);
      while (1)
	{
	  int waits_needed = 0;

	  /* Refresh the thread lists so newly spawned threads become
	     visible.  */
	  for (auto *target : all_non_exited_process_targets ())
	    {
	      if (inf != nullptr && inf->process_target () != target)
		continue;

	      switch_to_target_no_thread (target);
	      update_thread_list ();
	    }

	  /* Go through all threads looking for threads that we need
	     to tell the target to stop.  */
	  for (thread_info *t : all_non_exited_threads ())
	    {
	      if (inf != nullptr && t->inf != inf)
		continue;

	      /* For a single-target setting with an all-stop target,
		 we would not even arrive here.  For a multi-target
		 setting, until GDB is able to handle a mixture of
		 all-stop and non-stop targets, simply skip all-stop
		 targets' threads.  This should be fine due to the
		 protection of 'check_multi_target_resumption'.  */

	      switch_to_thread_no_regs (t);
	      if (!target_is_non_stop_p ())
		continue;

	      if (t->executing ())
		{
		  /* If already stopping, don't request a stop again.
		     We just haven't seen the notification yet.  */
		  if (!t->stop_requested)
		    {
		      infrun_debug_printf ("  %s executing, need stop",
					   t->ptid.to_string ().c_str ());
		      target_stop (t->ptid);
		      t->stop_requested = 1;
		    }
		  else
		    {
		      infrun_debug_printf ("  %s executing, already stopping",
					   t->ptid.to_string ().c_str ());
		    }

		  if (t->stop_requested)
		    waits_needed++;
		}
	      else
		{
		  infrun_debug_printf ("  %s not executing",
				       t->ptid.to_string ().c_str ());

		  /* The thread may be not executing, but still be
		     resumed with a pending status to process.  */
		  t->set_resumed (false);
		}
	    }

	  /* All relevant threads are stopped; this inner loop is
	     done.  */
	  if (waits_needed == 0)
	    break;

	  /* If we find new threads on the second iteration, restart
	     over.  We want to see two iterations in a row with all
	     threads stopped.  */
	  if (pass > 0)
	    pass = -1;

	  /* Consume one event per pending stop request, bailing out
	     early if no resumed threads remain.  */
	  for (int i = 0; i < waits_needed; i++)
	    {
	      wait_one_event event = wait_one ();
	      if (handle_one (event))
		break;
	    }
	}
    }
}
5562
/* Handle a TARGET_WAITKIND_NO_RESUMED event.  Returns true if the
   event was consumed here (the caller should go back to waiting for
   events), or false if the event should be reported to the user.  */

static bool
handle_no_resumed (struct execution_control_state *ecs)
{
  if (target_can_async_p ())
    {
      /* Check whether any UI is synchronously blocked on an execution
	 command; if none is, a NO_RESUMED event in the background is
	 of no interest.  */
      bool any_sync = false;

      for (ui *ui : all_uis ())
	{
	  if (ui->prompt_state == PROMPT_BLOCKED)
	    {
	      any_sync = true;
	      break;
	    }
	}
      if (!any_sync)
	{
	  /* There were no unwaited-for children left in the target, but,
	     we're not synchronously waiting for events either.  Just
	     ignore.  */

	  infrun_debug_printf ("TARGET_WAITKIND_NO_RESUMED (ignoring: bg)");
	  prepare_to_wait (ecs);
	  return true;
	}
    }

  /* Otherwise, if we were running a synchronous execution command, we
     may need to cancel it and give the user back the terminal.

     In non-stop mode, the target can't tell whether we've already
     consumed previous stop events, so it can end up sending us a
     no-resumed event like so:

       #0 - thread 1 is left stopped

       #1 - thread 2 is resumed and hits breakpoint
	      -> TARGET_WAITKIND_STOPPED

       #2 - thread 3 is resumed and exits
	    this is the last resumed thread, so
	      -> TARGET_WAITKIND_NO_RESUMED

       #3 - gdb processes stop for thread 2 and decides to re-resume
	    it.

       #4 - gdb processes the TARGET_WAITKIND_NO_RESUMED event.
	    thread 2 is now resumed, so the event should be ignored.

     IOW, if the stop for thread 2 doesn't end a foreground command,
     then we need to ignore the following TARGET_WAITKIND_NO_RESUMED
     event.  But it could be that the event meant that thread 2 itself
     (or whatever other thread was the last resumed thread) exited.

     To address this we refresh the thread list and check whether we
     have resumed threads _now_.  In the example above, this removes
     thread 3 from the thread list.  If thread 2 was re-resumed, we
     ignore this event.  If we find no thread resumed, then we cancel
     the synchronous command and show "no unwaited-for children" to
     the user.  */

  inferior *curr_inf = current_inferior ();

  scoped_restore_current_thread restore_thread;
  update_thread_list ();

  /* If:

       - the current target has no thread executing, and
       - the current inferior is native, and
       - the current inferior is the one which has the terminal, and
       - we did nothing,

     then a Ctrl-C from this point on would remain stuck in the
     kernel, until a thread resumes and dequeues it.  That would
     result in the GDB CLI not reacting to Ctrl-C, not able to
     interrupt the program.  To address this, if the current inferior
     no longer has any thread executing, we give the terminal to some
     other inferior that has at least one thread executing.  */
  bool swap_terminal = true;

  /* Whether to ignore this TARGET_WAITKIND_NO_RESUMED event, or
     whether to report it to the user.  */
  bool ignore_event = false;

  for (thread_info *thread : all_non_exited_threads ())
    {
      if (swap_terminal && thread->executing ())
	{
	  if (thread->inf != curr_inf)
	    {
	      /* Release the terminal before switching, then hand it
		 to the inferior of the executing thread.  */
	      target_terminal::ours ();

	      switch_to_thread (thread);
	      target_terminal::inferior ();
	    }
	  swap_terminal = false;
	}

      if (!ignore_event && thread->resumed ())
	{
	  /* Either there were no unwaited-for children left in the
	     target at some point, but there are now, or some target
	     other than the eventing one has unwaited-for children
	     left.  Just ignore.  */
	  infrun_debug_printf ("TARGET_WAITKIND_NO_RESUMED "
			       "(ignoring: found resumed)");

	  ignore_event = true;
	}

      /* Both questions answered; no need to scan further.  */
      if (ignore_event && !swap_terminal)
	break;
    }

  if (ignore_event)
    {
      switch_to_inferior_no_thread (curr_inf);
      prepare_to_wait (ecs);
      return true;
    }

  /* Go ahead and report the event.  */
  return false;
}
5690
/* Given an execution control state that has been freshly filled in by
   an event from the inferior, figure out what it means and take
   appropriate action.

   The alternatives are:

   1) stop_waiting and return; to really stop and return to the
   debugger.

   2) keep_going and return; to wait for the next event (set
   ecs->event_thread->stepping_over_breakpoint to 1 to single step
   once).  */

static void
handle_inferior_event (struct execution_control_state *ecs)
{
  /* Make sure that all temporary struct value objects that were
     created during the handling of the event get deleted at the
     end.  */
  scoped_value_mark free_values;

  infrun_debug_printf ("%s", ecs->ws.to_string ().c_str ());

  if (ecs->ws.kind () == TARGET_WAITKIND_IGNORE)
    {
      /* We had an event in the inferior, but we are not interested in
	 handling it at this level.  The lower layers have already
	 done what needs to be done, if anything.

	 One of the possible circumstances for this is when the
	 inferior produces output for the console.  The inferior has
	 not stopped, and we are ignoring the event.  Another possible
	 circumstance is any event which the lower level knows will be
	 reported multiple times without an intervening resume.  */
      prepare_to_wait (ecs);
      return;
    }

  if (ecs->ws.kind () == TARGET_WAITKIND_THREAD_EXITED)
    {
      /* A thread-exit event is not reported to the user here; just
	 go back to waiting for further events.  */
      prepare_to_wait (ecs);
      return;
    }

  if (ecs->ws.kind () == TARGET_WAITKIND_NO_RESUMED
      && handle_no_resumed (ecs))
    return;

  /* Cache the last target/ptid/waitstatus.  */
  set_last_target_status (ecs->target, ecs->ptid, ecs->ws);

  /* Always clear state belonging to the previous time we stopped.  */
  stop_stack_dummy = STOP_NONE;

  if (ecs->ws.kind () == TARGET_WAITKIND_NO_RESUMED)
    {
      /* No unwaited-for children left.  IOW, all resumed children
	 have exited.  */
      stop_print_frame = false;
      stop_waiting (ecs);
      return;
    }

  if (ecs->ws.kind () != TARGET_WAITKIND_EXITED
      && ecs->ws.kind () != TARGET_WAITKIND_SIGNALLED)
    {
      ecs->event_thread = ecs->target->find_thread (ecs->ptid);
      /* If it's a new thread, add it to the thread database.  */
      if (ecs->event_thread == nullptr)
	ecs->event_thread = add_thread (ecs->target, ecs->ptid);

      /* Disable range stepping.  If the next step request could use a
	 range, this will be end up re-enabled then.  */
      ecs->event_thread->control.may_range_step = 0;
    }

  /* Dependent on valid ECS->EVENT_THREAD.  */
  adjust_pc_after_break (ecs->event_thread, ecs->ws);

  /* Dependent on the current PC value modified by adjust_pc_after_break.  */
  reinit_frame_cache ();

  breakpoint_retire_moribund ();

  /* First, distinguish signals caused by the debugger from signals
     that have to do with the program's own actions.  Note that
     breakpoint insns may cause SIGTRAP or SIGILL or SIGEMT, depending
     on the operating system version.  Here we detect when a SIGILL or
     SIGEMT is really a breakpoint and change it to SIGTRAP.  We do
     something similar for SIGSEGV, since a SIGSEGV will be generated
     when we're trying to execute a breakpoint instruction on a
     non-executable stack.  This happens for call dummy breakpoints
     for architectures like SPARC that place call dummies on the
     stack.  */
  if (ecs->ws.kind () == TARGET_WAITKIND_STOPPED
      && (ecs->ws.sig () == GDB_SIGNAL_ILL
	  || ecs->ws.sig () == GDB_SIGNAL_SEGV
	  || ecs->ws.sig () == GDB_SIGNAL_EMT))
    {
      struct regcache *regcache = get_thread_regcache (ecs->event_thread);

      /* Only rewrite the signal if a breakpoint is actually inserted
	 at the stop PC; an unrelated SIGILL/SIGSEGV/SIGEMT must still
	 be reported as such.  */
      if (breakpoint_inserted_here_p (regcache->aspace (),
				      regcache_read_pc (regcache)))
	{
	  infrun_debug_printf ("Treating signal as SIGTRAP");
	  ecs->ws.set_stopped (GDB_SIGNAL_TRAP);
	}
    }

  mark_non_executing_threads (ecs->target, ecs->ptid, ecs->ws);

  /* Dispatch on the event kind.  Every case either stops
     (stop_waiting), resumes (keep_going / resume + prepare_to_wait),
     or defers to a more specific handler.  */
  switch (ecs->ws.kind ())
    {
    case TARGET_WAITKIND_LOADED:
      {
	context_switch (ecs);
	/* Ignore gracefully during startup of the inferior, as it might
	   be the shell which has just loaded some objects, otherwise
	   add the symbols for the newly loaded objects.  Also ignore at
	   the beginning of an attach or remote session; we will query
	   the full list of libraries once the connection is
	   established.  */

	stop_kind stop_soon = get_inferior_stop_soon (ecs);
	if (stop_soon == NO_STOP_QUIETLY)
	  {
	    struct regcache *regcache;

	    regcache = get_thread_regcache (ecs->event_thread);

	    handle_solib_event ();

	    ecs->event_thread->set_stop_pc (regcache_read_pc (regcache));
	    ecs->event_thread->control.stop_bpstat
	      = bpstat_stop_status_nowatch (regcache->aspace (),
					    ecs->event_thread->stop_pc (),
					    ecs->event_thread, ecs->ws);

	    if (handle_stop_requested (ecs))
	      return;

	    if (bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
	      {
		/* A catchpoint triggered.  */
		process_event_stop_test (ecs);
		return;
	      }

	    /* If requested, stop when the dynamic linker notifies
	       gdb of events.  This allows the user to get control
	       and place breakpoints in initializer routines for
	       dynamically loaded objects (among other things).  */
	    ecs->event_thread->set_stop_signal (GDB_SIGNAL_0);
	    if (stop_on_solib_events)
	      {
		/* Make sure we print "Stopped due to solib-event" in
		   normal_stop.  */
		stop_print_frame = true;

		stop_waiting (ecs);
		return;
	      }
	  }

	/* If we are skipping through a shell, or through shared library
	   loading that we aren't interested in, resume the program.  If
	   we're running the program normally, also resume.  */
	if (stop_soon == STOP_QUIETLY || stop_soon == NO_STOP_QUIETLY)
	  {
	    /* Loading of shared libraries might have changed breakpoint
	       addresses.  Make sure new breakpoints are inserted.  */
	    if (stop_soon == NO_STOP_QUIETLY)
	      insert_breakpoints ();
	    resume (GDB_SIGNAL_0);
	    prepare_to_wait (ecs);
	    return;
	  }

	/* But stop if we're attaching or setting up a remote
	   connection.  */
	if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
	    || stop_soon == STOP_QUIETLY_REMOTE)
	  {
	    infrun_debug_printf ("quietly stopped");
	    stop_waiting (ecs);
	    return;
	  }

	internal_error (_("unhandled stop_soon: %d"), (int) stop_soon);
      }

    case TARGET_WAITKIND_SPURIOUS:
      if (handle_stop_requested (ecs))
	return;
      context_switch (ecs);
      resume (GDB_SIGNAL_0);
      prepare_to_wait (ecs);
      return;

    case TARGET_WAITKIND_THREAD_CREATED:
      if (handle_stop_requested (ecs))
	return;
      context_switch (ecs);
      if (!switch_back_to_stepped_thread (ecs))
	keep_going (ecs);
      return;

    case TARGET_WAITKIND_EXITED:
    case TARGET_WAITKIND_SIGNALLED:
      {
	/* Depending on the system, ecs->ptid may point to a thread or
	   to a process.  On some targets, target_mourn_inferior may
	   need to have access to the just-exited thread.  That is the
	   case of GNU/Linux's "checkpoint" support, for example.
	   Call the switch_to_xxx routine as appropriate.  */
	thread_info *thr = ecs->target->find_thread (ecs->ptid);
	if (thr != nullptr)
	  switch_to_thread (thr);
	else
	  {
	    inferior *inf = find_inferior_ptid (ecs->target, ecs->ptid);
	    switch_to_inferior_no_thread (inf);
	  }
      }
      handle_vfork_child_exec_or_exit (0);
      target_terminal::ours ();	/* Must do this before mourn anyway.  */

      /* Clearing any previous state of convenience variables.  */
      clear_exit_convenience_vars ();

      if (ecs->ws.kind () == TARGET_WAITKIND_EXITED)
	{
	  /* Record the exit code in the convenience variable $_exitcode, so
	     that the user can inspect this again later.  */
	  set_internalvar_integer (lookup_internalvar ("_exitcode"),
				   (LONGEST) ecs->ws.exit_status ());

	  /* Also record this in the inferior itself.  */
	  current_inferior ()->has_exit_code = true;
	  current_inferior ()->exit_code = (LONGEST) ecs->ws.exit_status ();

	  /* Support the --return-child-result option.  */
	  return_child_result_value = ecs->ws.exit_status ();

	  interps_notify_exited (ecs->ws.exit_status ());
	}
      else
	{
	  struct gdbarch *gdbarch = current_inferior ()->arch ();

	  if (gdbarch_gdb_signal_to_target_p (gdbarch))
	    {
	      /* Set the value of the internal variable $_exitsignal,
		 which holds the signal uncaught by the inferior.  */
	      set_internalvar_integer (lookup_internalvar ("_exitsignal"),
				       gdbarch_gdb_signal_to_target (gdbarch,
								     ecs->ws.sig ()));
	    }
	  else
	    {
	      /* We don't have access to the target's method used for
		 converting between signal numbers (GDB's internal
		 representation <-> target's representation).
		 Therefore, we cannot do a good job at displaying this
		 information to the user.  It's better to just warn
		 her about it (if infrun debugging is enabled), and
		 give up.  */
	      infrun_debug_printf ("Cannot fill $_exitsignal with the correct "
				   "signal number.");
	    }

	  interps_notify_signal_exited (ecs->ws.sig ());
	}

      gdb_flush (gdb_stdout);
      target_mourn_inferior (inferior_ptid);
      stop_print_frame = false;
      stop_waiting (ecs);
      return;

    case TARGET_WAITKIND_FORKED:
    case TARGET_WAITKIND_VFORKED:
    case TARGET_WAITKIND_THREAD_CLONED:

      /* Finish any displaced step the event thread may have been
	 performing before we mutate any further state.  */
      displaced_step_finish (ecs->event_thread, ecs->ws);

      /* Start a new step-over in another thread if there's one that
	 needs it.  */
      start_step_over ();

      context_switch (ecs);

      /* Immediately detach breakpoints from the child before there's
	 any chance of letting the user delete breakpoints from the
	 breakpoint lists.  If we don't do this early, it's easy to
	 leave left over traps in the child, vis: "break foo; catch
	 fork; c; <fork>; del; c; <child calls foo>".  We only follow
	 the fork on the last `continue', and by that time the
	 breakpoint at "foo" is long gone from the breakpoint table.
	 If we vforked, then we don't need to unpatch here, since both
	 parent and child are sharing the same memory pages; we'll
	 need to unpatch at follow/detach time instead to be certain
	 that new breakpoints added between catchpoint hit time and
	 vfork follow are detached.  */
      if (ecs->ws.kind () == TARGET_WAITKIND_FORKED)
	{
	  /* This won't actually modify the breakpoint list, but will
	     physically remove the breakpoints from the child.  */
	  detach_breakpoints (ecs->ws.child_ptid ());
	}

      delete_just_stopped_threads_single_step_breakpoints ();

      /* In case the event is caught by a catchpoint, remember that
	 the event is to be followed at the next resume of the thread,
	 and not immediately.  */
      ecs->event_thread->pending_follow = ecs->ws;

      ecs->event_thread->set_stop_pc
	(regcache_read_pc (get_thread_regcache (ecs->event_thread)));

      ecs->event_thread->control.stop_bpstat
	= bpstat_stop_status_nowatch (get_current_regcache ()->aspace (),
				      ecs->event_thread->stop_pc (),
				      ecs->event_thread, ecs->ws);

      if (handle_stop_requested (ecs))
	return;

      /* If no catchpoint triggered for this, then keep going.  Note
	 that we're interested in knowing the bpstat actually causes a
	 stop, not just if it may explain the signal.  Software
	 watchpoints, for example, always appear in the bpstat.  */
      if (!bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
	{
	  /* "follow child" never applies to clone events; only to
	     fork/vfork, per the follow-fork-mode setting.  */
	  bool follow_child
	    = (ecs->ws.kind () != TARGET_WAITKIND_THREAD_CLONED
	       && follow_fork_mode_string == follow_fork_mode_child);

	  ecs->event_thread->set_stop_signal (GDB_SIGNAL_0);

	  process_stratum_target *targ
	    = ecs->event_thread->inf->process_target ();

	  bool should_resume;
	  if (ecs->ws.kind () != TARGET_WAITKIND_THREAD_CLONED)
	    should_resume = follow_fork ();
	  else
	    {
	      should_resume = true;
	      inferior *inf = ecs->event_thread->inf;
	      inf->top_target ()->follow_clone (ecs->ws.child_ptid ());
	      ecs->event_thread->pending_follow.set_spurious ();
	    }

	  /* Note that one of these may be an invalid pointer,
	     depending on detach_fork.  */
	  thread_info *parent = ecs->event_thread;
	  thread_info *child = targ->find_thread (ecs->ws.child_ptid ());

	  /* At this point, the parent is marked running, and the
	     child is marked stopped.  */

	  /* If not resuming the parent, mark it stopped.  */
	  if (ecs->ws.kind () != TARGET_WAITKIND_THREAD_CLONED
	      && follow_child && !detach_fork && !non_stop && !sched_multi)
	    parent->set_running (false);

	  /* If resuming the child, mark it running.  */
	  if (ecs->ws.kind () == TARGET_WAITKIND_THREAD_CLONED
	      || (follow_child || (!detach_fork && (non_stop || sched_multi))))
	    child->set_running (true);

	  /* In non-stop mode, also resume the other branch.  */
	  if ((ecs->ws.kind () == TARGET_WAITKIND_THREAD_CLONED
	       && target_is_non_stop_p ())
	      || (!detach_fork && (non_stop
				   || (sched_multi
				       && target_is_non_stop_p ()))))
	    {
	      if (follow_child)
		switch_to_thread (parent);
	      else
		switch_to_thread (child);

	      ecs->event_thread = inferior_thread ();
	      ecs->ptid = inferior_ptid;
	      keep_going (ecs);
	    }

	  if (follow_child)
	    switch_to_thread (child);
	  else
	    switch_to_thread (parent);

	  ecs->event_thread = inferior_thread ();
	  ecs->ptid = inferior_ptid;

	  if (should_resume)
	    {
	      /* Never call switch_back_to_stepped_thread if we are waiting for
		 vfork-done (waiting for an external vfork child to exec or
		 exit).  We will resume only the vforking thread for the purpose
		 of collecting the vfork-done event, and we will restart any
		 step once the critical shared address space window is done.  */
	      if ((!follow_child
		   && detach_fork
		   && parent->inf->thread_waiting_for_vfork_done != nullptr)
		  || !switch_back_to_stepped_thread (ecs))
		keep_going (ecs);
	    }
	  else
	    stop_waiting (ecs);
	  return;
	}
      process_event_stop_test (ecs);
      return;

    case TARGET_WAITKIND_VFORK_DONE:
      /* Done with the shared memory region.  Re-insert breakpoints in
	 the parent, and keep going.  */

      context_switch (ecs);

      handle_vfork_done (ecs->event_thread);
      gdb_assert (inferior_thread () == ecs->event_thread);

      if (handle_stop_requested (ecs))
	return;

      if (!switch_back_to_stepped_thread (ecs))
	{
	  gdb_assert (inferior_thread () == ecs->event_thread);
	  /* This also takes care of reinserting breakpoints in the
	     previously locked inferior.  */
	  keep_going (ecs);
	}
      return;

    case TARGET_WAITKIND_EXECD:

      /* Note we can't read registers yet (the stop_pc), because we
	 don't yet know the inferior's post-exec architecture.
	 'stop_pc' is explicitly read below instead.  */
      switch_to_thread_no_regs (ecs->event_thread);

      /* Do whatever is necessary to the parent branch of the vfork.  */
      handle_vfork_child_exec_or_exit (1);

      /* This causes the eventpoints and symbol table to be reset.
	 Must do this now, before trying to determine whether to
	 stop.  */
      follow_exec (inferior_ptid, ecs->ws.execd_pathname ());

      /* In follow_exec we may have deleted the original thread and
	 created a new one.  Make sure that the event thread is the
	 execd thread for that case (this is a nop otherwise).  */
      ecs->event_thread = inferior_thread ();

      ecs->event_thread->set_stop_pc
	(regcache_read_pc (get_thread_regcache (ecs->event_thread)));

      ecs->event_thread->control.stop_bpstat
	= bpstat_stop_status_nowatch (get_current_regcache ()->aspace (),
				      ecs->event_thread->stop_pc (),
				      ecs->event_thread, ecs->ws);

      if (handle_stop_requested (ecs))
	return;

      /* If no catchpoint triggered for this, then keep going.  */
      if (!bpstat_causes_stop (ecs->event_thread->control.stop_bpstat))
	{
	  ecs->event_thread->set_stop_signal (GDB_SIGNAL_0);
	  keep_going (ecs);
	  return;
	}
      process_event_stop_test (ecs);
      return;

      /* Be careful not to try to gather much state about a thread
	 that's in a syscall.  It's frequently a losing proposition.  */
    case TARGET_WAITKIND_SYSCALL_ENTRY:
      /* Getting the current syscall number.  */
      if (handle_syscall_event (ecs) == 0)
	process_event_stop_test (ecs);
      return;

      /* Before examining the threads further, step this thread to
	 get it entirely out of the syscall.  (We get notice of the
	 event when the thread is just on the verge of exiting a
	 syscall.  Stepping one instruction seems to get it back
	 into user code.)  */
    case TARGET_WAITKIND_SYSCALL_RETURN:
      if (handle_syscall_event (ecs) == 0)
	process_event_stop_test (ecs);
      return;

    case TARGET_WAITKIND_STOPPED:
      handle_signal_stop (ecs);
      return;

    case TARGET_WAITKIND_NO_HISTORY:
      /* Reverse execution: target ran out of history info.  */

      /* Switch to the stopped thread.  */
      context_switch (ecs);
      infrun_debug_printf ("stopped");

      delete_just_stopped_threads_single_step_breakpoints ();
      ecs->event_thread->set_stop_pc
	(regcache_read_pc (get_thread_regcache (inferior_thread ())));

      if (handle_stop_requested (ecs))
	return;

      interps_notify_no_history ();
      stop_waiting (ecs);
      return;
    }
}
6212
/* Restart threads back to what they were trying to do back when we
   paused them (because of an in-line step-over or vfork, for example).
   The EVENT_THREAD thread is ignored (not restarted).

   If INF is non-nullptr, only resume threads from INF.  */

static void
restart_threads (struct thread_info *event_thread, inferior *inf)
{
  INFRUN_SCOPED_DEBUG_START_END ("event_thread=%s, inf=%d",
				 event_thread->ptid.to_string ().c_str (),
				 inf != nullptr ? inf->num : -1);

  /* A step-over must not be in progress while we restart threads.  */
  gdb_assert (!step_over_info_valid_p ());

  /* In case the instruction just stepped spawned a new thread.  */
  update_thread_list ();

  /* Walk all live threads, filtering out those that must not be
     restarted; the order of the guard clauses below matters.  */
  for (thread_info *tp : all_non_exited_threads ())
    {
      if (inf != nullptr && tp->inf != inf)
	continue;

      if (tp->inf->detaching)
	{
	  infrun_debug_printf ("restart threads: [%s] inferior detaching",
			       tp->ptid.to_string ().c_str ());
	  continue;
	}

      switch_to_thread_no_regs (tp);

      if (tp == event_thread)
	{
	  infrun_debug_printf ("restart threads: [%s] is event thread",
			       tp->ptid.to_string ().c_str ());
	  continue;
	}

      if (!(tp->state == THREAD_RUNNING || tp->control.in_infcall))
	{
	  infrun_debug_printf ("restart threads: [%s] not meant to be running",
			       tp->ptid.to_string ().c_str ());
	  continue;
	}

      if (tp->resumed ())
	{
	  infrun_debug_printf ("restart threads: [%s] resumed",
			      tp->ptid.to_string ().c_str ());
	  gdb_assert (tp->executing () || tp->has_pending_waitstatus ());
	  continue;
	}

      if (thread_is_in_step_over_chain (tp))
	{
	  infrun_debug_printf ("restart threads: [%s] needs step-over",
			       tp->ptid.to_string ().c_str ());
	  gdb_assert (!tp->resumed ());
	  continue;
	}


      if (tp->has_pending_waitstatus ())
	{
	  infrun_debug_printf ("restart threads: [%s] has pending status",
			       tp->ptid.to_string ().c_str ());
	  /* Mark it resumed so do_target_wait will pick the pending
	     event up later, instead of actually re-resuming it.  */
	  tp->set_resumed (true);
	  continue;
	}

      gdb_assert (!tp->stop_requested);

      /* If some thread needs to start a step-over at this point, it
	 should still be in the step-over queue, and thus skipped
	 above.  */
      if (thread_still_needs_step_over (tp))
	{
	  internal_error ("thread [%s] needs a step-over, but not in "
			  "step-over queue\n",
			  tp->ptid.to_string ().c_str ());
	}

      if (currently_stepping (tp))
	{
	  infrun_debug_printf ("restart threads: [%s] was stepping",
			       tp->ptid.to_string ().c_str ());
	  keep_going_stepped_thread (tp);
	}
      else
	{
	  infrun_debug_printf ("restart threads: [%s] continuing",
			       tp->ptid.to_string ().c_str ());
	  execution_control_state ecs (tp);
	  switch_to_thread (tp);
	  keep_going_pass_signal (&ecs);
	}
    }
}
6312
6313 /* Callback for iterate_over_threads. Find a resumed thread that has
6314 a pending waitstatus. */
6315
6316 static int
6317 resumed_thread_with_pending_status (struct thread_info *tp,
6318 void *arg)
6319 {
6320 return tp->resumed () && tp->has_pending_waitstatus ();
6321 }
6322
/* Called when we get an event that may finish an in-line or
   out-of-line (displaced stepping) step-over started previously.
   Return nonzero if the event is processed and we should go back to
   the event loop; zero if the caller should continue processing the
   event.  */

static int
finish_step_over (struct execution_control_state *ecs)
{
  displaced_step_finish (ecs->event_thread, ecs->ws);

  bool had_step_over_info = step_over_info_valid_p ();

  if (had_step_over_info)
    {
      /* If we're stepping over a breakpoint with all threads locked,
	 then only the thread that was stepped should be reporting
	 back an event.  */
      gdb_assert (ecs->event_thread->control.trap_expected);

      update_thread_events_after_step_over (ecs->event_thread);

      clear_step_over_info ();
    }

  /* The rest of this function only applies to non-stop targets,
     where other threads keep running during a step-over.  */
  if (!target_is_non_stop_p ())
    return 0;

  /* Start a new step-over in another thread if there's one that
     needs it.  */
  start_step_over ();

  /* If we were stepping over a breakpoint before, and haven't started
     a new in-line step-over sequence, then restart all other threads
     (except the event thread).  We can't do this in all-stop, as then
     e.g., we wouldn't be able to issue any other remote packet until
     these other threads stop.  */
  if (had_step_over_info && !step_over_info_valid_p ())
    {
      struct thread_info *pending;

      /* If we only have threads with pending statuses, the restart
	 below won't restart any thread and so nothing re-inserts the
	 breakpoint we just stepped over.  But we need it inserted
	 when we later process the pending events, otherwise if
	 another thread has a pending event for this breakpoint too,
	 we'd discard its event (because the breakpoint that
	 originally caused the event was no longer inserted).  */
      context_switch (ecs);
      insert_breakpoints ();

      restart_threads (ecs->event_thread);

      /* If we have events pending, go through handle_inferior_event
	 again, picking up a pending event at random.  This avoids
	 thread starvation.  */

      /* But not if we just stepped over a watchpoint in order to let
	 the instruction execute so we can evaluate its expression.
	 The set of watchpoints that triggered is recorded in the
	 breakpoint objects themselves (see bp->watchpoint_triggered).
	 If we processed another event first, that other event could
	 clobber this info.  */
      if (ecs->event_thread->stepping_over_watchpoint)
	return 0;

      pending = iterate_over_threads (resumed_thread_with_pending_status,
				      nullptr);
      if (pending != nullptr)
	{
	  struct thread_info *tp = ecs->event_thread;
	  struct regcache *regcache;

	  infrun_debug_printf ("found resumed threads with "
			       "pending events, saving status");

	  gdb_assert (pending != tp);

	  /* Record the event thread's event for later.  */
	  save_waitstatus (tp, ecs->ws);
	  /* This was cleared early, by handle_inferior_event.  Set it
	     so this pending event is considered by
	     do_target_wait.  */
	  tp->set_resumed (true);

	  gdb_assert (!tp->executing ());

	  regcache = get_thread_regcache (tp);
	  tp->set_stop_pc (regcache_read_pc (regcache));

	  infrun_debug_printf ("saved stop_pc=%s for %s "
			       "(currently_stepping=%d)",
			       paddress (current_inferior ()->arch (),
					 tp->stop_pc ()),
			       tp->ptid.to_string ().c_str (),
			       currently_stepping (tp));

	  /* This in-line step-over finished; clear this so we won't
	     start a new one.  This is what handle_signal_stop would
	     do, if we returned false.  */
	  tp->stepping_over_breakpoint = 0;

	  /* Wake up the event loop again.  */
	  mark_async_event_handler (infrun_async_inferior_event_token);

	  prepare_to_wait (ecs);
	  return 1;
	}
    }

  return 0;
}
6435
6436 /* See infrun.h. */
6437
void
notify_signal_received (gdb_signal sig)
{
  /* Notify the interpreters first, then fire the signal_received
     observable for any attached observers.  */
  interps_notify_signal_received (sig);
  gdb::observers::signal_received.notify (sig);
}
6444
6445 /* See infrun.h. */
6446
void
notify_normal_stop (bpstat *bs, int print_frame)
{
  /* Notify the interpreters first, then fire the normal_stop
     observable for any attached observers.  */
  interps_notify_normal_stop (bs, print_frame);
  gdb::observers::normal_stop.notify (bs, print_frame);
}
6453
6454 /* See infrun.h. */
6455
6456 void notify_user_selected_context_changed (user_selected_what selection)
6457 {
6458 interps_notify_user_selected_context_changed (selection);
6459 gdb::observers::user_selected_context_changed.notify (selection);
6460 }
6461
6462 /* Come here when the program has stopped with a signal. */
6463
6464 static void
6465 handle_signal_stop (struct execution_control_state *ecs)
6466 {
6467 frame_info_ptr frame;
6468 struct gdbarch *gdbarch;
6469 int stopped_by_watchpoint;
6470 enum stop_kind stop_soon;
6471 int random_signal;
6472
6473 gdb_assert (ecs->ws.kind () == TARGET_WAITKIND_STOPPED);
6474
6475 ecs->event_thread->set_stop_signal (ecs->ws.sig ());
6476
6477 /* Do we need to clean up the state of a thread that has
6478 completed a displaced single-step? (Doing so usually affects
6479 the PC, so do it here, before we set stop_pc.) */
6480 if (finish_step_over (ecs))
6481 return;
6482
6483 /* If we either finished a single-step or hit a breakpoint, but
6484 the user wanted this thread to be stopped, pretend we got a
6485 SIG0 (generic unsignaled stop). */
6486 if (ecs->event_thread->stop_requested
6487 && ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP)
6488 ecs->event_thread->set_stop_signal (GDB_SIGNAL_0);
6489
6490 ecs->event_thread->set_stop_pc
6491 (regcache_read_pc (get_thread_regcache (ecs->event_thread)));
6492
6493 context_switch (ecs);
6494
6495 if (deprecated_context_hook)
6496 deprecated_context_hook (ecs->event_thread->global_num);
6497
6498 if (debug_infrun)
6499 {
6500 struct regcache *regcache = get_thread_regcache (ecs->event_thread);
6501 struct gdbarch *reg_gdbarch = regcache->arch ();
6502
6503 infrun_debug_printf
6504 ("stop_pc=%s", paddress (reg_gdbarch, ecs->event_thread->stop_pc ()));
6505 if (target_stopped_by_watchpoint ())
6506 {
6507 CORE_ADDR addr;
6508
6509 infrun_debug_printf ("stopped by watchpoint");
6510
6511 if (target_stopped_data_address (current_inferior ()->top_target (),
6512 &addr))
6513 infrun_debug_printf ("stopped data address=%s",
6514 paddress (reg_gdbarch, addr));
6515 else
6516 infrun_debug_printf ("(no data address available)");
6517 }
6518 }
6519
6520 /* This is originated from start_remote(), start_inferior() and
6521 shared libraries hook functions. */
6522 stop_soon = get_inferior_stop_soon (ecs);
6523 if (stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_REMOTE)
6524 {
6525 infrun_debug_printf ("quietly stopped");
6526 stop_print_frame = true;
6527 stop_waiting (ecs);
6528 return;
6529 }
6530
6531 /* This originates from attach_command(). We need to overwrite
6532 the stop_signal here, because some kernels don't ignore a
6533 SIGSTOP in a subsequent ptrace(PTRACE_CONT,SIGSTOP) call.
6534 See more comments in inferior.h. On the other hand, if we
6535 get a non-SIGSTOP, report it to the user - assume the backend
6536 will handle the SIGSTOP if it should show up later.
6537
6538 Also consider that the attach is complete when we see a
6539 SIGTRAP. Some systems (e.g. Windows), and stubs supporting
6540 target extended-remote report it instead of a SIGSTOP
6541 (e.g. gdbserver). We already rely on SIGTRAP being our
6542 signal, so this is no exception.
6543
6544 Also consider that the attach is complete when we see a
6545 GDB_SIGNAL_0. In non-stop mode, GDB will explicitly tell
6546 the target to stop all threads of the inferior, in case the
6547 low level attach operation doesn't stop them implicitly. If
6548 they weren't stopped implicitly, then the stub will report a
6549 GDB_SIGNAL_0, meaning: stopped for no particular reason
6550 other than GDB's request. */
6551 if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
6552 && (ecs->event_thread->stop_signal () == GDB_SIGNAL_STOP
6553 || ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP
6554 || ecs->event_thread->stop_signal () == GDB_SIGNAL_0))
6555 {
6556 stop_print_frame = true;
6557 stop_waiting (ecs);
6558 ecs->event_thread->set_stop_signal (GDB_SIGNAL_0);
6559 return;
6560 }
6561
6562 /* At this point, get hold of the now-current thread's frame. */
6563 frame = get_current_frame ();
6564 gdbarch = get_frame_arch (frame);
6565
6566 /* Pull the single step breakpoints out of the target. */
6567 if (ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP)
6568 {
6569 struct regcache *regcache;
6570 CORE_ADDR pc;
6571
6572 regcache = get_thread_regcache (ecs->event_thread);
6573 const address_space *aspace = regcache->aspace ();
6574
6575 pc = regcache_read_pc (regcache);
6576
6577 /* However, before doing so, if this single-step breakpoint was
6578 actually for another thread, set this thread up for moving
6579 past it. */
6580 if (!thread_has_single_step_breakpoint_here (ecs->event_thread,
6581 aspace, pc))
6582 {
6583 if (single_step_breakpoint_inserted_here_p (aspace, pc))
6584 {
6585 infrun_debug_printf ("[%s] hit another thread's single-step "
6586 "breakpoint",
6587 ecs->ptid.to_string ().c_str ());
6588 ecs->hit_singlestep_breakpoint = 1;
6589 }
6590 }
6591 else
6592 {
6593 infrun_debug_printf ("[%s] hit its single-step breakpoint",
6594 ecs->ptid.to_string ().c_str ());
6595 }
6596 }
6597 delete_just_stopped_threads_single_step_breakpoints ();
6598
6599 if (ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP
6600 && ecs->event_thread->control.trap_expected
6601 && ecs->event_thread->stepping_over_watchpoint)
6602 stopped_by_watchpoint = 0;
6603 else
6604 stopped_by_watchpoint = watchpoints_triggered (ecs->ws);
6605
6606 /* If necessary, step over this watchpoint. We'll be back to display
6607 it in a moment. */
6608 if (stopped_by_watchpoint
6609 && (target_have_steppable_watchpoint ()
6610 || gdbarch_have_nonsteppable_watchpoint (gdbarch)))
6611 {
6612 /* At this point, we are stopped at an instruction which has
6613 attempted to write to a piece of memory under control of
6614 a watchpoint. The instruction hasn't actually executed
6615 yet. If we were to evaluate the watchpoint expression
6616 now, we would get the old value, and therefore no change
6617 would seem to have occurred.
6618
6619 In order to make watchpoints work `right', we really need
6620 to complete the memory write, and then evaluate the
6621 watchpoint expression. We do this by single-stepping the
6622 target.
6623
6624 It may not be necessary to disable the watchpoint to step over
6625 it. For example, the PA can (with some kernel cooperation)
6626 single step over a watchpoint without disabling the watchpoint.
6627
6628 It is far more common to need to disable a watchpoint to step
6629 the inferior over it. If we have non-steppable watchpoints,
6630 we must disable the current watchpoint; it's simplest to
6631 disable all watchpoints.
6632
6633 Any breakpoint at PC must also be stepped over -- if there's
6634 one, it will have already triggered before the watchpoint
6635 triggered, and we either already reported it to the user, or
6636 it didn't cause a stop and we called keep_going. In either
6637 case, if there was a breakpoint at PC, we must be trying to
6638 step past it. */
6639 ecs->event_thread->stepping_over_watchpoint = 1;
6640 keep_going (ecs);
6641 return;
6642 }
6643
6644 ecs->event_thread->stepping_over_breakpoint = 0;
6645 ecs->event_thread->stepping_over_watchpoint = 0;
6646 bpstat_clear (&ecs->event_thread->control.stop_bpstat);
6647 ecs->event_thread->control.stop_step = 0;
6648 stop_print_frame = true;
6649 stopped_by_random_signal = 0;
6650 bpstat *stop_chain = nullptr;
6651
6652 /* Hide inlined functions starting here, unless we just performed stepi or
6653 nexti. After stepi and nexti, always show the innermost frame (not any
6654 inline function call sites). */
6655 if (ecs->event_thread->control.step_range_end != 1)
6656 {
6657 const address_space *aspace
6658 = get_thread_regcache (ecs->event_thread)->aspace ();
6659
6660 /* skip_inline_frames is expensive, so we avoid it if we can
6661 determine that the address is one where functions cannot have
6662 been inlined. This improves performance with inferiors that
6663 load a lot of shared libraries, because the solib event
6664 breakpoint is defined as the address of a function (i.e. not
6665 inline). Note that we have to check the previous PC as well
6666 as the current one to catch cases when we have just
6667 single-stepped off a breakpoint prior to reinstating it.
6668 Note that we're assuming that the code we single-step to is
6669 not inline, but that's not definitive: there's nothing
6670 preventing the event breakpoint function from containing
6671 inlined code, and the single-step ending up there. If the
6672 user had set a breakpoint on that inlined code, the missing
6673 skip_inline_frames call would break things. Fortunately
6674 that's an extremely unlikely scenario. */
6675 if (!pc_at_non_inline_function (aspace,
6676 ecs->event_thread->stop_pc (),
6677 ecs->ws)
6678 && !(ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP
6679 && ecs->event_thread->control.trap_expected
6680 && pc_at_non_inline_function (aspace,
6681 ecs->event_thread->prev_pc,
6682 ecs->ws)))
6683 {
6684 stop_chain = build_bpstat_chain (aspace,
6685 ecs->event_thread->stop_pc (),
6686 ecs->ws);
6687 skip_inline_frames (ecs->event_thread, stop_chain);
6688
6689 /* Re-fetch current thread's frame in case that invalidated
6690 the frame cache. */
6691 frame = get_current_frame ();
6692 gdbarch = get_frame_arch (frame);
6693 }
6694 }
6695
6696 if (ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP
6697 && ecs->event_thread->control.trap_expected
6698 && gdbarch_single_step_through_delay_p (gdbarch)
6699 && currently_stepping (ecs->event_thread))
6700 {
6701 /* We're trying to step off a breakpoint. Turns out that we're
6702 also on an instruction that needs to be stepped multiple
6703 times before it has been fully executed. E.g., architectures
6704 with a delay slot. It needs to be stepped twice, once for
6705 the instruction and once for the delay slot. */
6706 int step_through_delay
6707 = gdbarch_single_step_through_delay (gdbarch, frame);
6708
6709 if (step_through_delay)
6710 infrun_debug_printf ("step through delay");
6711
6712 if (ecs->event_thread->control.step_range_end == 0
6713 && step_through_delay)
6714 {
6715 /* The user issued a continue when stopped at a breakpoint.
6716 Set up for another trap and get out of here. */
6717 ecs->event_thread->stepping_over_breakpoint = 1;
6718 keep_going (ecs);
6719 return;
6720 }
6721 else if (step_through_delay)
6722 {
6723 /* The user issued a step when stopped at a breakpoint.
6724 Maybe we should stop, maybe we should not - the delay
6725 slot *might* correspond to a line of source. In any
6726 case, don't decide that here, just set
6727 ecs->stepping_over_breakpoint, making sure we
6728 single-step again before breakpoints are re-inserted. */
6729 ecs->event_thread->stepping_over_breakpoint = 1;
6730 }
6731 }
6732
6733 /* See if there is a breakpoint/watchpoint/catchpoint/etc. that
6734 handles this event. */
6735 ecs->event_thread->control.stop_bpstat
6736 = bpstat_stop_status (get_current_regcache ()->aspace (),
6737 ecs->event_thread->stop_pc (),
6738 ecs->event_thread, ecs->ws, stop_chain);
6739
6740 /* Set this again in case evaluating a breakpoint condition
6741 called an inferior function. */
6742 stop_print_frame = true;
6743
6744 /* This is where we handle "moribund" watchpoints. Unlike
6745 software breakpoints traps, hardware watchpoint traps are
6746 always distinguishable from random traps. If no high-level
6747 watchpoint is associated with the reported stop data address
6748 anymore, then the bpstat does not explain the signal ---
6749 simply make sure to ignore it if `stopped_by_watchpoint' is
6750 set. */
6751
6752 if (ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP
6753 && !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat,
6754 GDB_SIGNAL_TRAP)
6755 && stopped_by_watchpoint)
6756 {
6757 infrun_debug_printf ("no user watchpoint explains watchpoint SIGTRAP, "
6758 "ignoring");
6759 }
6760
6761 /* NOTE: cagney/2003-03-29: These checks for a random signal
6762 at one stage in the past included checks for an inferior
6763 function call's call dummy's return breakpoint. The original
6764 comment, that went with the test, read:
6765
6766 ``End of a stack dummy. Some systems (e.g. Sony news) give
6767 another signal besides SIGTRAP, so check here as well as
6768 above.''
6769
6770 If someone ever tries to get call dummys on a
6771 non-executable stack to work (where the target would stop
6772 with something like a SIGSEGV), then those tests might need
6773 to be re-instated. Given, however, that the tests were only
6774 enabled when momentary breakpoints were not being used, I
6775 suspect that it won't be the case.
6776
6777 NOTE: kettenis/2004-02-05: Indeed such checks don't seem to
6778 be necessary for call dummies on a non-executable stack on
6779 SPARC. */
6780
6781 /* See if the breakpoints module can explain the signal. */
6782 random_signal
6783 = !bpstat_explains_signal (ecs->event_thread->control.stop_bpstat,
6784 ecs->event_thread->stop_signal ());
6785
6786 /* Maybe this was a trap for a software breakpoint that has since
6787 been removed. */
6788 if (random_signal && target_stopped_by_sw_breakpoint ())
6789 {
6790 if (gdbarch_program_breakpoint_here_p (gdbarch,
6791 ecs->event_thread->stop_pc ()))
6792 {
6793 struct regcache *regcache;
6794 int decr_pc;
6795
6796 /* Re-adjust PC to what the program would see if GDB was not
6797 debugging it. */
6798 regcache = get_thread_regcache (ecs->event_thread);
6799 decr_pc = gdbarch_decr_pc_after_break (gdbarch);
6800 if (decr_pc != 0)
6801 {
6802 gdb::optional<scoped_restore_tmpl<int>>
6803 restore_operation_disable;
6804
6805 if (record_full_is_used ())
6806 restore_operation_disable.emplace
6807 (record_full_gdb_operation_disable_set ());
6808
6809 regcache_write_pc (regcache,
6810 ecs->event_thread->stop_pc () + decr_pc);
6811 }
6812 }
6813 else
6814 {
6815 /* A delayed software breakpoint event. Ignore the trap. */
6816 infrun_debug_printf ("delayed software breakpoint trap, ignoring");
6817 random_signal = 0;
6818 }
6819 }
6820
6821 /* Maybe this was a trap for a hardware breakpoint/watchpoint that
6822 has since been removed. */
6823 if (random_signal && target_stopped_by_hw_breakpoint ())
6824 {
6825 /* A delayed hardware breakpoint event. Ignore the trap. */
6826 infrun_debug_printf ("delayed hardware breakpoint/watchpoint "
6827 "trap, ignoring");
6828 random_signal = 0;
6829 }
6830
6831 /* If not, perhaps stepping/nexting can. */
6832 if (random_signal)
6833 random_signal = !(ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP
6834 && currently_stepping (ecs->event_thread));
6835
6836 /* Perhaps the thread hit a single-step breakpoint of _another_
6837 thread. Single-step breakpoints are transparent to the
6838 breakpoints module. */
6839 if (random_signal)
6840 random_signal = !ecs->hit_singlestep_breakpoint;
6841
6842 /* No? Perhaps we got a moribund watchpoint. */
6843 if (random_signal)
6844 random_signal = !stopped_by_watchpoint;
6845
6846 /* Always stop if the user explicitly requested this thread to
6847 remain stopped. */
6848 if (ecs->event_thread->stop_requested)
6849 {
6850 random_signal = 1;
6851 infrun_debug_printf ("user-requested stop");
6852 }
6853
6854 /* For the program's own signals, act according to
6855 the signal handling tables. */
6856
6857 if (random_signal)
6858 {
6859 /* Signal not for debugging purposes. */
6860 enum gdb_signal stop_signal = ecs->event_thread->stop_signal ();
6861
6862 infrun_debug_printf ("random signal (%s)",
6863 gdb_signal_to_symbol_string (stop_signal));
6864
6865 stopped_by_random_signal = 1;
6866
6867 /* Always stop on signals if we're either just gaining control
6868 of the program, or the user explicitly requested this thread
6869 to remain stopped. */
6870 if (stop_soon != NO_STOP_QUIETLY
6871 || ecs->event_thread->stop_requested
6872 || signal_stop_state (ecs->event_thread->stop_signal ()))
6873 {
6874 stop_waiting (ecs);
6875 return;
6876 }
6877
6878 /* Notify observers the signal has "handle print" set. Note we
6879 returned early above if stopping; normal_stop handles the
6880 printing in that case. */
6881 if (signal_print[ecs->event_thread->stop_signal ()])
6882 {
6883 /* The signal table tells us to print about this signal. */
6884 target_terminal::ours_for_output ();
6885 notify_signal_received (ecs->event_thread->stop_signal ());
6886 target_terminal::inferior ();
6887 }
6888
6889 /* Clear the signal if it should not be passed. */
6890 if (signal_program[ecs->event_thread->stop_signal ()] == 0)
6891 ecs->event_thread->set_stop_signal (GDB_SIGNAL_0);
6892
6893 if (ecs->event_thread->prev_pc == ecs->event_thread->stop_pc ()
6894 && ecs->event_thread->control.trap_expected
6895 && ecs->event_thread->control.step_resume_breakpoint == nullptr)
6896 {
6897 /* We were just starting a new sequence, attempting to
6898 single-step off of a breakpoint and expecting a SIGTRAP.
6899 Instead this signal arrives. This signal will take us out
6900 of the stepping range so GDB needs to remember to, when
6901 the signal handler returns, resume stepping off that
6902 breakpoint. */
6903 /* To simplify things, "continue" is forced to use the same
6904 code paths as single-step - set a breakpoint at the
6905 signal return address and then, once hit, step off that
6906 breakpoint. */
6907 infrun_debug_printf ("signal arrived while stepping over breakpoint");
6908
6909 insert_hp_step_resume_breakpoint_at_frame (frame);
6910 ecs->event_thread->step_after_step_resume_breakpoint = 1;
6911 /* Reset trap_expected to ensure breakpoints are re-inserted. */
6912 ecs->event_thread->control.trap_expected = 0;
6913
6914 /* If we were nexting/stepping some other thread, switch to
6915 it, so that we don't continue it, losing control. */
6916 if (!switch_back_to_stepped_thread (ecs))
6917 keep_going (ecs);
6918 return;
6919 }
6920
6921 if (ecs->event_thread->stop_signal () != GDB_SIGNAL_0
6922 && (pc_in_thread_step_range (ecs->event_thread->stop_pc (),
6923 ecs->event_thread)
6924 || ecs->event_thread->control.step_range_end == 1)
6925 && (get_stack_frame_id (frame)
6926 == ecs->event_thread->control.step_stack_frame_id)
6927 && ecs->event_thread->control.step_resume_breakpoint == nullptr)
6928 {
6929 /* The inferior is about to take a signal that will take it
6930 out of the single step range. Set a breakpoint at the
6931 current PC (which is presumably where the signal handler
6932 will eventually return) and then allow the inferior to
6933 run free.
6934
6935 Note that this is only needed for a signal delivered
6936 while in the single-step range. Nested signals aren't a
6937 problem as they eventually all return. */
6938 infrun_debug_printf ("signal may take us out of single-step range");
6939
6940 clear_step_over_info ();
6941 insert_hp_step_resume_breakpoint_at_frame (frame);
6942 ecs->event_thread->step_after_step_resume_breakpoint = 1;
6943 /* Reset trap_expected to ensure breakpoints are re-inserted. */
6944 ecs->event_thread->control.trap_expected = 0;
6945 keep_going (ecs);
6946 return;
6947 }
6948
6949 /* Note: step_resume_breakpoint may be non-NULL. This occurs
6950 when either there's a nested signal, or when there's a
6951 pending signal enabled just as the signal handler returns
6952 (leaving the inferior at the step-resume-breakpoint without
6953 actually executing it). Either way continue until the
6954 breakpoint is really hit. */
6955
6956 if (!switch_back_to_stepped_thread (ecs))
6957 {
6958 infrun_debug_printf ("random signal, keep going");
6959
6960 keep_going (ecs);
6961 }
6962 return;
6963 }
6964
6965 process_event_stop_test (ecs);
6966 }
6967
6968 /* Come here when we've got some debug event / signal we can explain
6969 (IOW, not a random signal), and test whether it should cause a
6970 stop, or whether we should resume the inferior (transparently).
6971 E.g., could be a breakpoint whose condition evaluates false; we
6972 could be still stepping within the line; etc. */
6973
6974 static void
6975 process_event_stop_test (struct execution_control_state *ecs)
6976 {
6977 struct symtab_and_line stop_pc_sal;
6978 frame_info_ptr frame;
6979 struct gdbarch *gdbarch;
6980 CORE_ADDR jmp_buf_pc;
6981 struct bpstat_what what;
6982
6983 /* Handle cases caused by hitting a breakpoint. */
6984
6985 frame = get_current_frame ();
6986 gdbarch = get_frame_arch (frame);
6987
6988 what = bpstat_what (ecs->event_thread->control.stop_bpstat);
6989
6990 if (what.call_dummy)
6991 {
6992 stop_stack_dummy = what.call_dummy;
6993 }
6994
6995 /* A few breakpoint types have callbacks associated (e.g.,
6996 bp_jit_event). Run them now. */
6997 bpstat_run_callbacks (ecs->event_thread->control.stop_bpstat);
6998
6999 /* If we hit an internal event that triggers symbol changes, the
7000 current frame will be invalidated within bpstat_what (e.g., if we
7001 hit an internal solib event). Re-fetch it. */
7002 frame = get_current_frame ();
7003 gdbarch = get_frame_arch (frame);
7004
7005 switch (what.main_action)
7006 {
7007 case BPSTAT_WHAT_SET_LONGJMP_RESUME:
7008 /* If we hit the breakpoint at longjmp while stepping, we
7009 install a momentary breakpoint at the target of the
7010 jmp_buf. */
7011
7012 infrun_debug_printf ("BPSTAT_WHAT_SET_LONGJMP_RESUME");
7013
7014 ecs->event_thread->stepping_over_breakpoint = 1;
7015
7016 if (what.is_longjmp)
7017 {
7018 struct value *arg_value;
7019
7020 /* If we set the longjmp breakpoint via a SystemTap probe,
7021 then use it to extract the arguments. The destination PC
7022 is the third argument to the probe. */
7023 arg_value = probe_safe_evaluate_at_pc (frame, 2);
7024 if (arg_value)
7025 {
7026 jmp_buf_pc = value_as_address (arg_value);
7027 jmp_buf_pc = gdbarch_addr_bits_remove (gdbarch, jmp_buf_pc);
7028 }
7029 else if (!gdbarch_get_longjmp_target_p (gdbarch)
7030 || !gdbarch_get_longjmp_target (gdbarch,
7031 frame, &jmp_buf_pc))
7032 {
7033 infrun_debug_printf ("BPSTAT_WHAT_SET_LONGJMP_RESUME "
7034 "(!gdbarch_get_longjmp_target)");
7035 keep_going (ecs);
7036 return;
7037 }
7038
7039 /* Insert a breakpoint at resume address. */
7040 insert_longjmp_resume_breakpoint (gdbarch, jmp_buf_pc);
7041 }
7042 else
7043 check_exception_resume (ecs, frame);
7044 keep_going (ecs);
7045 return;
7046
7047 case BPSTAT_WHAT_CLEAR_LONGJMP_RESUME:
7048 {
7049 frame_info_ptr init_frame;
7050
7051 /* There are several cases to consider.
7052
7053 1. The initiating frame no longer exists. In this case we
7054 must stop, because the exception or longjmp has gone too
7055 far.
7056
7057 2. The initiating frame exists, and is the same as the
7058 current frame. We stop, because the exception or longjmp
7059 has been caught.
7060
7061 3. The initiating frame exists and is different from the
7062 current frame. This means the exception or longjmp has
7063 been caught beneath the initiating frame, so keep going.
7064
7065 4. longjmp breakpoint has been placed just to protect
7066 against stale dummy frames and user is not interested in
7067 stopping around longjmps. */
7068
7069 infrun_debug_printf ("BPSTAT_WHAT_CLEAR_LONGJMP_RESUME");
7070
7071 gdb_assert (ecs->event_thread->control.exception_resume_breakpoint
7072 != nullptr);
7073 delete_exception_resume_breakpoint (ecs->event_thread);
7074
7075 if (what.is_longjmp)
7076 {
7077 check_longjmp_breakpoint_for_call_dummy (ecs->event_thread);
7078
7079 if (!frame_id_p (ecs->event_thread->initiating_frame))
7080 {
7081 /* Case 4. */
7082 keep_going (ecs);
7083 return;
7084 }
7085 }
7086
7087 init_frame = frame_find_by_id (ecs->event_thread->initiating_frame);
7088
7089 if (init_frame)
7090 {
7091 struct frame_id current_id
7092 = get_frame_id (get_current_frame ());
7093 if (current_id == ecs->event_thread->initiating_frame)
7094 {
7095 /* Case 2. Fall through. */
7096 }
7097 else
7098 {
7099 /* Case 3. */
7100 keep_going (ecs);
7101 return;
7102 }
7103 }
7104
7105 /* For Cases 1 and 2, remove the step-resume breakpoint, if it
7106 exists. */
7107 delete_step_resume_breakpoint (ecs->event_thread);
7108
7109 end_stepping_range (ecs);
7110 }
7111 return;
7112
7113 case BPSTAT_WHAT_SINGLE:
7114 infrun_debug_printf ("BPSTAT_WHAT_SINGLE");
7115 ecs->event_thread->stepping_over_breakpoint = 1;
7116 /* Still need to check other stuff, at least the case where we
7117 are stepping and step out of the right range. */
7118 break;
7119
7120 case BPSTAT_WHAT_STEP_RESUME:
7121 infrun_debug_printf ("BPSTAT_WHAT_STEP_RESUME");
7122
7123 delete_step_resume_breakpoint (ecs->event_thread);
7124 if (ecs->event_thread->control.proceed_to_finish
7125 && execution_direction == EXEC_REVERSE)
7126 {
7127 struct thread_info *tp = ecs->event_thread;
7128
7129 /* We are finishing a function in reverse, and just hit the
7130 step-resume breakpoint at the start address of the
7131 function, and we're almost there -- just need to back up
7132 by one more single-step, which should take us back to the
7133 function call. */
7134 tp->control.step_range_start = tp->control.step_range_end = 1;
7135 keep_going (ecs);
7136 return;
7137 }
7138 fill_in_stop_func (gdbarch, ecs);
7139 if (ecs->event_thread->stop_pc () == ecs->stop_func_start
7140 && execution_direction == EXEC_REVERSE)
7141 {
7142 /* We are stepping over a function call in reverse, and just
7143 hit the step-resume breakpoint at the start address of
7144 the function. Go back to single-stepping, which should
7145 take us back to the function call. */
7146 ecs->event_thread->stepping_over_breakpoint = 1;
7147 keep_going (ecs);
7148 return;
7149 }
7150 break;
7151
7152 case BPSTAT_WHAT_STOP_NOISY:
7153 infrun_debug_printf ("BPSTAT_WHAT_STOP_NOISY");
7154 stop_print_frame = true;
7155
7156 /* Assume the thread stopped for a breakpoint. We'll still check
7157 whether a/the breakpoint is there when the thread is next
7158 resumed. */
7159 ecs->event_thread->stepping_over_breakpoint = 1;
7160
7161 stop_waiting (ecs);
7162 return;
7163
7164 case BPSTAT_WHAT_STOP_SILENT:
7165 infrun_debug_printf ("BPSTAT_WHAT_STOP_SILENT");
7166 stop_print_frame = false;
7167
7168 /* Assume the thread stopped for a breakpoint. We'll still check
7169 whether a/the breakpoint is there when the thread is next
7170 resumed. */
7171 ecs->event_thread->stepping_over_breakpoint = 1;
7172 stop_waiting (ecs);
7173 return;
7174
7175 case BPSTAT_WHAT_HP_STEP_RESUME:
7176 infrun_debug_printf ("BPSTAT_WHAT_HP_STEP_RESUME");
7177
7178 delete_step_resume_breakpoint (ecs->event_thread);
7179 if (ecs->event_thread->step_after_step_resume_breakpoint)
7180 {
7181 /* Back when the step-resume breakpoint was inserted, we
7182 were trying to single-step off a breakpoint. Go back to
7183 doing that. */
7184 ecs->event_thread->step_after_step_resume_breakpoint = 0;
7185 ecs->event_thread->stepping_over_breakpoint = 1;
7186 keep_going (ecs);
7187 return;
7188 }
7189 break;
7190
7191 case BPSTAT_WHAT_KEEP_CHECKING:
7192 break;
7193 }
7194
7195 /* If we stepped a permanent breakpoint and we had a high priority
7196 step-resume breakpoint for the address we stepped, but we didn't
7197 hit it, then we must have stepped into the signal handler. The
7198 step-resume was only necessary to catch the case of _not_
7199 stepping into the handler, so delete it, and fall through to
7200 checking whether the step finished. */
7201 if (ecs->event_thread->stepped_breakpoint)
7202 {
7203 struct breakpoint *sr_bp
7204 = ecs->event_thread->control.step_resume_breakpoint;
7205
7206 if (sr_bp != nullptr
7207 && sr_bp->first_loc ().permanent
7208 && sr_bp->type == bp_hp_step_resume
7209 && sr_bp->first_loc ().address == ecs->event_thread->prev_pc)
7210 {
7211 infrun_debug_printf ("stepped permanent breakpoint, stopped in handler");
7212 delete_step_resume_breakpoint (ecs->event_thread);
7213 ecs->event_thread->step_after_step_resume_breakpoint = 0;
7214 }
7215 }
7216
7217 /* We come here if we hit a breakpoint but should not stop for it.
7218 Possibly we also were stepping and should stop for that. So fall
7219 through and test for stepping. But, if not stepping, do not
7220 stop. */
7221
7222 /* In all-stop mode, if we're currently stepping but have stopped in
7223 some other thread, we need to switch back to the stepped thread. */
7224 if (switch_back_to_stepped_thread (ecs))
7225 return;
7226
7227 if (ecs->event_thread->control.step_resume_breakpoint)
7228 {
7229 infrun_debug_printf ("step-resume breakpoint is inserted");
7230
7231 /* Having a step-resume breakpoint overrides anything
7232 else having to do with stepping commands until
7233 that breakpoint is reached. */
7234 keep_going (ecs);
7235 return;
7236 }
7237
7238 if (ecs->event_thread->control.step_range_end == 0)
7239 {
7240 infrun_debug_printf ("no stepping, continue");
7241 /* Likewise if we aren't even stepping. */
7242 keep_going (ecs);
7243 return;
7244 }
7245
7246 /* Re-fetch current thread's frame in case the code above caused
7247 the frame cache to be re-initialized, making our FRAME variable
7248 a dangling pointer. */
7249 frame = get_current_frame ();
7250 gdbarch = get_frame_arch (frame);
7251 fill_in_stop_func (gdbarch, ecs);
7252
7253 /* If stepping through a line, keep going if still within it.
7254
7255 Note that step_range_end is the address of the first instruction
7256 beyond the step range, and NOT the address of the last instruction
7257 within it!
7258
7259 Note also that during reverse execution, we may be stepping
7260 through a function epilogue and therefore must detect when
7261 the current-frame changes in the middle of a line. */
7262
7263 if (pc_in_thread_step_range (ecs->event_thread->stop_pc (),
7264 ecs->event_thread)
7265 && (execution_direction != EXEC_REVERSE
7266 || get_frame_id (frame) == ecs->event_thread->control.step_frame_id))
7267 {
7268 infrun_debug_printf
7269 ("stepping inside range [%s-%s]",
7270 paddress (gdbarch, ecs->event_thread->control.step_range_start),
7271 paddress (gdbarch, ecs->event_thread->control.step_range_end));
7272
7273 /* Tentatively re-enable range stepping; `resume' disables it if
7274 necessary (e.g., if we're stepping over a breakpoint or we
7275 have software watchpoints). */
7276 ecs->event_thread->control.may_range_step = 1;
7277
7278 /* When stepping backward, stop at beginning of line range
7279 (unless it's the function entry point, in which case
7280 keep going back to the call point). */
7281 CORE_ADDR stop_pc = ecs->event_thread->stop_pc ();
7282 if (stop_pc == ecs->event_thread->control.step_range_start
7283 && stop_pc != ecs->stop_func_start
7284 && execution_direction == EXEC_REVERSE)
7285 end_stepping_range (ecs);
7286 else
7287 keep_going (ecs);
7288
7289 return;
7290 }
7291
7292 /* We stepped out of the stepping range. */
7293
7294 /* If we are stepping at the source level and entered the runtime
7295 loader dynamic symbol resolution code...
7296
7297 EXEC_FORWARD: we keep on single stepping until we exit the run
7298 time loader code and reach the callee's address.
7299
7300 EXEC_REVERSE: we've already executed the callee (backward), and
7301 the runtime loader code is handled just like any other
7302 undebuggable function call. Now we need only keep stepping
7303 backward through the trampoline code, and that's handled further
7304 down, so there is nothing for us to do here. */
7305
7306 if (execution_direction != EXEC_REVERSE
7307 && ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
7308 && in_solib_dynsym_resolve_code (ecs->event_thread->stop_pc ())
7309 && (ecs->event_thread->control.step_start_function == nullptr
7310 || !in_solib_dynsym_resolve_code (
7311 ecs->event_thread->control.step_start_function->value_block ()
7312 ->entry_pc ())))
7313 {
7314 CORE_ADDR pc_after_resolver =
7315 gdbarch_skip_solib_resolver (gdbarch, ecs->event_thread->stop_pc ());
7316
7317 infrun_debug_printf ("stepped into dynsym resolve code");
7318
7319 if (pc_after_resolver)
7320 {
7321 /* Set up a step-resume breakpoint at the address
7322 indicated by SKIP_SOLIB_RESOLVER. */
7323 symtab_and_line sr_sal;
7324 sr_sal.pc = pc_after_resolver;
7325 sr_sal.pspace = get_frame_program_space (frame);
7326
7327 insert_step_resume_breakpoint_at_sal (gdbarch,
7328 sr_sal, null_frame_id);
7329 }
7330
7331 keep_going (ecs);
7332 return;
7333 }
7334
7335 /* Step through an indirect branch thunk. */
7336 if (ecs->event_thread->control.step_over_calls != STEP_OVER_NONE
7337 && gdbarch_in_indirect_branch_thunk (gdbarch,
7338 ecs->event_thread->stop_pc ()))
7339 {
7340 infrun_debug_printf ("stepped into indirect branch thunk");
7341 keep_going (ecs);
7342 return;
7343 }
7344
7345 if (ecs->event_thread->control.step_range_end != 1
7346 && (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
7347 || ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
7348 && get_frame_type (frame) == SIGTRAMP_FRAME)
7349 {
7350 infrun_debug_printf ("stepped into signal trampoline");
7351 /* The inferior, while doing a "step" or "next", has ended up in
7352 a signal trampoline (either by a signal being delivered or by
7353 the signal handler returning). Just single-step until the
7354 inferior leaves the trampoline (either by calling the handler
7355 or returning). */
7356 keep_going (ecs);
7357 return;
7358 }
7359
7360 /* If we're in the return path from a shared library trampoline,
7361 we want to proceed through the trampoline when stepping. */
7362 /* macro/2012-04-25: This needs to come before the subroutine
7363 call check below as on some targets return trampolines look
7364 like subroutine calls (MIPS16 return thunks). */
7365 if (gdbarch_in_solib_return_trampoline (gdbarch,
7366 ecs->event_thread->stop_pc (),
7367 ecs->stop_func_name)
7368 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
7369 {
7370 /* Determine where this trampoline returns. */
7371 CORE_ADDR stop_pc = ecs->event_thread->stop_pc ();
7372 CORE_ADDR real_stop_pc
7373 = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
7374
7375 infrun_debug_printf ("stepped into solib return tramp");
7376
7377 /* Only proceed through if we know where it's going. */
7378 if (real_stop_pc)
7379 {
7380 /* And put the step-breakpoint there and go until there. */
7381 symtab_and_line sr_sal;
7382 sr_sal.pc = real_stop_pc;
7383 sr_sal.section = find_pc_overlay (sr_sal.pc);
7384 sr_sal.pspace = get_frame_program_space (frame);
7385
7386 /* Do not specify what the fp should be when we stop since
7387 on some machines the prologue is where the new fp value
7388 is established. */
7389 insert_step_resume_breakpoint_at_sal (gdbarch,
7390 sr_sal, null_frame_id);
7391
7392 /* Restart without fiddling with the step ranges or
7393 other state. */
7394 keep_going (ecs);
7395 return;
7396 }
7397 }
7398
7399 /* Check for subroutine calls. The check for the current frame
7400 equalling the step ID is not necessary - the check of the
7401 previous frame's ID is sufficient - but it is a common case and
7402 cheaper than checking the previous frame's ID.
7403
7404 NOTE: frame_id::operator== will never report two invalid frame IDs as
7405 being equal, so to get into this block, both the current and
7406 previous frame must have valid frame IDs. */
7407 /* The outer_frame_id check is a heuristic to detect stepping
7408 through startup code. If we step over an instruction which
7409 sets the stack pointer from an invalid value to a valid value,
7410 we may detect that as a subroutine call from the mythical
7411 "outermost" function. This could be fixed by marking
7412 outermost frames as !stack_p,code_p,special_p. Then the
7413 initial outermost frame, before sp was valid, would
7414 have code_addr == &_start. See the comment in frame_id::operator==
7415 for more. */
7416
7417 /* We want "nexti" to step into, not over, signal handlers invoked
7418 by the kernel, therefore this subroutine check should not trigger
7419 for a signal handler invocation. On most platforms, this is already
7420 not the case, as the kernel puts a signal trampoline frame onto the
7421 stack to handle proper return after the handler, and therefore at this
7422 point, the current frame is a grandchild of the step frame, not a
7423 child. However, on some platforms, the kernel actually uses a
7424 trampoline to handle *invocation* of the handler. In that case,
7425 when executing the first instruction of the trampoline, this check
7426 would erroneously detect the trampoline invocation as a subroutine
7427 call. Fix this by checking for SIGTRAMP_FRAME. */
7428 if ((get_stack_frame_id (frame)
7429 != ecs->event_thread->control.step_stack_frame_id)
7430 && get_frame_type (frame) != SIGTRAMP_FRAME
7431 && ((frame_unwind_caller_id (get_current_frame ())
7432 == ecs->event_thread->control.step_stack_frame_id)
7433 && ((ecs->event_thread->control.step_stack_frame_id
7434 != outer_frame_id)
7435 || (ecs->event_thread->control.step_start_function
7436 != find_pc_function (ecs->event_thread->stop_pc ())))))
7437 {
7438 CORE_ADDR stop_pc = ecs->event_thread->stop_pc ();
7439 CORE_ADDR real_stop_pc;
7440
7441 infrun_debug_printf ("stepped into subroutine");
7442
7443 if (ecs->event_thread->control.step_over_calls == STEP_OVER_NONE)
7444 {
7445 /* I presume that step_over_calls is only 0 when we're
7446 supposed to be stepping at the assembly language level
7447 ("stepi"). Just stop. */
7448 /* And this works the same backward as frontward. MVS */
7449 end_stepping_range (ecs);
7450 return;
7451 }
7452
7453 /* Reverse stepping through solib trampolines. */
7454
7455 if (execution_direction == EXEC_REVERSE
7456 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE
7457 && (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
7458 || (ecs->stop_func_start == 0
7459 && in_solib_dynsym_resolve_code (stop_pc))))
7460 {
7461 /* Any solib trampoline code can be handled in reverse
7462 by simply continuing to single-step. We have already
7463 executed the solib function (backwards), and a few
7464 steps will take us back through the trampoline to the
7465 caller. */
7466 keep_going (ecs);
7467 return;
7468 }
7469
7470 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL)
7471 {
7472 /* We're doing a "next".
7473
7474 Normal (forward) execution: set a breakpoint at the
7475 callee's return address (the address at which the caller
7476 will resume).
7477
7478 Reverse (backward) execution. set the step-resume
7479 breakpoint at the start of the function that we just
7480 stepped into (backwards), and continue to there. When we
7481 get there, we'll need to single-step back to the caller. */
7482
7483 if (execution_direction == EXEC_REVERSE)
7484 {
7485 /* If we're already at the start of the function, we've either
7486 just stepped backward into a single instruction function,
7487 or stepped back out of a signal handler to the first instruction
7488 of the function. Just keep going, which will single-step back
7489 to the caller. */
7490 if (ecs->stop_func_start != stop_pc && ecs->stop_func_start != 0)
7491 {
7492 /* Normal function call return (static or dynamic). */
7493 symtab_and_line sr_sal;
7494 sr_sal.pc = ecs->stop_func_start;
7495 sr_sal.pspace = get_frame_program_space (frame);
7496 insert_step_resume_breakpoint_at_sal (gdbarch,
7497 sr_sal, get_stack_frame_id (frame));
7498 }
7499 }
7500 else
7501 insert_step_resume_breakpoint_at_caller (frame);
7502
7503 keep_going (ecs);
7504 return;
7505 }
7506
7507 /* If we are in a function call trampoline (a stub between the
7508 calling routine and the real function), locate the real
7509 function. That's what tells us (a) whether we want to step
7510 into it at all, and (b) what prologue we want to run to the
7511 end of, if we do step into it. */
7512 real_stop_pc = skip_language_trampoline (frame, stop_pc);
7513 if (real_stop_pc == 0)
7514 real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
7515 if (real_stop_pc != 0)
7516 ecs->stop_func_start = real_stop_pc;
7517
7518 if (real_stop_pc != 0 && in_solib_dynsym_resolve_code (real_stop_pc))
7519 {
7520 symtab_and_line sr_sal;
7521 sr_sal.pc = ecs->stop_func_start;
7522 sr_sal.pspace = get_frame_program_space (frame);
7523
7524 insert_step_resume_breakpoint_at_sal (gdbarch,
7525 sr_sal, null_frame_id);
7526 keep_going (ecs);
7527 return;
7528 }
7529
7530 /* If we have line number information for the function we are
7531 thinking of stepping into and the function isn't on the skip
7532 list, step into it.
7533
7534 If there are several symtabs at that PC (e.g. with include
7535 files), just want to know whether *any* of them have line
7536 numbers. find_pc_line handles this. */
7537 {
7538 struct symtab_and_line tmp_sal;
7539
7540 tmp_sal = find_pc_line (ecs->stop_func_start, 0);
7541 if (tmp_sal.line != 0
7542 && !function_name_is_marked_for_skip (ecs->stop_func_name,
7543 tmp_sal)
7544 && !inline_frame_is_marked_for_skip (true, ecs->event_thread))
7545 {
7546 if (execution_direction == EXEC_REVERSE)
7547 handle_step_into_function_backward (gdbarch, ecs);
7548 else
7549 handle_step_into_function (gdbarch, ecs);
7550 return;
7551 }
7552 }
7553
7554 /* If we have no line number and the step-stop-if-no-debug is
7555 set, we stop the step so that the user has a chance to switch
7556 in assembly mode. */
7557 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
7558 && step_stop_if_no_debug)
7559 {
7560 end_stepping_range (ecs);
7561 return;
7562 }
7563
7564 if (execution_direction == EXEC_REVERSE)
7565 {
7566 /* If we're already at the start of the function, we've either just
7567 stepped backward into a single instruction function without line
7568 number info, or stepped back out of a signal handler to the first
7569 instruction of the function without line number info. Just keep
7570 going, which will single-step back to the caller. */
7571 if (ecs->stop_func_start != stop_pc)
7572 {
7573 /* Set a breakpoint at callee's start address.
7574 From there we can step once and be back in the caller. */
7575 symtab_and_line sr_sal;
7576 sr_sal.pc = ecs->stop_func_start;
7577 sr_sal.pspace = get_frame_program_space (frame);
7578 insert_step_resume_breakpoint_at_sal (gdbarch,
7579 sr_sal, null_frame_id);
7580 }
7581 }
7582 else
7583 /* Set a breakpoint at callee's return address (the address
7584 at which the caller will resume). */
7585 insert_step_resume_breakpoint_at_caller (frame);
7586
7587 keep_going (ecs);
7588 return;
7589 }
7590
7591 /* Reverse stepping through solib trampolines. */
7592
7593 if (execution_direction == EXEC_REVERSE
7594 && ecs->event_thread->control.step_over_calls != STEP_OVER_NONE)
7595 {
7596 CORE_ADDR stop_pc = ecs->event_thread->stop_pc ();
7597
7598 if (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
7599 || (ecs->stop_func_start == 0
7600 && in_solib_dynsym_resolve_code (stop_pc)))
7601 {
7602 /* Any solib trampoline code can be handled in reverse
7603 by simply continuing to single-step. We have already
7604 executed the solib function (backwards), and a few
7605 steps will take us back through the trampoline to the
7606 caller. */
7607 keep_going (ecs);
7608 return;
7609 }
7610 else if (in_solib_dynsym_resolve_code (stop_pc))
7611 {
7612 /* Stepped backward into the solib dynsym resolver.
7613 Set a breakpoint at its start and continue, then
7614 one more step will take us out. */
7615 symtab_and_line sr_sal;
7616 sr_sal.pc = ecs->stop_func_start;
7617 sr_sal.pspace = get_frame_program_space (frame);
7618 insert_step_resume_breakpoint_at_sal (gdbarch,
7619 sr_sal, null_frame_id);
7620 keep_going (ecs);
7621 return;
7622 }
7623 }
7624
7625 /* This always returns the sal for the inner-most frame when we are in a
7626 stack of inlined frames, even if GDB actually believes that it is in a
7627 more outer frame. This is checked for below by calls to
7628 inline_skipped_frames. */
7629 stop_pc_sal = find_pc_line (ecs->event_thread->stop_pc (), 0);
7630
7631 /* NOTE: tausq/2004-05-24: This if block used to be done before all
7632 the trampoline processing logic, however, there are some trampolines
7633 that have no names, so we should do trampoline handling first. */
7634 if (ecs->event_thread->control.step_over_calls == STEP_OVER_UNDEBUGGABLE
7635 && ecs->stop_func_name == nullptr
7636 && stop_pc_sal.line == 0)
7637 {
7638 infrun_debug_printf ("stepped into undebuggable function");
7639
7640 /* The inferior just stepped into, or returned to, an
7641 undebuggable function (where there is no debugging information
7642 and no line number corresponding to the address where the
7643 inferior stopped). Since we want to skip this kind of code,
7644 we keep going until the inferior returns from this
7645 function - unless the user has asked us not to (via
7646 set step-mode) or we no longer know how to get back
7647 to the call site. */
7648 if (step_stop_if_no_debug
7649 || !frame_id_p (frame_unwind_caller_id (frame)))
7650 {
7651 /* If we have no line number and the step-stop-if-no-debug
7652 is set, we stop the step so that the user has a chance to
7653 switch in assembly mode. */
7654 end_stepping_range (ecs);
7655 return;
7656 }
7657 else
7658 {
7659 /* Set a breakpoint at callee's return address (the address
7660 at which the caller will resume). */
7661 insert_step_resume_breakpoint_at_caller (frame);
7662 keep_going (ecs);
7663 return;
7664 }
7665 }
7666
7667 if (execution_direction == EXEC_REVERSE
7668 && ecs->event_thread->control.proceed_to_finish
7669 && ecs->event_thread->stop_pc () >= ecs->stop_func_alt_start
7670 && ecs->event_thread->stop_pc () < ecs->stop_func_start)
7671 {
7672 /* We are executing the reverse-finish command.
7673 If the system supports multiple entry points and we are finishing a
7674 function in reverse. If we are between the entry points single-step
7675 back to the alternate entry point. If we are at the alternate entry
7676 point -- just need to back up by one more single-step, which
7677 should take us back to the function call. */
7678 ecs->event_thread->control.step_range_start
7679 = ecs->event_thread->control.step_range_end = 1;
7680 keep_going (ecs);
7681 return;
7682
7683 }
7684
7685 if (ecs->event_thread->control.step_range_end == 1)
7686 {
7687 /* It is stepi or nexti. We always want to stop stepping after
7688 one instruction. */
7689 infrun_debug_printf ("stepi/nexti");
7690 end_stepping_range (ecs);
7691 return;
7692 }
7693
7694 if (stop_pc_sal.line == 0)
7695 {
7696 /* We have no line number information. That means to stop
7697 stepping (does this always happen right after one instruction,
7698 when we do "s" in a function with no line numbers,
7699 or can this happen as a result of a return or longjmp?). */
7700 infrun_debug_printf ("line number info");
7701 end_stepping_range (ecs);
7702 return;
7703 }
7704
7705 /* Look for "calls" to inlined functions, part one. If the inline
7706 frame machinery detected some skipped call sites, we have entered
7707 a new inline function. */
7708
7709 if ((get_frame_id (get_current_frame ())
7710 == ecs->event_thread->control.step_frame_id)
7711 && inline_skipped_frames (ecs->event_thread))
7712 {
7713 infrun_debug_printf ("stepped into inlined function");
7714
7715 symtab_and_line call_sal = find_frame_sal (get_current_frame ());
7716
7717 if (ecs->event_thread->control.step_over_calls != STEP_OVER_ALL)
7718 {
7719 /* For "step", we're going to stop. But if the call site
7720 for this inlined function is on the same source line as
7721 we were previously stepping, go down into the function
7722 first. Otherwise stop at the call site. */
7723
7724 if (call_sal.line == ecs->event_thread->current_line
7725 && call_sal.symtab == ecs->event_thread->current_symtab)
7726 {
7727 step_into_inline_frame (ecs->event_thread);
7728 if (inline_frame_is_marked_for_skip (false, ecs->event_thread))
7729 {
7730 keep_going (ecs);
7731 return;
7732 }
7733 }
7734
7735 end_stepping_range (ecs);
7736 return;
7737 }
7738 else
7739 {
7740 /* For "next", we should stop at the call site if it is on a
7741 different source line. Otherwise continue through the
7742 inlined function. */
7743 if (call_sal.line == ecs->event_thread->current_line
7744 && call_sal.symtab == ecs->event_thread->current_symtab)
7745 keep_going (ecs);
7746 else
7747 end_stepping_range (ecs);
7748 return;
7749 }
7750 }
7751
7752 /* Look for "calls" to inlined functions, part two. If we are still
7753 in the same real function we were stepping through, but we have
7754 to go further up to find the exact frame ID, we are stepping
7755 through a more inlined call beyond its call site. */
7756
7757 if (get_frame_type (get_current_frame ()) == INLINE_FRAME
7758 && (get_frame_id (get_current_frame ())
7759 != ecs->event_thread->control.step_frame_id)
7760 && stepped_in_from (get_current_frame (),
7761 ecs->event_thread->control.step_frame_id))
7762 {
7763 infrun_debug_printf ("stepping through inlined function");
7764
7765 if (ecs->event_thread->control.step_over_calls == STEP_OVER_ALL
7766 || inline_frame_is_marked_for_skip (false, ecs->event_thread))
7767 keep_going (ecs);
7768 else
7769 end_stepping_range (ecs);
7770 return;
7771 }
7772
7773 bool refresh_step_info = true;
7774 if ((ecs->event_thread->stop_pc () == stop_pc_sal.pc)
7775 && (ecs->event_thread->current_line != stop_pc_sal.line
7776 || ecs->event_thread->current_symtab != stop_pc_sal.symtab))
7777 {
7778 /* We are at a different line. */
7779
7780 if (stop_pc_sal.is_stmt)
7781 {
7782 /* We are at the start of a statement.
7783
7784 So stop. Note that we don't stop if we step into the middle of a
7785 statement. That is said to make things like for (;;) statements
7786 work better. */
7787 infrun_debug_printf ("stepped to a different line");
7788 end_stepping_range (ecs);
7789 return;
7790 }
7791 else if (get_frame_id (get_current_frame ())
7792 == ecs->event_thread->control.step_frame_id)
7793 {
7794 /* We are not at the start of a statement, and we have not changed
7795 frame.
7796
7797 We ignore this line table entry, and continue stepping forward,
7798 looking for a better place to stop. */
7799 refresh_step_info = false;
7800 infrun_debug_printf ("stepped to a different line, but "
7801 "it's not the start of a statement");
7802 }
7803 else
7804 {
7805 /* We are not the start of a statement, and we have changed frame.
7806
7807 We ignore this line table entry, and continue stepping forward,
7808 looking for a better place to stop. Keep refresh_step_info at
7809 true to note that the frame has changed, but ignore the line
7810 number to make sure we don't ignore a subsequent entry with the
7811 same line number. */
7812 stop_pc_sal.line = 0;
7813 infrun_debug_printf ("stepped to a different frame, but "
7814 "it's not the start of a statement");
7815 }
7816 }
7817
7818 /* We aren't done stepping.
7819
7820 Optimize by setting the stepping range to the line.
7821 (We might not be in the original line, but if we entered a
7822 new line in mid-statement, we continue stepping. This makes
7823 things like for(;;) statements work better.)
7824
7825 If we entered a SAL that indicates a non-statement line table entry,
7826 then we update the stepping range, but we don't update the step info,
7827 which includes things like the line number we are stepping away from.
7828 This means we will stop when we find a line table entry that is marked
7829 as is-statement, even if it matches the non-statement one we just
7830 stepped into. */
7831
7832 ecs->event_thread->control.step_range_start = stop_pc_sal.pc;
7833 ecs->event_thread->control.step_range_end = stop_pc_sal.end;
7834 ecs->event_thread->control.may_range_step = 1;
7835 infrun_debug_printf
7836 ("updated step range, start = %s, end = %s, may_range_step = %d",
7837 paddress (gdbarch, ecs->event_thread->control.step_range_start),
7838 paddress (gdbarch, ecs->event_thread->control.step_range_end),
7839 ecs->event_thread->control.may_range_step);
7840 if (refresh_step_info)
7841 set_step_info (ecs->event_thread, frame, stop_pc_sal);
7842
7843 infrun_debug_printf ("keep going");
7844 keep_going (ecs);
7845 }
7846
7847 static bool restart_stepped_thread (process_stratum_target *resume_target,
7848 ptid_t resume_ptid);
7849
/* In all-stop mode, if we're currently stepping but have stopped in
   some other thread, we may need to switch back to the stepped
   thread.  Returns true if we set the inferior running, false if we
   left it stopped (and the event needs further processing).

   Note this is a no-op in non-stop mode: there, each thread is
   handled independently and no switching back is needed.  */

static bool
switch_back_to_stepped_thread (struct execution_control_state *ecs)
{
  if (!target_is_non_stop_p ())
    {
      /* If any thread is blocked on some internal breakpoint, and we
	 simply need to step over that breakpoint to get it going
	 again, do that first.  */

      /* However, if we see an event for the stepping thread, then we
	 know all other threads have been moved past their breakpoints
	 already.  Let the caller check whether the step is finished,
	 etc., before deciding to move it past a breakpoint.  */
      if (ecs->event_thread->control.step_range_end != 0)
	return false;

      /* Check if the current thread is blocked on an incomplete
	 step-over, interrupted by a random signal.  */
      if (ecs->event_thread->control.trap_expected
	  && ecs->event_thread->stop_signal () != GDB_SIGNAL_TRAP)
	{
	  infrun_debug_printf
	    ("need to finish step-over of [%s]",
	     ecs->event_thread->ptid.to_string ().c_str ());
	  keep_going (ecs);
	  return true;
	}

      /* Check if the current thread is blocked by a single-step
	 breakpoint of another thread.  */
      if (ecs->hit_singlestep_breakpoint)
	{
	  infrun_debug_printf ("need to step [%s] over single-step breakpoint",
			       ecs->ptid.to_string ().c_str ());
	  keep_going (ecs);
	  return true;
	}

      /* If this thread needs yet another step-over (e.g., stepping
	 through a delay slot), do it first before moving on to
	 another thread.  */
      if (thread_still_needs_step_over (ecs->event_thread))
	{
	  infrun_debug_printf
	    ("thread [%s] still needs step-over",
	     ecs->event_thread->ptid.to_string ().c_str ());
	  keep_going (ecs);
	  return true;
	}

      /* If scheduler locking applies even if not stepping, there's no
	 need to walk over threads.  Above we've checked whether the
	 current thread is stepping.  If some other thread not the
	 event thread is stepping, then it must be that scheduler
	 locking is not in effect.  */
      if (schedlock_applies (ecs->event_thread))
	return false;

      /* Otherwise, we no longer expect a trap in the current thread.
	 Clear the trap_expected flag before switching back -- this is
	 what keep_going does as well, if we call it.  */
      ecs->event_thread->control.trap_expected = 0;

      /* Likewise, clear the signal if it should not be passed.  */
      if (!signal_program[ecs->event_thread->stop_signal ()])
	ecs->event_thread->set_stop_signal (GDB_SIGNAL_0);

      /* Try to restart whichever thread was stepping; if one was
	 found and restarted, wait for its next event rather than
	 reporting this one.  */
      if (restart_stepped_thread (ecs->target, ecs->ptid))
	{
	  prepare_to_wait (ecs);
	  return true;
	}

      /* No stepped thread to restart -- stay on the event thread and
	 let the caller process the event normally.  */
      switch_to_thread (ecs->event_thread);
    }

  return false;
}
7933
7934 /* Look for the thread that was stepping, and resume it.
7935 RESUME_TARGET / RESUME_PTID indicate the set of threads the caller
7936 is resuming. Return true if a thread was started, false
7937 otherwise. */
7938
7939 static bool
7940 restart_stepped_thread (process_stratum_target *resume_target,
7941 ptid_t resume_ptid)
7942 {
7943 /* Do all pending step-overs before actually proceeding with
7944 step/next/etc. */
7945 if (start_step_over ())
7946 return true;
7947
7948 for (thread_info *tp : all_threads_safe ())
7949 {
7950 if (tp->state == THREAD_EXITED)
7951 continue;
7952
7953 if (tp->has_pending_waitstatus ())
7954 continue;
7955
7956 /* Ignore threads of processes the caller is not
7957 resuming. */
7958 if (!sched_multi
7959 && (tp->inf->process_target () != resume_target
7960 || tp->inf->pid != resume_ptid.pid ()))
7961 continue;
7962
7963 if (tp->control.trap_expected)
7964 {
7965 infrun_debug_printf ("switching back to stepped thread (step-over)");
7966
7967 if (keep_going_stepped_thread (tp))
7968 return true;
7969 }
7970 }
7971
7972 for (thread_info *tp : all_threads_safe ())
7973 {
7974 if (tp->state == THREAD_EXITED)
7975 continue;
7976
7977 if (tp->has_pending_waitstatus ())
7978 continue;
7979
7980 /* Ignore threads of processes the caller is not
7981 resuming. */
7982 if (!sched_multi
7983 && (tp->inf->process_target () != resume_target
7984 || tp->inf->pid != resume_ptid.pid ()))
7985 continue;
7986
7987 /* Did we find the stepping thread? */
7988 if (tp->control.step_range_end)
7989 {
7990 infrun_debug_printf ("switching back to stepped thread (stepping)");
7991
7992 if (keep_going_stepped_thread (tp))
7993 return true;
7994 }
7995 }
7996
7997 return false;
7998 }
7999
8000 /* See infrun.h. */
8001
8002 void
8003 restart_after_all_stop_detach (process_stratum_target *proc_target)
8004 {
8005 /* Note we don't check target_is_non_stop_p() here, because the
8006 current inferior may no longer have a process_stratum target
8007 pushed, as we just detached. */
8008
8009 /* See if we have a THREAD_RUNNING thread that need to be
8010 re-resumed. If we have any thread that is already executing,
8011 then we don't need to resume the target -- it is already been
8012 resumed. With the remote target (in all-stop), it's even
8013 impossible to issue another resumption if the target is already
8014 resumed, until the target reports a stop. */
8015 for (thread_info *thr : all_threads (proc_target))
8016 {
8017 if (thr->state != THREAD_RUNNING)
8018 continue;
8019
8020 /* If we have any thread that is already executing, then we
8021 don't need to resume the target -- it is already been
8022 resumed. */
8023 if (thr->executing ())
8024 return;
8025
8026 /* If we have a pending event to process, skip resuming the
8027 target and go straight to processing it. */
8028 if (thr->resumed () && thr->has_pending_waitstatus ())
8029 return;
8030 }
8031
8032 /* Alright, we need to re-resume the target. If a thread was
8033 stepping, we need to restart it stepping. */
8034 if (restart_stepped_thread (proc_target, minus_one_ptid))
8035 return;
8036
8037 /* Otherwise, find the first THREAD_RUNNING thread and resume
8038 it. */
8039 for (thread_info *thr : all_threads (proc_target))
8040 {
8041 if (thr->state != THREAD_RUNNING)
8042 continue;
8043
8044 execution_control_state ecs (thr);
8045 switch_to_thread (thr);
8046 keep_going (&ecs);
8047 return;
8048 }
8049 }
8050
/* Set a previously stepped thread back to stepping.  Returns true on
   success, false if the resume is not possible (e.g., the thread
   vanished).  */

static bool
keep_going_stepped_thread (struct thread_info *tp)
{
  frame_info_ptr frame;

  /* If the stepping thread exited, then don't try to switch back and
     resume it, which could fail in several different ways depending
     on the target.  Instead, just keep going.

     We can find a stepping dead thread in the thread list in two
     cases:

     - The target supports thread exit events, and when the target
       tries to delete the thread from the thread list, inferior_ptid
       pointed at the exiting thread.  In such case, calling
       delete_thread does not really remove the thread from the list;
       instead, the thread is left listed, with 'exited' state.

     - The target's debug interface does not support thread exit
       events, and so we have no idea whatsoever if the previously
       stepping thread is still alive.  For that reason, we need to
       synchronously query the target now.  */

  if (tp->state == THREAD_EXITED || !target_thread_alive (tp->ptid))
    {
      infrun_debug_printf ("not resuming previously stepped thread, it has "
			   "vanished");

      delete_thread (tp);
      return false;
    }

  infrun_debug_printf ("resuming previously stepped thread");

  execution_control_state ecs (tp);
  switch_to_thread (tp);

  /* Refresh the stop PC from the target -- the thread may have moved
     since we last looked.  */
  tp->set_stop_pc (regcache_read_pc (get_thread_regcache (tp)));
  frame = get_current_frame ();

  /* If the PC of the thread we were trying to single-step has
     changed, then that thread has trapped or been signaled, but the
     event has not been reported to GDB yet.  Re-poll the target
     looking for this particular thread's event (i.e. temporarily
     enable schedlock) by:

     - setting a break at the current PC
     - resuming that particular thread, only (by setting trap
       expected)

     This prevents us continuously moving the single-step breakpoint
     forward, one instruction at a time, overstepping.  */

  if (tp->stop_pc () != tp->prev_pc)
    {
      ptid_t resume_ptid;

      infrun_debug_printf ("expected thread advanced also (%s -> %s)",
			   paddress (current_inferior ()->arch (), tp->prev_pc),
			   paddress (current_inferior ()->arch (),
				     tp->stop_pc ()));

      /* Clear the info of the previous step-over, as it's no longer
	 valid (if the thread was trying to step over a breakpoint, it
	 has already succeeded).  It's what keep_going would do too,
	 if we called it.  Do this before trying to insert the sss
	 breakpoint, otherwise if we were previously trying to step
	 over this exact address in another thread, the breakpoint is
	 skipped.  */
      clear_step_over_info ();
      tp->control.trap_expected = 0;

      insert_single_step_breakpoint (get_frame_arch (frame),
				     get_frame_address_space (frame),
				     tp->stop_pc ());

      tp->set_resumed (true);
      resume_ptid = internal_resume_ptid (tp->control.stepping_command);
      do_target_resume (resume_ptid, false, GDB_SIGNAL_0);
    }
  else
    {
      /* The thread hasn't moved; resume it the normal way, letting
	 keep_going_pass_signal sort out step-over/resume details.  */
      infrun_debug_printf ("expected thread still hasn't advanced");

      keep_going_pass_signal (&ecs);
    }

  return true;
}
8144
8145 /* Is thread TP in the middle of (software or hardware)
8146 single-stepping? (Note the result of this function must never be
8147 passed directly as target_resume's STEP parameter.) */
8148
8149 static bool
8150 currently_stepping (struct thread_info *tp)
8151 {
8152 return ((tp->control.step_range_end
8153 && tp->control.step_resume_breakpoint == nullptr)
8154 || tp->control.trap_expected
8155 || tp->stepped_breakpoint
8156 || bpstat_should_step ());
8157 }
8158
8159 /* Inferior has stepped into a subroutine call with source code that
8160 we should not step over. Do step to the first line of code in
8161 it. */
8162
8163 static void
8164 handle_step_into_function (struct gdbarch *gdbarch,
8165 struct execution_control_state *ecs)
8166 {
8167 fill_in_stop_func (gdbarch, ecs);
8168
8169 compunit_symtab *cust
8170 = find_pc_compunit_symtab (ecs->event_thread->stop_pc ());
8171 if (cust != nullptr && cust->language () != language_asm)
8172 ecs->stop_func_start
8173 = gdbarch_skip_prologue_noexcept (gdbarch, ecs->stop_func_start);
8174
8175 symtab_and_line stop_func_sal = find_pc_line (ecs->stop_func_start, 0);
8176 /* Use the step_resume_break to step until the end of the prologue,
8177 even if that involves jumps (as it seems to on the vax under
8178 4.2). */
8179 /* If the prologue ends in the middle of a source line, continue to
8180 the end of that source line (if it is still within the function).
8181 Otherwise, just go to end of prologue. */
8182 if (stop_func_sal.end
8183 && stop_func_sal.pc != ecs->stop_func_start
8184 && stop_func_sal.end < ecs->stop_func_end)
8185 ecs->stop_func_start = stop_func_sal.end;
8186
8187 /* Architectures which require breakpoint adjustment might not be able
8188 to place a breakpoint at the computed address. If so, the test
8189 ``ecs->stop_func_start == stop_pc'' will never succeed. Adjust
8190 ecs->stop_func_start to an address at which a breakpoint may be
8191 legitimately placed.
8192
8193 Note: kevinb/2004-01-19: On FR-V, if this adjustment is not
8194 made, GDB will enter an infinite loop when stepping through
8195 optimized code consisting of VLIW instructions which contain
8196 subinstructions corresponding to different source lines. On
8197 FR-V, it's not permitted to place a breakpoint on any but the
8198 first subinstruction of a VLIW instruction. When a breakpoint is
8199 set, GDB will adjust the breakpoint address to the beginning of
8200 the VLIW instruction. Thus, we need to make the corresponding
8201 adjustment here when computing the stop address. */
8202
8203 if (gdbarch_adjust_breakpoint_address_p (gdbarch))
8204 {
8205 ecs->stop_func_start
8206 = gdbarch_adjust_breakpoint_address (gdbarch,
8207 ecs->stop_func_start);
8208 }
8209
8210 if (ecs->stop_func_start == ecs->event_thread->stop_pc ())
8211 {
8212 /* We are already there: stop now. */
8213 end_stepping_range (ecs);
8214 return;
8215 }
8216 else
8217 {
8218 /* Put the step-breakpoint there and go until there. */
8219 symtab_and_line sr_sal;
8220 sr_sal.pc = ecs->stop_func_start;
8221 sr_sal.section = find_pc_overlay (ecs->stop_func_start);
8222 sr_sal.pspace = get_frame_program_space (get_current_frame ());
8223
8224 /* Do not specify what the fp should be when we stop since on
8225 some machines the prologue is where the new fp value is
8226 established. */
8227 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal, null_frame_id);
8228
8229 /* And make sure stepping stops right away then. */
8230 ecs->event_thread->control.step_range_end
8231 = ecs->event_thread->control.step_range_start;
8232 }
8233 keep_going (ecs);
8234 }
8235
8236 /* Inferior has stepped backward into a subroutine call with source
8237 code that we should not step over. Do step to the beginning of the
8238 last line of code in it. */
8239
8240 static void
8241 handle_step_into_function_backward (struct gdbarch *gdbarch,
8242 struct execution_control_state *ecs)
8243 {
8244 struct compunit_symtab *cust;
8245 struct symtab_and_line stop_func_sal;
8246
8247 fill_in_stop_func (gdbarch, ecs);
8248
8249 cust = find_pc_compunit_symtab (ecs->event_thread->stop_pc ());
8250 if (cust != nullptr && cust->language () != language_asm)
8251 ecs->stop_func_start
8252 = gdbarch_skip_prologue_noexcept (gdbarch, ecs->stop_func_start);
8253
8254 stop_func_sal = find_pc_line (ecs->event_thread->stop_pc (), 0);
8255
8256 /* OK, we're just going to keep stepping here. */
8257 if (stop_func_sal.pc == ecs->event_thread->stop_pc ())
8258 {
8259 /* We're there already. Just stop stepping now. */
8260 end_stepping_range (ecs);
8261 }
8262 else
8263 {
8264 /* Else just reset the step range and keep going.
8265 No step-resume breakpoint, they don't work for
8266 epilogues, which can have multiple entry paths. */
8267 ecs->event_thread->control.step_range_start = stop_func_sal.pc;
8268 ecs->event_thread->control.step_range_end = stop_func_sal.end;
8269 keep_going (ecs);
8270 }
8271 return;
8272 }
8273
8274 /* Insert a "step-resume breakpoint" at SR_SAL with frame ID SR_ID.
8275 This is used to both functions and to skip over code. */
8276
8277 static void
8278 insert_step_resume_breakpoint_at_sal_1 (struct gdbarch *gdbarch,
8279 struct symtab_and_line sr_sal,
8280 struct frame_id sr_id,
8281 enum bptype sr_type)
8282 {
8283 /* There should never be more than one step-resume or longjmp-resume
8284 breakpoint per thread, so we should never be setting a new
8285 step_resume_breakpoint when one is already active. */
8286 gdb_assert (inferior_thread ()->control.step_resume_breakpoint == nullptr);
8287 gdb_assert (sr_type == bp_step_resume || sr_type == bp_hp_step_resume);
8288
8289 infrun_debug_printf ("inserting step-resume breakpoint at %s",
8290 paddress (gdbarch, sr_sal.pc));
8291
8292 inferior_thread ()->control.step_resume_breakpoint
8293 = set_momentary_breakpoint (gdbarch, sr_sal, sr_id, sr_type).release ();
8294 }
8295
8296 void
8297 insert_step_resume_breakpoint_at_sal (struct gdbarch *gdbarch,
8298 struct symtab_and_line sr_sal,
8299 struct frame_id sr_id)
8300 {
8301 insert_step_resume_breakpoint_at_sal_1 (gdbarch,
8302 sr_sal, sr_id,
8303 bp_step_resume);
8304 }
8305
8306 /* Insert a "high-priority step-resume breakpoint" at RETURN_FRAME.pc.
8307 This is used to skip a potential signal handler.
8308
8309 This is called with the interrupted function's frame. The signal
8310 handler, when it returns, will resume the interrupted function at
8311 RETURN_FRAME.pc. */
8312
8313 static void
8314 insert_hp_step_resume_breakpoint_at_frame (frame_info_ptr return_frame)
8315 {
8316 gdb_assert (return_frame != nullptr);
8317
8318 struct gdbarch *gdbarch = get_frame_arch (return_frame);
8319
8320 symtab_and_line sr_sal;
8321 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch, get_frame_pc (return_frame));
8322 sr_sal.section = find_pc_overlay (sr_sal.pc);
8323 sr_sal.pspace = get_frame_program_space (return_frame);
8324
8325 insert_step_resume_breakpoint_at_sal_1 (gdbarch, sr_sal,
8326 get_stack_frame_id (return_frame),
8327 bp_hp_step_resume);
8328 }
8329
8330 /* Insert a "step-resume breakpoint" at the previous frame's PC. This
8331 is used to skip a function after stepping into it (for "next" or if
8332 the called function has no debugging information).
8333
8334 The current function has almost always been reached by single
8335 stepping a call or return instruction. NEXT_FRAME belongs to the
8336 current function, and the breakpoint will be set at the caller's
8337 resume address.
8338
8339 This is a separate function rather than reusing
8340 insert_hp_step_resume_breakpoint_at_frame in order to avoid
8341 get_prev_frame, which may stop prematurely (see the implementation
8342 of frame_unwind_caller_id for an example). */
8343
8344 static void
8345 insert_step_resume_breakpoint_at_caller (frame_info_ptr next_frame)
8346 {
8347 /* We shouldn't have gotten here if we don't know where the call site
8348 is. */
8349 gdb_assert (frame_id_p (frame_unwind_caller_id (next_frame)));
8350
8351 struct gdbarch *gdbarch = frame_unwind_caller_arch (next_frame);
8352
8353 symtab_and_line sr_sal;
8354 sr_sal.pc = gdbarch_addr_bits_remove (gdbarch,
8355 frame_unwind_caller_pc (next_frame));
8356 sr_sal.section = find_pc_overlay (sr_sal.pc);
8357 sr_sal.pspace = frame_unwind_program_space (next_frame);
8358
8359 insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal,
8360 frame_unwind_caller_id (next_frame));
8361 }
8362
8363 /* Insert a "longjmp-resume" breakpoint at PC. This is used to set a
8364 new breakpoint at the target of a jmp_buf. The handling of
8365 longjmp-resume uses the same mechanisms used for handling
8366 "step-resume" breakpoints. */
8367
8368 static void
8369 insert_longjmp_resume_breakpoint (struct gdbarch *gdbarch, CORE_ADDR pc)
8370 {
8371 /* There should never be more than one longjmp-resume breakpoint per
8372 thread, so we should never be setting a new
8373 longjmp_resume_breakpoint when one is already active. */
8374 gdb_assert (inferior_thread ()->control.exception_resume_breakpoint == nullptr);
8375
8376 infrun_debug_printf ("inserting longjmp-resume breakpoint at %s",
8377 paddress (gdbarch, pc));
8378
8379 inferior_thread ()->control.exception_resume_breakpoint =
8380 set_momentary_breakpoint_at_pc (gdbarch, pc, bp_longjmp_resume).release ();
8381 }
8382
8383 /* Insert an exception resume breakpoint. TP is the thread throwing
8384 the exception. The block B is the block of the unwinder debug hook
8385 function. FRAME is the frame corresponding to the call to this
8386 function. SYM is the symbol of the function argument holding the
8387 target PC of the exception. */
8388
8389 static void
8390 insert_exception_resume_breakpoint (struct thread_info *tp,
8391 const struct block *b,
8392 frame_info_ptr frame,
8393 struct symbol *sym)
8394 {
8395 try
8396 {
8397 struct block_symbol vsym;
8398 struct value *value;
8399 CORE_ADDR handler;
8400 struct breakpoint *bp;
8401
8402 vsym = lookup_symbol_search_name (sym->search_name (),
8403 b, VAR_DOMAIN);
8404 value = read_var_value (vsym.symbol, vsym.block, frame);
8405 /* If the value was optimized out, revert to the old behavior. */
8406 if (! value->optimized_out ())
8407 {
8408 handler = value_as_address (value);
8409
8410 infrun_debug_printf ("exception resume at %lx",
8411 (unsigned long) handler);
8412
8413 /* set_momentary_breakpoint_at_pc creates a thread-specific
8414 breakpoint for the current inferior thread. */
8415 gdb_assert (tp == inferior_thread ());
8416 bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
8417 handler,
8418 bp_exception_resume).release ();
8419
8420 /* set_momentary_breakpoint_at_pc invalidates FRAME. */
8421 frame = nullptr;
8422
8423 tp->control.exception_resume_breakpoint = bp;
8424 }
8425 }
8426 catch (const gdb_exception_error &e)
8427 {
8428 /* We want to ignore errors here. */
8429 }
8430 }
8431
8432 /* A helper for check_exception_resume that sets an
8433 exception-breakpoint based on a SystemTap probe. */
8434
8435 static void
8436 insert_exception_resume_from_probe (struct thread_info *tp,
8437 const struct bound_probe *probe,
8438 frame_info_ptr frame)
8439 {
8440 struct value *arg_value;
8441 CORE_ADDR handler;
8442 struct breakpoint *bp;
8443
8444 arg_value = probe_safe_evaluate_at_pc (frame, 1);
8445 if (!arg_value)
8446 return;
8447
8448 handler = value_as_address (arg_value);
8449
8450 infrun_debug_printf ("exception resume at %s",
8451 paddress (probe->objfile->arch (), handler));
8452
8453 /* set_momentary_breakpoint_at_pc creates a thread-specific breakpoint
8454 for the current inferior thread. */
8455 gdb_assert (tp == inferior_thread ());
8456 bp = set_momentary_breakpoint_at_pc (get_frame_arch (frame),
8457 handler, bp_exception_resume).release ();
8458 tp->control.exception_resume_breakpoint = bp;
8459 }
8460
8461 /* This is called when an exception has been intercepted. Check to
8462 see whether the exception's destination is of interest, and if so,
8463 set an exception resume breakpoint there. */
8464
static void
check_exception_resume (struct execution_control_state *ecs,
			frame_info_ptr frame)
{
  struct bound_probe probe;
  struct symbol *func;

  /* First see if this exception unwinding breakpoint was set via a
     SystemTap probe point.  If so, the probe has two arguments: the
     CFA and the HANDLER.  We ignore the CFA, extract the handler, and
     set a breakpoint there.  */
  probe = find_probe_by_pc (get_frame_pc (frame));
  if (probe.prob)
    {
      insert_exception_resume_from_probe (ecs->event_thread, &probe, frame);
      return;
    }

  /* No probe found; fall back to inspecting the debug hook's
     arguments, which requires debug info for the function.  */
  func = get_frame_function (frame);
  if (!func)
    return;

  try
    {
      const struct block *b;
      int argno = 0;

      /* The exception breakpoint is a thread-specific breakpoint on
	 the unwinder's debug hook, declared as:

	 void _Unwind_DebugHook (void *cfa, void *handler);

	 The CFA argument indicates the frame to which control is
	 about to be transferred.  HANDLER is the destination PC.

	 We ignore the CFA and set a temporary breakpoint at HANDLER.
	 This is not extremely efficient but it avoids issues in gdb
	 with computing the DWARF CFA, and it also works even in weird
	 cases such as throwing an exception from inside a signal
	 handler.  */

      b = func->value_block ();
      for (struct symbol *sym : block_iterator_range (b))
	{
	  if (!sym->is_argument ())
	    continue;

	  /* Skip the first argument (the CFA); the second argument
	     is the handler.  */
	  if (argno == 0)
	    ++argno;
	  else
	    {
	      insert_exception_resume_breakpoint (ecs->event_thread,
						  b, frame, sym);
	      break;
	    }
	}
    }
  catch (const gdb_exception_error &e)
    {
      /* Failure to find the handler is not fatal; simply don't set
	 the breakpoint.  */
    }
}
8526
/* Stop the wait-for-events loop: after this, fetch_inferior_event
   will not call target_wait again for this stop.  */

static void
stop_waiting (struct execution_control_state *ecs)
{
  infrun_debug_printf ("stop_waiting");

  /* Let callers know we don't want to wait for the inferior anymore.  */
  ecs->wait_some_more = 0;
}
8535
8536 /* Like keep_going, but passes the signal to the inferior, even if the
8537 signal is set to nopass. */
8538
static void
keep_going_pass_signal (struct execution_control_state *ecs)
{
  gdb_assert (ecs->event_thread->ptid == inferior_ptid);
  gdb_assert (!ecs->event_thread->resumed ());

  /* Save the pc before execution, to compare with pc after stop.  */
  ecs->event_thread->prev_pc
    = regcache_read_pc_protected (get_thread_regcache (ecs->event_thread));

  if (ecs->event_thread->control.trap_expected)
    {
      struct thread_info *tp = ecs->event_thread;

      infrun_debug_printf ("%s has trap_expected set, "
			   "resuming to collect trap",
			   tp->ptid.to_string ().c_str ());

      /* We haven't yet gotten our trap, and either: intercepted a
	 non-signal event (e.g., a fork); or took a signal which we
	 are supposed to pass through to the inferior.  Simply
	 continue.  */
      resume (ecs->event_thread->stop_signal ());
    }
  else if (step_over_info_valid_p ())
    {
      /* Another thread is stepping over a breakpoint in-line.  If
	 this thread needs a step-over too, queue the request.  In
	 either case, this resume must be deferred for later.  */
      struct thread_info *tp = ecs->event_thread;

      if (ecs->hit_singlestep_breakpoint
	  || thread_still_needs_step_over (tp))
	{
	  infrun_debug_printf ("step-over already in progress: "
			       "step-over for %s deferred",
			       tp->ptid.to_string ().c_str ());
	  global_thread_step_over_chain_enqueue (tp);
	}
      else
	infrun_debug_printf ("step-over in progress: resume of %s deferred",
			     tp->ptid.to_string ().c_str ());
    }
  else
    {
      struct regcache *regcache = get_current_regcache ();
      int remove_bp;
      int remove_wps;
      step_over_what step_what;

      /* Either the trap was not expected, but we are continuing
	 anyway (if we got a signal, the user asked it be passed to
	 the child)
	 -- or --
	 We got our expected trap, but decided we should resume from
	 it.

	 We're going to run this baby now!

	 Note that insert_breakpoints won't try to re-insert
	 already inserted breakpoints.  Therefore, we don't
	 care if breakpoints were already inserted, or not.  */

      /* If we need to step over a breakpoint, and we're not using
	 displaced stepping to do so, insert all breakpoints
	 (watchpoints, etc.) but the one we're stepping over, step one
	 instruction, and then re-insert the breakpoint when that step
	 is finished.  */

      step_what = thread_still_needs_step_over (ecs->event_thread);

      remove_bp = (ecs->hit_singlestep_breakpoint
		   || (step_what & STEP_OVER_BREAKPOINT));
      remove_wps = (step_what & STEP_OVER_WATCHPOINT);

      /* We can't use displaced stepping if we need to step past a
	 watchpoint.  The instruction copied to the scratch pad would
	 still trigger the watchpoint.  */
      if (remove_bp
	  && (remove_wps || !use_displaced_stepping (ecs->event_thread)))
	{
	  set_step_over_info (regcache->aspace (),
			      regcache_read_pc (regcache), remove_wps,
			      ecs->event_thread->global_num);
	}
      else if (remove_wps)
	set_step_over_info (nullptr, 0, remove_wps, -1);

      /* If we now need to do an in-line step-over, we need to stop
	 all other threads.  Note this must be done before
	 insert_breakpoints below, because that removes the breakpoint
	 we're about to step over, otherwise other threads could miss
	 it.  */
      if (step_over_info_valid_p () && target_is_non_stop_p ())
	stop_all_threads ("starting in-line step-over");

      /* Stop stepping if inserting breakpoints fails.  */
      try
	{
	  insert_breakpoints ();
	}
      catch (const gdb_exception_error &e)
	{
	  exception_print (gdb_stderr, e);
	  stop_waiting (ecs);
	  clear_step_over_info ();
	  return;
	}

      /* Expect a trap on the next stop iff we are stepping past a
	 breakpoint or watchpoint.  */
      ecs->event_thread->control.trap_expected = (remove_bp || remove_wps);

      resume (ecs->event_thread->stop_signal ());
    }

  prepare_to_wait (ecs);
}
8655
8656 /* Called when we should continue running the inferior, because the
8657 current event doesn't cause a user visible stop. This does the
8658 resuming part; waiting for the next event is done elsewhere. */
8659
8660 static void
8661 keep_going (struct execution_control_state *ecs)
8662 {
8663 if (ecs->event_thread->control.trap_expected
8664 && ecs->event_thread->stop_signal () == GDB_SIGNAL_TRAP)
8665 ecs->event_thread->control.trap_expected = 0;
8666
8667 if (!signal_program[ecs->event_thread->stop_signal ()])
8668 ecs->event_thread->set_stop_signal (GDB_SIGNAL_0);
8669 keep_going_pass_signal (ecs);
8670 }
8671
8672 /* This function normally comes after a resume, before
8673 handle_inferior_event exits. It takes care of any last bits of
8674 housekeeping, and sets the all-important wait_some_more flag. */
8675
static void
prepare_to_wait (struct execution_control_state *ecs)
{
  infrun_debug_printf ("prepare_to_wait");

  /* Ask the caller to keep pumping target events.  */
  ecs->wait_some_more = 1;

  /* If the target can't async, emulate it by marking the infrun event
     handler such that as soon as we get back to the event-loop, we
     immediately end up in fetch_inferior_event again calling
     target_wait.  */
  if (!target_can_async_p ())
    mark_infrun_async_event_handler ();
}
8690
8691 /* We are done with the step range of a step/next/si/ni command.
8692 Called once for each n of a "step n" operation. */
8693
static void
end_stepping_range (struct execution_control_state *ecs)
{
  /* Record that the stepping command completed, then stop waiting
     for more target events.  */
  ecs->event_thread->control.stop_step = 1;
  stop_waiting (ecs);
}
8700
8701 /* Several print_*_reason functions to print why the inferior has stopped.
8702 We always print something when the inferior exits, or receives a signal.
8703 The rest of the cases are dealt with later on in normal_stop and
8704 print_it_typical. Ideally there should be a call to one of these
   print_*_reason functions from handle_inferior_event each time
8706 stop_waiting is called.
8707
8708 Note that we don't call these directly, instead we delegate that to
8709 the interpreters, through observers. Interpreters then call these
8710 with whatever uiout is right. */
8711
void
print_signal_exited_reason (struct ui_out *uiout, enum gdb_signal siggnal)
{
  annotate_signalled ();
  /* For MI, also emit the machine-readable "reason" field.  */
  if (uiout->is_mi_like_p ())
    uiout->field_string
      ("reason", async_reason_lookup (EXEC_ASYNC_EXITED_SIGNALLED));
  uiout->text ("\nProgram terminated with signal ");
  annotate_signal_name ();
  uiout->field_string ("signal-name",
		       gdb_signal_to_name (siggnal));
  annotate_signal_name_end ();
  uiout->text (", ");
  annotate_signal_string ();
  uiout->field_string ("signal-meaning",
		       gdb_signal_to_string (siggnal));
  annotate_signal_string_end ();
  uiout->text (".\n");
  uiout->text ("The program no longer exists.\n");
}
8732
void
print_exited_reason (struct ui_out *uiout, int exitstatus)
{
  struct inferior *inf = current_inferior ();
  std::string pidstr = target_pid_to_str (ptid_t (inf->pid));

  annotate_exited (exitstatus);
  if (exitstatus)
    {
      if (uiout->is_mi_like_p ())
	uiout->field_string ("reason", async_reason_lookup (EXEC_ASYNC_EXITED));
      /* The exit code is printed in octal, with a leading "0".  */
      std::string exit_code_str
	= string_printf ("0%o", (unsigned int) exitstatus);
      uiout->message ("[Inferior %s (%s) exited with code %pF]\n",
		      plongest (inf->num), pidstr.c_str (),
		      string_field ("exit-code", exit_code_str.c_str ()));
    }
  else
    {
      if (uiout->is_mi_like_p ())
	uiout->field_string
	  ("reason", async_reason_lookup (EXEC_ASYNC_EXITED_NORMALLY));
      uiout->message ("[Inferior %s (%s) exited normally]\n",
		      plongest (inf->num), pidstr.c_str ());
    }
}
8759
void
print_signal_received_reason (struct ui_out *uiout, enum gdb_signal siggnal)
{
  struct thread_info *thr = inferior_thread ();

  infrun_debug_printf ("signal = %s", gdb_signal_to_string (siggnal));

  annotate_signal ();

  /* MI emits no thread/program prefix here; its "reason" field is
     emitted further below.  */
  if (uiout->is_mi_like_p ())
    ;
  else if (show_thread_that_caused_stop ())
    {
      uiout->text ("\nThread ");
      uiout->field_string ("thread-id", print_thread_id (thr));

      const char *name = thread_name (thr);
      if (name != nullptr)
	{
	  uiout->text (" \"");
	  uiout->field_string ("name", name);
	  uiout->text ("\"");
	}
    }
  else
    uiout->text ("\nProgram");

  if (siggnal == GDB_SIGNAL_0 && !uiout->is_mi_like_p ())
    uiout->text (" stopped");
  else
    {
      uiout->text (" received signal ");
      annotate_signal_name ();
      if (uiout->is_mi_like_p ())
	uiout->field_string
	  ("reason", async_reason_lookup (EXEC_ASYNC_SIGNAL_RECEIVED));
      uiout->field_string ("signal-name", gdb_signal_to_name (siggnal));
      annotate_signal_name_end ();
      uiout->text (", ");
      annotate_signal_string ();
      uiout->field_string ("signal-meaning", gdb_signal_to_string (siggnal));

      /* Let the architecture append extra detail about the signal,
	 if it provides a hook for that.  */
      struct regcache *regcache = get_current_regcache ();
      struct gdbarch *gdbarch = regcache->arch ();
      if (gdbarch_report_signal_info_p (gdbarch))
	gdbarch_report_signal_info (gdbarch, uiout, siggnal);

      annotate_signal_string_end ();
    }
  uiout->text (".\n");
}
8811
8812 void
8813 print_no_history_reason (struct ui_out *uiout)
8814 {
8815 if (uiout->is_mi_like_p ())
8816 uiout->field_string ("reason", async_reason_lookup (EXEC_ASYNC_NO_HISTORY));
8817 else
8818 uiout->text ("\nNo more reverse-execution history.\n");
8819 }
8820
8821 /* Print current location without a level number, if we have changed
8822 functions or hit a breakpoint. Print source line if we have one.
8823 bpstat_print contains the logic deciding in detail what to print,
8824 based on the event(s) that just occurred. */
8825
static void
print_stop_location (const target_waitstatus &ws)
{
  int bpstat_ret;
  enum print_what source_flag;
  int do_frame_printing = 1;
  struct thread_info *tp = inferior_thread ();

  /* Let the breakpoint machinery decide how much to print; fall back
     to our own heuristics when it returns PRINT_UNKNOWN.  */
  bpstat_ret = bpstat_print (tp->control.stop_bpstat, ws.kind ());
  switch (bpstat_ret)
    {
    case PRINT_UNKNOWN:
      /* FIXME: cagney/2002-12-01: Given that a frame ID does (or
	 should) carry around the function and does (or should) use
	 that when doing a frame comparison.  */
      if (tp->control.stop_step
	  && (tp->control.step_frame_id
	      == get_frame_id (get_current_frame ()))
	  && (tp->control.step_start_function
	      == find_pc_function (tp->stop_pc ())))
	{
	  /* Finished step, just print source line.  */
	  source_flag = SRC_LINE;
	}
      else
	{
	  /* Print location and source line.  */
	  source_flag = SRC_AND_LOC;
	}
      break;
    case PRINT_SRC_AND_LOC:
      /* Print location and source line.  */
      source_flag = SRC_AND_LOC;
      break;
    case PRINT_SRC_ONLY:
      source_flag = SRC_LINE;
      break;
    case PRINT_NOTHING:
      /* Something bogus.  */
      source_flag = SRC_LINE;
      do_frame_printing = 0;
      break;
    default:
      internal_error (_("Unknown value."));
    }

  /* The behavior of this routine with respect to the source
     flag is:
     SRC_LINE: Print only source line
     LOCATION: Print only location
     SRC_AND_LOC: Print location and source line.  */
  if (do_frame_printing)
    print_stack_frame (get_selected_frame (nullptr), 0, source_flag, 1);
}
8880
8881 /* See infrun.h. */
8882
8883 void
8884 print_stop_event (struct ui_out *uiout, bool displays)
8885 {
8886 struct target_waitstatus last;
8887 struct thread_info *tp;
8888
8889 get_last_target_status (nullptr, nullptr, &last);
8890
8891 {
8892 scoped_restore save_uiout = make_scoped_restore (&current_uiout, uiout);
8893
8894 print_stop_location (last);
8895
8896 /* Display the auto-display expressions. */
8897 if (displays)
8898 do_displays ();
8899 }
8900
8901 tp = inferior_thread ();
8902 if (tp->thread_fsm () != nullptr
8903 && tp->thread_fsm ()->finished_p ())
8904 {
8905 struct return_value_info *rv;
8906
8907 rv = tp->thread_fsm ()->return_value ();
8908 if (rv != nullptr)
8909 print_return_value (uiout, rv);
8910 }
8911 }
8912
8913 /* See infrun.h. */
8914
8915 void
8916 maybe_remove_breakpoints (void)
8917 {
8918 if (!breakpoints_should_be_inserted_now () && target_has_execution ())
8919 {
8920 if (remove_breakpoints ())
8921 {
8922 target_terminal::ours_for_output ();
8923 gdb_printf (_("Cannot remove breakpoints because "
8924 "program is no longer writable.\nFurther "
8925 "execution is probably impossible.\n"));
8926 }
8927 }
8928 }
8929
8930 /* The execution context that just caused a normal stop. */
8931
struct stop_context
{
  stop_context ();

  DISABLE_COPY_AND_ASSIGN (stop_context);

  /* Return true if the current execution context no longer matches
     this saved one.  */
  bool changed () const;

  /* The stop ID.  */
  ULONGEST stop_id;

  /* The event PTID.  */

  ptid_t ptid;

  /* If stopped for a thread event, this is the thread that caused the
     stop.  */
  thread_info_ref thread;

  /* The inferior that caused the stop.  */
  int inf_num;
};
8954
8955 /* Initializes a new stop context. If stopped for a thread event, this
8956 takes a strong reference to the thread. */
8957
8958 stop_context::stop_context ()
8959 {
8960 stop_id = get_stop_id ();
8961 ptid = inferior_ptid;
8962 inf_num = current_inferior ()->num;
8963
8964 if (inferior_ptid != null_ptid)
8965 {
8966 /* Take a strong reference so that the thread can't be deleted
8967 yet. */
8968 thread = thread_info_ref::new_reference (inferior_thread ());
8969 }
8970 }
8971
8972 /* Return true if the current context no longer matches the saved stop
8973 context. */
8974
8975 bool
8976 stop_context::changed () const
8977 {
8978 if (ptid != inferior_ptid)
8979 return true;
8980 if (inf_num != current_inferior ()->num)
8981 return true;
8982 if (thread != nullptr && thread->state != THREAD_STOPPED)
8983 return true;
8984 if (get_stop_id () != stop_id)
8985 return true;
8986 return false;
8987 }
8988
8989 /* See infrun.h. */
8990
bool
normal_stop ()
{
  struct target_waitstatus last;

  get_last_target_status (nullptr, nullptr, &last);

  /* Bump the stop counter; stop_context instances saved below compare
     against it to detect whether the stop hook resumed the target.  */
  new_stop_id ();

  /* If an exception is thrown from this point on, make sure to
     propagate GDB's knowledge of the executing state to the
     frontend/user running state.  A QUIT is an easy exception to see
     here, so do this before any filtered output.  */

  ptid_t finish_ptid = null_ptid;

  if (!non_stop)
    finish_ptid = minus_one_ptid;
  else if (last.kind () == TARGET_WAITKIND_SIGNALLED
	   || last.kind () == TARGET_WAITKIND_EXITED)
    {
      /* On some targets, we may still have live threads in the
	 inferior when we get a process exit event.  E.g., for
	 "checkpoint", when the current checkpoint/fork exits,
	 linux-fork.c automatically switches to another fork from
	 within target_mourn_inferior.  */
      if (inferior_ptid != null_ptid)
	finish_ptid = ptid_t (inferior_ptid.pid ());
    }
  else if (last.kind () != TARGET_WAITKIND_NO_RESUMED)
    finish_ptid = inferior_ptid;

  gdb::optional<scoped_finish_thread_state> maybe_finish_thread_state;
  if (finish_ptid != null_ptid)
    {
      maybe_finish_thread_state.emplace
	(user_visible_resume_target (finish_ptid), finish_ptid);
    }

  /* As we're presenting a stop, and potentially removing breakpoints,
     update the thread list so we can tell whether there are threads
     running on the target.  With target remote, for example, we can
     only learn about new threads when we explicitly update the thread
     list.  Do this before notifying the interpreters about signal
     stops, end of stepping ranges, etc., so that the "new thread"
     output is emitted before e.g., "Program received signal FOO",
     instead of after.  */
  update_thread_list ();

  if (last.kind () == TARGET_WAITKIND_STOPPED && stopped_by_random_signal)
    notify_signal_received (inferior_thread ()->stop_signal ());

  /* As with the notification of thread events, we want to delay
     notifying the user that we've switched thread context until
     the inferior actually stops.

     There's no point in saying anything if the inferior has exited.
     Note that SIGNALLED here means "exited with a signal", not
     "received a signal".

     Also skip saying anything in non-stop mode.  In that mode, as we
     don't want GDB to switch threads behind the user's back, to avoid
     races where the user is typing a command to apply to thread x,
     but GDB switches to thread y before the user finishes entering
     the command, fetch_inferior_event installs a cleanup to restore
     the current thread back to the thread the user had selected right
     after this event is handled, so we're not really switching, only
     informing of a stop.  */
  if (!non_stop)
    {
      if ((last.kind () != TARGET_WAITKIND_SIGNALLED
	   && last.kind () != TARGET_WAITKIND_EXITED
	   && last.kind () != TARGET_WAITKIND_NO_RESUMED)
	  && target_has_execution ()
	  && previous_thread != inferior_thread ())
	{
	  SWITCH_THRU_ALL_UIS ()
	    {
	      target_terminal::ours_for_output ();
	      gdb_printf (_("[Switching to %s]\n"),
			  target_pid_to_str (inferior_ptid).c_str ());
	      annotate_thread_changed ();
	    }
	}

      update_previous_thread ();
    }

  if (last.kind () == TARGET_WAITKIND_NO_RESUMED)
    {
      SWITCH_THRU_ALL_UIS ()
	if (current_ui->prompt_state == PROMPT_BLOCKED)
	  {
	    target_terminal::ours_for_output ();
	    gdb_printf (_("No unwaited-for children left.\n"));
	  }
    }

  /* Note: this depends on the update_thread_list call above.  */
  maybe_remove_breakpoints ();

  /* If an auto-display called a function and that got a signal,
     delete that auto-display to avoid an infinite recursion.  */

  if (stopped_by_random_signal)
    disable_current_display ();

  SWITCH_THRU_ALL_UIS ()
    {
      async_enable_stdin ();
    }

  /* Let the user/frontend see the threads as stopped.  */
  maybe_finish_thread_state.reset ();

  /* Select innermost stack frame - i.e., current frame is frame 0,
     and current location is based on that.  Handle the case where the
     dummy call is returning after being stopped.  E.g. the dummy call
     previously hit a breakpoint.  (If the dummy call returns
     normally, we won't reach here.)  Do this before the stop hook is
     run, so that it doesn't get to see the temporary dummy frame,
     which is not where we'll present the stop.  */
  if (has_stack_frames ())
    {
      if (stop_stack_dummy == STOP_STACK_DUMMY)
	{
	  /* Pop the empty frame that contains the stack dummy.  This
	     also restores inferior state prior to the call (struct
	     infcall_suspend_state).  */
	  frame_info_ptr frame = get_current_frame ();

	  gdb_assert (get_frame_type (frame) == DUMMY_FRAME);
	  frame_pop (frame);
	  /* frame_pop calls reinit_frame_cache as the last thing it
	     does which means there's now no selected frame.  */
	}

      select_frame (get_current_frame ());

      /* Set the current source location.  */
      set_current_sal_from_frame (get_current_frame ());
    }

  /* Look up the hook_stop and run it (CLI internally handles problem
     of stop_command's pre-hook not existing).  */
  stop_context saved_context;

  try
    {
      execute_cmd_pre_hook (stop_command);
    }
  catch (const gdb_exception_error &ex)
    {
      exception_fprintf (gdb_stderr, ex,
			 "Error while running hook_stop:\n");
    }

  /* If the stop hook resumes the target, then there's no point in
     trying to notify about the previous stop; its context is
     gone.  Likewise if the command switches thread or inferior --
     the observers would print a stop for the wrong
     thread/inferior.  */
  if (saved_context.changed ())
    return true;

  /* Notify observers about the stop.  This is where the interpreters
     print the stop event.  */
  notify_normal_stop ((inferior_ptid != null_ptid
		       ? inferior_thread ()->control.stop_bpstat
		       : nullptr),
		      stop_print_frame);
  annotate_stopped ();

  if (target_has_execution ())
    {
      if (last.kind () != TARGET_WAITKIND_SIGNALLED
	  && last.kind () != TARGET_WAITKIND_EXITED
	  && last.kind () != TARGET_WAITKIND_NO_RESUMED)
	/* Delete the breakpoint we stopped at, if it wants to be deleted.
	   Delete any breakpoint that is to be deleted at the next stop.  */
	breakpoint_auto_delete (inferior_thread ()->control.stop_bpstat);
    }

  return false;
}
9176 \f
/* Return the current "stop" setting recorded for signal SIGNO.  */

int
signal_stop_state (int signo)
{
  return signal_stop[signo];
}
9182
/* Return the current "print" setting recorded for signal SIGNO.  */

int
signal_print_state (int signo)
{
  return signal_print[signo];
}
9188
/* Return the current "pass to program" setting recorded for signal
   SIGNO.  */

int
signal_pass_state (int signo)
{
  return signal_program[signo];
}
9194
9195 static void
9196 signal_cache_update (int signo)
9197 {
9198 if (signo == -1)
9199 {
9200 for (signo = 0; signo < (int) GDB_SIGNAL_LAST; signo++)
9201 signal_cache_update (signo);
9202
9203 return;
9204 }
9205
9206 signal_pass[signo] = (signal_stop[signo] == 0
9207 && signal_print[signo] == 0
9208 && signal_program[signo] == 1
9209 && signal_catch[signo] == 0);
9210 }
9211
9212 int
9213 signal_stop_update (int signo, int state)
9214 {
9215 int ret = signal_stop[signo];
9216
9217 signal_stop[signo] = state;
9218 signal_cache_update (signo);
9219 return ret;
9220 }
9221
9222 int
9223 signal_print_update (int signo, int state)
9224 {
9225 int ret = signal_print[signo];
9226
9227 signal_print[signo] = state;
9228 signal_cache_update (signo);
9229 return ret;
9230 }
9231
9232 int
9233 signal_pass_update (int signo, int state)
9234 {
9235 int ret = signal_program[signo];
9236
9237 signal_program[signo] = state;
9238 signal_cache_update (signo);
9239 return ret;
9240 }
9241
9242 /* Update the global 'signal_catch' from INFO and notify the
9243 target. */
9244
9245 void
9246 signal_catch_update (const unsigned int *info)
9247 {
9248 int i;
9249
9250 for (i = 0; i < GDB_SIGNAL_LAST; ++i)
9251 signal_catch[i] = info[i] > 0;
9252 signal_cache_update (-1);
9253 target_pass_signals (signal_pass);
9254 }
9255
/* Print the column headers for the signal-disposition table produced
   by sig_print_info.  */

static void
sig_print_header (void)
{
  gdb_printf (_("Signal Stop\tPrint\tPass "
		"to program\tDescription\n"));
}
9262
9263 static void
9264 sig_print_info (enum gdb_signal oursig)
9265 {
9266 const char *name = gdb_signal_to_name (oursig);
9267 int name_padding = 13 - strlen (name);
9268
9269 if (name_padding <= 0)
9270 name_padding = 0;
9271
9272 gdb_printf ("%s", name);
9273 gdb_printf ("%*.*s ", name_padding, name_padding, " ");
9274 gdb_printf ("%s\t", signal_stop[oursig] ? "Yes" : "No");
9275 gdb_printf ("%s\t", signal_print[oursig] ? "Yes" : "No");
9276 gdb_printf ("%s\t\t", signal_program[oursig] ? "Yes" : "No");
9277 gdb_printf ("%s\n", gdb_signal_to_string (oursig));
9278 }
9279
9280 /* Specify how various signals in the inferior should be handled. */
9281
9282 static void
9283 handle_command (const char *args, int from_tty)
9284 {
9285 int digits, wordlen;
9286 int sigfirst, siglast;
9287 enum gdb_signal oursig;
9288 int allsigs;
9289
9290 if (args == nullptr)
9291 {
9292 error_no_arg (_("signal to handle"));
9293 }
9294
9295 /* Allocate and zero an array of flags for which signals to handle. */
9296
9297 const size_t nsigs = GDB_SIGNAL_LAST;
9298 unsigned char sigs[nsigs] {};
9299
9300 /* Break the command line up into args. */
9301
9302 gdb_argv built_argv (args);
9303
9304 /* Walk through the args, looking for signal oursigs, signal names, and
9305 actions. Signal numbers and signal names may be interspersed with
9306 actions, with the actions being performed for all signals cumulatively
9307 specified. Signal ranges can be specified as <LOW>-<HIGH>. */
9308
9309 for (char *arg : built_argv)
9310 {
9311 wordlen = strlen (arg);
9312 for (digits = 0; isdigit (arg[digits]); digits++)
9313 {;
9314 }
9315 allsigs = 0;
9316 sigfirst = siglast = -1;
9317
9318 if (wordlen >= 1 && !strncmp (arg, "all", wordlen))
9319 {
9320 /* Apply action to all signals except those used by the
9321 debugger. Silently skip those. */
9322 allsigs = 1;
9323 sigfirst = 0;
9324 siglast = nsigs - 1;
9325 }
9326 else if (wordlen >= 1 && !strncmp (arg, "stop", wordlen))
9327 {
9328 SET_SIGS (nsigs, sigs, signal_stop);
9329 SET_SIGS (nsigs, sigs, signal_print);
9330 }
9331 else if (wordlen >= 1 && !strncmp (arg, "ignore", wordlen))
9332 {
9333 UNSET_SIGS (nsigs, sigs, signal_program);
9334 }
9335 else if (wordlen >= 2 && !strncmp (arg, "print", wordlen))
9336 {
9337 SET_SIGS (nsigs, sigs, signal_print);
9338 }
9339 else if (wordlen >= 2 && !strncmp (arg, "pass", wordlen))
9340 {
9341 SET_SIGS (nsigs, sigs, signal_program);
9342 }
9343 else if (wordlen >= 3 && !strncmp (arg, "nostop", wordlen))
9344 {
9345 UNSET_SIGS (nsigs, sigs, signal_stop);
9346 }
9347 else if (wordlen >= 3 && !strncmp (arg, "noignore", wordlen))
9348 {
9349 SET_SIGS (nsigs, sigs, signal_program);
9350 }
9351 else if (wordlen >= 4 && !strncmp (arg, "noprint", wordlen))
9352 {
9353 UNSET_SIGS (nsigs, sigs, signal_print);
9354 UNSET_SIGS (nsigs, sigs, signal_stop);
9355 }
9356 else if (wordlen >= 4 && !strncmp (arg, "nopass", wordlen))
9357 {
9358 UNSET_SIGS (nsigs, sigs, signal_program);
9359 }
9360 else if (digits > 0)
9361 {
9362 /* It is numeric. The numeric signal refers to our own
9363 internal signal numbering from target.h, not to host/target
9364 signal number. This is a feature; users really should be
9365 using symbolic names anyway, and the common ones like
9366 SIGHUP, SIGINT, SIGALRM, etc. will work right anyway. */
9367
9368 sigfirst = siglast = (int)
9369 gdb_signal_from_command (atoi (arg));
9370 if (arg[digits] == '-')
9371 {
9372 siglast = (int)
9373 gdb_signal_from_command (atoi (arg + digits + 1));
9374 }
9375 if (sigfirst > siglast)
9376 {
9377 /* Bet he didn't figure we'd think of this case... */
9378 std::swap (sigfirst, siglast);
9379 }
9380 }
9381 else
9382 {
9383 oursig = gdb_signal_from_name (arg);
9384 if (oursig != GDB_SIGNAL_UNKNOWN)
9385 {
9386 sigfirst = siglast = (int) oursig;
9387 }
9388 else
9389 {
9390 /* Not a number and not a recognized flag word => complain. */
9391 error (_("Unrecognized or ambiguous flag word: \"%s\"."), arg);
9392 }
9393 }
9394
9395 /* If any signal numbers or symbol names were found, set flags for
9396 which signals to apply actions to. */
9397
9398 for (int signum = sigfirst; signum >= 0 && signum <= siglast; signum++)
9399 {
9400 switch ((enum gdb_signal) signum)
9401 {
9402 case GDB_SIGNAL_TRAP:
9403 case GDB_SIGNAL_INT:
9404 if (!allsigs && !sigs[signum])
9405 {
9406 if (query (_("%s is used by the debugger.\n\
9407 Are you sure you want to change it? "),
9408 gdb_signal_to_name ((enum gdb_signal) signum)))
9409 {
9410 sigs[signum] = 1;
9411 }
9412 else
9413 gdb_printf (_("Not confirmed, unchanged.\n"));
9414 }
9415 break;
9416 case GDB_SIGNAL_0:
9417 case GDB_SIGNAL_DEFAULT:
9418 case GDB_SIGNAL_UNKNOWN:
9419 /* Make sure that "all" doesn't print these. */
9420 break;
9421 default:
9422 sigs[signum] = 1;
9423 break;
9424 }
9425 }
9426 }
9427
9428 for (int signum = 0; signum < nsigs; signum++)
9429 if (sigs[signum])
9430 {
9431 signal_cache_update (-1);
9432 target_pass_signals (signal_pass);
9433 target_program_signals (signal_program);
9434
9435 if (from_tty)
9436 {
9437 /* Show the results. */
9438 sig_print_header ();
9439 for (; signum < nsigs; signum++)
9440 if (sigs[signum])
9441 sig_print_info ((enum gdb_signal) signum);
9442 }
9443
9444 break;
9445 }
9446 }
9447
9448 /* Complete the "handle" command. */
9449
9450 static void
9451 handle_completer (struct cmd_list_element *ignore,
9452 completion_tracker &tracker,
9453 const char *text, const char *word)
9454 {
9455 static const char * const keywords[] =
9456 {
9457 "all",
9458 "stop",
9459 "ignore",
9460 "print",
9461 "pass",
9462 "nostop",
9463 "noignore",
9464 "noprint",
9465 "nopass",
9466 nullptr,
9467 };
9468
9469 signal_completer (ignore, tracker, text, word);
9470 complete_on_enum (tracker, keywords, word, word);
9471 }
9472
9473 enum gdb_signal
9474 gdb_signal_from_command (int num)
9475 {
9476 if (num >= 1 && num <= 15)
9477 return (enum gdb_signal) num;
9478 error (_("Only signals 1-15 are valid as numeric signals.\n\
9479 Use \"info signals\" for a list of symbolic signals."));
9480 }
9481
9482 /* Print current contents of the tables set by the handle command.
9483 It is possible we should just be printing signals actually used
9484 by the current target (but for things to work right when switching
9485 targets, all signals should be in the signal tables). */
9486
9487 static void
9488 info_signals_command (const char *signum_exp, int from_tty)
9489 {
9490 enum gdb_signal oursig;
9491
9492 sig_print_header ();
9493
9494 if (signum_exp)
9495 {
9496 /* First see if this is a symbol name. */
9497 oursig = gdb_signal_from_name (signum_exp);
9498 if (oursig == GDB_SIGNAL_UNKNOWN)
9499 {
9500 /* No, try numeric. */
9501 oursig =
9502 gdb_signal_from_command (parse_and_eval_long (signum_exp));
9503 }
9504 sig_print_info (oursig);
9505 return;
9506 }
9507
9508 gdb_printf ("\n");
9509 /* These ugly casts brought to you by the native VAX compiler. */
9510 for (oursig = GDB_SIGNAL_FIRST;
9511 (int) oursig < (int) GDB_SIGNAL_LAST;
9512 oursig = (enum gdb_signal) ((int) oursig + 1))
9513 {
9514 QUIT;
9515
9516 if (oursig != GDB_SIGNAL_UNKNOWN
9517 && oursig != GDB_SIGNAL_DEFAULT && oursig != GDB_SIGNAL_0)
9518 sig_print_info (oursig);
9519 }
9520
9521 gdb_printf (_("\nUse the \"handle\" command "
9522 "to change these tables.\n"));
9523 }
9524
/* The $_siginfo convenience variable is a bit special.  We don't know
   for sure the type of the value until we actually have a chance to
   fetch the data.  The type can change depending on gdbarch, so it is
   also dependent on which thread you have selected.  Both of these
   problems are addressed by:

     1. making $_siginfo be an internalvar that creates a new value on
     access.

     2. making the value of $_siginfo be an lval_computed value.  */
9534
9535 /* This function implements the lval_computed support for reading a
9536 $_siginfo value. */
9537
9538 static void
9539 siginfo_value_read (struct value *v)
9540 {
9541 LONGEST transferred;
9542
9543 /* If we can access registers, so can we access $_siginfo. Likewise
9544 vice versa. */
9545 validate_registers_access ();
9546
9547 transferred =
9548 target_read (current_inferior ()->top_target (),
9549 TARGET_OBJECT_SIGNAL_INFO,
9550 nullptr,
9551 v->contents_all_raw ().data (),
9552 v->offset (),
9553 v->type ()->length ());
9554
9555 if (transferred != v->type ()->length ())
9556 error (_("Unable to read siginfo"));
9557 }
9558
9559 /* This function implements the lval_computed support for writing a
9560 $_siginfo value. */
9561
9562 static void
9563 siginfo_value_write (struct value *v, struct value *fromval)
9564 {
9565 LONGEST transferred;
9566
9567 /* If we can access registers, so can we access $_siginfo. Likewise
9568 vice versa. */
9569 validate_registers_access ();
9570
9571 transferred = target_write (current_inferior ()->top_target (),
9572 TARGET_OBJECT_SIGNAL_INFO,
9573 nullptr,
9574 fromval->contents_all_raw ().data (),
9575 v->offset (),
9576 fromval->type ()->length ());
9577
9578 if (transferred != fromval->type ()->length ())
9579 error (_("Unable to write siginfo"));
9580 }
9581
/* lval_computed hooks for $_siginfo values: reads and writes go
   through the target's TARGET_OBJECT_SIGNAL_INFO object (see
   siginfo_value_read/siginfo_value_write above).  */

static const struct lval_funcs siginfo_value_funcs =
{
  siginfo_value_read,
  siginfo_value_write
};
9587
9588 /* Return a new value with the correct type for the siginfo object of
9589 the current thread using architecture GDBARCH. Return a void value
9590 if there's no object available. */
9591
9592 static struct value *
9593 siginfo_make_value (struct gdbarch *gdbarch, struct internalvar *var,
9594 void *ignore)
9595 {
9596 if (target_has_stack ()
9597 && inferior_ptid != null_ptid
9598 && gdbarch_get_siginfo_type_p (gdbarch))
9599 {
9600 struct type *type = gdbarch_get_siginfo_type (gdbarch);
9601
9602 return value::allocate_computed (type, &siginfo_value_funcs, nullptr);
9603 }
9604
9605 return value::allocate (builtin_type (gdbarch)->builtin_void);
9606 }
9607
9608 \f
/* infcall_suspend_state contains state about the program itself like its
   registers and any signal it received when it last stopped.
   This state must be restored regardless of how the inferior function call
   ends (either successfully, or after it hits a breakpoint or signal)
   if the program is to properly continue where it left off.  */

class infcall_suspend_state
{
public:
  /* Capture state from GDBARCH, TP, and REGCACHE that must be restored
     once the inferior function call has finished.  */
  infcall_suspend_state (struct gdbarch *gdbarch,
			 const struct thread_info *tp,
			 struct regcache *regcache)
    : m_registers (new readonly_detached_regcache (*regcache))
  {
    /* Snapshot the thread's suspend state (stop signal, etc.).  */
    tp->save_suspend_to (m_thread_suspend);

    gdb::unique_xmalloc_ptr<gdb_byte> siginfo_data;

    /* Only architectures that define a siginfo type have siginfo to
       save.  */
    if (gdbarch_get_siginfo_type_p (gdbarch))
      {
	struct type *type = gdbarch_get_siginfo_type (gdbarch);
	size_t len = type->length ();

	siginfo_data.reset ((gdb_byte *) xmalloc (len));

	/* A short read means no usable siginfo; treat it as absent
	   rather than failing the whole state capture.  */
	if (target_read (current_inferior ()->top_target (),
			 TARGET_OBJECT_SIGNAL_INFO, nullptr,
			 siginfo_data.get (), 0, len) != len)
	  {
	    /* Errors ignored.  */
	    siginfo_data.reset (nullptr);
	  }
      }

    if (siginfo_data)
      {
	m_siginfo_gdbarch = gdbarch;
	m_siginfo_data = std::move (siginfo_data);
      }
  }

  /* Return a pointer to the stored register state.  */

  readonly_detached_regcache *registers () const
  {
    return m_registers.get ();
  }

  /* Restores the stored state into GDBARCH, TP, and REGCACHE.  */

  void restore (struct gdbarch *gdbarch,
		struct thread_info *tp,
		struct regcache *regcache) const
  {
    tp->restore_suspend_from (m_thread_suspend);

    /* Saved siginfo is only valid for the gdbarch it was captured
       under; skip the write-back if the architecture changed.  */
    if (m_siginfo_gdbarch == gdbarch)
      {
	struct type *type = gdbarch_get_siginfo_type (gdbarch);

	/* Errors ignored.  */
	target_write (current_inferior ()->top_target (),
		      TARGET_OBJECT_SIGNAL_INFO, nullptr,
		      m_siginfo_data.get (), 0, type->length ());
      }

    /* The inferior can be gone if the user types "print exit(0)"
       (and perhaps other times).  */
    if (target_has_execution ())
      /* NB: The register write goes through to the target.  */
      regcache->restore (registers ());
  }

private:
  /* How the current thread stopped before the inferior function call was
     executed.  */
  struct thread_suspend_state m_thread_suspend;

  /* The registers before the inferior function call was executed.  */
  std::unique_ptr<readonly_detached_regcache> m_registers;

  /* Format of SIGINFO_DATA or NULL if it is not present.  */
  struct gdbarch *m_siginfo_gdbarch = nullptr;

  /* The inferior format depends on SIGINFO_GDBARCH and it has a length of
     gdbarch_get_siginfo_type ()->length ().  For different gdbarch the
     content would be invalid.  */
  gdb::unique_xmalloc_ptr<gdb_byte> m_siginfo_data;
};
9700
9701 infcall_suspend_state_up
9702 save_infcall_suspend_state ()
9703 {
9704 struct thread_info *tp = inferior_thread ();
9705 struct regcache *regcache = get_current_regcache ();
9706 struct gdbarch *gdbarch = regcache->arch ();
9707
9708 infcall_suspend_state_up inf_state
9709 (new struct infcall_suspend_state (gdbarch, tp, regcache));
9710
9711 /* Having saved the current state, adjust the thread state, discarding
9712 any stop signal information. The stop signal is not useful when
9713 starting an inferior function call, and run_inferior_call will not use
9714 the signal due to its `proceed' call with GDB_SIGNAL_0. */
9715 tp->set_stop_signal (GDB_SIGNAL_0);
9716
9717 return inf_state;
9718 }
9719
9720 /* Restore inferior session state to INF_STATE. */
9721
9722 void
9723 restore_infcall_suspend_state (struct infcall_suspend_state *inf_state)
9724 {
9725 struct thread_info *tp = inferior_thread ();
9726 struct regcache *regcache = get_current_regcache ();
9727 struct gdbarch *gdbarch = regcache->arch ();
9728
9729 inf_state->restore (gdbarch, tp, regcache);
9730 discard_infcall_suspend_state (inf_state);
9731 }
9732
/* Free INF_STATE without restoring anything from it.  */

void
discard_infcall_suspend_state (struct infcall_suspend_state *inf_state)
{
  delete inf_state;
}
9738
/* Return the register state saved in INF_STATE.  The returned regcache
   remains owned by INF_STATE.  */

readonly_detached_regcache *
get_infcall_suspend_state_regcache (struct infcall_suspend_state *inf_state)
{
  return inf_state->registers ();
}
9744
/* infcall_control_state contains state regarding gdb's control of the
   inferior itself like stepping control.  It also contains session state like
   the user's currently selected frame.  */

struct infcall_control_state
{
  /* Stepping/breakpoint control state of the thread that made the
     call.  */
  struct thread_control_state thread_control;

  /* Control state of the inferior that made the call.  */
  struct inferior_control_state inferior_control;

  /* Other fields:  */
  enum stop_stack_kind stop_stack_dummy = STOP_NONE;
  int stopped_by_random_signal = 0;

  /* ID and level of the selected frame when the inferior function
     call was made.  */
  struct frame_id selected_frame_id {};
  int selected_frame_level = -1;
};
9763
/* Save all of the information associated with the inferior<==>gdb
   connection.  */

infcall_control_state_up
save_infcall_control_state ()
{
  infcall_control_state_up inf_status (new struct infcall_control_state);
  struct thread_info *tp = inferior_thread ();
  struct inferior *inf = current_inferior ();

  inf_status->thread_control = tp->control;
  inf_status->inferior_control = inf->control;

  /* The copies in INF_STATUS now reference these breakpoints; clear
     the thread's pointers so the live control state doesn't share
     them.  They are put back by restore_infcall_control_state.  */
  tp->control.step_resume_breakpoint = nullptr;
  tp->control.exception_resume_breakpoint = nullptr;

  /* Save original bpstat chain to INF_STATUS; replace it in TP with copy of
     chain.  If caller's caller is walking the chain, they'll be happier if we
     hand them back the original chain when restore_infcall_control_state is
     called.  */
  tp->control.stop_bpstat = bpstat_copy (tp->control.stop_bpstat);

  /* Other fields:  */
  inf_status->stop_stack_dummy = stop_stack_dummy;
  inf_status->stopped_by_random_signal = stopped_by_random_signal;

  save_selected_frame (&inf_status->selected_frame_id,
		       &inf_status->selected_frame_level);

  return inf_status;
}
9795
/* Restore inferior session state to INF_STATUS, then free it.  */

void
restore_infcall_control_state (struct infcall_control_state *inf_status)
{
  struct thread_info *tp = inferior_thread ();
  struct inferior *inf = current_inferior ();

  /* The thread's current step/exception breakpoints are about to be
     replaced by the saved ones; mark them for deletion at the next
     stop rather than leaking them.  */
  if (tp->control.step_resume_breakpoint)
    tp->control.step_resume_breakpoint->disposition = disp_del_at_next_stop;

  if (tp->control.exception_resume_breakpoint)
    tp->control.exception_resume_breakpoint->disposition
      = disp_del_at_next_stop;

  /* Handle the bpstat_copy of the chain.  */
  bpstat_clear (&tp->control.stop_bpstat);

  tp->control = inf_status->thread_control;
  inf->control = inf_status->inferior_control;

  /* Other fields:  */
  stop_stack_dummy = inf_status->stop_stack_dummy;
  stopped_by_random_signal = inf_status->stopped_by_random_signal;

  if (target_has_stack ())
    {
      restore_selected_frame (inf_status->selected_frame_id,
			      inf_status->selected_frame_level);
    }

  delete inf_status;
}
9829
/* Throw away INF_STATUS without restoring it, releasing the resources
   it holds: the saved breakpoints are marked for deletion at the next
   stop and the saved bpstat chain is cleared.  */

void
discard_infcall_control_state (struct infcall_control_state *inf_status)
{
  if (inf_status->thread_control.step_resume_breakpoint)
    inf_status->thread_control.step_resume_breakpoint->disposition
      = disp_del_at_next_stop;

  if (inf_status->thread_control.exception_resume_breakpoint)
    inf_status->thread_control.exception_resume_breakpoint->disposition
      = disp_del_at_next_stop;

  /* See save_infcall_control_state for info on stop_bpstat.  */
  bpstat_clear (&inf_status->thread_control.stop_bpstat);

  delete inf_status;
}
9846 \f
9847 /* See infrun.h. */
9848
9849 void
9850 clear_exit_convenience_vars (void)
9851 {
9852 clear_internalvar (lookup_internalvar ("_exitsignal"));
9853 clear_internalvar (lookup_internalvar ("_exitcode"));
9854 }
9855 \f
9856
9857 /* User interface for reverse debugging:
9858 Set exec-direction / show exec-direction commands
9859 (returns error unless target implements to_set_exec_direction method). */
9860
/* The direction in which execution commands currently run (forward or
   reverse).  */
enum exec_direction_kind execution_direction = EXEC_FORWARD;

/* The two valid values of the "set exec-direction" setting.  */
static const char exec_forward[] = "forward";
static const char exec_reverse[] = "reverse";

/* Backing storage for "set exec-direction"; always points at one of
   the literals above.  */
static const char *exec_direction = exec_forward;

/* NULL-terminated list of the setting's valid values.  */
static const char *const exec_direction_names[] = {
  exec_forward,
  exec_reverse,
  nullptr
};
9870
9871 static void
9872 set_exec_direction_func (const char *args, int from_tty,
9873 struct cmd_list_element *cmd)
9874 {
9875 if (target_can_execute_reverse ())
9876 {
9877 if (!strcmp (exec_direction, exec_forward))
9878 execution_direction = EXEC_FORWARD;
9879 else if (!strcmp (exec_direction, exec_reverse))
9880 execution_direction = EXEC_REVERSE;
9881 }
9882 else
9883 {
9884 exec_direction = exec_forward;
9885 error (_("Target does not support this operation."));
9886 }
9887 }
9888
9889 static void
9890 show_exec_direction_func (struct ui_file *out, int from_tty,
9891 struct cmd_list_element *cmd, const char *value)
9892 {
9893 switch (execution_direction) {
9894 case EXEC_FORWARD:
9895 gdb_printf (out, _("Forward.\n"));
9896 break;
9897 case EXEC_REVERSE:
9898 gdb_printf (out, _("Reverse.\n"));
9899 break;
9900 default:
9901 internal_error (_("bogus execution_direction value: %d"),
9902 (int) execution_direction);
9903 }
9904 }
9905
/* "show schedule-multiple" callback: report whether execution
   commands resume the threads of all processes or only the current
   one.  */

static void
show_schedule_multiple (struct ui_file *file, int from_tty,
			struct cmd_list_element *c, const char *value)
{
  gdb_printf (file, _("Resuming the execution of threads "
		      "of all processes is %s.\n"), value);
}
9913
/* Implementation of `siginfo' variable.  Only a make-value hook is
   needed; reads/writes of the resulting value go through
   siginfo_value_funcs.  */

static const struct internalvar_funcs siginfo_funcs =
{
  siginfo_make_value,
  nullptr,
};
9921
/* Callback for infrun's target events source.  This is marked when a
   thread has a pending status to process.  */

static void
infrun_async_inferior_event_handler (gdb_client_data data)
{
  /* Un-mark the handler before dispatching the event.  */
  clear_async_event_handler (infrun_async_inferior_event_token);
  inferior_event_handler (INF_REG_EVENT);
}
9931
9932 #if GDB_SELF_TEST
9933 namespace selftests
9934 {
9935
/* Verify that when two threads with the same ptid exist (from two different
   targets) and one of them changes ptid, we only update inferior_ptid if
   it is appropriate.  */

static void
infrun_thread_ptid_changed ()
{
  gdbarch *arch = current_inferior ()->arch ();

  /* The thread which inferior_ptid represents changes ptid.  */
  {
    scoped_restore_current_pspace_and_thread restore;

    /* Two independent mock targets, each with one thread at the same
       ptid.  */
    scoped_mock_context<test_target_ops> target1 (arch);
    scoped_mock_context<test_target_ops> target2 (arch);

    ptid_t old_ptid (111, 222);
    ptid_t new_ptid (111, 333);

    target1.mock_inferior.pid = old_ptid.pid ();
    target1.mock_thread.ptid = old_ptid;
    target1.mock_inferior.ptid_thread_map.clear ();
    target1.mock_inferior.ptid_thread_map[old_ptid] = &target1.mock_thread;

    target2.mock_inferior.pid = old_ptid.pid ();
    target2.mock_thread.ptid = old_ptid;
    target2.mock_inferior.ptid_thread_map.clear ();
    target2.mock_inferior.ptid_thread_map[old_ptid] = &target2.mock_thread;

    /* Select target1's inferior, so inferior_ptid refers to target1's
       thread.  */
    auto restore_inferior_ptid = make_scoped_restore (&inferior_ptid, old_ptid);
    set_current_inferior (&target1.mock_inferior);

    thread_change_ptid (&target1.mock_target, old_ptid, new_ptid);

    /* The changed thread is the one inferior_ptid named, so
       inferior_ptid must follow it.  */
    gdb_assert (inferior_ptid == new_ptid);
  }

  /* A thread with the same ptid as inferior_ptid, but from another target,
     changes ptid.  */
  {
    scoped_restore_current_pspace_and_thread restore;

    scoped_mock_context<test_target_ops> target1 (arch);
    scoped_mock_context<test_target_ops> target2 (arch);

    ptid_t old_ptid (111, 222);
    ptid_t new_ptid (111, 333);

    target1.mock_inferior.pid = old_ptid.pid ();
    target1.mock_thread.ptid = old_ptid;
    target1.mock_inferior.ptid_thread_map.clear ();
    target1.mock_inferior.ptid_thread_map[old_ptid] = &target1.mock_thread;

    target2.mock_inferior.pid = old_ptid.pid ();
    target2.mock_thread.ptid = old_ptid;
    target2.mock_inferior.ptid_thread_map.clear ();
    target2.mock_inferior.ptid_thread_map[old_ptid] = &target2.mock_thread;

    /* This time select target2's inferior while changing target1's
       thread.  */
    auto restore_inferior_ptid = make_scoped_restore (&inferior_ptid, old_ptid);
    set_current_inferior (&target2.mock_inferior);

    thread_change_ptid (&target1.mock_target, old_ptid, new_ptid);

    /* The changed thread belongs to a different target, so
       inferior_ptid must be left alone.  */
    gdb_assert (inferior_ptid == old_ptid);
  }
}
10002
10003 } /* namespace selftests */
10004
10005 #endif /* GDB_SELF_TEST */
10006
10007 void _initialize_infrun ();
10008 void
10009 _initialize_infrun ()
10010 {
10011 struct cmd_list_element *c;
10012
10013 /* Register extra event sources in the event loop. */
10014 infrun_async_inferior_event_token
10015 = create_async_event_handler (infrun_async_inferior_event_handler, nullptr,
10016 "infrun");
10017
10018 cmd_list_element *info_signals_cmd
10019 = add_info ("signals", info_signals_command, _("\
10020 What debugger does when program gets various signals.\n\
10021 Specify a signal as argument to print info on that signal only."));
10022 add_info_alias ("handle", info_signals_cmd, 0);
10023
10024 c = add_com ("handle", class_run, handle_command, _("\
10025 Specify how to handle signals.\n\
10026 Usage: handle SIGNAL [ACTIONS]\n\
10027 Args are signals and actions to apply to those signals.\n\
10028 If no actions are specified, the current settings for the specified signals\n\
10029 will be displayed instead.\n\
10030 \n\
10031 Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
10032 from 1-15 are allowed for compatibility with old versions of GDB.\n\
10033 Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
10034 The special arg \"all\" is recognized to mean all signals except those\n\
10035 used by the debugger, typically SIGTRAP and SIGINT.\n\
10036 \n\
10037 Recognized actions include \"stop\", \"nostop\", \"print\", \"noprint\",\n\
10038 \"pass\", \"nopass\", \"ignore\", or \"noignore\".\n\
10039 Stop means reenter debugger if this signal happens (implies print).\n\
10040 Print means print a message if this signal happens.\n\
10041 Pass means let program see this signal; otherwise program doesn't know.\n\
10042 Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
10043 Pass and Stop may be combined.\n\
10044 \n\
10045 Multiple signals may be specified. Signal numbers and signal names\n\
10046 may be interspersed with actions, with the actions being performed for\n\
10047 all signals cumulatively specified."));
10048 set_cmd_completer (c, handle_completer);
10049
10050 stop_command = add_cmd ("stop", class_obscure,
10051 not_just_help_class_command, _("\
10052 There is no `stop' command, but you can set a hook on `stop'.\n\
10053 This allows you to set a list of commands to be run each time execution\n\
10054 of the program stops."), &cmdlist);
10055
10056 add_setshow_boolean_cmd
10057 ("infrun", class_maintenance, &debug_infrun,
10058 _("Set inferior debugging."),
10059 _("Show inferior debugging."),
10060 _("When non-zero, inferior specific debugging is enabled."),
10061 nullptr, show_debug_infrun, &setdebuglist, &showdebuglist);
10062
10063 add_setshow_boolean_cmd ("non-stop", no_class,
10064 &non_stop_1, _("\
10065 Set whether gdb controls the inferior in non-stop mode."), _("\
10066 Show whether gdb controls the inferior in non-stop mode."), _("\
10067 When debugging a multi-threaded program and this setting is\n\
10068 off (the default, also called all-stop mode), when one thread stops\n\
10069 (for a breakpoint, watchpoint, exception, or similar events), GDB stops\n\
10070 all other threads in the program while you interact with the thread of\n\
10071 interest. When you continue or step a thread, you can allow the other\n\
10072 threads to run, or have them remain stopped, but while you inspect any\n\
10073 thread's state, all threads stop.\n\
10074 \n\
10075 In non-stop mode, when one thread stops, other threads can continue\n\
10076 to run freely. You'll be able to step each thread independently,\n\
10077 leave it stopped or free to run as needed."),
10078 set_non_stop,
10079 show_non_stop,
10080 &setlist,
10081 &showlist);
10082
10083 for (size_t i = 0; i < GDB_SIGNAL_LAST; i++)
10084 {
10085 signal_stop[i] = 1;
10086 signal_print[i] = 1;
10087 signal_program[i] = 1;
10088 signal_catch[i] = 0;
10089 }
10090
10091 /* Signals caused by debugger's own actions should not be given to
10092 the program afterwards.
10093
10094 Do not deliver GDB_SIGNAL_TRAP by default, except when the user
10095 explicitly specifies that it should be delivered to the target
10096 program. Typically, that would occur when a user is debugging a
10097 target monitor on a simulator: the target monitor sets a
10098 breakpoint; the simulator encounters this breakpoint and halts
10099 the simulation handing control to GDB; GDB, noting that the stop
10100 address doesn't map to any known breakpoint, returns control back
10101 to the simulator; the simulator then delivers the hardware
10102 equivalent of a GDB_SIGNAL_TRAP to the program being
10103 debugged. */
10104 signal_program[GDB_SIGNAL_TRAP] = 0;
10105 signal_program[GDB_SIGNAL_INT] = 0;
10106
10107 /* Signals that are not errors should not normally enter the debugger. */
10108 signal_stop[GDB_SIGNAL_ALRM] = 0;
10109 signal_print[GDB_SIGNAL_ALRM] = 0;
10110 signal_stop[GDB_SIGNAL_VTALRM] = 0;
10111 signal_print[GDB_SIGNAL_VTALRM] = 0;
10112 signal_stop[GDB_SIGNAL_PROF] = 0;
10113 signal_print[GDB_SIGNAL_PROF] = 0;
10114 signal_stop[GDB_SIGNAL_CHLD] = 0;
10115 signal_print[GDB_SIGNAL_CHLD] = 0;
10116 signal_stop[GDB_SIGNAL_IO] = 0;
10117 signal_print[GDB_SIGNAL_IO] = 0;
10118 signal_stop[GDB_SIGNAL_POLL] = 0;
10119 signal_print[GDB_SIGNAL_POLL] = 0;
10120 signal_stop[GDB_SIGNAL_URG] = 0;
10121 signal_print[GDB_SIGNAL_URG] = 0;
10122 signal_stop[GDB_SIGNAL_WINCH] = 0;
10123 signal_print[GDB_SIGNAL_WINCH] = 0;
10124 signal_stop[GDB_SIGNAL_PRIO] = 0;
10125 signal_print[GDB_SIGNAL_PRIO] = 0;
10126
10127 /* These signals are used internally by user-level thread
10128 implementations. (See signal(5) on Solaris.) Like the above
10129 signals, a healthy program receives and handles them as part of
10130 its normal operation. */
10131 signal_stop[GDB_SIGNAL_LWP] = 0;
10132 signal_print[GDB_SIGNAL_LWP] = 0;
10133 signal_stop[GDB_SIGNAL_WAITING] = 0;
10134 signal_print[GDB_SIGNAL_WAITING] = 0;
10135 signal_stop[GDB_SIGNAL_CANCEL] = 0;
10136 signal_print[GDB_SIGNAL_CANCEL] = 0;
10137 signal_stop[GDB_SIGNAL_LIBRT] = 0;
10138 signal_print[GDB_SIGNAL_LIBRT] = 0;
10139
10140 /* Update cached state. */
10141 signal_cache_update (-1);
10142
10143 add_setshow_zinteger_cmd ("stop-on-solib-events", class_support,
10144 &stop_on_solib_events, _("\
10145 Set stopping for shared library events."), _("\
10146 Show stopping for shared library events."), _("\
10147 If nonzero, gdb will give control to the user when the dynamic linker\n\
10148 notifies gdb of shared library events. The most common event of interest\n\
10149 to the user would be loading/unloading of a new library."),
10150 set_stop_on_solib_events,
10151 show_stop_on_solib_events,
10152 &setlist, &showlist);
10153
10154 add_setshow_enum_cmd ("follow-fork-mode", class_run,
10155 follow_fork_mode_kind_names,
10156 &follow_fork_mode_string, _("\
10157 Set debugger response to a program call of fork or vfork."), _("\
10158 Show debugger response to a program call of fork or vfork."), _("\
10159 A fork or vfork creates a new process. follow-fork-mode can be:\n\
10160 parent - the original process is debugged after a fork\n\
10161 child - the new process is debugged after a fork\n\
10162 The unfollowed process will continue to run.\n\
10163 By default, the debugger will follow the parent process."),
10164 nullptr,
10165 show_follow_fork_mode_string,
10166 &setlist, &showlist);
10167
10168 add_setshow_enum_cmd ("follow-exec-mode", class_run,
10169 follow_exec_mode_names,
10170 &follow_exec_mode_string, _("\
10171 Set debugger response to a program call of exec."), _("\
10172 Show debugger response to a program call of exec."), _("\
10173 An exec call replaces the program image of a process.\n\
10174 \n\
10175 follow-exec-mode can be:\n\
10176 \n\
10177 new - the debugger creates a new inferior and rebinds the process\n\
10178 to this new inferior. The program the process was running before\n\
10179 the exec call can be restarted afterwards by restarting the original\n\
10180 inferior.\n\
10181 \n\
10182 same - the debugger keeps the process bound to the same inferior.\n\
10183 The new executable image replaces the previous executable loaded in\n\
10184 the inferior. Restarting the inferior after the exec call restarts\n\
10185 the executable the process was running after the exec call.\n\
10186 \n\
10187 By default, the debugger will use the same inferior."),
10188 nullptr,
10189 show_follow_exec_mode_string,
10190 &setlist, &showlist);
10191
10192 add_setshow_enum_cmd ("scheduler-locking", class_run,
10193 scheduler_enums, &scheduler_mode, _("\
10194 Set mode for locking scheduler during execution."), _("\
10195 Show mode for locking scheduler during execution."), _("\
10196 off == no locking (threads may preempt at any time)\n\
10197 on == full locking (no thread except the current thread may run)\n\
10198 This applies to both normal execution and replay mode.\n\
10199 step == scheduler locked during stepping commands (step, next, stepi, nexti).\n\
10200 In this mode, other threads may run during other commands.\n\
10201 This applies to both normal execution and replay mode.\n\
10202 replay == scheduler locked in replay mode and unlocked during normal execution."),
10203 set_schedlock_func, /* traps on target vector */
10204 show_scheduler_mode,
10205 &setlist, &showlist);
10206
10207 add_setshow_boolean_cmd ("schedule-multiple", class_run, &sched_multi, _("\
10208 Set mode for resuming threads of all processes."), _("\
10209 Show mode for resuming threads of all processes."), _("\
10210 When on, execution commands (such as 'continue' or 'next') resume all\n\
10211 threads of all processes. When off (which is the default), execution\n\
10212 commands only resume the threads of the current process. The set of\n\
10213 threads that are resumed is further refined by the scheduler-locking\n\
10214 mode (see help set scheduler-locking)."),
10215 nullptr,
10216 show_schedule_multiple,
10217 &setlist, &showlist);
10218
10219 add_setshow_boolean_cmd ("step-mode", class_run, &step_stop_if_no_debug, _("\
10220 Set mode of the step operation."), _("\
10221 Show mode of the step operation."), _("\
10222 When set, doing a step over a function without debug line information\n\
10223 will stop at the first instruction of that function. Otherwise, the\n\
10224 function is skipped and the step command stops at a different source line."),
10225 nullptr,
10226 show_step_stop_if_no_debug,
10227 &setlist, &showlist);
10228
10229 add_setshow_auto_boolean_cmd ("displaced-stepping", class_run,
10230 &can_use_displaced_stepping, _("\
10231 Set debugger's willingness to use displaced stepping."), _("\
10232 Show debugger's willingness to use displaced stepping."), _("\
10233 If on, gdb will use displaced stepping to step over breakpoints if it is\n\
10234 supported by the target architecture. If off, gdb will not use displaced\n\
10235 stepping to step over breakpoints, even if such is supported by the target\n\
10236 architecture. If auto (which is the default), gdb will use displaced stepping\n\
10237 if the target architecture supports it and non-stop mode is active, but will not\n\
10238 use it in all-stop mode (see help set non-stop)."),
10239 nullptr,
10240 show_can_use_displaced_stepping,
10241 &setlist, &showlist);
10242
10243 add_setshow_enum_cmd ("exec-direction", class_run, exec_direction_names,
10244 &exec_direction, _("Set direction of execution.\n\
10245 Options are 'forward' or 'reverse'."),
10246 _("Show direction of execution (forward/reverse)."),
10247 _("Tells gdb whether to execute forward or backward."),
10248 set_exec_direction_func, show_exec_direction_func,
10249 &setlist, &showlist);
10250
10251 /* Set/show detach-on-fork: user-settable mode. */
10252
10253 add_setshow_boolean_cmd ("detach-on-fork", class_run, &detach_fork, _("\
10254 Set whether gdb will detach the child of a fork."), _("\
10255 Show whether gdb will detach the child of a fork."), _("\
10256 Tells gdb whether to detach the child of a fork."),
10257 nullptr, nullptr, &setlist, &showlist);
10258
10259 /* Set/show disable address space randomization mode. */
10260
10261 add_setshow_boolean_cmd ("disable-randomization", class_support,
10262 &disable_randomization, _("\
10263 Set disabling of debuggee's virtual address space randomization."), _("\
10264 Show disabling of debuggee's virtual address space randomization."), _("\
10265 When this mode is on (which is the default), randomization of the virtual\n\
10266 address space is disabled. Standalone programs run with the randomization\n\
10267 enabled by default on some platforms."),
10268 &set_disable_randomization,
10269 &show_disable_randomization,
10270 &setlist, &showlist);
10271
10272 /* ptid initializations */
10273 inferior_ptid = null_ptid;
10274 target_last_wait_ptid = minus_one_ptid;
10275
10276 gdb::observers::thread_ptid_changed.attach (infrun_thread_ptid_changed,
10277 "infrun");
10278 gdb::observers::thread_stop_requested.attach (infrun_thread_stop_requested,
10279 "infrun");
10280 gdb::observers::inferior_exit.attach (infrun_inferior_exit, "infrun");
10281 gdb::observers::inferior_execd.attach (infrun_inferior_execd, "infrun");
10282
10283 /* Explicitly create without lookup, since that tries to create a
10284 value with a void typed value, and when we get here, gdbarch
10285 isn't initialized yet. At this point, we're quite sure there
10286 isn't another convenience variable of the same name. */
10287 create_internalvar_type_lazy ("_siginfo", &siginfo_funcs, nullptr);
10288
10289 add_setshow_boolean_cmd ("observer", no_class,
10290 &observer_mode_1, _("\
10291 Set whether gdb controls the inferior in observer mode."), _("\
10292 Show whether gdb controls the inferior in observer mode."), _("\
10293 In observer mode, GDB can get data from the inferior, but not\n\
10294 affect its execution. Registers and memory may not be changed,\n\
10295 breakpoints may not be set, and the program cannot be interrupted\n\
10296 or signalled."),
10297 set_observer_mode,
10298 show_observer_mode,
10299 &setlist,
10300 &showlist);
10301
10302 #if GDB_SELF_TEST
10303 selftests::register_test ("infrun_thread_ptid_changed",
10304 selftests::infrun_thread_ptid_changed);
10305 #endif
10306 }