Remove path name from test case
[binutils-gdb.git] / gdb / linux-nat.c
1 /* GNU/Linux native-dependent code common to multiple platforms.
2
3 Copyright (C) 2001-2023 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "defs.h"
21 #include "inferior.h"
22 #include "infrun.h"
23 #include "target.h"
24 #include "nat/linux-nat.h"
25 #include "nat/linux-waitpid.h"
26 #include "gdbsupport/gdb_wait.h"
27 #include <unistd.h>
28 #include <sys/syscall.h>
29 #include "nat/gdb_ptrace.h"
30 #include "linux-nat.h"
31 #include "nat/linux-ptrace.h"
32 #include "nat/linux-procfs.h"
33 #include "nat/linux-personality.h"
34 #include "linux-fork.h"
35 #include "gdbthread.h"
36 #include "gdbcmd.h"
37 #include "regcache.h"
38 #include "regset.h"
39 #include "inf-child.h"
40 #include "inf-ptrace.h"
41 #include "auxv.h"
42 #include <sys/procfs.h>
43 #include "elf-bfd.h"
44 #include "gregset.h"
45 #include "gdbcore.h"
46 #include <ctype.h>
47 #include <sys/stat.h>
48 #include <fcntl.h>
49 #include "inf-loop.h"
50 #include "gdbsupport/event-loop.h"
51 #include "event-top.h"
52 #include <pwd.h>
53 #include <sys/types.h>
54 #include <dirent.h>
55 #include "xml-support.h"
56 #include <sys/vfs.h>
57 #include "solib.h"
58 #include "nat/linux-osdata.h"
59 #include "linux-tdep.h"
60 #include "symfile.h"
61 #include "gdbsupport/agent.h"
62 #include "tracepoint.h"
63 #include "target-descriptions.h"
64 #include "gdbsupport/filestuff.h"
65 #include "objfiles.h"
66 #include "nat/linux-namespaces.h"
67 #include "gdbsupport/block-signals.h"
68 #include "gdbsupport/fileio.h"
69 #include "gdbsupport/scope-exit.h"
70 #include "gdbsupport/gdb-sigmask.h"
71 #include "gdbsupport/common-debug.h"
72 #include <unordered_map>
73
74 /* This comment documents high-level logic of this file.
75
76 Waiting for events in sync mode
77 ===============================
78
79 When waiting for an event in a specific thread, we just use waitpid,
80 passing the specific pid, and not passing WNOHANG.
81
82 When waiting for an event in all threads, waitpid is not quite good:
83
84 - If the thread group leader exits while other threads in the thread
85 group still exist, waitpid(TGID, ...) hangs. That waitpid won't
86 return an exit status until the other threads in the group are
87 reaped.
88
89 - When a non-leader thread execs, that thread just vanishes without
90 reporting an exit (so we'd hang if we waited for it explicitly in
91 that case). The exec event is instead reported to the TGID pid.
92
93 The solution is to always use -1 and WNOHANG, together with
94 sigsuspend.
95
96 First, we use non-blocking waitpid to check for events. If nothing is
97 found, we use sigsuspend to wait for SIGCHLD. When SIGCHLD arrives,
98 it means something happened to a child process. As soon as we know
99 there's an event, we get back to calling nonblocking waitpid.
100
101 Note that SIGCHLD should be blocked between waitpid and sigsuspend
102 calls, so that we don't miss a signal. If SIGCHLD arrives in between,
103 when it's blocked, the signal becomes pending and sigsuspend
104 immediately notices it and returns.
105
106 Waiting for events in async mode (TARGET_WNOHANG)
107 =================================================
108
109 In async mode, GDB should always be ready to handle both user input
110 and target events, so neither blocking waitpid nor sigsuspend are
111 viable options. Instead, we should asynchronously notify the GDB main
112 event loop whenever there's an unprocessed event from the target. We
113 detect asynchronous target events by handling SIGCHLD signals. To
114 notify the event loop about target events, an event pipe is used
115 --- the pipe is registered as waitable event source in the event loop,
116 the event loop select/poll's on the read end of this pipe (as well on
117 other event sources, e.g., stdin), and the SIGCHLD handler marks the
118 event pipe to raise an event. This is more portable than relying on
119 pselect/ppoll, since on kernels that lack those syscalls, libc
120 emulates them with select/poll+sigprocmask, and that is racy
121 (a.k.a. plain broken).
122
123 Obviously, if we fail to notify the event loop if there's a target
124 event, it's bad. OTOH, if we notify the event loop when there's no
125 event from the target, linux_nat_wait will detect that there's no real
126 event to report, and return event of type TARGET_WAITKIND_IGNORE.
127 This is mostly harmless, but it will waste time and is better avoided.
128
129 The main design point is that every time GDB is outside linux-nat.c,
130 we have a SIGCHLD handler installed that is called when something
131 happens to the target and notifies the GDB event loop. Whenever GDB
132 core decides to handle the event, and calls into linux-nat.c, we
process things as in sync mode, except that we never block in
134 sigsuspend.
135
136 While processing an event, we may end up momentarily blocked in
waitpid calls.  Those waitpid calls, while blocking, are guaranteed to
138 return quickly. E.g., in all-stop mode, before reporting to the core
139 that an LWP hit a breakpoint, all LWPs are stopped by sending them
140 SIGSTOP, and synchronously waiting for the SIGSTOP to be reported.
141 Note that this is different from blocking indefinitely waiting for the
142 next event --- here, we're already handling an event.
143
144 Use of signals
145 ==============
146
147 We stop threads by sending a SIGSTOP. The use of SIGSTOP instead of another
148 signal is not entirely significant; we just need for a signal to be delivered,
149 so that we can intercept it. SIGSTOP's advantage is that it can not be
150 blocked. A disadvantage is that it is not a real-time signal, so it can only
151 be queued once; we do not keep track of other sources of SIGSTOP.
152
153 Two other signals that can't be blocked are SIGCONT and SIGKILL. But we can't
154 use them, because they have special behavior when the signal is generated -
155 not when it is delivered. SIGCONT resumes the entire thread group and SIGKILL
156 kills the entire thread group.
157
158 A delivered SIGSTOP would stop the entire thread group, not just the thread we
159 tkill'd. But we never let the SIGSTOP be delivered; we always intercept and
160 cancel it (by PTRACE_CONT without passing SIGSTOP).
161
162 We could use a real-time signal instead. This would solve those problems; we
163 could use PTRACE_GETSIGINFO to locate the specific stop signals sent by GDB.
164 But we would still have to have some support for SIGSTOP, since PTRACE_ATTACH
165 generates it, and there are races with trying to find a signal that is not
166 blocked.
167
168 Exec events
169 ===========
170
171 The case of a thread group (process) with 3 or more threads, and a
172 thread other than the leader execs is worth detailing:
173
174 On an exec, the Linux kernel destroys all threads except the execing
175 one in the thread group, and resets the execing thread's tid to the
176 tgid. No exit notification is sent for the execing thread -- from the
177 ptracer's perspective, it appears as though the execing thread just
178 vanishes. Until we reap all other threads except the leader and the
179 execing thread, the leader will be zombie, and the execing thread will
180 be in `D (disc sleep)' state. As soon as all other threads are
181 reaped, the execing thread changes its tid to the tgid, and the
182 previous (zombie) leader vanishes, giving place to the "new"
183 leader. */
184
185 #ifndef O_LARGEFILE
186 #define O_LARGEFILE 0
187 #endif
188
189 struct linux_nat_target *linux_target;
190
191 /* Does the current host support PTRACE_GETREGSET? */
192 enum tribool have_ptrace_getregset = TRIBOOL_UNKNOWN;
193
194 /* When true, print debug messages relating to the linux native target. */
195
196 static bool debug_linux_nat;
197
/* Implement 'show debug linux-nat'.  Print the current VALUE of the
   setting to FILE.  FROM_TTY and C are unused here, but required by
   the show-command callback signature.  */

static void
show_debug_linux_nat (struct ui_file *file, int from_tty,
                      struct cmd_list_element *c, const char *value)
{
  gdb_printf (file, _("Debugging of GNU/Linux native targets is %s.\n"),
              value);
}
207
208 /* Print a linux-nat debug statement. */
209
210 #define linux_nat_debug_printf(fmt, ...) \
211 debug_prefixed_printf_cond (debug_linux_nat, "linux-nat", fmt, ##__VA_ARGS__)
212
213 /* Print "linux-nat" enter/exit debug statements. */
214
215 #define LINUX_NAT_SCOPED_DEBUG_ENTER_EXIT \
216 scoped_debug_enter_exit (debug_linux_nat, "linux-nat")
217
/* A singly-linked list node holding a (pid, wait status) pair, used to
   remember children whose stop was collected before GDB was ready to
   process it (see add_to_pid_list / pull_pid_from_list below).  */

struct simple_pid_list
{
  int pid;                       /* LWP id, as returned by waitpid.  */
  int status;                    /* Raw wait status for that pid.  */
  struct simple_pid_list *next;  /* Next node, or NULL.  */
};

/* Head of the list of pids with pending, not-yet-consumed stops.  */
static struct simple_pid_list *stopped_pids;
225
226 /* Whether target_thread_events is in effect. */
227 static int report_thread_events;
228
229 static int kill_lwp (int lwpid, int signo);
230
231 static int stop_callback (struct lwp_info *lp);
232
233 static void block_child_signals (sigset_t *prev_mask);
234 static void restore_child_signals_mask (sigset_t *prev_mask);
235
236 struct lwp_info;
237 static struct lwp_info *add_lwp (ptid_t ptid);
238 static void purge_lwp_list (int pid);
239 static void delete_lwp (ptid_t ptid);
240 static struct lwp_info *find_lwp_pid (ptid_t ptid);
241
242 static int lwp_status_pending_p (struct lwp_info *lp);
243
244 static void save_stop_reason (struct lwp_info *lp);
245
246 static bool proc_mem_file_is_writable ();
247 static void close_proc_mem_file (pid_t pid);
248 static void open_proc_mem_file (ptid_t ptid);
249
250 /* Return TRUE if LWP is the leader thread of the process. */
251
252 static bool
253 is_leader (lwp_info *lp)
254 {
255 return lp->ptid.pid () == lp->ptid.lwp ();
256 }
257
258 /* Convert an LWP's pending status to a std::string. */
259
260 static std::string
261 pending_status_str (lwp_info *lp)
262 {
263 gdb_assert (lwp_status_pending_p (lp));
264
265 if (lp->waitstatus.kind () != TARGET_WAITKIND_IGNORE)
266 return lp->waitstatus.to_string ();
267 else
268 return status_to_str (lp->status);
269 }
270
/* Return true if we should report exit events for LP: either thread
   events were globally enabled (target_thread_events), or the thread
   corresponding to LP opted in via GDB_THREAD_OPTION_EXIT.  */

static bool
report_exit_events_for (lwp_info *lp)
{
  thread_info *thr = linux_target->find_thread (lp->ptid);
  gdb_assert (thr != nullptr);

  return (report_thread_events
          || (thr->thread_options () & GDB_THREAD_OPTION_EXIT) != 0);
}
282
283 \f
284 /* LWP accessors. */
285
/* See nat/linux-nat.h.  Return LWP's ptid.  */

ptid_t
ptid_of_lwp (struct lwp_info *lwp)
{
  return lwp->ptid;
}
293
/* See nat/linux-nat.h.  Attach arch-specific data INFO to LWP;
   ownership passes to the LWP (released in ~lwp_info via
   low_delete_thread).  */

void
lwp_set_arch_private_info (struct lwp_info *lwp,
                           struct arch_lwp_info *info)
{
  lwp->arch_private = info;
}
302
/* See nat/linux-nat.h.  Return LWP's arch-specific data, or NULL if
   none was set.  */

struct arch_lwp_info *
lwp_arch_private_info (struct lwp_info *lwp)
{
  return lwp->arch_private;
}
310
/* See nat/linux-nat.h.  Return nonzero if LWP is currently stopped.  */

int
lwp_is_stopped (struct lwp_info *lwp)
{
  return lwp->stopped;
}
318
/* See nat/linux-nat.h.  Return the reason LWP last stopped.  */

enum target_stop_reason
lwp_stop_reason (struct lwp_info *lwp)
{
  return lwp->stop_reason;
}
326
/* See nat/linux-nat.h.  Return nonzero if LWP is single-stepping.  */

int
lwp_is_stepping (struct lwp_info *lwp)
{
  return lwp->step;
}
334
335 \f
/* Trivial list manipulation functions to keep track of a list of
   new stopped processes.  Prepend a (PID, STATUS) node to *LISTP.
   The node is heap-allocated; it is freed by pull_pid_from_list.  */
static void
add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
{
  struct simple_pid_list *new_pid = XNEW (struct simple_pid_list);

  new_pid->pid = pid;
  new_pid->status = status;
  new_pid->next = *listp;
  *listp = new_pid;
}
348
/* Search *LISTP for PID.  If found, store its status in *STATUSP,
   unlink and free the node, and return 1.  Return 0 if PID is not in
   the list (in which case *STATUSP is untouched).  Uses a
   pointer-to-pointer walk so the head and interior cases unlink the
   same way.  */
static int
pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
{
  struct simple_pid_list **p;

  for (p = listp; *p != NULL; p = &(*p)->next)
    if ((*p)->pid == pid)
      {
        struct simple_pid_list *next = (*p)->next;

        *statusp = (*p)->status;
        xfree (*p);
        *p = next;
        return 1;
      }
  return 0;
}
366
/* Return the ptrace options that we want to try to enable.  ATTACHED
   is nonzero if we attached to the process rather than spawning it.  */

static int
linux_nat_ptrace_options (int attached)
{
  /* Always trace fork/vfork/exec and mark syscall stops.  */
  int options = (PTRACE_O_TRACESYSGOOD
                 | PTRACE_O_TRACEVFORKDONE
                 | PTRACE_O_TRACEVFORK
                 | PTRACE_O_TRACEFORK
                 | PTRACE_O_TRACEEXEC);

  /* If we spawned the inferior ourselves, have the kernel kill it
     when the tracer (GDB) exits.  Not done when attaching, so that a
     detached-from process survives GDB.  */
  if (!attached)
    options |= PTRACE_O_EXITKILL;

  return options;
}
385
/* Initialize ptrace and procfs warnings and check for supported
   ptrace features given PID.

   ATTACHED should be nonzero iff we attached to the inferior.  */

static void
linux_init_ptrace_procfs (pid_t pid, int attached)
{
  int options = linux_nat_ptrace_options (attached);

  /* Enable the PTRACE_O_* event options on PID.  */
  linux_enable_event_reporting (pid, options);
  linux_ptrace_init_warnings ();
  linux_proc_init_warnings ();
  /* Probe (and cache) whether /proc/PID/mem is writable on this
     kernel; result is consumed by later memory writes.  */
  proc_mem_file_is_writable ();
}
401
/* Destructor; no linux-nat-specific state to release.  */
linux_nat_target::~linux_nat_target ()
{}
404
/* Called after a successful attach; set up ptrace event reporting.
   ATTACHED=1 so PTRACE_O_EXITKILL is not requested.  */
void
linux_nat_target::post_attach (int pid)
{
  linux_init_ptrace_procfs (pid, 1);
}
410
/* Implement the virtual inf_ptrace_target::post_startup_inferior method.
   ATTACHED=0 since we spawned this inferior ourselves, so
   PTRACE_O_EXITKILL is also requested.  */

void
linux_nat_target::post_startup_inferior (ptid_t ptid)
{
  linux_init_ptrace_procfs (ptid.pid (), 0);
}
418
419 /* Return the number of known LWPs in the tgid given by PID. */
420
421 static int
422 num_lwps (int pid)
423 {
424 int count = 0;
425
426 for (const lwp_info *lp ATTRIBUTE_UNUSED : all_lwps ())
427 if (lp->ptid.pid () == pid)
428 count++;
429
430 return count;
431 }
432
/* Deleter for lwp_info unique_ptr specialisation.  Instead of plain
   `delete', removes the LWP from the global list/hash table via
   delete_lwp (which also frees it).  */

struct lwp_deleter
{
  void operator() (struct lwp_info *lwp) const
  {
    delete_lwp (lwp->ptid);
  }
};
442
443 /* A unique_ptr specialisation for lwp_info. */
444
445 typedef std::unique_ptr<struct lwp_info, lwp_deleter> lwp_info_up;
446
/* Target hook for follow_fork.  CHILD_INF/CHILD_PTID identify the new
   (v)fork child; FORK_KIND is TARGET_WAITKIND_FORKED or _VFORKED.
   If FOLLOW_CHILD, debug the child; otherwise stay with the parent,
   optionally detaching the child when DETACH_FORK.  */

void
linux_nat_target::follow_fork (inferior *child_inf, ptid_t child_ptid,
                               target_waitkind fork_kind, bool follow_child,
                               bool detach_fork)
{
  /* Let the generic ptrace layer update inferior/thread bookkeeping
     first.  */
  inf_ptrace_target::follow_fork (child_inf, child_ptid, fork_kind,
                                  follow_child, detach_fork);

  if (!follow_child)
    {
      bool has_vforked = fork_kind == TARGET_WAITKIND_VFORKED;
      ptid_t parent_ptid = inferior_ptid;
      int parent_pid = parent_ptid.lwp ();
      int child_pid = child_ptid.lwp ();

      /* We're already attached to the parent, by default.  */
      lwp_info *child_lp = add_lwp (child_ptid);
      child_lp->stopped = 1;
      child_lp->last_resume_kind = resume_stop;

      /* Detach new forked process?  */
      if (detach_fork)
        {
          int child_stop_signal = 0;
          bool detach_child = true;

          /* Move CHILD_LP into a unique_ptr and clear the source pointer
             to prevent us doing anything stupid with it.  The deleter
             removes the LWP from our tables when this scope exits.  */
          lwp_info_up child_lp_ptr (child_lp);
          child_lp = nullptr;

          linux_target->low_prepare_to_resume (child_lp_ptr.get ());

          /* When debugging an inferior in an architecture that supports
             hardware single stepping on a kernel without commit
             6580807da14c423f0d0a708108e6df6ebc8bc83d, the vfork child
             process starts with the TIF_SINGLESTEP/X86_EFLAGS_TF bits
             set if the parent process had them set.
             To work around this, single step the child process
             once before detaching to clear the flags.  */

          /* Note that we consult the parent's architecture instead of
             the child's because there's no inferior for the child at
             this point.  */
          if (!gdbarch_software_single_step_p (target_thread_architecture
                                               (parent_ptid)))
            {
              int status;

              linux_disable_event_reporting (child_pid);
              if (ptrace (PTRACE_SINGLESTEP, child_pid, 0, 0) < 0)
                perror_with_name (_("Couldn't do single step"));
              if (my_waitpid (child_pid, &status, 0) < 0)
                perror_with_name (_("Couldn't wait vfork process"));
              else
                {
                  /* If the child didn't report a stop (e.g. it died),
                     don't try to detach it below.  */
                  detach_child = WIFSTOPPED (status);
                  child_stop_signal = WSTOPSIG (status);
                }
            }

          if (detach_child)
            {
              int signo = child_stop_signal;

              /* Only forward the pending signal to the detached child
                 if the user's signal-pass settings allow it.  */
              if (signo != 0
                  && !signal_pass_state (gdb_signal_from_host (signo)))
                signo = 0;
              ptrace (PTRACE_DETACH, child_pid, 0, signo);

              close_proc_mem_file (child_pid);
            }
        }

      if (has_vforked)
        {
          lwp_info *parent_lp = find_lwp_pid (parent_ptid);
          linux_nat_debug_printf ("waiting for VFORK_DONE on %d", parent_pid);
          parent_lp->stopped = 1;

          /* We'll handle the VFORK_DONE event like any other
             event, in target_wait.  */
        }
    }
  else
    {
      struct lwp_info *child_lp;

      /* Following the child: record it as a stopped LWP we intend to
         keep stopped until explicitly resumed.  */
      child_lp = add_lwp (child_ptid);
      child_lp->stopped = 1;
      child_lp->last_resume_kind = resume_stop;
    }
}
542
543 \f
/* Fork events are always reported via PTRACE_O_TRACEFORK (see
   linux_nat_ptrace_options), so there is nothing to insert.  PID is
   unused; return 0 for success.  */
int
linux_nat_target::insert_fork_catchpoint (int pid)
{
  return 0;
}
549
/* Nothing was inserted for fork catchpoints, so nothing to remove.
   Return 0 for success.  */
int
linux_nat_target::remove_fork_catchpoint (int pid)
{
  return 0;
}
555
/* Vfork events are always reported via PTRACE_O_TRACEVFORK, so there
   is nothing to insert.  Return 0 for success.  */
int
linux_nat_target::insert_vfork_catchpoint (int pid)
{
  return 0;
}
561
/* Nothing was inserted for vfork catchpoints, so nothing to remove.
   Return 0 for success.  */
int
linux_nat_target::remove_vfork_catchpoint (int pid)
{
  return 0;
}
567
/* Exec events are always reported via PTRACE_O_TRACEEXEC, so there is
   nothing to insert.  Return 0 for success.  */
int
linux_nat_target::insert_exec_catchpoint (int pid)
{
  return 0;
}
573
/* Nothing was inserted for exec catchpoints, so nothing to remove.
   Return 0 for success.  */
int
linux_nat_target::remove_exec_catchpoint (int pid)
{
  return 0;
}
579
/* Target hook to enable/disable syscall catchpoints for PID.  Return
   0 for success.  */
int
linux_nat_target::set_syscall_catchpoint (int pid, bool needed, int any_count,
                                          gdb::array_view<const int> syscall_counts)
{
  /* On GNU/Linux, we ignore the arguments.  It means that we only
     enable the syscall catchpoints, but do not disable them.

     Also, we do not use the `syscall_counts' information because we do not
     filter system calls here.  We let GDB do the logic for us.  */
  return 0;
}
591
592 /* List of known LWPs, keyed by LWP PID. This speeds up the common
593 case of mapping a PID returned from the kernel to our corresponding
594 lwp_info data structure. */
595 static htab_t lwp_lwpid_htab;
596
/* Calculate a hash from a lwp_info's LWP PID.  AP is really a
   `struct lwp_info *' (htab callback signature).  Only the lwp field
   of the ptid participates in hashing, matching lwp_lwpid_htab_eq.  */

static hashval_t
lwp_info_hash (const void *ap)
{
  const struct lwp_info *lp = (struct lwp_info *) ap;
  pid_t pid = lp->ptid.lwp ();

  return iterative_hash_object (pid, 0);
}
607
/* Equality function for the lwp_info hash table.  Compares only the
   LWPs' PIDs (the lwp component of the ptid), consistent with
   lwp_info_hash above.  */

static int
lwp_lwpid_htab_eq (const void *a, const void *b)
{
  const struct lwp_info *entry = (const struct lwp_info *) a;
  const struct lwp_info *element = (const struct lwp_info *) b;

  return entry->ptid.lwp () == element->ptid.lwp ();
}
619
/* Create the lwp_lwpid_htab hash table with an initial size of 100
   slots; no per-entry free function (LWPs are owned elsewhere).  */

static void
lwp_lwpid_htab_create (void)
{
  lwp_lwpid_htab = htab_create (100, lwp_info_hash, lwp_lwpid_htab_eq, NULL);
}
627
628 /* Add LP to the hash table. */
629
630 static void
631 lwp_lwpid_htab_add_lwp (struct lwp_info *lp)
632 {
633 void **slot;
634
635 slot = htab_find_slot (lwp_lwpid_htab, lp, INSERT);
636 gdb_assert (slot != NULL && *slot == NULL);
637 *slot = lp;
638 }
639
640 /* Head of doubly-linked list of known LWPs. Sorted by reverse
641 creation order. This order is assumed in some cases. E.g.,
reaping status after killing all LWPs of a process: the leader LWP
643 must be reaped last. */
644
645 static intrusive_list<lwp_info> lwp_list;
646
/* See linux-nat.h.  Return a range over all known LWPs, in
   reverse-creation order (newest first).  */

lwp_info_range
all_lwps ()
{
  return lwp_info_range (lwp_list.begin ());
}
654
/* See linux-nat.h.  Like all_lwps, but safe against deletion of the
   current element while iterating.  */

lwp_info_safe_range
all_lwps_safe ()
{
  return lwp_info_safe_range (lwp_list.begin ());
}
662
/* Add LP to sorted-by-reverse-creation-order doubly-linked list.
   Newest LWPs go at the front.  */

static void
lwp_list_add (struct lwp_info *lp)
{
  lwp_list.push_front (*lp);
}
670
/* Remove LP from sorted-by-reverse-creation-order doubly-linked
   list.  Does not free LP.  */

static void
lwp_list_remove (struct lwp_info *lp)
{
  /* Remove from sorted-by-creation-order list.  */
  lwp_list.erase (lwp_list.iterator_to (*lp));
}
680
681 \f
682
683 /* Signal mask for use with sigsuspend in linux_nat_wait, initialized in
684 _initialize_linux_nat. */
685 static sigset_t suspend_mask;
686
687 /* Signals to block to make that sigsuspend work. */
688 static sigset_t blocked_mask;
689
690 /* SIGCHLD action. */
691 static struct sigaction sigchld_action;
692
/* Block child signals (SIGCHLD and linux threads signals), and store
   the previous mask in PREV_MASK, for later use with
   restore_child_signals_mask.  */

static void
block_child_signals (sigset_t *prev_mask)
{
  /* Make sure SIGCHLD is blocked.  BLOCKED_MASK accumulates; adding is
     idempotent but guarded anyway.  */
  if (!sigismember (&blocked_mask, SIGCHLD))
    sigaddset (&blocked_mask, SIGCHLD);

  gdb_sigmask (SIG_BLOCK, &blocked_mask, prev_mask);
}
705
/* Restore child signals mask, previously returned by
   block_child_signals.  Sets the mask wholesale (SIG_SETMASK).  */

static void
restore_child_signals_mask (sigset_t *prev_mask)
{
  gdb_sigmask (SIG_SETMASK, prev_mask, NULL);
}
714
715 /* Mask of signals to pass directly to the inferior. */
716 static sigset_t pass_mask;
717
/* Update signals to pass to the inferior.  PASS_SIGNALS is indexed by
   GDB signal number; a nonzero entry means that signal should be
   delivered to the inferior without stopping.  Rebuilds the global
   PASS_MASK (of host signal numbers) from scratch.  */
void
linux_nat_target::pass_signals
  (gdb::array_view<const unsigned char> pass_signals)
{
  int signo;

  sigemptyset (&pass_mask);

  /* Translate each host signal number to GDB's numbering before
     consulting the PASS_SIGNALS table.  */
  for (signo = 1; signo < NSIG; signo++)
    {
      int target_signo = gdb_signal_from_host (signo);
      if (target_signo < pass_signals.size () && pass_signals[target_signo])
        sigaddset (&pass_mask, signo);
    }
}
734
735 \f
736
737 /* Prototypes for local functions. */
738 static int stop_wait_callback (struct lwp_info *lp);
739 static int resume_stopped_resumed_lwps (struct lwp_info *lp, const ptid_t wait_ptid);
740 static int check_ptrace_stopped_lwp_gone (struct lwp_info *lp);
741
742 \f
743
/* Destroy and free LP.  */

lwp_info::~lwp_info ()
{
  /* Let the arch specific bits release arch_lwp_info.  */
  linux_target->low_delete_thread (this->arch_private);
}
751
/* Traversal function for purge_lwp_list.  SLOT holds an lwp_info;
   INFO points at the pid being purged.  Deletes the LWP if it belongs
   to that process.  Always returns 1 to continue traversal.  */

static int
lwp_lwpid_htab_remove_pid (void **slot, void *info)
{
  struct lwp_info *lp = (struct lwp_info *) *slot;
  int pid = *(int *) info;

  if (lp->ptid.pid () == pid)
    {
      /* Drop from the hash table, the linked list, and free.  */
      htab_clear_slot (lwp_lwpid_htab, slot);
      lwp_list_remove (lp);
      delete lp;
    }

  return 1;
}
769
/* Remove all LWPs belonging to PID from the lwp list, freeing each
   one (see lwp_lwpid_htab_remove_pid).  */

static void
purge_lwp_list (int pid)
{
  htab_traverse_noresize (lwp_lwpid_htab, lwp_lwpid_htab_remove_pid, &pid);
}
777
/* Add the LWP specified by PTID to the list.  PTID is the first LWP
   in the process.  Return a pointer to the structure describing the
   new LWP.

   This differs from add_lwp in that we don't let the arch specific
   bits know about this new thread.  Current clients of this callback
   take the opportunity to install watchpoints in the new thread, and
   we shouldn't do that for the first thread.  If we're spawning a
   child ("run"), the thread executes the shell wrapper first, and we
   shouldn't touch it until it execs the program we want to debug.
   For "attach", it'd be okay to call the callback, but it's not
   necessary, because watchpoints can't yet have been inserted into
   the inferior.  */

static struct lwp_info *
add_initial_lwp (ptid_t ptid)
{
  /* The ptid must carry an explicit lwp component.  */
  gdb_assert (ptid.lwp_p ());

  lwp_info *lp = new lwp_info (ptid);

  /* Add to sorted-by-reverse-creation-order list.  */
  lwp_list_add (lp);

  /* Add to keyed-by-pid htab.  */
  lwp_lwpid_htab_add_lwp (lp);

  return lp;
}
808
809 /* Add the LWP specified by PID to the list. Return a pointer to the
810 structure describing the new LWP. The LWP should already be
811 stopped. */
812
813 static struct lwp_info *
814 add_lwp (ptid_t ptid)
815 {
816 struct lwp_info *lp;
817
818 lp = add_initial_lwp (ptid);
819
820 /* Let the arch specific bits know about this new thread. Current
821 clients of this callback take the opportunity to install
822 watchpoints in the new thread. We don't do this for the first
823 thread though. See add_initial_lwp. */
824 linux_target->low_new_thread (lp);
825
826 return lp;
827 }
828
/* Remove the LWP specified by PTID from the list and free it.  A
   no-op if no such LWP is known.  */

static void
delete_lwp (ptid_t ptid)
{
  /* Dummy key carrying just the lwpid, for hash lookup.  */
  lwp_info dummy (ptid);

  void **slot = htab_find_slot (lwp_lwpid_htab, &dummy, NO_INSERT);
  if (slot == NULL)
    return;

  lwp_info *lp = *(struct lwp_info **) slot;
  gdb_assert (lp != NULL);

  htab_clear_slot (lwp_lwpid_htab, slot);

  /* Remove from sorted-by-creation-order list.  */
  lwp_list_remove (lp);

  /* Release.  */
  delete lp;
}
851
852 /* Return a pointer to the structure describing the LWP corresponding
853 to PID. If no corresponding LWP could be found, return NULL. */
854
855 static struct lwp_info *
856 find_lwp_pid (ptid_t ptid)
857 {
858 int lwp;
859
860 if (ptid.lwp_p ())
861 lwp = ptid.lwp ();
862 else
863 lwp = ptid.pid ();
864
865 lwp_info dummy (ptid_t (0, lwp));
866 return (struct lwp_info *) htab_find (lwp_lwpid_htab, &dummy);
867 }
868
/* See nat/linux-nat.h.  Call CALLBACK for each LWP matching FILTER;
   stop and return the LWP for which CALLBACK returns nonzero, or NULL
   if none does.  Uses the deletion-safe range, so CALLBACK may delete
   the LWP it is given.  */

struct lwp_info *
iterate_over_lwps (ptid_t filter,
                   gdb::function_view<iterate_over_lwps_ftype> callback)
{
  for (lwp_info *lp : all_lwps_safe ())
    {
      if (lp->ptid.matches (filter))
        {
          if (callback (lp) != 0)
            return lp;
        }
    }

  return NULL;
}
886
/* Update our internal state when changing from one checkpoint to
   another indicated by NEW_PTID.  We can only switch single-threaded
   applications, so we only create one new LWP, and the previous list
   is discarded.  */

void
linux_nat_switch_fork (ptid_t new_ptid)
{
  struct lwp_info *lp;

  /* Discard all LWPs of the current inferior.  */
  purge_lwp_list (inferior_ptid.pid ());

  lp = add_lwp (new_ptid);
  lp->stopped = 1;

  /* This changes the thread's ptid while preserving the gdb thread
     num.  Also changes the inferior pid, while preserving the
     inferior num.  */
  thread_change_ptid (linux_target, inferior_ptid, new_ptid);

  /* We've just told GDB core that the thread changed target id, but,
     in fact, it really is a different thread, with different register
     contents.  */
  registers_changed ();
}
912
/* Handle the exit of a single thread LP.  If DEL_THREAD is true,
   delete the thread_info associated to LP, if it exists.  The lwp_info
   itself is always deleted.  */

static void
exit_lwp (struct lwp_info *lp, bool del_thread = true)
{
  struct thread_info *th = linux_target->find_thread (lp->ptid);

  if (th != nullptr && del_thread)
    delete_thread (th);

  delete_lwp (lp->ptid);
}
926
/* Wait for the LWP specified by LP, which we have just attached to.
   Returns a wait status for that LWP, to cache.  Sets *SIGNALLED to 1
   if the first stop was for a signal other than the expected
   SIGSTOP.  */

static int
linux_nat_post_attach_wait (ptid_t ptid, int *signalled)
{
  pid_t new_pid, pid = ptid.lwp ();
  int status;

  if (linux_proc_pid_is_stopped (pid))
    {
      linux_nat_debug_printf ("Attaching to a stopped process");

      /* The process is definitely stopped.  It is in a job control
         stop, unless the kernel predates the TASK_STOPPED /
         TASK_TRACED distinction, in which case it might be in a
         ptrace stop.  Make sure it is in a ptrace stop; from there we
         can kill it, signal it, et cetera.

         First make sure there is a pending SIGSTOP.  Since we are
         already attached, the process can not transition from stopped
         to running without a PTRACE_CONT; so we know this signal will
         go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
         probably already in the queue (unless this kernel is old
         enough to use TASK_STOPPED for ptrace stops); but since SIGSTOP
         is not an RT signal, it can only be queued once.  */
      kill_lwp (pid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the SIGSTOP
         (or a higher priority signal, just like normal PTRACE_ATTACH).  */
      ptrace (PTRACE_CONT, pid, 0, 0);
    }

  /* Make sure the initial process is stopped.  The user-level threads
     layer might want to poke around in the inferior, and that won't
     work if things haven't stabilized yet.  */
  new_pid = my_waitpid (pid, &status, __WALL);
  gdb_assert (pid == new_pid);

  if (!WIFSTOPPED (status))
    {
      /* The pid we tried to attach has apparently just exited.  */
      linux_nat_debug_printf ("Failed to stop %d: %s", pid,
                              status_to_str (status).c_str ());
      return status;
    }

  if (WSTOPSIG (status) != SIGSTOP)
    {
      /* Stopped, but by some other signal; record that for the
         caller so the signal can be reported/forwarded later.  */
      *signalled = 1;
      linux_nat_debug_printf ("Received %s after attaching",
                              status_to_str (status).c_str ());
    }

  return status;
}
983
/* Start a new inferior running EXEC_FILE with arguments ALLARGS and
   environment ENV, then open the cached /proc/PID/mem handle for it.  */
void
linux_nat_target::create_inferior (const char *exec_file,
                                   const std::string &allargs,
                                   char **env, int from_tty)
{
  /* Optionally disable ASLR for the child for this scope (restored on
     exit), per the "disable-randomization" setting.  */
  maybe_disable_address_space_randomization restore_personality
    (disable_randomization);

  /* The fork_child mechanism is synchronous and calls target_wait, so
     we have to mask the async mode.  */

  /* Make sure we report all signals during startup.  */
  pass_signals ({});

  inf_ptrace_target::create_inferior (exec_file, allargs, env, from_tty);

  open_proc_mem_file (inferior_ptid);
}
1002
/* Callback for linux_proc_attach_tgid_threads.  Attach to PTID if not
   already attached.  Returns 1 if a new LWP is found (whether or not
   the attach succeeded), 0 otherwise.  */

static int
attach_proc_task_lwp_callback (ptid_t ptid)
{
  struct lwp_info *lp;

  /* Ignore LWPs we're already attached to.  */
  lp = find_lwp_pid (ptid);
  if (lp == NULL)
    {
      int lwpid = ptid.lwp ();

      if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) < 0)
        {
          int err = errno;

          /* Be quiet if we simply raced with the thread exiting.
             EPERM is returned if the thread's task still exists, and
             is marked as exited or zombie, as well as other
             conditions, so in that case, confirm the status in
             /proc/PID/status.  */
          if (err == ESRCH
              || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
            {
              linux_nat_debug_printf
                ("Cannot attach to lwp %d: thread is gone (%d: %s)",
                 lwpid, err, safe_strerror (err));

            }
          else
            {
              /* Real attach failure; warn but keep scanning other
                 threads.  */
              std::string reason
                = linux_ptrace_attach_fail_reason_string (ptid, err);

              warning (_("Cannot attach to lwp %d: %s"),
                       lwpid, reason.c_str ());
            }
        }
      else
        {
          linux_nat_debug_printf ("PTRACE_ATTACH %s, 0, 0 (OK)",
                                  ptid.to_string ().c_str ());

          lp = add_lwp (ptid);

          /* The next time we wait for this LWP we'll see a SIGSTOP as
             PTRACE_ATTACH brings it to a halt.  */
          lp->signalled = 1;

          /* We need to wait for a stop before being able to make the
             next ptrace call on this LWP.  */
          lp->must_set_ptrace_flags = 1;

          /* So that wait collects the SIGSTOP.  */
          lp->resumed = 1;

          /* Also add the LWP to gdb's thread list, in case a
             matching libthread_db is not found (or the process uses
             raw clone).  */
          add_thread (linux_target, lp->ptid);
          set_running (linux_target, lp->ptid, true);
          set_executing (linux_target, lp->ptid, true);
        }

      return 1;
    }
  return 0;
}
1074
/* Implement the "attach" target_ops method for GNU/Linux: attach to
   the process named in ARGS, decorate the main thread with lwp info,
   collect its initial stop, and then attach to every other LWP of
   the thread group found via /proc.  */

void
linux_nat_target::attach (const char *args, int from_tty)
{
  struct lwp_info *lp;
  int status;
  ptid_t ptid;

  /* Make sure we report all signals during attach.  */
  pass_signals ({});

  try
    {
      inf_ptrace_target::attach (args, from_tty);
    }
  catch (const gdb_exception_error &ex)
    {
      /* Augment the generic attach failure with a Linux-specific
	 reason (e.g. ptrace restrictions), when one can be found.  */
      pid_t pid = parse_pid_to_attach (args);
      std::string reason = linux_ptrace_attach_fail_reason (pid);

      if (!reason.empty ())
	throw_error (ex.error, "warning: %s\n%s", reason.c_str (),
		     ex.what ());
      else
	throw_error (ex.error, "%s", ex.what ());
    }

  /* The ptrace base target adds the main thread with (pid,0,0)
     format.  Decorate it with lwp info.  */
  ptid = ptid_t (inferior_ptid.pid (),
		 inferior_ptid.pid ());
  thread_change_ptid (linux_target, inferior_ptid, ptid);

  /* Add the initial process as the first LWP to the list.  */
  lp = add_initial_lwp (ptid);

  /* Wait for the initial stop.  Anything other than a stop means the
     attach did not take; report why and bail out.  */
  status = linux_nat_post_attach_wait (lp->ptid, &lp->signalled);
  if (!WIFSTOPPED (status))
    {
      if (WIFEXITED (status))
	{
	  int exit_code = WEXITSTATUS (status);

	  target_terminal::ours ();
	  target_mourn_inferior (inferior_ptid);
	  if (exit_code == 0)
	    error (_("Unable to attach: program exited normally."));
	  else
	    error (_("Unable to attach: program exited with code %d."),
		   exit_code);
	}
      else if (WIFSIGNALED (status))
	{
	  enum gdb_signal signo;

	  target_terminal::ours ();
	  target_mourn_inferior (inferior_ptid);

	  signo = gdb_signal_from_host (WTERMSIG (status));
	  error (_("Unable to attach: program terminated with signal "
		   "%s, %s."),
		 gdb_signal_to_name (signo),
		 gdb_signal_to_string (signo));
	}

      internal_error (_("unexpected status %d for PID %ld"),
		      status, (long) ptid.lwp ());
    }

  lp->stopped = 1;

  open_proc_mem_file (lp->ptid);

  /* Save the wait status to report later.  */
  lp->resumed = 1;
  linux_nat_debug_printf ("waitpid %ld, saving status %s",
			  (long) lp->ptid.pid (),
			  status_to_str (status).c_str ());

  lp->status = status;

  /* We must attach to every LWP.  If /proc is mounted, use that to
     find them now.  The inferior may be using raw clone instead of
     using pthreads.  But even if it is using pthreads, thread_db
     walks structures in the inferior's address space to find the list
     of threads/LWPs, and those structures may well be corrupted.
     Note that once thread_db is loaded, we'll still use it to list
     threads and associate pthread info with each LWP.  */
  linux_proc_attach_tgid_threads (lp->ptid.pid (),
				  attach_proc_task_lwp_callback);
}
1165
1166 /* Ptrace-detach the thread with pid PID. */
1167
1168 static void
1169 detach_one_pid (int pid, int signo)
1170 {
1171 if (ptrace (PTRACE_DETACH, pid, 0, signo) < 0)
1172 {
1173 int save_errno = errno;
1174
1175 /* We know the thread exists, so ESRCH must mean the lwp is
1176 zombie. This can happen if one of the already-detached
1177 threads exits the whole thread group. In that case we're
1178 still attached, and must reap the lwp. */
1179 if (save_errno == ESRCH)
1180 {
1181 int ret, status;
1182
1183 ret = my_waitpid (pid, &status, __WALL);
1184 if (ret == -1)
1185 {
1186 warning (_("Couldn't reap LWP %d while detaching: %s"),
1187 pid, safe_strerror (errno));
1188 }
1189 else if (!WIFEXITED (status) && !WIFSIGNALED (status))
1190 {
1191 warning (_("Reaping LWP %d while detaching "
1192 "returned unexpected status 0x%x"),
1193 pid, status);
1194 }
1195 }
1196 else
1197 error (_("Can't detach %d: %s"),
1198 pid, safe_strerror (save_errno));
1199 }
1200 else
1201 linux_nat_debug_printf ("PTRACE_DETACH (%d, %s, 0) (OK)",
1202 pid, strsignal (signo));
1203 }
1204
/* Get pending signal of THREAD as a host signal number, for detaching
   purposes.  This is the signal the thread last stopped for, which we
   need to deliver to the thread when detaching, otherwise, it'd be
   suppressed/lost.  */

static int
get_detach_signal (struct lwp_info *lp)
{
  enum gdb_signal signo = GDB_SIGNAL_0;

  /* If we paused threads momentarily, we may have stored pending
     events in lp->status or lp->waitstatus (see stop_wait_callback),
     and GDB core hasn't seen any signal for those threads.
     Otherwise, the last signal reported to the core is found in the
     thread object's stop_signal.

     There's a corner case that isn't handled here at present.  Only
     if the thread stopped with a TARGET_WAITKIND_STOPPED does
     stop_signal make sense as a real signal to pass to the inferior.
     Some catchpoint related events, like
     TARGET_WAITKIND_(V)FORK|EXEC|SYSCALL, have their stop_signal set
     to GDB_SIGNAL_SIGTRAP when the catchpoint triggers.  But,
     those traps are debug API (ptrace in our case) related and
     induced; the inferior wouldn't see them if it wasn't being
     traced.  Hence, we should never pass them to the inferior, even
     when set to pass state.  Since this corner case isn't handled by
     infrun.c when proceeding with a signal, for consistency, neither
     do we handle it here (or elsewhere in the file we check for
     signal pass state).  Normally SIGTRAP isn't set to pass state, so
     this is really a corner case.  */

  if (lp->waitstatus.kind () != TARGET_WAITKIND_IGNORE)
    signo = GDB_SIGNAL_0; /* a pending ptrace event, not a real signal.  */
  else if (lp->status)
    signo = gdb_signal_from_host (WSTOPSIG (lp->status));
  else
    {
      thread_info *tp = linux_target->find_thread (lp->ptid);

      /* NOTE(review): TP is dereferenced unconditionally below, so
	 every LWP reaching here is assumed to have a matching
	 thread_info -- confirm against the detach callers.  */
      if (target_is_non_stop_p () && !tp->executing ())
	{
	  if (tp->has_pending_waitstatus ())
	    {
	      /* If the thread has a pending event, and it was stopped with a
		 signal, use that signal to resume it.  If it has a pending
		 event of another kind, it was not stopped with a signal, so
		 resume it without a signal.  */
	      if (tp->pending_waitstatus ().kind () == TARGET_WAITKIND_STOPPED)
		signo = tp->pending_waitstatus ().sig ();
	      else
		signo = GDB_SIGNAL_0;
	    }
	  else
	    signo = tp->stop_signal ();
	}
      else if (!target_is_non_stop_p ())
	{
	  ptid_t last_ptid;
	  process_stratum_target *last_target;

	  get_last_target_status (&last_target, &last_ptid, nullptr);

	  /* In all-stop, only the thread that last reported a stop
	     carries a meaningful stop_signal.  */
	  if (last_target == linux_target
	      && lp->ptid.lwp () == last_ptid.lwp ())
	    signo = tp->stop_signal ();
	}
    }

  if (signo == GDB_SIGNAL_0)
    {
      linux_nat_debug_printf ("lwp %s has no pending signal",
			      lp->ptid.to_string ().c_str ());
    }
  else if (!signal_pass_state (signo))
    {
      linux_nat_debug_printf
	("lwp %s had signal %s but it is in no pass state",
	 lp->ptid.to_string ().c_str (), gdb_signal_to_string (signo));
    }
  else
    {
      linux_nat_debug_printf ("lwp %s has pending signal %s",
			      lp->ptid.to_string ().c_str (),
			      gdb_signal_to_string (signo));

      /* Translate to the host signal number ptrace expects.  */
      return gdb_signal_to_host (signo);
    }

  return 0;
}
1295
/* If LP has a pending fork/vfork/clone status, return it.  The event
   may be recorded in any of several places, checked in order below;
   returns an empty optional if none is found.  */

static gdb::optional<target_waitstatus>
get_pending_child_status (lwp_info *lp)
{
  LINUX_NAT_SCOPED_DEBUG_ENTER_EXIT;

  linux_nat_debug_printf ("lwp %s (stopped = %d)",
			  lp->ptid.to_string ().c_str (), lp->stopped);

  /* Check in lwp_info::status.  */
  if (WIFSTOPPED (lp->status) && linux_is_extended_waitstatus (lp->status))
    {
      int event = linux_ptrace_get_extended_event (lp->status);

      if (event == PTRACE_EVENT_FORK
	  || event == PTRACE_EVENT_VFORK
	  || event == PTRACE_EVENT_CLONE)
	{
	  /* The raw wait status only says which kind of event this
	     is; ask the kernel for the new child's id.  */
	  unsigned long child_pid;
	  int ret = ptrace (PTRACE_GETEVENTMSG, lp->ptid.lwp (), 0, &child_pid);
	  if (ret == 0)
	    {
	      target_waitstatus ws;

	      if (event == PTRACE_EVENT_FORK)
		ws.set_forked (ptid_t (child_pid, child_pid));
	      else if (event == PTRACE_EVENT_VFORK)
		ws.set_vforked (ptid_t (child_pid, child_pid));
	      else if (event == PTRACE_EVENT_CLONE)
		ws.set_thread_cloned (ptid_t (lp->ptid.pid (), child_pid));
	      else
		gdb_assert_not_reached ("unhandled");

	      return ws;
	    }
	  else
	    {
	      perror_warning_with_name (_("Failed to retrieve event msg"));
	      return {};
	    }
	}
    }

  /* Check in lwp_info::waitstatus.  */
  if (is_new_child_status (lp->waitstatus.kind ()))
    return lp->waitstatus;

  thread_info *tp = linux_target->find_thread (lp->ptid);

  /* Check in thread_info::pending_waitstatus.  */
  if (tp->has_pending_waitstatus ()
      && is_new_child_status (tp->pending_waitstatus ().kind ()))
    return tp->pending_waitstatus ();

  /* Check in thread_info::pending_follow.  */
  if (is_new_child_status (tp->pending_follow.kind ()))
    return tp->pending_follow;

  return {};
}
1357
/* Detach from LP.  If SIGNO_P is non-NULL, then it points to the
   signal number that should be passed to the LWP when detaching.
   Otherwise pass any pending signal the LWP may have, if any.  */

static void
detach_one_lwp (struct lwp_info *lp, int *signo_p)
{
  int lwpid = lp->ptid.lwp ();
  int signo;

  /* If the lwp/thread we are about to detach has a pending fork/clone
     event, there is a process/thread GDB is attached to that the core
     of GDB doesn't know about.  Detach from it.  */

  gdb::optional<target_waitstatus> ws = get_pending_child_status (lp);
  if (ws.has_value ())
    detach_one_pid (ws->child_ptid ().lwp (), 0);

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lp->signalled)
    {
      linux_nat_debug_printf ("Sending SIGCONT to %s",
			      lp->ptid.to_string ().c_str ());

      kill_lwp (lwpid, SIGCONT);
      lp->signalled = 0;
    }

  if (signo_p == NULL)
    {
      /* Pass on any pending signal for this LWP.  */
      signo = get_detach_signal (lp);
    }
  else
    signo = *signo_p;

  linux_nat_debug_printf ("preparing to resume lwp %s (stopped = %d)",
			  lp->ptid.to_string ().c_str (),
			  lp->stopped);

  /* Preparing to resume may try to write registers, and fail if the
     lwp is zombie.  If that happens, ignore the error.  We'll handle
     it below, when detach fails with ESRCH.  */
  try
    {
      linux_target->low_prepare_to_resume (lp);
    }
  catch (const gdb_exception_error &ex)
    {
      if (!check_ptrace_stopped_lwp_gone (lp))
	throw;
    }

  detach_one_pid (lwpid, signo);

  /* The LWP is no longer ours; drop our bookkeeping for it.  */
  delete_lwp (lp->ptid);
}
1415
1416 static int
1417 detach_callback (struct lwp_info *lp)
1418 {
1419 /* We don't actually detach from the thread group leader just yet.
1420 If the thread group exits, we must reap the zombie clone lwps
1421 before we're able to reap the leader. */
1422 if (lp->ptid.lwp () != lp->ptid.pid ())
1423 detach_one_lwp (lp, NULL);
1424 return 0;
1425 }
1426
/* Implement the "detach" target_ops method: stop every LWP of INF,
   detach from all of them (delivering pending signals), and clean up
   per-process state.  */

void
linux_nat_target::detach (inferior *inf, int from_tty)
{
  LINUX_NAT_SCOPED_DEBUG_ENTER_EXIT;

  struct lwp_info *main_lwp;
  int pid = inf->pid;

  /* Don't unregister from the event loop, as there may be other
     inferiors running. */

  /* Stop all threads before detaching.  ptrace requires that the
     thread is stopped to successfully detach.  */
  iterate_over_lwps (ptid_t (pid), stop_callback);
  /* ... and wait until all of them have reported back that
     they're no longer running.  */
  iterate_over_lwps (ptid_t (pid), stop_wait_callback);

  /* We can now safely remove breakpoints.  We don't do this earlier
     in common code because this target doesn't currently support
     writing memory while the inferior is running.  */
  remove_breakpoints_inf (current_inferior ());

  iterate_over_lwps (ptid_t (pid), detach_callback);

  /* We have detached from everything except the main thread now, so
     should only have one thread left.  However, in non-stop mode the
     main thread might have exited, in which case we'll have no threads
     left.  */
  gdb_assert (num_lwps (pid) == 1
	      || (target_is_non_stop_p () && num_lwps (pid) == 0));

  if (forks_exist_p ())
    {
      /* Multi-fork case.  The current inferior_ptid is being detached
	 from, but there are other viable forks to debug.  Detach from
	 the current fork, and context-switch to the first
	 available.  */
      linux_fork_detach (from_tty);
    }
  else
    {
      target_announce_detach (from_tty);

      /* In non-stop mode it is possible that the main thread has exited,
	 in which case we don't try to detach.  */
      main_lwp = find_lwp_pid (ptid_t (pid));
      if (main_lwp != nullptr)
	{
	  /* Pass on any pending signal for the last LWP.  */
	  int signo = get_detach_signal (main_lwp);

	  detach_one_lwp (main_lwp, &signo);
	}
      else
	gdb_assert (target_is_non_stop_p ());

      detach_success (inf);
    }

  close_proc_mem_file (pid);
}
1489
/* Resume execution of the inferior process.  If STEP is nonzero,
   single-step it.  If SIGNAL is nonzero, give it that signal.  May
   throw if the low-level resume fails (e.g. the LWP became zombie);
   callers that want to tolerate that use linux_resume_one_lwp.  */

static void
linux_resume_one_lwp_throw (struct lwp_info *lp, int step,
			    enum gdb_signal signo)
{
  lp->step = step;

  /* stop_pc doubles as the PC the LWP had when it was last resumed.
     We only presently need that if the LWP is stepped though (to
     handle the case of stepping a breakpoint instruction).  */
  if (step)
    {
      struct regcache *regcache = get_thread_regcache (linux_target, lp->ptid);

      lp->stop_pc = regcache_read_pc (regcache);
    }
  else
    lp->stop_pc = 0;

  linux_target->low_prepare_to_resume (lp);
  linux_target->low_resume (lp->ptid, step, signo);

  /* Successfully resumed.  Clear state that no longer makes sense,
     and mark the LWP as running.  Must not do this before resuming
     otherwise if that fails other code will be confused.  E.g., we'd
     later try to stop the LWP and hang forever waiting for a stop
     status.  Note that we must not throw after this is cleared,
     otherwise handle_zombie_lwp_error would get confused.  */
  lp->stopped = 0;
  lp->core = -1;
  lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
  registers_changed_ptid (linux_target, lp->ptid);
}
1525
1526 /* Called when we try to resume a stopped LWP and that errors out. If
1527 the LWP is no longer in ptrace-stopped state (meaning it's zombie,
1528 or about to become), discard the error, clear any pending status
1529 the LWP may have, and return true (we'll collect the exit status
1530 soon enough). Otherwise, return false. */
1531
1532 static int
1533 check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
1534 {
1535 /* If we get an error after resuming the LWP successfully, we'd
1536 confuse !T state for the LWP being gone. */
1537 gdb_assert (lp->stopped);
1538
1539 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
1540 because even if ptrace failed with ESRCH, the tracee may be "not
1541 yet fully dead", but already refusing ptrace requests. In that
1542 case the tracee has 'R (Running)' state for a little bit
1543 (observed in Linux 3.18). See also the note on ESRCH in the
1544 ptrace(2) man page. Instead, check whether the LWP has any state
1545 other than ptrace-stopped. */
1546
1547 /* Don't assume anything if /proc/PID/status can't be read. */
1548 if (linux_proc_pid_is_trace_stopped_nowarn (lp->ptid.lwp ()) == 0)
1549 {
1550 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
1551 lp->status = 0;
1552 lp->waitstatus.set_ignore ();
1553 return 1;
1554 }
1555 return 0;
1556 }
1557
1558 /* Like linux_resume_one_lwp_throw, but no error is thrown if the LWP
1559 disappears while we try to resume it. */
1560
1561 static void
1562 linux_resume_one_lwp (struct lwp_info *lp, int step, enum gdb_signal signo)
1563 {
1564 try
1565 {
1566 linux_resume_one_lwp_throw (lp, step, signo);
1567 }
1568 catch (const gdb_exception_error &ex)
1569 {
1570 if (!check_ptrace_stopped_lwp_gone (lp))
1571 throw;
1572 }
1573 }
1574
1575 /* Resume LP. */
1576
1577 static void
1578 resume_lwp (struct lwp_info *lp, int step, enum gdb_signal signo)
1579 {
1580 if (lp->stopped)
1581 {
1582 struct inferior *inf = find_inferior_ptid (linux_target, lp->ptid);
1583
1584 if (inf->vfork_child != NULL)
1585 {
1586 linux_nat_debug_printf ("Not resuming sibling %s (vfork parent)",
1587 lp->ptid.to_string ().c_str ());
1588 }
1589 else if (!lwp_status_pending_p (lp))
1590 {
1591 linux_nat_debug_printf ("Resuming sibling %s, %s, %s",
1592 lp->ptid.to_string ().c_str (),
1593 (signo != GDB_SIGNAL_0
1594 ? strsignal (gdb_signal_to_host (signo))
1595 : "0"),
1596 step ? "step" : "resume");
1597
1598 linux_resume_one_lwp (lp, step, signo);
1599 }
1600 else
1601 {
1602 linux_nat_debug_printf ("Not resuming sibling %s (has pending)",
1603 lp->ptid.to_string ().c_str ());
1604 }
1605 }
1606 else
1607 linux_nat_debug_printf ("Not resuming sibling %s (not stopped)",
1608 lp->ptid.to_string ().c_str ());
1609 }
1610
1611 /* Callback for iterate_over_lwps. If LWP is EXCEPT, do nothing.
1612 Resume LWP with the last stop signal, if it is in pass state. */
1613
1614 static int
1615 linux_nat_resume_callback (struct lwp_info *lp, struct lwp_info *except)
1616 {
1617 enum gdb_signal signo = GDB_SIGNAL_0;
1618
1619 if (lp == except)
1620 return 0;
1621
1622 if (lp->stopped)
1623 {
1624 struct thread_info *thread;
1625
1626 thread = linux_target->find_thread (lp->ptid);
1627 if (thread != NULL)
1628 {
1629 signo = thread->stop_signal ();
1630 thread->set_stop_signal (GDB_SIGNAL_0);
1631 }
1632 }
1633
1634 resume_lwp (lp, 0, signo);
1635 return 0;
1636 }
1637
1638 static int
1639 resume_clear_callback (struct lwp_info *lp)
1640 {
1641 lp->resumed = 0;
1642 lp->last_resume_kind = resume_stop;
1643 return 0;
1644 }
1645
1646 static int
1647 resume_set_callback (struct lwp_info *lp)
1648 {
1649 lp->resumed = 1;
1650 lp->last_resume_kind = resume_continue;
1651 return 0;
1652 }
1653
/* Implement the "resume" target_ops method: resume the LWPs matched
   by SCOPE_PTID, single-stepping the event thread (inferior_ptid) if
   STEP, delivering SIGNO to it if nonzero.  */

void
linux_nat_target::resume (ptid_t scope_ptid, int step, enum gdb_signal signo)
{
  struct lwp_info *lp;

  linux_nat_debug_printf ("Preparing to %s %s, %s, inferior_ptid %s",
			  step ? "step" : "resume",
			  scope_ptid.to_string ().c_str (),
			  (signo != GDB_SIGNAL_0
			   ? strsignal (gdb_signal_to_host (signo)) : "0"),
			  inferior_ptid.to_string ().c_str ());

  /* Mark the lwps we're resuming as resumed and update their
     last_resume_kind to resume_continue.  */
  iterate_over_lwps (scope_ptid, resume_set_callback);

  lp = find_lwp_pid (inferior_ptid);
  gdb_assert (lp != NULL);

  /* Remember if we're stepping.  */
  lp->last_resume_kind = step ? resume_step : resume_continue;

  /* If we have a pending wait status for this thread, there is no
     point in resuming the process.  But first make sure that
     linux_nat_wait won't preemptively handle the event - we
     should never take this short-circuit if we are going to
     leave LP running, since we have skipped resuming all the
     other threads.  This bit of code needs to be synchronized
     with linux_nat_wait.  */

  if (lp->status && WIFSTOPPED (lp->status))
    {
      if (!lp->step
	  && WSTOPSIG (lp->status)
	  && sigismember (&pass_mask, WSTOPSIG (lp->status)))
	{
	  linux_nat_debug_printf
	    ("Not short circuiting for ignored status 0x%x", lp->status);

	  /* FIXME: What should we do if we are supposed to continue
	     this thread with a signal?  */
	  gdb_assert (signo == GDB_SIGNAL_0);
	  /* Deliver the pending pass-state signal instead of
	     reporting it, and forget the pending status.  */
	  signo = gdb_signal_from_host (WSTOPSIG (lp->status));
	  lp->status = 0;
	}
    }

  if (lwp_status_pending_p (lp))
    {
      /* FIXME: What should we do if we are supposed to continue
	 this thread with a signal?  */
      gdb_assert (signo == GDB_SIGNAL_0);

      linux_nat_debug_printf ("Short circuiting for status %s",
			      pending_status_str (lp).c_str ());

      if (target_can_async_p ())
	{
	  target_async (true);
	  /* Tell the event loop we have something to process.  */
	  async_file_mark ();
	}
      return;
    }

  /* No use iterating unless we're resuming other threads.  */
  if (scope_ptid != lp->ptid)
    iterate_over_lwps (scope_ptid, [=] (struct lwp_info *info)
      {
	return linux_nat_resume_callback (info, lp);
      });

  linux_nat_debug_printf ("%s %s, %s (resume event thread)",
			  step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
			  lp->ptid.to_string ().c_str (),
			  (signo != GDB_SIGNAL_0
			   ? strsignal (gdb_signal_to_host (signo)) : "0"));

  linux_resume_one_lwp (lp, step, signo);
}
1734
1735 /* Send a signal to an LWP. */
1736
1737 static int
1738 kill_lwp (int lwpid, int signo)
1739 {
1740 int ret;
1741
1742 errno = 0;
1743 ret = syscall (__NR_tkill, lwpid, signo);
1744 if (errno == ENOSYS)
1745 {
1746 /* If tkill fails, then we are not using nptl threads, a
1747 configuration we no longer support. */
1748 perror_with_name (("tkill"));
1749 }
1750 return ret;
1751 }
1752
/* Handle a GNU/Linux syscall trap wait response.  If we see a syscall
   event, check if the core is interested in it: if not, ignore the
   event, and keep waiting; otherwise, we need to toggle the LWP's
   syscall entry/exit status, since the ptrace event itself doesn't
   indicate it, and report the trap to higher layers.  Returns nonzero
   if the event was consumed here (caller keeps waiting), zero if it
   should be reported to the core.  */

static int
linux_handle_syscall_trap (struct lwp_info *lp, int stopping)
{
  struct target_waitstatus *ourstatus = &lp->waitstatus;
  struct gdbarch *gdbarch = target_thread_architecture (lp->ptid);
  thread_info *thread = linux_target->find_thread (lp->ptid);
  int syscall_number = (int) gdbarch_get_syscall_number (gdbarch, thread);

  if (stopping)
    {
      /* If we're stopping threads, there's a SIGSTOP pending, which
	 makes it so that the LWP reports an immediate syscall return,
	 followed by the SIGSTOP.  Skip seeing that "return" using
	 PTRACE_CONT directly, and let stop_wait_callback collect the
	 SIGSTOP.  Later when the thread is resumed, a new syscall
	 entry event.  If we didn't do this (and returned 0), we'd
	 leave a syscall entry pending, and our caller, by using
	 PTRACE_CONT to collect the SIGSTOP, skips the syscall return
	 itself.  Later, when the user re-resumes this LWP, we'd see
	 another syscall entry event and we'd mistake it for a return.

	 If stop_wait_callback didn't force the SIGSTOP out of the LWP
	 (leaving immediately with LWP->signalled set, without issuing
	 a PTRACE_CONT), it would still be problematic to leave this
	 syscall enter pending, as later when the thread is resumed,
	 it would then see the same syscall exit mentioned above,
	 followed by the delayed SIGSTOP, while the syscall didn't
	 actually get to execute.  It seems it would be even more
	 confusing to the user.  */

      linux_nat_debug_printf
	("ignoring syscall %d for LWP %ld (stopping threads), resuming with "
	 "PTRACE_CONT for SIGSTOP", syscall_number, lp->ptid.lwp ());

      lp->syscall_state = TARGET_WAITKIND_IGNORE;
      ptrace (PTRACE_CONT, lp->ptid.lwp (), 0, 0);
      lp->stopped = 0;
      return 1;
    }

  /* Always update the entry/return state, even if this particular
     syscall isn't interesting to the core now.  In async mode,
     the user could install a new catchpoint for this syscall
     between syscall enter/return, and we'll need to know to
     report a syscall return if that happens.  */
  lp->syscall_state = (lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
		       ? TARGET_WAITKIND_SYSCALL_RETURN
		       : TARGET_WAITKIND_SYSCALL_ENTRY);

  if (catch_syscall_enabled ())
    {
      if (catching_syscall_number (syscall_number))
	{
	  /* Alright, an event to report.  */
	  if (lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY)
	    ourstatus->set_syscall_entry (syscall_number);
	  else if (lp->syscall_state == TARGET_WAITKIND_SYSCALL_RETURN)
	    ourstatus->set_syscall_return (syscall_number);
	  else
	    gdb_assert_not_reached ("unexpected syscall state");

	  linux_nat_debug_printf
	    ("stopping for %s of syscall %d for LWP %ld",
	     (lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
	      ? "entry" : "return"), syscall_number, lp->ptid.lwp ());

	  return 0;
	}

      linux_nat_debug_printf
	("ignoring %s of syscall %d for LWP %ld",
	 (lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
	  ? "entry" : "return"), syscall_number, lp->ptid.lwp ());
    }
  else
    {
      /* If we had been syscall tracing, and hence used PT_SYSCALL
	 before on this LWP, it could happen that the user removes all
	 syscall catchpoints before we get to process this event.
	 There are two noteworthy issues here:

	 - When stopped at a syscall entry event, resuming with
	   PT_STEP still resumes executing the syscall and reports a
	   syscall return.

	 - Only PT_SYSCALL catches syscall enters.  If we last
	   single-stepped this thread, then this event can't be a
	   syscall enter.  If we last single-stepped this thread, this
	   has to be a syscall exit.

	 The points above mean that the next resume, be it PT_STEP or
	 PT_CONTINUE, can not trigger a syscall trace event.  */
      linux_nat_debug_printf
	("caught syscall event with no syscall catchpoints. %d for LWP %ld, "
	 "ignoring", syscall_number, lp->ptid.lwp ());
      lp->syscall_state = TARGET_WAITKIND_IGNORE;
    }

  /* The core isn't interested in this event.  For efficiency, avoid
     stopping all threads only to have the core resume them all again.
     Since we're not stopping threads, if we're still syscall tracing
     and not stepping, we can't use PTRACE_CONT here, as we'd miss any
     subsequent syscall.  Simply resume using the inf-ptrace layer,
     which knows when to use PT_SYSCALL or PT_CONTINUE.  */

  linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
  return 1;
}
1867
/* See target.h.  Record a newly-cloned LWP CHILD_PTID: add it to the
   LWP (and possibly thread) lists and pick up its initial stop
   status, which must have been saved in stopped_pids earlier.  */

void
linux_nat_target::follow_clone (ptid_t child_ptid)
{
  lwp_info *new_lp = add_lwp (child_ptid);
  new_lp->stopped = 1;

  /* If the thread_db layer is active, let it record the user
     level thread id and status, and add the thread to GDB's
     list.  */
  if (!thread_db_notice_clone (inferior_ptid, new_lp->ptid))
    {
      /* The process is not using thread_db.  Add the LWP to
	 GDB's list.  */
      add_thread (linux_target, new_lp->ptid);
    }

  /* We just created NEW_LP so it cannot yet contain STATUS.  */
  gdb_assert (new_lp->status == 0);

  if (!pull_pid_from_list (&stopped_pids, child_ptid.lwp (), &new_lp->status))
    internal_error (_("no saved status for clone lwp"));

  if (WSTOPSIG (new_lp->status) != SIGSTOP)
    {
      /* This can happen if someone starts sending signals to
	 the new thread before it gets a chance to run, which
	 have a lower number than SIGSTOP (e.g. SIGUSR1).
	 This is an unlikely case, and harder to handle for
	 fork / vfork than for clone, so we do not try - but
	 we handle it for clone events here.  */

      new_lp->signalled = 1;

      /* Save the wait status to report later.  */
      linux_nat_debug_printf
	("waitpid of new LWP %ld, saving status %s",
	 (long) new_lp->ptid.lwp (), status_to_str (new_lp->status).c_str ());
    }
  else
    {
      /* The expected initial SIGSTOP; nothing to report for it.  */
      new_lp->status = 0;

      if (report_thread_events)
	new_lp->waitstatus.set_thread_created ();
    }
}
1916
/* Handle a GNU/Linux extended wait response.  If we see a clone
   event, we need to add the new LWP to our list (and not report the
   trap to higher layers).  This function returns non-zero if the
   event should be ignored and we should wait again.  If STOPPING is
   true, the new LWP remains stopped, otherwise it is continued.  */

static int
linux_handle_extended_wait (struct lwp_info *lp, int status)
{
  int pid = lp->ptid.lwp ();
  struct target_waitstatus *ourstatus = &lp->waitstatus;
  int event = linux_ptrace_get_extended_event (status);

  /* All extended events we currently use are mid-syscall.  Only
     PTRACE_EVENT_STOP is delivered more like a signal-stop, but
     you have to be using PTRACE_SEIZE to get that.  */
  lp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;

  if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK
      || event == PTRACE_EVENT_CLONE)
    {
      unsigned long new_pid;
      int ret;

      /* Retrieve the id of the new child/thread.  */
      ptrace (PTRACE_GETEVENTMSG, pid, 0, &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (! pull_pid_from_list (&stopped_pids, new_pid, &status))
	{
	  /* The new child has a pending SIGSTOP.  We can't affect it until it
	     hits the SIGSTOP, but we're already attached.  */
	  ret = my_waitpid (new_pid, &status, __WALL);
	  if (ret == -1)
	    perror_with_name (_("waiting for new child"));
	  else if (ret != new_pid)
	    internal_error (_("wait returned unexpected PID %d"), ret);
	  else if (!WIFSTOPPED (status))
	    internal_error (_("wait returned unexpected status 0x%x"), status);
	}

      if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
	{
	  open_proc_mem_file (ptid_t (new_pid, new_pid));

	  /* The arch-specific native code may need to know about new
	     forks even if those end up never mapped to an
	     inferior.  */
	  linux_target->low_new_fork (lp, new_pid);
	}
      else if (event == PTRACE_EVENT_CLONE)
	{
	  linux_target->low_new_clone (lp, new_pid);
	}

      if (event == PTRACE_EVENT_FORK
	  && linux_fork_checkpointing_p (lp->ptid.pid ()))
	{
	  /* Handle checkpointing by linux-fork.c here as a special
	     case.  We don't want the follow-fork-mode or 'catch fork'
	     to interfere with this.  */

	  /* This won't actually modify the breakpoint list, but will
	     physically remove the breakpoints from the child.  */
	  detach_breakpoints (ptid_t (new_pid, new_pid));

	  /* Retain child fork in ptrace (stopped) state.  */
	  if (!find_fork_pid (new_pid))
	    add_fork (new_pid);

	  /* Report as spurious, so that infrun doesn't want to follow
	     this fork.  We're actually doing an infcall in
	     linux-fork.c.  */
	  ourstatus->set_spurious ();

	  /* Report the stop to the core.  */
	  return 0;
	}

      if (event == PTRACE_EVENT_FORK)
	ourstatus->set_forked (ptid_t (new_pid, new_pid));
      else if (event == PTRACE_EVENT_VFORK)
	ourstatus->set_vforked (ptid_t (new_pid, new_pid));
      else if (event == PTRACE_EVENT_CLONE)
	{
	  linux_nat_debug_printf
	    ("Got clone event from LWP %d, new child is LWP %ld", pid, new_pid);

	  /* Save the status again, we'll use it in follow_clone.  */
	  add_to_pid_list (&stopped_pids, new_pid, status);

	  ourstatus->set_thread_cloned (ptid_t (lp->ptid.pid (), new_pid));
	}

      return 0;
    }

  if (event == PTRACE_EVENT_EXEC)
    {
      linux_nat_debug_printf ("Got exec event from LWP %ld", lp->ptid.lwp ());

      /* Close the previous /proc/PID/mem file for this inferior,
	 which was using the address space which is now gone.
	 Reading/writing from this file would return 0/EOF.  */
      close_proc_mem_file (lp->ptid.pid ());

      /* Open a new file for the new address space.  */
      open_proc_mem_file (lp->ptid);

      ourstatus->set_execd
	(make_unique_xstrdup (linux_proc_pid_to_exec_file (pid)));

      /* The thread that execed must have been resumed, but, when a
	 thread execs, it changes its tid to the tgid, and the old
	 tgid thread might have not been resumed.  */
      lp->resumed = 1;

      /* All other LWPs are gone now.  We'll have received a thread
	 exit notification for all threads other the execing one.
	 That one, if it wasn't the leader, just silently changes its
	 tid to the tgid, and the previous leader vanishes.  Since
	 Linux 3.0, the former thread ID can be retrieved with
	 PTRACE_GETEVENTMSG, but since we support older kernels, don't
	 bother with it, and just walk the LWP list.  Even with
	 PTRACE_GETEVENTMSG, we'd still need to lookup the
	 corresponding LWP object, and it would be an extra ptrace
	 syscall, so this way may even be more efficient.  */
      for (lwp_info *other_lp : all_lwps_safe ())
	if (other_lp != lp && other_lp->ptid.pid () == lp->ptid.pid ())
	  exit_lwp (other_lp);

      return 0;
    }

  if (event == PTRACE_EVENT_VFORK_DONE)
    {
      linux_nat_debug_printf
	("Got PTRACE_EVENT_VFORK_DONE from LWP %ld",
	 lp->ptid.lwp ());
      ourstatus->set_vfork_done ();
      return 0;
    }

  internal_error (_("unknown ptrace event %d"), event);
}
2061
2062 /* Suspend waiting for a signal. We're mostly interested in
2063 SIGCHLD/SIGINT. */
2064
2065 static void
2066 wait_for_signal ()
2067 {
2068 linux_nat_debug_printf ("about to sigsuspend");
2069 sigsuspend (&suspend_mask);
2070
2071 /* If the quit flag is set, it means that the user pressed Ctrl-C
2072 and we're debugging a process that is running on a separate
2073 terminal, so we must forward the Ctrl-C to the inferior. (If the
2074 inferior is sharing GDB's terminal, then the Ctrl-C reaches the
2075 inferior directly.) We must do this here because functions that
2076 need to block waiting for a signal loop forever until there's an
2077 event to report before returning back to the event loop. */
2078 if (!target_terminal::is_ours ())
2079 {
2080 if (check_quit_flag ())
2081 target_pass_ctrlc ();
2082 }
2083 }
2084
/* Wait for LP to stop.  Returns the (raw waitpid) wait status, or 0
   if the LWP has exited -- including the case where the whole process
   exited, in which case the status is stored in LP->waitstatus before
   returning 0.  Also returns 0 after consuming an extended ptrace
   event (fork/clone/exec/etc.), which is recorded via
   linux_handle_extended_wait.  Must be called with LP not yet stopped
   and with no status pending.  */

static int
wait_lwp (struct lwp_info *lp)
{
  pid_t pid;
  int status = 0;
  int thread_dead = 0;
  sigset_t prev_mask;

  gdb_assert (!lp->stopped);
  gdb_assert (lp->status == 0);

  /* Make sure SIGCHLD is blocked for sigsuspend avoiding a race below.  */
  block_child_signals (&prev_mask);

  /* Poll with WNOHANG and sleep in sigsuspend between polls; see the
     zombie-leader comment below for why a blocking waitpid cannot be
     used here.  */
  for (;;)
    {
      pid = my_waitpid (lp->ptid.lwp (), &status, __WALL | WNOHANG);
      if (pid == -1 && errno == ECHILD)
	{
	  /* The thread has previously exited.  We need to delete it
	     now because if this was a non-leader thread execing, we
	     won't get an exit event.  See comments on exec events at
	     the top of the file.  */
	  thread_dead = 1;
	  linux_nat_debug_printf ("%s vanished.",
				  lp->ptid.to_string ().c_str ());
	}
      if (pid != 0)
	break;

      /* Bugs 10970, 12702.
	 Thread group leader may have exited in which case we'll lock up in
	 waitpid if there are other threads, even if they are all zombies too.
	 Basically, we're not supposed to use waitpid this way.
	 tkill(pid,0) cannot be used here as it gets ESRCH for both
	 for zombie and running processes.

	 As a workaround, check if we're waiting for the thread group leader and
	 if it's a zombie, and avoid calling waitpid if it is.

	 This is racy, what if the tgl becomes a zombie right after we check?
	 Therefore always use WNOHANG with sigsuspend - it is equivalent to
	 waiting waitpid but linux_proc_pid_is_zombie is safe this way.  */

      /* pid == lwp only for the thread-group leader.  */
      if (lp->ptid.pid () == lp->ptid.lwp ()
	  && linux_proc_pid_is_zombie (lp->ptid.lwp ()))
	{
	  thread_dead = 1;
	  linux_nat_debug_printf ("Thread group leader %s vanished.",
				  lp->ptid.to_string ().c_str ());
	  break;
	}

      /* Wait for next SIGCHLD and try again.  This may let SIGCHLD handlers
	 get invoked despite our caller had them intentionally blocked by
	 block_child_signals.  This is sensitive only to the loop of
	 linux_nat_wait_1 and there if we get called my_waitpid gets called
	 again before it gets to sigsuspend so we can safely let the handlers
	 get executed here.  */
      wait_for_signal ();
    }

  restore_child_signals_mask (&prev_mask);

  if (!thread_dead)
    {
      gdb_assert (pid == lp->ptid.lwp ());

      linux_nat_debug_printf ("waitpid %s received %s",
			      lp->ptid.to_string ().c_str (),
			      status_to_str (status).c_str ());

      /* Check if the thread has exited.  */
      if (WIFEXITED (status) || WIFSIGNALED (status))
	{
	  if (report_exit_events_for (lp) || is_leader (lp))
	    {
	      linux_nat_debug_printf ("LWP %d exited.", lp->ptid.pid ());

	      /* If this is the leader exiting, it means the whole
		 process is gone.  Store the status to report to the
		 core.  Store it in lp->waitstatus, because lp->status
		 would be ambiguous (W_EXITCODE(0,0) == 0).  */
	      lp->waitstatus = host_status_to_waitstatus (status);
	      return 0;
	    }

	  /* A non-leader thread exited and nobody asked for exit
	     events: treat it like the "vanished" case above.  */
	  thread_dead = 1;
	  linux_nat_debug_printf ("%s exited.",
				  lp->ptid.to_string ().c_str ());
	}
    }

  if (thread_dead)
    {
      exit_lwp (lp);
      return 0;
    }

  /* At this point the LWP must have reported a stop.  */
  gdb_assert (WIFSTOPPED (status));
  lp->stopped = 1;

  /* Enable the requested ptrace event reporting options the first
     time we see the LWP stop, if that was deferred.  */
  if (lp->must_set_ptrace_flags)
    {
      inferior *inf = find_inferior_pid (linux_target, lp->ptid.pid ());
      int options = linux_nat_ptrace_options (inf->attach_flag);

      linux_enable_event_reporting (lp->ptid.lwp (), options);
      lp->must_set_ptrace_flags = 0;
    }

  /* Handle GNU/Linux's syscall SIGTRAPs.  */
  if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
    {
      /* No longer need the sysgood bit.  The ptrace event ends up
	 recorded in lp->waitstatus if we care for it.  We can carry
	 on handling the event like a regular SIGTRAP from here
	 on.  */
      status = W_STOPCODE (SIGTRAP);
      /* If the syscall stop is uninteresting, the LWP was resumed;
	 recurse to wait for the next stop.  */
      if (linux_handle_syscall_trap (lp, 1))
	return wait_lwp (lp);
    }
  else
    {
      /* Almost all other ptrace-stops are known to be outside of system
	 calls, with further exceptions in linux_handle_extended_wait.  */
      lp->syscall_state = TARGET_WAITKIND_IGNORE;
    }

  /* Handle GNU/Linux's extended waitstatus for trace events.  */
  if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP
      && linux_is_extended_waitstatus (status))
    {
      linux_nat_debug_printf ("Handling extended status 0x%06x", status);
      linux_handle_extended_wait (lp, status);
      /* The extended event was recorded in LP; tell the caller there
	 is no raw status to process.  */
      return 0;
    }

  return status;
}
2228
2229 /* Send a SIGSTOP to LP. */
2230
2231 static int
2232 stop_callback (struct lwp_info *lp)
2233 {
2234 if (!lp->stopped && !lp->signalled)
2235 {
2236 int ret;
2237
2238 linux_nat_debug_printf ("kill %s **<SIGSTOP>**",
2239 lp->ptid.to_string ().c_str ());
2240
2241 errno = 0;
2242 ret = kill_lwp (lp->ptid.lwp (), SIGSTOP);
2243 linux_nat_debug_printf ("lwp kill %d %s", ret,
2244 errno ? safe_strerror (errno) : "ERRNO-OK");
2245
2246 lp->signalled = 1;
2247 gdb_assert (lp->status == 0);
2248 }
2249
2250 return 0;
2251 }
2252
/* Request a stop on LWP.  Public wrapper around stop_callback: sends
   LWP a SIGSTOP unless it is already stopped or already signalled.  */

void
linux_stop_lwp (struct lwp_info *lwp)
{
  stop_callback (lwp);
}
2260
2261 /* See linux-nat.h */
2262
2263 void
2264 linux_stop_and_wait_all_lwps (void)
2265 {
2266 /* Stop all LWP's ... */
2267 iterate_over_lwps (minus_one_ptid, stop_callback);
2268
2269 /* ... and wait until all of them have reported back that
2270 they're no longer running. */
2271 iterate_over_lwps (minus_one_ptid, stop_wait_callback);
2272 }
2273
2274 /* See linux-nat.h */
2275
2276 void
2277 linux_unstop_all_lwps (void)
2278 {
2279 iterate_over_lwps (minus_one_ptid,
2280 [] (struct lwp_info *info)
2281 {
2282 return resume_stopped_resumed_lwps (info, minus_one_ptid);
2283 });
2284 }
2285
2286 /* Return non-zero if LWP PID has a pending SIGINT. */
2287
2288 static int
2289 linux_nat_has_pending_sigint (int pid)
2290 {
2291 sigset_t pending, blocked, ignored;
2292
2293 linux_proc_pending_signals (pid, &pending, &blocked, &ignored);
2294
2295 if (sigismember (&pending, SIGINT)
2296 && !sigismember (&ignored, SIGINT))
2297 return 1;
2298
2299 return 0;
2300 }
2301
2302 /* Set a flag in LP indicating that we should ignore its next SIGINT. */
2303
2304 static int
2305 set_ignore_sigint (struct lwp_info *lp)
2306 {
2307 /* If a thread has a pending SIGINT, consume it; otherwise, set a
2308 flag to consume the next one. */
2309 if (lp->stopped && lp->status != 0 && WIFSTOPPED (lp->status)
2310 && WSTOPSIG (lp->status) == SIGINT)
2311 lp->status = 0;
2312 else
2313 lp->ignore_sigint = 1;
2314
2315 return 0;
2316 }
2317
2318 /* If LP does not have a SIGINT pending, then clear the ignore_sigint flag.
2319 This function is called after we know the LWP has stopped; if the LWP
2320 stopped before the expected SIGINT was delivered, then it will never have
2321 arrived. Also, if the signal was delivered to a shared queue and consumed
2322 by a different thread, it will never be delivered to this LWP. */
2323
2324 static void
2325 maybe_clear_ignore_sigint (struct lwp_info *lp)
2326 {
2327 if (!lp->ignore_sigint)
2328 return;
2329
2330 if (!linux_nat_has_pending_sigint (lp->ptid.lwp ()))
2331 {
2332 linux_nat_debug_printf ("Clearing bogus flag for %s",
2333 lp->ptid.to_string ().c_str ());
2334 lp->ignore_sigint = 0;
2335 }
2336 }
2337
2338 /* Fetch the possible triggered data watchpoint info and store it in
2339 LP.
2340
2341 On some archs, like x86, that use debug registers to set
2342 watchpoints, it's possible that the way to know which watched
2343 address trapped, is to check the register that is used to select
2344 which address to watch. Problem is, between setting the watchpoint
2345 and reading back which data address trapped, the user may change
2346 the set of watchpoints, and, as a consequence, GDB changes the
2347 debug registers in the inferior. To avoid reading back a stale
2348 stopped-data-address when that happens, we cache in LP the fact
2349 that a watchpoint trapped, and the corresponding data address, as
2350 soon as we see LP stop with a SIGTRAP. If GDB changes the debug
2351 registers meanwhile, we have the cached data we can rely on. */
2352
2353 static int
2354 check_stopped_by_watchpoint (struct lwp_info *lp)
2355 {
2356 scoped_restore save_inferior_ptid = make_scoped_restore (&inferior_ptid);
2357 inferior_ptid = lp->ptid;
2358
2359 if (linux_target->low_stopped_by_watchpoint ())
2360 {
2361 lp->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
2362 lp->stopped_data_address_p
2363 = linux_target->low_stopped_data_address (&lp->stopped_data_address);
2364 }
2365
2366 return lp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
2367 }
2368
2369 /* Returns true if the LWP had stopped for a watchpoint. */
2370
2371 bool
2372 linux_nat_target::stopped_by_watchpoint ()
2373 {
2374 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2375
2376 gdb_assert (lp != NULL);
2377
2378 return lp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
2379 }
2380
2381 bool
2382 linux_nat_target::stopped_data_address (CORE_ADDR *addr_p)
2383 {
2384 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2385
2386 gdb_assert (lp != NULL);
2387
2388 *addr_p = lp->stopped_data_address;
2389
2390 return lp->stopped_data_address_p;
2391 }
2392
2393 /* Commonly any breakpoint / watchpoint generate only SIGTRAP. */
2394
2395 bool
2396 linux_nat_target::low_status_is_event (int status)
2397 {
2398 return WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP;
2399 }
2400
/* Wait until LP is stopped.  If LP reports a signal other than the
   expected SIGSTOP, record it as a pending event in LP; if it reports
   a SIGINT we had arranged to discard, re-resume it and wait again.
   Always returns 0 so it can be used with iterate_over_lwps.  */

static int
stop_wait_callback (struct lwp_info *lp)
{
  inferior *inf = find_inferior_ptid (linux_target, lp->ptid);

  /* If this is a vfork parent, bail out, it is not going to report
     any SIGSTOP until the vfork is done with.  */
  if (inf->vfork_child != NULL)
    return 0;

  if (!lp->stopped)
    {
      int status;

      /* wait_lwp returns 0 if the LWP exited; nothing more to do
	 then.  */
      status = wait_lwp (lp);
      if (status == 0)
	return 0;

      if (lp->ignore_sigint && WIFSTOPPED (status)
	  && WSTOPSIG (status) == SIGINT)
	{
	  /* This is the SIGINT we decided to discard (see
	     set_ignore_sigint); swallow it and keep waiting for the
	     SIGSTOP.  */
	  lp->ignore_sigint = 0;

	  errno = 0;
	  ptrace (PTRACE_CONT, lp->ptid.lwp (), 0, 0);
	  lp->stopped = 0;
	  linux_nat_debug_printf
	    ("PTRACE_CONT %s, 0, 0 (%s) (discarding SIGINT)",
	     lp->ptid.to_string ().c_str (),
	     errno ? safe_strerror (errno) : "OK");

	  return stop_wait_callback (lp);
	}

      maybe_clear_ignore_sigint (lp);

      if (WSTOPSIG (status) != SIGSTOP)
	{
	  /* The thread was stopped with a signal other than SIGSTOP.  */

	  linux_nat_debug_printf ("Pending event %s in %s",
				  status_to_str ((int) status).c_str (),
				  lp->ptid.to_string ().c_str ());

	  /* Save the sigtrap event.  */
	  lp->status = status;
	  gdb_assert (lp->signalled);
	  save_stop_reason (lp);
	}
      else
	{
	  /* We caught the SIGSTOP that we intended to catch.  */

	  linux_nat_debug_printf ("Expected SIGSTOP caught for %s.",
				  lp->ptid.to_string ().c_str ());

	  lp->signalled = 0;

	  /* If we are waiting for this stop so we can report the thread
	     stopped then we need to record this status.  Otherwise, we can
	     now discard this stop event.  */
	  if (lp->last_resume_kind == resume_stop)
	    {
	      lp->status = status;
	      save_stop_reason (lp);
	    }
	}
    }

  return 0;
}
2474
/* Return non-zero if LP has a wait status pending.  Discard the
   pending event and resume the LWP if the event that originally
   caused the stop became uninteresting -- e.g., the LWP stopped at a
   breakpoint but the PC has since moved (or the breakpoint was
   removed), so replaying the stop would confuse the core.  */

static int
status_callback (struct lwp_info *lp)
{
  /* Only report a pending wait status if we pretend that this has
     indeed been resumed.  */
  if (!lp->resumed)
    return 0;

  if (!lwp_status_pending_p (lp))
    return 0;

  /* Re-validate breakpoint stops against the current PC before
     reporting them.  */
  if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
      || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
    {
      struct regcache *regcache = get_thread_regcache (linux_target, lp->ptid);
      CORE_ADDR pc;
      int discard = 0;

      pc = regcache_read_pc (regcache);

      if (pc != lp->stop_pc)
	{
	  linux_nat_debug_printf ("PC of %s changed. was=%s, now=%s",
				  lp->ptid.to_string ().c_str (),
				  paddress (current_inferior ()->arch (),
					    lp->stop_pc),
				  paddress (current_inferior ()->arch (), pc));
	  discard = 1;
	}

#if !USE_SIGTRAP_SIGINFO
      /* Without siginfo we cannot tell a breakpoint trap apart from
	 other traps, so additionally require that a breakpoint still
	 exists at the stop PC.  */
      else if (!breakpoint_inserted_here_p (regcache->aspace (), pc))
	{
	  linux_nat_debug_printf ("previous breakpoint of %s, at %s gone",
				  lp->ptid.to_string ().c_str (),
				  paddress (current_inferior ()->arch (),
					    lp->stop_pc));

	  discard = 1;
	}
#endif

      if (discard)
	{
	  linux_nat_debug_printf ("pending event of %s cancelled.",
				  lp->ptid.to_string ().c_str ());

	  /* Drop the stale event and set the LWP running again.  */
	  lp->status = 0;
	  linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
	  return 0;
	}
    }

  return 1;
}
2534
2535 /* Count the LWP's that have had events. */
2536
2537 static int
2538 count_events_callback (struct lwp_info *lp, int *count)
2539 {
2540 gdb_assert (count != NULL);
2541
2542 /* Select only resumed LWPs that have an event pending. */
2543 if (lp->resumed && lwp_status_pending_p (lp))
2544 (*count)++;
2545
2546 return 0;
2547 }
2548
2549 /* Select the LWP (if any) that is currently being single-stepped. */
2550
2551 static int
2552 select_singlestep_lwp_callback (struct lwp_info *lp)
2553 {
2554 if (lp->last_resume_kind == resume_step
2555 && lp->status != 0)
2556 return 1;
2557 else
2558 return 0;
2559 }
2560
2561 /* Returns true if LP has a status pending. */
2562
2563 static int
2564 lwp_status_pending_p (struct lwp_info *lp)
2565 {
2566 /* We check for lp->waitstatus in addition to lp->status, because we
2567 can have pending process exits recorded in lp->status and
2568 W_EXITCODE(0,0) happens to be 0. */
2569 return lp->status != 0 || lp->waitstatus.kind () != TARGET_WAITKIND_IGNORE;
2570 }
2571
2572 /* Select the Nth LWP that has had an event. */
2573
2574 static int
2575 select_event_lwp_callback (struct lwp_info *lp, int *selector)
2576 {
2577 gdb_assert (selector != NULL);
2578
2579 /* Select only resumed LWPs that have an event pending. */
2580 if (lp->resumed && lwp_status_pending_p (lp))
2581 if ((*selector)-- == 0)
2582 return 1;
2583
2584 return 0;
2585 }
2586
/* Called when the LWP stopped for a signal/trap.  If it stopped for a
   trap check what caused it (breakpoint, watchpoint, trace, etc.),
   and save the result in the LWP's stop_reason field.  If it stopped
   for a software breakpoint, decrement the PC if necessary on the
   lwp's architecture, and record the adjusted PC in lp->stop_pc.
   Requires that LP has a pending status and no stop reason recorded
   yet.  */

static void
save_stop_reason (struct lwp_info *lp)
{
  struct regcache *regcache;
  struct gdbarch *gdbarch;
  CORE_ADDR pc;
  CORE_ADDR sw_bp_pc;
#if USE_SIGTRAP_SIGINFO
  siginfo_t siginfo;
#endif

  gdb_assert (lp->stop_reason == TARGET_STOPPED_BY_NO_REASON);
  gdb_assert (lp->status != 0);

  /* Only trap-like stops carry a breakpoint/watchpoint reason.  */
  if (!linux_target->low_status_is_event (lp->status))
    return;

  inferior *inf = find_inferior_ptid (linux_target, lp->ptid);
  if (inf->starting_up)
    return;

  regcache = get_thread_regcache (linux_target, lp->ptid);
  gdbarch = regcache->arch ();

  /* SW_BP_PC is where a software breakpoint instruction would have to
     be, accounting for architectures where the reported PC is past
     the breakpoint instruction.  */
  pc = regcache_read_pc (regcache);
  sw_bp_pc = pc - gdbarch_decr_pc_after_break (gdbarch);

#if USE_SIGTRAP_SIGINFO
  if (linux_nat_get_siginfo (lp->ptid, &siginfo))
    {
      if (siginfo.si_signo == SIGTRAP)
	{
	  if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code)
	      && GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
	    {
	      /* The si_code is ambiguous on this arch -- check debug
		 registers.  */
	      if (!check_stopped_by_watchpoint (lp))
		lp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
	    }
	  else if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
	    {
	      /* If we determine the LWP stopped for a SW breakpoint,
		 trust it.  Particularly don't check watchpoint
		 registers, because, at least on s390, we'd find
		 stopped-by-watchpoint as long as there's a watchpoint
		 set.  */
	      lp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
	    }
	  else if (GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
	    {
	      /* This can indicate either a hardware breakpoint or
		 hardware watchpoint.  Check debug registers.  */
	      if (!check_stopped_by_watchpoint (lp))
		lp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
	    }
	  else if (siginfo.si_code == TRAP_TRACE)
	    {
	      linux_nat_debug_printf ("%s stopped by trace",
				      lp->ptid.to_string ().c_str ());

	      /* We may have single stepped an instruction that
		 triggered a watchpoint.  In that case, on some
		 architectures (such as x86), instead of TRAP_HWBKPT,
		 si_code indicates TRAP_TRACE, and we need to check
		 the debug registers separately.  */
	      check_stopped_by_watchpoint (lp);
	    }
	}
    }
#else
  /* No siginfo available: infer the stop reason from the inserted
     breakpoints/watchpoints at the stop address.  */
  if ((!lp->step || lp->stop_pc == sw_bp_pc)
      && software_breakpoint_inserted_here_p (regcache->aspace (),
					      sw_bp_pc))
    {
      /* The LWP was either continued, or stepped a software
	 breakpoint instruction.  */
      lp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
    }

  if (hardware_breakpoint_inserted_here_p (regcache->aspace (), pc))
    lp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;

  if (lp->stop_reason == TARGET_STOPPED_BY_NO_REASON)
    check_stopped_by_watchpoint (lp);
#endif

  if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
    {
      linux_nat_debug_printf ("%s stopped by software breakpoint",
			      lp->ptid.to_string ().c_str ());

      /* Back up the PC if necessary.  */
      if (pc != sw_bp_pc)
	regcache_write_pc (regcache, sw_bp_pc);

      /* Update this so we record the correct stop PC below.  */
      pc = sw_bp_pc;
    }
  else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
    {
      linux_nat_debug_printf ("%s stopped by hardware breakpoint",
			      lp->ptid.to_string ().c_str ());
    }
  else if (lp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
    {
      linux_nat_debug_printf ("%s stopped by hardware watchpoint",
			      lp->ptid.to_string ().c_str ());
    }

  lp->stop_pc = pc;
}
2705
2706
2707 /* Returns true if the LWP had stopped for a software breakpoint. */
2708
2709 bool
2710 linux_nat_target::stopped_by_sw_breakpoint ()
2711 {
2712 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2713
2714 gdb_assert (lp != NULL);
2715
2716 return lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2717 }
2718
2719 /* Implement the supports_stopped_by_sw_breakpoint method. */
2720
2721 bool
2722 linux_nat_target::supports_stopped_by_sw_breakpoint ()
2723 {
2724 return USE_SIGTRAP_SIGINFO;
2725 }
2726
2727 /* Returns true if the LWP had stopped for a hardware
2728 breakpoint/watchpoint. */
2729
2730 bool
2731 linux_nat_target::stopped_by_hw_breakpoint ()
2732 {
2733 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2734
2735 gdb_assert (lp != NULL);
2736
2737 return lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2738 }
2739
2740 /* Implement the supports_stopped_by_hw_breakpoint method. */
2741
2742 bool
2743 linux_nat_target::supports_stopped_by_hw_breakpoint ()
2744 {
2745 return USE_SIGTRAP_SIGINFO;
2746 }
2747
/* Select one LWP out of those that have events pending, preferring a
   single-stepped LWP in all-stop mode, otherwise choosing at random
   to avoid starving any thread.  On return, *ORIG_LP points at the
   selected LWP and *STATUS holds its wait status; the selected LWP's
   own pending status is flushed (it is now carried in *STATUS).  */

static void
select_event_lwp (ptid_t filter, struct lwp_info **orig_lp, int *status)
{
  int num_events = 0;
  int random_selector;
  struct lwp_info *event_lp = NULL;

  /* Record the wait status for the original LWP.  */
  (*orig_lp)->status = *status;

  /* In all-stop, give preference to the LWP that is being
     single-stepped.  There will be at most one, and it will be the
     LWP that the core is most interested in.  If we didn't do this,
     then we'd have to handle pending step SIGTRAPs somehow in case
     the core later continues the previously-stepped thread, as
     otherwise we'd report the pending SIGTRAP then, and the core, not
     having stepped the thread, wouldn't understand what the trap was
     for, and therefore would report it to the user as a random
     signal.  */
  if (!target_is_non_stop_p ())
    {
      event_lp = iterate_over_lwps (filter, select_singlestep_lwp_callback);
      if (event_lp != NULL)
	{
	  linux_nat_debug_printf ("Select single-step %s",
				  event_lp->ptid.to_string ().c_str ());
	}
    }

  if (event_lp == NULL)
    {
      /* Pick one at random, out of those which have had events.  */

      /* First see how many events we have.  */
      iterate_over_lwps (filter,
			 [&] (struct lwp_info *info)
			 {
			   return count_events_callback (info, &num_events);
			 });
      gdb_assert (num_events > 0);

      /* Now randomly pick a LWP out of those that have had
	 events.  */
      random_selector = (int)
	((num_events * (double) rand ()) / (RAND_MAX + 1.0));

      if (num_events > 1)
	linux_nat_debug_printf ("Found %d events, selecting #%d",
				num_events, random_selector);

      event_lp
	= (iterate_over_lwps
	   (filter,
	    [&] (struct lwp_info *info)
	    {
	      return select_event_lwp_callback (info,
						&random_selector);
	    }));
    }

  if (event_lp != NULL)
    {
      /* Switch the event LWP.  */
      *orig_lp = event_lp;
      *status = event_lp->status;
    }

  /* Flush the wait status for the event LWP.  Note *ORIG_LP may or
     may not have been switched above; either way, the selected LWP's
     status is now carried in *STATUS, not in the lwp_info.  */
  (*orig_lp)->status = 0;
}
2820
/* Return non-zero if LP has been resumed.  Usable as an
   iterate_over_lwps callback to find any resumed LWP.  */

static int
resumed_callback (struct lwp_info *lp)
{
  return lp->resumed;
}
2828
/* Check if we should go on and pass this event to common code.

   The raw (LWPID, STATUS) pair from waitpid is examined and either
   discarded (the LWP is re-resumed when appropriate), or recorded as
   a pending event in the lwp_info structure associated to LWPID for
   the core to consume.  Events for LWPs not in our list are either
   stashed for a later fork event, or cause the LWP to be re-added
   when it is a leader we deleted earlier.  */

static void
linux_nat_filter_event (int lwpid, int status)
{
  struct lwp_info *lp;
  int event = linux_ptrace_get_extended_event (status);

  lp = find_lwp_pid (ptid_t (lwpid));

  /* Check for events reported by anything not in our LWP list.  */
  if (lp == nullptr)
    {
      if (WIFSTOPPED (status))
	{
	  if (WSTOPSIG (status) == SIGTRAP && event == PTRACE_EVENT_EXEC)
	    {
	      /* A non-leader thread exec'ed after we've seen the
		 leader zombie, and removed it from our lists (in
		 check_zombie_leaders).  The non-leader thread changes
		 its tid to the tgid.  */
	      linux_nat_debug_printf
		("Re-adding thread group leader LWP %d after exec.",
		 lwpid);

	      lp = add_lwp (ptid_t (lwpid, lwpid));
	      lp->stopped = 1;
	      lp->resumed = 1;
	      add_thread (linux_target, lp->ptid);
	    }
	  else
	    {
	      /* A process we are controlling has forked and the new
		 child's stop was reported to us by the kernel.  Save
		 its PID and go back to waiting for the fork event to
		 be reported - the stopped process might be returned
		 from waitpid before or after the fork event is.  */
	      linux_nat_debug_printf
		("Saving LWP %d status %s in stopped_pids list",
		 lwpid, status_to_str (status).c_str ());
	      add_to_pid_list (&stopped_pids, lwpid, status);
	    }
	}
      else
	{
	  /* Don't report an event for the exit of an LWP not in our
	     list, i.e. not part of any inferior we're debugging.
	     This can happen if we detach from a program we originally
	     forked and then it exits.  However, note that we may have
	     earlier deleted a leader of an inferior we're debugging,
	     in check_zombie_leaders.  Re-add it back here if so.  */
	  for (inferior *inf : all_inferiors (linux_target))
	    {
	      if (inf->pid == lwpid)
		{
		  linux_nat_debug_printf
		    ("Re-adding thread group leader LWP %d after exit.",
		     lwpid);

		  lp = add_lwp (ptid_t (lwpid, lwpid));
		  lp->resumed = 1;
		  add_thread (linux_target, lp->ptid);
		  break;
		}
	    }
	}

      /* Still unknown after the special cases above: ignore the
	 event.  */
      if (lp == nullptr)
	return;
    }

  /* This LWP is stopped now.  (And if dead, this prevents it from
     ever being continued.)  */
  lp->stopped = 1;

  /* Enable the requested ptrace event reporting options the first
     time we see the LWP stop, if that was deferred.  */
  if (WIFSTOPPED (status) && lp->must_set_ptrace_flags)
    {
      inferior *inf = find_inferior_pid (linux_target, lp->ptid.pid ());
      int options = linux_nat_ptrace_options (inf->attach_flag);

      linux_enable_event_reporting (lp->ptid.lwp (), options);
      lp->must_set_ptrace_flags = 0;
    }

  /* Handle GNU/Linux's syscall SIGTRAPs.  */
  if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
    {
      /* No longer need the sysgood bit.  The ptrace event ends up
	 recorded in lp->waitstatus if we care for it.  We can carry
	 on handling the event like a regular SIGTRAP from here
	 on.  */
      status = W_STOPCODE (SIGTRAP);
      if (linux_handle_syscall_trap (lp, 0))
	return;
    }
  else
    {
      /* Almost all other ptrace-stops are known to be outside of system
	 calls, with further exceptions in linux_handle_extended_wait.  */
      lp->syscall_state = TARGET_WAITKIND_IGNORE;
    }

  /* Handle GNU/Linux's extended waitstatus for trace events.  */
  if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP
      && linux_is_extended_waitstatus (status))
    {
      linux_nat_debug_printf ("Handling extended status 0x%06x", status);

      if (linux_handle_extended_wait (lp, status))
	return;
    }

  /* Check if the thread has exited.  */
  if (WIFEXITED (status) || WIFSIGNALED (status))
    {
      if (!report_exit_events_for (lp) && !is_leader (lp))
	{
	  linux_nat_debug_printf ("%s exited.",
				  lp->ptid.to_string ().c_str ());

	  /* If this was not the leader exiting, then the exit signal
	     was not the end of the debugged application and should be
	     ignored.  */
	  exit_lwp (lp);
	  return;
	}

      /* Note that even if the leader was ptrace-stopped, it can still
	 exit, if e.g., some other thread brings down the whole
	 process (calls `exit').  So don't assert that the lwp is
	 resumed.  */
      linux_nat_debug_printf ("LWP %ld exited (resumed=%d)",
			      lp->ptid.lwp (), lp->resumed);

      /* Dead LWP's aren't expected to report a pending sigstop.  */
      lp->signalled = 0;

      /* Store the pending event in the waitstatus, because
	 W_EXITCODE(0,0) == 0.  */
      lp->waitstatus = host_status_to_waitstatus (status);
      return;
    }

  /* Make sure we don't report a SIGSTOP that we sent ourselves in
     an attempt to stop an LWP.  */
  if (lp->signalled
      && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP)
    {
      lp->signalled = 0;

      if (lp->last_resume_kind == resume_stop)
	{
	  /* The core asked for this stop; fall through and report
	     it.  */
	  linux_nat_debug_printf ("resume_stop SIGSTOP caught for %s.",
				  lp->ptid.to_string ().c_str ());
	}
      else
	{
	  /* This is a delayed SIGSTOP.  Filter out the event.  */

	  linux_nat_debug_printf
	    ("%s %s, 0, 0 (discard delayed SIGSTOP)",
	     lp->step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
	     lp->ptid.to_string ().c_str ());

	  linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
	  gdb_assert (lp->resumed);
	  return;
	}
    }

  /* Make sure we don't report a SIGINT that we have already displayed
     for another thread.  */
  if (lp->ignore_sigint
      && WIFSTOPPED (status) && WSTOPSIG (status) == SIGINT)
    {
      linux_nat_debug_printf ("Delayed SIGINT caught for %s.",
			      lp->ptid.to_string ().c_str ());

      /* This is a delayed SIGINT.  */
      lp->ignore_sigint = 0;

      linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
      linux_nat_debug_printf ("%s %s, 0, 0 (discard SIGINT)",
			      lp->step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
			      lp->ptid.to_string ().c_str ());
      gdb_assert (lp->resumed);

      /* Discard the event.  */
      return;
    }

  /* Don't report signals that GDB isn't interested in, such as
     signals that are neither printed nor stopped upon.  Stopping all
     threads can be a bit time-consuming, so if we want decent
     performance with heavily multi-threaded programs, especially when
     they're using a high frequency timer, we'd better avoid it if we
     can.  */
  if (WIFSTOPPED (status))
    {
      enum gdb_signal signo = gdb_signal_from_host (WSTOPSIG (status));

      if (!target_is_non_stop_p ())
	{
	  /* Only do the below in all-stop, as we currently use SIGSTOP
	     to implement target_stop (see linux_nat_stop) in
	     non-stop.  */
	  if (signo == GDB_SIGNAL_INT && signal_pass_state (signo) == 0)
	    {
	      /* If ^C/BREAK is typed at the tty/console, SIGINT gets
		 forwarded to the entire process group, that is, all LWPs
		 will receive it - unless they're using CLONE_THREAD to
		 share signals.  Since we only want to report it once, we
		 mark it as ignored for all LWPs except this one.  */
	      iterate_over_lwps (ptid_t (lp->ptid.pid ()), set_ignore_sigint);
	      lp->ignore_sigint = 0;
	    }
	  else
	    maybe_clear_ignore_sigint (lp);
	}

      /* When using hardware single-step, we need to report every signal.
	 Otherwise, signals in pass_mask may be short-circuited
	 except signals that might be caused by a breakpoint, or SIGSTOP
	 if we sent the SIGSTOP and are waiting for it to arrive.  */
      if (!lp->step
	  && WSTOPSIG (status) && sigismember (&pass_mask, WSTOPSIG (status))
	  && (WSTOPSIG (status) != SIGSTOP
	      || !linux_target->find_thread (lp->ptid)->stop_requested)
	  && !linux_wstatus_maybe_breakpoint (status))
	{
	  /* Pass the signal straight through to the inferior without
	     reporting it to the core.  */
	  linux_resume_one_lwp (lp, lp->step, signo);
	  linux_nat_debug_printf
	    ("%s %s, %s (preempt 'handle')",
	     lp->step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
	     lp->ptid.to_string ().c_str (),
	     (signo != GDB_SIGNAL_0
	      ? strsignal (gdb_signal_to_host (signo)) : "0"));
	  return;
	}
    }

  /* An interesting event.  Record it as pending for the core.  */
  gdb_assert (lp);
  lp->status = status;
  save_stop_reason (lp);
}
3077
3078 /* Detect zombie thread group leaders, and "exit" them. We can't reap
3079 their exits until all other threads in the group have exited. */
3080
static void
check_zombie_leaders (void)
{
  /* Walk all inferiors; each may have a zombie thread-group leader
     that can't be reaped yet.  */
  for (inferior *inf : all_inferiors ())
    {
      struct lwp_info *leader_lp;

      /* Skip inferiors that aren't attached to a live process.  */
      if (inf->pid == 0)
	continue;

      /* The leader LWP shares the inferior's pid.  */
      leader_lp = find_lwp_pid (ptid_t (inf->pid));
      if (leader_lp != NULL
	  /* Check if there are other threads in the group, as we may
	     have raced with the inferior simply exiting.  Note this
	     isn't a watertight check.  If the inferior is
	     multi-threaded and is exiting, it may be we see the
	     leader as zombie before we reap all the non-leader
	     threads.  See comments below.  */
	  && num_lwps (inf->pid) > 1
	  && linux_proc_pid_is_zombie (inf->pid))
	{
	  /* A zombie leader in a multi-threaded program can mean one
	     of three things:

	     #1 - Only the leader exited, not the whole program, e.g.,
	     with pthread_exit.  Since we can't reap the leader's exit
	     status until all other threads are gone and reaped too,
	     we want to delete the zombie leader right away, as it
	     can't be debugged, we can't read its registers, etc.
	     This is the main reason we check for zombie leaders
	     disappearing.

	     #2 - The whole thread-group/process exited (a group exit,
	     via e.g. exit(3)), and there is (or will be shortly) an
	     exit reported for each thread in the process, and then
	     finally an exit for the leader once the non-leaders are
	     reaped.

	     #3 - There are 3 or more threads in the group, and a
	     thread other than the leader exec'd.  See comments on
	     exec events at the top of the file.

	     Ideally we would never delete the leader for case #2.
	     Instead, we want to collect the exit status of each
	     non-leader thread, and then finally collect the exit
	     status of the leader as normal and use its exit code as
	     whole-process exit code.  Unfortunately, there's no
	     race-free way to distinguish cases #1 and #2.  We can't
	     assume the exit events for the non-leaders threads are
	     already pending in the kernel, nor can we assume the
	     non-leader threads are in zombie state already.  Between
	     the leader becoming zombie and the non-leaders exiting
	     and becoming zombie themselves, there's a small time
	     window, so such a check would be racy.  Temporarily
	     pausing all threads and checking to see if all threads
	     exit or not before re-resuming them would work in the
	     case that all threads are running right now, but it
	     wouldn't work if some thread is currently already
	     ptrace-stopped, e.g., due to scheduler-locking.

	     So what we do is we delete the leader anyhow, and then
	     later on when we see its exit status, we re-add it back.
	     We also make sure that we only report a whole-process
	     exit when we see the leader exiting, as opposed to when
	     the last LWP in the LWP list exits, which can be a
	     non-leader if we deleted the leader here.  */
	  linux_nat_debug_printf ("Thread group leader %d zombie "
				  "(it exited, or another thread execd), "
				  "deleting it.",
				  inf->pid);
	  exit_lwp (leader_lp);
	}
    }
}
3155
3156 /* Convenience function that is called when we're about to return an
3157 event to the core. If the event is an exit or signalled event,
3158 then this decides whether to report it as process-wide event, as a
3159 thread exit event, or to suppress it. All other event kinds are
3160 passed through unmodified. */
3161
static ptid_t
filter_exit_event (struct lwp_info *event_child,
		   struct target_waitstatus *ourstatus)
{
  ptid_t ptid = event_child->ptid;

  /* Note we must filter TARGET_WAITKIND_SIGNALLED as well, otherwise
     if a non-leader thread exits with a signal, we'd report it to the
     core which would interpret it as the whole-process exiting.
     There is no TARGET_WAITKIND_THREAD_SIGNALLED event kind.  */
  if (ourstatus->kind () != TARGET_WAITKIND_EXITED
      && ourstatus->kind () != TARGET_WAITKIND_SIGNALLED)
    return ptid;

  /* A non-leader exit is never a whole-process event: either report
     it as a thread exit, or suppress it entirely.  Leader exits pass
     through unmodified.  */
  if (!is_leader (event_child))
    {
      if (report_exit_events_for (event_child))
	{
	  ourstatus->set_thread_exited (0);
	  /* Delete lwp, but not thread_info, infrun will need it to
	     process the event.  */
	  exit_lwp (event_child, false);
	}
      else
	{
	  ourstatus->set_ignore ();
	  exit_lwp (event_child);
	}
    }

  return ptid;
}
3194
/* Helper for linux_nat_target::wait.  Pull events out of the kernel
   for the LWPs matching PTID, select one event LWP, and store its
   wait status in *OURSTATUS.  Returns the ptid of the event LWP (as
   possibly adjusted by filter_exit_event), or minus_one_ptid if
   nothing interesting happened (TARGET_WNOHANG) or no resumed LWPs
   remain.  */

static ptid_t
linux_nat_wait_1 (ptid_t ptid, struct target_waitstatus *ourstatus,
		  target_wait_flags target_options)
{
  LINUX_NAT_SCOPED_DEBUG_ENTER_EXIT;

  sigset_t prev_mask;
  enum resume_kind last_resume_kind;
  struct lwp_info *lp;
  int status;

  /* The first time we get here after starting a new inferior, we may
     not have added it to the LWP list yet - this is the earliest
     moment at which we know its PID.  */
  if (ptid.is_pid () && find_lwp_pid (ptid) == nullptr)
    {
      ptid_t lwp_ptid (ptid.pid (), ptid.pid ());

      /* Upgrade the main thread's ptid.  */
      thread_change_ptid (linux_target, ptid, lwp_ptid);
      lp = add_initial_lwp (lwp_ptid);
      lp->resumed = 1;
    }

  /* Make sure SIGCHLD is blocked until the sigsuspend below.  */
  block_child_signals (&prev_mask);

  /* First check if there is a LWP with a wait status pending.  */
  lp = iterate_over_lwps (ptid, status_callback);
  if (lp != NULL)
    {
      linux_nat_debug_printf ("Using pending wait status %s for %s.",
			      pending_status_str (lp).c_str (),
			      lp->ptid.to_string ().c_str ());
    }

  /* But if we don't find a pending event, we'll have to wait.  Always
     pull all events out of the kernel.  We'll randomly select an
     event LWP out of all that have events, to prevent starvation.  */

  while (lp == NULL)
    {
      pid_t lwpid;

      /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
	 quirks:

	 - If the thread group leader exits while other threads in the
	   thread group still exist, waitpid(TGID, ...) hangs.  That
	   waitpid won't return an exit status until the other threads
	   in the group are reaped.

	 - When a non-leader thread execs, that thread just vanishes
	   without reporting an exit (so we'd hang if we waited for it
	   explicitly in that case).  The exec event is reported to
	   the TGID pid.  */

      errno = 0;
      lwpid = my_waitpid (-1, &status, __WALL | WNOHANG);

      linux_nat_debug_printf ("waitpid(-1, ...) returned %d, %s",
			      lwpid,
			      errno ? safe_strerror (errno) : "ERRNO-OK");

      if (lwpid > 0)
	{
	  linux_nat_debug_printf ("waitpid %ld received %s",
				  (long) lwpid,
				  status_to_str (status).c_str ());

	  linux_nat_filter_event (lwpid, status);
	  /* Retry until nothing comes out of waitpid.  A single
	     SIGCHLD can indicate more than one child stopped.  */
	  continue;
	}

      /* Now that we've pulled all events out of the kernel, resume
	 LWPs that don't have an interesting event to report.  */
      iterate_over_lwps (minus_one_ptid,
			 [] (struct lwp_info *info)
			 {
			   return resume_stopped_resumed_lwps (info, minus_one_ptid);
			 });

      /* ... and find an LWP with a status to report to the core, if
	 any.  */
      lp = iterate_over_lwps (ptid, status_callback);
      if (lp != NULL)
	break;

      /* Check for zombie thread group leaders.  Those can't be reaped
	 until all other threads in the thread group are.  */
      check_zombie_leaders ();

      /* If there are no resumed children left, bail.  We'd be stuck
	 forever in the sigsuspend call below otherwise.  */
      if (iterate_over_lwps (ptid, resumed_callback) == NULL)
	{
	  linux_nat_debug_printf ("exit (no resumed LWP)");

	  ourstatus->set_no_resumed ();

	  restore_child_signals_mask (&prev_mask);
	  return minus_one_ptid;
	}

      /* No interesting event to report to the core.  */

      if (target_options & TARGET_WNOHANG)
	{
	  linux_nat_debug_printf ("no interesting events found");

	  ourstatus->set_ignore ();
	  restore_child_signals_mask (&prev_mask);
	  return minus_one_ptid;
	}

      /* We shouldn't end up here unless we want to try again.  */
      gdb_assert (lp == NULL);

      /* Block until we get an event reported with SIGCHLD.  */
      wait_for_signal ();
    }

  gdb_assert (lp);

  /* Consume the pending status now that LP is the event LWP.  */
  status = lp->status;
  lp->status = 0;

  if (!target_is_non_stop_p ())
    {
      /* Now stop all other LWP's ...  */
      iterate_over_lwps (minus_one_ptid, stop_callback);

      /* ... and wait until all of them have reported back that
	 they're no longer running.  */
      iterate_over_lwps (minus_one_ptid, stop_wait_callback);
    }

  /* If we're not waiting for a specific LWP, choose an event LWP from
     among those that have had events.  Giving equal priority to all
     LWPs that have had events helps prevent starvation.  */
  if (ptid == minus_one_ptid || ptid.is_pid ())
    select_event_lwp (ptid, &lp, &status);

  gdb_assert (lp != NULL);

  /* Now that we've selected our final event LWP, un-adjust its PC if
     it was a software breakpoint, and we can't reliably support the
     "stopped by software breakpoint" stop reason.  */
  if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
      && !USE_SIGTRAP_SIGINFO)
    {
      struct regcache *regcache = get_thread_regcache (linux_target, lp->ptid);
      struct gdbarch *gdbarch = regcache->arch ();
      int decr_pc = gdbarch_decr_pc_after_break (gdbarch);

      if (decr_pc != 0)
	{
	  CORE_ADDR pc;

	  pc = regcache_read_pc (regcache);
	  regcache_write_pc (regcache, pc + decr_pc);
	}
    }

  /* We'll need this to determine whether to report a SIGSTOP as
     GDB_SIGNAL_0.  Need to take a copy because resume_clear_callback
     clears it.  */
  last_resume_kind = lp->last_resume_kind;

  if (!target_is_non_stop_p ())
    {
      /* In all-stop, from the core's perspective, all LWPs are now
	 stopped until a new resume action is sent over.  */
      iterate_over_lwps (minus_one_ptid, resume_clear_callback);
    }
  else
    {
      resume_clear_callback (lp);
    }

  if (linux_target->low_status_is_event (status))
    {
      linux_nat_debug_printf ("trap ptid is %s.",
			      lp->ptid.to_string ().c_str ());
    }

  /* Prefer an extended waitstatus recorded on the LWP (if any) over
     the raw waitpid status.  */
  if (lp->waitstatus.kind () != TARGET_WAITKIND_IGNORE)
    {
      *ourstatus = lp->waitstatus;
      lp->waitstatus.set_ignore ();
    }
  else
    *ourstatus = host_status_to_waitstatus (status);

  linux_nat_debug_printf ("event found");

  restore_child_signals_mask (&prev_mask);

  if (last_resume_kind == resume_stop
      && ourstatus->kind () == TARGET_WAITKIND_STOPPED
      && WSTOPSIG (status) == SIGSTOP)
    {
      /* A thread that has been requested to stop by GDB with
	 target_stop, and it stopped cleanly, so report as SIG0.  The
	 use of SIGSTOP is an implementation detail.  */
      ourstatus->set_stopped (GDB_SIGNAL_0);
    }

  if (ourstatus->kind () == TARGET_WAITKIND_EXITED
      || ourstatus->kind () == TARGET_WAITKIND_SIGNALLED)
    lp->core = -1;
  else
    lp->core = linux_common_core_of_thread (lp->ptid);

  return filter_exit_event (lp, ourstatus);
}
3413
3414 /* Resume LWPs that are currently stopped without any pending status
3415 to report, but are resumed from the core's perspective. */
3416
static int
resume_stopped_resumed_lwps (struct lwp_info *lp, const ptid_t wait_ptid)
{
  inferior *inf = find_inferior_ptid (linux_target, lp->ptid);

  /* Only resume LWPs that are stopped at the ptrace level, marked
     resumed by the core, have no pending status, and whose inferior
     isn't a blocked vfork parent.  */
  if (!lp->stopped)
    {
      linux_nat_debug_printf ("NOT resuming LWP %s, not stopped",
			      lp->ptid.to_string ().c_str ());
    }
  else if (!lp->resumed)
    {
      linux_nat_debug_printf ("NOT resuming LWP %s, not resumed",
			      lp->ptid.to_string ().c_str ());
    }
  else if (lwp_status_pending_p (lp))
    {
      linux_nat_debug_printf ("NOT resuming LWP %s, has pending status",
			      lp->ptid.to_string ().c_str ());
    }
  else if (inf->vfork_child != nullptr)
    {
      linux_nat_debug_printf ("NOT resuming LWP %s (vfork parent)",
			      lp->ptid.to_string ().c_str ());
    }
  else
    {
      struct regcache *regcache = get_thread_regcache (linux_target, lp->ptid);
      struct gdbarch *gdbarch = regcache->arch ();

      try
	{
	  CORE_ADDR pc = regcache_read_pc (regcache);
	  int leave_stopped = 0;

	  /* Don't bother if there's a breakpoint at PC that we'd hit
	     immediately, and we're not waiting for this LWP.  */
	  if (!lp->ptid.matches (wait_ptid))
	    {
	      if (breakpoint_inserted_here_p (regcache->aspace (), pc))
		leave_stopped = 1;
	    }

	  if (!leave_stopped)
	    {
	      linux_nat_debug_printf
		("resuming stopped-resumed LWP %s at %s: step=%d",
		 lp->ptid.to_string ().c_str (), paddress (gdbarch, pc),
		 lp->step);

	      linux_resume_one_lwp_throw (lp, lp->step, GDB_SIGNAL_0);
	    }
	}
      catch (const gdb_exception_error &ex)
	{
	  /* The LWP may have vanished mid-resume; only re-throw if it
	     is still around.  */
	  if (!check_ptrace_stopped_lwp_gone (lp))
	    throw;
	}
    }

  /* Always return 0, so iterate_over_lwps visits all LWPs.  */
  return 0;
}
3479
/* Implement the "wait" target_ops method.  */

ptid_t
linux_nat_target::wait (ptid_t ptid, struct target_waitstatus *ourstatus,
			target_wait_flags target_options)
{
  LINUX_NAT_SCOPED_DEBUG_ENTER_EXIT;

  ptid_t event_ptid;

  linux_nat_debug_printf ("[%s], [%s]", ptid.to_string ().c_str (),
			  target_options_to_string (target_options).c_str ());

  /* Flush the async file first.  */
  if (target_is_async_p ())
    async_file_flush ();

  /* Resume LWPs that are currently stopped without any pending status
     to report, but are resumed from the core's perspective.  LWPs get
     in this state if we find them stopping at a time we're not
     interested in reporting the event (target_wait on a
     specific_process, for example, see linux_nat_wait_1), and
     meanwhile the event became uninteresting.  Don't bother resuming
     LWPs we're not going to wait for if they'd stop immediately.  */
  if (target_is_non_stop_p ())
    iterate_over_lwps (minus_one_ptid,
		       [=] (struct lwp_info *info)
		       {
			 return resume_stopped_resumed_lwps (info, ptid);
		       });

  event_ptid = linux_nat_wait_1 (ptid, ourstatus, target_options);

  /* If we requested any event, and something came out, assume there
     may be more.  If we requested a specific lwp or process, also
     assume there may be more.  */
  if (target_is_async_p ()
      && ((ourstatus->kind () != TARGET_WAITKIND_IGNORE
	   && ourstatus->kind () != TARGET_WAITKIND_NO_RESUMED)
	  || ptid != minus_one_ptid))
    async_file_mark ();

  return event_ptid;
}
3522
3523 /* Kill one LWP. */
3524
3525 static void
3526 kill_one_lwp (pid_t pid)
3527 {
3528 /* PTRACE_KILL may resume the inferior. Send SIGKILL first. */
3529
3530 errno = 0;
3531 kill_lwp (pid, SIGKILL);
3532
3533 if (debug_linux_nat)
3534 {
3535 int save_errno = errno;
3536
3537 linux_nat_debug_printf
3538 ("kill (SIGKILL) %ld, 0, 0 (%s)", (long) pid,
3539 save_errno != 0 ? safe_strerror (save_errno) : "OK");
3540 }
3541
3542 /* Some kernels ignore even SIGKILL for processes under ptrace. */
3543
3544 errno = 0;
3545 ptrace (PTRACE_KILL, pid, 0, 0);
3546 if (debug_linux_nat)
3547 {
3548 int save_errno = errno;
3549
3550 linux_nat_debug_printf
3551 ("PTRACE_KILL %ld, 0, 0 (%s)", (long) pid,
3552 save_errno ? safe_strerror (save_errno) : "OK");
3553 }
3554 }
3555
3556 /* Wait for an LWP to die. */
3557
3558 static void
3559 kill_wait_one_lwp (pid_t pid)
3560 {
3561 pid_t res;
3562
3563 /* We must make sure that there are no pending events (delayed
3564 SIGSTOPs, pending SIGTRAPs, etc.) to make sure the current
3565 program doesn't interfere with any following debugging session. */
3566
3567 do
3568 {
3569 res = my_waitpid (pid, NULL, __WALL);
3570 if (res != (pid_t) -1)
3571 {
3572 linux_nat_debug_printf ("wait %ld received unknown.", (long) pid);
3573
3574 /* The Linux kernel sometimes fails to kill a thread
3575 completely after PTRACE_KILL; that goes from the stop
3576 point in do_fork out to the one in get_signal_to_deliver
3577 and waits again. So kill it again. */
3578 kill_one_lwp (pid);
3579 }
3580 }
3581 while (res == pid);
3582
3583 gdb_assert (res == -1 && errno == ECHILD);
3584 }
3585
3586 /* Callback for iterate_over_lwps. */
3587
3588 static int
3589 kill_callback (struct lwp_info *lp)
3590 {
3591 kill_one_lwp (lp->ptid.lwp ());
3592 return 0;
3593 }
3594
3595 /* Callback for iterate_over_lwps. */
3596
3597 static int
3598 kill_wait_callback (struct lwp_info *lp)
3599 {
3600 kill_wait_one_lwp (lp->ptid.lwp ());
3601 return 0;
3602 }
3603
3604 /* Kill the fork/clone child of LP if it has an unfollowed child. */
3605
3606 static int
3607 kill_unfollowed_child_callback (lwp_info *lp)
3608 {
3609 gdb::optional<target_waitstatus> ws = get_pending_child_status (lp);
3610 if (ws.has_value ())
3611 {
3612 ptid_t child_ptid = ws->child_ptid ();
3613 int child_pid = child_ptid.pid ();
3614 int child_lwp = child_ptid.lwp ();
3615
3616 kill_one_lwp (child_lwp);
3617 kill_wait_one_lwp (child_lwp);
3618
3619 /* Let the arch-specific native code know this process is
3620 gone. */
3621 if (ws->kind () != TARGET_WAITKIND_THREAD_CLONED)
3622 linux_target->low_forget_process (child_pid);
3623 }
3624
3625 return 0;
3626 }
3627
/* Implement the "kill" target_ops method.  */

void
linux_nat_target::kill ()
{
  ptid_t pid_ptid (inferior_ptid.pid ());

  /* If we're stopped while forking/cloning and we haven't followed
     yet, kill the child task.  We need to do this first because the
     parent will be sleeping if this is a vfork.  */
  iterate_over_lwps (pid_ptid, kill_unfollowed_child_callback);

  if (forks_exist_p ())
    linux_fork_killall ();
  else
    {
      /* Stop all threads before killing them, since ptrace requires
	 that the thread is stopped to successfully PTRACE_KILL.  */
      iterate_over_lwps (pid_ptid, stop_callback);
      /* ... and wait until all of them have reported back that
	 they're no longer running.  */
      iterate_over_lwps (pid_ptid, stop_wait_callback);

      /* Kill all LWP's ...  */
      iterate_over_lwps (pid_ptid, kill_callback);

      /* ... and wait until we've flushed all events.  */
      iterate_over_lwps (pid_ptid, kill_wait_callback);
    }

  target_mourn_inferior (inferior_ptid);
}
3658
/* Implement the "mourn_inferior" target_ops method.  */

void
linux_nat_target::mourn_inferior ()
{
  LINUX_NAT_SCOPED_DEBUG_ENTER_EXIT;

  int pid = inferior_ptid.pid ();

  /* Drop all LWP-list state and the /proc/PID/mem file descriptor
     for this process.  */
  purge_lwp_list (pid);

  close_proc_mem_file (pid);

  if (! forks_exist_p ())
    /* Normal case, no other forks available.  */
    inf_ptrace_target::mourn_inferior ();
  else
    /* Multi-fork case.  The current inferior_ptid has exited, but
       there are other viable forks to debug.  Delete the exiting
       one and context-switch to the first available.  */
    linux_fork_mourn_inferior ();

  /* Let the arch-specific native code know this process is gone.  */
  linux_target->low_forget_process (pid);
}
3682
3683 /* Convert a native/host siginfo object, into/from the siginfo in the
3684 layout of the inferiors' architecture. */
3685
3686 static void
3687 siginfo_fixup (siginfo_t *siginfo, gdb_byte *inf_siginfo, int direction)
3688 {
3689 /* If the low target didn't do anything, then just do a straight
3690 memcpy. */
3691 if (!linux_target->low_siginfo_fixup (siginfo, inf_siginfo, direction))
3692 {
3693 if (direction == 1)
3694 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
3695 else
3696 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
3697 }
3698 }
3699
/* Read or write the siginfo of thread PTID, implementing the
   TARGET_OBJECT_SIGNAL_INFO xfer object.  Exactly one of READBUF /
   WRITEBUF is non-NULL.  On success, stores the transferred byte
   count in *XFERED_LEN.

   NOTE(review): OFFSET == sizeof (siginfo) is accepted here and
   results in a zero-length transfer reported as TARGET_XFER_OK --
   confirm callers treat that as end-of-object.  */

static enum target_xfer_status
linux_xfer_siginfo (ptid_t ptid, enum target_object object,
		    const char *annex, gdb_byte *readbuf,
		    const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
		    ULONGEST *xfered_len)
{
  siginfo_t siginfo;
  gdb_byte inf_siginfo[sizeof (siginfo_t)];

  gdb_assert (object == TARGET_OBJECT_SIGNAL_INFO);
  gdb_assert (readbuf || writebuf);

  if (offset > sizeof (siginfo))
    return TARGET_XFER_E_IO;

  if (!linux_nat_get_siginfo (ptid, &siginfo))
    return TARGET_XFER_E_IO;

  /* When GDB is built as a 64-bit application, ptrace writes into
     SIGINFO an object with 64-bit layout.  Since debugging a 32-bit
     inferior with a 64-bit GDB should look the same as debugging it
     with a 32-bit GDB, we need to convert it.  GDB core always sees
     the converted layout, so any read/write will have to be done
     post-conversion.  */
  siginfo_fixup (&siginfo, inf_siginfo, 0);

  /* Clamp the transfer to the end of the object.  */
  if (offset + len > sizeof (siginfo))
    len = sizeof (siginfo) - offset;

  if (readbuf != NULL)
    memcpy (readbuf, inf_siginfo + offset, len);
  else
    {
      memcpy (inf_siginfo + offset, writebuf, len);

      /* Convert back to ptrace layout before flushing it out.  */
      siginfo_fixup (&siginfo, inf_siginfo, 1);

      int pid = get_ptrace_pid (ptid);
      errno = 0;
      ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
      if (errno != 0)
	return TARGET_XFER_E_IO;
    }

  *xfered_len = len;
  return TARGET_XFER_OK;
}
3748
3749 static enum target_xfer_status
3750 linux_nat_xfer_osdata (enum target_object object,
3751 const char *annex, gdb_byte *readbuf,
3752 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
3753 ULONGEST *xfered_len);
3754
3755 static enum target_xfer_status
3756 linux_proc_xfer_memory_partial (int pid, gdb_byte *readbuf,
3757 const gdb_byte *writebuf, ULONGEST offset,
3758 LONGEST len, ULONGEST *xfered_len);
3759
/* Implement the "xfer_partial" target_ops method.  Dispatches on
   OBJECT, falling back to the inf-ptrace implementation for anything
   not handled here.  */

enum target_xfer_status
linux_nat_target::xfer_partial (enum target_object object,
				const char *annex, gdb_byte *readbuf,
				const gdb_byte *writebuf,
				ULONGEST offset, ULONGEST len, ULONGEST *xfered_len)
{
  if (object == TARGET_OBJECT_SIGNAL_INFO)
    return linux_xfer_siginfo (inferior_ptid, object, annex, readbuf, writebuf,
			       offset, len, xfered_len);

  /* The target is connected but no live inferior is selected.  Pass
     this request down to a lower stratum (e.g., the executable
     file).  */
  if (object == TARGET_OBJECT_MEMORY && inferior_ptid == null_ptid)
    return TARGET_XFER_EOF;

  if (object == TARGET_OBJECT_AUXV)
    return memory_xfer_auxv (this, object, annex, readbuf, writebuf,
			     offset, len, xfered_len);

  if (object == TARGET_OBJECT_OSDATA)
    return linux_nat_xfer_osdata (object, annex, readbuf, writebuf,
				  offset, len, xfered_len);

  if (object == TARGET_OBJECT_MEMORY)
    {
      /* GDB calculates all addresses in the largest possible address
	 width.  The address width must be masked before its final use
	 by linux_proc_xfer_partial.

	 Compare ADDR_BIT first to avoid a compiler warning on shift overflow.  */
      int addr_bit = gdbarch_addr_bit (current_inferior ()->arch ());

      if (addr_bit < (sizeof (ULONGEST) * HOST_CHAR_BIT))
	offset &= ((ULONGEST) 1 << addr_bit) - 1;

      /* If /proc/pid/mem is writable, don't fallback to ptrace.  If
	 the write via /proc/pid/mem fails because the inferior execed
	 (and we haven't seen the exec event yet), a subsequent ptrace
	 poke would incorrectly write memory to the post-exec address
	 space, while the core was trying to write to the pre-exec
	 address space.  */
      if (proc_mem_file_is_writable ())
	return linux_proc_xfer_memory_partial (inferior_ptid.pid (), readbuf,
					       writebuf, offset, len,
					       xfered_len);
    }

  /* Anything else (including memory when /proc/pid/mem is not
     writable) goes through ptrace.  */
  return inf_ptrace_target::xfer_partial (object, annex, readbuf, writebuf,
					  offset, len, xfered_len);
}
3811
3812 bool
3813 linux_nat_target::thread_alive (ptid_t ptid)
3814 {
3815 /* As long as a PTID is in lwp list, consider it alive. */
3816 return find_lwp_pid (ptid) != NULL;
3817 }
3818
3819 /* Implement the to_update_thread_list target method for this
3820 target. */
3821
3822 void
3823 linux_nat_target::update_thread_list ()
3824 {
3825 /* We add/delete threads from the list as clone/exit events are
3826 processed, so just try deleting exited threads still in the
3827 thread list. */
3828 delete_exited_threads ();
3829
3830 /* Update the processor core that each lwp/thread was last seen
3831 running on. */
3832 for (lwp_info *lwp : all_lwps ())
3833 {
3834 /* Avoid accessing /proc if the thread hasn't run since we last
3835 time we fetched the thread's core. Accessing /proc becomes
3836 noticeably expensive when we have thousands of LWPs. */
3837 if (lwp->core == -1)
3838 lwp->core = linux_common_core_of_thread (lwp->ptid);
3839 }
3840 }
3841
3842 std::string
3843 linux_nat_target::pid_to_str (ptid_t ptid)
3844 {
3845 if (ptid.lwp_p ()
3846 && (ptid.pid () != ptid.lwp ()
3847 || num_lwps (ptid.pid ()) > 1))
3848 return string_printf ("LWP %ld", ptid.lwp ());
3849
3850 return normal_pid_to_str (ptid);
3851 }
3852
/* Implement the "thread_name" target_ops method.  The name is read
   from /proc by the shared linux-procfs code.  */

const char *
linux_nat_target::thread_name (struct thread_info *thr)
{
  return linux_proc_tid_get_name (thr->ptid);
}
3858
3859 /* Accepts an integer PID; Returns a string representing a file that
3860 can be opened to get the symbols for the child process. */
3861
const char *
linux_nat_target::pid_to_exec_file (int pid)
{
  /* Delegate to the shared linux-procfs helper, which consults
     /proc.  */
  return linux_proc_pid_to_exec_file (pid);
}
3867
3868 /* Object representing an /proc/PID/mem open file. We keep one such
3869 file open per inferior.
3870
3871 It might be tempting to think about only ever opening one file at
3872 most for all inferiors, closing/reopening the file as we access
3873 memory of different inferiors, to minimize number of file
3874 descriptors open, which can otherwise run into resource limits.
3875 However, that does not work correctly -- if the inferior execs and
3876 we haven't processed the exec event yet, and, we opened a
3877 /proc/PID/mem file, we will get a mem file accessing the post-exec
3878 address space, thinking we're opening it for the pre-exec address
3879 space. That is dangerous as we can poke memory (e.g. clearing
3880 breakpoints) in the post-exec memory by mistake, corrupting the
3881 inferior. For that reason, we open the mem file as early as
3882 possible, right after spawning, forking or attaching to the
3883 inferior, when the inferior is stopped and thus before it has a
3884 chance of execing.
3885
3886 Note that after opening the file, even if the thread we opened it
3887 for subsequently exits, the open file is still usable for accessing
3888 memory. It's only when the whole process exits or execs that the
3889 file becomes invalid, at which point reads/writes return EOF. */
3890
class proc_mem_file
{
public:
  /* Take ownership of the already-open file descriptor FD, opened
     for LWP PTID.  FD must be valid.  */
  proc_mem_file (ptid_t ptid, int fd)
    : m_ptid (ptid), m_fd (fd)
  {
    gdb_assert (m_fd != -1);
  }

  /* Closes the owned file descriptor.  */
  ~proc_mem_file ()
  {
    linux_nat_debug_printf ("closing fd %d for /proc/%d/task/%ld/mem",
			    m_fd, m_ptid.pid (), m_ptid.lwp ());
    close (m_fd);
  }

  /* Non-copyable: the destructor closes M_FD, so a copy would leave
     a dangling descriptor.  */
  DISABLE_COPY_AND_ASSIGN (proc_mem_file);

  /* The owned file descriptor.  */
  int fd ()
  {
    return m_fd;
  }

private:
  /* The LWP this file was opened for.  Just for debugging
     purposes.  */
  ptid_t m_ptid;

  /* The file descriptor.  */
  int m_fd = -1;
};
3922
/* The map between an inferior process id, and the open /proc/PID/mem
   file.  Keyed by the process id (TGID).  This is stored in a map
   instead of in a per-inferior structure because we need to be able
   to access memory of processes which don't have a corresponding
   struct inferior object.  E.g., with "detach-on-fork on" (the
   default), and "follow-fork parent" (also default), we don't create
   an inferior for the fork child, but we still need to remove
   breakpoints from the fork child's memory.  */
static std::unordered_map<int, proc_mem_file> proc_mem_file_map;
3932
3933 /* Close the /proc/PID/mem file for PID. */
3934
static void
close_proc_mem_file (pid_t pid)
{
  /* Erasing the entry runs proc_mem_file's destructor, which closes
     the file descriptor.  A no-op if PID has no entry.  */
  proc_mem_file_map.erase (pid);
}
3940
3941 /* Open the /proc/PID/mem file for the process (thread group) of PTID.
3942 We actually open /proc/PID/task/LWP/mem, as that's the LWP we know
3943 exists and is stopped right now. We prefer the
3944 /proc/PID/task/LWP/mem form over /proc/LWP/mem to avoid tid-reuse
3945 races, just in case this is ever called on an already-waited
3946 LWP. */
3947
static void
open_proc_mem_file (ptid_t ptid)
{
  /* A file must not already be open for this process.  */
  auto iter = proc_mem_file_map.find (ptid.pid ());
  gdb_assert (iter == proc_mem_file_map.end ());

  char filename[64];
  xsnprintf (filename, sizeof filename,
	     "/proc/%d/task/%ld/mem", ptid.pid (), ptid.lwp ());

  /* O_CLOEXEC so the fd doesn't leak into spawned inferiors.  */
  int fd = gdb_open_cloexec (filename, O_RDWR | O_LARGEFILE, 0).release ();

  /* On failure we just warn; memory access will later report EOF via
     linux_proc_xfer_memory_partial's map lookup.  */
  if (fd == -1)
    {
      warning (_("opening /proc/PID/mem file for lwp %d.%ld failed: %s (%d)"),
	       ptid.pid (), ptid.lwp (),
	       safe_strerror (errno), errno);
      return;
    }

  /* Construct the proc_mem_file in place to respect its non-copyable
     ownership of FD.  */
  proc_mem_file_map.emplace (std::piecewise_construct,
			     std::forward_as_tuple (ptid.pid ()),
			     std::forward_as_tuple (ptid, fd));

  linux_nat_debug_printf ("opened fd %d for lwp %d.%ld",
			  fd, ptid.pid (), ptid.lwp ());
}
3975
3976 /* Helper for linux_proc_xfer_memory_partial and
3977 proc_mem_file_is_writable. FD is the already opened /proc/pid/mem
3978 file, and PID is the pid of the corresponding process. The rest of
3979 the arguments are like linux_proc_xfer_memory_partial's. */
3980
static enum target_xfer_status
linux_proc_xfer_memory_partial_fd (int fd, int pid,
				   gdb_byte *readbuf, const gdb_byte *writebuf,
				   ULONGEST offset, LONGEST len,
				   ULONGEST *xfered_len)
{
  ssize_t ret;

  gdb_assert (fd != -1);

  /* Use pread64/pwrite64 if available, since they save a syscall and
     can handle 64-bit offsets even on 32-bit platforms (for instance,
     SPARC debugging a SPARC64 application).  But only use them if the
     offset isn't so high that when cast to off_t it'd be negative, as
     seen on SPARC64.  pread64/pwrite64 outright reject such offsets.
     lseek does not.  */
#ifdef HAVE_PREAD64
  if ((off_t) offset >= 0)
    ret = (readbuf != nullptr
	   ? pread64 (fd, readbuf, len, offset)
	   : pwrite64 (fd, writebuf, len, offset));
  else
#endif
    {
      /* Fallback: seek to OFFSET, then do a plain read/write.  */
      ret = lseek (fd, offset, SEEK_SET);
      if (ret != -1)
	ret = (readbuf != nullptr
	       ? read (fd, readbuf, len)
	       : write (fd, writebuf, len));
    }

  if (ret == -1)
    {
      linux_nat_debug_printf ("accessing fd %d for pid %d failed: %s (%d)",
			      fd, pid, safe_strerror (errno), errno);
      return TARGET_XFER_E_IO;
    }
  else if (ret == 0)
    {
      /* EOF means the address space is gone, the whole process exited
	 or execed.  */
      linux_nat_debug_printf ("accessing fd %d for pid %d got EOF",
			      fd, pid);
      return TARGET_XFER_EOF;
    }
  else
    {
      /* Partial transfers are fine; the caller retries for the
	 rest.  */
      *xfered_len = ret;
      return TARGET_XFER_OK;
    }
}
4032
4033 /* Implement the to_xfer_partial target method using /proc/PID/mem.
4034 Because we can use a single read/write call, this can be much more
4035 efficient than banging away at PTRACE_PEEKTEXT. Also, unlike
4036 PTRACE_PEEKTEXT/PTRACE_POKETEXT, this works with running
4037 threads. */
4038
4039 static enum target_xfer_status
4040 linux_proc_xfer_memory_partial (int pid, gdb_byte *readbuf,
4041 const gdb_byte *writebuf, ULONGEST offset,
4042 LONGEST len, ULONGEST *xfered_len)
4043 {
4044 auto iter = proc_mem_file_map.find (pid);
4045 if (iter == proc_mem_file_map.end ())
4046 return TARGET_XFER_EOF;
4047
4048 int fd = iter->second.fd ();
4049
4050 return linux_proc_xfer_memory_partial_fd (fd, pid, readbuf, writebuf, offset,
4051 len, xfered_len);
4052 }
4053
4054 /* Check whether /proc/pid/mem is writable in the current kernel, and
4055 return true if so. It wasn't writable before Linux 2.6.39, but
4056 there's no way to know whether the feature was backported to older
4057 kernels. So we check to see if it works. The result is cached,
4058 and this is guaranteed to be called once early during inferior
4059 startup, so that any warning is printed out consistently between
4060 GDB invocations. Note we don't call it during GDB startup instead
4061 though, because then we might warn with e.g. just "gdb --version"
4062 on sandboxed systems. See PR gdb/29907. */
4063
static bool
proc_mem_file_is_writable ()
{
  /* Cached answer; the probe below runs only on the first call, so
     any warning is printed exactly once per GDB session.  */
  static gdb::optional<bool> writable;

  if (writable.has_value ())
    return *writable;

  /* Assume not writable until the probe proves otherwise.  */
  writable.emplace (false);

  /* We check whether /proc/pid/mem is writable by trying to write to
     one of our variables via /proc/self/mem.  */

  int fd = gdb_open_cloexec ("/proc/self/mem", O_RDWR | O_LARGEFILE, 0).release ();

  if (fd == -1)
    {
      warning (_("opening /proc/self/mem file failed: %s (%d)"),
	       safe_strerror (errno), errno);
      return *writable;
    }

  /* Make sure the probe fd doesn't leak on any return path.  */
  SCOPE_EXIT { close (fd); };

  /* This is the variable we try to write to.  Note OFFSET below.  */
  volatile gdb_byte test_var = 0;

  gdb_byte writebuf[] = {0x55};
  ULONGEST offset = (uintptr_t) &test_var;
  ULONGEST xfered_len;

  enum target_xfer_status res
    = linux_proc_xfer_memory_partial_fd (fd, getpid (), nullptr, writebuf,
					 offset, 1, &xfered_len);

  if (res == TARGET_XFER_OK)
    {
      /* The write went through and actually modified TEST_VAR, so
	 this kernel supports writing to /proc/pid/mem.  */
      gdb_assert (xfered_len == 1);
      gdb_assert (test_var == 0x55);
      /* Success.  */
      *writable = true;
    }

  return *writable;
}
4109
4110 /* Parse LINE as a signal set and add its set bits to SIGS. */
4111
4112 static void
4113 add_line_to_sigset (const char *line, sigset_t *sigs)
4114 {
4115 int len = strlen (line) - 1;
4116 const char *p;
4117 int signum;
4118
4119 if (line[len] != '\n')
4120 error (_("Could not parse signal set: %s"), line);
4121
4122 p = line;
4123 signum = len * 4;
4124 while (len-- > 0)
4125 {
4126 int digit;
4127
4128 if (*p >= '0' && *p <= '9')
4129 digit = *p - '0';
4130 else if (*p >= 'a' && *p <= 'f')
4131 digit = *p - 'a' + 10;
4132 else
4133 error (_("Could not parse signal set: %s"), line);
4134
4135 signum -= 4;
4136
4137 if (digit & 1)
4138 sigaddset (sigs, signum + 1);
4139 if (digit & 2)
4140 sigaddset (sigs, signum + 2);
4141 if (digit & 4)
4142 sigaddset (sigs, signum + 3);
4143 if (digit & 8)
4144 sigaddset (sigs, signum + 4);
4145
4146 p++;
4147 }
4148 }
4149
/* Find process PID's pending, blocked and ignored signals from
   /proc/pid/status, and set PENDING, BLOCKED and IGNORED to match.  */
4152
4153 void
4154 linux_proc_pending_signals (int pid, sigset_t *pending,
4155 sigset_t *blocked, sigset_t *ignored)
4156 {
4157 char buffer[PATH_MAX], fname[PATH_MAX];
4158
4159 sigemptyset (pending);
4160 sigemptyset (blocked);
4161 sigemptyset (ignored);
4162 xsnprintf (fname, sizeof fname, "/proc/%d/status", pid);
4163 gdb_file_up procfile = gdb_fopen_cloexec (fname, "r");
4164 if (procfile == NULL)
4165 error (_("Could not open %s"), fname);
4166
4167 while (fgets (buffer, PATH_MAX, procfile.get ()) != NULL)
4168 {
4169 /* Normal queued signals are on the SigPnd line in the status
4170 file. However, 2.6 kernels also have a "shared" pending
4171 queue for delivering signals to a thread group, so check for
4172 a ShdPnd line also.
4173
4174 Unfortunately some Red Hat kernels include the shared pending
4175 queue but not the ShdPnd status field. */
4176
4177 if (startswith (buffer, "SigPnd:\t"))
4178 add_line_to_sigset (buffer + 8, pending);
4179 else if (startswith (buffer, "ShdPnd:\t"))
4180 add_line_to_sigset (buffer + 8, pending);
4181 else if (startswith (buffer, "SigBlk:\t"))
4182 add_line_to_sigset (buffer + 8, blocked);
4183 else if (startswith (buffer, "SigIgn:\t"))
4184 add_line_to_sigset (buffer + 8, ignored);
4185 }
4186 }
4187
4188 static enum target_xfer_status
4189 linux_nat_xfer_osdata (enum target_object object,
4190 const char *annex, gdb_byte *readbuf,
4191 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
4192 ULONGEST *xfered_len)
4193 {
4194 gdb_assert (object == TARGET_OBJECT_OSDATA);
4195
4196 *xfered_len = linux_common_xfer_osdata (annex, readbuf, offset, len);
4197 if (*xfered_len == 0)
4198 return TARGET_XFER_EOF;
4199 else
4200 return TARGET_XFER_OK;
4201 }
4202
std::vector<static_tracepoint_marker>
linux_nat_target::static_tracepoint_markers_by_strid (const char *strid)
{
  char s[IPA_CMD_BUF_SIZE];
  int pid = inferior_ptid.pid ();
  std::vector<static_tracepoint_marker> markers;
  const char *p = s;
  ptid_t ptid = ptid_t (pid, 0);
  static_tracepoint_marker marker;

  /* Pause all */
  target_stop (ptid);

  /* Ask the in-process agent for the first chunk of static
     tracepoint marker definitions (qTfSTM).  The reply is written
     back into S.  */
  strcpy (s, "qTfSTM");
  agent_run_command (pid, s, strlen (s) + 1);

  /* Unpause all.  */
  SCOPE_EXIT { target_continue_no_signal (ptid); };

  /* A reply starting with 'm' carries marker definitions; keep
     requesting subsequent chunks (qTsSTM) until the agent reports no
     more.  */
  while (*p++ == 'm')
    {
      do
	{
	  parse_static_tracepoint_marker_definition (p, &p, &marker);

	  /* Collect every marker when STRID is NULL, otherwise only
	     those whose string id matches.  */
	  if (strid == NULL || marker.str_id == strid)
	    markers.push_back (std::move (marker));
	}
      while (*p++ == ',');	/* comma-separated list */

      strcpy (s, "qTsSTM");
      agent_run_command (pid, s, strlen (s) + 1);
      p = s;
    }

  return markers;
}
4240
4241 /* target_can_async_p implementation. */
4242
bool
linux_nat_target::can_async_p ()
{
  /* This flag should be checked in the common target.c code.  */
  gdb_assert (target_async_permitted);

  /* Otherwise, this target is always able to support async mode.  */
  return true;
}
4252
bool
linux_nat_target::supports_non_stop ()
{
  /* Non-stop mode is supported by this target.  */
  return true;
}
4258
4259 /* to_always_non_stop_p implementation. */
4260
bool
linux_nat_target::always_non_stop_p ()
{
  /* This target always operates in non-stop mode internally.  */
  return true;
}
4266
bool
linux_nat_target::supports_multi_process ()
{
  /* Debugging multiple processes at once is supported.  */
  return true;
}
4272
bool
linux_nat_target::supports_disable_randomization ()
{
  /* Disabling address space randomization for new inferiors is
     supported (see nat/linux-personality.h).  */
  return true;
}
4278
/* SIGCHLD handler that serves two purposes: in non-stop/async mode,
   it notifies the event loop when any child changes state; and it
   allows linux_nat_wait_1 above to use sigsuspend to wait for the
   arrival of a SIGCHLD.  */
4283
4284 static void
4285 sigchld_handler (int signo)
4286 {
4287 int old_errno = errno;
4288
4289 if (debug_linux_nat)
4290 gdb_stdlog->write_async_safe ("sigchld\n", sizeof ("sigchld\n") - 1);
4291
4292 if (signo == SIGCHLD)
4293 {
4294 /* Let the event loop know that there are events to handle. */
4295 linux_nat_target::async_file_mark_if_open ();
4296 }
4297
4298 errno = old_errno;
4299 }
4300
4301 /* Callback registered with the target events file descriptor. */
4302
static void
handle_target_event (int error, gdb_client_data client_data)
{
  /* The event file descriptor became readable; dispatch to the core
     inferior event handler.  */
  inferior_event_handler (INF_REG_EVENT);
}
4308
4309 /* target_async implementation. */
4310
void
linux_nat_target::async (bool enable)
{
  /* Already in the requested state; nothing to do.  */
  if (enable == is_async_p ())
    return;

  /* Block child signals while we create/destroy the pipe, as their
     handler writes to it.  */
  gdb::block_signals blocker;

  if (enable)
    {
      if (!async_file_open ())
	internal_error ("creating event pipe failed.");

      /* Wake the event loop whenever the event pipe is marked (see
	 sigchld_handler above).  */
      add_file_handler (async_wait_fd (), handle_target_event, NULL,
			"linux-nat");

      /* There may be pending events to handle.  Tell the event loop
	 to poll them.  */
      async_file_mark ();
    }
  else
    {
      /* Tear down in the reverse order: stop listening, then close
	 the pipe.  */
      delete_file_handler (async_wait_fd ());
      async_file_close ();
    }
}
4339
4340 /* Stop an LWP, and push a GDB_SIGNAL_0 stop status if no other
4341 event came out. */
4342
4343 static int
4344 linux_nat_stop_lwp (struct lwp_info *lwp)
4345 {
4346 if (!lwp->stopped)
4347 {
4348 linux_nat_debug_printf ("running -> suspending %s",
4349 lwp->ptid.to_string ().c_str ());
4350
4351
4352 if (lwp->last_resume_kind == resume_stop)
4353 {
4354 linux_nat_debug_printf ("already stopping LWP %ld at GDB's request",
4355 lwp->ptid.lwp ());
4356 return 0;
4357 }
4358
4359 stop_callback (lwp);
4360 lwp->last_resume_kind = resume_stop;
4361 }
4362 else
4363 {
4364 /* Already known to be stopped; do nothing. */
4365
4366 if (debug_linux_nat)
4367 {
4368 if (linux_target->find_thread (lwp->ptid)->stop_requested)
4369 linux_nat_debug_printf ("already stopped/stop_requested %s",
4370 lwp->ptid.to_string ().c_str ());
4371 else
4372 linux_nat_debug_printf ("already stopped/no stop_requested yet %s",
4373 lwp->ptid.to_string ().c_str ());
4374 }
4375 }
4376 return 0;
4377 }
4378
void
linux_nat_target::stop (ptid_t ptid)
{
  LINUX_NAT_SCOPED_DEBUG_ENTER_EXIT;
  /* Request a stop for every LWP matching PTID.  */
  iterate_over_lwps (ptid, linux_nat_stop_lwp);
}
4385
4386 /* When requests are passed down from the linux-nat layer to the
4387 single threaded inf-ptrace layer, ptids of (lwpid,0,0) form are
4388 used. The address space pointer is stored in the inferior object,
4389 but the common code that is passed such ptid can't tell whether
4390 lwpid is a "main" process id or not (it assumes so). We reverse
4391 look up the "main" process id from the lwp here. */
4392
4393 struct address_space *
4394 linux_nat_target::thread_address_space (ptid_t ptid)
4395 {
4396 struct lwp_info *lwp;
4397 struct inferior *inf;
4398 int pid;
4399
4400 if (ptid.lwp () == 0)
4401 {
4402 /* An (lwpid,0,0) ptid. Look up the lwp object to get at the
4403 tgid. */
4404 lwp = find_lwp_pid (ptid);
4405 pid = lwp->ptid.pid ();
4406 }
4407 else
4408 {
4409 /* A (pid,lwpid,0) ptid. */
4410 pid = ptid.pid ();
4411 }
4412
4413 inf = find_inferior_pid (this, pid);
4414 gdb_assert (inf != NULL);
4415 return inf->aspace;
4416 }
4417
4418 /* Return the cached value of the processor core for thread PTID. */
4419
4420 int
4421 linux_nat_target::core_of_thread (ptid_t ptid)
4422 {
4423 struct lwp_info *info = find_lwp_pid (ptid);
4424
4425 if (info)
4426 return info->core;
4427 return -1;
4428 }
4429
4430 /* Implementation of to_filesystem_is_local. */
4431
4432 bool
4433 linux_nat_target::filesystem_is_local ()
4434 {
4435 struct inferior *inf = current_inferior ();
4436
4437 if (inf->fake_pid_p || inf->pid == 0)
4438 return true;
4439
4440 return linux_ns_same (inf->pid, LINUX_NS_MNT);
4441 }
4442
4443 /* Convert the INF argument passed to a to_fileio_* method
4444 to a process ID suitable for passing to its corresponding
4445 linux_mntns_* function. If INF is non-NULL then the
4446 caller is requesting the filesystem seen by INF. If INF
4447 is NULL then the caller is requesting the filesystem seen
4448 by the GDB. We fall back to GDB's filesystem in the case
4449 that INF is non-NULL but its PID is unknown. */
4450
4451 static pid_t
4452 linux_nat_fileio_pid_of (struct inferior *inf)
4453 {
4454 if (inf == NULL || inf->fake_pid_p || inf->pid == 0)
4455 return getpid ();
4456 else
4457 return inf->pid;
4458 }
4459
4460 /* Implementation of to_fileio_open. */
4461
4462 int
4463 linux_nat_target::fileio_open (struct inferior *inf, const char *filename,
4464 int flags, int mode, int warn_if_slow,
4465 fileio_error *target_errno)
4466 {
4467 int nat_flags;
4468 mode_t nat_mode;
4469 int fd;
4470
4471 if (fileio_to_host_openflags (flags, &nat_flags) == -1
4472 || fileio_to_host_mode (mode, &nat_mode) == -1)
4473 {
4474 *target_errno = FILEIO_EINVAL;
4475 return -1;
4476 }
4477
4478 fd = linux_mntns_open_cloexec (linux_nat_fileio_pid_of (inf),
4479 filename, nat_flags, nat_mode);
4480 if (fd == -1)
4481 *target_errno = host_to_fileio_error (errno);
4482
4483 return fd;
4484 }
4485
4486 /* Implementation of to_fileio_readlink. */
4487
4488 gdb::optional<std::string>
4489 linux_nat_target::fileio_readlink (struct inferior *inf, const char *filename,
4490 fileio_error *target_errno)
4491 {
4492 char buf[PATH_MAX];
4493 int len;
4494
4495 len = linux_mntns_readlink (linux_nat_fileio_pid_of (inf),
4496 filename, buf, sizeof (buf));
4497 if (len < 0)
4498 {
4499 *target_errno = host_to_fileio_error (errno);
4500 return {};
4501 }
4502
4503 return std::string (buf, len);
4504 }
4505
4506 /* Implementation of to_fileio_unlink. */
4507
4508 int
4509 linux_nat_target::fileio_unlink (struct inferior *inf, const char *filename,
4510 fileio_error *target_errno)
4511 {
4512 int ret;
4513
4514 ret = linux_mntns_unlink (linux_nat_fileio_pid_of (inf),
4515 filename);
4516 if (ret == -1)
4517 *target_errno = host_to_fileio_error (errno);
4518
4519 return ret;
4520 }
4521
4522 /* Implementation of the to_thread_events method. */
4523
void
linux_nat_target::thread_events (int enable)
{
  /* Record whether thread creation/exit events should be reported.  */
  report_thread_events = enable;
}
4529
bool
linux_nat_target::supports_set_thread_options (gdb_thread_options options)
{
  /* Only the clone and exit event options are implemented here;
     accept OPTIONS iff it requests nothing else.  */
  constexpr gdb_thread_options supported_options
    = GDB_THREAD_OPTION_CLONE | GDB_THREAD_OPTION_EXIT;
  return ((options & supported_options) == options);
}
4537
linux_nat_target::linux_nat_target ()
{
  /* We don't change the stratum; this target will sit at
     process_stratum and thread_db will sit at thread_stratum.  This
     is a little strange, since this is a multi-threaded-capable
     target, but we want to be on the stack below thread_db, and we
     also want to be used for single-threaded processes.  */
}
4546
4547 /* See linux-nat.h. */
4548
4549 bool
4550 linux_nat_get_siginfo (ptid_t ptid, siginfo_t *siginfo)
4551 {
4552 int pid = get_ptrace_pid (ptid);
4553 return ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, siginfo) == 0;
4554 }
4555
4556 /* See nat/linux-nat.h. */
4557
ptid_t
current_lwp_ptid (void)
{
  /* The caller expects an lwp-style ptid; assert the lwp field is
     set before handing inferior_ptid back.  */
  gdb_assert (inferior_ptid.lwp_p ());
  return inferior_ptid;
}
4564
4565 /* Implement 'maintenance info linux-lwps'. Displays some basic
4566 information about all the current lwp_info objects. */
4567
4568 static void
4569 maintenance_info_lwps (const char *arg, int from_tty)
4570 {
4571 if (all_lwps ().size () == 0)
4572 {
4573 gdb_printf ("No Linux LWPs\n");
4574 return;
4575 }
4576
4577 /* Start the width at 8 to match the column heading below, then
4578 figure out the widest ptid string. We'll use this to build our
4579 output table below. */
4580 size_t ptid_width = 8;
4581 for (lwp_info *lp : all_lwps ())
4582 ptid_width = std::max (ptid_width, lp->ptid.to_string ().size ());
4583
4584 /* Setup the table headers. */
4585 struct ui_out *uiout = current_uiout;
4586 ui_out_emit_table table_emitter (uiout, 2, -1, "linux-lwps");
4587 uiout->table_header (ptid_width, ui_left, "lwp-ptid", _("LWP Ptid"));
4588 uiout->table_header (9, ui_left, "thread-info", _("Thread ID"));
4589 uiout->table_body ();
4590
4591 /* Display one table row for each lwp_info. */
4592 for (lwp_info *lp : all_lwps ())
4593 {
4594 ui_out_emit_tuple tuple_emitter (uiout, "lwp-entry");
4595
4596 thread_info *th = linux_target->find_thread (lp->ptid);
4597
4598 uiout->field_string ("lwp-ptid", lp->ptid.to_string ().c_str ());
4599 if (th == nullptr)
4600 uiout->field_string ("thread-info", "None");
4601 else
4602 uiout->field_string ("thread-info", print_full_thread_id (th));
4603
4604 uiout->message ("\n");
4605 }
4606 }
4607
4608 void _initialize_linux_nat ();
4609 void
4610 _initialize_linux_nat ()
4611 {
4612 add_setshow_boolean_cmd ("linux-nat", class_maintenance,
4613 &debug_linux_nat, _("\
4614 Set debugging of GNU/Linux native target."), _(" \
4615 Show debugging of GNU/Linux native target."), _(" \
4616 When on, print debug messages relating to the GNU/Linux native target."),
4617 nullptr,
4618 show_debug_linux_nat,
4619 &setdebuglist, &showdebuglist);
4620
4621 add_setshow_boolean_cmd ("linux-namespaces", class_maintenance,
4622 &debug_linux_namespaces, _("\
4623 Set debugging of GNU/Linux namespaces module."), _("\
4624 Show debugging of GNU/Linux namespaces module."), _("\
4625 Enables printf debugging output."),
4626 NULL,
4627 NULL,
4628 &setdebuglist, &showdebuglist);
4629
4630 /* Install a SIGCHLD handler. */
4631 sigchld_action.sa_handler = sigchld_handler;
4632 sigemptyset (&sigchld_action.sa_mask);
4633 sigchld_action.sa_flags = SA_RESTART;
4634
4635 /* Make it the default. */
4636 sigaction (SIGCHLD, &sigchld_action, NULL);
4637
4638 /* Make sure we don't block SIGCHLD during a sigsuspend. */
4639 gdb_sigmask (SIG_SETMASK, NULL, &suspend_mask);
4640 sigdelset (&suspend_mask, SIGCHLD);
4641
4642 sigemptyset (&blocked_mask);
4643
4644 lwp_lwpid_htab_create ();
4645
4646 add_cmd ("linux-lwps", class_maintenance, maintenance_info_lwps,
4647 _("List the Linux LWPS."), &maintenanceinfolist);
4648 }
4649 \f
4650
4651 /* FIXME: kettenis/2000-08-26: The stuff on this page is specific to
4652 the GNU/Linux Threads library and therefore doesn't really belong
4653 here. */
4654
4655 /* NPTL reserves the first two RT signals, but does not provide any
4656 way for the debugger to query the signal numbers - fortunately
4657 they don't change. */
4658 static int lin_thread_signals[] = { __SIGRTMIN, __SIGRTMIN + 1 };
4659
4660 /* See linux-nat.h. */
4661
unsigned int
lin_thread_get_thread_signal_num (void)
{
  /* Number of entries in the lin_thread_signals array above.  */
  return sizeof (lin_thread_signals) / sizeof (lin_thread_signals[0]);
}
4667
4668 /* See linux-nat.h. */
4669
int
lin_thread_get_thread_signal (unsigned int i)
{
  /* Bounds-check I against the table size, then return the I'th
     NPTL-reserved RT signal.  */
  gdb_assert (i < lin_thread_get_thread_signal_num ());
  return lin_thread_signals[i];
}