gdb, gdbserver: detach fork child when detaching from fork parent
[binutils-gdb.git] / gdb / linux-nat.c
1 /* GNU/Linux native-dependent code common to multiple platforms.
2
3 Copyright (C) 2001-2021 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "defs.h"
21 #include "inferior.h"
22 #include "infrun.h"
23 #include "target.h"
24 #include "nat/linux-nat.h"
25 #include "nat/linux-waitpid.h"
26 #include "gdbsupport/gdb_wait.h"
27 #include <unistd.h>
28 #include <sys/syscall.h>
29 #include "nat/gdb_ptrace.h"
30 #include "linux-nat.h"
31 #include "nat/linux-ptrace.h"
32 #include "nat/linux-procfs.h"
33 #include "nat/linux-personality.h"
34 #include "linux-fork.h"
35 #include "gdbthread.h"
36 #include "gdbcmd.h"
37 #include "regcache.h"
38 #include "regset.h"
39 #include "inf-child.h"
40 #include "inf-ptrace.h"
41 #include "auxv.h"
42 #include <sys/procfs.h> /* for elf_gregset etc. */
43 #include "elf-bfd.h" /* for elfcore_write_* */
44 #include "gregset.h" /* for gregset */
45 #include "gdbcore.h" /* for get_exec_file */
46 #include <ctype.h> /* for isdigit */
47 #include <sys/stat.h> /* for struct stat */
48 #include <fcntl.h> /* for O_RDONLY */
49 #include "inf-loop.h"
50 #include "gdbsupport/event-loop.h"
51 #include "event-top.h"
52 #include <pwd.h>
53 #include <sys/types.h>
54 #include <dirent.h>
55 #include "xml-support.h"
56 #include <sys/vfs.h>
57 #include "solib.h"
58 #include "nat/linux-osdata.h"
59 #include "linux-tdep.h"
60 #include "symfile.h"
61 #include "gdbsupport/agent.h"
62 #include "tracepoint.h"
63 #include "gdbsupport/buffer.h"
64 #include "target-descriptions.h"
65 #include "gdbsupport/filestuff.h"
66 #include "objfiles.h"
67 #include "nat/linux-namespaces.h"
68 #include "gdbsupport/fileio.h"
69 #include "gdbsupport/scope-exit.h"
70 #include "gdbsupport/gdb-sigmask.h"
71 #include "gdbsupport/common-debug.h"
72 #include <unordered_map>
73
74 /* This comment documents high-level logic of this file.
75
76 Waiting for events in sync mode
77 ===============================
78
79 When waiting for an event in a specific thread, we just use waitpid,
80 passing the specific pid, and not passing WNOHANG.
81
82 When waiting for an event in all threads, waitpid is not quite good:
83
84 - If the thread group leader exits while other threads in the thread
85 group still exist, waitpid(TGID, ...) hangs. That waitpid won't
86 return an exit status until the other threads in the group are
87 reaped.
88
89 - When a non-leader thread execs, that thread just vanishes without
90 reporting an exit (so we'd hang if we waited for it explicitly in
91 that case). The exec event is instead reported to the TGID pid.
92
93 The solution is to always use -1 and WNOHANG, together with
94 sigsuspend.
95
96 First, we use non-blocking waitpid to check for events. If nothing is
97 found, we use sigsuspend to wait for SIGCHLD. When SIGCHLD arrives,
98 it means something happened to a child process. As soon as we know
99 there's an event, we get back to calling nonblocking waitpid.
100
101 Note that SIGCHLD should be blocked between waitpid and sigsuspend
102 calls, so that we don't miss a signal. If SIGCHLD arrives in between,
103 when it's blocked, the signal becomes pending and sigsuspend
104 immediately notices it and returns.
105
106 Waiting for events in async mode (TARGET_WNOHANG)
107 =================================================
108
109 In async mode, GDB should always be ready to handle both user input
110 and target events, so neither blocking waitpid nor sigsuspend are
111 viable options. Instead, we should asynchronously notify the GDB main
112 event loop whenever there's an unprocessed event from the target. We
113 detect asynchronous target events by handling SIGCHLD signals. To
114 notify the event loop about target events, the self-pipe trick is used
115 --- a pipe is registered as waitable event source in the event loop,
116 the event loop select/poll's on the read end of this pipe (as well on
117 other event sources, e.g., stdin), and the SIGCHLD handler writes a
118 byte to this pipe. This is more portable than relying on
119 pselect/ppoll, since on kernels that lack those syscalls, libc
120 emulates them with select/poll+sigprocmask, and that is racy
121 (a.k.a. plain broken).
122
123 Obviously, if we fail to notify the event loop if there's a target
124 event, it's bad. OTOH, if we notify the event loop when there's no
125 event from the target, linux_nat_wait will detect that there's no real
126 event to report, and return event of type TARGET_WAITKIND_IGNORE.
127 This is mostly harmless, but it will waste time and is better avoided.
128
129 The main design point is that every time GDB is outside linux-nat.c,
130 we have a SIGCHLD handler installed that is called when something
131 happens to the target and notifies the GDB event loop. Whenever GDB
132 core decides to handle the event, and calls into linux-nat.c, we
133 process things as in sync mode, except that we never block in
134 sigsuspend.
135
136 While processing an event, we may end up momentarily blocked in
137 waitpid calls.  Those waitpid calls, while blocking, are guaranteed to
138 return quickly. E.g., in all-stop mode, before reporting to the core
139 that an LWP hit a breakpoint, all LWPs are stopped by sending them
140 SIGSTOP, and synchronously waiting for the SIGSTOP to be reported.
141 Note that this is different from blocking indefinitely waiting for the
142 next event --- here, we're already handling an event.
143
144 Use of signals
145 ==============
146
147 We stop threads by sending a SIGSTOP. The use of SIGSTOP instead of another
148 signal is not entirely significant; we just need for a signal to be delivered,
149 so that we can intercept it. SIGSTOP's advantage is that it can not be
150 blocked. A disadvantage is that it is not a real-time signal, so it can only
151 be queued once; we do not keep track of other sources of SIGSTOP.
152
153 Two other signals that can't be blocked are SIGCONT and SIGKILL. But we can't
154 use them, because they have special behavior when the signal is generated -
155 not when it is delivered. SIGCONT resumes the entire thread group and SIGKILL
156 kills the entire thread group.
157
158 A delivered SIGSTOP would stop the entire thread group, not just the thread we
159 tkill'd. But we never let the SIGSTOP be delivered; we always intercept and
160 cancel it (by PTRACE_CONT without passing SIGSTOP).
161
162 We could use a real-time signal instead. This would solve those problems; we
163 could use PTRACE_GETSIGINFO to locate the specific stop signals sent by GDB.
164 But we would still have to have some support for SIGSTOP, since PTRACE_ATTACH
165 generates it, and there are races with trying to find a signal that is not
166 blocked.
167
168 Exec events
169 ===========
170
171 The case of a thread group (process) with 3 or more threads, and a
172 thread other than the leader execs is worth detailing:
173
174 On an exec, the Linux kernel destroys all threads except the execing
175 one in the thread group, and resets the execing thread's tid to the
176 tgid. No exit notification is sent for the execing thread -- from the
177 ptracer's perspective, it appears as though the execing thread just
178 vanishes. Until we reap all other threads except the leader and the
179 execing thread, the leader will be zombie, and the execing thread will
180 be in `D (disc sleep)' state. As soon as all other threads are
181 reaped, the execing thread changes its tid to the tgid, and the
182 previous (zombie) leader vanishes, giving place to the "new"
183 leader. */
184
/* Some hosts lack O_LARGEFILE; make it a no-op there.  */
#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

/* The single Linux native target instance.  */
struct linux_nat_target *linux_target;

/* Does the current host support PTRACE_GETREGSET?  */
enum tribool have_ptrace_getregset = TRIBOOL_UNKNOWN;

/* Nonzero enables linux-nat debug output (see linux_nat_debug_printf).  */
static unsigned int debug_linux_nat;
/* Implement the "show debug linux-nat" command: print the current
   setting (VALUE is its string form).  */
static void
show_debug_linux_nat (struct ui_file *file, int from_tty,
		      struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Debugging of GNU/Linux lwp module is %s.\n"),
		    value);
}
202
/* Print a linux-nat debug statement.  */

#define linux_nat_debug_printf(fmt, ...) \
  debug_prefixed_printf_cond (debug_linux_nat, "linux-nat", fmt, ##__VA_ARGS__)

/* A singly-linked list of (pid, waitpid status) pairs.  */
struct simple_pid_list
{
  /* The process/LWP id.  */
  int pid;
  /* The raw waitpid status collected for PID.  */
  int status;
  /* Next entry, or NULL at the tail.  */
  struct simple_pid_list *next;
};
/* Processes whose stop has been seen but (presumably) not yet
   consumed -- see add_to_pid_list/pull_pid_from_list.  */
static struct simple_pid_list *stopped_pids;

/* Whether target_thread_events is in effect.  */
static int report_thread_events;

/* Async mode support.  */

/* The read/write ends of the pipe registered as waitable file in the
   event loop.  Both -1 while async mode is off.  */
static int linux_nat_event_pipe[2] = { -1, -1 };

/* True if we're currently in async mode.  */
#define linux_is_async_p() (linux_nat_event_pipe[0] != -1)
228 /* Flush the event pipe. */
229
230 static void
231 async_file_flush (void)
232 {
233 int ret;
234 char buf;
235
236 do
237 {
238 ret = read (linux_nat_event_pipe[0], &buf, 1);
239 }
240 while (ret >= 0 || (ret == -1 && errno == EINTR));
241 }
242
243 /* Put something (anything, doesn't matter what, or how much) in event
244 pipe, so that the select/poll in the event-loop realizes we have
245 something to process. */
246
247 static void
248 async_file_mark (void)
249 {
250 int ret;
251
252 /* It doesn't really matter what the pipe contains, as long we end
253 up with something in it. Might as well flush the previous
254 left-overs. */
255 async_file_flush ();
256
257 do
258 {
259 ret = write (linux_nat_event_pipe[1], "+", 1);
260 }
261 while (ret == -1 && errno == EINTR);
262
263 /* Ignore EAGAIN. If the pipe is full, the event loop will already
264 be awakened anyway. */
265 }
266
267 static int kill_lwp (int lwpid, int signo);
268
269 static int stop_callback (struct lwp_info *lp);
270
271 static void block_child_signals (sigset_t *prev_mask);
272 static void restore_child_signals_mask (sigset_t *prev_mask);
273
274 struct lwp_info;
275 static struct lwp_info *add_lwp (ptid_t ptid);
276 static void purge_lwp_list (int pid);
277 static void delete_lwp (ptid_t ptid);
278 static struct lwp_info *find_lwp_pid (ptid_t ptid);
279
280 static int lwp_status_pending_p (struct lwp_info *lp);
281
282 static void save_stop_reason (struct lwp_info *lp);
283
284 static void close_proc_mem_file (pid_t pid);
285 static void open_proc_mem_file (ptid_t ptid);
286
287 \f
288 /* LWP accessors. */
289
/* See nat/linux-nat.h.  Return LWP's full ptid.  */

ptid_t
ptid_of_lwp (struct lwp_info *lwp)
{
  return lwp->ptid;
}
297
/* See nat/linux-nat.h.  Store the arch-specific per-LWP data INFO in
   LWP; ownership stays with the arch code (released in ~lwp_info).  */

void
lwp_set_arch_private_info (struct lwp_info *lwp,
			   struct arch_lwp_info *info)
{
  lwp->arch_private = info;
}
306
/* See nat/linux-nat.h.  Return LWP's arch-specific data, or NULL.  */

struct arch_lwp_info *
lwp_arch_private_info (struct lwp_info *lwp)
{
  return lwp->arch_private;
}
314
/* See nat/linux-nat.h.  Nonzero iff LWP is currently stopped.  */

int
lwp_is_stopped (struct lwp_info *lwp)
{
  return lwp->stopped;
}
322
/* See nat/linux-nat.h.  Return why LWP last stopped.  */

enum target_stop_reason
lwp_stop_reason (struct lwp_info *lwp)
{
  return lwp->stop_reason;
}
330
/* See nat/linux-nat.h.  Nonzero iff LWP is single-stepping.  */

int
lwp_is_stepping (struct lwp_info *lwp)
{
  return lwp->step;
}
338
339 \f
340 /* Trivial list manipulation functions to keep track of a list of
341 new stopped processes. */
342 static void
343 add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
344 {
345 struct simple_pid_list *new_pid = XNEW (struct simple_pid_list);
346
347 new_pid->pid = pid;
348 new_pid->status = status;
349 new_pid->next = *listp;
350 *listp = new_pid;
351 }
352
353 static int
354 pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
355 {
356 struct simple_pid_list **p;
357
358 for (p = listp; *p != NULL; p = &(*p)->next)
359 if ((*p)->pid == pid)
360 {
361 struct simple_pid_list *next = (*p)->next;
362
363 *statusp = (*p)->status;
364 xfree (*p);
365 *p = next;
366 return 1;
367 }
368 return 0;
369 }
370
/* Return the ptrace options that we want to try to enable.  The
   fork/vfork/exec/syscall events are always requested;
   PTRACE_O_EXITKILL (kill the inferior if GDB dies) is only requested
   for processes we spawned ourselves, i.e. when ATTACHED == 0.  */

static int
linux_nat_ptrace_options (int attached)
{
  int options = (PTRACE_O_TRACESYSGOOD
		 | PTRACE_O_TRACEVFORKDONE
		 | PTRACE_O_TRACEVFORK
		 | PTRACE_O_TRACEFORK
		 | PTRACE_O_TRACEEXEC);

  if (!attached)
    options |= PTRACE_O_EXITKILL;

  return options;
}
389
/* Initialize ptrace and procfs warnings and check for supported
   ptrace features given PID.

   ATTACHED should be nonzero iff we attached to the inferior.  */

static void
linux_init_ptrace_procfs (pid_t pid, int attached)
{
  int options = linux_nat_ptrace_options (attached);

  /* Enable the extended ptrace events (fork/vfork/exec/...) on PID,
     then emit one-time warnings about missing kernel support.  */
  linux_enable_event_reporting (pid, options);
  linux_ptrace_init_warnings ();
  linux_proc_init_warnings ();
}
404
/* Out-of-line destructor; no cleanup beyond the base classes'.  */
linux_nat_target::~linux_nat_target ()
{}
407
/* Target hook called after attaching to PID: enable the extended
   ptrace events (ATTACHED == 1, so no PTRACE_O_EXITKILL).  */
void
linux_nat_target::post_attach (int pid)
{
  linux_init_ptrace_procfs (pid, 1);
}
413
/* Target hook called after spawning a new inferior PTID: enable the
   extended ptrace events (ATTACHED == 0, so also PTRACE_O_EXITKILL).  */
void
linux_nat_target::post_startup_inferior (ptid_t ptid)
{
  linux_init_ptrace_procfs (ptid.pid (), 0);
}
419
420 /* Return the number of known LWPs in the tgid given by PID. */
421
422 static int
423 num_lwps (int pid)
424 {
425 int count = 0;
426
427 for (const lwp_info *lp ATTRIBUTE_UNUSED : all_lwps ())
428 if (lp->ptid.pid () == pid)
429 count++;
430
431 return count;
432 }
433
/* Deleter for lwp_info unique_ptr specialisation.  Removes the LWP
   from the global LWP lists rather than calling `delete' directly.  */

struct lwp_deleter
{
  void operator() (struct lwp_info *lwp) const
  {
    delete_lwp (lwp->ptid);
  }
};

/* A unique_ptr specialisation for lwp_info.  */

typedef std::unique_ptr<struct lwp_info, lwp_deleter> lwp_info_up;
447
/* Target hook for follow_fork.  If FOLLOW_CHILD is false we stay
   attached to the parent; if, in addition, DETACH_FORK is true, the
   new child (CHILD_PTID) is detached -- after working around a kernel
   single-step flag leak for vfork children.  FORK_KIND tells whether
   this was a fork or a vfork.  */

void
linux_nat_target::follow_fork (inferior *child_inf, ptid_t child_ptid,
			       target_waitkind fork_kind, bool follow_child,
			       bool detach_fork)
{
  inf_ptrace_target::follow_fork (child_inf, child_ptid, fork_kind,
				  follow_child, detach_fork);

  if (!follow_child)
    {
      bool has_vforked = fork_kind == TARGET_WAITKIND_VFORKED;
      ptid_t parent_ptid = inferior_ptid;
      int parent_pid = parent_ptid.lwp ();
      int child_pid = child_ptid.lwp ();

      /* We're already attached to the parent, by default.  */
      lwp_info *child_lp = add_lwp (child_ptid);
      child_lp->stopped = 1;
      child_lp->last_resume_kind = resume_stop;

      /* Detach new forked process?  */
      if (detach_fork)
	{
	  int child_stop_signal = 0;
	  bool detach_child = true;

	  /* Move CHILD_LP into a unique_ptr and clear the source pointer
	     to prevent us doing anything stupid with it.  */
	  lwp_info_up child_lp_ptr (child_lp);
	  child_lp = nullptr;

	  linux_target->low_prepare_to_resume (child_lp_ptr.get ());

	  /* When debugging an inferior in an architecture that supports
	     hardware single stepping on a kernel without commit
	     6580807da14c423f0d0a708108e6df6ebc8bc83d, the vfork child
	     process starts with the TIF_SINGLESTEP/X86_EFLAGS_TF bits
	     set if the parent process had them set.
	     To work around this, single step the child process
	     once before detaching to clear the flags.  */

	  /* Note that we consult the parent's architecture instead of
	     the child's because there's no inferior for the child at
	     this point.  */
	  if (!gdbarch_software_single_step_p (target_thread_architecture
					       (parent_ptid)))
	    {
	      int status;

	      linux_disable_event_reporting (child_pid);
	      if (ptrace (PTRACE_SINGLESTEP, child_pid, 0, 0) < 0)
		perror_with_name (_("Couldn't do single step"));
	      if (my_waitpid (child_pid, &status, 0) < 0)
		perror_with_name (_("Couldn't wait vfork process"));
	      else
		{
		  /* Only detach if the child really stopped; record
		     the stop signal so we can forward it on detach.  */
		  detach_child = WIFSTOPPED (status);
		  child_stop_signal = WSTOPSIG (status);
		}
	    }

	  if (detach_child)
	    {
	      int signo = child_stop_signal;

	      /* Do not re-deliver signals the user asked GDB not to
		 pass to the program.  */
	      if (signo != 0
		  && !signal_pass_state (gdb_signal_from_host (signo)))
		signo = 0;
	      ptrace (PTRACE_DETACH, child_pid, 0, signo);

	      close_proc_mem_file (child_pid);
	    }
	}

      if (has_vforked)
	{
	  struct lwp_info *parent_lp;

	  parent_lp = find_lwp_pid (parent_ptid);
	  gdb_assert (linux_supports_tracefork () >= 0);

	  if (linux_supports_tracevforkdone ())
	    {
	      linux_nat_debug_printf ("waiting for VFORK_DONE on %d",
				      parent_pid);
	      parent_lp->stopped = 1;

	      /* We'll handle the VFORK_DONE event like any other
		 event, in target_wait.  */
	    }
	  else
	    {
	      /* We can't insert breakpoints until the child has
		 finished with the shared memory region.  We need to
		 wait until that happens.  Ideal would be to just
		 call:
		 - ptrace (PTRACE_SYSCALL, parent_pid, 0, 0);
		 - waitpid (parent_pid, &status, __WALL);
		 However, most architectures can't handle a syscall
		 being traced on the way out if it wasn't traced on
		 the way in.

		 We might also think to loop, continuing the child
		 until it exits or gets a SIGTRAP.  One problem is
		 that the child might call ptrace with PTRACE_TRACEME.

		 There's no simple and reliable way to figure out when
		 the vforked child will be done with its copy of the
		 shared memory.  We could step it out of the syscall,
		 two instructions, let it go, and then single-step the
		 parent once.  When we have hardware single-step, this
		 would work; with software single-step it could still
		 be made to work but we'd have to be able to insert
		 single-step breakpoints in the child, and we'd have
		 to insert -just- the single-step breakpoint in the
		 parent.  Very awkward.

		 In the end, the best we can do is to make sure it
		 runs for a little while.  Hopefully it will be out of
		 range of any breakpoints we reinsert.  Usually this
		 is only the single-step breakpoint at vfork's return
		 point.  */

	      linux_nat_debug_printf ("no VFORK_DONE support, sleeping a bit");

	      usleep (10000);

	      /* Pretend we've seen a PTRACE_EVENT_VFORK_DONE event,
		 and leave it pending.  The next linux_nat_resume call
		 will notice a pending event, and bypasses actually
		 resuming the inferior.  */
	      parent_lp->status = 0;
	      parent_lp->waitstatus.set_vfork_done ();
	      parent_lp->stopped = 1;

	      /* If we're in async mode, need to tell the event loop
		 there's something here to process.  */
	      if (target_is_async_p ())
		async_file_mark ();
	    }
	}
    }
  else
    {
      /* Following the child: just record its initial LWP as
	 attached-and-stopped.  */
      struct lwp_info *child_lp;

      child_lp = add_lwp (child_ptid);
      child_lp->stopped = 1;
      child_lp->last_resume_kind = resume_stop;
    }
}
601
602 \f
/* Fork catchpoints need kernel fork-event reporting; return 0 on
   success, nonzero if PTRACE_O_TRACEFORK is unsupported.  */
int
linux_nat_target::insert_fork_catchpoint (int pid)
{
  return !linux_supports_tracefork ();
}
608
/* Removing a fork catchpoint always succeeds (nothing to undo).  */
int
linux_nat_target::remove_fork_catchpoint (int pid)
{
  return 0;
}
614
/* Vfork catchpoints need kernel fork-event reporting; return 0 on
   success, nonzero if unsupported.  */
int
linux_nat_target::insert_vfork_catchpoint (int pid)
{
  return !linux_supports_tracefork ();
}
620
/* Removing a vfork catchpoint always succeeds (nothing to undo).  */
int
linux_nat_target::remove_vfork_catchpoint (int pid)
{
  return 0;
}
626
/* Exec catchpoints need kernel exec-event reporting; return 0 on
   success, nonzero if unsupported.  */
int
linux_nat_target::insert_exec_catchpoint (int pid)
{
  return !linux_supports_tracefork ();
}
632
/* Removing an exec catchpoint always succeeds (nothing to undo).  */
int
linux_nat_target::remove_exec_catchpoint (int pid)
{
  return 0;
}
638
/* Enable syscall catchpoints for PID.  Return 0 on success, 1 if
   PTRACE_O_TRACESYSGOOD is unsupported by the running kernel.  */
int
linux_nat_target::set_syscall_catchpoint (int pid, bool needed, int any_count,
					  gdb::array_view<const int> syscall_counts)
{
  if (!linux_supports_tracesysgood ())
    return 1;

  /* On GNU/Linux, we ignore the arguments.  It means that we only
     enable the syscall catchpoints, but do not disable them.

     Also, we do not use the `syscall_counts' information because we do not
     filter system calls here.  We let GDB do the logic for us.  */
  return 0;
}
653
/* List of known LWPs, keyed by LWP PID.  This speeds up the common
   case of mapping a PID returned from the kernel to our corresponding
   lwp_info data structure.  */
static htab_t lwp_lwpid_htab;

/* Calculate a hash from a lwp_info's LWP PID.  */

static hashval_t
lwp_info_hash (const void *ap)
{
  /* AP is really a struct lwp_info *; only the lwp id participates in
     the hash, matching lwp_lwpid_htab_eq.  */
  const struct lwp_info *lp = (struct lwp_info *) ap;
  pid_t pid = lp->ptid.lwp ();

  return iterative_hash_object (pid, 0);
}
669
670 /* Equality function for the lwp_info hash table. Compares the LWP's
671 PID. */
672
673 static int
674 lwp_lwpid_htab_eq (const void *a, const void *b)
675 {
676 const struct lwp_info *entry = (const struct lwp_info *) a;
677 const struct lwp_info *element = (const struct lwp_info *) b;
678
679 return entry->ptid.lwp () == element->ptid.lwp ();
680 }
681
/* Create the lwp_lwpid_htab hash table, sized for ~100 entries.  */

static void
lwp_lwpid_htab_create (void)
{
  lwp_lwpid_htab = htab_create (100, lwp_info_hash, lwp_lwpid_htab_eq, NULL);
}
689
/* Add LP to the hash table.  LP's lwp id must not already be
   present (asserted).  */

static void
lwp_lwpid_htab_add_lwp (struct lwp_info *lp)
{
  void **slot;

  slot = htab_find_slot (lwp_lwpid_htab, lp, INSERT);
  gdb_assert (slot != NULL && *slot == NULL);
  *slot = lp;
}
701
/* Head of doubly-linked list of known LWPs.  Sorted by reverse
   creation order.  This order is assumed in some cases.  E.g.,
   reaping status after killing all lwps of a process: the leader LWP
   must be reaped last.  */

static intrusive_list<lwp_info> lwp_list;
708
/* See linux-nat.h.  Range over every known LWP.  */

lwp_info_range
all_lwps ()
{
  return lwp_info_range (lwp_list.begin ());
}
716
/* See linux-nat.h.  Like all_lwps, but safe against deletion of the
   current element while iterating.  */

lwp_info_safe_range
all_lwps_safe ()
{
  return lwp_info_safe_range (lwp_list.begin ());
}
724
/* Add LP to sorted-by-reverse-creation-order doubly-linked list.  */

static void
lwp_list_add (struct lwp_info *lp)
{
  /* Pushing at the front keeps the newest LWP first.  */
  lwp_list.push_front (*lp);
}
732
/* Remove LP from sorted-by-reverse-creation-order doubly-linked
   list.  */

static void
lwp_list_remove (struct lwp_info *lp)
{
  /* Remove from sorted-by-reverse-creation-order list.  */
  lwp_list.erase (lwp_list.iterator_to (*lp));
}
742
743 \f
744
/* Signal mask for use with sigsuspend in linux_nat_wait, initialized in
   _initialize_linux_nat.  */
static sigset_t suspend_mask;

/* Signals to block to make that sigsuspend work.  */
static sigset_t blocked_mask;

/* SIGCHLD action.  */
static struct sigaction sigchld_action;
754
/* Block child signals (SIGCHLD and linux threads signals), and store
   the previous mask in PREV_MASK.  Pair with
   restore_child_signals_mask.  */

static void
block_child_signals (sigset_t *prev_mask)
{
  /* Make sure SIGCHLD is blocked.  BLOCKED_MASK accumulates it the
     first time through.  */
  if (!sigismember (&blocked_mask, SIGCHLD))
    sigaddset (&blocked_mask, SIGCHLD);

  gdb_sigmask (SIG_BLOCK, &blocked_mask, prev_mask);
}
767
/* Restore child signals mask, previously returned by
   block_child_signals.  */

static void
restore_child_signals_mask (sigset_t *prev_mask)
{
  gdb_sigmask (SIG_SETMASK, prev_mask, NULL);
}
776
/* Mask of signals to pass directly to the inferior.  */
static sigset_t pass_mask;

/* Update signals to pass to the inferior.  PASS_SIGNALS is indexed by
   GDB signal number; a nonzero entry means the corresponding host
   signal should be delivered to the inferior rather than
   intercepted.  */
void
linux_nat_target::pass_signals
  (gdb::array_view<const unsigned char> pass_signals)
{
  int signo;

  sigemptyset (&pass_mask);

  /* Translate each host signal to GDB's numbering before consulting
     PASS_SIGNALS.  */
  for (signo = 1; signo < NSIG; signo++)
    {
      int target_signo = gdb_signal_from_host (signo);
      if (target_signo < pass_signals.size () && pass_signals[target_signo])
	sigaddset (&pass_mask, signo);
    }
}
796
797 \f
798
799 /* Prototypes for local functions. */
800 static int stop_wait_callback (struct lwp_info *lp);
801 static int resume_stopped_resumed_lwps (struct lwp_info *lp, const ptid_t wait_ptid);
802 static int check_ptrace_stopped_lwp_gone (struct lwp_info *lp);
803
804 \f
805
/* Destroy and free LP.  */

lwp_info::~lwp_info ()
{
  /* Let the arch specific bits release arch_lwp_info.  */
  linux_target->low_delete_thread (this->arch_private);
}
813
814 /* Traversal function for purge_lwp_list. */
815
816 static int
817 lwp_lwpid_htab_remove_pid (void **slot, void *info)
818 {
819 struct lwp_info *lp = (struct lwp_info *) *slot;
820 int pid = *(int *) info;
821
822 if (lp->ptid.pid () == pid)
823 {
824 htab_clear_slot (lwp_lwpid_htab, slot);
825 lwp_list_remove (lp);
826 delete lp;
827 }
828
829 return 1;
830 }
831
/* Remove all LWPs belonging to PID from the lwp list (hash table and
   linked list), destroying each one.  */

static void
purge_lwp_list (int pid)
{
  htab_traverse_noresize (lwp_lwpid_htab, lwp_lwpid_htab_remove_pid, &pid);
}
839
/* Add the LWP specified by PTID to the list.  PTID is the first LWP
   in the process.  Return a pointer to the structure describing the
   new LWP.

   This differs from add_lwp in that we don't let the arch specific
   bits know about this new thread.  Current clients of this callback
   take the opportunity to install watchpoints in the new thread, and
   we shouldn't do that for the first thread.  If we're spawning a
   child ("run"), the thread executes the shell wrapper first, and we
   shouldn't touch it until it execs the program we want to debug.
   For "attach", it'd be okay to call the callback, but it's not
   necessary, because watchpoints can't yet have been inserted into
   the inferior.  */

static struct lwp_info *
add_initial_lwp (ptid_t ptid)
{
  gdb_assert (ptid.lwp_p ());

  lwp_info *lp = new lwp_info (ptid);

  /* Add to sorted-by-reverse-creation-order list.  */
  lwp_list_add (lp);

  /* Add to keyed-by-pid htab.  */
  lwp_lwpid_htab_add_lwp (lp);

  return lp;
}
870
/* Add the LWP specified by PTID to the list.  Return a pointer to the
   structure describing the new LWP.  The LWP should already be
   stopped.  */

static struct lwp_info *
add_lwp (ptid_t ptid)
{
  struct lwp_info *lp;

  lp = add_initial_lwp (ptid);

  /* Let the arch specific bits know about this new thread.  Current
     clients of this callback take the opportunity to install
     watchpoints in the new thread.  We don't do this for the first
     thread though.  See add_initial_lwp.  */
  linux_target->low_new_thread (lp);

  return lp;
}
890
/* Remove the LWP specified by PTID from the hash table and the LWP
   list, and destroy it.  No-op if PTID is unknown.  */

static void
delete_lwp (ptid_t ptid)
{
  /* A dummy key is enough for the lookup; only the lwp id is
     hashed/compared.  */
  lwp_info dummy (ptid);

  void **slot = htab_find_slot (lwp_lwpid_htab, &dummy, NO_INSERT);
  if (slot == NULL)
    return;

  lwp_info *lp = *(struct lwp_info **) slot;
  gdb_assert (lp != NULL);

  htab_clear_slot (lwp_lwpid_htab, slot);

  /* Remove from sorted-by-reverse-creation-order list.  */
  lwp_list_remove (lp);

  /* Release.  */
  delete lp;
}
913
914 /* Return a pointer to the structure describing the LWP corresponding
915 to PID. If no corresponding LWP could be found, return NULL. */
916
917 static struct lwp_info *
918 find_lwp_pid (ptid_t ptid)
919 {
920 int lwp;
921
922 if (ptid.lwp_p ())
923 lwp = ptid.lwp ();
924 else
925 lwp = ptid.pid ();
926
927 lwp_info dummy (ptid_t (0, lwp));
928 return (struct lwp_info *) htab_find (lwp_lwpid_htab, &dummy);
929 }
930
931 /* See nat/linux-nat.h. */
932
933 struct lwp_info *
934 iterate_over_lwps (ptid_t filter,
935 gdb::function_view<iterate_over_lwps_ftype> callback)
936 {
937 for (lwp_info *lp : all_lwps_safe ())
938 {
939 if (lp->ptid.matches (filter))
940 {
941 if (callback (lp) != 0)
942 return lp;
943 }
944 }
945
946 return NULL;
947 }
948
/* Update our internal state when changing from one checkpoint to
   another indicated by NEW_PTID.  We can only switch single-threaded
   applications, so we only create one new LWP, and the previous list
   is discarded.  */

void
linux_nat_switch_fork (ptid_t new_ptid)
{
  struct lwp_info *lp;

  /* Drop every LWP of the current inferior before installing the
     single LWP of the checkpoint.  */
  purge_lwp_list (inferior_ptid.pid ());

  lp = add_lwp (new_ptid);
  lp->stopped = 1;

  /* This changes the thread's ptid while preserving the gdb thread
     num.  Also changes the inferior pid, while preserving the
     inferior num.  */
  thread_change_ptid (linux_target, inferior_ptid, new_ptid);

  /* We've just told GDB core that the thread changed target id, but,
     in fact, it really is a different thread, with different register
     contents.  */
  registers_changed ();
}
974
/* Handle the exit of a single thread LP: announce it if thread events
   are being printed, then delete its gdb thread (if any) and its
   lwp_info.  */

static void
exit_lwp (struct lwp_info *lp)
{
  struct thread_info *th = find_thread_ptid (linux_target, lp->ptid);

  if (th)
    {
      if (print_thread_events)
	printf_unfiltered (_("[%s exited]\n"),
			   target_pid_to_str (lp->ptid).c_str ());

      delete_thread (th);
    }

  delete_lwp (lp->ptid);
}
993
/* Wait for the LWP specified by LP, which we have just attached to.
   Returns a wait status for that LWP, to cache.  *SIGNALLED is set to
   1 if the initial stop was caused by a signal other than the
   expected SIGSTOP.  */

static int
linux_nat_post_attach_wait (ptid_t ptid, int *signalled)
{
  pid_t new_pid, pid = ptid.lwp ();
  int status;

  if (linux_proc_pid_is_stopped (pid))
    {
      linux_nat_debug_printf ("Attaching to a stopped process");

      /* The process is definitely stopped.  It is in a job control
	 stop, unless the kernel predates the TASK_STOPPED /
	 TASK_TRACED distinction, in which case it might be in a
	 ptrace stop.  Make sure it is in a ptrace stop; from there we
	 can kill it, signal it, et cetera.

	 First make sure there is a pending SIGSTOP.  Since we are
	 already attached, the process can not transition from stopped
	 to running without a PTRACE_CONT; so we know this signal will
	 go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
	 probably already in the queue (unless this kernel is old
	 enough to use TASK_STOPPED for ptrace stops); but since SIGSTOP
	 is not an RT signal, it can only be queued once.  */
      kill_lwp (pid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the SIGSTOP
	 (or a higher priority signal, just like normal PTRACE_ATTACH).  */
      ptrace (PTRACE_CONT, pid, 0, 0);
    }

  /* Make sure the initial process is stopped.  The user-level threads
     layer might want to poke around in the inferior, and that won't
     work if things haven't stabilized yet.  */
  new_pid = my_waitpid (pid, &status, __WALL);
  gdb_assert (pid == new_pid);

  if (!WIFSTOPPED (status))
    {
      /* The pid we tried to attach has apparently just exited.  */
      linux_nat_debug_printf ("Failed to stop %d: %s", pid,
			      status_to_str (status).c_str ());
      return status;
    }

  if (WSTOPSIG (status) != SIGSTOP)
    {
      /* Record that the first stop was something other than the
	 attach-generated SIGSTOP.  */
      *signalled = 1;
      linux_nat_debug_printf ("Received %s after attaching",
			      status_to_str (status).c_str ());
    }

  return status;
}
1050
/* Target hook: start a new inferior running EXEC_FILE with arguments
   ALLARGS and environment ENV, with address-space randomization
   disabled if the user requested it.  */
void
linux_nat_target::create_inferior (const char *exec_file,
				   const std::string &allargs,
				   char **env, int from_tty)
{
  /* Disable ASLR for the child if requested; restored on scope
     exit.  */
  maybe_disable_address_space_randomization restore_personality
    (disable_randomization);

  /* The fork_child mechanism is synchronous and calls target_wait, so
     we have to mask the async mode.  */

  /* Make sure we report all signals during startup.  */
  pass_signals ({});

  inf_ptrace_target::create_inferior (exec_file, allargs, env, from_tty);

  /* Cache a /proc/PID/mem file descriptor for memory transfers.  */
  open_proc_mem_file (inferior_ptid);
}
1069
1070 /* Callback for linux_proc_attach_tgid_threads. Attach to PTID if not
1071 already attached. Returns true if a new LWP is found, false
1072 otherwise. */
1073
1074 static int
1075 attach_proc_task_lwp_callback (ptid_t ptid)
1076 {
1077 struct lwp_info *lp;
1078
1079 /* Ignore LWPs we're already attached to. */
1080 lp = find_lwp_pid (ptid);
1081 if (lp == NULL)
1082 {
1083 int lwpid = ptid.lwp ();
1084
1085 if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) < 0)
1086 {
1087 int err = errno;
1088
1089 /* Be quiet if we simply raced with the thread exiting.
1090 EPERM is returned if the thread's task still exists, and
1091 is marked as exited or zombie, as well as other
1092 conditions, so in that case, confirm the status in
1093 /proc/PID/status. */
1094 if (err == ESRCH
1095 || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
1096 {
1097 linux_nat_debug_printf
1098 ("Cannot attach to lwp %d: thread is gone (%d: %s)",
1099 lwpid, err, safe_strerror (err));
1100
1101 }
1102 else
1103 {
1104 std::string reason
1105 = linux_ptrace_attach_fail_reason_string (ptid, err);
1106
1107 warning (_("Cannot attach to lwp %d: %s"),
1108 lwpid, reason.c_str ());
1109 }
1110 }
1111 else
1112 {
1113 linux_nat_debug_printf ("PTRACE_ATTACH %s, 0, 0 (OK)",
1114 target_pid_to_str (ptid).c_str ());
1115
1116 lp = add_lwp (ptid);
1117
1118 /* The next time we wait for this LWP we'll see a SIGSTOP as
1119 PTRACE_ATTACH brings it to a halt. */
1120 lp->signalled = 1;
1121
1122 /* We need to wait for a stop before being able to make the
1123 next ptrace call on this LWP. */
1124 lp->must_set_ptrace_flags = 1;
1125
1126 /* So that wait collects the SIGSTOP. */
1127 lp->resumed = 1;
1128
1129 /* Also add the LWP to gdb's thread list, in case a
1130 matching libthread_db is not found (or the process uses
1131 raw clone). */
1132 add_thread (linux_target, lp->ptid);
1133 set_running (linux_target, lp->ptid, true);
1134 set_executing (linux_target, lp->ptid, true);
1135 }
1136
1137 return 1;
1138 }
1139 return 0;
1140 }
1141
/* Implementation of target_ops::attach for GNU/Linux.  Attach to the
   process specified by ARGS, then attach to each of its LWPs found
   via /proc.  */

void
linux_nat_target::attach (const char *args, int from_tty)
{
  struct lwp_info *lp;
  int status;
  ptid_t ptid;

  /* Make sure we report all signals during attach.  */
  pass_signals ({});

  try
    {
      inf_ptrace_target::attach (args, from_tty);
    }
  catch (const gdb_exception_error &ex)
    {
      /* On failure, try to augment the error message with a Linux
	 specific diagnosis of why attaching failed (e.g., the
	 process is already traced).  */
      pid_t pid = parse_pid_to_attach (args);
      std::string reason = linux_ptrace_attach_fail_reason (pid);

      if (!reason.empty ())
	throw_error (ex.error, "warning: %s\n%s", reason.c_str (),
		     ex.what ());
      else
	throw_error (ex.error, "%s", ex.what ());
    }

  /* The ptrace base target adds the main thread with (pid,0,0)
     format.  Decorate it with lwp info.  */
  ptid = ptid_t (inferior_ptid.pid (),
		 inferior_ptid.pid ());
  thread_change_ptid (linux_target, inferior_ptid, ptid);

  /* Add the initial process as the first LWP to the list.  */
  lp = add_initial_lwp (ptid);

  /* Wait for the initial post-attach stop.  If the process died
     instead of stopping, mourn it and report how it died.  */
  status = linux_nat_post_attach_wait (lp->ptid, &lp->signalled);
  if (!WIFSTOPPED (status))
    {
      if (WIFEXITED (status))
	{
	  int exit_code = WEXITSTATUS (status);

	  target_terminal::ours ();
	  target_mourn_inferior (inferior_ptid);
	  if (exit_code == 0)
	    error (_("Unable to attach: program exited normally."));
	  else
	    error (_("Unable to attach: program exited with code %d."),
		   exit_code);
	}
      else if (WIFSIGNALED (status))
	{
	  enum gdb_signal signo;

	  target_terminal::ours ();
	  target_mourn_inferior (inferior_ptid);

	  signo = gdb_signal_from_host (WTERMSIG (status));
	  error (_("Unable to attach: program terminated with signal "
		   "%s, %s."),
		 gdb_signal_to_name (signo),
		 gdb_signal_to_string (signo));
	}

      /* Neither stopped, exited, nor signalled -- should not happen.  */
      internal_error (__FILE__, __LINE__,
		      _("unexpected status %d for PID %ld"),
		      status, (long) ptid.lwp ());
    }

  lp->stopped = 1;

  open_proc_mem_file (lp->ptid);

  /* Save the wait status to report later.  */
  lp->resumed = 1;
  linux_nat_debug_printf ("waitpid %ld, saving status %s",
			  (long) lp->ptid.pid (),
			  status_to_str (status).c_str ());

  lp->status = status;

  /* We must attach to every LWP.  If /proc is mounted, use that to
     find them now.  The inferior may be using raw clone instead of
     using pthreads.  But even if it is using pthreads, thread_db
     walks structures in the inferior's address space to find the list
     of threads/LWPs, and those structures may well be corrupted.
     Note that once thread_db is loaded, we'll still use it to list
     threads and associate pthread info with each LWP.  */
  linux_proc_attach_tgid_threads (lp->ptid.pid (),
				  attach_proc_task_lwp_callback);

  if (target_can_async_p ())
    target_async (1);
}
1236
1237 /* Ptrace-detach the thread with pid PID. */
1238
1239 static void
1240 detach_one_pid (int pid, int signo)
1241 {
1242 if (ptrace (PTRACE_DETACH, pid, 0, signo) < 0)
1243 {
1244 int save_errno = errno;
1245
1246 /* We know the thread exists, so ESRCH must mean the lwp is
1247 zombie. This can happen if one of the already-detached
1248 threads exits the whole thread group. In that case we're
1249 still attached, and must reap the lwp. */
1250 if (save_errno == ESRCH)
1251 {
1252 int ret, status;
1253
1254 ret = my_waitpid (pid, &status, __WALL);
1255 if (ret == -1)
1256 {
1257 warning (_("Couldn't reap LWP %d while detaching: %s"),
1258 pid, safe_strerror (errno));
1259 }
1260 else if (!WIFEXITED (status) && !WIFSIGNALED (status))
1261 {
1262 warning (_("Reaping LWP %d while detaching "
1263 "returned unexpected status 0x%x"),
1264 pid, status);
1265 }
1266 }
1267 else
1268 error (_("Can't detach %d: %s"),
1269 pid, safe_strerror (save_errno));
1270 }
1271 else
1272 linux_nat_debug_printf ("PTRACE_DETACH (%d, %s, 0) (OK)",
1273 pid, strsignal (signo));
1274 }
1275
/* Get pending signal of THREAD as a host signal number, for detaching
   purposes.  This is the signal the thread last stopped for, which we
   need to deliver to the thread when detaching, otherwise, it'd be
   suppressed/lost.  */

static int
get_detach_signal (struct lwp_info *lp)
{
  enum gdb_signal signo = GDB_SIGNAL_0;

  /* If we paused threads momentarily, we may have stored pending
     events in lp->status or lp->waitstatus (see stop_wait_callback),
     and GDB core hasn't seen any signal for those threads.
     Otherwise, the last signal reported to the core is found in the
     thread object's stop_signal.

     There's a corner case that isn't handled here at present.  Only
     if the thread stopped with a TARGET_WAITKIND_STOPPED does
     stop_signal make sense as a real signal to pass to the inferior.
     Some catchpoint related events, like
     TARGET_WAITKIND_(V)FORK|EXEC|SYSCALL, have their stop_signal set
     to GDB_SIGNAL_SIGTRAP when the catchpoint triggers.  But,
     those traps are debug API (ptrace in our case) related and
     induced; the inferior wouldn't see them if it wasn't being
     traced.  Hence, we should never pass them to the inferior, even
     when set to pass state.  Since this corner case isn't handled by
     infrun.c when proceeding with a signal, for consistency, neither
     do we handle it here (or elsewhere in the file we check for
     signal pass state).  Normally SIGTRAP isn't set to pass state, so
     this is really a corner case.  */

  if (lp->waitstatus.kind () != TARGET_WAITKIND_IGNORE)
    signo = GDB_SIGNAL_0;	/* a pending ptrace event, not a real signal.  */
  else if (lp->status)
    /* A raw waitpid status deferred by stop_wait_callback; extract
       the stop signal from it.  */
    signo = gdb_signal_from_host (WSTOPSIG (lp->status));
  else
    {
      struct thread_info *tp = find_thread_ptid (linux_target, lp->ptid);

      if (target_is_non_stop_p () && !tp->executing ())
	{
	  if (tp->has_pending_waitstatus ())
	    {
	      /* If the thread has a pending event, and it was stopped with a
		 signal, use that signal to resume it.  If it has a pending
		 event of another kind, it was not stopped with a signal, so
		 resume it without a signal.  */
	      if (tp->pending_waitstatus ().kind () == TARGET_WAITKIND_STOPPED)
		signo = tp->pending_waitstatus ().sig ();
	      else
		signo = GDB_SIGNAL_0;
	    }
	  else
	    signo = tp->stop_signal ();
	}
      else if (!target_is_non_stop_p ())
	{
	  ptid_t last_ptid;
	  process_stratum_target *last_target;

	  get_last_target_status (&last_target, &last_ptid, nullptr);

	  /* In all-stop, only the thread that reported the last
	     event has a meaningful stop_signal.  */
	  if (last_target == linux_target
	      && lp->ptid.lwp () == last_ptid.lwp ())
	    signo = tp->stop_signal ();
	}
    }

  /* Only deliver the signal if the user has it set to "pass".  */
  if (signo == GDB_SIGNAL_0)
    {
      linux_nat_debug_printf ("lwp %s has no pending signal",
			      target_pid_to_str (lp->ptid).c_str ());
    }
  else if (!signal_pass_state (signo))
    {
      linux_nat_debug_printf
	("lwp %s had signal %s but it is in no pass state",
	 target_pid_to_str (lp->ptid).c_str (), gdb_signal_to_string (signo));
    }
  else
    {
      linux_nat_debug_printf ("lwp %s has pending signal %s",
			      target_pid_to_str (lp->ptid).c_str (),
			      gdb_signal_to_string (signo));

      return gdb_signal_to_host (signo);
    }

  return 0;
}
1366
1367 /* Detach from LP. If SIGNO_P is non-NULL, then it points to the
1368 signal number that should be passed to the LWP when detaching.
1369 Otherwise pass any pending signal the LWP may have, if any. */
1370
1371 static void
1372 detach_one_lwp (struct lwp_info *lp, int *signo_p)
1373 {
1374 int lwpid = lp->ptid.lwp ();
1375 int signo;
1376
1377 gdb_assert (lp->status == 0 || WIFSTOPPED (lp->status));
1378
1379 /* If the lwp/thread we are about to detach has a pending fork event,
1380 there is a process GDB is attached to that the core of GDB doesn't know
1381 about. Detach from it. */
1382
1383 /* Check in lwp_info::status. */
1384 if (WIFSTOPPED (lp->status) && linux_is_extended_waitstatus (lp->status))
1385 {
1386 int event = linux_ptrace_get_extended_event (lp->status);
1387
1388 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
1389 {
1390 unsigned long child_pid;
1391 int ret = ptrace (PTRACE_GETEVENTMSG, lp->ptid.lwp (), 0, &child_pid);
1392 if (ret == 0)
1393 detach_one_pid (child_pid, 0);
1394 else
1395 perror_warning_with_name (_("Failed to detach fork child"));
1396 }
1397 }
1398
1399 /* Check in lwp_info::waitstatus. */
1400 if (lp->waitstatus.kind () == TARGET_WAITKIND_VFORKED
1401 || lp->waitstatus.kind () == TARGET_WAITKIND_FORKED)
1402 detach_one_pid (lp->waitstatus.child_ptid ().pid (), 0);
1403
1404
1405 /* Check in thread_info::pending_waitstatus. */
1406 thread_info *tp = find_thread_ptid (linux_target, lp->ptid);
1407 if (tp->has_pending_waitstatus ())
1408 {
1409 const target_waitstatus &ws = tp->pending_waitstatus ();
1410
1411 if (ws.kind () == TARGET_WAITKIND_VFORKED
1412 || ws.kind () == TARGET_WAITKIND_FORKED)
1413 detach_one_pid (ws.child_ptid ().pid (), 0);
1414 }
1415
1416 /* Check in thread_info::pending_follow. */
1417 if (tp->pending_follow.kind () == TARGET_WAITKIND_VFORKED
1418 || tp->pending_follow.kind () == TARGET_WAITKIND_FORKED)
1419 detach_one_pid (tp->pending_follow.child_ptid ().pid (), 0);
1420
1421 if (lp->status != 0)
1422 linux_nat_debug_printf ("Pending %s for %s on detach.",
1423 strsignal (WSTOPSIG (lp->status)),
1424 target_pid_to_str (lp->ptid).c_str ());
1425
1426 /* If there is a pending SIGSTOP, get rid of it. */
1427 if (lp->signalled)
1428 {
1429 linux_nat_debug_printf ("Sending SIGCONT to %s",
1430 target_pid_to_str (lp->ptid).c_str ());
1431
1432 kill_lwp (lwpid, SIGCONT);
1433 lp->signalled = 0;
1434 }
1435
1436 if (signo_p == NULL)
1437 {
1438 /* Pass on any pending signal for this LWP. */
1439 signo = get_detach_signal (lp);
1440 }
1441 else
1442 signo = *signo_p;
1443
1444 /* Preparing to resume may try to write registers, and fail if the
1445 lwp is zombie. If that happens, ignore the error. We'll handle
1446 it below, when detach fails with ESRCH. */
1447 try
1448 {
1449 linux_target->low_prepare_to_resume (lp);
1450 }
1451 catch (const gdb_exception_error &ex)
1452 {
1453 if (!check_ptrace_stopped_lwp_gone (lp))
1454 throw;
1455 }
1456
1457 detach_one_pid (lwpid, signo);
1458
1459 delete_lwp (lp->ptid);
1460 }
1461
1462 static int
1463 detach_callback (struct lwp_info *lp)
1464 {
1465 /* We don't actually detach from the thread group leader just yet.
1466 If the thread group exits, we must reap the zombie clone lwps
1467 before we're able to reap the leader. */
1468 if (lp->ptid.lwp () != lp->ptid.pid ())
1469 detach_one_lwp (lp, NULL);
1470 return 0;
1471 }
1472
/* Implementation of target_ops::detach for GNU/Linux.  Detach from
   all LWPs of INF, passing along any pending signals.  */

void
linux_nat_target::detach (inferior *inf, int from_tty)
{
  struct lwp_info *main_lwp;
  int pid = inf->pid;

  /* Don't unregister from the event loop, as there may be other
     inferiors running. */

  /* Stop all threads before detaching.  ptrace requires that the
     thread is stopped to successfully detach.  */
  iterate_over_lwps (ptid_t (pid), stop_callback);
  /* ... and wait until all of them have reported back that
     they're no longer running.  */
  iterate_over_lwps (ptid_t (pid), stop_wait_callback);

  /* We can now safely remove breakpoints.  We don't do this earlier
     in common code because this target doesn't currently support
     writing memory while the inferior is running.  */
  remove_breakpoints_inf (current_inferior ());

  /* Detach from every LWP except the thread group leader; see
     detach_callback for why the leader must be handled last.  */
  iterate_over_lwps (ptid_t (pid), detach_callback);

  /* Only the initial process should be left right now.  */
  gdb_assert (num_lwps (pid) == 1);

  main_lwp = find_lwp_pid (ptid_t (pid));

  if (forks_exist_p ())
    {
      /* Multi-fork case.  The current inferior_ptid is being detached
	 from, but there are other viable forks to debug.  Detach from
	 the current fork, and context-switch to the first
	 available.  */
      linux_fork_detach (from_tty);
    }
  else
    {
      target_announce_detach (from_tty);

      /* Pass on any pending signal for the last LWP.  */
      int signo = get_detach_signal (main_lwp);

      detach_one_lwp (main_lwp, &signo);

      detach_success (inf);
    }

  /* The process's address space is no longer accessible.  */
  close_proc_mem_file (pid);
}
1523
/* Resume execution of LP.  If STEP is nonzero, single-step it.  If
   SIGNO is not GDB_SIGNAL_0, deliver that signal on resumption.
   Errors propagate to the caller; see linux_resume_one_lwp for the
   variant that handles the LWP disappearing.  */

static void
linux_resume_one_lwp_throw (struct lwp_info *lp, int step,
			    enum gdb_signal signo)
{
  lp->step = step;

  /* stop_pc doubles as the PC the LWP had when it was last resumed.
     We only presently need that if the LWP is stepped though (to
     handle the case of stepping a breakpoint instruction).  */
  if (step)
    {
      struct regcache *regcache = get_thread_regcache (linux_target, lp->ptid);

      lp->stop_pc = regcache_read_pc (regcache);
    }
  else
    lp->stop_pc = 0;

  /* Let the low target prepare (arch state), then actually resume.
     Either of these may throw; state below is only cleared on
     success.  */
  linux_target->low_prepare_to_resume (lp);
  linux_target->low_resume (lp->ptid, step, signo);

  /* Successfully resumed.  Clear state that no longer makes sense,
     and mark the LWP as running.  Must not do this before resuming
     otherwise if that fails other code will be confused.  E.g., we'd
     later try to stop the LWP and hang forever waiting for a stop
     status.  Note that we must not throw after this is cleared,
     otherwise handle_zombie_lwp_error would get confused.  */
  lp->stopped = 0;
  lp->core = -1;
  lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
  registers_changed_ptid (linux_target, lp->ptid);
}
1559
1560 /* Called when we try to resume a stopped LWP and that errors out. If
1561 the LWP is no longer in ptrace-stopped state (meaning it's zombie,
1562 or about to become), discard the error, clear any pending status
1563 the LWP may have, and return true (we'll collect the exit status
1564 soon enough). Otherwise, return false. */
1565
1566 static int
1567 check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
1568 {
1569 /* If we get an error after resuming the LWP successfully, we'd
1570 confuse !T state for the LWP being gone. */
1571 gdb_assert (lp->stopped);
1572
1573 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
1574 because even if ptrace failed with ESRCH, the tracee may be "not
1575 yet fully dead", but already refusing ptrace requests. In that
1576 case the tracee has 'R (Running)' state for a little bit
1577 (observed in Linux 3.18). See also the note on ESRCH in the
1578 ptrace(2) man page. Instead, check whether the LWP has any state
1579 other than ptrace-stopped. */
1580
1581 /* Don't assume anything if /proc/PID/status can't be read. */
1582 if (linux_proc_pid_is_trace_stopped_nowarn (lp->ptid.lwp ()) == 0)
1583 {
1584 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
1585 lp->status = 0;
1586 lp->waitstatus.set_ignore ();
1587 return 1;
1588 }
1589 return 0;
1590 }
1591
/* Like linux_resume_one_lwp_throw, but no error is thrown if the LWP
   disappears while we try to resume it.  */

static void
linux_resume_one_lwp (struct lwp_info *lp, int step, enum gdb_signal signo)
{
  try
    {
      linux_resume_one_lwp_throw (lp, step, signo);
    }
  catch (const gdb_exception_error &ex)
    {
      /* Swallow the error only if the LWP turned out to be gone
	 (zombie or exiting); otherwise re-throw.  */
      if (!check_ptrace_stopped_lwp_gone (lp))
	throw;
    }
}
1608
1609 /* Resume LP. */
1610
1611 static void
1612 resume_lwp (struct lwp_info *lp, int step, enum gdb_signal signo)
1613 {
1614 if (lp->stopped)
1615 {
1616 struct inferior *inf = find_inferior_ptid (linux_target, lp->ptid);
1617
1618 if (inf->vfork_child != NULL)
1619 {
1620 linux_nat_debug_printf ("Not resuming %s (vfork parent)",
1621 target_pid_to_str (lp->ptid).c_str ());
1622 }
1623 else if (!lwp_status_pending_p (lp))
1624 {
1625 linux_nat_debug_printf ("Resuming sibling %s, %s, %s",
1626 target_pid_to_str (lp->ptid).c_str (),
1627 (signo != GDB_SIGNAL_0
1628 ? strsignal (gdb_signal_to_host (signo))
1629 : "0"),
1630 step ? "step" : "resume");
1631
1632 linux_resume_one_lwp (lp, step, signo);
1633 }
1634 else
1635 {
1636 linux_nat_debug_printf ("Not resuming sibling %s (has pending)",
1637 target_pid_to_str (lp->ptid).c_str ());
1638 }
1639 }
1640 else
1641 linux_nat_debug_printf ("Not resuming sibling %s (not stopped)",
1642 target_pid_to_str (lp->ptid).c_str ());
1643 }
1644
1645 /* Callback for iterate_over_lwps. If LWP is EXCEPT, do nothing.
1646 Resume LWP with the last stop signal, if it is in pass state. */
1647
1648 static int
1649 linux_nat_resume_callback (struct lwp_info *lp, struct lwp_info *except)
1650 {
1651 enum gdb_signal signo = GDB_SIGNAL_0;
1652
1653 if (lp == except)
1654 return 0;
1655
1656 if (lp->stopped)
1657 {
1658 struct thread_info *thread;
1659
1660 thread = find_thread_ptid (linux_target, lp->ptid);
1661 if (thread != NULL)
1662 {
1663 signo = thread->stop_signal ();
1664 thread->set_stop_signal (GDB_SIGNAL_0);
1665 }
1666 }
1667
1668 resume_lwp (lp, 0, signo);
1669 return 0;
1670 }
1671
1672 static int
1673 resume_clear_callback (struct lwp_info *lp)
1674 {
1675 lp->resumed = 0;
1676 lp->last_resume_kind = resume_stop;
1677 return 0;
1678 }
1679
1680 static int
1681 resume_set_callback (struct lwp_info *lp)
1682 {
1683 lp->resumed = 1;
1684 lp->last_resume_kind = resume_continue;
1685 return 0;
1686 }
1687
/* Implementation of target_ops::resume for GNU/Linux.  Resume PTID
   (all LWPs of a process, or a single LWP), optionally single-stepping
   and/or delivering SIGNO to the event thread.  */

void
linux_nat_target::resume (ptid_t ptid, int step, enum gdb_signal signo)
{
  struct lwp_info *lp;
  int resume_many;

  linux_nat_debug_printf ("Preparing to %s %s, %s, inferior_ptid %s",
			  step ? "step" : "resume",
			  target_pid_to_str (ptid).c_str (),
			  (signo != GDB_SIGNAL_0
			   ? strsignal (gdb_signal_to_host (signo)) : "0"),
			  target_pid_to_str (inferior_ptid).c_str ());

  /* A specific PTID means `step only this process id'.  */
  resume_many = (minus_one_ptid == ptid
		 || ptid.is_pid ());

  /* Mark the lwps we're resuming as resumed and update their
     last_resume_kind to resume_continue.  */
  iterate_over_lwps (ptid, resume_set_callback);

  /* See if it's the current inferior that should be handled
     specially.  */
  if (resume_many)
    lp = find_lwp_pid (inferior_ptid);
  else
    lp = find_lwp_pid (ptid);
  gdb_assert (lp != NULL);

  /* Remember if we're stepping.  */
  lp->last_resume_kind = step ? resume_step : resume_continue;

  /* If we have a pending wait status for this thread, there is no
     point in resuming the process.  But first make sure that
     linux_nat_wait won't preemptively handle the event - we
     should never take this short-circuit if we are going to
     leave LP running, since we have skipped resuming all the
     other threads.  This bit of code needs to be synchronized
     with linux_nat_wait.  */

  if (lp->status && WIFSTOPPED (lp->status))
    {
      /* A pending stop for a signal the user has set to "pass" would
	 be handled by linux_nat_wait without reporting it; consume it
	 here and deliver the signal on resumption instead.  */
      if (!lp->step
	  && WSTOPSIG (lp->status)
	  && sigismember (&pass_mask, WSTOPSIG (lp->status)))
	{
	  linux_nat_debug_printf
	    ("Not short circuiting for ignored status 0x%x", lp->status);

	  /* FIXME: What should we do if we are supposed to continue
	     this thread with a signal?  */
	  gdb_assert (signo == GDB_SIGNAL_0);
	  signo = gdb_signal_from_host (WSTOPSIG (lp->status));
	  lp->status = 0;
	}
    }

  if (lwp_status_pending_p (lp))
    {
      /* FIXME: What should we do if we are supposed to continue
	 this thread with a signal?  */
      gdb_assert (signo == GDB_SIGNAL_0);

      linux_nat_debug_printf ("Short circuiting for status 0x%x",
			      lp->status);

      if (target_can_async_p ())
	{
	  target_async (1);
	  /* Tell the event loop we have something to process.  */
	  async_file_mark ();
	}
      return;
    }

  /* Resume all the other LWPs first, then the event thread.  */
  if (resume_many)
    iterate_over_lwps (ptid, [=] (struct lwp_info *info)
		       {
			 return linux_nat_resume_callback (info, lp);
		       });

  linux_nat_debug_printf ("%s %s, %s (resume event thread)",
			  step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
			  target_pid_to_str (lp->ptid).c_str (),
			  (signo != GDB_SIGNAL_0
			   ? strsignal (gdb_signal_to_host (signo)) : "0"));

  linux_resume_one_lwp (lp, step, signo);

  if (target_can_async_p ())
    target_async (1);
}
1780
1781 /* Send a signal to an LWP. */
1782
1783 static int
1784 kill_lwp (int lwpid, int signo)
1785 {
1786 int ret;
1787
1788 errno = 0;
1789 ret = syscall (__NR_tkill, lwpid, signo);
1790 if (errno == ENOSYS)
1791 {
1792 /* If tkill fails, then we are not using nptl threads, a
1793 configuration we no longer support. */
1794 perror_with_name (("tkill"));
1795 }
1796 return ret;
1797 }
1798
/* Handle a GNU/Linux syscall trap wait response.  If we see a syscall
   event, check if the core is interested in it: if not, ignore the
   event, and keep waiting; otherwise, we need to toggle the LWP's
   syscall entry/exit status, since the ptrace event itself doesn't
   indicate it, and report the trap to higher layers.  Returns
   non-zero if the event was handled internally (the LWP was resumed
   and the caller should keep waiting); zero if the event should be
   reported to the core.  */

static int
linux_handle_syscall_trap (struct lwp_info *lp, int stopping)
{
  struct target_waitstatus *ourstatus = &lp->waitstatus;
  struct gdbarch *gdbarch = target_thread_architecture (lp->ptid);
  thread_info *thread = find_thread_ptid (linux_target, lp->ptid);
  int syscall_number = (int) gdbarch_get_syscall_number (gdbarch, thread);

  if (stopping)
    {
      /* If we're stopping threads, there's a SIGSTOP pending, which
	 makes it so that the LWP reports an immediate syscall return,
	 followed by the SIGSTOP.  Skip seeing that "return" using
	 PTRACE_CONT directly, and let stop_wait_callback collect the
	 SIGSTOP.  Later when the thread is resumed, a new syscall
	 entry event.  If we didn't do this (and returned 0), we'd
	 leave a syscall entry pending, and our caller, by using
	 PTRACE_CONT to collect the SIGSTOP, skips the syscall return
	 itself.  Later, when the user re-resumes this LWP, we'd see
	 another syscall entry event and we'd mistake it for a return.

	 If stop_wait_callback didn't force the SIGSTOP out of the LWP
	 (leaving immediately with LWP->signalled set, without issuing
	 a PTRACE_CONT), it would still be problematic to leave this
	 syscall enter pending, as later when the thread is resumed,
	 it would then see the same syscall exit mentioned above,
	 followed by the delayed SIGSTOP, while the syscall didn't
	 actually get to execute.  It seems it would be even more
	 confusing to the user.  */

      linux_nat_debug_printf
	("ignoring syscall %d for LWP %ld (stopping threads), resuming with "
	 "PTRACE_CONT for SIGSTOP", syscall_number, lp->ptid.lwp ());

      lp->syscall_state = TARGET_WAITKIND_IGNORE;
      ptrace (PTRACE_CONT, lp->ptid.lwp (), 0, 0);
      lp->stopped = 0;
      return 1;
    }

  /* Always update the entry/return state, even if this particular
     syscall isn't interesting to the core now.  In async mode,
     the user could install a new catchpoint for this syscall
     between syscall enter/return, and we'll need to know to
     report a syscall return if that happens.  */
  lp->syscall_state = (lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
		       ? TARGET_WAITKIND_SYSCALL_RETURN
		       : TARGET_WAITKIND_SYSCALL_ENTRY);

  if (catch_syscall_enabled ())
    {
      if (catching_syscall_number (syscall_number))
	{
	  /* Alright, an event to report.  */
	  if (lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY)
	    ourstatus->set_syscall_entry (syscall_number);
	  else if (lp->syscall_state == TARGET_WAITKIND_SYSCALL_RETURN)
	    ourstatus->set_syscall_return (syscall_number);
	  else
	    gdb_assert_not_reached ("unexpected syscall state");

	  linux_nat_debug_printf
	    ("stopping for %s of syscall %d for LWP %ld",
	     (lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
	      ? "entry" : "return"), syscall_number, lp->ptid.lwp ());

	  return 0;
	}

      linux_nat_debug_printf
	("ignoring %s of syscall %d for LWP %ld",
	 (lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
	  ? "entry" : "return"), syscall_number, lp->ptid.lwp ());
    }
  else
    {
      /* If we had been syscall tracing, and hence used PT_SYSCALL
	 before on this LWP, it could happen that the user removes all
	 syscall catchpoints before we get to process this event.
	 There are two noteworthy issues here:

	 - When stopped at a syscall entry event, resuming with
	   PT_STEP still resumes executing the syscall and reports a
	   syscall return.

	 - Only PT_SYSCALL catches syscall enters.  If we last
	   single-stepped this thread, then this event can't be a
	   syscall enter.  If we last single-stepped this thread, this
	   has to be a syscall exit.

	 The points above mean that the next resume, be it PT_STEP or
	 PT_CONTINUE, can not trigger a syscall trace event.  */
      linux_nat_debug_printf
	("caught syscall event with no syscall catchpoints. %d for LWP %ld, "
	 "ignoring", syscall_number, lp->ptid.lwp ());
      lp->syscall_state = TARGET_WAITKIND_IGNORE;
    }

  /* The core isn't interested in this event.  For efficiency, avoid
     stopping all threads only to have the core resume them all again.
     Since we're not stopping threads, if we're still syscall tracing
     and not stepping, we can't use PTRACE_CONT here, as we'd miss any
     subsequent syscall.  Simply resume using the inf-ptrace layer,
     which knows when to use PT_SYSCALL or PT_CONTINUE.  */

  linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
  return 1;
}
1913
/* Handle a GNU/Linux extended wait response.  If we see a clone
   event, we need to add the new LWP to our list (and not report the
   trap to higher layers).  This function returns non-zero if the
   event should be ignored and we should wait again.  */
1919
1920 static int
1921 linux_handle_extended_wait (struct lwp_info *lp, int status)
1922 {
1923 int pid = lp->ptid.lwp ();
1924 struct target_waitstatus *ourstatus = &lp->waitstatus;
1925 int event = linux_ptrace_get_extended_event (status);
1926
1927 /* All extended events we currently use are mid-syscall. Only
1928 PTRACE_EVENT_STOP is delivered more like a signal-stop, but
1929 you have to be using PTRACE_SEIZE to get that. */
1930 lp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;
1931
1932 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK
1933 || event == PTRACE_EVENT_CLONE)
1934 {
1935 unsigned long new_pid;
1936 int ret;
1937
1938 ptrace (PTRACE_GETEVENTMSG, pid, 0, &new_pid);
1939
1940 /* If we haven't already seen the new PID stop, wait for it now. */
1941 if (! pull_pid_from_list (&stopped_pids, new_pid, &status))
1942 {
1943 /* The new child has a pending SIGSTOP. We can't affect it until it
1944 hits the SIGSTOP, but we're already attached. */
1945 ret = my_waitpid (new_pid, &status, __WALL);
1946 if (ret == -1)
1947 perror_with_name (_("waiting for new child"));
1948 else if (ret != new_pid)
1949 internal_error (__FILE__, __LINE__,
1950 _("wait returned unexpected PID %d"), ret);
1951 else if (!WIFSTOPPED (status))
1952 internal_error (__FILE__, __LINE__,
1953 _("wait returned unexpected status 0x%x"), status);
1954 }
1955
1956 ptid_t child_ptid (new_pid, new_pid);
1957
1958 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
1959 {
1960 open_proc_mem_file (child_ptid);
1961
1962 /* The arch-specific native code may need to know about new
1963 forks even if those end up never mapped to an
1964 inferior. */
1965 linux_target->low_new_fork (lp, new_pid);
1966 }
1967 else if (event == PTRACE_EVENT_CLONE)
1968 {
1969 linux_target->low_new_clone (lp, new_pid);
1970 }
1971
1972 if (event == PTRACE_EVENT_FORK
1973 && linux_fork_checkpointing_p (lp->ptid.pid ()))
1974 {
1975 /* Handle checkpointing by linux-fork.c here as a special
1976 case. We don't want the follow-fork-mode or 'catch fork'
1977 to interfere with this. */
1978
1979 /* This won't actually modify the breakpoint list, but will
1980 physically remove the breakpoints from the child. */
1981 detach_breakpoints (ptid_t (new_pid, new_pid));
1982
1983 /* Retain child fork in ptrace (stopped) state. */
1984 if (!find_fork_pid (new_pid))
1985 add_fork (new_pid);
1986
1987 /* Report as spurious, so that infrun doesn't want to follow
1988 this fork. We're actually doing an infcall in
1989 linux-fork.c. */
1990 ourstatus->set_spurious ();
1991
1992 /* Report the stop to the core. */
1993 return 0;
1994 }
1995
1996 if (event == PTRACE_EVENT_FORK)
1997 ourstatus->set_forked (child_ptid);
1998 else if (event == PTRACE_EVENT_VFORK)
1999 ourstatus->set_vforked (child_ptid);
2000 else if (event == PTRACE_EVENT_CLONE)
2001 {
2002 struct lwp_info *new_lp;
2003
2004 ourstatus->set_ignore ();
2005
2006 linux_nat_debug_printf
2007 ("Got clone event from LWP %d, new child is LWP %ld", pid, new_pid);
2008
2009 new_lp = add_lwp (ptid_t (lp->ptid.pid (), new_pid));
2010 new_lp->stopped = 1;
2011 new_lp->resumed = 1;
2012
2013 /* If the thread_db layer is active, let it record the user
2014 level thread id and status, and add the thread to GDB's
2015 list. */
2016 if (!thread_db_notice_clone (lp->ptid, new_lp->ptid))
2017 {
2018 /* The process is not using thread_db. Add the LWP to
2019 GDB's list. */
2020 target_post_attach (new_lp->ptid.lwp ());
2021 add_thread (linux_target, new_lp->ptid);
2022 }
2023
2024 /* Even if we're stopping the thread for some reason
2025 internal to this module, from the perspective of infrun
2026 and the user/frontend, this new thread is running until
2027 it next reports a stop. */
2028 set_running (linux_target, new_lp->ptid, true);
2029 set_executing (linux_target, new_lp->ptid, true);
2030
2031 if (WSTOPSIG (status) != SIGSTOP)
2032 {
2033 /* This can happen if someone starts sending signals to
2034 the new thread before it gets a chance to run, which
2035 have a lower number than SIGSTOP (e.g. SIGUSR1).
2036 This is an unlikely case, and harder to handle for
2037 fork / vfork than for clone, so we do not try - but
2038 we handle it for clone events here. */
2039
2040 new_lp->signalled = 1;
2041
2042 /* We created NEW_LP so it cannot yet contain STATUS. */
2043 gdb_assert (new_lp->status == 0);
2044
2045 /* Save the wait status to report later. */
2046 linux_nat_debug_printf
2047 ("waitpid of new LWP %ld, saving status %s",
2048 (long) new_lp->ptid.lwp (), status_to_str (status).c_str ());
2049 new_lp->status = status;
2050 }
2051 else if (report_thread_events)
2052 {
2053 new_lp->waitstatus.set_thread_created ();
2054 new_lp->status = status;
2055 }
2056
2057 return 1;
2058 }
2059
2060 return 0;
2061 }
2062
2063 if (event == PTRACE_EVENT_EXEC)
2064 {
2065 linux_nat_debug_printf ("Got exec event from LWP %ld", lp->ptid.lwp ());
2066
2067 /* Close the previous /proc/PID/mem file for this inferior,
2068 which was using the address space which is now gone.
2069 Reading/writing from this file would return 0/EOF. */
2070 close_proc_mem_file (lp->ptid.pid ());
2071
2072 /* Open a new file for the new address space. */
2073 open_proc_mem_file (lp->ptid);
2074
2075 ourstatus->set_execd
2076 (make_unique_xstrdup (linux_proc_pid_to_exec_file (pid)));
2077
2078 /* The thread that execed must have been resumed, but, when a
2079 thread execs, it changes its tid to the tgid, and the old
2080 tgid thread might have not been resumed. */
2081 lp->resumed = 1;
2082 return 0;
2083 }
2084
2085 if (event == PTRACE_EVENT_VFORK_DONE)
2086 {
2087 if (current_inferior ()->waiting_for_vfork_done)
2088 {
2089 linux_nat_debug_printf
2090 ("Got expected PTRACE_EVENT_VFORK_DONE from LWP %ld: stopping",
2091 lp->ptid.lwp ());
2092
2093 ourstatus->set_vfork_done ();
2094 return 0;
2095 }
2096
2097 linux_nat_debug_printf
2098 ("Got PTRACE_EVENT_VFORK_DONE from LWP %ld: ignoring", lp->ptid.lwp ());
2099
2100 return 1;
2101 }
2102
2103 internal_error (__FILE__, __LINE__,
2104 _("unknown ptrace event %d"), event);
2105 }
2106
2107 /* Suspend waiting for a signal. We're mostly interested in
2108 SIGCHLD/SIGINT. */
2109
2110 static void
2111 wait_for_signal ()
2112 {
2113 linux_nat_debug_printf ("about to sigsuspend");
2114 sigsuspend (&suspend_mask);
2115
2116 /* If the quit flag is set, it means that the user pressed Ctrl-C
2117 and we're debugging a process that is running on a separate
2118 terminal, so we must forward the Ctrl-C to the inferior. (If the
2119 inferior is sharing GDB's terminal, then the Ctrl-C reaches the
2120 inferior directly.) We must do this here because functions that
2121 need to block waiting for a signal loop forever until there's an
2122 event to report before returning back to the event loop. */
2123 if (!target_terminal::is_ours ())
2124 {
2125 if (check_quit_flag ())
2126 target_pass_ctrlc ();
2127 }
2128 }
2129
2130 /* Wait for LP to stop. Returns the wait status, or 0 if the LWP has
2131 exited. */
2132
static int
wait_lwp (struct lwp_info *lp)
{
  pid_t pid;
  int status = 0;
  int thread_dead = 0;
  sigset_t prev_mask;

  /* Caller guarantees the LWP is running (from our perspective) and
     has no status already recorded.  */
  gdb_assert (!lp->stopped);
  gdb_assert (lp->status == 0);

  /* Make sure SIGCHLD is blocked for sigsuspend avoiding a race below.  */
  block_child_signals (&prev_mask);

  for (;;)
    {
      /* WNOHANG is essential: see the zombie-leader workaround below
	 for why we must never block inside waitpid itself.  */
      pid = my_waitpid (lp->ptid.lwp (), &status, __WALL | WNOHANG);
      if (pid == -1 && errno == ECHILD)
	{
	  /* The thread has previously exited.  We need to delete it
	     now because if this was a non-leader thread execing, we
	     won't get an exit event.  See comments on exec events at
	     the top of the file.  */
	  thread_dead = 1;
	  linux_nat_debug_printf ("%s vanished.",
				  target_pid_to_str (lp->ptid).c_str ());
	}
      if (pid != 0)
	break;

      /* Bugs 10970, 12702.
	 Thread group leader may have exited in which case we'll lock up in
	 waitpid if there are other threads, even if they are all zombies too.
	 Basically, we're not supposed to use waitpid this way.
	 tkill(pid,0) cannot be used here as it gets ESRCH for both
	 for zombie and running processes.

	 As a workaround, check if we're waiting for the thread group leader and
	 if it's a zombie, and avoid calling waitpid if it is.

	 This is racy, what if the tgl becomes a zombie right after we check?
	 Therefore always use WNOHANG with sigsuspend - it is equivalent to
	 waiting waitpid but linux_proc_pid_is_zombie is safe this way.  */

      /* pid == lwp means LP is the thread group leader.  */
      if (lp->ptid.pid () == lp->ptid.lwp ()
	  && linux_proc_pid_is_zombie (lp->ptid.lwp ()))
	{
	  thread_dead = 1;
	  linux_nat_debug_printf ("Thread group leader %s vanished.",
				  target_pid_to_str (lp->ptid).c_str ());
	  break;
	}

      /* Wait for next SIGCHLD and try again.  This may let SIGCHLD handlers
	 get invoked despite our caller had them intentionally blocked by
	 block_child_signals.  This is sensitive only to the loop of
	 linux_nat_wait_1 and there if we get called my_waitpid gets called
	 again before it gets to sigsuspend so we can safely let the handlers
	 get executed here.  */
      wait_for_signal ();
    }

  restore_child_signals_mask (&prev_mask);

  if (!thread_dead)
    {
      gdb_assert (pid == lp->ptid.lwp ());

      linux_nat_debug_printf ("waitpid %s received %s",
			      target_pid_to_str (lp->ptid).c_str (),
			      status_to_str (status).c_str ());

      /* Check if the thread has exited.  */
      if (WIFEXITED (status) || WIFSIGNALED (status))
	{
	  if (report_thread_events
	      || lp->ptid.pid () == lp->ptid.lwp ())
	    {
	      linux_nat_debug_printf ("LWP %d exited.", lp->ptid.pid ());

	      /* If this is the leader exiting, it means the whole
		 process is gone.  Store the status to report to the
		 core.  Store it in lp->waitstatus, because lp->status
		 would be ambiguous (W_EXITCODE(0,0) == 0).  */
	      lp->waitstatus = host_status_to_waitstatus (status);
	      return 0;
	    }

	  /* A non-leader thread exited and thread-exit events were
	     not requested; just clean it up silently below.  */
	  thread_dead = 1;
	  linux_nat_debug_printf ("%s exited.",
				  target_pid_to_str (lp->ptid).c_str ());
	}
    }

  if (thread_dead)
    {
      /* Delete the LWP's bookkeeping; it is gone.  */
      exit_lwp (lp);
      return 0;
    }

  gdb_assert (WIFSTOPPED (status));
  lp->stopped = 1;

  /* If the ptrace options could not be set earlier, set them now that
     the LWP is known to be stopped.  */
  if (lp->must_set_ptrace_flags)
    {
      inferior *inf = find_inferior_pid (linux_target, lp->ptid.pid ());
      int options = linux_nat_ptrace_options (inf->attach_flag);

      linux_enable_event_reporting (lp->ptid.lwp (), options);
      lp->must_set_ptrace_flags = 0;
    }

  /* Handle GNU/Linux's syscall SIGTRAPs.  */
  if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
    {
      /* No longer need the sysgood bit.  The ptrace event ends up
	 recorded in lp->waitstatus if we care for it.  We can carry
	 on handling the event like a regular SIGTRAP from here
	 on.  */
      status = W_STOPCODE (SIGTRAP);
      if (linux_handle_syscall_trap (lp, 1))
	return wait_lwp (lp);
    }
  else
    {
      /* Almost all other ptrace-stops are known to be outside of system
	 calls, with further exceptions in linux_handle_extended_wait.  */
      lp->syscall_state = TARGET_WAITKIND_IGNORE;
    }

  /* Handle GNU/Linux's extended waitstatus for trace events.  */
  if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP
      && linux_is_extended_waitstatus (status))
    {
      linux_nat_debug_printf ("Handling extended status 0x%06x", status);
      linux_handle_extended_wait (lp, status);
      return 0;
    }

  return status;
}
2274
2275 /* Send a SIGSTOP to LP. */
2276
2277 static int
2278 stop_callback (struct lwp_info *lp)
2279 {
2280 if (!lp->stopped && !lp->signalled)
2281 {
2282 int ret;
2283
2284 linux_nat_debug_printf ("kill %s **<SIGSTOP>**",
2285 target_pid_to_str (lp->ptid).c_str ());
2286
2287 errno = 0;
2288 ret = kill_lwp (lp->ptid.lwp (), SIGSTOP);
2289 linux_nat_debug_printf ("lwp kill %d %s", ret,
2290 errno ? safe_strerror (errno) : "ERRNO-OK");
2291
2292 lp->signalled = 1;
2293 gdb_assert (lp->status == 0);
2294 }
2295
2296 return 0;
2297 }
2298
2299 /* Request a stop on LWP. */
2300
void
linux_stop_lwp (struct lwp_info *lwp)
{
  /* Delegates to stop_callback, which sends LWP a SIGSTOP unless it
     is already stopped or already has a SIGSTOP in flight.  */
  stop_callback (lwp);
}
2306
2307 /* See linux-nat.h */
2308
2309 void
2310 linux_stop_and_wait_all_lwps (void)
2311 {
2312 /* Stop all LWP's ... */
2313 iterate_over_lwps (minus_one_ptid, stop_callback);
2314
2315 /* ... and wait until all of them have reported back that
2316 they're no longer running. */
2317 iterate_over_lwps (minus_one_ptid, stop_wait_callback);
2318 }
2319
2320 /* See linux-nat.h */
2321
2322 void
2323 linux_unstop_all_lwps (void)
2324 {
2325 iterate_over_lwps (minus_one_ptid,
2326 [] (struct lwp_info *info)
2327 {
2328 return resume_stopped_resumed_lwps (info, minus_one_ptid);
2329 });
2330 }
2331
2332 /* Return non-zero if LWP PID has a pending SIGINT. */
2333
2334 static int
2335 linux_nat_has_pending_sigint (int pid)
2336 {
2337 sigset_t pending, blocked, ignored;
2338
2339 linux_proc_pending_signals (pid, &pending, &blocked, &ignored);
2340
2341 if (sigismember (&pending, SIGINT)
2342 && !sigismember (&ignored, SIGINT))
2343 return 1;
2344
2345 return 0;
2346 }
2347
2348 /* Set a flag in LP indicating that we should ignore its next SIGINT. */
2349
2350 static int
2351 set_ignore_sigint (struct lwp_info *lp)
2352 {
2353 /* If a thread has a pending SIGINT, consume it; otherwise, set a
2354 flag to consume the next one. */
2355 if (lp->stopped && lp->status != 0 && WIFSTOPPED (lp->status)
2356 && WSTOPSIG (lp->status) == SIGINT)
2357 lp->status = 0;
2358 else
2359 lp->ignore_sigint = 1;
2360
2361 return 0;
2362 }
2363
2364 /* If LP does not have a SIGINT pending, then clear the ignore_sigint flag.
2365 This function is called after we know the LWP has stopped; if the LWP
2366 stopped before the expected SIGINT was delivered, then it will never have
2367 arrived. Also, if the signal was delivered to a shared queue and consumed
2368 by a different thread, it will never be delivered to this LWP. */
2369
2370 static void
2371 maybe_clear_ignore_sigint (struct lwp_info *lp)
2372 {
2373 if (!lp->ignore_sigint)
2374 return;
2375
2376 if (!linux_nat_has_pending_sigint (lp->ptid.lwp ()))
2377 {
2378 linux_nat_debug_printf ("Clearing bogus flag for %s",
2379 target_pid_to_str (lp->ptid).c_str ());
2380 lp->ignore_sigint = 0;
2381 }
2382 }
2383
2384 /* Fetch the possible triggered data watchpoint info and store it in
2385 LP.
2386
2387 On some archs, like x86, that use debug registers to set
2388 watchpoints, it's possible that the way to know which watched
2389 address trapped, is to check the register that is used to select
2390 which address to watch. Problem is, between setting the watchpoint
2391 and reading back which data address trapped, the user may change
2392 the set of watchpoints, and, as a consequence, GDB changes the
2393 debug registers in the inferior. To avoid reading back a stale
2394 stopped-data-address when that happens, we cache in LP the fact
2395 that a watchpoint trapped, and the corresponding data address, as
2396 soon as we see LP stop with a SIGTRAP. If GDB changes the debug
2397 registers meanwhile, we have the cached data we can rely on. */
2398
2399 static int
2400 check_stopped_by_watchpoint (struct lwp_info *lp)
2401 {
2402 scoped_restore save_inferior_ptid = make_scoped_restore (&inferior_ptid);
2403 inferior_ptid = lp->ptid;
2404
2405 if (linux_target->low_stopped_by_watchpoint ())
2406 {
2407 lp->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
2408 lp->stopped_data_address_p
2409 = linux_target->low_stopped_data_address (&lp->stopped_data_address);
2410 }
2411
2412 return lp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
2413 }
2414
2415 /* Returns true if the LWP had stopped for a watchpoint. */
2416
2417 bool
2418 linux_nat_target::stopped_by_watchpoint ()
2419 {
2420 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2421
2422 gdb_assert (lp != NULL);
2423
2424 return lp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
2425 }
2426
2427 bool
2428 linux_nat_target::stopped_data_address (CORE_ADDR *addr_p)
2429 {
2430 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2431
2432 gdb_assert (lp != NULL);
2433
2434 *addr_p = lp->stopped_data_address;
2435
2436 return lp->stopped_data_address_p;
2437 }
2438
2439 /* Commonly any breakpoint / watchpoint generate only SIGTRAP. */
2440
2441 bool
2442 linux_nat_target::low_status_is_event (int status)
2443 {
2444 return WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP;
2445 }
2446
2447 /* Wait until LP is stopped. */
2448
static int
stop_wait_callback (struct lwp_info *lp)
{
  inferior *inf = find_inferior_ptid (linux_target, lp->ptid);

  /* If this is a vfork parent, bail out, it is not going to report
     any SIGSTOP until the vfork is done with.  */
  if (inf->vfork_child != NULL)
    return 0;

  if (!lp->stopped)
    {
      int status;

      /* wait_lwp returns 0 when the LWP turned out to have exited.  */
      status = wait_lwp (lp);
      if (status == 0)
	return 0;

      /* A SIGINT we were told to ignore (see set_ignore_sigint):
	 discard it and keep waiting for the SIGSTOP we expect.  */
      if (lp->ignore_sigint && WIFSTOPPED (status)
	  && WSTOPSIG (status) == SIGINT)
	{
	  lp->ignore_sigint = 0;

	  errno = 0;
	  ptrace (PTRACE_CONT, lp->ptid.lwp (), 0, 0);
	  lp->stopped = 0;
	  linux_nat_debug_printf
	    ("PTRACE_CONT %s, 0, 0 (%s) (discarding SIGINT)",
	     target_pid_to_str (lp->ptid).c_str (),
	     errno ? safe_strerror (errno) : "OK");

	  /* Recurse: the LWP is running again, wait for it to stop
	     once more.  */
	  return stop_wait_callback (lp);
	}

      maybe_clear_ignore_sigint (lp);

      if (WSTOPSIG (status) != SIGSTOP)
	{
	  /* The thread was stopped with a signal other than SIGSTOP.  */

	  linux_nat_debug_printf ("Pending event %s in %s",
				  status_to_str ((int) status).c_str (),
				  target_pid_to_str (lp->ptid).c_str ());

	  /* Save the sigtrap event.  */
	  lp->status = status;
	  gdb_assert (lp->signalled);
	  save_stop_reason (lp);
	}
      else
	{
	  /* We caught the SIGSTOP that we intended to catch.  */

	  linux_nat_debug_printf ("Expected SIGSTOP caught for %s.",
				  target_pid_to_str (lp->ptid).c_str ());

	  lp->signalled = 0;

	  /* If we are waiting for this stop so we can report the thread
	     stopped then we need to record this status.  Otherwise, we can
	     now discard this stop event.  */
	  if (lp->last_resume_kind == resume_stop)
	    {
	      lp->status = status;
	      save_stop_reason (lp);
	    }
	}
    }

  return 0;
}
2520
2521 /* Return non-zero if LP has a wait status pending. Discard the
2522 pending event and resume the LWP if the event that originally
2523 caused the stop became uninteresting. */
2524
static int
status_callback (struct lwp_info *lp)
{
  /* Only report a pending wait status if we pretend that this has
     indeed been resumed.  */
  if (!lp->resumed)
    return 0;

  if (!lwp_status_pending_p (lp))
    return 0;

  if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
      || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
    {
      /* The LWP stopped at a breakpoint.  If the thread has since
	 moved, or (without siginfo support) the breakpoint is gone,
	 the event is stale and must be discarded.  */
      struct regcache *regcache = get_thread_regcache (linux_target, lp->ptid);
      CORE_ADDR pc;
      int discard = 0;

      pc = regcache_read_pc (regcache);

      if (pc != lp->stop_pc)
	{
	  linux_nat_debug_printf ("PC of %s changed. was=%s, now=%s",
				  target_pid_to_str (lp->ptid).c_str (),
				  paddress (target_gdbarch (), lp->stop_pc),
				  paddress (target_gdbarch (), pc));
	  discard = 1;
	}

#if !USE_SIGTRAP_SIGINFO
      else if (!breakpoint_inserted_here_p (regcache->aspace (), pc))
	{
	  linux_nat_debug_printf ("previous breakpoint of %s, at %s gone",
				  target_pid_to_str (lp->ptid).c_str (),
				  paddress (target_gdbarch (), lp->stop_pc));

	  discard = 1;
	}
#endif

      if (discard)
	{
	  linux_nat_debug_printf ("pending event of %s cancelled.",
				  target_pid_to_str (lp->ptid).c_str ());

	  /* Throw away the stale event and set the LWP running
	     again.  */
	  lp->status = 0;
	  linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
	  return 0;
	}
    }

  return 1;
}
2578
2579 /* Count the LWP's that have had events. */
2580
2581 static int
2582 count_events_callback (struct lwp_info *lp, int *count)
2583 {
2584 gdb_assert (count != NULL);
2585
2586 /* Select only resumed LWPs that have an event pending. */
2587 if (lp->resumed && lwp_status_pending_p (lp))
2588 (*count)++;
2589
2590 return 0;
2591 }
2592
2593 /* Select the LWP (if any) that is currently being single-stepped. */
2594
2595 static int
2596 select_singlestep_lwp_callback (struct lwp_info *lp)
2597 {
2598 if (lp->last_resume_kind == resume_step
2599 && lp->status != 0)
2600 return 1;
2601 else
2602 return 0;
2603 }
2604
2605 /* Returns true if LP has a status pending. */
2606
2607 static int
2608 lwp_status_pending_p (struct lwp_info *lp)
2609 {
2610 /* We check for lp->waitstatus in addition to lp->status, because we
2611 can have pending process exits recorded in lp->status and
2612 W_EXITCODE(0,0) happens to be 0. */
2613 return lp->status != 0 || lp->waitstatus.kind () != TARGET_WAITKIND_IGNORE;
2614 }
2615
2616 /* Select the Nth LWP that has had an event. */
2617
2618 static int
2619 select_event_lwp_callback (struct lwp_info *lp, int *selector)
2620 {
2621 gdb_assert (selector != NULL);
2622
2623 /* Select only resumed LWPs that have an event pending. */
2624 if (lp->resumed && lwp_status_pending_p (lp))
2625 if ((*selector)-- == 0)
2626 return 1;
2627
2628 return 0;
2629 }
2630
2631 /* Called when the LWP stopped for a signal/trap. If it stopped for a
2632 trap check what caused it (breakpoint, watchpoint, trace, etc.),
2633 and save the result in the LWP's stop_reason field. If it stopped
2634 for a breakpoint, decrement the PC if necessary on the lwp's
2635 architecture. */
2636
static void
save_stop_reason (struct lwp_info *lp)
{
  struct regcache *regcache;
  struct gdbarch *gdbarch;
  CORE_ADDR pc;
  CORE_ADDR sw_bp_pc;
#if USE_SIGTRAP_SIGINFO
  siginfo_t siginfo;
#endif

  /* Must only be called once per stop, with a status recorded.  */
  gdb_assert (lp->stop_reason == TARGET_STOPPED_BY_NO_REASON);
  gdb_assert (lp->status != 0);

  /* Nothing to classify unless this looks like a breakpoint /
     watchpoint event (by default, a SIGTRAP stop).  */
  if (!linux_target->low_status_is_event (lp->status))
    return;

  regcache = get_thread_regcache (linux_target, lp->ptid);
  gdbarch = regcache->arch ();

  /* SW_BP_PC is the PC adjusted back by the architecture's
     decr_pc_after_break, i.e. where a software breakpoint would have
     been planted.  */
  pc = regcache_read_pc (regcache);
  sw_bp_pc = pc - gdbarch_decr_pc_after_break (gdbarch);

#if USE_SIGTRAP_SIGINFO
  if (linux_nat_get_siginfo (lp->ptid, &siginfo))
    {
      if (siginfo.si_signo == SIGTRAP)
	{
	  if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code)
	      && GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
	    {
	      /* The si_code is ambiguous on this arch -- check debug
		 registers.  */
	      if (!check_stopped_by_watchpoint (lp))
		lp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
	    }
	  else if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
	    {
	      /* If we determine the LWP stopped for a SW breakpoint,
		 trust it.  Particularly don't check watchpoint
		 registers, because, at least on s390, we'd find
		 stopped-by-watchpoint as long as there's a watchpoint
		 set.  */
	      lp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
	    }
	  else if (GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
	    {
	      /* This can indicate either a hardware breakpoint or
		 hardware watchpoint.  Check debug registers.  */
	      if (!check_stopped_by_watchpoint (lp))
		lp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
	    }
	  else if (siginfo.si_code == TRAP_TRACE)
	    {
	      linux_nat_debug_printf ("%s stopped by trace",
				      target_pid_to_str (lp->ptid).c_str ());

	      /* We may have single stepped an instruction that
		 triggered a watchpoint.  In that case, on some
		 architectures (such as x86), instead of TRAP_HWBKPT,
		 si_code indicates TRAP_TRACE, and we need to check
		 the debug registers separately.  */
	      check_stopped_by_watchpoint (lp);
	    }
	}
    }
#else
  /* No usable siginfo: fall back to inspecting the breakpoint lists
     and debug registers.  */
  if ((!lp->step || lp->stop_pc == sw_bp_pc)
      && software_breakpoint_inserted_here_p (regcache->aspace (),
					      sw_bp_pc))
    {
      /* The LWP was either continued, or stepped a software
	 breakpoint instruction.  */
      lp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
    }

  if (hardware_breakpoint_inserted_here_p (regcache->aspace (), pc))
    lp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;

  if (lp->stop_reason == TARGET_STOPPED_BY_NO_REASON)
    check_stopped_by_watchpoint (lp);
#endif

  if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
    {
      linux_nat_debug_printf ("%s stopped by software breakpoint",
			      target_pid_to_str (lp->ptid).c_str ());

      /* Back up the PC if necessary.  */
      if (pc != sw_bp_pc)
	regcache_write_pc (regcache, sw_bp_pc);

      /* Update this so we record the correct stop PC below.  */
      pc = sw_bp_pc;
    }
  else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
    {
      linux_nat_debug_printf ("%s stopped by hardware breakpoint",
			      target_pid_to_str (lp->ptid).c_str ());
    }
  else if (lp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
    {
      linux_nat_debug_printf ("%s stopped by hardware watchpoint",
			      target_pid_to_str (lp->ptid).c_str ());
    }

  lp->stop_pc = pc;
}
2745
2746
2747 /* Returns true if the LWP had stopped for a software breakpoint. */
2748
2749 bool
2750 linux_nat_target::stopped_by_sw_breakpoint ()
2751 {
2752 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2753
2754 gdb_assert (lp != NULL);
2755
2756 return lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2757 }
2758
2759 /* Implement the supports_stopped_by_sw_breakpoint method. */
2760
bool
linux_nat_target::supports_stopped_by_sw_breakpoint ()
{
  /* Accurate SW-breakpoint stop reasons require decoding SIGTRAP
     siginfo si_code values; see save_stop_reason.  */
  return USE_SIGTRAP_SIGINFO;
}
2766
2767 /* Returns true if the LWP had stopped for a hardware
2768 breakpoint/watchpoint. */
2769
2770 bool
2771 linux_nat_target::stopped_by_hw_breakpoint ()
2772 {
2773 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2774
2775 gdb_assert (lp != NULL);
2776
2777 return lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2778 }
2779
2780 /* Implement the supports_stopped_by_hw_breakpoint method. */
2781
bool
linux_nat_target::supports_stopped_by_hw_breakpoint ()
{
  /* Accurate HW-breakpoint stop reasons require decoding SIGTRAP
     siginfo si_code values; see save_stop_reason.  */
  return USE_SIGTRAP_SIGINFO;
}
2787
2788 /* Select one LWP out of those that have events pending. */
2789
static void
select_event_lwp (ptid_t filter, struct lwp_info **orig_lp, int *status)
{
  int num_events = 0;
  int random_selector;
  struct lwp_info *event_lp = NULL;

  /* Record the wait status for the original LWP.  */
  (*orig_lp)->status = *status;

  /* In all-stop, give preference to the LWP that is being
     single-stepped.  There will be at most one, and it will be the
     LWP that the core is most interested in.  If we didn't do this,
     then we'd have to handle pending step SIGTRAPs somehow in case
     the core later continues the previously-stepped thread, as
     otherwise we'd report the pending SIGTRAP then, and the core, not
     having stepped the thread, wouldn't understand what the trap was
     for, and therefore would report it to the user as a random
     signal.  */
  if (!target_is_non_stop_p ())
    {
      event_lp = iterate_over_lwps (filter, select_singlestep_lwp_callback);
      if (event_lp != NULL)
	{
	  linux_nat_debug_printf ("Select single-step %s",
				  target_pid_to_str (event_lp->ptid).c_str ());
	}
    }

  if (event_lp == NULL)
    {
      /* Pick one at random, out of those which have had events.  */

      /* First see how many events we have.  */
      iterate_over_lwps (filter,
			 [&] (struct lwp_info *info)
			 {
			   return count_events_callback (info, &num_events);
			 });
      gdb_assert (num_events > 0);

      /* Now randomly pick a LWP out of those that have had
	 events.  */
      random_selector = (int)
	((num_events * (double) rand ()) / (RAND_MAX + 1.0));

      if (num_events > 1)
	linux_nat_debug_printf ("Found %d events, selecting #%d",
				num_events, random_selector);

      /* Walk the LWPs again, counting down until the randomly chosen
	 one is reached.  */
      event_lp
	= (iterate_over_lwps
	   (filter,
	    [&] (struct lwp_info *info)
	    {
	      return select_event_lwp_callback (info,
						&random_selector);
	    }));
    }

  if (event_lp != NULL)
    {
      /* Switch the event LWP.  */
      *orig_lp = event_lp;
      *status = event_lp->status;
    }

  /* Flush the wait status for the event LWP.  At this point *orig_lp
     is the chosen LWP (whether switched or not), and its status has
     already been copied into *status above.  */
  (*orig_lp)->status = 0;
}
2860
2861 /* Return non-zero if LP has been resumed. */
2862
static int
resumed_callback (struct lwp_info *lp)
{
  /* Callback for iterate_over_lwps: a non-zero return selects LP.  */
  return lp->resumed;
}
2868
2869 /* Check if we should go on and pass this event to common code.
2870
2871 If so, save the status to the lwp_info structure associated to LWPID. */
2872
static void
linux_nat_filter_event (int lwpid, int status)
{
  struct lwp_info *lp;
  int event = linux_ptrace_get_extended_event (status);

  /* Look up the LWP this event belongs to, if we already know it.  */
  lp = find_lwp_pid (ptid_t (lwpid));

  /* Check for stop events reported by a process we didn't already
     know about - anything not already in our LWP list.

     If we're expecting to receive stopped processes after
     fork, vfork, and clone events, then we'll just add the
     new one to our list and go back to waiting for the event
     to be reported - the stopped process might be returned
     from waitpid before or after the event is.

     But note the case of a non-leader thread exec'ing after the
     leader having exited, and gone from our lists.  The non-leader
     thread changes its tid to the tgid.  */

  if (WIFSTOPPED (status) && lp == NULL
      && (WSTOPSIG (status) == SIGTRAP && event == PTRACE_EVENT_EXEC))
    {
      /* A multi-thread exec after we had seen the leader exiting.  */
      linux_nat_debug_printf ("Re-adding thread group leader LWP %d.", lwpid);

      lp = add_lwp (ptid_t (lwpid, lwpid));
      lp->stopped = 1;
      lp->resumed = 1;
      add_thread (linux_target, lp->ptid);
    }

  /* An unknown stopped LWP: most likely the child of a not-yet-seen
     fork/vfork/clone event.  Stash the status until the event
     arrives.  */
  if (WIFSTOPPED (status) && !lp)
    {
      linux_nat_debug_printf ("saving LWP %ld status %s in stopped_pids list",
			      (long) lwpid, status_to_str (status).c_str ());
      add_to_pid_list (&stopped_pids, lwpid, status);
      return;
    }

  /* Make sure we don't report an event for the exit of an LWP not in
     our list, i.e. not part of the current process.  This can happen
     if we detach from a program we originally forked and then it
     exits.  */
  if (!WIFSTOPPED (status) && !lp)
    return;

  /* This LWP is stopped now.  (And if dead, this prevents it from
     ever being continued.)  */
  lp->stopped = 1;

  /* If the ptrace options could not be set earlier, set them now that
     the LWP is stopped.  */
  if (WIFSTOPPED (status) && lp->must_set_ptrace_flags)
    {
      inferior *inf = find_inferior_pid (linux_target, lp->ptid.pid ());
      int options = linux_nat_ptrace_options (inf->attach_flag);

      linux_enable_event_reporting (lp->ptid.lwp (), options);
      lp->must_set_ptrace_flags = 0;
    }

  /* Handle GNU/Linux's syscall SIGTRAPs.  */
  if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
    {
      /* No longer need the sysgood bit.  The ptrace event ends up
	 recorded in lp->waitstatus if we care for it.  We can carry
	 on handling the event like a regular SIGTRAP from here
	 on.  */
      status = W_STOPCODE (SIGTRAP);
      if (linux_handle_syscall_trap (lp, 0))
	return;
    }
  else
    {
      /* Almost all other ptrace-stops are known to be outside of system
	 calls, with further exceptions in linux_handle_extended_wait.  */
      lp->syscall_state = TARGET_WAITKIND_IGNORE;
    }

  /* Handle GNU/Linux's extended waitstatus for trace events.  */
  if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP
      && linux_is_extended_waitstatus (status))
    {
      linux_nat_debug_printf ("Handling extended status 0x%06x", status);

      /* A non-zero return means the event was consumed internally.  */
      if (linux_handle_extended_wait (lp, status))
	return;
    }

  /* Check if the thread has exited.  */
  if (WIFEXITED (status) || WIFSIGNALED (status))
    {
      if (!report_thread_events
	  && num_lwps (lp->ptid.pid ()) > 1)
	{
	  linux_nat_debug_printf ("%s exited.",
				  target_pid_to_str (lp->ptid).c_str ());

	  /* If there is at least one more LWP, then the exit signal
	     was not the end of the debugged application and should be
	     ignored.  */
	  exit_lwp (lp);
	  return;
	}

      /* Note that even if the leader was ptrace-stopped, it can still
	 exit, if e.g., some other thread brings down the whole
	 process (calls `exit').  So don't assert that the lwp is
	 resumed.  */
      linux_nat_debug_printf ("LWP %ld exited (resumed=%d)",
			      lp->ptid.lwp (), lp->resumed);

      /* Dead LWP's aren't expected to reported a pending sigstop.  */
      lp->signalled = 0;

      /* Store the pending event in the waitstatus, because
	 W_EXITCODE(0,0) == 0.  */
      lp->waitstatus = host_status_to_waitstatus (status);
      return;
    }

  /* Make sure we don't report a SIGSTOP that we sent ourselves in
     an attempt to stop an LWP.  */
  if (lp->signalled
      && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP)
    {
      lp->signalled = 0;

      if (lp->last_resume_kind == resume_stop)
	{
	  /* The core explicitly asked this LWP to stop; report it.  */
	  linux_nat_debug_printf ("resume_stop SIGSTOP caught for %s.",
				  target_pid_to_str (lp->ptid).c_str ());
	}
      else
	{
	  /* This is a delayed SIGSTOP.  Filter out the event.  */

	  linux_nat_debug_printf
	    ("%s %s, 0, 0 (discard delayed SIGSTOP)",
	     lp->step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
	     target_pid_to_str (lp->ptid).c_str ());

	  linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
	  gdb_assert (lp->resumed);
	  return;
	}
    }

  /* Make sure we don't report a SIGINT that we have already displayed
     for another thread.  */
  if (lp->ignore_sigint
      && WIFSTOPPED (status) && WSTOPSIG (status) == SIGINT)
    {
      linux_nat_debug_printf ("Delayed SIGINT caught for %s.",
			      target_pid_to_str (lp->ptid).c_str ());

      /* This is a delayed SIGINT.  */
      lp->ignore_sigint = 0;

      linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
      linux_nat_debug_printf ("%s %s, 0, 0 (discard SIGINT)",
			      lp->step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
			      target_pid_to_str (lp->ptid).c_str ());
      gdb_assert (lp->resumed);

      /* Discard the event.  */
      return;
    }

  /* Don't report signals that GDB isn't interested in, such as
     signals that are neither printed nor stopped upon.  Stopping all
     threads can be a bit time-consuming, so if we want decent
     performance with heavily multi-threaded programs, especially when
     they're using a high frequency timer, we'd better avoid it if we
     can.  */
  if (WIFSTOPPED (status))
    {
      enum gdb_signal signo = gdb_signal_from_host (WSTOPSIG (status));

      if (!target_is_non_stop_p ())
	{
	  /* Only do the below in all-stop, as we currently use SIGSTOP
	     to implement target_stop (see linux_nat_stop) in
	     non-stop.  */
	  if (signo == GDB_SIGNAL_INT && signal_pass_state (signo) == 0)
	    {
	      /* If ^C/BREAK is typed at the tty/console, SIGINT gets
		 forwarded to the entire process group, that is, all LWPs
		 will receive it - unless they're using CLONE_THREAD to
		 share signals.  Since we only want to report it once, we
		 mark it as ignored for all LWPs except this one.  */
	      iterate_over_lwps (ptid_t (lp->ptid.pid ()), set_ignore_sigint);
	      lp->ignore_sigint = 0;
	    }
	  else
	    maybe_clear_ignore_sigint (lp);
	}

      /* When using hardware single-step, we need to report every signal.
	 Otherwise, signals in pass_mask may be short-circuited
	 except signals that might be caused by a breakpoint, or SIGSTOP
	 if we sent the SIGSTOP and are waiting for it to arrive.  */
      if (!lp->step
	  && WSTOPSIG (status) && sigismember (&pass_mask, WSTOPSIG (status))
	  && (WSTOPSIG (status) != SIGSTOP
	      || !find_thread_ptid (linux_target, lp->ptid)->stop_requested)
	  && !linux_wstatus_maybe_breakpoint (status))
	{
	  /* Short-circuit: pass the signal straight back to the
	     inferior without reporting the stop to the core.  */
	  linux_resume_one_lwp (lp, lp->step, signo);
	  linux_nat_debug_printf
	    ("%s %s, %s (preempt 'handle')",
	     lp->step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
	     target_pid_to_str (lp->ptid).c_str (),
	     (signo != GDB_SIGNAL_0
	      ? strsignal (gdb_signal_to_host (signo)) : "0"));
	  return;
	}
    }

  /* An interesting event.  */
  gdb_assert (lp);
  lp->status = status;
  save_stop_reason (lp);
}
3097
3098 /* Detect zombie thread group leaders, and "exit" them. We can't reap
3099 their exits until all other threads in the group have exited. */
3100
3101 static void
3102 check_zombie_leaders (void)
3103 {
3104 for (inferior *inf : all_inferiors ())
3105 {
3106 struct lwp_info *leader_lp;
3107
3108 if (inf->pid == 0)
3109 continue;
3110
3111 leader_lp = find_lwp_pid (ptid_t (inf->pid));
3112 if (leader_lp != NULL
3113 /* Check if there are other threads in the group, as we may
3114 have raced with the inferior simply exiting. */
3115 && num_lwps (inf->pid) > 1
3116 && linux_proc_pid_is_zombie (inf->pid))
3117 {
3118 linux_nat_debug_printf ("Thread group leader %d zombie "
3119 "(it exited, or another thread execd).",
3120 inf->pid);
3121
3122 /* A leader zombie can mean one of two things:
3123
3124 - It exited, and there's an exit status pending
3125 available, or only the leader exited (not the whole
3126 program). In the latter case, we can't waitpid the
3127 leader's exit status until all other threads are gone.
3128
3129 - There are 3 or more threads in the group, and a thread
3130 other than the leader exec'd. See comments on exec
3131 events at the top of the file. We could try
3132 distinguishing the exit and exec cases, by waiting once
3133 more, and seeing if something comes out, but it doesn't
3134 sound useful. The previous leader _does_ go away, and
3135 we'll re-add the new one once we see the exec event
3136 (which is just the same as what would happen if the
3137 previous leader did exit voluntarily before some other
3138 thread execs). */
3139
3140 linux_nat_debug_printf ("Thread group leader %d vanished.", inf->pid);
3141 exit_lwp (leader_lp);
3142 }
3143 }
3144 }
3145
3146 /* Convenience function that is called when the kernel reports an exit
3147 event. This decides whether to report the event to GDB as a
3148 process exit event, a thread exit event, or to suppress the
3149 event. */
3150
3151 static ptid_t
3152 filter_exit_event (struct lwp_info *event_child,
3153 struct target_waitstatus *ourstatus)
3154 {
3155 ptid_t ptid = event_child->ptid;
3156
3157 if (num_lwps (ptid.pid ()) > 1)
3158 {
3159 if (report_thread_events)
3160 ourstatus->set_thread_exited (0);
3161 else
3162 ourstatus->set_ignore ();
3163
3164 exit_lwp (event_child);
3165 }
3166
3167 return ptid;
3168 }
3169
/* The real guts of the wait method: pull all events out of the
   kernel with waitpid, filter and record them on the LWP list, and
   select one event LWP to report to the core.  PTID restricts which
   LWPs' events may be reported; TARGET_OPTIONS may contain
   TARGET_WNOHANG to poll instead of block.  Returns the ptid of the
   event thread with *OURSTATUS filled in, or minus_one_ptid with
   *OURSTATUS set to ignore/no-resumed when there is nothing to
   report.  */

static ptid_t
linux_nat_wait_1 (ptid_t ptid, struct target_waitstatus *ourstatus,
		  target_wait_flags target_options)
{
  sigset_t prev_mask;
  enum resume_kind last_resume_kind;
  struct lwp_info *lp;
  int status;

  linux_nat_debug_printf ("enter");

  /* The first time we get here after starting a new inferior, we may
     not have added it to the LWP list yet - this is the earliest
     moment at which we know its PID.  */
  if (ptid.is_pid () && find_lwp_pid (ptid) == nullptr)
    {
      ptid_t lwp_ptid (ptid.pid (), ptid.pid ());

      /* Upgrade the main thread's ptid.  */
      thread_change_ptid (linux_target, ptid, lwp_ptid);
      lp = add_initial_lwp (lwp_ptid);
      lp->resumed = 1;
    }

  /* Make sure SIGCHLD is blocked until the sigsuspend below.  */
  block_child_signals (&prev_mask);

  /* First check if there is a LWP with a wait status pending.  */
  lp = iterate_over_lwps (ptid, status_callback);
  if (lp != NULL)
    {
      linux_nat_debug_printf ("Using pending wait status %s for %s.",
			      status_to_str (lp->status).c_str (),
			      target_pid_to_str (lp->ptid).c_str ());
    }

  /* But if we don't find a pending event, we'll have to wait.  Always
     pull all events out of the kernel.  We'll randomly select an
     event LWP out of all that have events, to prevent starvation.  */

  while (lp == NULL)
    {
      pid_t lwpid;

      /* Always use -1 and WNOHANG, due to couple of a kernel/ptrace
	 quirks:

	 - If the thread group leader exits while other threads in the
	   thread group still exist, waitpid(TGID, ...) hangs.  That
	   waitpid won't return an exit status until the other threads
	   in the group are reaped.

	 - When a non-leader thread execs, that thread just vanishes
	   without reporting an exit (so we'd hang if we waited for it
	   explicitly in that case).  The exec event is reported to
	   the TGID pid.  */

      errno = 0;
      lwpid = my_waitpid (-1, &status, __WALL | WNOHANG);

      linux_nat_debug_printf ("waitpid(-1, ...) returned %d, %s",
			      lwpid,
			      errno ? safe_strerror (errno) : "ERRNO-OK");

      if (lwpid > 0)
	{
	  linux_nat_debug_printf ("waitpid %ld received %s",
				  (long) lwpid,
				  status_to_str (status).c_str ());

	  /* Record the event on the matching LWP; it may be consumed
	     here (e.g., delayed SIGSTOPs) or left pending.  */
	  linux_nat_filter_event (lwpid, status);
	  /* Retry until nothing comes out of waitpid.  A single
	     SIGCHLD can indicate more than one child stopped.  */
	  continue;
	}

      /* Now that we've pulled all events out of the kernel, resume
	 LWPs that don't have an interesting event to report.  */
      iterate_over_lwps (minus_one_ptid,
			 [] (struct lwp_info *info)
			 {
			   return resume_stopped_resumed_lwps (info, minus_one_ptid);
			 });

      /* ... and find an LWP with a status to report to the core, if
	 any.  */
      lp = iterate_over_lwps (ptid, status_callback);
      if (lp != NULL)
	break;

      /* Check for zombie thread group leaders.  Those can't be reaped
	 until all other threads in the thread group are.  */
      check_zombie_leaders ();

      /* If there are no resumed children left, bail.  We'd be stuck
	 forever in the sigsuspend call below otherwise.  */
      if (iterate_over_lwps (ptid, resumed_callback) == NULL)
	{
	  linux_nat_debug_printf ("exit (no resumed LWP)");

	  ourstatus->set_no_resumed ();

	  restore_child_signals_mask (&prev_mask);
	  return minus_one_ptid;
	}

      /* No interesting event to report to the core.  */

      if (target_options & TARGET_WNOHANG)
	{
	  linux_nat_debug_printf ("exit (ignore)");

	  ourstatus->set_ignore ();
	  restore_child_signals_mask (&prev_mask);
	  return minus_one_ptid;
	}

      /* We shouldn't end up here unless we want to try again.  */
      gdb_assert (lp == NULL);

      /* Block until we get an event reported with SIGCHLD.  */
      wait_for_signal ();
    }

  gdb_assert (lp);

  /* Consume the pending status now that LP is the chosen event
     LWP.  */
  status = lp->status;
  lp->status = 0;

  if (!target_is_non_stop_p ())
    {
      /* Now stop all other LWP's ...  */
      iterate_over_lwps (minus_one_ptid, stop_callback);

      /* ... and wait until all of them have reported back that
	 they're no longer running.  */
      iterate_over_lwps (minus_one_ptid, stop_wait_callback);
    }

  /* If we're not waiting for a specific LWP, choose an event LWP from
     among those that have had events.  Giving equal priority to all
     LWPs that have had events helps prevent starvation.  */
  if (ptid == minus_one_ptid || ptid.is_pid ())
    select_event_lwp (ptid, &lp, &status);

  gdb_assert (lp != NULL);

  /* Now that we've selected our final event LWP, un-adjust its PC if
     it was a software breakpoint, and we can't reliably support the
     "stopped by software breakpoint" stop reason.  */
  if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
      && !USE_SIGTRAP_SIGINFO)
    {
      struct regcache *regcache = get_thread_regcache (linux_target, lp->ptid);
      struct gdbarch *gdbarch = regcache->arch ();
      int decr_pc = gdbarch_decr_pc_after_break (gdbarch);

      if (decr_pc != 0)
	{
	  CORE_ADDR pc;

	  pc = regcache_read_pc (regcache);
	  regcache_write_pc (regcache, pc + decr_pc);
	}
    }

  /* We'll need this to determine whether to report a SIGSTOP as
     GDB_SIGNAL_0.  Need to take a copy because resume_clear_callback
     clears it.  */
  last_resume_kind = lp->last_resume_kind;

  if (!target_is_non_stop_p ())
    {
      /* In all-stop, from the core's perspective, all LWPs are now
	 stopped until a new resume action is sent over.  */
      iterate_over_lwps (minus_one_ptid, resume_clear_callback);
    }
  else
    {
      resume_clear_callback (lp);
    }

  if (linux_target->low_status_is_event (status))
    {
      linux_nat_debug_printf ("trap ptid is %s.",
			      target_pid_to_str (lp->ptid).c_str ());
    }

  /* A previously-recorded extended waitstatus (e.g. a fork/exec
     event) takes precedence over the raw host status.  */
  if (lp->waitstatus.kind () != TARGET_WAITKIND_IGNORE)
    {
      *ourstatus = lp->waitstatus;
      lp->waitstatus.set_ignore ();
    }
  else
    *ourstatus = host_status_to_waitstatus (status);

  linux_nat_debug_printf ("exit");

  restore_child_signals_mask (&prev_mask);

  if (last_resume_kind == resume_stop
      && ourstatus->kind () == TARGET_WAITKIND_STOPPED
      && WSTOPSIG (status) == SIGSTOP)
    {
      /* A thread that has been requested to stop by GDB with
	 target_stop, and it stopped cleanly, so report as SIG0.  The
	 use of SIGSTOP is an implementation detail.  */
      ourstatus->set_stopped (GDB_SIGNAL_0);
    }

  /* Cache the core the LWP last ran on, except when the process is
     gone.  */
  if (ourstatus->kind () == TARGET_WAITKIND_EXITED
      || ourstatus->kind () == TARGET_WAITKIND_SIGNALLED)
    lp->core = -1;
  else
    lp->core = linux_common_core_of_thread (lp->ptid);

  if (ourstatus->kind () == TARGET_WAITKIND_EXITED)
    return filter_exit_event (lp, ourstatus);

  return lp->ptid;
}
3391
3392 /* Resume LWPs that are currently stopped without any pending status
3393 to report, but are resumed from the core's perspective. */
3394
3395 static int
3396 resume_stopped_resumed_lwps (struct lwp_info *lp, const ptid_t wait_ptid)
3397 {
3398 if (!lp->stopped)
3399 {
3400 linux_nat_debug_printf ("NOT resuming LWP %s, not stopped",
3401 target_pid_to_str (lp->ptid).c_str ());
3402 }
3403 else if (!lp->resumed)
3404 {
3405 linux_nat_debug_printf ("NOT resuming LWP %s, not resumed",
3406 target_pid_to_str (lp->ptid).c_str ());
3407 }
3408 else if (lwp_status_pending_p (lp))
3409 {
3410 linux_nat_debug_printf ("NOT resuming LWP %s, has pending status",
3411 target_pid_to_str (lp->ptid).c_str ());
3412 }
3413 else
3414 {
3415 struct regcache *regcache = get_thread_regcache (linux_target, lp->ptid);
3416 struct gdbarch *gdbarch = regcache->arch ();
3417
3418 try
3419 {
3420 CORE_ADDR pc = regcache_read_pc (regcache);
3421 int leave_stopped = 0;
3422
3423 /* Don't bother if there's a breakpoint at PC that we'd hit
3424 immediately, and we're not waiting for this LWP. */
3425 if (!lp->ptid.matches (wait_ptid))
3426 {
3427 if (breakpoint_inserted_here_p (regcache->aspace (), pc))
3428 leave_stopped = 1;
3429 }
3430
3431 if (!leave_stopped)
3432 {
3433 linux_nat_debug_printf
3434 ("resuming stopped-resumed LWP %s at %s: step=%d",
3435 target_pid_to_str (lp->ptid).c_str (), paddress (gdbarch, pc),
3436 lp->step);
3437
3438 linux_resume_one_lwp_throw (lp, lp->step, GDB_SIGNAL_0);
3439 }
3440 }
3441 catch (const gdb_exception_error &ex)
3442 {
3443 if (!check_ptrace_stopped_lwp_gone (lp))
3444 throw;
3445 }
3446 }
3447
3448 return 0;
3449 }
3450
3451 ptid_t
3452 linux_nat_target::wait (ptid_t ptid, struct target_waitstatus *ourstatus,
3453 target_wait_flags target_options)
3454 {
3455 ptid_t event_ptid;
3456
3457 linux_nat_debug_printf ("[%s], [%s]", target_pid_to_str (ptid).c_str (),
3458 target_options_to_string (target_options).c_str ());
3459
3460 /* Flush the async file first. */
3461 if (target_is_async_p ())
3462 async_file_flush ();
3463
3464 /* Resume LWPs that are currently stopped without any pending status
3465 to report, but are resumed from the core's perspective. LWPs get
3466 in this state if we find them stopping at a time we're not
3467 interested in reporting the event (target_wait on a
3468 specific_process, for example, see linux_nat_wait_1), and
3469 meanwhile the event became uninteresting. Don't bother resuming
3470 LWPs we're not going to wait for if they'd stop immediately. */
3471 if (target_is_non_stop_p ())
3472 iterate_over_lwps (minus_one_ptid,
3473 [=] (struct lwp_info *info)
3474 {
3475 return resume_stopped_resumed_lwps (info, ptid);
3476 });
3477
3478 event_ptid = linux_nat_wait_1 (ptid, ourstatus, target_options);
3479
3480 /* If we requested any event, and something came out, assume there
3481 may be more. If we requested a specific lwp or process, also
3482 assume there may be more. */
3483 if (target_is_async_p ()
3484 && ((ourstatus->kind () != TARGET_WAITKIND_IGNORE
3485 && ourstatus->kind () != TARGET_WAITKIND_NO_RESUMED)
3486 || ptid != minus_one_ptid))
3487 async_file_mark ();
3488
3489 return event_ptid;
3490 }
3491
3492 /* Kill one LWP. */
3493
3494 static void
3495 kill_one_lwp (pid_t pid)
3496 {
3497 /* PTRACE_KILL may resume the inferior. Send SIGKILL first. */
3498
3499 errno = 0;
3500 kill_lwp (pid, SIGKILL);
3501
3502 if (debug_linux_nat)
3503 {
3504 int save_errno = errno;
3505
3506 linux_nat_debug_printf
3507 ("kill (SIGKILL) %ld, 0, 0 (%s)", (long) pid,
3508 save_errno != 0 ? safe_strerror (save_errno) : "OK");
3509 }
3510
3511 /* Some kernels ignore even SIGKILL for processes under ptrace. */
3512
3513 errno = 0;
3514 ptrace (PTRACE_KILL, pid, 0, 0);
3515 if (debug_linux_nat)
3516 {
3517 int save_errno = errno;
3518
3519 linux_nat_debug_printf
3520 ("PTRACE_KILL %ld, 0, 0 (%s)", (long) pid,
3521 save_errno ? safe_strerror (save_errno) : "OK");
3522 }
3523 }
3524
3525 /* Wait for an LWP to die. */
3526
3527 static void
3528 kill_wait_one_lwp (pid_t pid)
3529 {
3530 pid_t res;
3531
3532 /* We must make sure that there are no pending events (delayed
3533 SIGSTOPs, pending SIGTRAPs, etc.) to make sure the current
3534 program doesn't interfere with any following debugging session. */
3535
3536 do
3537 {
3538 res = my_waitpid (pid, NULL, __WALL);
3539 if (res != (pid_t) -1)
3540 {
3541 linux_nat_debug_printf ("wait %ld received unknown.", (long) pid);
3542
3543 /* The Linux kernel sometimes fails to kill a thread
3544 completely after PTRACE_KILL; that goes from the stop
3545 point in do_fork out to the one in get_signal_to_deliver
3546 and waits again. So kill it again. */
3547 kill_one_lwp (pid);
3548 }
3549 }
3550 while (res == pid);
3551
3552 gdb_assert (res == -1 && errno == ECHILD);
3553 }
3554
3555 /* Callback for iterate_over_lwps. */
3556
3557 static int
3558 kill_callback (struct lwp_info *lp)
3559 {
3560 kill_one_lwp (lp->ptid.lwp ());
3561 return 0;
3562 }
3563
3564 /* Callback for iterate_over_lwps. */
3565
3566 static int
3567 kill_wait_callback (struct lwp_info *lp)
3568 {
3569 kill_wait_one_lwp (lp->ptid.lwp ());
3570 return 0;
3571 }
3572
3573 /* Kill the fork children of any threads of inferior INF that are
3574 stopped at a fork event. */
3575
3576 static void
3577 kill_unfollowed_fork_children (struct inferior *inf)
3578 {
3579 for (thread_info *thread : inf->non_exited_threads ())
3580 {
3581 struct target_waitstatus *ws = &thread->pending_follow;
3582
3583 if (ws->kind () == TARGET_WAITKIND_FORKED
3584 || ws->kind () == TARGET_WAITKIND_VFORKED)
3585 {
3586 ptid_t child_ptid = ws->child_ptid ();
3587 int child_pid = child_ptid.pid ();
3588 int child_lwp = child_ptid.lwp ();
3589
3590 kill_one_lwp (child_lwp);
3591 kill_wait_one_lwp (child_lwp);
3592
3593 /* Let the arch-specific native code know this process is
3594 gone. */
3595 linux_target->low_forget_process (child_pid);
3596 }
3597 }
3598 }
3599
3600 void
3601 linux_nat_target::kill ()
3602 {
3603 /* If we're stopped while forking and we haven't followed yet,
3604 kill the other task. We need to do this first because the
3605 parent will be sleeping if this is a vfork. */
3606 kill_unfollowed_fork_children (current_inferior ());
3607
3608 if (forks_exist_p ())
3609 linux_fork_killall ();
3610 else
3611 {
3612 ptid_t ptid = ptid_t (inferior_ptid.pid ());
3613
3614 /* Stop all threads before killing them, since ptrace requires
3615 that the thread is stopped to successfully PTRACE_KILL. */
3616 iterate_over_lwps (ptid, stop_callback);
3617 /* ... and wait until all of them have reported back that
3618 they're no longer running. */
3619 iterate_over_lwps (ptid, stop_wait_callback);
3620
3621 /* Kill all LWP's ... */
3622 iterate_over_lwps (ptid, kill_callback);
3623
3624 /* ... and wait until we've flushed all events. */
3625 iterate_over_lwps (ptid, kill_wait_callback);
3626 }
3627
3628 target_mourn_inferior (inferior_ptid);
3629 }
3630
3631 void
3632 linux_nat_target::mourn_inferior ()
3633 {
3634 int pid = inferior_ptid.pid ();
3635
3636 purge_lwp_list (pid);
3637
3638 close_proc_mem_file (pid);
3639
3640 if (! forks_exist_p ())
3641 /* Normal case, no other forks available. */
3642 inf_ptrace_target::mourn_inferior ();
3643 else
3644 /* Multi-fork case. The current inferior_ptid has exited, but
3645 there are other viable forks to debug. Delete the exiting
3646 one and context-switch to the first available. */
3647 linux_fork_mourn_inferior ();
3648
3649 /* Let the arch-specific native code know this process is gone. */
3650 linux_target->low_forget_process (pid);
3651 }
3652
3653 /* Convert a native/host siginfo object, into/from the siginfo in the
3654 layout of the inferiors' architecture. */
3655
3656 static void
3657 siginfo_fixup (siginfo_t *siginfo, gdb_byte *inf_siginfo, int direction)
3658 {
3659 /* If the low target didn't do anything, then just do a straight
3660 memcpy. */
3661 if (!linux_target->low_siginfo_fixup (siginfo, inf_siginfo, direction))
3662 {
3663 if (direction == 1)
3664 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
3665 else
3666 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
3667 }
3668 }
3669
/* Transfer TARGET_OBJECT_SIGNAL_INFO: read or write the current
   thread's siginfo via PTRACE_GETSIGINFO/PTRACE_SETSIGINFO, presented
   to the core in the layout of the inferior's architecture.  Writes
   are read-modify-write: the whole siginfo is fetched, the requested
   byte range patched, and the object written back.  */

static enum target_xfer_status
linux_xfer_siginfo (enum target_object object,
		    const char *annex, gdb_byte *readbuf,
		    const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
		    ULONGEST *xfered_len)
{
  int pid;
  siginfo_t siginfo;
  gdb_byte inf_siginfo[sizeof (siginfo_t)];

  gdb_assert (object == TARGET_OBJECT_SIGNAL_INFO);
  gdb_assert (readbuf || writebuf);

  /* Prefer the LWP id; fall back to the pid when no LWP id is set.  */
  pid = inferior_ptid.lwp ();
  if (pid == 0)
    pid = inferior_ptid.pid ();

  /* NOTE(review): offset == sizeof (siginfo) is accepted here and
     yields a zero-length TARGET_XFER_OK below — confirm whether EOF
     would be the intended result for that boundary.  */
  if (offset > sizeof (siginfo))
    return TARGET_XFER_E_IO;

  errno = 0;
  ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
  if (errno != 0)
    return TARGET_XFER_E_IO;

  /* When GDB is built as a 64-bit application, ptrace writes into
     SIGINFO an object with 64-bit layout.  Since debugging a 32-bit
     inferior with a 64-bit GDB should look the same as debugging it
     with a 32-bit GDB, we need to convert it.  GDB core always sees
     the converted layout, so any read/write will have to be done
     post-conversion.  */
  siginfo_fixup (&siginfo, inf_siginfo, 0);

  /* Clamp the transfer to the end of the object.  */
  if (offset + len > sizeof (siginfo))
    len = sizeof (siginfo) - offset;

  if (readbuf != NULL)
    memcpy (readbuf, inf_siginfo + offset, len);
  else
    {
      memcpy (inf_siginfo + offset, writebuf, len);

      /* Convert back to ptrace layout before flushing it out.  */
      siginfo_fixup (&siginfo, inf_siginfo, 1);

      errno = 0;
      ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
      if (errno != 0)
	return TARGET_XFER_E_IO;
    }

  *xfered_len = len;
  return TARGET_XFER_OK;
}
3724
3725 static enum target_xfer_status
3726 linux_nat_xfer_osdata (enum target_object object,
3727 const char *annex, gdb_byte *readbuf,
3728 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
3729 ULONGEST *xfered_len);
3730
3731 static enum target_xfer_status
3732 linux_proc_xfer_memory_partial (gdb_byte *readbuf, const gdb_byte *writebuf,
3733 ULONGEST offset, LONGEST len, ULONGEST *xfered_len);
3734
3735 enum target_xfer_status
3736 linux_nat_target::xfer_partial (enum target_object object,
3737 const char *annex, gdb_byte *readbuf,
3738 const gdb_byte *writebuf,
3739 ULONGEST offset, ULONGEST len, ULONGEST *xfered_len)
3740 {
3741 if (object == TARGET_OBJECT_SIGNAL_INFO)
3742 return linux_xfer_siginfo (object, annex, readbuf, writebuf,
3743 offset, len, xfered_len);
3744
3745 /* The target is connected but no live inferior is selected. Pass
3746 this request down to a lower stratum (e.g., the executable
3747 file). */
3748 if (object == TARGET_OBJECT_MEMORY && inferior_ptid == null_ptid)
3749 return TARGET_XFER_EOF;
3750
3751 if (object == TARGET_OBJECT_AUXV)
3752 return memory_xfer_auxv (this, object, annex, readbuf, writebuf,
3753 offset, len, xfered_len);
3754
3755 if (object == TARGET_OBJECT_OSDATA)
3756 return linux_nat_xfer_osdata (object, annex, readbuf, writebuf,
3757 offset, len, xfered_len);
3758
3759 if (object == TARGET_OBJECT_MEMORY)
3760 {
3761 /* GDB calculates all addresses in the largest possible address
3762 width. The address width must be masked before its final use
3763 by linux_proc_xfer_partial.
3764
3765 Compare ADDR_BIT first to avoid a compiler warning on shift overflow. */
3766 int addr_bit = gdbarch_addr_bit (target_gdbarch ());
3767
3768 if (addr_bit < (sizeof (ULONGEST) * HOST_CHAR_BIT))
3769 offset &= ((ULONGEST) 1 << addr_bit) - 1;
3770
3771 return linux_proc_xfer_memory_partial (readbuf, writebuf,
3772 offset, len, xfered_len);
3773 }
3774
3775 return inf_ptrace_target::xfer_partial (object, annex, readbuf, writebuf,
3776 offset, len, xfered_len);
3777 }
3778
3779 bool
3780 linux_nat_target::thread_alive (ptid_t ptid)
3781 {
3782 /* As long as a PTID is in lwp list, consider it alive. */
3783 return find_lwp_pid (ptid) != NULL;
3784 }
3785
3786 /* Implement the to_update_thread_list target method for this
3787 target. */
3788
3789 void
3790 linux_nat_target::update_thread_list ()
3791 {
3792 /* We add/delete threads from the list as clone/exit events are
3793 processed, so just try deleting exited threads still in the
3794 thread list. */
3795 delete_exited_threads ();
3796
3797 /* Update the processor core that each lwp/thread was last seen
3798 running on. */
3799 for (lwp_info *lwp : all_lwps ())
3800 {
3801 /* Avoid accessing /proc if the thread hasn't run since we last
3802 time we fetched the thread's core. Accessing /proc becomes
3803 noticeably expensive when we have thousands of LWPs. */
3804 if (lwp->core == -1)
3805 lwp->core = linux_common_core_of_thread (lwp->ptid);
3806 }
3807 }
3808
3809 std::string
3810 linux_nat_target::pid_to_str (ptid_t ptid)
3811 {
3812 if (ptid.lwp_p ()
3813 && (ptid.pid () != ptid.lwp ()
3814 || num_lwps (ptid.pid ()) > 1))
3815 return string_printf ("LWP %ld", ptid.lwp ());
3816
3817 return normal_pid_to_str (ptid);
3818 }
3819
3820 const char *
3821 linux_nat_target::thread_name (struct thread_info *thr)
3822 {
3823 return linux_proc_tid_get_name (thr->ptid);
3824 }
3825
3826 /* Accepts an integer PID; Returns a string representing a file that
3827 can be opened to get the symbols for the child process. */
3828
3829 char *
3830 linux_nat_target::pid_to_exec_file (int pid)
3831 {
3832 return linux_proc_pid_to_exec_file (pid);
3833 }
3834
3835 /* Object representing an /proc/PID/mem open file. We keep one such
3836 file open per inferior.
3837
3838 It might be tempting to think about only ever opening one file at
3839 most for all inferiors, closing/reopening the file as we access
3840 memory of different inferiors, to minimize number of file
3841 descriptors open, which can otherwise run into resource limits.
3842 However, that does not work correctly -- if the inferior execs and
3843 we haven't processed the exec event yet, and, we opened a
3844 /proc/PID/mem file, we will get a mem file accessing the post-exec
3845 address space, thinking we're opening it for the pre-exec address
3846 space. That is dangerous as we can poke memory (e.g. clearing
3847 breakpoints) in the post-exec memory by mistake, corrupting the
3848 inferior. For that reason, we open the mem file as early as
3849 possible, right after spawning, forking or attaching to the
3850 inferior, when the inferior is stopped and thus before it has a
3851 chance of execing.
3852
3853 Note that after opening the file, even if the thread we opened it
3854 for subsequently exits, the open file is still usable for accessing
3855 memory. It's only when the whole process exits or execs that the
3856 file becomes invalid, at which point reads/writes return EOF. */
3857
3858 class proc_mem_file
3859 {
3860 public:
3861 proc_mem_file (ptid_t ptid, int fd)
3862 : m_ptid (ptid), m_fd (fd)
3863 {
3864 gdb_assert (m_fd != -1);
3865 }
3866
3867 ~proc_mem_file ()
3868 {
3869 linux_nat_debug_printf ("closing fd %d for /proc/%d/task/%ld/mem",
3870 m_fd, m_ptid.pid (), m_ptid.lwp ());
3871 close (m_fd);
3872 }
3873
3874 DISABLE_COPY_AND_ASSIGN (proc_mem_file);
3875
3876 int fd ()
3877 {
3878 return m_fd;
3879 }
3880
3881 private:
3882 /* The LWP this file was opened for. Just for debugging
3883 purposes. */
3884 ptid_t m_ptid;
3885
3886 /* The file descriptor. */
3887 int m_fd = -1;
3888 };
3889
/* The map between an inferior process id, and the open /proc/PID/mem
   file.  This is stored in a map instead of in a per-inferior
   structure because we need to be able to access memory of processes
   which don't have a corresponding struct inferior object.  E.g.,
   with "detach-on-fork on" (the default), and "follow-fork parent"
   (also default), we don't create an inferior for the fork child, but
   we still need to remove breakpoints from the fork child's
   memory.  Entries are added by open_proc_mem_file and removed by
   close_proc_mem_file; erasing an entry closes the fd via
   ~proc_mem_file.  */
static std::unordered_map<int, proc_mem_file> proc_mem_file_map;
3899
3900 /* Close the /proc/PID/mem file for PID. */
3901
3902 static void
3903 close_proc_mem_file (pid_t pid)
3904 {
3905 proc_mem_file_map.erase (pid);
3906 }
3907
3908 /* Open the /proc/PID/mem file for the process (thread group) of PTID.
3909 We actually open /proc/PID/task/LWP/mem, as that's the LWP we know
3910 exists and is stopped right now. We prefer the
3911 /proc/PID/task/LWP/mem form over /proc/LWP/mem to avoid tid-reuse
3912 races, just in case this is ever called on an already-waited
3913 LWP. */
3914
3915 static void
3916 open_proc_mem_file (ptid_t ptid)
3917 {
3918 auto iter = proc_mem_file_map.find (ptid.pid ());
3919 gdb_assert (iter == proc_mem_file_map.end ());
3920
3921 char filename[64];
3922 xsnprintf (filename, sizeof filename,
3923 "/proc/%d/task/%ld/mem", ptid.pid (), ptid.lwp ());
3924
3925 int fd = gdb_open_cloexec (filename, O_RDWR | O_LARGEFILE, 0).release ();
3926
3927 if (fd == -1)
3928 {
3929 warning (_("opening /proc/PID/mem file for lwp %d.%ld failed: %s (%d)"),
3930 ptid.pid (), ptid.lwp (),
3931 safe_strerror (errno), errno);
3932 return;
3933 }
3934
3935 proc_mem_file_map.emplace (std::piecewise_construct,
3936 std::forward_as_tuple (ptid.pid ()),
3937 std::forward_as_tuple (ptid, fd));
3938
3939 linux_nat_debug_printf ("opened fd %d for lwp %d.%ld\n",
3940 fd, ptid.pid (), ptid.lwp ());
3941 }
3942
3943 /* Implement the to_xfer_partial target method using /proc/PID/mem.
3944 Because we can use a single read/write call, this can be much more
3945 efficient than banging away at PTRACE_PEEKTEXT. Also, unlike
3946 PTRACE_PEEKTEXT/PTRACE_POKETEXT, this works with running
3947 threads. */
3948
3949 static enum target_xfer_status
3950 linux_proc_xfer_memory_partial (gdb_byte *readbuf, const gdb_byte *writebuf,
3951 ULONGEST offset, LONGEST len,
3952 ULONGEST *xfered_len)
3953 {
3954 ssize_t ret;
3955
3956 auto iter = proc_mem_file_map.find (inferior_ptid.pid ());
3957 if (iter == proc_mem_file_map.end ())
3958 return TARGET_XFER_EOF;
3959
3960 int fd = iter->second.fd ();
3961
3962 gdb_assert (fd != -1);
3963
3964 /* Use pread64/pwrite64 if available, since they save a syscall and can
3965 handle 64-bit offsets even on 32-bit platforms (for instance, SPARC
3966 debugging a SPARC64 application). */
3967 #ifdef HAVE_PREAD64
3968 ret = (readbuf ? pread64 (fd, readbuf, len, offset)
3969 : pwrite64 (fd, writebuf, len, offset));
3970 #else
3971 ret = lseek (fd, offset, SEEK_SET);
3972 if (ret != -1)
3973 ret = (readbuf ? read (fd, readbuf, len)
3974 : write (fd, writebuf, len));
3975 #endif
3976
3977 if (ret == -1)
3978 {
3979 linux_nat_debug_printf ("accessing fd %d for pid %d failed: %s (%d)\n",
3980 fd, inferior_ptid.pid (),
3981 safe_strerror (errno), errno);
3982 return TARGET_XFER_EOF;
3983 }
3984 else if (ret == 0)
3985 {
3986 /* EOF means the address space is gone, the whole process exited
3987 or execed. */
3988 linux_nat_debug_printf ("accessing fd %d for pid %d got EOF\n",
3989 fd, inferior_ptid.pid ());
3990 return TARGET_XFER_EOF;
3991 }
3992 else
3993 {
3994 *xfered_len = ret;
3995 return TARGET_XFER_OK;
3996 }
3997 }
3998
3999 /* Parse LINE as a signal set and add its set bits to SIGS. */
4000
4001 static void
4002 add_line_to_sigset (const char *line, sigset_t *sigs)
4003 {
4004 int len = strlen (line) - 1;
4005 const char *p;
4006 int signum;
4007
4008 if (line[len] != '\n')
4009 error (_("Could not parse signal set: %s"), line);
4010
4011 p = line;
4012 signum = len * 4;
4013 while (len-- > 0)
4014 {
4015 int digit;
4016
4017 if (*p >= '0' && *p <= '9')
4018 digit = *p - '0';
4019 else if (*p >= 'a' && *p <= 'f')
4020 digit = *p - 'a' + 10;
4021 else
4022 error (_("Could not parse signal set: %s"), line);
4023
4024 signum -= 4;
4025
4026 if (digit & 1)
4027 sigaddset (sigs, signum + 1);
4028 if (digit & 2)
4029 sigaddset (sigs, signum + 2);
4030 if (digit & 4)
4031 sigaddset (sigs, signum + 3);
4032 if (digit & 8)
4033 sigaddset (sigs, signum + 4);
4034
4035 p++;
4036 }
4037 }
4038
4039 /* Find process PID's pending signals from /proc/pid/status and set
4040 SIGS to match. */
4041
4042 void
4043 linux_proc_pending_signals (int pid, sigset_t *pending,
4044 sigset_t *blocked, sigset_t *ignored)
4045 {
4046 char buffer[PATH_MAX], fname[PATH_MAX];
4047
4048 sigemptyset (pending);
4049 sigemptyset (blocked);
4050 sigemptyset (ignored);
4051 xsnprintf (fname, sizeof fname, "/proc/%d/status", pid);
4052 gdb_file_up procfile = gdb_fopen_cloexec (fname, "r");
4053 if (procfile == NULL)
4054 error (_("Could not open %s"), fname);
4055
4056 while (fgets (buffer, PATH_MAX, procfile.get ()) != NULL)
4057 {
4058 /* Normal queued signals are on the SigPnd line in the status
4059 file. However, 2.6 kernels also have a "shared" pending
4060 queue for delivering signals to a thread group, so check for
4061 a ShdPnd line also.
4062
4063 Unfortunately some Red Hat kernels include the shared pending
4064 queue but not the ShdPnd status field. */
4065
4066 if (startswith (buffer, "SigPnd:\t"))
4067 add_line_to_sigset (buffer + 8, pending);
4068 else if (startswith (buffer, "ShdPnd:\t"))
4069 add_line_to_sigset (buffer + 8, pending);
4070 else if (startswith (buffer, "SigBlk:\t"))
4071 add_line_to_sigset (buffer + 8, blocked);
4072 else if (startswith (buffer, "SigIgn:\t"))
4073 add_line_to_sigset (buffer + 8, ignored);
4074 }
4075 }
4076
4077 static enum target_xfer_status
4078 linux_nat_xfer_osdata (enum target_object object,
4079 const char *annex, gdb_byte *readbuf,
4080 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
4081 ULONGEST *xfered_len)
4082 {
4083 gdb_assert (object == TARGET_OBJECT_OSDATA);
4084
4085 *xfered_len = linux_common_xfer_osdata (annex, readbuf, offset, len);
4086 if (*xfered_len == 0)
4087 return TARGET_XFER_EOF;
4088 else
4089 return TARGET_XFER_OK;
4090 }
4091
/* Implementation of the "static_tracepoint_markers_by_strid" target
   method.  Query the in-process agent for the list of static
   tracepoint markers, returning those whose string ID equals STRID,
   or all markers when STRID is NULL.  */

std::vector<static_tracepoint_marker>
linux_nat_target::static_tracepoint_markers_by_strid (const char *strid)
{
  char s[IPA_CMD_BUF_SIZE];
  int pid = inferior_ptid.pid ();
  std::vector<static_tracepoint_marker> markers;
  const char *p = s;
  ptid_t ptid = ptid_t (pid, 0);
  static_tracepoint_marker marker;

  /* Pause all */
  target_stop (ptid);

  /* "qTfSTM" asks the agent for the first chunk of the marker
     list.  The agent writes its reply back into S.  */
  memcpy (s, "qTfSTM", sizeof ("qTfSTM"));
  s[sizeof ("qTfSTM")] = 0;

  agent_run_command (pid, s, strlen (s) + 1);

  /* Unpause all.  */
  SCOPE_EXIT { target_continue_no_signal (ptid); };

  /* An 'm' reply carries one or more marker definitions; anything
     else (e.g. 'l') ends the list.  */
  while (*p++ == 'm')
    {
      do
	{
	  parse_static_tracepoint_marker_definition (p, &p, &marker);

	  if (strid == NULL || marker.str_id == strid)
	    markers.push_back (std::move (marker));
	}
      while (*p++ == ',');	/* comma-separated list */

      /* "qTsSTM" fetches the next chunk of the marker list.  */
      memcpy (s, "qTsSTM", sizeof ("qTsSTM"));
      s[sizeof ("qTsSTM")] = 0;
      agent_run_command (pid, s, strlen (s) + 1);
      p = s;
    }

  return markers;
}
4132
4133 /* target_is_async_p implementation. */
4134
4135 bool
4136 linux_nat_target::is_async_p ()
4137 {
4138 return linux_is_async_p ();
4139 }
4140
4141 /* target_can_async_p implementation. */
4142
4143 bool
4144 linux_nat_target::can_async_p ()
4145 {
4146 /* This flag should be checked in the common target.c code. */
4147 gdb_assert (target_async_permitted);
4148
4149 /* Otherwise, this targets is always able to support async mode. */
4150 return true;
4151 }
4152
4153 bool
4154 linux_nat_target::supports_non_stop ()
4155 {
4156 return true;
4157 }
4158
4159 /* to_always_non_stop_p implementation. */
4160
4161 bool
4162 linux_nat_target::always_non_stop_p ()
4163 {
4164 return true;
4165 }
4166
4167 bool
4168 linux_nat_target::supports_multi_process ()
4169 {
4170 return true;
4171 }
4172
4173 bool
4174 linux_nat_target::supports_disable_randomization ()
4175 {
4176 return true;
4177 }
4178
4179 /* SIGCHLD handler that serves two purposes: In non-stop/async mode,
4180 so we notice when any child changes state, and notify the
4181 event-loop; it allows us to use sigsuspend in linux_nat_wait_1
4182 above to wait for the arrival of a SIGCHLD. */
4183
4184 static void
4185 sigchld_handler (int signo)
4186 {
4187 int old_errno = errno;
4188
4189 if (debug_linux_nat)
4190 gdb_stdlog->write_async_safe ("sigchld\n", sizeof ("sigchld\n") - 1);
4191
4192 if (signo == SIGCHLD
4193 && linux_nat_event_pipe[0] != -1)
4194 async_file_mark (); /* Let the event loop know that there are
4195 events to handle. */
4196
4197 errno = old_errno;
4198 }
4199
4200 /* Callback registered with the target events file descriptor. */
4201
4202 static void
4203 handle_target_event (int error, gdb_client_data client_data)
4204 {
4205 inferior_event_handler (INF_REG_EVENT);
4206 }
4207
4208 /* Create/destroy the target events pipe. Returns previous state. */
4209
4210 static int
4211 linux_async_pipe (int enable)
4212 {
4213 int previous = linux_is_async_p ();
4214
4215 if (previous != enable)
4216 {
4217 sigset_t prev_mask;
4218
4219 /* Block child signals while we create/destroy the pipe, as
4220 their handler writes to it. */
4221 block_child_signals (&prev_mask);
4222
4223 if (enable)
4224 {
4225 if (gdb_pipe_cloexec (linux_nat_event_pipe) == -1)
4226 internal_error (__FILE__, __LINE__,
4227 "creating event pipe failed.");
4228
4229 fcntl (linux_nat_event_pipe[0], F_SETFL, O_NONBLOCK);
4230 fcntl (linux_nat_event_pipe[1], F_SETFL, O_NONBLOCK);
4231 }
4232 else
4233 {
4234 close (linux_nat_event_pipe[0]);
4235 close (linux_nat_event_pipe[1]);
4236 linux_nat_event_pipe[0] = -1;
4237 linux_nat_event_pipe[1] = -1;
4238 }
4239
4240 restore_child_signals_mask (&prev_mask);
4241 }
4242
4243 return previous;
4244 }
4245
4246 int
4247 linux_nat_target::async_wait_fd ()
4248 {
4249 return linux_nat_event_pipe[0];
4250 }
4251
4252 /* target_async implementation. */
4253
4254 void
4255 linux_nat_target::async (int enable)
4256 {
4257 if (enable)
4258 {
4259 if (!linux_async_pipe (1))
4260 {
4261 add_file_handler (linux_nat_event_pipe[0],
4262 handle_target_event, NULL,
4263 "linux-nat");
4264 /* There may be pending events to handle. Tell the event loop
4265 to poll them. */
4266 async_file_mark ();
4267 }
4268 }
4269 else
4270 {
4271 delete_file_handler (linux_nat_event_pipe[0]);
4272 linux_async_pipe (0);
4273 }
4274 return;
4275 }
4276
4277 /* Stop an LWP, and push a GDB_SIGNAL_0 stop status if no other
4278 event came out. */
4279
4280 static int
4281 linux_nat_stop_lwp (struct lwp_info *lwp)
4282 {
4283 if (!lwp->stopped)
4284 {
4285 linux_nat_debug_printf ("running -> suspending %s",
4286 target_pid_to_str (lwp->ptid).c_str ());
4287
4288
4289 if (lwp->last_resume_kind == resume_stop)
4290 {
4291 linux_nat_debug_printf ("already stopping LWP %ld at GDB's request",
4292 lwp->ptid.lwp ());
4293 return 0;
4294 }
4295
4296 stop_callback (lwp);
4297 lwp->last_resume_kind = resume_stop;
4298 }
4299 else
4300 {
4301 /* Already known to be stopped; do nothing. */
4302
4303 if (debug_linux_nat)
4304 {
4305 if (find_thread_ptid (linux_target, lwp->ptid)->stop_requested)
4306 linux_nat_debug_printf ("already stopped/stop_requested %s",
4307 target_pid_to_str (lwp->ptid).c_str ());
4308 else
4309 linux_nat_debug_printf ("already stopped/no stop_requested yet %s",
4310 target_pid_to_str (lwp->ptid).c_str ());
4311 }
4312 }
4313 return 0;
4314 }
4315
4316 void
4317 linux_nat_target::stop (ptid_t ptid)
4318 {
4319 iterate_over_lwps (ptid, linux_nat_stop_lwp);
4320 }
4321
4322 void
4323 linux_nat_target::close ()
4324 {
4325 /* Unregister from the event loop. */
4326 if (is_async_p ())
4327 async (0);
4328
4329 inf_ptrace_target::close ();
4330 }
4331
4332 /* When requests are passed down from the linux-nat layer to the
4333 single threaded inf-ptrace layer, ptids of (lwpid,0,0) form are
4334 used. The address space pointer is stored in the inferior object,
4335 but the common code that is passed such ptid can't tell whether
4336 lwpid is a "main" process id or not (it assumes so). We reverse
4337 look up the "main" process id from the lwp here. */
4338
4339 struct address_space *
4340 linux_nat_target::thread_address_space (ptid_t ptid)
4341 {
4342 struct lwp_info *lwp;
4343 struct inferior *inf;
4344 int pid;
4345
4346 if (ptid.lwp () == 0)
4347 {
4348 /* An (lwpid,0,0) ptid. Look up the lwp object to get at the
4349 tgid. */
4350 lwp = find_lwp_pid (ptid);
4351 pid = lwp->ptid.pid ();
4352 }
4353 else
4354 {
4355 /* A (pid,lwpid,0) ptid. */
4356 pid = ptid.pid ();
4357 }
4358
4359 inf = find_inferior_pid (this, pid);
4360 gdb_assert (inf != NULL);
4361 return inf->aspace;
4362 }
4363
4364 /* Return the cached value of the processor core for thread PTID. */
4365
4366 int
4367 linux_nat_target::core_of_thread (ptid_t ptid)
4368 {
4369 struct lwp_info *info = find_lwp_pid (ptid);
4370
4371 if (info)
4372 return info->core;
4373 return -1;
4374 }
4375
4376 /* Implementation of to_filesystem_is_local. */
4377
4378 bool
4379 linux_nat_target::filesystem_is_local ()
4380 {
4381 struct inferior *inf = current_inferior ();
4382
4383 if (inf->fake_pid_p || inf->pid == 0)
4384 return true;
4385
4386 return linux_ns_same (inf->pid, LINUX_NS_MNT);
4387 }
4388
4389 /* Convert the INF argument passed to a to_fileio_* method
4390 to a process ID suitable for passing to its corresponding
4391 linux_mntns_* function. If INF is non-NULL then the
4392 caller is requesting the filesystem seen by INF. If INF
4393 is NULL then the caller is requesting the filesystem seen
4394 by the GDB. We fall back to GDB's filesystem in the case
4395 that INF is non-NULL but its PID is unknown. */
4396
4397 static pid_t
4398 linux_nat_fileio_pid_of (struct inferior *inf)
4399 {
4400 if (inf == NULL || inf->fake_pid_p || inf->pid == 0)
4401 return getpid ();
4402 else
4403 return inf->pid;
4404 }
4405
4406 /* Implementation of to_fileio_open. */
4407
4408 int
4409 linux_nat_target::fileio_open (struct inferior *inf, const char *filename,
4410 int flags, int mode, int warn_if_slow,
4411 int *target_errno)
4412 {
4413 int nat_flags;
4414 mode_t nat_mode;
4415 int fd;
4416
4417 if (fileio_to_host_openflags (flags, &nat_flags) == -1
4418 || fileio_to_host_mode (mode, &nat_mode) == -1)
4419 {
4420 *target_errno = FILEIO_EINVAL;
4421 return -1;
4422 }
4423
4424 fd = linux_mntns_open_cloexec (linux_nat_fileio_pid_of (inf),
4425 filename, nat_flags, nat_mode);
4426 if (fd == -1)
4427 *target_errno = host_to_fileio_error (errno);
4428
4429 return fd;
4430 }
4431
4432 /* Implementation of to_fileio_readlink. */
4433
4434 gdb::optional<std::string>
4435 linux_nat_target::fileio_readlink (struct inferior *inf, const char *filename,
4436 int *target_errno)
4437 {
4438 char buf[PATH_MAX];
4439 int len;
4440
4441 len = linux_mntns_readlink (linux_nat_fileio_pid_of (inf),
4442 filename, buf, sizeof (buf));
4443 if (len < 0)
4444 {
4445 *target_errno = host_to_fileio_error (errno);
4446 return {};
4447 }
4448
4449 return std::string (buf, len);
4450 }
4451
4452 /* Implementation of to_fileio_unlink. */
4453
4454 int
4455 linux_nat_target::fileio_unlink (struct inferior *inf, const char *filename,
4456 int *target_errno)
4457 {
4458 int ret;
4459
4460 ret = linux_mntns_unlink (linux_nat_fileio_pid_of (inf),
4461 filename);
4462 if (ret == -1)
4463 *target_errno = host_to_fileio_error (errno);
4464
4465 return ret;
4466 }
4467
4468 /* Implementation of the to_thread_events method. */
4469
4470 void
4471 linux_nat_target::thread_events (int enable)
4472 {
4473 report_thread_events = enable;
4474 }
4475
4476 linux_nat_target::linux_nat_target ()
4477 {
4478 /* We don't change the stratum; this target will sit at
4479 process_stratum and thread_db will set at thread_stratum. This
4480 is a little strange, since this is a multi-threaded-capable
4481 target, but we want to be on the stack below thread_db, and we
4482 also want to be used for single-threaded processes. */
4483 }
4484
4485 /* See linux-nat.h. */
4486
4487 int
4488 linux_nat_get_siginfo (ptid_t ptid, siginfo_t *siginfo)
4489 {
4490 int pid;
4491
4492 pid = ptid.lwp ();
4493 if (pid == 0)
4494 pid = ptid.pid ();
4495
4496 errno = 0;
4497 ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, siginfo);
4498 if (errno != 0)
4499 {
4500 memset (siginfo, 0, sizeof (*siginfo));
4501 return 0;
4502 }
4503 return 1;
4504 }
4505
4506 /* See nat/linux-nat.h. */
4507
4508 ptid_t
4509 current_lwp_ptid (void)
4510 {
4511 gdb_assert (inferior_ptid.lwp_p ());
4512 return inferior_ptid;
4513 }
4514
/* Module initialization: register the "set/show debug" commands this
   module provides, install the SIGCHLD handler and the signal masks
   used by the wait machinery, and create the LWP hash table.  */

void _initialize_linux_nat ();
void
_initialize_linux_nat ()
{
  add_setshow_zuinteger_cmd ("lin-lwp", class_maintenance,
			     &debug_linux_nat, _("\
Set debugging of GNU/Linux lwp module."), _("\
Show debugging of GNU/Linux lwp module."), _("\
Enables printf debugging output."),
			     NULL,
			     show_debug_linux_nat,
			     &setdebuglist, &showdebuglist);

  add_setshow_boolean_cmd ("linux-namespaces", class_maintenance,
			   &debug_linux_namespaces, _("\
Set debugging of GNU/Linux namespaces module."), _("\
Show debugging of GNU/Linux namespaces module."), _("\
Enables printf debugging output."),
			   NULL,
			   NULL,
			   &setdebuglist, &showdebuglist);

  /* Install a SIGCHLD handler.  */
  sigchld_action.sa_handler = sigchld_handler;
  sigemptyset (&sigchld_action.sa_mask);
  /* SA_RESTART so interrupted syscalls are transparently retried.  */
  sigchld_action.sa_flags = SA_RESTART;

  /* Make it the default.  */
  sigaction (SIGCHLD, &sigchld_action, NULL);

  /* Make sure we don't block SIGCHLD during a sigsuspend.  */
  gdb_sigmask (SIG_SETMASK, NULL, &suspend_mask);
  sigdelset (&suspend_mask, SIGCHLD);

  sigemptyset (&blocked_mask);

  /* Create the hash table mapping LWP ids to lwp_info objects.  */
  lwp_lwpid_htab_create ();
}
4553 \f
4554
/* FIXME: kettenis/2000-08-26: The stuff on this page is specific to
   the GNU/Linux Threads library and therefore doesn't really belong
   here.  */

/* NPTL reserves the first two RT signals for its internal use, but
   does not provide any way for the debugger to query the signal
   numbers - fortunately they don't change.  */
static int lin_thread_signals[] = { __SIGRTMIN, __SIGRTMIN + 1 };
4563
4564 /* See linux-nat.h. */
4565
4566 unsigned int
4567 lin_thread_get_thread_signal_num (void)
4568 {
4569 return sizeof (lin_thread_signals) / sizeof (lin_thread_signals[0]);
4570 }
4571
4572 /* See linux-nat.h. */
4573
4574 int
4575 lin_thread_get_thread_signal (unsigned int i)
4576 {
4577 gdb_assert (i < lin_thread_get_thread_signal_num ());
4578 return lin_thread_signals[i];
4579 }